Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 64
-rw-r--r-- drivers/net/bonding/bond_main.c | 24
-rw-r--r-- drivers/net/can/Kconfig | 11
-rw-r--r-- drivers/net/can/Makefile | 2
-rw-r--r-- drivers/net/can/at91_can.c | 5
-rw-r--r-- drivers/net/can/c_can/c_can.c | 38
-rw-r--r-- drivers/net/can/dev.c | 149
-rw-r--r-- drivers/net/can/rcar/Kconfig | 21
-rw-r--r-- drivers/net/can/rcar/Makefile | 6
-rw-r--r-- drivers/net/can/rcar/rcar_can.c (renamed from drivers/net/can/rcar_can.c) | 0
-rw-r--r-- drivers/net/can/rcar/rcar_canfd.c | 1858
-rw-r--r-- drivers/net/can/sja1000/tscan1.c | 12
-rw-r--r-- drivers/net/can/slcan.c | 4
-rw-r--r-- drivers/net/can/spi/mcp251x.c | 7
-rw-r--r-- drivers/net/can/usb/Kconfig | 5
-rw-r--r-- drivers/net/can/usb/gs_usb.c | 155
-rw-r--r-- drivers/net/can/usb/kvaser_usb.c | 8
-rw-r--r-- drivers/net/dsa/Kconfig | 12
-rw-r--r-- drivers/net/dsa/Makefile | 4
-rw-r--r-- drivers/net/dsa/b53/Kconfig | 33
-rw-r--r-- drivers/net/dsa/b53/Makefile | 6
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 1787
-rw-r--r-- drivers/net/dsa/b53/b53_mdio.c | 392
-rw-r--r-- drivers/net/dsa/b53/b53_mmap.c | 274
-rw-r--r-- drivers/net/dsa/b53/b53_priv.h | 387
-rw-r--r-- drivers/net/dsa/b53/b53_regs.h | 434
-rw-r--r-- drivers/net/dsa/b53/b53_spi.c | 331
-rw-r--r-- drivers/net/dsa/b53/b53_srab.c | 415
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 697
-rw-r--r-- drivers/net/dsa/bcm_sf2.h | 16
-rw-r--r-- drivers/net/dsa/bcm_sf2_regs.h | 70
-rw-r--r-- drivers/net/dsa/mv88e6xxx/Kconfig | 7
-rw-r--r-- drivers/net/dsa/mv88e6xxx/Makefile | 1
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c (renamed from drivers/net/dsa/mv88e6xxx.c) | 1733
-rw-r--r-- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h (renamed from drivers/net/dsa/mv88e6xxx.h) | 41
-rw-r--r-- drivers/net/ethernet/8390/ax88796.c | 40
-rw-r--r-- drivers/net/ethernet/agere/et131x.c | 60
-rw-r--r-- drivers/net/ethernet/allwinner/sun4i-emac.c | 54
-rw-r--r-- drivers/net/ethernet/altera/altera_tse.h | 1
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_ethtool.c | 26
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_main.c | 16
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 4
-rw-r--r-- drivers/net/ethernet/arc/emac.h | 1
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 65
-rw-r--r-- drivers/net/ethernet/atheros/alx/alx.h | 4
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 61
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 69
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 21
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 49
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 160
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.h | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 245
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 483
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 82
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 253
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 87
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 65
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 24
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 54
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn66xx_device.c | 61
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn66xx_device.h | 5
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn68xx_device.c | 13
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn68xx_device.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 1009
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 1416
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 408
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_config.h | 16
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_console.c | 50
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 262
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.h | 52
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 213
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 41
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 85
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_main.h | 25
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c | 24
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_network.h | 252
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_nic.c | 67
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_nic.h | 154
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/request_manager.c | 313
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/response_manager.c | 30
-rw-r--r-- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 103
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_main.c | 16
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 91
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 82
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 10
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 55
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 12
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 28
-rw-r--r-- drivers/net/ethernet/dnet.c | 48
-rw-r--r-- drivers/net/ethernet/dnet.h | 1
-rw-r--r-- drivers/net/ethernet/emulex/benet/Kconfig | 8
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 58
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 160
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 16
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 66
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 326
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_roce.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_roce.h | 2
-rw-r--r-- drivers/net/ethernet/ethoc.c | 7
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 42
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 44
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.c | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.h | 15
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 54
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 41
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 234
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 12
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 52
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 21
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 70
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 61
-rw-r--r-- drivers/net/ethernet/hisilicon/hns_mdio.c | 80
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 14
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 43
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 18
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 5
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 36
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 56
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 359
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 8
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 7
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 12
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 92
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 133
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 1
-rw-r--r-- drivers/net/ethernet/lantiq_etop.c | 37
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2.c | 48
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 274
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/Kconfig | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 277
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 116
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/intf.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mcg.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mr.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 174
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 82
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 160
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 573
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 101
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 586
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 866
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 394
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 335
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 245
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 259
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 78
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 561
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 313
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/rl.c | 209
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vxlan.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 1206
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1851
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 225
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 91
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 1814
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 446
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 5
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 41
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 4
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 45
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 30
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 1623
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 28
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 104
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 47
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.c | 23
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.c | 75
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 80
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c | 31
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 488
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.c | 95
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.h | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qede/Makefile | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 348
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 102
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 266
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 30
-rw-r--r-- drivers/net/ethernet/rdc/r6040.c | 91
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 3
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h | 1
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 31
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 32
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 744
-rw-r--r-- drivers/net/ethernet/sfc/ef10_sriov.c | 44
-rw-r--r-- drivers/net/ethernet/sfc/ef10_sriov.h | 3
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 66
-rw-r--r-- drivers/net/ethernet/sfc/efx.h | 9
-rw-r--r-- drivers/net/ethernet/sfc/farch.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_pcol.h | 1327
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_port.c | 7
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 19
-rw-r--r-- drivers/net/ethernet/sfc/nic.h | 5
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 82
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 19
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 165
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 86
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 147
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 43
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 98
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 60
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 56
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h | 159
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 10
-rw-r--r-- drivers/net/ethernet/synopsys/dwc_eth_qos.c | 70
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 100
-rw-r--r-- drivers/net/ethernet/ti/cpsw.h | 1
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.c | 137
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.h | 1
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 92
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 169
-rw-r--r-- drivers/net/ethernet/ti/tlan.c | 1
-rw-r--r-- drivers/net/ethernet/tile/tilegx.c | 6
-rw-r--r-- drivers/net/ethernet/toshiba/tc35815.c | 10
-rw-r--r-- drivers/net/ethernet/xircom/xirc2ps_cs.c | 4
-rw-r--r-- drivers/net/ethernet/xscale/ixp4xx_eth.c | 46
-rw-r--r-- drivers/net/fddi/skfp/Makefile | 2
-rw-r--r-- drivers/net/geneve.c | 83
-rw-r--r-- drivers/net/gtp.c | 1
-rw-r--r-- drivers/net/hamradio/baycom_par.c | 6
-rw-r--r-- drivers/net/hyperv/netvsc.c | 14
-rw-r--r-- drivers/net/hyperv/rndis_filter.c | 117
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 22
-rw-r--r-- drivers/net/macsec.c | 49
-rw-r--r-- drivers/net/macvlan.c | 11
-rw-r--r-- drivers/net/macvtap.c | 95
-rw-r--r-- drivers/net/phy/Kconfig | 23
-rw-r--r-- drivers/net/phy/Makefile | 5
-rw-r--r-- drivers/net/phy/fixed_phy.c | 175
-rw-r--r-- drivers/net/phy/intel-xway.c | 376
-rw-r--r-- drivers/net/phy/marvell.c | 82
-rw-r--r-- drivers/net/phy/mdio-mux-bcm-iproc.c | 248
-rw-r--r-- drivers/net/phy/mdio-mux-gpio.c | 2
-rw-r--r-- drivers/net/phy/mdio-mux-mmioreg.c | 2
-rw-r--r-- drivers/net/phy/mdio-mux.c | 26
-rw-r--r-- drivers/net/phy/smsc.c | 17
-rw-r--r-- drivers/net/phy/swphy.c | 179
-rw-r--r-- drivers/net/phy/swphy.h | 9
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 3
-rw-r--r-- drivers/net/team/team.c | 23
-rw-r--r-- drivers/net/tun.c | 237
-rw-r--r-- drivers/net/usb/r8152.c | 96
-rw-r--r-- drivers/net/virtio_net.c | 90
-rw-r--r-- drivers/net/vmxnet3/Makefile | 4
-rw-r--r-- drivers/net/vmxnet3/upt1_defs.h | 4
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_defs.h | 105
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 287
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_ethtool.c | 215
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 54
-rw-r--r-- drivers/net/vrf.c | 346
-rw-r--r-- drivers/net/vxlan.c | 143
-rw-r--r-- drivers/net/wan/Kconfig | 22
-rw-r--r-- drivers/net/wan/Makefile | 2
-rw-r--r-- drivers/net/wan/fsl_ucc_hdlc.c | 1192
-rw-r--r-- drivers/net/wan/fsl_ucc_hdlc.h | 147
-rw-r--r-- drivers/net/wan/slic_ds26522.c | 255
-rw-r--r-- drivers/net/wan/slic_ds26522.h | 134
-rw-r--r-- drivers/net/wireless/ath/ath10k/ahb.c | 11
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.c | 107
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.h | 24
-rw-r--r-- drivers/net/wireless/ath/ath10k/debug.c | 32
-rw-r--r-- drivers/net/wireless/ath/ath10k/hif.h | 14
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt.h | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_rx.c | 37
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.c | 26
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.h | 73
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.c | 61
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.c | 245
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.h | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/rx_desc.h | 87
-rw-r--r-- drivers/net/wireless/ath/ath10k/targaddrs.h | 3
-rw-r--r-- drivers/net/wireless/ath/ath10k/txrx.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath5k/pcu.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath6kl/core.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath6kl/wmi.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/reg.h | 8
-rw-r--r-- drivers/net/wireless/ath/ath9k/tx99.c | 3
-rw-r--r-- drivers/net/wireless/ath/carl9170/Kconfig | 8
-rw-r--r-- drivers/net/wireless/ath/wil6210/cfg80211.c | 16
-rw-r--r-- drivers/net/wireless/ath/wil6210/debug.c | 46
-rw-r--r-- drivers/net/wireless/ath/wil6210/p2p.c | 6
-rw-r--r-- drivers/net/wireless/ath/wil6210/pcie_bus.c | 68
-rw-r--r-- drivers/net/wireless/ath/wil6210/pm.c | 25
-rw-r--r-- drivers/net/wireless/ath/wil6210/txrx.c | 42
-rw-r--r-- drivers/net/wireless/ath/wil6210/wil6210.h | 6
-rw-r--r-- drivers/net/wireless/ath/wil6210/wil_platform.h | 4
-rw-r--r-- drivers/net/wireless/broadcom/b43/Makefile | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43/leds.c | 8
-rw-r--r-- drivers/net/wireless/broadcom/b43/main.c | 31
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_a.c | 595
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_a.h | 22
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_common.h | 3
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_g.c | 25
-rw-r--r-- drivers/net/wireless/broadcom/b43/wa.c | 283
-rw-r--r-- drivers/net/wireless/broadcom/b43/xmit.c | 30
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 47
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 180
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 8
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 26
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 17
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 5
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h | 3
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c | 18
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h | 22
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/3945.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 16
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 98
-rw-r--r-- drivers/net/wireless/marvell/libertas/if_sdio.c | 3
-rw-r--r-- drivers/net/wireless/marvell/libertas/if_spi.c | 4
-rw-r--r-- drivers/net/wireless/marvell/libertas_tf/main.c | 7
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_aggr.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/init.c | 1
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/join.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.c | 6
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.h | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/pcie.c | 9
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sdio.c | 64
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 5
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_event.c | 12
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/core.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c | 17
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 16
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 16
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c | 13
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c | 20
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 13
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 16
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 15
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 2
-rw-r--r-- drivers/net/wl3501_cs.c | 31
419 files changed, 34557 insertions(+), 10482 deletions(-)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b9304a295f86..ca81f46ea1aa 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -657,6 +657,20 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
}
}
+static int __agg_active_ports(struct aggregator *agg)
+{
+ struct port *port;
+ int active = 0;
+
+ for (port = agg->lag_ports; port;
+ port = port->next_port_in_aggregator) {
+ if (port->is_enabled)
+ active++;
+ }
+
+ return active;
+}
+
/**
* __get_agg_bandwidth - get the total bandwidth of an aggregator
* @aggregator: the aggregator we're looking at
@@ -664,39 +678,40 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
*/
static u32 __get_agg_bandwidth(struct aggregator *aggregator)
{
+ int nports = __agg_active_ports(aggregator);
u32 bandwidth = 0;
- if (aggregator->num_of_ports) {
+ if (nports) {
switch (__get_link_speed(aggregator->lag_ports)) {
case AD_LINK_SPEED_1MBPS:
- bandwidth = aggregator->num_of_ports;
+ bandwidth = nports;
break;
case AD_LINK_SPEED_10MBPS:
- bandwidth = aggregator->num_of_ports * 10;
+ bandwidth = nports * 10;
break;
case AD_LINK_SPEED_100MBPS:
- bandwidth = aggregator->num_of_ports * 100;
+ bandwidth = nports * 100;
break;
case AD_LINK_SPEED_1000MBPS:
- bandwidth = aggregator->num_of_ports * 1000;
+ bandwidth = nports * 1000;
break;
case AD_LINK_SPEED_2500MBPS:
- bandwidth = aggregator->num_of_ports * 2500;
+ bandwidth = nports * 2500;
break;
case AD_LINK_SPEED_10000MBPS:
- bandwidth = aggregator->num_of_ports * 10000;
+ bandwidth = nports * 10000;
break;
case AD_LINK_SPEED_20000MBPS:
- bandwidth = aggregator->num_of_ports * 20000;
+ bandwidth = nports * 20000;
break;
case AD_LINK_SPEED_40000MBPS:
- bandwidth = aggregator->num_of_ports * 40000;
+ bandwidth = nports * 40000;
break;
case AD_LINK_SPEED_56000MBPS:
- bandwidth = aggregator->num_of_ports * 56000;
+ bandwidth = nports * 56000;
break;
case AD_LINK_SPEED_100000MBPS:
- bandwidth = aggregator->num_of_ports * 100000;
+ bandwidth = nports * 100000;
break;
default:
bandwidth = 0; /* to silence the compiler */
@@ -1530,10 +1545,10 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
switch (__get_agg_selection_mode(curr->lag_ports)) {
case BOND_AD_COUNT:
- if (curr->num_of_ports > best->num_of_ports)
+ if (__agg_active_ports(curr) > __agg_active_ports(best))
return curr;
- if (curr->num_of_ports < best->num_of_ports)
+ if (__agg_active_ports(curr) < __agg_active_ports(best))
return best;
/*FALLTHROUGH*/
@@ -1561,8 +1576,14 @@ static int agg_device_up(const struct aggregator *agg)
if (!port)
return 0;
- return netif_running(port->slave->dev) &&
- netif_carrier_ok(port->slave->dev);
+ for (port = agg->lag_ports; port;
+ port = port->next_port_in_aggregator) {
+ if (netif_running(port->slave->dev) &&
+ netif_carrier_ok(port->slave->dev))
+ return 1;
+ }
+
+ return 0;
}
/**
@@ -1610,7 +1631,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
agg->is_active = 0;
- if (agg->num_of_ports && agg_device_up(agg))
+ if (__agg_active_ports(agg) && agg_device_up(agg))
best = ad_agg_selection_test(best, agg);
}
@@ -1622,7 +1643,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
* answering partner.
*/
if (active && active->lag_ports &&
- active->lag_ports->is_enabled &&
+ __agg_active_ports(active) &&
(__agg_has_partner(active) ||
(!__agg_has_partner(active) &&
!__agg_has_partner(best)))) {
@@ -2133,7 +2154,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
else
temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
temp_aggregator->num_of_ports--;
- if (temp_aggregator->num_of_ports == 0) {
+ if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
@@ -2432,7 +2453,9 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
*/
void bond_3ad_handle_link_change(struct slave *slave, char link)
{
+ struct aggregator *agg;
struct port *port;
+ bool dummy;
port = &(SLAVE_AD_INFO(slave)->port);
@@ -2459,6 +2482,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->is_enabled = false;
ad_update_actor_keys(port, true);
}
+ agg = __get_first_agg(port);
+ ad_agg_selection_logic(agg, &dummy);
+
netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
port->actor_port_number,
link == BOND_LINK_UP ? "UP" : "DOWN");
@@ -2499,7 +2525,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
if (active) {
/* are enough slaves available to consider link up? */
- if (active->num_of_ports < bond->params.min_links) {
+ if (__agg_active_ports(active) < bond->params.min_links) {
if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
goto out;
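
The bond_3ad.c changes above make both the bandwidth figure and the
BOND_AD_COUNT aggregator selection count only ports whose link is actually
up, rather than every port attached to the aggregator. A minimal standalone
sketch of the effect (toy types for illustration, not the driver's own):

	#include <stdio.h>

	struct toy_port {
		int is_enabled;
		struct toy_port *next;
	};

	/* mirrors __agg_active_ports(): walk the list, count live links */
	static int toy_active_ports(const struct toy_port *p)
	{
		int n = 0;

		for (; p; p = p->next)
			if (p->is_enabled)
				n++;
		return n;
	}

	int main(void)
	{
		/* three 1000 Mbit/s ports, the last with carrier lost */
		struct toy_port c = { 0, NULL }, b = { 1, &c }, a = { 1, &b };

		/* prints 2000; num_of_ports would have given 3000 */
		printf("agg bandwidth: %d Mbit/s\n",
		       toy_active_ports(&a) * 1000);
		return 0;
	}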
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 941ec99cd3b6..480d73ac7d1b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4137,6 +4137,8 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
+ .ndo_neigh_construct = netdev_default_l2upper_neigh_construct,
+ .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
@@ -4607,26 +4609,6 @@ static int bond_check_params(struct bond_params *params)
return 0;
}
-static struct lock_class_key bonding_netdev_xmit_lock_key;
-static struct lock_class_key bonding_netdev_addr_lock_key;
-static struct lock_class_key bonding_tx_busylock_key;
-
-static void bond_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &bonding_netdev_xmit_lock_key);
-}
-
-static void bond_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock,
- &bonding_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL);
- dev->qdisc_tx_busylock = &bonding_tx_busylock_key;
-}
-
/* Called from registration process */
static int bond_init(struct net_device *bond_dev)
{
@@ -4639,7 +4621,7 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
- bond_set_lockdep_class(bond_dev);
+ netdev_lockdep_set_classes(bond_dev);
list_add_tail(&bond->bond_list, &bn->dev_list);
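
The deleted boilerplate above is subsumed by the generic
netdev_lockdep_set_classes() helper. As a rough sketch of the pattern being
replaced (names here are illustrative; the real helper lives in
include/linux/netdevice.h and may cover additional locks):

	static struct lock_class_key example_xmit_lock_key;
	static struct lock_class_key example_addr_lock_key;

	static void example_set_xmit_class(struct net_device *dev,
					   struct netdev_queue *txq,
					   void *unused)
	{
		/* one shared class for every tx queue of this device type */
		lockdep_set_class(&txq->_xmit_lock, &example_xmit_lock_key);
	}

	static void example_set_lockdep_classes(struct net_device *dev)
	{
		lockdep_set_class(&dev->addr_list_lock,
				  &example_addr_lock_key);
		netdev_for_each_tx_queue(dev, example_set_xmit_class, NULL);
	}

Centralizing this removes near-identical copies from stacked-device drivers
such as bonding.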
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 0d40aef928e2..22570ea3a8d2 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -104,16 +104,6 @@ config CAN_JANZ_ICAN3
This driver can also be built as a module. If so, the module will be
called janz-ican3.ko.
-config CAN_RCAR
- tristate "Renesas R-Car CAN controller"
- depends on ARCH_RENESAS || ARM
- ---help---
- Say Y here if you want to use CAN controller found on Renesas R-Car
- SoCs.
-
- To compile this driver as a module, choose M here: the module will
- be called rcar_can.
-
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
@@ -152,6 +142,7 @@ source "drivers/net/can/cc770/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
+source "drivers/net/can/rcar/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/softing/Kconfig"
source "drivers/net/can/spi/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index e3db0c807f55..26ba4b794a0b 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ can-dev-y := dev.o
can-dev-$(CONFIG_CAN_LEDS) += led.o
+obj-y += rcar/
obj-y += spi/
obj-y += usb/
obj-y += softing/
@@ -24,7 +25,6 @@ obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_M_CAN) += m_can/
-obj-$(CONFIG_CAN_RCAR) += rcar_can.o
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 8b3275d7792a..8f5e93cb7975 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -712,9 +712,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
/* upper group completed, look again in lower */
if (priv->rx_next > get_mb_rx_low_last(priv) &&
- quota > 0 && mb > get_mb_rx_last(priv)) {
+ mb > get_mb_rx_last(priv)) {
priv->rx_next = get_mb_rx_first(priv);
- goto again;
+ if (quota > 0)
+ goto again;
}
return received;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f91b094288da..e3dccd3200d5 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
- for (i = 0; i < frame->can_dlc; i += 2) {
- priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
- frame->data[i] | (frame->data[i + 1] << 8));
+ if (priv->type == BOSCH_D_CAN) {
+ u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = (u32)frame->data[i];
+ data |= (u32)frame->data[i + 1] << 8;
+ data |= (u32)frame->data[i + 2] << 16;
+ data |= (u32)frame->data[i + 3] << 24;
+ priv->write_reg32(priv, dreg, data);
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ priv->write_reg(priv,
+ C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+ frame->data[i] |
+ (frame->data[i + 1] << 8));
+ }
}
}
@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
} else {
int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
- for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
- data = priv->read_reg(priv, dreg);
- frame->data[i] = data;
- frame->data[i + 1] = data >> 8;
+ if (priv->type == BOSCH_D_CAN) {
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = priv->read_reg32(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ frame->data[i + 2] = data >> 16;
+ frame->data[i + 3] = data >> 24;
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
+ data = priv->read_reg(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ }
}
}
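
The D_CAN branches above move frame data four bytes per register access
instead of two, halving the accesses per frame. A standalone sketch of the
little-endian packing used in the Tx path (plain C, outside the kernel):

	#include <stdint.h>
	#include <stdio.h>

	/* same shifts as the BOSCH_D_CAN branch of c_can_setup_tx_object() */
	static uint32_t pack_le32(const uint8_t *d)
	{
		return (uint32_t)d[0] | ((uint32_t)d[1] << 8) |
		       ((uint32_t)d[2] << 16) | ((uint32_t)d[3] << 24);
	}

	int main(void)
	{
		uint8_t data[4] = { 0x11, 0x22, 0x33, 0x44 };

		printf("0x%08x\n", pack_le32(data)); /* prints 0x44332211 */
		return 0;
	}

The Rx path performs the inverse unpacking with the matching right shifts.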
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 910c12e2638e..e21f7cc5ae4d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -69,6 +69,7 @@ EXPORT_SYMBOL_GPL(can_len2dlc);
#ifdef CONFIG_CAN_CALC_BITTIMING
#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+#define CAN_CALC_SYNC_SEG 1
/*
* Bit-timing calculation derived from:
@@ -83,98 +84,126 @@ EXPORT_SYMBOL_GPL(can_len2dlc);
* registers of the CAN controller. You can find more information
* in the header file linux/can/netlink.h.
*/
-static int can_update_spt(const struct can_bittiming_const *btc,
- int sampl_pt, int tseg, int *tseg1, int *tseg2)
+static int can_update_sample_point(const struct can_bittiming_const *btc,
+ unsigned int sample_point_nominal, unsigned int tseg,
+ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+ unsigned int *sample_point_error_ptr)
{
- *tseg2 = tseg + 1 - (sampl_pt * (tseg + 1)) / 1000;
- if (*tseg2 < btc->tseg2_min)
- *tseg2 = btc->tseg2_min;
- if (*tseg2 > btc->tseg2_max)
- *tseg2 = btc->tseg2_max;
- *tseg1 = tseg - *tseg2;
- if (*tseg1 > btc->tseg1_max) {
- *tseg1 = btc->tseg1_max;
- *tseg2 = tseg - *tseg1;
+ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+ unsigned int sample_point, best_sample_point = 0;
+ unsigned int tseg1, tseg2;
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ tseg2 = tseg + CAN_CALC_SYNC_SEG - (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / 1000 - i;
+ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+ tseg1 = tseg - tseg2;
+ if (tseg1 > btc->tseg1_max) {
+ tseg1 = btc->tseg1_max;
+ tseg2 = tseg - tseg1;
+ }
+
+ sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / (tseg + CAN_CALC_SYNC_SEG);
+ sample_point_error = abs(sample_point_nominal - sample_point);
+
+ if ((sample_point <= sample_point_nominal) && (sample_point_error < best_sample_point_error)) {
+ best_sample_point = sample_point;
+ best_sample_point_error = sample_point_error;
+ *tseg1_ptr = tseg1;
+ *tseg2_ptr = tseg2;
+ }
}
- return 1000 * (tseg + 1 - *tseg2) / (tseg + 1);
+
+ if (sample_point_error_ptr)
+ *sample_point_error_ptr = best_sample_point_error;
+
+ return best_sample_point;
}
static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc)
{
struct can_priv *priv = netdev_priv(dev);
- long best_error = 1000000000, error = 0;
- int best_tseg = 0, best_brp = 0, brp = 0;
- int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0;
- int spt_error = 1000, spt = 0, sampl_pt;
- long rate;
+ unsigned int bitrate; /* current bitrate */
+ unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int best_bitrate_error = UINT_MAX;
+ unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int best_sample_point_error = UINT_MAX;
+ unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int best_tseg = 0; /* current best value for tseg */
+ unsigned int best_brp = 0; /* current best value for brp */
+ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
u64 v64;
/* Use CiA recommended sample points */
if (bt->sample_point) {
- sampl_pt = bt->sample_point;
+ sample_point_nominal = bt->sample_point;
} else {
if (bt->bitrate > 800000)
- sampl_pt = 750;
+ sample_point_nominal = 750;
else if (bt->bitrate > 500000)
- sampl_pt = 800;
+ sample_point_nominal = 800;
else
- sampl_pt = 875;
+ sample_point_nominal = 875;
}
/* tseg even = round down, odd = round up */
for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = 1 + tseg / 2;
+ tsegall = CAN_CALC_SYNC_SEG + tseg / 2;
+
/* Compute all possible tseg choices (tseg=tseg1+tseg2) */
brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
- /* chose brp step which is possible in system */
+
+ /* choose brp step which is possible in system */
brp = (brp / btc->brp_inc) * btc->brp_inc;
if ((brp < btc->brp_min) || (brp > btc->brp_max))
continue;
- rate = priv->clock.freq / (brp * tsegall);
- error = bt->bitrate - rate;
+
+ bitrate = priv->clock.freq / (brp * tsegall);
+ bitrate_error = abs(bt->bitrate - bitrate);
+
/* tseg brp biterror */
- if (error < 0)
- error = -error;
- if (error > best_error)
+ if (bitrate_error > best_bitrate_error)
continue;
- best_error = error;
- if (error == 0) {
- spt = can_update_spt(btc, sampl_pt, tseg / 2,
- &tseg1, &tseg2);
- error = sampl_pt - spt;
- if (error < 0)
- error = -error;
- if (error > spt_error)
- continue;
- spt_error = error;
- }
+
+ /* reset sample point error if we have a better bitrate */
+ if (bitrate_error < best_bitrate_error)
+ best_sample_point_error = UINT_MAX;
+
+ can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error);
+ if (sample_point_error > best_sample_point_error)
+ continue;
+
+ best_sample_point_error = sample_point_error;
+ best_bitrate_error = bitrate_error;
best_tseg = tseg / 2;
best_brp = brp;
- if (error == 0)
+
+ if (bitrate_error == 0 && sample_point_error == 0)
break;
}
- if (best_error) {
+ if (best_bitrate_error) {
/* Error in one-tenth of a percent */
- error = (best_error * 1000) / bt->bitrate;
- if (error > CAN_CALC_MAX_ERROR) {
+ v64 = (u64)best_bitrate_error * 1000;
+ do_div(v64, bt->bitrate);
+ bitrate_error = (u32)v64;
+ if (bitrate_error > CAN_CALC_MAX_ERROR) {
netdev_err(dev,
- "bitrate error %ld.%ld%% too high\n",
- error / 10, error % 10);
+ "bitrate error %d.%d%% too high\n",
+ bitrate_error / 10, bitrate_error % 10);
return -EDOM;
- } else {
- netdev_warn(dev, "bitrate error %ld.%ld%%\n",
- error / 10, error % 10);
}
+ netdev_warn(dev, "bitrate error %d.%d%%\n",
+ bitrate_error / 10, bitrate_error % 10);
}
/* real sample point */
- bt->sample_point = can_update_spt(btc, sampl_pt, best_tseg,
- &tseg1, &tseg2);
+ bt->sample_point = can_update_sample_point(btc, sample_point_nominal, best_tseg,
+ &tseg1, &tseg2, NULL);
- v64 = (u64)best_brp * 1000000000UL;
+ v64 = (u64)best_brp * 1000 * 1000 * 1000;
do_div(v64, priv->clock.freq);
bt->tq = (u32)v64;
bt->prop_seg = tseg1 / 2;
@@ -182,9 +211,9 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
bt->phase_seg2 = tseg2;
/* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max)
+ if (!bt->sjw || !btc->sjw_max) {
bt->sjw = 1;
- else {
+ } else {
/* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
if (bt->sjw > btc->sjw_max)
bt->sjw = btc->sjw_max;
@@ -194,8 +223,9 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
}
bt->brp = best_brp;
- /* real bit-rate */
- bt->bitrate = priv->clock.freq / (bt->brp * (tseg1 + tseg2 + 1));
+
+ /* real bitrate */
+ bt->bitrate = priv->clock.freq / (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2));
return 0;
}
@@ -798,6 +828,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
* - control mode with CAN_CTRLMODE_FD set
*/
+ if (!data)
+ return 0;
+
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
@@ -1008,6 +1041,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
return -EOPNOTSUPP;
}
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+ return;
+}
+
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.maxtype = IFLA_CAN_MAX,
@@ -1016,6 +1054,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
.validate = can_validate,
.newlink = can_newlink,
.changelink = can_changelink,
+ .dellink = can_dellink,
.get_size = can_get_size,
.fill_info = can_fill_info,
.get_xstats_size = can_get_xstats_size,
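
To see the reworked sample-point math in dev.c with concrete numbers, here
is a standalone sketch. The clock, bitrate, and tseg values are assumed
purely for illustration; the kernel code additionally clamps tseg1/tseg2 to
the controller's limits and retries with tseg2 rounded the other way:

	#include <stdio.h>

	#define SYNC_SEG 1 /* as CAN_CALC_SYNC_SEG above */

	int main(void)
	{
		unsigned int clock = 16000000, bitrate = 125000;
		unsigned int spt_nominal = 875; /* 87.5%, the CiA value up to 500 kbit/s */
		unsigned int tseg = 15;         /* tseg1 + tseg2, one candidate of the scan */
		unsigned int tsegall = SYNC_SEG + tseg;
		unsigned int brp = clock / (tsegall * bitrate);
		unsigned int tseg2 = tsegall - (spt_nominal * tsegall) / 1000;

		printf("brp=%u bitrate=%u tseg1=%u tseg2=%u sample_point=%u\n",
		       brp, clock / (brp * tsegall), tseg - tseg2, tseg2,
		       1000 * (tsegall - tseg2) / tsegall);
		/* brp=8 bitrate=125000 tseg1=13 tseg2=2 sample_point=875 */
		return 0;
	}

Both the bitrate error and the sample point error are zero for this
candidate, so the scan in can_calc_bittiming() would break out early here.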
diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig
new file mode 100644
index 000000000000..7b03a3a37db7
--- /dev/null
+++ b/drivers/net/can/rcar/Kconfig
@@ -0,0 +1,21 @@
+config CAN_RCAR
+ tristate "Renesas R-Car CAN controller"
+ depends on ARCH_RENESAS || ARM
+ ---help---
+ Say Y here if you want to use CAN controller found on Renesas R-Car
+ SoCs.
+
+ To compile this driver as a module, choose M here: the module will
+ be called rcar_can.
+
+config CAN_RCAR_CANFD
+ tristate "Renesas R-Car CAN FD controller"
+ depends on ARCH_RENESAS || ARM
+ ---help---
+ Say Y here if you want to use CAN FD controller found on
+ Renesas R-Car SoCs. The driver puts the controller in CAN FD only
+ mode, which can interoperate with CAN2.0 nodes but does not support
+ dedicated CAN 2.0 mode.
+
+ To compile this driver as a module, choose M here: the module will
+ be called rcar_canfd.
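
With the move into drivers/net/can/rcar/, the Classical CAN and CAN FD
drivers are selected independently, so a configuration may enable either or
both, e.g. (hypothetical .config fragment):

	CONFIG_CAN_RCAR=m
	CONFIG_CAN_RCAR_CANFD=m

The two options build separate modules (rcar_can and rcar_canfd) for what
are separate controllers on these SoCs.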
diff --git a/drivers/net/can/rcar/Makefile b/drivers/net/can/rcar/Makefile
new file mode 100644
index 000000000000..08de36a4cfcc
--- /dev/null
+++ b/drivers/net/can/rcar/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Renesas R-Car CAN & CAN FD controller drivers
+#
+
+obj-$(CONFIG_CAN_RCAR) += rcar_can.o
+obj-$(CONFIG_CAN_RCAR_CANFD) += rcar_canfd.o
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 788459f6bf5c..788459f6bf5c 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
new file mode 100644
index 000000000000..43cdd5544b0c
--- /dev/null
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -0,0 +1,1858 @@
+/* Renesas R-Car CAN FD device driver
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/* The R-Car CAN FD controller can operate in either one of the below two modes
+ * - CAN FD only mode
+ * - Classical CAN (CAN 2.0) only mode
+ *
+ * This driver puts the controller in CAN FD only mode by default. In this
+ * mode, the controller acts as a CAN FD node that can also interoperate with
+ * CAN 2.0 nodes.
+ *
+ * To switch the controller to Classical CAN (CAN 2.0) only mode, add
+ * "renesas,no-can-fd" optional property to the device tree node. A h/w reset is
+ * also required to switch modes.
+ *
+ * Note: The h/w manual register naming convention is clumsy and not acceptable
+ * to use as it is in the driver. However, those names are added as comments
+ * wherever it is modified to a readable name.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/can/led.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+
+#define RCANFD_DRV_NAME "rcar_canfd"
+
+/* Global register bits */
+
+/* RSCFDnCFDGRMCFG */
+#define RCANFD_GRMCFG_RCMC BIT(0)
+
+/* RSCFDnCFDGCFG / RSCFDnGCFG */
+#define RCANFD_GCFG_EEFE BIT(6)
+#define RCANFD_GCFG_CMPOC BIT(5) /* CAN FD only */
+#define RCANFD_GCFG_DCS BIT(4)
+#define RCANFD_GCFG_DCE BIT(1)
+#define RCANFD_GCFG_TPRI BIT(0)
+
+/* RSCFDnCFDGCTR / RSCFDnGCTR */
+#define RCANFD_GCTR_TSRST BIT(16)
+#define RCANFD_GCTR_CFMPOFIE BIT(11) /* CAN FD only */
+#define RCANFD_GCTR_THLEIE BIT(10)
+#define RCANFD_GCTR_MEIE BIT(9)
+#define RCANFD_GCTR_DEIE BIT(8)
+#define RCANFD_GCTR_GSLPR BIT(2)
+#define RCANFD_GCTR_GMDC_MASK (0x3)
+#define RCANFD_GCTR_GMDC_GOPM (0x0)
+#define RCANFD_GCTR_GMDC_GRESET (0x1)
+#define RCANFD_GCTR_GMDC_GTEST (0x2)
+
+/* RSCFDnCFDGSTS / RSCFDnGSTS */
+#define RCANFD_GSTS_GRAMINIT BIT(3)
+#define RCANFD_GSTS_GSLPSTS BIT(2)
+#define RCANFD_GSTS_GHLTSTS BIT(1)
+#define RCANFD_GSTS_GRSTSTS BIT(0)
+/* Non-operational status */
+#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* RSCFDnCFDGERFL / RSCFDnGERFL */
+#define RCANFD_GERFL_EEF1 BIT(17)
+#define RCANFD_GERFL_EEF0 BIT(16)
+#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
+#define RCANFD_GERFL_THLES BIT(2)
+#define RCANFD_GERFL_MES BIT(1)
+#define RCANFD_GERFL_DEF BIT(0)
+
+#define RCANFD_GERFL_ERR(gpriv, x) ((x) & (RCANFD_GERFL_EEF1 |\
+ RCANFD_GERFL_EEF0 | RCANFD_GERFL_MES |\
+ (gpriv->fdmode ?\
+ RCANFD_GERFL_CMPOF : 0)))
+
+/* AFL Rx rules registers */
+
+/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
+#define RCANFD_GAFLCFG_SETRNC(n, x) (((x) & 0xff) << (24 - n * 8))
+#define RCANFD_GAFLCFG_GETRNC(n, x) (((x) >> (24 - n * 8)) & 0xff)
+
+/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
+#define RCANFD_GAFLECTR_AFLDAE BIT(8)
+#define RCANFD_GAFLECTR_AFLPN(x) ((x) & 0x1f)
+
+/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
+#define RCANFD_GAFLID_GAFLLB BIT(29)
+
+/* RSCFDnCFDGAFLP1_j / RSCFDnGAFLP1_j */
+#define RCANFD_GAFLP1_GAFLFDP(x) (1 << (x))
+
+/* Channel register bits */
+
+/* RSCFDnCmCFG - Classical CAN only */
+#define RCANFD_CFG_SJW(x) (((x) & 0x3) << 24)
+#define RCANFD_CFG_TSEG2(x) (((x) & 0x7) << 20)
+#define RCANFD_CFG_TSEG1(x) (((x) & 0xf) << 16)
+#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0)
+
+/* RSCFDnCFDCmNCFG - CAN FD only */
+#define RCANFD_NCFG_NTSEG2(x) (((x) & 0x1f) << 24)
+#define RCANFD_NCFG_NTSEG1(x) (((x) & 0x7f) << 16)
+#define RCANFD_NCFG_NSJW(x) (((x) & 0x1f) << 11)
+#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
+
+/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
+#define RCANFD_CCTR_CTME BIT(24)
+#define RCANFD_CCTR_ERRD BIT(23)
+#define RCANFD_CCTR_BOM_MASK (0x3 << 21)
+#define RCANFD_CCTR_BOM_ISO (0x0 << 21)
+#define RCANFD_CCTR_BOM_BENTRY (0x1 << 21)
+#define RCANFD_CCTR_BOM_BEND (0x2 << 21)
+#define RCANFD_CCTR_TDCVFIE BIT(19)
+#define RCANFD_CCTR_SOCOIE BIT(18)
+#define RCANFD_CCTR_EOCOIE BIT(17)
+#define RCANFD_CCTR_TAIE BIT(16)
+#define RCANFD_CCTR_ALIE BIT(15)
+#define RCANFD_CCTR_BLIE BIT(14)
+#define RCANFD_CCTR_OLIE BIT(13)
+#define RCANFD_CCTR_BORIE BIT(12)
+#define RCANFD_CCTR_BOEIE BIT(11)
+#define RCANFD_CCTR_EPIE BIT(10)
+#define RCANFD_CCTR_EWIE BIT(9)
+#define RCANFD_CCTR_BEIE BIT(8)
+#define RCANFD_CCTR_CSLPR BIT(2)
+#define RCANFD_CCTR_CHMDC_MASK (0x3)
+#define RCANFD_CCTR_CHDMC_COPM (0x0)
+#define RCANFD_CCTR_CHDMC_CRESET (0x1)
+#define RCANFD_CCTR_CHDMC_CHLT (0x2)
+
+/* RSCFDnCFDCmSTS / RSCFDnCmSTS */
+#define RCANFD_CSTS_COMSTS BIT(7)
+#define RCANFD_CSTS_RECSTS BIT(6)
+#define RCANFD_CSTS_TRMSTS BIT(5)
+#define RCANFD_CSTS_BOSTS BIT(4)
+#define RCANFD_CSTS_EPSTS BIT(3)
+#define RCANFD_CSTS_SLPSTS BIT(2)
+#define RCANFD_CSTS_HLTSTS BIT(1)
+#define RCANFD_CSTS_CRSTSTS BIT(0)
+
+#define RCANFD_CSTS_TECCNT(x) (((x) >> 24) & 0xff)
+#define RCANFD_CSTS_RECCNT(x) (((x) >> 16) & 0xff)
+
+/* RSCFDnCFDCmERFL / RSCFDnCmERFL */
+#define RCANFD_CERFL_ADERR BIT(14)
+#define RCANFD_CERFL_B0ERR BIT(13)
+#define RCANFD_CERFL_B1ERR BIT(12)
+#define RCANFD_CERFL_CERR BIT(11)
+#define RCANFD_CERFL_AERR BIT(10)
+#define RCANFD_CERFL_FERR BIT(9)
+#define RCANFD_CERFL_SERR BIT(8)
+#define RCANFD_CERFL_ALF BIT(7)
+#define RCANFD_CERFL_BLF BIT(6)
+#define RCANFD_CERFL_OVLF BIT(5)
+#define RCANFD_CERFL_BORF BIT(4)
+#define RCANFD_CERFL_BOEF BIT(3)
+#define RCANFD_CERFL_EPF BIT(2)
+#define RCANFD_CERFL_EWF BIT(1)
+#define RCANFD_CERFL_BEF BIT(0)
+
+#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
+
+/* RSCFDnCFDCmDCFG */
+#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24)
+#define RCANFD_DCFG_DTSEG2(x) (((x) & 0x7) << 20)
+#define RCANFD_DCFG_DTSEG1(x) (((x) & 0xf) << 16)
+#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
+
+/* RSCFDnCFDCmFDCFG */
+#define RCANFD_FDCFG_TDCE BIT(9)
+#define RCANFD_FDCFG_TDCOC BIT(8)
+#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
+
+/* RSCFDnCFDRFCCx */
+#define RCANFD_RFCC_RFIM BIT(12)
+#define RCANFD_RFCC_RFDC(x) (((x) & 0x7) << 8)
+#define RCANFD_RFCC_RFPLS(x) (((x) & 0x7) << 4)
+#define RCANFD_RFCC_RFIE BIT(1)
+#define RCANFD_RFCC_RFE BIT(0)
+
+/* RSCFDnCFDRFSTSx */
+#define RCANFD_RFSTS_RFIF BIT(3)
+#define RCANFD_RFSTS_RFMLT BIT(2)
+#define RCANFD_RFSTS_RFFLL BIT(1)
+#define RCANFD_RFSTS_RFEMP BIT(0)
+
+/* RSCFDnCFDRFIDx */
+#define RCANFD_RFID_RFIDE BIT(31)
+#define RCANFD_RFID_RFRTR BIT(30)
+
+/* RSCFDnCFDRFPTRx */
+#define RCANFD_RFPTR_RFDLC(x) (((x) >> 28) & 0xf)
+#define RCANFD_RFPTR_RFPTR(x) (((x) >> 16) & 0xfff)
+#define RCANFD_RFPTR_RFTS(x) (((x) >> 0) & 0xffff)
+
+/* RSCFDnCFDRFFDSTSx */
+#define RCANFD_RFFDSTS_RFFDF BIT(2)
+#define RCANFD_RFFDSTS_RFBRS BIT(1)
+#define RCANFD_RFFDSTS_RFESI BIT(0)
+
+/* Common FIFO bits */
+
+/* RSCFDnCFDCFCCk */
+#define RCANFD_CFCC_CFTML(x) (((x) & 0xf) << 20)
+#define RCANFD_CFCC_CFM(x) (((x) & 0x3) << 16)
+#define RCANFD_CFCC_CFIM BIT(12)
+#define RCANFD_CFCC_CFDC(x) (((x) & 0x7) << 8)
+#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
+#define RCANFD_CFCC_CFTXIE BIT(2)
+#define RCANFD_CFCC_CFE BIT(0)
+
+/* RSCFDnCFDCFSTSk */
+#define RCANFD_CFSTS_CFMC(x) (((x) >> 8) & 0xff)
+#define RCANFD_CFSTS_CFTXIF BIT(4)
+#define RCANFD_CFSTS_CFMLT BIT(2)
+#define RCANFD_CFSTS_CFFLL BIT(1)
+#define RCANFD_CFSTS_CFEMP BIT(0)
+
+/* RSCFDnCFDCFIDk */
+#define RCANFD_CFID_CFIDE BIT(31)
+#define RCANFD_CFID_CFRTR BIT(30)
+#define RCANFD_CFID_CFID_MASK(x) ((x) & 0x1fffffff)
+
+/* RSCFDnCFDCFPTRk */
+#define RCANFD_CFPTR_CFDLC(x) (((x) & 0xf) << 28)
+#define RCANFD_CFPTR_CFPTR(x) (((x) & 0xfff) << 16)
+#define RCANFD_CFPTR_CFTS(x) (((x) & 0xff) << 0)
+
+/* RSCFDnCFDCFFDCSTSk */
+#define RCANFD_CFFDCSTS_CFFDF BIT(2)
+#define RCANFD_CFFDCSTS_CFBRS BIT(1)
+#define RCANFD_CFFDCSTS_CFESI BIT(0)
+
+/* This controller supports either Classical CAN only mode or CAN FD only mode.
+ * These modes are supported in two separate set of register maps & names.
+ * However, some of the register offsets are common for both modes. Those
+ * offsets are listed below as Common registers.
+ *
+ * The CAN FD only mode specific registers & Classical CAN only mode specific
+ * registers are listed separately. Their register names starts with
+ * RCANFD_F_xxx & RCANFD_C_xxx respectively.
+ */
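+/* For example, applying the offsets defined later in this file, receive
+ * FIFO 1's ID register sits at 0x0e00 + 0x10 * 1 = 0x0e10 (RCANFD_C_RFID(1))
+ * in the Classical CAN map, but at 0x3000 + 0x80 * 1 = 0x3080
+ * (RCANFD_F_RFID(1)) in the CAN FD map. Only the registers listed under
+ * "Common registers" below share offsets between the two modes.
+ */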
+
+/* Common registers */
+
+/* RSCFDnCFDCmNCFG / RSCFDnCmCFG */
+#define RCANFD_CCFG(m) (0x0000 + (0x10 * (m)))
+/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
+#define RCANFD_CCTR(m) (0x0004 + (0x10 * (m)))
+/* RSCFDnCFDCmSTS / RSCFDnCmSTS */
+#define RCANFD_CSTS(m) (0x0008 + (0x10 * (m)))
+/* RSCFDnCFDCmERFL / RSCFDnCmERFL */
+#define RCANFD_CERFL(m) (0x000C + (0x10 * (m)))
+
+/* RSCFDnCFDGCFG / RSCFDnGCFG */
+#define RCANFD_GCFG (0x0084)
+/* RSCFDnCFDGCTR / RSCFDnGCTR */
+#define RCANFD_GCTR (0x0088)
+/* RSCFDnCFDGCTS / RSCFDnGCTS */
+#define RCANFD_GSTS (0x008c)
+/* RSCFDnCFDGERFL / RSCFDnGERFL */
+#define RCANFD_GERFL (0x0090)
+/* RSCFDnCFDGTSC / RSCFDnGTSC */
+#define RCANFD_GTSC (0x0094)
+/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
+#define RCANFD_GAFLECTR (0x0098)
+/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
+#define RCANFD_GAFLCFG0 (0x009c)
+/* RSCFDnCFDGAFLCFG1 / RSCFDnGAFLCFG1 */
+#define RCANFD_GAFLCFG1 (0x00a0)
+/* RSCFDnCFDRMNB / RSCFDnRMNB */
+#define RCANFD_RMNB (0x00a4)
+/* RSCFDnCFDRMND / RSCFDnRMND */
+#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
+
+/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
+#define RCANFD_RFCC(x) (0x00b8 + (0x04 * (x)))
+/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
+#define RCANFD_RFSTS(x) (0x00d8 + (0x04 * (x)))
+/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
+#define RCANFD_RFPCTR(x) (0x00f8 + (0x04 * (x)))
+
+/* Common FIFO Control registers */
+
+/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
+#define RCANFD_CFCC(ch, idx) (0x0118 + (0x0c * (ch)) + \
+ (0x04 * (idx)))
+/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
+#define RCANFD_CFSTS(ch, idx) (0x0178 + (0x0c * (ch)) + \
+ (0x04 * (idx)))
+/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
+#define RCANFD_CFPCTR(ch, idx) (0x01d8 + (0x0c * (ch)) + \
+ (0x04 * (idx)))
+
+/* RSCFDnCFDFESTS / RSCFDnFESTS */
+#define RCANFD_FESTS (0x0238)
+/* RSCFDnCFDFFSTS / RSCFDnFFSTS */
+#define RCANFD_FFSTS (0x023c)
+/* RSCFDnCFDFMSTS / RSCFDnFMSTS */
+#define RCANFD_FMSTS (0x0240)
+/* RSCFDnCFDRFISTS / RSCFDnRFISTS */
+#define RCANFD_RFISTS (0x0244)
+/* RSCFDnCFDCFRISTS / RSCFDnCFRISTS */
+#define RCANFD_CFRISTS (0x0248)
+/* RSCFDnCFDCFTISTS / RSCFDnCFTISTS */
+#define RCANFD_CFTISTS (0x024c)
+
+/* RSCFDnCFDTMCp / RSCFDnTMCp */
+#define RCANFD_TMC(p) (0x0250 + (0x01 * (p)))
+/* RSCFDnCFDTMSTSp / RSCFDnTMSTSp */
+#define RCANFD_TMSTS(p) (0x02d0 + (0x01 * (p)))
+
+/* RSCFDnCFDTMTRSTSp / RSCFDnTMTRSTSp */
+#define RCANFD_TMTRSTS(y) (0x0350 + (0x04 * (y)))
+/* RSCFDnCFDTMTARSTSp / RSCFDnTMTARSTSp */
+#define RCANFD_TMTARSTS(y) (0x0360 + (0x04 * (y)))
+/* RSCFDnCFDTMTCSTSp / RSCFDnTMTCSTSp */
+#define RCANFD_TMTCSTS(y) (0x0370 + (0x04 * (y)))
+/* RSCFDnCFDTMTASTSp / RSCFDnTMTASTSp */
+#define RCANFD_TMTASTS(y) (0x0380 + (0x04 * (y)))
+/* RSCFDnCFDTMIECy / RSCFDnTMIECy */
+#define RCANFD_TMIEC(y) (0x0390 + (0x04 * (y)))
+
+/* RSCFDnCFDTXQCCm / RSCFDnTXQCCm */
+#define RCANFD_TXQCC(m) (0x03a0 + (0x04 * (m)))
+/* RSCFDnCFDTXQSTSm / RSCFDnTXQSTSm */
+#define RCANFD_TXQSTS(m) (0x03c0 + (0x04 * (m)))
+/* RSCFDnCFDTXQPCTRm / RSCFDnTXQPCTRm */
+#define RCANFD_TXQPCTR(m) (0x03e0 + (0x04 * (m)))
+
+/* RSCFDnCFDTHLCCm / RSCFDnTHLCCm */
+#define RCANFD_THLCC(m) (0x0400 + (0x04 * (m)))
+/* RSCFDnCFDTHLSTSm / RSCFDnTHLSTSm */
+#define RCANFD_THLSTS(m) (0x0420 + (0x04 * (m)))
+/* RSCFDnCFDTHLPCTRm / RSCFDnTHLPCTRm */
+#define RCANFD_THLPCTR(m) (0x0440 + (0x04 * (m)))
+
+/* RSCFDnCFDGTINTSTS0 / RSCFDnGTINTSTS0 */
+#define RCANFD_GTINTSTS0 (0x0460)
+/* RSCFDnCFDGTINTSTS1 / RSCFDnGTINTSTS1 */
+#define RCANFD_GTINTSTS1 (0x0464)
+/* RSCFDnCFDGTSTCFG / RSCFDnGTSTCFG */
+#define RCANFD_GTSTCFG (0x0468)
+/* RSCFDnCFDGTSTCTR / RSCFDnGTSTCTR */
+#define RCANFD_GTSTCTR (0x046c)
+/* RSCFDnCFDGLOCKK / RSCFDnGLOCKK */
+#define RCANFD_GLOCKK (0x047c)
+/* RSCFDnCFDGRMCFG */
+#define RCANFD_GRMCFG (0x04fc)
+
+/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
+#define RCANFD_GAFLID(offset, j) ((offset) + (0x10 * (j)))
+/* RSCFDnCFDGAFLMj / RSCFDnGAFLMj */
+#define RCANFD_GAFLM(offset, j) ((offset) + 0x04 + (0x10 * (j)))
+/* RSCFDnCFDGAFLP0j / RSCFDnGAFLP0j */
+#define RCANFD_GAFLP0(offset, j) ((offset) + 0x08 + (0x10 * (j)))
+/* RSCFDnCFDGAFLP1j / RSCFDnGAFLP1j */
+#define RCANFD_GAFLP1(offset, j) ((offset) + 0x0c + (0x10 * (j)))
+
+/* Classical CAN only mode register map */
+
+/* RSCFDnGAFLXXXj offset */
+#define RCANFD_C_GAFL_OFFSET (0x0500)
+
+/* RSCFDnRMXXXq -> RCANFD_C_RMXXX(q) */
+#define RCANFD_C_RMID(q) (0x0600 + (0x10 * (q)))
+#define RCANFD_C_RMPTR(q) (0x0604 + (0x10 * (q)))
+#define RCANFD_C_RMDF0(q) (0x0608 + (0x10 * (q)))
+#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q)))
+
+/* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */
+#define RCANFD_C_RFOFFSET (0x0e00)
+#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
+#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + \
+ (0x10 * (x)))
+#define RCANFD_C_RFDF(x, df) (RCANFD_C_RFOFFSET + 0x08 + \
+ (0x10 * (x)) + (0x04 * (df)))
+
+/* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */
+#define RCANFD_C_CFOFFSET (0x0e80)
+#define RCANFD_C_CFID(ch, idx) (RCANFD_C_CFOFFSET + (0x30 * (ch)) + \
+ (0x10 * (idx)))
+#define RCANFD_C_CFPTR(ch, idx) (RCANFD_C_CFOFFSET + 0x04 + \
+ (0x30 * (ch)) + (0x10 * (idx)))
+#define RCANFD_C_CFDF(ch, idx, df) (RCANFD_C_CFOFFSET + 0x08 + \
+ (0x30 * (ch)) + (0x10 * (idx)) + \
+ (0x04 * (df)))
+
+/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */
+#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p)))
+#define RCANFD_C_TMPTR(p) (0x1004 + (0x10 * (p)))
+#define RCANFD_C_TMDF0(p) (0x1008 + (0x10 * (p)))
+#define RCANFD_C_TMDF1(p) (0x100c + (0x10 * (p)))
+
+/* RSCFDnTHLACCm */
+#define RCANFD_C_THLACC(m) (0x1800 + (0x04 * (m)))
+/* RSCFDnRPGACCr */
+#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
+
+/* CAN FD mode specific register map */
+
+/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
+#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m)))
+#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
+#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
+#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
+#define RCANFD_F_CFDCRC(m) (0x0510 + (0x20 * (m)))
+
+/* RSCFDnCFDGAFLXXXj offset */
+#define RCANFD_F_GAFL_OFFSET (0x1000)
+
+/* RSCFDnCFDRMXXXq -> RCANFD_F_RMXXX(q) */
+#define RCANFD_F_RMID(q) (0x2000 + (0x20 * (q)))
+#define RCANFD_F_RMPTR(q) (0x2004 + (0x20 * (q)))
+#define RCANFD_F_RMFDSTS(q) (0x2008 + (0x20 * (q)))
+#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
+
+/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
+#define RCANFD_F_RFOFFSET (0x3000)
+#define RCANFD_F_RFID(x) (RCANFD_F_RFOFFSET + (0x80 * (x)))
+#define RCANFD_F_RFPTR(x) (RCANFD_F_RFOFFSET + 0x04 + \
+ (0x80 * (x)))
+#define RCANFD_F_RFFDSTS(x) (RCANFD_F_RFOFFSET + 0x08 + \
+ (0x80 * (x)))
+#define RCANFD_F_RFDF(x, df) (RCANFD_F_RFOFFSET + 0x0c + \
+ (0x80 * (x)) + (0x04 * (df)))
+
+/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
+#define RCANFD_F_CFOFFSET (0x3400)
+#define RCANFD_F_CFID(ch, idx) (RCANFD_F_CFOFFSET + (0x180 * (ch)) + \
+ (0x80 * (idx)))
+#define RCANFD_F_CFPTR(ch, idx) (RCANFD_F_CFOFFSET + 0x04 + \
+ (0x180 * (ch)) + (0x80 * (idx)))
+#define RCANFD_F_CFFDCSTS(ch, idx) (RCANFD_F_CFOFFSET + 0x08 + \
+ (0x180 * (ch)) + (0x80 * (idx)))
+#define RCANFD_F_CFDF(ch, idx, df) (RCANFD_F_CFOFFSET + 0x0c + \
+ (0x180 * (ch)) + (0x80 * (idx)) + \
+ (0x04 * (df)))
+
+/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */
+#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p)))
+#define RCANFD_F_TMPTR(p) (0x4004 + (0x20 * (p)))
+#define RCANFD_F_TMFDCTR(p) (0x4008 + (0x20 * (p)))
+#define RCANFD_F_TMDF(p, b) (0x400c + (0x20 * (p)) + (0x04 * (b)))
+
+/* RSCFDnCFDTHLACCm */
+#define RCANFD_F_THLACC(m) (0x6000 + (0x04 * (m)))
+/* RSCFDnCFDRPGACCr */
+#define RCANFD_F_RPGACC(r) (0x6400 + (0x04 * (r)))
+
+/* Constants */
+#define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */
+#define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */
+
+#define RCANFD_NUM_CHANNELS 2 /* Two channels max */
+#define RCANFD_CHANNELS_MASK (BIT(RCANFD_NUM_CHANNELS) - 1)
+
+#define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16)
+#define RCANFD_CHANNEL_NUMRULES 1 /* only one rule per channel */
+
+/* Rx FIFOs are a global resource of the controller. There are 8 such FIFOs
+ * available. Each channel gets a dedicated Rx FIFO, i.e. the channel
+ * number is added to the RFFIFO index.
+ */
+#define RCANFD_RFFIFO_IDX 0
+
+/* Tx/Rx (Common) FIFO is a per channel resource. Each channel has 3 Common
+ * FIFOs dedicated to it. Use the first (index 0) FIFO of the 3 for Tx.
+ */
+#define RCANFD_CFFIFO_IDX 0
+
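+/* Illustrative only: given the two index constants above, the driver
+ * addresses its FIFOs as
+ *
+ *	ridx = ch + RCANFD_RFFIFO_IDX		(per channel Rx FIFO)
+ *	RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX)	(Tx via Common FIFO 0)
+ */
+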
+/* fCAN clock select register settings */
+enum rcar_canfd_fcanclk {
+ RCANFD_CANFDCLK = 0, /* CANFD clock */
+ RCANFD_EXTCLK, /* Externally input clock */
+};
+
+struct rcar_canfd_global;
+
+/* Channel priv data */
+struct rcar_canfd_channel {
+ struct can_priv can; /* Must be the first member */
+ struct net_device *ndev;
+ struct rcar_canfd_global *gpriv; /* Controller reference */
+ void __iomem *base; /* Register base address */
+ struct napi_struct napi;
+ u8 tx_len[RCANFD_FIFO_DEPTH]; /* For net stats */
+ u32 tx_head; /* Incremented on xmit */
+ u32 tx_tail; /* Incremented on xmit done */
+ u32 channel; /* Channel number */
+ spinlock_t tx_lock; /* To protect tx path */
+};
+
+/* Global priv data */
+struct rcar_canfd_global {
+ struct rcar_canfd_channel *ch[RCANFD_NUM_CHANNELS];
+ void __iomem *base; /* Register base address */
+ struct platform_device *pdev; /* Respective platform device */
+ struct clk *clkp; /* Peripheral clock */
+ struct clk *can_clk; /* fCAN clock */
+ enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */
+ unsigned long channels_mask; /* Enabled channels mask */
+ bool fdmode; /* CAN FD or Classical CAN only mode */
+};
+
+/* CAN FD mode nominal rate constants */
+static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 128,
+ .tseg2_min = 2,
+ .tseg2_max = 32,
+ .sjw_max = 32,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+/* CAN FD mode data rate constants */
+static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 8,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+/* Classical CAN mode bitrate constants */
+static const struct can_bittiming_const rcar_canfd_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+/* Helper functions */
+static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
+{
+ u32 data = readl(reg);
+
+ data &= ~mask;
+ data |= (val & mask);
+ writel(data, reg);
+}
+
+static inline u32 rcar_canfd_read(void __iomem *base, u32 offset)
+{
+ return readl(base + (offset));
+}
+
+static inline void rcar_canfd_write(void __iomem *base, u32 offset, u32 val)
+{
+ writel(val, base + (offset));
+}
+
+static void rcar_canfd_set_bit(void __iomem *base, u32 reg, u32 val)
+{
+ rcar_canfd_update(val, val, base + (reg));
+}
+
+static void rcar_canfd_clear_bit(void __iomem *base, u32 reg, u32 val)
+{
+ rcar_canfd_update(val, 0, base + (reg));
+}
+
+static void rcar_canfd_update_bit(void __iomem *base, u32 reg,
+ u32 mask, u32 val)
+{
+ rcar_canfd_update(mask, val, base + (reg));
+}
+
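+/* A minimal usage sketch for the helpers above (illustrative): setting a
+ * single bit is a plain read-modify-write, e.g.
+ *
+ *	rcar_canfd_set_bit(base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+ *
+ * reads RFCC, ORs in RFE and writes the result back. The helpers are not
+ * atomic, so callers must serialize concurrent access themselves.
+ */
+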
+static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
+ struct canfd_frame *cf, u32 off)
+{
+ u32 i, lwords;
+
+ lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
+ for (i = 0; i < lwords; i++)
+ *((u32 *)cf->data + i) =
+ rcar_canfd_read(priv->base, off + (i * sizeof(u32)));
+}
+
+static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
+ struct canfd_frame *cf, u32 off)
+{
+ u32 i, lwords;
+
+ lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
+ for (i = 0; i < lwords; i++)
+ rcar_canfd_write(priv->base, off + (i * sizeof(u32)),
+ *((u32 *)cf->data + i));
+}
+
+static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
+{
+ u32 i;
+
+ for (i = 0; i < RCANFD_FIFO_DEPTH; i++)
+ can_free_echo_skb(ndev, i);
+}
+
+static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
+{
+ u32 sts, ch;
+ int err;
+
+ /* Check RAMINIT flag as CAN RAM initialization takes place
+ * after the MCU reset
+ */
+ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
+ !(sts & RCANFD_GSTS_GRAMINIT), 2, 500000);
+ if (err) {
+ dev_dbg(&gpriv->pdev->dev, "global raminit failed\n");
+ return err;
+ }
+
+ /* Transition to Global Reset mode */
+ rcar_canfd_clear_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
+ rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR,
+ RCANFD_GCTR_GMDC_MASK, RCANFD_GCTR_GMDC_GRESET);
+
+ /* Ensure Global reset mode */
+ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
+ (sts & RCANFD_GSTS_GRSTSTS), 2, 500000);
+ if (err) {
+ dev_dbg(&gpriv->pdev->dev, "global reset failed\n");
+ return err;
+ }
+
+ /* Reset Global error flags */
+ rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
+
+ /* Set the controller into appropriate mode */
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ else
+ rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+
+ /* Transition all Channels to reset mode */
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ rcar_canfd_clear_bit(gpriv->base,
+ RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR);
+
+ rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
+ RCANFD_CCTR_CHMDC_MASK,
+ RCANFD_CCTR_CHDMC_CRESET);
+
+ /* Ensure Channel reset mode */
+ err = readl_poll_timeout((gpriv->base + RCANFD_CSTS(ch)), sts,
+ (sts & RCANFD_CSTS_CRSTSTS),
+ 2, 500000);
+ if (err) {
+ dev_dbg(&gpriv->pdev->dev,
+ "channel %u reset failed\n", ch);
+ return err;
+ }
+ }
+ return 0;
+}
+
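+/* Mode sequencing as implemented here (a sketch, not the full hardware
+ * state machine): probe first leaves Global Sleep (GSLPR), holds the
+ * controller in Global/Channel Reset while configuring it, and only then
+ * requests Global Operational mode (GMDC_GOPM). Every transition is
+ * confirmed by polling the corresponding status register.
+ */
+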
+static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
+{
+ u32 cfg, ch;
+
+ /* Global configuration settings */
+
+ /* ECC Error flag Enable */
+ cfg = RCANFD_GCFG_EEFE;
+
+ if (gpriv->fdmode)
+ /* Truncate payload to configured message size RFPLS */
+ cfg |= RCANFD_GCFG_CMPOC;
+
+ /* Set External Clock if selected */
+ if (gpriv->fcan != RCANFD_CANFDCLK)
+ cfg |= RCANFD_GCFG_DCS;
+
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg);
+
+ /* Channel configuration settings */
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch),
+ RCANFD_CCTR_ERRD);
+ rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
+ RCANFD_CCTR_BOM_MASK,
+ RCANFD_CCTR_BOM_BENTRY);
+ }
+}
+
+static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
+ u32 ch)
+{
+ u32 cfg;
+ int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ if (ch == 0) {
+ start = 0; /* Channel 0 always starts from 0th rule */
+ } else {
+ /* Get number of Channel 0 rules and adjust */
+ cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG0);
+ start = RCANFD_GAFLCFG_GETRNC(0, cfg);
+ }
+
+ /* Enable write access to entry */
+ page = RCANFD_GAFL_PAGENUM(start);
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
+ (RCANFD_GAFLECTR_AFLPN(page) |
+ RCANFD_GAFLECTR_AFLDAE));
+
+ /* Write number of rules for channel */
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG0,
+ RCANFD_GAFLCFG_SETRNC(ch, num_rules));
+ if (gpriv->fdmode)
+ offset = RCANFD_F_GAFL_OFFSET;
+ else
+ offset = RCANFD_C_GAFL_OFFSET;
+
+ /* Accept all IDs */
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0);
+ /* IDE or RTR is not considered for matching */
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0);
+ /* Any data length accepted */
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
+ /* Place the msg in corresponding Rx FIFO entry */
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start),
+ RCANFD_GAFLP1_GAFLFDP(ridx));
+
+ /* Disable write access to page */
+ rcar_canfd_clear_bit(gpriv->base,
+ RCANFD_GAFLECTR, RCANFD_GAFLECTR_AFLDAE);
+}
+
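+/* Net effect of the single rule programmed above: every frame, whatever
+ * its ID, IDE or RTR bits, is accepted and routed to the channel's
+ * dedicated Rx FIFO; any further filtering is left to the stack.
+ */
+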
+static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch)
+{
+ /* Rx FIFO is used for reception */
+ u32 cfg;
+ u16 rfdc, rfpls;
+
+ /* Select Rx FIFO based on channel */
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ rfdc = 2; /* b010 - 8 messages Rx FIFO depth */
+ if (gpriv->fdmode)
+ rfpls = 7; /* b111 - Max 64 bytes payload */
+ else
+ rfpls = 0; /* b000 - Max 8 bytes payload */
+
+ cfg = (RCANFD_RFCC_RFIM | RCANFD_RFCC_RFDC(rfdc) |
+ RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE);
+ rcar_canfd_write(gpriv->base, RCANFD_RFCC(ridx), cfg);
+}
+
+static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
+{
+ /* Tx/Rx (Common) FIFO configured in Tx mode is
+ * used for transmission.
+ *
+ * Each channel has 3 Common FIFOs dedicated to it.
+ * Use the first (index 0) of the 3.
+ */
+ u32 cfg;
+ u16 cftml, cfm, cfdc, cfpls;
+
+ cftml = 0; /* 0th buffer */
+ cfm = 1; /* b01 - Transmit mode */
+ cfdc = 2; /* b010 - 8 messages Tx FIFO depth */
+ if (gpriv->fdmode)
+ cfpls = 7; /* b111 - Max 64 bytes payload */
+ else
+ cfpls = 0; /* b000 - Max 8 bytes payload */
+
+ cfg = (RCANFD_CFCC_CFTML(cftml) | RCANFD_CFCC_CFM(cfm) |
+ RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(cfdc) |
+ RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE);
+ rcar_canfd_write(gpriv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), cfg);
+
+ if (gpriv->fdmode)
+ /* Clear FD mode specific control/status register */
+ rcar_canfd_write(gpriv->base,
+ RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), 0);
+}
+
+static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv)
+{
+ u32 ctr;
+
+ /* Clear any stray error interrupt flags */
+ rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0);
+
+ /* Global interrupts setup */
+ ctr = RCANFD_GCTR_MEIE;
+ if (gpriv->fdmode)
+ ctr |= RCANFD_GCTR_CFMPOFIE;
+
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, ctr);
+}
+
+static void rcar_canfd_disable_global_interrupts(struct rcar_canfd_global
+ *gpriv)
+{
+ /* Disable all interrupts */
+ rcar_canfd_write(gpriv->base, RCANFD_GCTR, 0);
+
+ /* Clear any stray error interrupt flags */
+ rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0);
+}
+
+static void rcar_canfd_enable_channel_interrupts(struct rcar_canfd_channel
+ *priv)
+{
+ u32 ctr, ch = priv->channel;
+
+ /* Clear any stray error flags */
+ rcar_canfd_write(priv->base, RCANFD_CERFL(ch), 0);
+
+ /* Channel interrupts setup */
+ ctr = (RCANFD_CCTR_TAIE |
+ RCANFD_CCTR_ALIE | RCANFD_CCTR_BLIE |
+ RCANFD_CCTR_OLIE | RCANFD_CCTR_BORIE |
+ RCANFD_CCTR_BOEIE | RCANFD_CCTR_EPIE |
+ RCANFD_CCTR_EWIE | RCANFD_CCTR_BEIE);
+ rcar_canfd_set_bit(priv->base, RCANFD_CCTR(ch), ctr);
+}
+
+static void rcar_canfd_disable_channel_interrupts(struct rcar_canfd_channel
+ *priv)
+{
+ u32 ctr, ch = priv->channel;
+
+ ctr = (RCANFD_CCTR_TAIE |
+ RCANFD_CCTR_ALIE | RCANFD_CCTR_BLIE |
+ RCANFD_CCTR_OLIE | RCANFD_CCTR_BORIE |
+ RCANFD_CCTR_BOEIE | RCANFD_CCTR_EPIE |
+ RCANFD_CCTR_EWIE | RCANFD_CCTR_BEIE);
+ rcar_canfd_clear_bit(priv->base, RCANFD_CCTR(ch), ctr);
+
+ /* Clear any stray error flags */
+ rcar_canfd_write(priv->base, RCANFD_CERFL(ch), 0);
+}
+
+static void rcar_canfd_global_error(struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
+ struct net_device_stats *stats = &ndev->stats;
+ u32 ch = priv->channel;
+ u32 gerfl, sts;
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
+ if ((gerfl & RCANFD_GERFL_EEF0) && (ch == 0)) {
+ netdev_dbg(ndev, "Ch0: ECC Error flag\n");
+ stats->tx_dropped++;
+ }
+ if ((gerfl & RCANFD_GERFL_EEF1) && (ch == 1)) {
+ netdev_dbg(ndev, "Ch1: ECC Error flag\n");
+ stats->tx_dropped++;
+ }
+ if (gerfl & RCANFD_GERFL_MES) {
+ sts = rcar_canfd_read(priv->base,
+ RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ if (sts & RCANFD_CFSTS_CFMLT) {
+ netdev_dbg(ndev, "Tx Message Lost flag\n");
+ stats->tx_dropped++;
+ rcar_canfd_write(priv->base,
+ RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ sts & ~RCANFD_CFSTS_CFMLT);
+ }
+
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ if (sts & RCANFD_RFSTS_RFMLT) {
+ netdev_dbg(ndev, "Rx Message Lost flag\n");
+ stats->rx_dropped++;
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ sts & ~RCANFD_RFSTS_RFMLT);
+ }
+ }
+ if (gpriv->fdmode && gerfl & RCANFD_GERFL_CMPOF) {
+ /* The Message Lost flag will be set for the respective
+ * channel when this condition occurs, with counters and
+ * flags already updated.
+ */
+ netdev_dbg(ndev, "global payload overflow interrupt\n");
+ }
+
+ /* Clear all global error interrupts. Only the affected
+ * channels' bits get cleared.
+ */
+ rcar_canfd_write(priv->base, RCANFD_GERFL, 0);
+}
+
+static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
+ u16 txerr, u16 rxerr)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 ch = priv->channel;
+
+ netdev_dbg(ndev, "ch erfl %x txerr %u rxerr %u\n", cerfl, txerr, rxerr);
+
+ /* Propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ /* Channel error interrupts */
+ if (cerfl & RCANFD_CERFL_BEF) {
+ netdev_dbg(ndev, "Bus error\n");
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_UNSPEC;
+ priv->can.can_stats.bus_error++;
+ }
+ if (cerfl & RCANFD_CERFL_ADERR) {
+ netdev_dbg(ndev, "ACK Delimiter Error\n");
+ stats->tx_errors++;
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
+ }
+ if (cerfl & RCANFD_CERFL_B0ERR) {
+ netdev_dbg(ndev, "Bit Error (dominant)\n");
+ stats->tx_errors++;
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ }
+ if (cerfl & RCANFD_CERFL_B1ERR) {
+ netdev_dbg(ndev, "Bit Error (recessive)\n");
+ stats->tx_errors++;
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ }
+ if (cerfl & RCANFD_CERFL_CERR) {
+ netdev_dbg(ndev, "CRC Error\n");
+ stats->rx_errors++;
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+ if (cerfl & RCANFD_CERFL_AERR) {
+ netdev_dbg(ndev, "ACK Error\n");
+ stats->tx_errors++;
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ }
+ if (cerfl & RCANFD_CERFL_FERR) {
+ netdev_dbg(ndev, "Form Error\n");
+ stats->rx_errors++;
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
+ if (cerfl & RCANFD_CERFL_SERR) {
+ netdev_dbg(ndev, "Stuff Error\n");
+ stats->rx_errors++;
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
+ if (cerfl & RCANFD_CERFL_ALF) {
+ netdev_dbg(ndev, "Arbitration lost Error\n");
+ priv->can.can_stats.arbitration_lost++;
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
+ }
+ if (cerfl & RCANFD_CERFL_BLF) {
+ netdev_dbg(ndev, "Bus Lock Error\n");
+ stats->rx_errors++;
+ cf->can_id |= CAN_ERR_BUSERROR;
+ }
+ if (cerfl & RCANFD_CERFL_EWF) {
+ netdev_dbg(ndev, "Error warning interrupt\n");
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ if (cerfl & RCANFD_CERFL_EPF) {
+ netdev_dbg(ndev, "Error passive interrupt\n");
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ if (cerfl & RCANFD_CERFL_BOEF) {
+ netdev_dbg(ndev, "Bus-off entry interrupt\n");
+ rcar_canfd_tx_failure_cleanup(ndev);
+ priv->can.state = CAN_STATE_BUS_OFF;
+ priv->can.can_stats.bus_off++;
+ can_bus_off(ndev);
+ cf->can_id |= CAN_ERR_BUSOFF;
+ }
+ if (cerfl & RCANFD_CERFL_OVLF) {
+ netdev_dbg(ndev,
+ "Overload Frame Transmission error interrupt\n");
+ stats->tx_errors++;
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
+
+ /* Clear channel error interrupts that are handled */
+ rcar_canfd_write(priv->base, RCANFD_CERFL(ch),
+ RCANFD_CERFL_ERR(~cerfl));
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void rcar_canfd_tx_done(struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ u32 sts;
+ unsigned long flags;
+ u32 ch = priv->channel;
+
+ do {
+ u8 unsent, sent;
+
+ sent = priv->tx_tail % RCANFD_FIFO_DEPTH;
+ stats->tx_packets++;
+ stats->tx_bytes += priv->tx_len[sent];
+ priv->tx_len[sent] = 0;
+ can_get_echo_skb(ndev, sent);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ priv->tx_tail++;
+ sts = rcar_canfd_read(priv->base,
+ RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ unsent = RCANFD_CFSTS_CFMC(sts);
+
+ /* Wake producer only when there is room */
+ if (unsent != RCANFD_FIFO_DEPTH)
+ netif_wake_queue(ndev);
+
+ if (priv->tx_head - priv->tx_tail <= unsent) {
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ } while (1);
+
+ /* Clear interrupt */
+ rcar_canfd_write(priv->base, RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ sts & ~RCANFD_CFSTS_CFTXIF);
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+}
+
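+/* Note on the Tx bookkeeping above and in the xmit path below: tx_head
+ * and tx_tail are free-running u32 counters, so (tx_head - tx_tail) is
+ * the number of in-flight frames and stays correct across wraparound.
+ * can_put_echo_skb() at (tx_head % RCANFD_FIFO_DEPTH) in the xmit path
+ * pairs with can_get_echo_skb() at (tx_tail % RCANFD_FIFO_DEPTH) here.
+ */
+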
+static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
+{
+ struct rcar_canfd_global *gpriv = dev_id;
+ struct net_device *ndev;
+ struct rcar_canfd_channel *priv;
+ u32 sts, gerfl;
+ u32 ch, ridx;
+
+ /* Global error interrupts still indicate a condition specific
+ * to a channel. RxFIFO interrupt is a global interrupt.
+ */
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ priv = gpriv->ch[ch];
+ ndev = priv->ndev;
+ ridx = ch + RCANFD_RFFIFO_IDX;
+
+ /* Global error interrupts */
+ gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
+ if (unlikely(RCANFD_GERFL_ERR(gpriv, gerfl)))
+ rcar_canfd_global_error(ndev);
+
+ /* Handle Rx interrupts */
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ if (likely(sts & RCANFD_RFSTS_RFIF)) {
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable Rx FIFO interrupts */
+ rcar_canfd_clear_bit(priv->base,
+ RCANFD_RFCC(ridx),
+ RCANFD_RFCC_RFIE);
+ __napi_schedule(&priv->napi);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void rcar_canfd_state_change(struct net_device *ndev,
+ u16 txerr, u16 rxerr)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ enum can_state rx_state, tx_state, state = priv->can.state;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* Handle transition from error to normal states */
+ if (txerr < 96 && rxerr < 96)
+ state = CAN_STATE_ERROR_ACTIVE;
+ else if (txerr < 128 && rxerr < 128)
+ state = CAN_STATE_ERROR_WARNING;
+
+ if (state != priv->can.state) {
+ netdev_dbg(ndev, "state: new %d, old %d: txerr %u, rxerr %u\n",
+ state, priv->can.state, txerr, rxerr);
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+ tx_state = txerr >= rxerr ? state : 0;
+ rx_state = txerr <= rxerr ? state : 0;
+
+ can_change_state(ndev, cf, tx_state, rx_state);
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
+{
+ struct rcar_canfd_global *gpriv = dev_id;
+ struct net_device *ndev;
+ struct rcar_canfd_channel *priv;
+ u32 sts, ch, cerfl;
+ u16 txerr, rxerr;
+
+ /* Common FIFO is a per channel resource */
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ priv = gpriv->ch[ch];
+ ndev = priv->ndev;
+
+ /* Channel error interrupts */
+ cerfl = rcar_canfd_read(priv->base, RCANFD_CERFL(ch));
+ sts = rcar_canfd_read(priv->base, RCANFD_CSTS(ch));
+ txerr = RCANFD_CSTS_TECCNT(sts);
+ rxerr = RCANFD_CSTS_RECCNT(sts);
+ if (unlikely(RCANFD_CERFL_ERR(cerfl)))
+ rcar_canfd_error(ndev, cerfl, txerr, rxerr);
+
+ /* Handle state change to lower states */
+ if (unlikely((priv->can.state != CAN_STATE_ERROR_ACTIVE) &&
+ (priv->can.state != CAN_STATE_BUS_OFF)))
+ rcar_canfd_state_change(ndev, txerr, rxerr);
+
+ /* Handle Tx interrupts */
+ sts = rcar_canfd_read(priv->base,
+ RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ if (likely(sts & RCANFD_CFSTS_CFTXIF))
+ rcar_canfd_tx_done(ndev);
+ }
+ return IRQ_HANDLED;
+}
+
+static void rcar_canfd_set_bittiming(struct net_device *dev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(dev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ u16 brp, sjw, tseg1, tseg2;
+ u32 cfg;
+ u32 ch = priv->channel;
+
+ /* Nominal bit timing settings */
+ brp = bt->brp - 1;
+ sjw = bt->sjw - 1;
+ tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
+ tseg2 = bt->phase_seg2 - 1;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ /* CAN FD only mode */
+ cfg = (RCANFD_NCFG_NTSEG1(tseg1) | RCANFD_NCFG_NBRP(brp) |
+ RCANFD_NCFG_NSJW(sjw) | RCANFD_NCFG_NTSEG2(tseg2));
+
+ rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
+ netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
+ brp, sjw, tseg1, tseg2);
+
+ /* Data bit timing settings */
+ brp = dbt->brp - 1;
+ sjw = dbt->sjw - 1;
+ tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+ tseg2 = dbt->phase_seg2 - 1;
+
+ cfg = (RCANFD_DCFG_DTSEG1(tseg1) | RCANFD_DCFG_DBRP(brp) |
+ RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(tseg2));
+
+ rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
+ netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
+ brp, sjw, tseg1, tseg2);
+ } else {
+ /* Classical CAN only mode */
+ cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
+ RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
+
+ rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
+ netdev_dbg(priv->ndev,
+ "rate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
+ brp, sjw, tseg1, tseg2);
+ }
+}
+
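+/* For reference, the standard CAN bit timing relation behind the values
+ * written above (not chip-specific):
+ *
+ *	bitrate = can.clock.freq /
+ *		  (bt->brp * (1 + bt->prop_seg + bt->phase_seg1 +
+ *			      bt->phase_seg2))
+ *
+ * where the register fields hold each quantity minus one.
+ */
+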
+static int rcar_canfd_start(struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ int err;
+ u32 sts, ch = priv->channel;
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ rcar_canfd_set_bittiming(ndev);
+
+ rcar_canfd_enable_channel_interrupts(priv);
+
+ /* Set channel to Operational mode */
+ rcar_canfd_update_bit(priv->base, RCANFD_CCTR(ch),
+ RCANFD_CCTR_CHMDC_MASK, RCANFD_CCTR_CHDMC_COPM);
+
+ /* Verify channel mode change */
+ err = readl_poll_timeout((priv->base + RCANFD_CSTS(ch)), sts,
+ (sts & RCANFD_CSTS_COMSTS), 2, 500000);
+ if (err) {
+ netdev_err(ndev, "channel %u communication state failed\n", ch);
+ goto fail_mode_change;
+ }
+
+ /* Enable Common & Rx FIFO */
+ rcar_canfd_set_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ RCANFD_CFCC_CFE);
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
+
+fail_mode_change:
+ rcar_canfd_disable_channel_interrupts(priv);
+ return err;
+}
+
+static int rcar_canfd_open(struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
+ int err;
+
+ /* Peripheral clock is already enabled in probe */
+ err = clk_prepare_enable(gpriv->can_clk);
+ if (err) {
+ netdev_err(ndev, "failed to enable CAN clock, error %d\n", err);
+ goto out_clock;
+ }
+
+ err = open_candev(ndev);
+ if (err) {
+ netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ goto out_can_clock;
+ }
+
+ napi_enable(&priv->napi);
+ err = rcar_canfd_start(ndev);
+ if (err)
+ goto out_close;
+ netif_start_queue(ndev);
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+ return 0;
+out_close:
+ napi_disable(&priv->napi);
+ close_candev(ndev);
+out_can_clock:
+ clk_disable_unprepare(gpriv->can_clk);
+out_clock:
+ return err;
+}
+
+static void rcar_canfd_stop(struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ int err;
+ u32 sts, ch = priv->channel;
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ /* Transition to channel reset mode */
+ rcar_canfd_update_bit(priv->base, RCANFD_CCTR(ch),
+ RCANFD_CCTR_CHMDC_MASK, RCANFD_CCTR_CHDMC_CRESET);
+
+ /* Check Channel reset mode */
+ err = readl_poll_timeout((priv->base + RCANFD_CSTS(ch)), sts,
+ (sts & RCANFD_CSTS_CRSTSTS), 2, 500000);
+ if (err)
+ netdev_err(ndev, "channel %u reset failed\n", ch);
+
+ rcar_canfd_disable_channel_interrupts(priv);
+
+ /* Disable Common & Rx FIFO */
+ rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ RCANFD_CFCC_CFE);
+ rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+
+ /* Set the state as STOPPED */
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int rcar_canfd_close(struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
+
+ netif_stop_queue(ndev);
+ rcar_canfd_stop(ndev);
+ napi_disable(&priv->napi);
+ clk_disable_unprepare(gpriv->can_clk);
+ close_candev(ndev);
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+ return 0;
+}
+
+static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u32 sts = 0, id, dlc;
+ unsigned long flags;
+ u32 ch = priv->channel;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ id = cf->can_id & CAN_EFF_MASK;
+ id |= RCANFD_CFID_CFIDE;
+ } else {
+ id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ id |= RCANFD_CFID_CFRTR;
+
+ dlc = RCANFD_CFPTR_CFDLC(can_len2dlc(cf->len));
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ rcar_canfd_write(priv->base,
+ RCANFD_F_CFID(ch, RCANFD_CFFIFO_IDX), id);
+ rcar_canfd_write(priv->base,
+ RCANFD_F_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc);
+
+ if (can_is_canfd_skb(skb)) {
+ /* CAN FD frame format */
+ sts |= RCANFD_CFFDCSTS_CFFDF;
+ if (cf->flags & CANFD_BRS)
+ sts |= RCANFD_CFFDCSTS_CFBRS;
+
+ if (priv->can.state == CAN_STATE_ERROR_PASSIVE)
+ sts |= RCANFD_CFFDCSTS_CFESI;
+ }
+
+ rcar_canfd_write(priv->base,
+ RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), sts);
+
+ rcar_canfd_put_data(priv, cf,
+ RCANFD_F_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
+ } else {
+ rcar_canfd_write(priv->base,
+ RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id);
+ rcar_canfd_write(priv->base,
+ RCANFD_C_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc);
+ rcar_canfd_put_data(priv, cf,
+ RCANFD_C_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
+ }
+
+ priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len;
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ priv->tx_head++;
+
+ /* Stop the queue if we've filled all FIFO entries */
+ if (priv->tx_head - priv->tx_tail >= RCANFD_FIFO_DEPTH)
+ netif_stop_queue(ndev);
+
+ /* Start Tx: Write 0xff to CFPC to increment the CPU-side
+ * pointer for the Common FIFO
+ */
+ rcar_canfd_write(priv->base,
+ RCANFD_CFPCTR(ch, RCANFD_CFFIFO_IDX), 0xff);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return NETDEV_TX_OK;
+}
+
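+/* The queue stopped in rcar_canfd_start_xmit() is woken again from
+ * rcar_canfd_tx_done() once the hardware reports room in the Common
+ * FIFO (CFMC below the FIFO depth).
+ */
+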
+static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 sts = 0, id, dlc;
+ u32 ch = priv->channel;
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ id = rcar_canfd_read(priv->base, RCANFD_F_RFID(ridx));
+ dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(ridx));
+
+ sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(ridx));
+ if (sts & RCANFD_RFFDSTS_RFFDF)
+ skb = alloc_canfd_skb(priv->ndev, &cf);
+ else
+ skb = alloc_can_skb(priv->ndev,
+ (struct can_frame **)&cf);
+ } else {
+ id = rcar_canfd_read(priv->base, RCANFD_C_RFID(ridx));
+ dlc = rcar_canfd_read(priv->base, RCANFD_C_RFPTR(ridx));
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
+ }
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ if (id & RCANFD_RFID_RFIDE)
+ cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = id & CAN_SFF_MASK;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if (sts & RCANFD_RFFDSTS_RFFDF)
+ cf->len = can_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
+ else
+ cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc));
+
+ if (sts & RCANFD_RFFDSTS_RFESI) {
+ cf->flags |= CANFD_ESI;
+ netdev_dbg(priv->ndev, "ESI Error\n");
+ }
+
+ if (!(sts & RCANFD_RFFDSTS_RFFDF) && (id & RCANFD_RFID_RFRTR)) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ if (sts & RCANFD_RFFDSTS_RFBRS)
+ cf->flags |= CANFD_BRS;
+
+ rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0));
+ }
+ } else {
+ cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc));
+ if (id & RCANFD_RFID_RFRTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
+ }
+
+ /* Write 0xff to RFPC to increment the CPU-side
+ * pointer of the Rx FIFO
+ */
+ rcar_canfd_write(priv->base, RCANFD_RFPCTR(ridx), 0xff);
+
+ can_led_event(priv->ndev, CAN_LED_EVENT_RX);
+
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+}
+
+static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct rcar_canfd_channel *priv =
+ container_of(napi, struct rcar_canfd_channel, napi);
+ int num_pkts;
+ u32 sts;
+ u32 ch = priv->channel;
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ for (num_pkts = 0; num_pkts < quota; num_pkts++) {
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ /* Check FIFO empty condition */
+ if (sts & RCANFD_RFSTS_RFEMP)
+ break;
+
+ rcar_canfd_rx_pkt(priv);
+
+ /* Clear interrupt bit */
+ if (sts & RCANFD_RFSTS_RFIF)
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ sts & ~RCANFD_RFSTS_RFIF);
+ }
+
+ /* All packets processed */
+ if (num_pkts < quota) {
+ napi_complete(napi);
+ /* Enable Rx FIFO interrupts */
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+ RCANFD_RFCC_RFIE);
+ }
+ return num_pkts;
+}
+
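+/* rcar_canfd_rx_poll() follows the usual NAPI contract: the global IRQ
+ * handler disables the Rx FIFO interrupt before scheduling, and once
+ * fewer than "quota" packets are processed in one poll the poller
+ * completes and re-enables RFIE, handing duty back to the interrupt.
+ */
+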
+static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int err;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = rcar_canfd_start(ndev);
+ if (err)
+ return err;
+ netif_wake_queue(ndev);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int rcar_canfd_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(dev);
+ u32 val, ch = priv->channel;
+
+ /* Peripheral clock is already enabled in probe */
+ val = rcar_canfd_read(priv->base, RCANFD_CSTS(ch));
+ bec->txerr = RCANFD_CSTS_TECCNT(val);
+ bec->rxerr = RCANFD_CSTS_RECCNT(val);
+ return 0;
+}
+
+static const struct net_device_ops rcar_canfd_netdev_ops = {
+ .ndo_open = rcar_canfd_open,
+ .ndo_stop = rcar_canfd_close,
+ .ndo_start_xmit = rcar_canfd_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
+ u32 fcan_freq)
+{
+ struct platform_device *pdev = gpriv->pdev;
+ struct rcar_canfd_channel *priv;
+ struct net_device *ndev;
+ int err = -ENODEV;
+
+ ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH);
+ if (!ndev) {
+ dev_err(&pdev->dev, "alloc_candev() failed\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+ priv = netdev_priv(ndev);
+
+ ndev->netdev_ops = &rcar_canfd_netdev_ops;
+ ndev->flags |= IFF_ECHO;
+ priv->ndev = ndev;
+ priv->base = gpriv->base;
+ priv->channel = ch;
+ priv->can.clock.freq = fcan_freq;
+ dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq);
+
+ if (gpriv->fdmode) {
+ priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const;
+ priv->can.data_bittiming_const =
+ &rcar_canfd_data_bittiming_const;
+
+ /* Controller starts in CAN FD only mode */
+ can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+ } else {
+ /* Controller starts in Classical CAN only mode */
+ priv->can.bittiming_const = &rcar_canfd_bittiming_const;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+ }
+
+ priv->can.do_set_mode = rcar_canfd_do_set_mode;
+ priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter;
+ priv->gpriv = gpriv;
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
+ RCANFD_NAPI_WEIGHT);
+ spin_lock_init(&priv->tx_lock);
+ err = register_candev(ndev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "register_candev() failed, error %d\n", err);
+ goto fail_candev;
+ }
+ devm_can_led_init(ndev);
+ gpriv->ch[priv->channel] = priv;
+ dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
+ return 0;
+
+fail_candev:
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+fail:
+ return err;
+}
+
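+/* Once registered, each channel appears as a canX netdevice. A typical
+ * bring-up with iproute2 might look like (illustrative only, the rates
+ * are arbitrary examples):
+ *
+ *	ip link set can0 up type can bitrate 500000 dbitrate 2000000 fd on
+ */
+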
+static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
+{
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+
+ if (priv) {
+ unregister_candev(priv->ndev);
+ netif_napi_del(&priv->napi);
+ free_candev(priv->ndev);
+ }
+}
+
+static int rcar_canfd_probe(struct platform_device *pdev)
+{
+ struct resource *mem;
+ void __iomem *addr;
+ u32 sts, ch, fcan_freq;
+ struct rcar_canfd_global *gpriv;
+ struct device_node *of_child;
+ unsigned long channels_mask = 0;
+ int err, ch_irq, g_irq;
+ bool fdmode = true; /* CAN FD only mode - default */
+
+ if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
+ fdmode = false; /* Classical CAN only mode */
+
+ of_child = of_get_child_by_name(pdev->dev.of_node, "channel0");
+ if (of_child && of_device_is_available(of_child))
+ channels_mask |= BIT(0); /* Channel 0 */
+
+ of_child = of_get_child_by_name(pdev->dev.of_node, "channel1");
+ if (of_child && of_device_is_available(of_child))
+ channels_mask |= BIT(1); /* Channel 1 */
+
+ ch_irq = platform_get_irq(pdev, 0);
+ if (ch_irq < 0) {
+ dev_err(&pdev->dev, "no Channel IRQ resource\n");
+ err = ch_irq;
+ goto fail_dev;
+ }
+
+ g_irq = platform_get_irq(pdev, 1);
+ if (g_irq < 0) {
+ dev_err(&pdev->dev, "no Global IRQ resource\n");
+ err = g_irq;
+ goto fail_dev;
+ }
+
+ /* Global controller context */
+ gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL);
+ if (!gpriv) {
+ err = -ENOMEM;
+ goto fail_dev;
+ }
+ gpriv->pdev = pdev;
+ gpriv->channels_mask = channels_mask;
+ gpriv->fdmode = fdmode;
+
+ /* Peripheral clock */
+ gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(gpriv->clkp)) {
+ err = PTR_ERR(gpriv->clkp);
+ dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
+ err);
+ goto fail_dev;
+ }
+
+ /* fCAN clock: pick the external clock; if unavailable, fall back
+ * to the CANFD clock.
+ */
+ gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
+ if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) {
+ gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd");
+ if (IS_ERR(gpriv->can_clk)) {
+ err = PTR_ERR(gpriv->can_clk);
+ dev_err(&pdev->dev,
+ "cannot get canfd clock, error %d\n", err);
+ goto fail_dev;
+ }
+ gpriv->fcan = RCANFD_CANFDCLK;
+
+ } else {
+ gpriv->fcan = RCANFD_EXTCLK;
+ }
+ fcan_freq = clk_get_rate(gpriv->can_clk);
+
+ if (gpriv->fcan == RCANFD_CANFDCLK)
+ /* CANFD clock is further divided by 2 within the IP */
+ fcan_freq /= 2;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(addr)) {
+ err = PTR_ERR(addr);
+ goto fail_dev;
+ }
+ gpriv->base = addr;
+
+ /* Request IRQ that's common for both channels */
+ err = devm_request_irq(&pdev->dev, ch_irq,
+ rcar_canfd_channel_interrupt, 0,
+ "canfd.chn", gpriv);
+ if (err) {
+ dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ ch_irq, err);
+ goto fail_dev;
+ }
+ err = devm_request_irq(&pdev->dev, g_irq,
+ rcar_canfd_global_interrupt, 0,
+ "canfd.gbl", gpriv);
+ if (err) {
+ dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ g_irq, err);
+ goto fail_dev;
+ }
+
+ /* Enable peripheral clock for register access */
+ err = clk_prepare_enable(gpriv->clkp);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to enable peripheral clock, error %d\n", err);
+ goto fail_dev;
+ }
+
+ err = rcar_canfd_reset_controller(gpriv);
+ if (err) {
+ dev_err(&pdev->dev, "reset controller failed\n");
+ goto fail_clk;
+ }
+
+ /* Controller in Global reset & Channel reset mode */
+ rcar_canfd_configure_controller(gpriv);
+
+ /* Configure per channel attributes */
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ /* Configure Channel's Rx fifo */
+ rcar_canfd_configure_rx(gpriv, ch);
+
+ /* Configure Channel's Tx (Common) fifo */
+ rcar_canfd_configure_tx(gpriv, ch);
+
+ /* Configure receive rules */
+ rcar_canfd_configure_afl_rules(gpriv, ch);
+ }
+
+ /* Configure common interrupts */
+ rcar_canfd_enable_global_interrupts(gpriv);
+
+ /* Start Global operation mode */
+ rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK,
+ RCANFD_GCTR_GMDC_GOPM);
+
+ /* Verify mode change */
+ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
+ !(sts & RCANFD_GSTS_GNOPM), 2, 500000);
+ if (err) {
+ dev_err(&pdev->dev, "global operational mode failed\n");
+ goto fail_mode;
+ }
+
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq);
+ if (err)
+ goto fail_channel;
+ }
+
+ platform_set_drvdata(pdev, gpriv);
+ dev_info(&pdev->dev, "global operational state (clk %d, fdmode %d)\n",
+ gpriv->fcan, gpriv->fdmode);
+ return 0;
+
+fail_channel:
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ rcar_canfd_channel_remove(gpriv, ch);
+fail_mode:
+ rcar_canfd_disable_global_interrupts(gpriv);
+fail_clk:
+ clk_disable_unprepare(gpriv->clkp);
+fail_dev:
+ return err;
+}
+
+static int rcar_canfd_remove(struct platform_device *pdev)
+{
+ struct rcar_canfd_global *gpriv = platform_get_drvdata(pdev);
+ u32 ch;
+
+ rcar_canfd_reset_controller(gpriv);
+ rcar_canfd_disable_global_interrupts(gpriv);
+
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]);
+ rcar_canfd_channel_remove(gpriv, ch);
+ }
+
+ /* Enter global sleep mode */
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
+ clk_disable_unprepare(gpriv->clkp);
+ return 0;
+}
+
+static int __maybe_unused rcar_canfd_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused rcar_canfd_resume(struct device *dev)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
+ rcar_canfd_resume);
+
+static const struct of_device_id rcar_canfd_of_table[] = {
+ { .compatible = "renesas,rcar-gen3-canfd" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, rcar_canfd_of_table);
+
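+/* An illustrative device tree fragment matching what probe expects; node
+ * and clock names are taken from the lookups above, everything else is a
+ * SoC-dependent placeholder:
+ *
+ *	canfd {
+ *		compatible = "renesas,rcar-gen3-canfd";
+ *		clock-names = "fck", "canfd", "can_clk";
+ *		channel0 { };
+ *		channel1 { status = "disabled"; };
+ *	};
+ */
+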
+static struct platform_driver rcar_canfd_driver = {
+ .driver = {
+ .name = RCANFD_DRV_NAME,
+ .of_match_table = of_match_ptr(rcar_canfd_of_table),
+ .pm = &rcar_canfd_pm_ops,
+ },
+ .probe = rcar_canfd_probe,
+ .remove = rcar_canfd_remove,
+};
+
+module_platform_driver(rcar_canfd_driver);
+
+MODULE_AUTHOR("Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAN FD driver for Renesas R-Car SoC");
+MODULE_ALIAS("platform:" RCANFD_DRV_NAME);
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
index 76513dd780c7..79572457a2d6 100644
--- a/drivers/net/can/sja1000/tscan1.c
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -203,14 +203,4 @@ static struct isa_driver tscan1_isa_driver = {
},
};
-static int __init tscan1_init(void)
-{
- return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
-}
-module_init(tscan1_init);
-
-static void __exit tscan1_exit(void)
-{
- isa_unregister_driver(&tscan1_isa_driver);
-}
-module_exit(tscan1_exit);
+module_isa_driver(tscan1_isa_driver, TSCAN1_MAXDEV);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 9a3f15cb7ef4..eb7173713bbc 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -354,7 +354,7 @@ static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct slcan *sl = netdev_priv(dev);
- if (skb->len != sizeof(struct can_frame))
+ if (skb->len != CAN_MTU)
goto out;
spin_lock(&sl->lock);
@@ -442,7 +442,7 @@ static void slc_setup(struct net_device *dev)
dev->addr_len = 0;
dev->tx_queue_len = 10;
- dev->mtu = sizeof(struct can_frame);
+ dev->mtu = CAN_MTU;
dev->type = ARPHRD_CAN;
/* New-style flags. */
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index cf36d26ef002..f3f05fea8e1f 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1145,8 +1145,11 @@ static int mcp251x_can_probe(struct spi_device *spi)
/* Here is OK to not lock the MCP, no one knows about it yet */
ret = mcp251x_hw_probe(spi);
- if (ret)
+ if (ret) {
+ if (ret == -ENODEV)
+ dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n", priv->model);
goto error_probe;
+ }
mcp251x_hw_sleep(spi);
@@ -1156,6 +1159,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
devm_can_led_init(net);
+ netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
return 0;
error_probe:
@@ -1168,6 +1172,7 @@ out_clk:
out_free:
free_candev(net);
+ dev_err(&spi->dev, "Probe failed, err=%d\n", -ret);
return ret;
}
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bcb272f6c68a..8483a40e7e9e 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -16,7 +16,8 @@ config CAN_ESD_USB2
config CAN_GS_USB
tristate "Geschwister Schneider UG interfaces"
---help---
- This driver supports the Geschwister Schneider USB/CAN devices.
+ This driver supports the Geschwister Schneider and bytewerk.org
+ candleLight USB/CAN interfaces.
If unsure choose N,
choose Y for built in support,
M to compile as module (module will be named: gs_usb).
@@ -46,6 +47,8 @@ config CAN_KVASER_USB
- Kvaser USBcan R
- Kvaser Leaf Light v2
- Kvaser Mini PCI Express HS
+ - Kvaser Mini PCI Express 2xHS
+ - Kvaser USBcan Light 2xHS
- Kvaser USBcan II HS/HS
- Kvaser USBcan II HS/LS
- Kvaser USBcan Rugged ("USBcan Rev B")
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 1556d4286235..6f0cbc38782e 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -1,7 +1,9 @@
-/* CAN driver for Geschwister Schneider USB/CAN devices.
+/* CAN driver for Geschwister Schneider USB/CAN devices
+ * and bytewerk.org candleLight USB CAN interfaces.
*
- * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Copyright (C) 2013-2016 Geschwister Schneider Technologie-,
* Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ * Copyright (C) 2016 Hubert Denkmair
*
* Many thanks to all socketcan devs!
*
@@ -29,6 +31,9 @@
#define USB_GSUSB_1_VENDOR_ID 0x1d50
#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_CANDLELIGHT_VENDOR_ID 0x1209
+#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
+
#define GSUSB_ENDPOINT_IN 1
#define GSUSB_ENDPOINT_OUT 2
@@ -39,7 +44,9 @@ enum gs_usb_breq {
GS_USB_BREQ_MODE,
GS_USB_BREQ_BERR,
GS_USB_BREQ_BT_CONST,
- GS_USB_BREQ_DEVICE_CONFIG
+ GS_USB_BREQ_DEVICE_CONFIG,
+ GS_USB_BREQ_TIMESTAMP,
+ GS_USB_BREQ_IDENTIFY,
};
enum gs_can_mode {
@@ -58,6 +65,11 @@ enum gs_can_state {
GS_CAN_STATE_SLEEPING
};
+enum gs_can_identify_mode {
+ GS_CAN_IDENTIFY_OFF = 0,
+ GS_CAN_IDENTIFY_ON
+};
+
/* data types passed between host and device */
struct gs_host_config {
u32 byte_order;
@@ -77,10 +89,10 @@ struct gs_device_config {
} __packed;
#define GS_CAN_MODE_NORMAL 0
-#define GS_CAN_MODE_LISTEN_ONLY (1<<0)
-#define GS_CAN_MODE_LOOP_BACK (1<<1)
-#define GS_CAN_MODE_TRIPLE_SAMPLE (1<<2)
-#define GS_CAN_MODE_ONE_SHOT (1<<3)
+#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
+#define GS_CAN_MODE_LOOP_BACK BIT(1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_MODE_ONE_SHOT BIT(3)
struct gs_device_mode {
u32 mode;
@@ -101,10 +113,16 @@ struct gs_device_bittiming {
u32 brp;
} __packed;
-#define GS_CAN_FEATURE_LISTEN_ONLY (1<<0)
-#define GS_CAN_FEATURE_LOOP_BACK (1<<1)
-#define GS_CAN_FEATURE_TRIPLE_SAMPLE (1<<2)
-#define GS_CAN_FEATURE_ONE_SHOT (1<<3)
+struct gs_identify_mode {
+ u32 mode;
+} __packed;
+
+#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
+#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
+#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
+#define GS_CAN_FEATURE_IDENTIFY BIT(5)
struct gs_device_bt_const {
u32 feature;
@@ -209,7 +227,8 @@ static void gs_free_tx_context(struct gs_tx_context *txc)
/* Get a tx context by id.
*/
-static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id)
+static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
+ unsigned int id)
{
unsigned long flags;
@@ -452,7 +471,8 @@ static void gs_usb_xmit_callback(struct urb *urb)
netif_wake_queue(netdev);
}
-static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct net_device_stats *stats = &dev->netdev->stats;
@@ -658,7 +678,8 @@ static int gs_can_open(struct net_device *netdev)
rc = usb_control_msg(interface_to_usbdev(dev->iface),
usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
GS_USB_BREQ_MODE,
- USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
dev->channel,
0,
dm,
@@ -721,7 +742,59 @@ static const struct net_device_ops gs_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
-static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
+static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_identify_mode imode;
+ int rc;
+
+ if (do_identify)
+ imode.mode = GS_CAN_IDENTIFY_ON;
+ else
+ imode.mode = GS_CAN_IDENTIFY_OFF;
+
+ rc = usb_control_msg(interface_to_usbdev(dev->iface),
+ usb_sndctrlpipe(interface_to_usbdev(dev->iface),
+ 0),
+ GS_USB_BREQ_IDENTIFY,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ dev->channel,
+ 0,
+ &imode,
+ sizeof(imode),
+ 100);
+
+ return (rc > 0) ? 0 : rc;
+}
+
+/* blink LEDs to locate this interface */
+static int gs_usb_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ int rc = 0;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static const struct ethtool_ops gs_usb_ethtool_ops = {
+ .set_phys_id = gs_usb_set_phys_id,
+};
+
+static struct gs_can *gs_make_candev(unsigned int channel,
+ struct usb_interface *intf,
+ struct gs_device_config *dconf)
{
struct gs_can *dev;
struct net_device *netdev;
@@ -809,10 +882,14 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
- kfree(bt_const);
-
SET_NETDEV_DEV(netdev, &intf->dev);
+ if (dconf->sw_version > 1 &&
+ (bt_const->feature & GS_CAN_FEATURE_IDENTIFY))
+ netdev->ethtool_ops = &gs_usb_ethtool_ops;
+
+ kfree(bt_const);
+
rc = register_candev(dev->netdev);
if (rc) {
free_candev(dev->netdev);
@@ -830,19 +907,16 @@ static void gs_destroy_candev(struct gs_can *dev)
free_candev(dev->netdev);
}
-static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int gs_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct gs_usb *dev;
int rc = -ENOMEM;
unsigned int icount, i;
- struct gs_host_config *hconf;
- struct gs_device_config *dconf;
-
- hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
- if (!hconf)
- return -ENOMEM;
-
- hconf->byte_order = 0x0000beef;
+ struct gs_host_config hconf = {
+ .byte_order = 0x0000beef,
+ };
+ struct gs_device_config dconf;
/* send host config */
rc = usb_control_msg(interface_to_usbdev(intf),
@@ -851,22 +925,16 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
1,
intf->altsetting[0].desc.bInterfaceNumber,
- hconf,
- sizeof(*hconf),
+ &hconf,
+ sizeof(hconf),
1000);
- kfree(hconf);
-
if (rc < 0) {
dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
rc);
return rc;
}
- dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
- if (!dconf)
- return -ENOMEM;
-
/* read device config */
rc = usb_control_msg(interface_to_usbdev(intf),
usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
@@ -874,22 +942,16 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
1,
intf->altsetting[0].desc.bInterfaceNumber,
- dconf,
- sizeof(*dconf),
+ &dconf,
+ sizeof(dconf),
1000);
if (rc < 0) {
dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
rc);
-
- kfree(dconf);
-
return rc;
}
- icount = dconf->icount+1;
-
- kfree(dconf);
-
+ icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
if (icount > GS_MAX_INTF) {
@@ -910,7 +972,7 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
dev->udev = interface_to_usbdev(intf);
for (i = 0; i < icount; i++) {
- dev->canch[i] = gs_make_candev(i, intf);
+ dev->canch[i] = gs_make_candev(i, intf, &dconf);
if (IS_ERR_OR_NULL(dev->canch[i])) {
/* save error code to return later */
rc = PTR_ERR(dev->canch[i]);
@@ -952,6 +1014,8 @@ static void gs_usb_disconnect(struct usb_interface *intf)
static const struct usb_device_id gs_usb_table[] = {
{ USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
USB_GSUSB_1_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
+ USB_CANDLELIGHT_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
@@ -969,5 +1033,6 @@ module_usb_driver(gs_usb_driver);
MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
MODULE_DESCRIPTION(
"Socket CAN device driver for Geschwister Schneider Technologie-, "
-"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n"
+"and bytewerk.org candleLight USB CAN interfaces.");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 022bfa13ebfa..6f1f3b675ff5 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -59,11 +59,14 @@
#define USB_CAN_R_PRODUCT_ID 39
#define USB_LEAF_LITE_V2_PRODUCT_ID 288
#define USB_MINI_PCIE_HS_PRODUCT_ID 289
+#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
+#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
+#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
static inline bool kvaser_is_leaf(const struct usb_device_id *id)
{
return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_MINI_PCIE_HS_PRODUCT_ID;
+ id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
}
/* Kvaser USBCan-II devices */
@@ -537,6 +540,9 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = KVASER_HAS_TXRX_ERRORS },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
/* USBCANII family IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 200663c43ce9..8f4544394f44 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -9,14 +9,6 @@ config NET_DSA_MV88E6060
This enables support for the Marvell 88E6060 ethernet switch
chip.
-config NET_DSA_MV88E6XXX
- tristate "Marvell 88E6xxx Ethernet switch chip support"
- depends on NET_DSA
- select NET_DSA_TAG_EDSA
- ---help---
- This enables support for most of the Marvell 88E6xxx models of
- Ethernet switch chips, except 88E6060.
-
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
depends on HAS_IOMEM && NET_DSA
@@ -28,4 +20,8 @@ config NET_DSA_BCM_SF2
This enables support for the Broadcom Starfighter 2 Ethernet
switch chips.
+source "drivers/net/dsa/b53/Kconfig"
+
+source "drivers/net/dsa/mv88e6xxx/Kconfig"
+
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 76b751dd9efd..ca1e71b853a6 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o
+
+obj-y += b53/
+obj-y += mv88e6xxx/
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
new file mode 100644
index 000000000000..27f32a50df57
--- /dev/null
+++ b/drivers/net/dsa/b53/Kconfig
@@ -0,0 +1,33 @@
+menuconfig B53
+ tristate "Broadcom BCM53xx managed switch support"
+ depends on NET_DSA
+ help
+ This driver adds support for Broadcom managed switch chips. It supports
+ BCM5325E, BCM5365, BCM539x, BCM53115 and BCM53125 as well as BCM63XX
+ integrated switches.
+
+config B53_SPI_DRIVER
+ tristate "B53 SPI connected switch driver"
+ depends on B53 && SPI
+ help
+ Select to enable support for registering switches configured through SPI.
+
+config B53_MDIO_DRIVER
+ tristate "B53 MDIO connected switch driver"
+ depends on B53
+ help
+ Select to enable support for registering switches configured through MDIO.
+
+config B53_MMAP_DRIVER
+ tristate "B53 MMAP connected switch driver"
+ depends on B53 && HAS_IOMEM
+ help
+ Select to enable support for memory-mapped switches like the BCM63XX
+ integrated switches.
+
+config B53_SRAB_DRIVER
+ tristate "B53 SRAB connected switch driver"
+ depends on B53 && HAS_IOMEM
+ help
+ Select to enable support for memory-mapped Switch Register Access
+ Bridge (SRAB) registers, as found on the BCM53010.
diff --git a/drivers/net/dsa/b53/Makefile b/drivers/net/dsa/b53/Makefile
new file mode 100644
index 000000000000..7e6f9a8bfd75
--- /dev/null
+++ b/drivers/net/dsa/b53/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_B53) += b53_common.o
+
+obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o
+obj-$(CONFIG_B53_MDIO_DRIVER) += b53_mdio.o
+obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o
+obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
new file mode 100644
index 000000000000..444de66667b9
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -0,0 +1,1787 @@
+/*
+ * B53 switch driver main logic
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/b53.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "b53_regs.h"
+#include "b53_priv.h"
+
+struct b53_mib_desc {
+ u8 size;
+ u8 offset;
+ const char *name;
+};
+
+/* BCM5365 MIB counters */
+static const struct b53_mib_desc b53_mibs_65[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPkts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredTransmit" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x38, "TxPausePkts" },
+ { 8, 0x44, "RxOctets" },
+ { 4, 0x4c, "RxUndersizePkts" },
+ { 4, 0x50, "RxPausePkts" },
+ { 4, 0x54, "Pkts64Octets" },
+ { 4, 0x58, "Pkts65to127Octets" },
+ { 4, 0x5c, "Pkts128to255Octets" },
+ { 4, 0x60, "Pkts256to511Octets" },
+ { 4, 0x64, "Pkts512to1023Octets" },
+ { 4, 0x68, "Pkts1024to1522Octets" },
+ { 4, 0x6c, "RxOversizePkts" },
+ { 4, 0x70, "RxJabbers" },
+ { 4, 0x74, "RxAlignmentErrors" },
+ { 4, 0x78, "RxFCSErrors" },
+ { 8, 0x7c, "RxGoodOctets" },
+ { 4, 0x84, "RxDropPkts" },
+ { 4, 0x88, "RxUnicastPkts" },
+ { 4, 0x8c, "RxMulticastPkts" },
+ { 4, 0x90, "RxBroadcastPkts" },
+ { 4, 0x94, "RxSAChanges" },
+ { 4, 0x98, "RxFragments" },
+};
+
+#define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65)
+
+/* BCM63xx MIB counters */
+static const struct b53_mib_desc b53_mibs_63xx[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x0c, "TxQoSPkts" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPkts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredTransmit" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x38, "TxPausePkts" },
+ { 8, 0x3c, "TxQoSOctets" },
+ { 8, 0x44, "RxOctets" },
+ { 4, 0x4c, "RxUndersizePkts" },
+ { 4, 0x50, "RxPausePkts" },
+ { 4, 0x54, "Pkts64Octets" },
+ { 4, 0x58, "Pkts65to127Octets" },
+ { 4, 0x5c, "Pkts128to255Octets" },
+ { 4, 0x60, "Pkts256to511Octets" },
+ { 4, 0x64, "Pkts512to1023Octets" },
+ { 4, 0x68, "Pkts1024to1522Octets" },
+ { 4, 0x6c, "RxOversizePkts" },
+ { 4, 0x70, "RxJabbers" },
+ { 4, 0x74, "RxAlignmentErrors" },
+ { 4, 0x78, "RxFCSErrors" },
+ { 8, 0x7c, "RxGoodOctets" },
+ { 4, 0x84, "RxDropPkts" },
+ { 4, 0x88, "RxUnicastPkts" },
+ { 4, 0x8c, "RxMulticastPkts" },
+ { 4, 0x90, "RxBroadcastPkts" },
+ { 4, 0x94, "RxSAChanges" },
+ { 4, 0x98, "RxFragments" },
+ { 4, 0xa0, "RxSymbolErrors" },
+ { 4, 0xa4, "RxQoSPkts" },
+ { 8, 0xa8, "RxQoSOctets" },
+ { 4, 0xb0, "Pkts1523to2047Octets" },
+ { 4, 0xb4, "Pkts2048to4095Octets" },
+ { 4, 0xb8, "Pkts4096to8191Octets" },
+ { 4, 0xbc, "Pkts8192to9728Octets" },
+ { 4, 0xc0, "RxDiscarded" },
+};
+
+#define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx)
+
+/* MIB counters */
+static const struct b53_mib_desc b53_mibs[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPkts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredTransmit" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x38, "TxPausePkts" },
+ { 8, 0x50, "RxOctets" },
+ { 4, 0x58, "RxUndersizePkts" },
+ { 4, 0x5c, "RxPausePkts" },
+ { 4, 0x60, "Pkts64Octets" },
+ { 4, 0x64, "Pkts65to127Octets" },
+ { 4, 0x68, "Pkts128to255Octets" },
+ { 4, 0x6c, "Pkts256to511Octets" },
+ { 4, 0x70, "Pkts512to1023Octets" },
+ { 4, 0x74, "Pkts1024to1522Octets" },
+ { 4, 0x78, "RxOversizePkts" },
+ { 4, 0x7c, "RxJabbers" },
+ { 4, 0x80, "RxAlignmentErrors" },
+ { 4, 0x84, "RxFCSErrors" },
+ { 8, 0x88, "RxGoodOctets" },
+ { 4, 0x90, "RxDropPkts" },
+ { 4, 0x94, "RxUnicastPkts" },
+ { 4, 0x98, "RxMulticastPkts" },
+ { 4, 0x9c, "RxBroadcastPkts" },
+ { 4, 0xa0, "RxSAChanges" },
+ { 4, 0xa4, "RxFragments" },
+ { 4, 0xa8, "RxJumboPkts" },
+ { 4, 0xac, "RxSymbolErrors" },
+ { 4, 0xc0, "RxDiscarded" },
+};
+
+#define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
+
+static int b53_do_vlan_op(struct b53_device *dev, u8 op)
+{
+ unsigned int i;
+
+ b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
+
+ for (i = 0; i < 10; i++) {
+ u8 vta;
+
+ b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
+ if (!(vta & VTA_START_CMD))
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -EIO;
+}
+
+static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
+ struct b53_vlan *vlan)
+{
+ if (is5325(dev)) {
+ u32 entry = 0;
+
+ if (vlan->members) {
+ entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
+ VA_UNTAG_S_25) | vlan->members;
+ if (dev->core_rev >= 3)
+ entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
+ else
+ entry |= VA_VALID_25;
+ }
+
+ b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
+ VTA_RW_STATE_WR | VTA_RW_OP_EN);
+ } else if (is5365(dev)) {
+ u16 entry = 0;
+
+ if (vlan->members)
+ entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
+ VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
+ VTA_RW_STATE_WR | VTA_RW_OP_EN);
+ } else {
+ b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
+ b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
+ (vlan->untag << VTE_UNTAG_S) | vlan->members);
+
+ b53_do_vlan_op(dev, VTA_CMD_WRITE);
+ }
+
+ dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
+ vid, vlan->members, vlan->untag);
+}
+
+static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
+ struct b53_vlan *vlan)
+{
+ if (is5325(dev)) {
+ u32 entry = 0;
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
+ VTA_RW_STATE_RD | VTA_RW_OP_EN);
+ b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
+
+ if (dev->core_rev >= 3)
+ vlan->valid = !!(entry & VA_VALID_25_R4);
+ else
+ vlan->valid = !!(entry & VA_VALID_25);
+ vlan->members = entry & VA_MEMBER_MASK;
+ vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
+
+ } else if (is5365(dev)) {
+ u16 entry = 0;
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
+ VTA_RW_STATE_WR | VTA_RW_OP_EN);
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
+
+ vlan->valid = !!(entry & VA_VALID_65);
+ vlan->members = entry & VA_MEMBER_MASK;
+ vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
+ } else {
+ u32 entry = 0;
+
+ b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
+ b53_do_vlan_op(dev, VTA_CMD_READ);
+ b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
+ vlan->members = entry & VTE_MEMBERS;
+ vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
+ vlan->valid = true;
+ }
+}
+
+static void b53_set_forwarding(struct b53_device *dev, int enable)
+{
+ u8 mgmt;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+
+ if (enable)
+ mgmt |= SM_SW_FWD_EN;
+ else
+ mgmt &= ~SM_SW_FWD_EN;
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+}
+
+static void b53_enable_vlan(struct b53_device *dev, bool enable)
+{
+ u8 mgmt, vc0, vc1, vc4 = 0, vc5;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
+
+ if (is5325(dev) || is5365(dev)) {
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
+ } else if (is63xx(dev)) {
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
+ } else {
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
+ }
+
+ mgmt &= ~SM_SW_FWD_MODE;
+
+ if (enable) {
+ vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
+ vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+ vc5 |= VC5_DROP_VTABLE_MISS;
+
+ if (is5325(dev))
+ vc0 &= ~VC0_RESERVED_1;
+
+ if (is5325(dev) || is5365(dev))
+ vc1 |= VC1_RX_MCST_TAG_EN;
+
+ } else {
+ vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
+ vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ vc5 &= ~VC5_DROP_VTABLE_MISS;
+
+ if (is5325(dev) || is5365(dev))
+ vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ else
+ vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
+
+ if (is5325(dev) || is5365(dev))
+ vc1 &= ~VC1_RX_MCST_TAG_EN;
+ }
+
+ if (!is5325(dev) && !is5365(dev))
+ vc5 &= ~VC5_VID_FFF_EN;
+
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
+
+ if (is5325(dev) || is5365(dev)) {
+ /* enable the high 8 bit vid check on 5325 */
+ if (is5325(dev) && enable)
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
+ VC3_HIGH_8BIT_EN);
+ else
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
+
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
+ } else if (is63xx(dev)) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
+ } else {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
+ }
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+}
+
+static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
+{
+ u32 port_mask = 0;
+ u16 max_size = JMS_MIN_SIZE;
+
+ if (is5325(dev) || is5365(dev))
+ return -EINVAL;
+
+ if (enable) {
+ port_mask = dev->enabled_ports;
+ max_size = JMS_MAX_SIZE;
+ if (allow_10_100)
+ port_mask |= JPM_10_100_JUMBO_EN;
+ }
+
+ b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
+ return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
+}
+
+static int b53_flush_arl(struct b53_device *dev, u8 mask)
+{
+ unsigned int i;
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
+ FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
+
+ for (i = 0; i < 10; i++) {
+ u8 fast_age_ctrl;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
+ &fast_age_ctrl);
+
+ if (!(fast_age_ctrl & FAST_AGE_DONE))
+ goto out;
+
+ msleep(1);
+ }
+
+ return -ETIMEDOUT;
+out:
+ /* Only age dynamic entries (default behavior) */
+ b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
+ return 0;
+}
+
+static int b53_fast_age_port(struct b53_device *dev, int port)
+{
+ b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
+
+ return b53_flush_arl(dev, FAST_AGE_PORT);
+}
+
+static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
+{
+ b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
+
+ return b53_flush_arl(dev, FAST_AGE_VLAN);
+}
+
+static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ unsigned int i;
+ u16 pvlan;
+
+ /* Enable the IMP port to be in the same VLAN as the other ports
+ * on a per-port basis such that we only have Port i and IMP in
+ * the same VLAN.
+ */
+ b53_for_each_port(dev, i) {
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
+ pvlan |= BIT(cpu_port);
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
+ }
+}
+
+static int b53_enable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ unsigned int cpu_port = dev->cpu_port;
+ u16 pvlan;
+
+ /* Clear the Rx and Tx disable bits and set to no spanning tree */
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
+
+	/* Set this port, and only this one, to be in the default VLAN;
+	 * if the port is a member of a bridge, restore the membership it
+	 * had prior to being brought down.
+	 */
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+ pvlan &= ~0x1ff;
+ pvlan |= BIT(port);
+ pvlan |= dev->ports[port].vlan_ctl_mask;
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+
+ b53_imp_vlan_setup(ds, cpu_port);
+
+ return 0;
+}
+
+static void b53_disable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u8 reg;
+
+ /* Disable Tx/Rx for the port */
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
+ reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+}
+
+static void b53_enable_cpu_port(struct b53_device *dev)
+{
+ unsigned int cpu_port = dev->cpu_port;
+ u8 port_ctrl;
+
+ /* BCM5325 CPU port is at 8 */
+ if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
+ cpu_port = B53_CPU_PORT;
+
+ port_ctrl = PORT_CTRL_RX_BCST_EN |
+ PORT_CTRL_RX_MCST_EN |
+ PORT_CTRL_RX_UCST_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(cpu_port), port_ctrl);
+}
+
+static void b53_enable_mib(struct b53_device *dev)
+{
+ u8 gc;
+
+ b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
+ gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
+}
+
+static int b53_configure_vlan(struct b53_device *dev)
+{
+ struct b53_vlan vl = { 0 };
+ int i;
+
+ /* clear all vlan entries */
+ if (is5325(dev) || is5365(dev)) {
+ for (i = 1; i < dev->num_vlans; i++)
+ b53_set_vlan_entry(dev, i, &vl);
+ } else {
+ b53_do_vlan_op(dev, VTA_CMD_CLEAR);
+ }
+
+ b53_enable_vlan(dev, false);
+
+ b53_for_each_port(dev, i)
+ b53_write16(dev, B53_VLAN_PAGE,
+ B53_VLAN_PORT_DEF_TAG(i), 1);
+
+ if (!is5325(dev) && !is5365(dev))
+ b53_set_jumbo(dev, dev->enable_jumbo, false);
+
+ return 0;
+}
+
+static void b53_switch_reset_gpio(struct b53_device *dev)
+{
+ int gpio = dev->reset_gpio;
+
+ if (gpio < 0)
+ return;
+
+	/* Reset sequence: drive RESET low for 50 ms, then high and wait 20 ms */
+ gpio_set_value(gpio, 0);
+ mdelay(50);
+
+ gpio_set_value(gpio, 1);
+ mdelay(20);
+
+ dev->current_page = 0xff;
+}
+
+static int b53_switch_reset(struct b53_device *dev)
+{
+ u8 mgmt;
+
+ b53_switch_reset_gpio(dev);
+
+ if (is539x(dev)) {
+ b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
+ b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
+ }
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+
+ if (!(mgmt & SM_SW_FWD_EN)) {
+ mgmt &= ~SM_SW_FWD_MODE;
+ mgmt |= SM_SW_FWD_EN;
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+
+ if (!(mgmt & SM_SW_FWD_EN)) {
+ dev_err(dev->dev, "Failed to enable switch!\n");
+ return -EINVAL;
+ }
+ }
+
+ b53_enable_mib(dev);
+
+ return b53_flush_arl(dev, FAST_AGE_STATIC);
+}
+
+static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+ u16 value = 0;
+ int ret;
+
+ if (priv->ops->phy_read16)
+ ret = priv->ops->phy_read16(priv, addr, reg, &value);
+ else
+ ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
+ reg * 2, &value);
+
+ return ret ? ret : value;
+}
+
+static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+
+ if (priv->ops->phy_write16)
+ return priv->ops->phy_write16(priv, addr, reg, val);
+
+ return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
+}
+
+static int b53_reset_switch(struct b53_device *priv)
+{
+ /* reset vlans */
+ priv->enable_jumbo = false;
+
+ memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
+ memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
+
+ return b53_switch_reset(priv);
+}
+
+static int b53_apply_config(struct b53_device *priv)
+{
+ /* disable switching */
+ b53_set_forwarding(priv, 0);
+
+ b53_configure_vlan(priv);
+
+ /* enable switching */
+ b53_set_forwarding(priv, 1);
+
+ return 0;
+}
+
+static void b53_reset_mib(struct b53_device *priv)
+{
+ u8 gc;
+
+ b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
+
+ b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
+ msleep(1);
+ b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
+ msleep(1);
+}
+
+static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
+{
+ if (is5365(dev))
+ return b53_mibs_65;
+ else if (is63xx(dev))
+ return b53_mibs_63xx;
+ else
+ return b53_mibs;
+}
+
+static unsigned int b53_get_mib_size(struct b53_device *dev)
+{
+ if (is5365(dev))
+ return B53_MIBS_65_SIZE;
+ else if (is63xx(dev))
+ return B53_MIBS_63XX_SIZE;
+ else
+ return B53_MIBS_SIZE;
+}
+
+static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ const struct b53_mib_desc *mibs = b53_get_mib(dev);
+ unsigned int mib_size = b53_get_mib_size(dev);
+ unsigned int i;
+
+ for (i = 0; i < mib_size; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mibs[i].name, ETH_GSTRING_LEN);
+}
+
+static void b53_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ const struct b53_mib_desc *mibs = b53_get_mib(dev);
+ unsigned int mib_size = b53_get_mib_size(dev);
+ const struct b53_mib_desc *s;
+ unsigned int i;
+ u64 val = 0;
+
+ if (is5365(dev) && port == 5)
+ port = 8;
+
+ mutex_lock(&dev->stats_mutex);
+
+ for (i = 0; i < mib_size; i++) {
+ s = &mibs[i];
+
+ if (s->size == 8) {
+ b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
+ } else {
+ u32 val32;
+
+ b53_read32(dev, B53_MIB_PAGE(port), s->offset,
+ &val32);
+ val = val32;
+ }
+ data[i] = (u64)val;
+ }
+
+ mutex_unlock(&dev->stats_mutex);
+}
+
+static int b53_get_sset_count(struct dsa_switch *ds)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+
+ return b53_get_mib_size(dev);
+}
+
+static int b53_set_addr(struct dsa_switch *ds, u8 *addr)
+{
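+	/* Nothing to do: the switch does not need the host MAC address */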
+ return 0;
+}
+
+static int b53_setup(struct dsa_switch *ds)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ unsigned int port;
+ int ret;
+
+ ret = b53_reset_switch(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+ b53_reset_mib(dev);
+
+ ret = b53_apply_config(dev);
+ if (ret)
+ dev_err(ds->dev, "failed to apply configuration\n");
+
+ for (port = 0; port < dev->num_ports; port++) {
+ if (BIT(port) & ds->enabled_port_mask)
+ b53_enable_port(ds, port, NULL);
+ else if (dsa_is_cpu_port(ds, port))
+ b53_enable_cpu_port(dev);
+ else
+ b53_disable_port(ds, port, NULL);
+ }
+
+ return ret;
+}
+
+static void b53_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u8 rgmii_ctrl = 0, reg = 0, off;
+
+ if (!phy_is_pseudo_fixed_link(phydev))
+ return;
+
+ /* Override the port settings */
+ if (port == dev->cpu_port) {
+ off = B53_PORT_OVERRIDE_CTRL;
+ reg = PORT_OVERRIDE_EN;
+ } else {
+ off = B53_GMII_PORT_OVERRIDE_CTRL(port);
+ reg = GMII_PO_EN;
+ }
+
+ /* Set the link UP */
+ if (phydev->link)
+ reg |= PORT_OVERRIDE_LINK;
+
+ if (phydev->duplex == DUPLEX_FULL)
+ reg |= PORT_OVERRIDE_FULL_DUPLEX;
+
+ switch (phydev->speed) {
+ case 2000:
+ reg |= PORT_OVERRIDE_SPEED_2000M;
+ /* fallthrough */
+ case SPEED_1000:
+ reg |= PORT_OVERRIDE_SPEED_1000M;
+ break;
+ case SPEED_100:
+ reg |= PORT_OVERRIDE_SPEED_100M;
+ break;
+ case SPEED_10:
+ reg |= PORT_OVERRIDE_SPEED_10M;
+ break;
+ default:
+ dev_err(ds->dev, "unknown speed: %d\n", phydev->speed);
+ return;
+ }
+
+ /* Enable flow control on BCM5301x's CPU port */
+ if (is5301x(dev) && port == dev->cpu_port)
+ reg |= PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW;
+
+ if (phydev->pause) {
+ if (phydev->asym_pause)
+ reg |= PORT_OVERRIDE_TX_FLOW;
+ reg |= PORT_OVERRIDE_RX_FLOW;
+ }
+
+ b53_write8(dev, B53_CTRL_PAGE, off, reg);
+
+ if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
+ if (port == 8)
+ off = B53_RGMII_CTRL_IMP;
+ else
+ off = B53_RGMII_CTRL_P(port);
+
+		/* Configure the port RGMII clock delay with the DLLs disabled
+		 * and tx_clk aligned timing (restoring the reset defaults)
+		 */
+ b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
+ RGMII_CTRL_TIMING_SEL);
+
+		/* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
+		 * sure that we enable the port TX clock internal delay to
+		 * account for this internal delay that is inserted, otherwise
+		 * the switch won't be able to receive correctly.
+		 *
+		 * PHY_INTERFACE_MODE_RGMII means that we are not introducing
+		 * any delay on either transmission or reception, so the
+		 * BCM53125 must be configured accordingly to account for the
+		 * lack of delay and introduce the delays itself.
+		 *
+		 * The BCM53125 switch has its RX clock and TX clock control
+		 * swapped, hence the reason why we modify the TX clock path
+		 * in the "RGMII" case.
+		 */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
+ rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+ b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
+
+ dev_info(ds->dev, "Configured port %d for %s\n", port,
+ phy_modes(phydev->interface));
+ }
+
+ /* configure MII port if necessary */
+ if (is5325(dev)) {
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ &reg);
+
+ /* reverse mii needs to be enabled */
+ if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ reg | PORT_OVERRIDE_RV_MII_25);
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ &reg);
+
+ if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
+ dev_err(ds->dev,
+ "Failed to enable reverse MII mode\n");
+ return;
+ }
+ }
+ } else if (is5301x(dev)) {
+ if (port != dev->cpu_port) {
+ u8 po_reg = B53_GMII_PORT_OVERRIDE_CTRL(dev->cpu_port);
+ u8 gmii_po;
+
+ b53_read8(dev, B53_CTRL_PAGE, po_reg, &gmii_po);
+ gmii_po |= GMII_PO_LINK |
+ GMII_PO_RX_FLOW |
+ GMII_PO_TX_FLOW |
+ GMII_PO_EN |
+ GMII_PO_SPEED_2000M;
+ b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po);
+ }
+ }
+}
+
+static int b53_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ return 0;
+}
+
+static int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+
+ if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
+ return -EOPNOTSUPP;
+
+ if (vlan->vid_end > dev->num_vlans)
+ return -ERANGE;
+
+ b53_enable_vlan(dev, true);
+
+ return 0;
+}
+
+static void b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ unsigned int cpu_port = dev->cpu_port;
+ struct b53_vlan *vl;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &dev->vlans[vid];
+
+ b53_get_vlan_entry(dev, vid, vl);
+
+ vl->members |= BIT(port) | BIT(cpu_port);
+ if (untagged)
+ vl->untag |= BIT(port) | BIT(cpu_port);
+ else
+ vl->untag &= ~(BIT(port) | BIT(cpu_port));
+
+ b53_set_vlan_entry(dev, vid, vl);
+ b53_fast_age_vlan(dev, vid);
+ }
+
+ if (pvid) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+ vlan->vid_end);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port),
+ vlan->vid_end);
+		b53_fast_age_vlan(dev, vlan->vid_end);
+ }
+}
+
+static int b53_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ unsigned int cpu_port = dev->cpu_port;
+ struct b53_vlan *vl;
+ u16 vid;
+ u16 pvid;
+
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &dev->vlans[vid];
+
+ b53_get_vlan_entry(dev, vid, vl);
+
+ vl->members &= ~BIT(port);
+ if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
+ vl->members = 0;
+
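+		/* If the VLAN being deleted is the port's PVID, fall back to
+		 * the default VID: 1 on 5325/5365 (which cannot use VID 0),
+		 * 0 otherwise.
+		 */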
+ if (pvid == vid) {
+ if (is5325(dev) || is5365(dev))
+ pvid = 1;
+ else
+ pvid = 0;
+ }
+
+ if (untagged) {
+ vl->untag &= ~(BIT(port));
+ if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
+ vl->untag = 0;
+ }
+
+ b53_set_vlan_entry(dev, vid, vl);
+ b53_fast_age_vlan(dev, vid);
+ }
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid);
+ b53_fast_age_vlan(dev, pvid);
+
+ return 0;
+}
+
+static int b53_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u16 vid, vid_start = 0, pvid;
+ struct b53_vlan *vl;
+ int err = 0;
+
+ if (is5325(dev) || is5365(dev))
+ vid_start = 1;
+
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+
+ /* Use our software cache for dumps, since we do not have any HW
+ * operation returning only the used/valid VLANs
+ */
+ for (vid = vid_start; vid < dev->num_vlans; vid++) {
+ vl = &dev->vlans[vid];
+
+ if (!vl->valid)
+ continue;
+
+ if (!(vl->members & BIT(port)))
+ continue;
+
+ vlan->vid_begin = vlan->vid_end = vid;
+ vlan->flags = 0;
+
+ if (vl->untag & BIT(port))
+ vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+ if (pvid == vid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* Address Resolution Logic routines */
+static int b53_arl_op_wait(struct b53_device *dev)
+{
+ unsigned int timeout = 10;
+ u8 reg;
+
+ do {
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
+ if (!(reg & ARLTBL_START_DONE))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
+
+ return -ETIMEDOUT;
+}
+
+static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
+{
+ u8 reg;
+
+ if (op > ARLTBL_RW)
+ return -EINVAL;
+
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
+ reg |= ARLTBL_START_DONE;
+ if (op)
+ reg |= ARLTBL_RW;
+ else
+ reg &= ~ARLTBL_RW;
+ b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
+
+ return b53_arl_op_wait(dev);
+}
+
+static int b53_arl_read(struct b53_device *dev, u64 mac,
+ u16 vid, struct b53_arl_entry *ent, u8 *idx,
+ bool is_valid)
+{
+ unsigned int i;
+ int ret;
+
+ ret = b53_arl_op_wait(dev);
+ if (ret)
+ return ret;
+
+ /* Read the bins */
+ for (i = 0; i < dev->num_arl_entries; i++) {
+ u64 mac_vid;
+ u32 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+
+ if (!(fwd_entry & ARLTBL_VALID))
+ continue;
+ if ((mac_vid & ARLTBL_MAC_MASK) != mac)
+ continue;
+		*idx = i;
+		return 0;
+	}
+
+ return -ENOENT;
+}
+
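+/* op == 0: program (add, update or invalidate) the ARL entry for addr/vid;
+ * any other value: perform a lookup only.
+ */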
+static int b53_arl_op(struct b53_device *dev, int op, int port,
+ const unsigned char *addr, u16 vid, bool is_valid)
+{
+ struct b53_arl_entry ent;
+ u32 fwd_entry;
+ u64 mac, mac_vid = 0;
+ u8 idx = 0;
+ int ret;
+
+ /* Convert the array into a 64-bit MAC */
+ mac = b53_mac_to_u64(addr);
+
+ /* Perform a read for the given MAC and VID */
+ b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
+ b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+
+ /* Issue a read operation for this MAC */
+ ret = b53_arl_rw_op(dev, 1);
+ if (ret)
+ return ret;
+
+ ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid);
+ /* If this is a read, just finish now */
+ if (op)
+ return ret;
+
+ /* We could not find a matching MAC, so reset to a new entry */
+ if (ret) {
+ fwd_entry = 0;
+ idx = 1;
+ }
+
+ memset(&ent, 0, sizeof(ent));
+ ent.port = port;
+ ent.is_valid = is_valid;
+ ent.vid = vid;
+ ent.is_static = true;
+ memcpy(ent.mac, addr, ETH_ALEN);
+ b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
+
+ b53_write64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
+ b53_write32(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+
+ return b53_arl_rw_op(dev, 0);
+}
+
+static int b53_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+
+ /* 5325 and 5365 require some more massaging, but could
+ * be supported eventually
+ */
+ if (is5325(priv) || is5365(priv))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void b53_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+
+ if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
+ pr_err("%s: failed to add MAC address\n", __func__);
+}
+
+static int b53_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+
+ return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
+}
+
+static int b53_arl_search_wait(struct b53_device *dev)
+{
+ unsigned int timeout = 1000;
+ u8 reg;
+
+ do {
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
+ if (!(reg & ARL_SRCH_STDN))
+ return 0;
+
+ if (reg & ARL_SRCH_VLID)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ u64 mac_vid;
+ u32 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE,
+ B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE,
+ B53_ARL_SRCH_RSTL(idx), &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+}
+
+static int b53_fdb_copy(struct net_device *dev, int port,
+ const struct b53_arl_entry *ent,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ if (!ent->is_valid)
+ return 0;
+
+ if (port != ent->port)
+ return 0;
+
+ ether_addr_copy(fdb->addr, ent->mac);
+ fdb->vid = ent->vid;
+ fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ return cb(&fdb->obj);
+}
+
+static int b53_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct b53_device *priv = ds_to_priv(ds);
+ struct net_device *dev = ds->ports[port].netdev;
+ struct b53_arl_entry results[2];
+ unsigned int count = 0;
+ int ret;
+ u8 reg;
+
+ /* Start search operation */
+ reg = ARL_SRCH_STDN;
+ b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
+
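+	/* Each completed search cycle yields one result, or two on switches
+	 * with more than two ARL bins; bound the walk to the 1024-entry
+	 * table size.
+	 */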
+ do {
+ ret = b53_arl_search_wait(priv);
+ if (ret)
+ return ret;
+
+ b53_arl_search_rd(priv, 0, &results[0]);
+ ret = b53_fdb_copy(dev, port, &results[0], fdb, cb);
+ if (ret)
+ return ret;
+
+ if (priv->num_arl_entries > 2) {
+ b53_arl_search_rd(priv, 1, &results[1]);
+ ret = b53_fdb_copy(dev, port, &results[1], fdb, cb);
+ if (ret)
+ return ret;
+
+ if (!results[0].is_valid && !results[1].is_valid)
+ break;
+ }
+
+ } while (count++ < 1024);
+
+ return 0;
+}
+
+static int b53_br_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u16 pvlan, reg;
+ unsigned int i;
+
+ dev->ports[port].bridge_dev = bridge;
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+
+ b53_for_each_port(dev, i) {
+ if (dev->ports[i].bridge_dev != bridge)
+ continue;
+
+ /* Add this local port to the remote port VLAN control
+ * membership and update the remote port bitmask
+ */
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
+ reg |= BIT(port);
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
+ dev->ports[i].vlan_ctl_mask = reg;
+
+ pvlan |= BIT(i);
+ }
+
+ /* Configure the local port VLAN control membership to include
+ * remote ports and update the local port bitmask
+ */
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+ dev->ports[port].vlan_ctl_mask = pvlan;
+
+ return 0;
+}
+
+static void b53_br_leave(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ struct net_device *bridge = dev->ports[port].bridge_dev;
+ struct b53_vlan *vl = &dev->vlans[0];
+ unsigned int i;
+ u16 pvlan, reg, pvid;
+
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+
+ b53_for_each_port(dev, i) {
+ /* Don't touch the remaining ports */
+ if (dev->ports[i].bridge_dev != bridge)
+ continue;
+
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
+ dev->ports[port].vlan_ctl_mask = reg;
+
+ /* Prevent self removal to preserve isolation */
+ if (port != i)
+ pvlan &= ~BIT(i);
+ }
+
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+ dev->ports[port].vlan_ctl_mask = pvlan;
+ dev->ports[port].bridge_dev = NULL;
+
+ if (is5325(dev) || is5365(dev))
+ pvid = 1;
+ else
+ pvid = 0;
+
+ b53_get_vlan_entry(dev, pvid, vl);
+ vl->members |= BIT(port) | BIT(dev->cpu_port);
+ vl->untag |= BIT(port) | BIT(dev->cpu_port);
+ b53_set_vlan_entry(dev, pvid, vl);
+}
+
+static void b53_br_set_stp_state(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u8 hw_state, cur_hw_state;
+ u8 reg;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
+ cur_hw_state = reg & PORT_CTRL_STP_STATE_MASK;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ hw_state = PORT_CTRL_DIS_STATE;
+ break;
+ case BR_STATE_LISTENING:
+ hw_state = PORT_CTRL_LISTEN_STATE;
+ break;
+ case BR_STATE_LEARNING:
+ hw_state = PORT_CTRL_LEARN_STATE;
+ break;
+ case BR_STATE_FORWARDING:
+ hw_state = PORT_CTRL_FWD_STATE;
+ break;
+ case BR_STATE_BLOCKING:
+ hw_state = PORT_CTRL_BLOCK_STATE;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ /* Fast-age ARL entries if we are moving a port from Learning or
+ * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
+ * state (hw_state)
+ */
+ if (cur_hw_state != hw_state) {
+ if (cur_hw_state >= PORT_CTRL_LEARN_STATE &&
+ hw_state <= PORT_CTRL_LISTEN_STATE) {
+ if (b53_fast_age_port(dev, port)) {
+ dev_err(ds->dev, "fast ageing failed\n");
+ return;
+ }
+ }
+ }
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
+ reg &= ~PORT_CTRL_STP_STATE_MASK;
+ reg |= hw_state;
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+}
+
+static struct dsa_switch_driver b53_switch_ops = {
+ .tag_protocol = DSA_TAG_PROTO_NONE,
+ .setup = b53_setup,
+ .set_addr = b53_set_addr,
+ .get_strings = b53_get_strings,
+ .get_ethtool_stats = b53_get_ethtool_stats,
+ .get_sset_count = b53_get_sset_count,
+ .phy_read = b53_phy_read16,
+ .phy_write = b53_phy_write16,
+ .adjust_link = b53_adjust_link,
+ .port_enable = b53_enable_port,
+ .port_disable = b53_disable_port,
+ .port_bridge_join = b53_br_join,
+ .port_bridge_leave = b53_br_leave,
+ .port_stp_state_set = b53_br_set_stp_state,
+ .port_vlan_filtering = b53_vlan_filtering,
+ .port_vlan_prepare = b53_vlan_prepare,
+ .port_vlan_add = b53_vlan_add,
+ .port_vlan_del = b53_vlan_del,
+ .port_vlan_dump = b53_vlan_dump,
+ .port_fdb_prepare = b53_fdb_prepare,
+ .port_fdb_dump = b53_fdb_dump,
+ .port_fdb_add = b53_fdb_add,
+ .port_fdb_del = b53_fdb_del,
+};
+
+struct b53_chip_data {
+ u32 chip_id;
+ const char *dev_name;
+ u16 vlans;
+ u16 enabled_ports;
+ u8 cpu_port;
+ u8 vta_regs[3];
+ u8 arl_entries;
+ u8 duplex_reg;
+ u8 jumbo_pm_reg;
+ u8 jumbo_size_reg;
+};
+
+#define B53_VTA_REGS \
+ { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
+#define B53_VTA_REGS_9798 \
+ { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
+#define B53_VTA_REGS_63XX \
+ { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
+
+static const struct b53_chip_data b53_switch_chips[] = {
+ {
+ .chip_id = BCM5325_DEVICE_ID,
+ .dev_name = "BCM5325",
+ .vlans = 16,
+ .enabled_ports = 0x1f,
+ .arl_entries = 2,
+ .cpu_port = B53_CPU_PORT_25,
+ .duplex_reg = B53_DUPLEX_STAT_FE,
+ },
+ {
+ .chip_id = BCM5365_DEVICE_ID,
+ .dev_name = "BCM5365",
+ .vlans = 256,
+ .enabled_ports = 0x1f,
+ .arl_entries = 2,
+ .cpu_port = B53_CPU_PORT_25,
+ .duplex_reg = B53_DUPLEX_STAT_FE,
+ },
+ {
+ .chip_id = BCM5395_DEVICE_ID,
+ .dev_name = "BCM5395",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM5397_DEVICE_ID,
+ .dev_name = "BCM5397",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS_9798,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM5398_DEVICE_ID,
+ .dev_name = "BCM5398",
+ .vlans = 4096,
+ .enabled_ports = 0x7f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS_9798,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53115_DEVICE_ID,
+ .dev_name = "BCM53115",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .vta_regs = B53_VTA_REGS,
+ .cpu_port = B53_CPU_PORT,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53125_DEVICE_ID,
+ .dev_name = "BCM53125",
+ .vlans = 4096,
+		.enabled_ports = 0xff,
+		.arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53128_DEVICE_ID,
+ .dev_name = "BCM53128",
+ .vlans = 4096,
+ .enabled_ports = 0x1ff,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM63XX_DEVICE_ID,
+ .dev_name = "BCM63xx",
+ .vlans = 4096,
+ .enabled_ports = 0, /* pdata must provide them */
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS_63XX,
+ .duplex_reg = B53_DUPLEX_STAT_63XX,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
+ },
+ {
+ .chip_id = BCM53010_DEVICE_ID,
+ .dev_name = "BCM53010",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53011_DEVICE_ID,
+ .dev_name = "BCM53011",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53012_DEVICE_ID,
+ .dev_name = "BCM53012",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53018_DEVICE_ID,
+ .dev_name = "BCM53018",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53019_DEVICE_ID,
+ .dev_name = "BCM53019",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+};
+
+static int b53_switch_init(struct b53_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
+ const struct b53_chip_data *chip = &b53_switch_chips[i];
+
+ if (chip->chip_id == dev->chip_id) {
+ if (!dev->enabled_ports)
+ dev->enabled_ports = chip->enabled_ports;
+ dev->name = chip->dev_name;
+ dev->duplex_reg = chip->duplex_reg;
+ dev->vta_regs[0] = chip->vta_regs[0];
+ dev->vta_regs[1] = chip->vta_regs[1];
+ dev->vta_regs[2] = chip->vta_regs[2];
+			dev->jumbo_pm_reg = chip->jumbo_pm_reg;
+			dev->jumbo_size_reg = chip->jumbo_size_reg;
+ ds->drv = &b53_switch_ops;
+ dev->cpu_port = chip->cpu_port;
+ dev->num_vlans = chip->vlans;
+ dev->num_arl_entries = chip->arl_entries;
+ break;
+ }
+ }
+
+ /* check which BCM5325x version we have */
+ if (is5325(dev)) {
+ u8 vc4;
+
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
+
+ /* check reserved bits */
+ switch (vc4 & 3) {
+ case 1:
+ /* BCM5325E */
+ break;
+ case 3:
+ /* BCM5325F - do not use port 4 */
+ dev->enabled_ports &= ~BIT(4);
+ break;
+ default:
+/* On the BCM47XX SoCs this is the supported internal switch. */
+#ifndef CONFIG_BCM47XX
+ /* BCM5325M */
+ return -EINVAL;
+#else
+ break;
+#endif
+ }
+ } else if (dev->chip_id == BCM53115_DEVICE_ID) {
+ u64 strap_value;
+
+ b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
+ /* use second IMP port if GMII is enabled */
+ if (strap_value & SV_GMII_CTRL_115)
+ dev->cpu_port = 5;
+ }
+
+ /* cpu port is always last */
+ dev->num_ports = dev->cpu_port + 1;
+ dev->enabled_ports |= BIT(dev->cpu_port);
+
+ dev->ports = devm_kzalloc(dev->dev,
+ sizeof(struct b53_port) * dev->num_ports,
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+
+ dev->vlans = devm_kzalloc(dev->dev,
+ sizeof(struct b53_vlan) * dev->num_vlans,
+ GFP_KERNEL);
+ if (!dev->vlans)
+ return -ENOMEM;
+
+ dev->reset_gpio = b53_switch_get_reset_gpio(dev);
+ if (dev->reset_gpio >= 0) {
+ ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
+ GPIOF_OUT_INIT_HIGH, "robo_reset");
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops,
+ void *priv)
+{
+ struct dsa_switch *ds;
+ struct b53_device *dev;
+
+ ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL);
+ if (!ds)
+ return NULL;
+
+ dev = (struct b53_device *)(ds + 1);
+
+ ds->priv = dev;
+ ds->dev = base;
+ dev->dev = base;
+
+ dev->ds = ds;
+ dev->priv = priv;
+ dev->ops = ops;
+ mutex_init(&dev->reg_mutex);
+ mutex_init(&dev->stats_mutex);
+
+ return dev;
+}
+EXPORT_SYMBOL(b53_switch_alloc);
+
+int b53_switch_detect(struct b53_device *dev)
+{
+ u32 id32;
+ u16 tmp;
+ u8 id8;
+ int ret;
+
+ ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
+ if (ret)
+ return ret;
+
+ switch (id8) {
+ case 0:
+ /* BCM5325 and BCM5365 do not have this register so reads
+ * return 0. But the read operation did succeed, so assume this
+ * is one of them.
+ *
+ * Next check if we can write to the 5325's VTA register; for
+ * 5365 it is read only.
+ */
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
+
+ if (tmp == 0xf)
+ dev->chip_id = BCM5325_DEVICE_ID;
+ else
+ dev->chip_id = BCM5365_DEVICE_ID;
+ break;
+ case BCM5395_DEVICE_ID:
+ case BCM5397_DEVICE_ID:
+ case BCM5398_DEVICE_ID:
+ dev->chip_id = id8;
+ break;
+ default:
+ ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
+ if (ret)
+ return ret;
+
+ switch (id32) {
+ case BCM53115_DEVICE_ID:
+ case BCM53125_DEVICE_ID:
+ case BCM53128_DEVICE_ID:
+ case BCM53010_DEVICE_ID:
+ case BCM53011_DEVICE_ID:
+ case BCM53012_DEVICE_ID:
+ case BCM53018_DEVICE_ID:
+ case BCM53019_DEVICE_ID:
+ dev->chip_id = id32;
+ break;
+ default:
+ pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
+ id8, id32);
+ return -ENODEV;
+ }
+ }
+
+ if (dev->chip_id == BCM5325_DEVICE_ID)
+ return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
+ &dev->core_rev);
+ else
+ return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
+ &dev->core_rev);
+}
+EXPORT_SYMBOL(b53_switch_detect);
+
+int b53_switch_register(struct b53_device *dev)
+{
+ int ret;
+
+ if (dev->pdata) {
+ dev->chip_id = dev->pdata->chip_id;
+ dev->enabled_ports = dev->pdata->enabled_ports;
+ }
+
+ if (!dev->chip_id && b53_switch_detect(dev))
+ return -EINVAL;
+
+ ret = b53_switch_init(dev);
+ if (ret)
+ return ret;
+
+ pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
+
+ return dsa_register_switch(dev->ds, dev->ds->dev->of_node);
+}
+EXPORT_SYMBOL(b53_switch_register);
+
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("B53 switch library");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
new file mode 100644
index 000000000000..aa87c3fffdac
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -0,0 +1,392 @@
+/*
+ * B53 register access through MII registers
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/brcmphy.h>
+#include <linux/rtnetlink.h>
+#include <net/dsa.h>
+
+#include "b53_priv.h"
+
+/* MII registers */
+#define REG_MII_PAGE 0x10 /* MII Page register */
+#define REG_MII_ADDR 0x11 /* MII Address register */
+#define REG_MII_DATA0 0x18 /* MII Data register 0 */
+#define REG_MII_DATA1 0x19 /* MII Data register 1 */
+#define REG_MII_DATA2 0x1a /* MII Data register 2 */
+#define REG_MII_DATA3 0x1b /* MII Data register 3 */
+
+#define REG_MII_PAGE_ENABLE BIT(0)
+#define REG_MII_ADDR_WRITE BIT(0)
+#define REG_MII_ADDR_READ BIT(1)
+
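+/* Register access follows the Broadcom pseudo-PHY protocol: select the
+ * page through REG_MII_PAGE, latch the in-page register offset and the
+ * read/write opcode through REG_MII_ADDR, then transfer up to 64 bits
+ * of data through REG_MII_DATA0..3.
+ */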
+static int b53_mdio_op(struct b53_device *dev, u8 page, u8 reg, u16 op)
+{
+ int i;
+ u16 v;
+ int ret;
+ struct mii_bus *bus = dev->priv;
+
+ if (dev->current_page != page) {
+ /* set page number */
+ v = (page << 8) | REG_MII_PAGE_ENABLE;
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_PAGE, v);
+ if (ret)
+ return ret;
+ dev->current_page = page;
+ }
+
+ /* set register address */
+ v = (reg << 8) | op;
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_ADDR, v);
+ if (ret)
+ return ret;
+
+ /* check if operation completed */
+ for (i = 0; i < 5; ++i) {
+ v = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_ADDR);
+ if (!(v & (REG_MII_ADDR_WRITE | REG_MII_ADDR_READ)))
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (WARN_ON(i == 5))
+ return -EIO;
+
+ return 0;
+}
+
+static int b53_mdio_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0) & 0xff;
+
+ return 0;
+}
+
+static int b53_mdio_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0);
+
+ return 0;
+}
+
+static int b53_mdio_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0);
+ *val |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA1) << 16;
+
+ return 0;
+}
+
+static int b53_mdio_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ u64 temp = 0;
+ int i;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ for (i = 2; i >= 0; i--) {
+ temp <<= 16;
+ temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i);
+ }
+
+ *val = temp;
+
+ return 0;
+}
+
+static int b53_mdio_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ u64 temp = 0;
+ int i;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ for (i = 3; i >= 0; i--) {
+ temp <<= 16;
+ temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i);
+ }
+
+ *val = temp;
+
+ return 0;
+}
+
+static int b53_mdio_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0, value);
+ if (ret)
+ return ret;
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0, value);
+ if (ret)
+ return ret;
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ struct mii_bus *bus = dev->priv;
+ unsigned int i;
+ u32 temp = value;
+
+ for (i = 0; i < 2; i++) {
+ int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i,
+ temp & 0xffff);
+ if (ret)
+ return ret;
+ temp >>= 16;
+ }
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct mii_bus *bus = dev->priv;
+ unsigned int i;
+ u64 temp = value;
+
+ for (i = 0; i < 3; i++) {
+ int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i,
+ temp & 0xffff);
+ if (ret)
+ return ret;
+ temp >>= 16;
+ }
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct mii_bus *bus = dev->priv;
+ unsigned int i;
+ u64 temp = value;
+
+ for (i = 0; i < 4; i++) {
+ int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i,
+ temp & 0xffff);
+ if (ret)
+ return ret;
+ temp >>= 16;
+ }
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_phy_read16(struct b53_device *dev, int addr, int reg,
+ u16 *value)
+{
+ struct mii_bus *bus = dev->priv;
+
+ *value = mdiobus_read_nested(bus, addr, reg);
+
+ return 0;
+}
+
+static int b53_mdio_phy_write16(struct b53_device *dev, int addr, int reg,
+ u16 value)
+{
+	struct mii_bus *bus = dev->priv;
+
+ return mdiobus_write_nested(bus, addr, reg, value);
+}
+
+static struct b53_io_ops b53_mdio_ops = {
+ .read8 = b53_mdio_read8,
+ .read16 = b53_mdio_read16,
+ .read32 = b53_mdio_read32,
+ .read48 = b53_mdio_read48,
+ .read64 = b53_mdio_read64,
+ .write8 = b53_mdio_write8,
+ .write16 = b53_mdio_write16,
+ .write32 = b53_mdio_write32,
+ .write48 = b53_mdio_write48,
+ .write64 = b53_mdio_write64,
+ .phy_read16 = b53_mdio_phy_read16,
+ .phy_write16 = b53_mdio_phy_write16,
+};
+
+#define B53_BRCM_OUI_1 0x0143bc00
+#define B53_BRCM_OUI_2 0x03625c00
+#define B53_BRCM_OUI_3 0x00406000
+
+static int b53_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct b53_device *dev;
+ u32 phy_id;
+ int ret;
+
+ /* allow the generic PHY driver to take over the non-management MDIO
+ * addresses
+ */
+ if (mdiodev->addr != BRCM_PSEUDO_PHY_ADDR && mdiodev->addr != 0) {
+ dev_err(&mdiodev->dev, "leaving address %d to PHY\n",
+ mdiodev->addr);
+ return -ENODEV;
+ }
+
+ /* read the first port's id */
+ phy_id = mdiobus_read(mdiodev->bus, 0, 2) << 16;
+ phy_id |= mdiobus_read(mdiodev->bus, 0, 3);
+
+ /* BCM5325, BCM539x (OUI_1)
+ * BCM53125, BCM53128 (OUI_2)
+ * BCM5365 (OUI_3)
+ */
+ if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) {
+ dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
+ return -ENODEV;
+ }
+
+ /* First probe will come from SWITCH_MDIO controller on the 7445D0
+ * switch, which will conflict with the 7445 integrated switch
+ * pseudo-phy (we end-up programming both). In that case, we return
+ * -EPROBE_DEFER for the first time we get here, and wait until we come
+ * back with the slave MDIO bus which has the correct indirection
+ * layer setup
+ */
+ if (of_machine_is_compatible("brcm,bcm7445d0") &&
+ strcmp(mdiodev->bus->name, "sf2 slave mii"))
+ return -EPROBE_DEFER;
+
+ dev = b53_switch_alloc(&mdiodev->dev, &b53_mdio_ops, mdiodev->bus);
+ if (!dev)
+ return -ENOMEM;
+
+ /* we don't use page 0xff, so force a page set */
+ dev->current_page = 0xff;
+ dev->bus = mdiodev->bus;
+
+ dev_set_drvdata(&mdiodev->dev, dev);
+
+ ret = b53_switch_register(dev);
+ if (ret) {
+ dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void b53_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
+ struct dsa_switch *ds = dev->ds;
+
+ dsa_unregister_switch(ds);
+}
+
+static const struct of_device_id b53_of_match[] = {
+ { .compatible = "brcm,bcm5325" },
+ { .compatible = "brcm,bcm53115" },
+ { .compatible = "brcm,bcm53125" },
+ { .compatible = "brcm,bcm53128" },
+ { .compatible = "brcm,bcm5365" },
+ { .compatible = "brcm,bcm5395" },
+ { .compatible = "brcm,bcm5397" },
+ { .compatible = "brcm,bcm5398" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, b53_of_match);
+
+static struct mdio_driver b53_mdio_driver = {
+ .probe = b53_mdio_probe,
+ .remove = b53_mdio_remove,
+ .mdiodrv.driver = {
+ .name = "bcm53xx",
+ .of_match_table = b53_of_match,
+ },
+};
+
+static int __init b53_mdio_driver_register(void)
+{
+ return mdio_driver_register(&b53_mdio_driver);
+}
+module_init(b53_mdio_driver_register);
+
+static void __exit b53_mdio_driver_unregister(void)
+{
+ mdio_driver_unregister(&b53_mdio_driver);
+}
+module_exit(b53_mdio_driver_unregister);
+
+MODULE_DESCRIPTION("B53 MDIO access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
new file mode 100644
index 000000000000..21f1068b0804
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -0,0 +1,274 @@
+/*
+ * B53 register access through memory mapped registers
+ *
+ * Copyright (C) 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kconfig.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/b53.h>
+
+#include "b53_priv.h"
+
+struct b53_mmap_priv {
+ void __iomem *regs;
+};
+
+static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ u8 __iomem *regs = dev->priv;
+
+ *val = readb(regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian)
+ *val = ioread16be(regs + (page << 8) + reg);
+ else
+ *val = readw(regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian)
+ *val = ioread32be(regs + (page << 8) + reg);
+ else
+ *val = readl(regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
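+ /* A 48-bit register may only be 16-bit aligned; assemble it from a
+ * 16-bit low half and a 32-bit high half in that case, or from a
+ * 32-bit low half and a 16-bit high half when 32-bit aligned.
+ */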
+ if (reg % 4) {
+ u16 lo;
+ u32 hi;
+
+ if (dev->pdata && dev->pdata->big_endian) {
+ lo = ioread16be(regs + (page << 8) + reg);
+ hi = ioread32be(regs + (page << 8) + reg + 2);
+ } else {
+ lo = readw(regs + (page << 8) + reg);
+ hi = readl(regs + (page << 8) + reg + 2);
+ }
+
+ *val = ((u64)hi << 16) | lo;
+ } else {
+ u32 lo;
+ u16 hi;
+
+ if (dev->pdata && dev->pdata->big_endian) {
+ lo = ioread32be(regs + (page << 8) + reg);
+ hi = ioread16be(regs + (page << 8) + reg + 4);
+ } else {
+ lo = readl(regs + (page << 8) + reg);
+ hi = readw(regs + (page << 8) + reg + 4);
+ }
+
+ *val = ((u64)hi << 32) | lo;
+ }
+
+ return 0;
+}
+
+static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ u8 __iomem *regs = dev->priv;
+ u32 hi, lo;
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian) {
+ lo = ioread32be(regs + (page << 8) + reg);
+ hi = ioread32be(regs + (page << 8) + reg + 4);
+ } else {
+ lo = readl(regs + (page << 8) + reg);
+ hi = readl(regs + (page << 8) + reg + 4);
+ }
+
+ *val = ((u64)hi << 32) | lo;
+
+ return 0;
+}
+
+static int b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ u8 __iomem *regs = dev->priv;
+
+ writeb(value, regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian)
+ iowrite16be(value, regs + (page << 8) + reg);
+ else
+ writew(value, regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian)
+ iowrite32be(value, regs + (page << 8) + reg);
+ else
+ writel(value, regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
+ if (reg % 4) {
+ u32 hi = (u32)(value >> 16);
+ u16 lo = (u16)value;
+
+ b53_mmap_write16(dev, page, reg, lo);
+ b53_mmap_write32(dev, page, reg + 2, hi);
+ } else {
+ u16 hi = (u16)(value >> 32);
+ u32 lo = (u32)value;
+
+ b53_mmap_write32(dev, page, reg, lo);
+ b53_mmap_write16(dev, page, reg + 4, hi);
+ }
+
+ return 0;
+}
+
+static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ u32 hi, lo;
+
+ hi = upper_32_bits(value);
+ lo = lower_32_bits(value);
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ b53_mmap_write32(dev, page, reg, lo);
+ b53_mmap_write32(dev, page, reg + 4, hi);
+
+ return 0;
+}
+
+static struct b53_io_ops b53_mmap_ops = {
+ .read8 = b53_mmap_read8,
+ .read16 = b53_mmap_read16,
+ .read32 = b53_mmap_read32,
+ .read48 = b53_mmap_read48,
+ .read64 = b53_mmap_read64,
+ .write8 = b53_mmap_write8,
+ .write16 = b53_mmap_write16,
+ .write32 = b53_mmap_write32,
+ .write48 = b53_mmap_write48,
+ .write64 = b53_mmap_write64,
+};
+
+static int b53_mmap_probe(struct platform_device *pdev)
+{
+ struct b53_platform_data *pdata = pdev->dev.platform_data;
+ struct b53_device *dev;
+
+ if (!pdata)
+ return -EINVAL;
+
+ dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, pdata->regs);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->pdata = pdata;
+
+ platform_set_drvdata(pdev, dev);
+
+ return b53_switch_register(dev);
+}
+
+static int b53_mmap_remove(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (dev)
+ b53_switch_remove(dev);
+
+ return 0;
+}
+
+static const struct of_device_id b53_mmap_of_table[] = {
+ { .compatible = "brcm,bcm3384-switch" },
+ { .compatible = "brcm,bcm6328-switch" },
+ { .compatible = "brcm,bcm6368-switch" },
+ { .compatible = "brcm,bcm63xx-switch" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver b53_mmap_driver = {
+ .probe = b53_mmap_probe,
+ .remove = b53_mmap_remove,
+ .driver = {
+ .name = "b53-switch",
+ .of_match_table = b53_mmap_of_table,
+ },
+};
+
+module_platform_driver(b53_mmap_driver);
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("B53 MMAP access driver");
+MODULE_LICENSE("Dual BSD/GPL");
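
For boards without device tree support, the "b53-switch" platform driver above is fed through platform data. A minimal sketch, assuming struct b53_platform_data exposes the regs and big_endian fields this file already uses; the my_* identifiers are placeholders, not part of this patch.

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/platform_data/b53.h>

static struct b53_platform_data my_b53_pdata = {
	.big_endian	= 1,
	/* .regs must point at the ioremap()'d switch register window */
};

static struct platform_device my_b53_device = {
	.name	= "b53-switch",
	.id	= -1,
	.dev	= {
		.platform_data = &my_b53_pdata,
	},
};

/* Hypothetical board hook: hand over the mapped window and register */
static int __init my_board_add_switch(void __iomem *switch_base)
{
	my_b53_pdata.regs = switch_base;

	return platform_device_register(&my_b53_device);
}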
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
new file mode 100644
index 000000000000..5d8c602fb877
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -0,0 +1,387 @@
+/*
+ * B53 common definitions
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __B53_PRIV_H
+#define __B53_PRIV_H
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+
+#include "b53_regs.h"
+
+struct b53_device;
+struct net_device;
+
+struct b53_io_ops {
+ int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value);
+ int (*read16)(struct b53_device *dev, u8 page, u8 reg, u16 *value);
+ int (*read32)(struct b53_device *dev, u8 page, u8 reg, u32 *value);
+ int (*read48)(struct b53_device *dev, u8 page, u8 reg, u64 *value);
+ int (*read64)(struct b53_device *dev, u8 page, u8 reg, u64 *value);
+ int (*write8)(struct b53_device *dev, u8 page, u8 reg, u8 value);
+ int (*write16)(struct b53_device *dev, u8 page, u8 reg, u16 value);
+ int (*write32)(struct b53_device *dev, u8 page, u8 reg, u32 value);
+ int (*write48)(struct b53_device *dev, u8 page, u8 reg, u64 value);
+ int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value);
+ int (*phy_read16)(struct b53_device *dev, int addr, int reg, u16 *value);
+ int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
+};
+
+enum {
+ BCM5325_DEVICE_ID = 0x25,
+ BCM5365_DEVICE_ID = 0x65,
+ BCM5395_DEVICE_ID = 0x95,
+ BCM5397_DEVICE_ID = 0x97,
+ BCM5398_DEVICE_ID = 0x98,
+ BCM53115_DEVICE_ID = 0x53115,
+ BCM53125_DEVICE_ID = 0x53125,
+ BCM53128_DEVICE_ID = 0x53128,
+ BCM63XX_DEVICE_ID = 0x6300,
+ BCM53010_DEVICE_ID = 0x53010,
+ BCM53011_DEVICE_ID = 0x53011,
+ BCM53012_DEVICE_ID = 0x53012,
+ BCM53018_DEVICE_ID = 0x53018,
+ BCM53019_DEVICE_ID = 0x53019,
+};
+
+#define B53_N_PORTS 9
+#define B53_N_PORTS_25 6
+
+struct b53_port {
+ u16 vlan_ctl_mask;
+ struct net_device *bridge_dev;
+};
+
+struct b53_vlan {
+ u16 members;
+ u16 untag;
+ bool valid;
+};
+
+struct b53_device {
+ struct dsa_switch *ds;
+ struct b53_platform_data *pdata;
+ const char *name;
+
+ struct mutex reg_mutex;
+ struct mutex stats_mutex;
+ const struct b53_io_ops *ops;
+
+ /* chip specific data */
+ u32 chip_id;
+ u8 core_rev;
+ u8 vta_regs[3];
+ u8 duplex_reg;
+ u8 jumbo_pm_reg;
+ u8 jumbo_size_reg;
+ int reset_gpio;
+ u8 num_arl_entries;
+
+ /* used ports mask */
+ u16 enabled_ports;
+ unsigned int cpu_port;
+
+ /* connect specific data */
+ u8 current_page;
+ struct device *dev;
+
+ /* Master MDIO bus we got probed from */
+ struct mii_bus *bus;
+
+ void *priv;
+
+ /* run time configuration */
+ bool enable_jumbo;
+
+ unsigned int num_vlans;
+ struct b53_vlan *vlans;
+ unsigned int num_ports;
+ struct b53_port *ports;
+};
+
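+/* Iterate over the ports set in dev->enabled_ports. The macro ends in a
+ * bare `if`, so loop bodies must not attach an `else` clause to it.
+ */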
+#define b53_for_each_port(dev, i) \
+ for (i = 0; i < B53_N_PORTS; i++) \
+ if (dev->enabled_ports & BIT(i))
+
+static inline int is5325(struct b53_device *dev)
+{
+ return dev->chip_id == BCM5325_DEVICE_ID;
+}
+
+static inline int is5365(struct b53_device *dev)
+{
+#ifdef CONFIG_BCM47XX
+ return dev->chip_id == BCM5365_DEVICE_ID;
+#else
+ return 0;
+#endif
+}
+
+static inline int is5397_98(struct b53_device *dev)
+{
+ return dev->chip_id == BCM5397_DEVICE_ID ||
+ dev->chip_id == BCM5398_DEVICE_ID;
+}
+
+static inline int is539x(struct b53_device *dev)
+{
+ return dev->chip_id == BCM5395_DEVICE_ID ||
+ dev->chip_id == BCM5397_DEVICE_ID ||
+ dev->chip_id == BCM5398_DEVICE_ID;
+}
+
+static inline int is531x5(struct b53_device *dev)
+{
+ return dev->chip_id == BCM53115_DEVICE_ID ||
+ dev->chip_id == BCM53125_DEVICE_ID ||
+ dev->chip_id == BCM53128_DEVICE_ID;
+}
+
+static inline int is63xx(struct b53_device *dev)
+{
+#ifdef CONFIG_BCM63XX
+ return dev->chip_id == BCM63XX_DEVICE_ID;
+#else
+ return 0;
+#endif
+}
+
+static inline int is5301x(struct b53_device *dev)
+{
+ return dev->chip_id == BCM53010_DEVICE_ID ||
+ dev->chip_id == BCM53011_DEVICE_ID ||
+ dev->chip_id == BCM53012_DEVICE_ID ||
+ dev->chip_id == BCM53018_DEVICE_ID ||
+ dev->chip_id == BCM53019_DEVICE_ID;
+}
+
+#define B53_CPU_PORT_25 5
+#define B53_CPU_PORT 8
+
+static inline int is_cpu_port(struct b53_device *dev, int port)
+{
+ return dev->cpu_port == port;
+}
+
+struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops,
+ void *priv);
+
+int b53_switch_detect(struct b53_device *dev);
+
+int b53_switch_register(struct b53_device *dev);
+
+static inline void b53_switch_remove(struct b53_device *dev)
+{
+ dsa_unregister_switch(dev->ds);
+}
+
+static inline int b53_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read8(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read16(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read32(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read48(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read64(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write8(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write16(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write32(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write48(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write64(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+struct b53_arl_entry {
+ u8 port;
+ u8 mac[ETH_ALEN];
+ u16 vid;
+ u8 is_valid:1;
+ u8 is_age:1;
+ u8 is_static:1;
+};
+
+static inline void b53_mac_from_u64(u64 src, u8 *dst)
+{
+ unsigned int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff;
+}
+
+static inline u64 b53_mac_to_u64(const u8 *src)
+{
+ unsigned int i;
+ u64 dst = 0;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i);
+
+ return dst;
+}
+
+static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
+ u64 mac_vid, u32 fwd_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK;
+ ent->is_valid = !!(fwd_entry & ARLTBL_VALID);
+ ent->is_age = !!(fwd_entry & ARLTBL_AGE);
+ ent->is_static = !!(fwd_entry & ARLTBL_STATIC);
+ b53_mac_from_u64(mac_vid, ent->mac);
+ ent->vid = mac_vid >> ARLTBL_VID_S;
+}
+
+static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
+ const struct b53_arl_entry *ent)
+{
+ *mac_vid = b53_mac_to_u64(ent->mac);
+ *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S;
+ *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK;
+ if (ent->is_valid)
+ *fwd_entry |= ARLTBL_VALID;
+ if (ent->is_static)
+ *fwd_entry |= ARLTBL_STATIC;
+ if (ent->is_age)
+ *fwd_entry |= ARLTBL_AGE;
+}
+
+#ifdef CONFIG_BCM47XX
+
+#include <linux/bcm47xx_nvram.h>
+#include <bcm47xx_board.h>
+static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
+{
+ enum bcm47xx_board board = bcm47xx_board_get();
+
+ switch (board) {
+ case BCM47XX_BOARD_LINKSYS_WRT300NV11:
+ case BCM47XX_BOARD_LINKSYS_WRT310NV1:
+ return 8;
+ default:
+ return bcm47xx_nvram_gpio_pin("robo_reset");
+ }
+}
+#else
+static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
+{
+ return -ENOENT;
+}
+#endif
+#endif
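
Each b53_read*/b53_write* wrapper above takes reg_mutex for the single access only, so a read-modify-write sequence is not atomic as a whole. A minimal sketch of driving these accessors with registers from b53_regs.h below; the helper is illustrative only (the driver's real logic of this kind lives in b53_common.c).

/* Illustrative only: enable frame forwarding in the Switch Mode
 * register. The mutex is dropped between the read and the write, so a
 * concurrent caller would need its own serialization on top.
 */
static int b53_example_enable_forwarding(struct b53_device *dev)
{
	u8 mgmt;
	int ret;

	ret = b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
	if (ret)
		return ret;

	mgmt |= SM_SW_FWD_EN;

	return b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
}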
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
new file mode 100644
index 000000000000..8f12bddd5dc9
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -0,0 +1,434 @@
+/*
+ * B53 register definitions
+ *
+ * Copyright (C) 2004 Broadcom Corporation
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __B53_REGS_H
+#define __B53_REGS_H
+
+/* Management Port (SMP) Page offsets */
+#define B53_CTRL_PAGE 0x00 /* Control */
+#define B53_STAT_PAGE 0x01 /* Status */
+#define B53_MGMT_PAGE 0x02 /* Management Mode */
+#define B53_MIB_AC_PAGE 0x03 /* MIB Autocast */
+#define B53_ARLCTRL_PAGE 0x04 /* ARL Control */
+#define B53_ARLIO_PAGE 0x05 /* ARL Access */
+#define B53_FRAMEBUF_PAGE 0x06 /* Management frame access */
+#define B53_MEM_ACCESS_PAGE 0x08 /* Memory access */
+
+/* PHY Registers */
+#define B53_PORT_MII_PAGE(i) (0x10 + (i)) /* Port i MII Registers */
+#define B53_IM_PORT_PAGE 0x18 /* Inverse MII Port (to EMAC) */
+#define B53_ALL_PORT_PAGE 0x19 /* All ports MII (broadcast) */
+
+/* MIB registers */
+#define B53_MIB_PAGE(i) (0x20 + (i))
+
+/* Quality of Service (QoS) Registers */
+#define B53_QOS_PAGE 0x30
+
+/* Port VLAN Page */
+#define B53_PVLAN_PAGE 0x31
+
+/* VLAN Registers */
+#define B53_VLAN_PAGE 0x34
+
+/* Jumbo Frame Registers */
+#define B53_JUMBO_PAGE 0x40
+
+/* CFP Configuration Registers Page */
+#define B53_CFP_PAGE 0xa1
+
+/*************************************************************************
+ * Control Page registers
+ *************************************************************************/
+
+/* Port Control Register (8 bit) */
+#define B53_PORT_CTRL(i) (0x00 + (i))
+#define PORT_CTRL_RX_DISABLE BIT(0)
+#define PORT_CTRL_TX_DISABLE BIT(1)
+#define PORT_CTRL_RX_BCST_EN BIT(2) /* Broadcast RX (P8 only) */
+#define PORT_CTRL_RX_MCST_EN BIT(3) /* Multicast RX (P8 only) */
+#define PORT_CTRL_RX_UCST_EN BIT(4) /* Unicast RX (P8 only) */
+#define PORT_CTRL_STP_STATE_S 5
+#define PORT_CTRL_NO_STP (0 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_DIS_STATE (1 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_BLOCK_STATE (2 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_LISTEN_STATE (3 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_LEARN_STATE (4 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_FWD_STATE (5 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_STP_STATE_MASK (0x7 << PORT_CTRL_STP_STATE_S)
+
+/* SMP Control Register (8 bit) */
+#define B53_SMP_CTRL 0x0a
+
+/* Switch Mode Control Register (8 bit) */
+#define B53_SWITCH_MODE 0x0b
+#define SM_SW_FWD_MODE BIT(0) /* 1 = Managed Mode */
+#define SM_SW_FWD_EN BIT(1) /* Forwarding Enable */
+
+/* IMP Port state override register (8 bit) */
+#define B53_PORT_OVERRIDE_CTRL 0x0e
+#define PORT_OVERRIDE_LINK BIT(0)
+#define PORT_OVERRIDE_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */
+#define PORT_OVERRIDE_SPEED_S 2
+#define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */
+#define PORT_OVERRIDE_RX_FLOW BIT(4)
+#define PORT_OVERRIDE_TX_FLOW BIT(5)
+#define PORT_OVERRIDE_SPEED_2000M BIT(6) /* BCM5301X only, requires setting 1000M */
+#define PORT_OVERRIDE_EN BIT(7) /* Use the register contents */
+
+/* Power-down mode control */
+#define B53_PD_MODE_CTRL_25 0x0f
+
+/* IP Multicast control (8 bit) */
+#define B53_IP_MULTICAST_CTRL 0x21
+#define B53_IPMC_FWD_EN BIT(1)
+#define B53_UC_FWD_EN BIT(6)
+#define B53_MC_FWD_EN BIT(7)
+
+/* (16 bit) */
+#define B53_UC_FLOOD_MASK 0x32
+#define B53_MC_FLOOD_MASK 0x34
+#define B53_IPMC_FLOOD_MASK 0x36
+
+/*
+ * Override Ports 0-7 State on devices with xMII interfaces (8 bit)
+ *
+ * For port 8 still use B53_PORT_OVERRIDE_CTRL
+ * Please note that not all ports are available on every hardware; e.g. BCM5301X
+ * has no override for port 6, and BCM63xx also has some limitations.
+ */
+#define B53_GMII_PORT_OVERRIDE_CTRL(i) (0x58 + (i))
+#define GMII_PO_LINK BIT(0)
+#define GMII_PO_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */
+#define GMII_PO_SPEED_S 2
+#define GMII_PO_SPEED_10M (0 << GMII_PO_SPEED_S)
+#define GMII_PO_SPEED_100M (1 << GMII_PO_SPEED_S)
+#define GMII_PO_SPEED_1000M (2 << GMII_PO_SPEED_S)
+#define GMII_PO_RX_FLOW BIT(4)
+#define GMII_PO_TX_FLOW BIT(5)
+#define GMII_PO_EN BIT(6) /* Use the register contents */
+#define GMII_PO_SPEED_2000M BIT(7) /* BCM5301X only, requires setting 1000M */
+
+#define B53_RGMII_CTRL_IMP 0x60
+#define RGMII_CTRL_ENABLE_GMII BIT(7)
+#define RGMII_CTRL_TIMING_SEL BIT(2)
+#define RGMII_CTRL_DLL_RXC BIT(1)
+#define RGMII_CTRL_DLL_TXC BIT(0)
+
+#define B53_RGMII_CTRL_P(i) (B53_RGMII_CTRL_IMP + (i))
+
+/* Software reset register (8 bit) */
+#define B53_SOFTRESET 0x79
+#define SW_RST BIT(7)
+#define EN_SW_RST BIT(4)
+
+/* Fast Aging Control register (8 bit) */
+#define B53_FAST_AGE_CTRL 0x88
+#define FAST_AGE_STATIC BIT(0)
+#define FAST_AGE_DYNAMIC BIT(1)
+#define FAST_AGE_PORT BIT(2)
+#define FAST_AGE_VLAN BIT(3)
+#define FAST_AGE_STP BIT(4)
+#define FAST_AGE_MC BIT(5)
+#define FAST_AGE_DONE BIT(7)
+
+/* Fast Aging Port Control register (8 bit) */
+#define B53_FAST_AGE_PORT_CTRL 0x89
+
+/* Fast Aging VID Control register (16 bit) */
+#define B53_FAST_AGE_VID_CTRL 0x8a
+
+/*************************************************************************
+ * Status Page registers
+ *************************************************************************/
+
+/* Link Status Summary Register (16bit) */
+#define B53_LINK_STAT 0x00
+
+/* Link Status Change Register (16 bit) */
+#define B53_LINK_STAT_CHANGE 0x02
+
+/* Port Speed Summary Register (16 bit for FE, 32 bit for GE) */
+#define B53_SPEED_STAT 0x04
+#define SPEED_PORT_FE(reg, port) (((reg) >> (port)) & 1)
+#define SPEED_PORT_GE(reg, port) (((reg) >> 2 * (port)) & 3)
+#define SPEED_STAT_10M 0
+#define SPEED_STAT_100M 1
+#define SPEED_STAT_1000M 2
+
+/* Duplex Status Summary (16 bit) */
+#define B53_DUPLEX_STAT_FE 0x06
+#define B53_DUPLEX_STAT_GE 0x08
+#define B53_DUPLEX_STAT_63XX 0x0c
+
+/* Revision ID register for BCM5325 */
+#define B53_REV_ID_25 0x50
+
+/* Strap Value (48 bit) */
+#define B53_STRAP_VALUE 0x70
+#define SV_GMII_CTRL_115 BIT(27)
+
+/*************************************************************************
+ * Management Mode Page Registers
+ *************************************************************************/
+
+/* Global Management Config Register (8 bit) */
+#define B53_GLOBAL_CONFIG 0x00
+#define GC_RESET_MIB 0x01
+#define GC_RX_BPDU_EN 0x02
+#define GC_MIB_AC_HDR_EN 0x10
+#define GC_MIB_AC_EN 0x20
+#define GC_FRM_MGMT_PORT_M 0xC0
+#define GC_FRM_MGMT_PORT_04 0x00
+#define GC_FRM_MGMT_PORT_MII 0x80
+
+/* Broadcom Header control register (8 bit) */
+#define B53_BRCM_HDR 0x03
+#define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */
+#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
+
+/* Device ID register (8 or 32 bit) */
+#define B53_DEVICE_ID 0x30
+
+/* Revision ID register (8 bit) */
+#define B53_REV_ID 0x40
+
+/*************************************************************************
+ * ARL Access Page Registers
+ *************************************************************************/
+
+/* VLAN Table Access Register (8 bit) */
+#define B53_VT_ACCESS 0x80
+#define B53_VT_ACCESS_9798 0x60 /* for BCM5397/BCM5398 */
+#define B53_VT_ACCESS_63XX 0x60 /* for BCM6328/62/68 */
+#define VTA_CMD_WRITE 0
+#define VTA_CMD_READ 1
+#define VTA_CMD_CLEAR 2
+#define VTA_START_CMD BIT(7)
+
+/* VLAN Table Index Register (16 bit) */
+#define B53_VT_INDEX 0x81
+#define B53_VT_INDEX_9798 0x61
+#define B53_VT_INDEX_63XX 0x62
+
+/* VLAN Table Entry Register (32 bit) */
+#define B53_VT_ENTRY 0x83
+#define B53_VT_ENTRY_9798 0x63
+#define B53_VT_ENTRY_63XX 0x64
+#define VTE_MEMBERS 0x1ff
+#define VTE_UNTAG_S 9
+#define VTE_UNTAG (0x1ff << 9)
+
+/*************************************************************************
+ * ARL I/O Registers
+ *************************************************************************/
+
+/* ARL Table Read/Write Register (8 bit) */
+#define B53_ARLTBL_RW_CTRL 0x00
+#define ARLTBL_RW BIT(0)
+#define ARLTBL_START_DONE BIT(7)
+
+/* MAC Address Index Register (48 bit) */
+#define B53_MAC_ADDR_IDX 0x02
+
+/* VLAN ID Index Register (16 bit) */
+#define B53_VLAN_ID_IDX 0x08
+
+/* ARL Table MAC/VID Entry N Registers (64 bit)
+ *
+ * BCM5325 and BCM5365 share most definitions below
+ */
+#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n))
+#define ARLTBL_MAC_MASK 0xffffffffffff
+#define ARLTBL_VID_S 48
+#define ARLTBL_VID_MASK_25 0xff
+#define ARLTBL_VID_MASK 0xfff
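+/* mac_vid layout: bits 47:0 carry the MAC address, bits 59:48 the VID */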
+#define ARLTBL_DATA_PORT_ID_S_25 48
+#define ARLTBL_DATA_PORT_ID_MASK_25 0xf
+#define ARLTBL_AGE_25 BIT(61)
+#define ARLTBL_STATIC_25 BIT(62)
+#define ARLTBL_VALID_25 BIT(63)
+
+/* ARL Table Data Entry N Registers (32 bit) */
+#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08)
+#define ARLTBL_DATA_PORT_ID_MASK 0x1ff
+#define   ARLTBL_TC(tc) ((3 & (tc)) << 11)
+#define ARLTBL_AGE BIT(14)
+#define ARLTBL_STATIC BIT(15)
+#define ARLTBL_VALID BIT(16)
+
+/* ARL Search Control Register (8 bit) */
+#define B53_ARL_SRCH_CTL 0x50
+#define B53_ARL_SRCH_CTL_25 0x20
+#define ARL_SRCH_VLID BIT(0)
+#define ARL_SRCH_STDN BIT(7)
+
+/* ARL Search Address Register (16 bit) */
+#define B53_ARL_SRCH_ADDR 0x51
+#define B53_ARL_SRCH_ADDR_25 0x22
+#define B53_ARL_SRCH_ADDR_65 0x24
+#define ARL_ADDR_MASK GENMASK(14, 0)
+
+/* ARL Search MAC/VID Result (64 bit) */
+#define B53_ARL_SRCH_RSTL_0_MACVID 0x60
+
+/* Single register search result on 5325 */
+#define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24
+/* Single register search result on 5365 */
+#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30
+
+/* ARL Search Data Result (32 bit) */
+#define B53_ARL_SRCH_RSTL_0 0x68
+
+#define B53_ARL_SRCH_RSTL_MACVID(x) (B53_ARL_SRCH_RSTL_0_MACVID + ((x) * 0x10))
+#define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10))
+
+/*************************************************************************
+ * Port VLAN Registers
+ *************************************************************************/
+
+/* Port VLAN mask (16 bit); IMP port is always 8, also on 5325 & co */
+#define B53_PVLAN_PORT_MASK(i) ((i) * 2)
+
+/*************************************************************************
+ * 802.1Q Page Registers
+ *************************************************************************/
+
+/* Global QoS Control (8 bit) */
+#define B53_QOS_GLOBAL_CTL 0x00
+
+/* Enable 802.1Q for individual Ports (16 bit) */
+#define B53_802_1P_EN 0x04
+
+/*************************************************************************
+ * VLAN Page Registers
+ *************************************************************************/
+
+/* VLAN Control 0 (8 bit) */
+#define B53_VLAN_CTRL0 0x00
+#define VC0_8021PF_CTRL_MASK 0x3
+#define VC0_8021PF_CTRL_NONE 0x0
+#define VC0_8021PF_CTRL_CHANGE_PRI 0x1
+#define VC0_8021PF_CTRL_CHANGE_VID 0x2
+#define VC0_8021PF_CTRL_CHANGE_BOTH 0x3
+#define VC0_8021QF_CTRL_MASK 0xc
+#define VC0_8021QF_CTRL_CHANGE_PRI 0x1
+#define VC0_8021QF_CTRL_CHANGE_VID 0x2
+#define VC0_8021QF_CTRL_CHANGE_BOTH 0x3
+#define VC0_RESERVED_1 BIT(1)
+#define VC0_DROP_VID_MISS BIT(4)
+#define VC0_VID_HASH_VID BIT(5)
+#define VC0_VID_CHK_EN BIT(6) /* Use VID,DA or VID,SA */
+#define VC0_VLAN_EN BIT(7) /* 802.1Q VLAN Enabled */
+
+/* VLAN Control 1 (8 bit) */
+#define B53_VLAN_CTRL1 0x01
+#define VC1_RX_MCST_TAG_EN BIT(1)
+#define VC1_RX_MCST_FWD_EN BIT(2)
+#define VC1_RX_MCST_UNTAG_EN BIT(3)
+
+/* VLAN Control 2 (8 bit) */
+#define B53_VLAN_CTRL2 0x02
+
+/* VLAN Control 3 (8 bit when BCM5325, 16 bit else) */
+#define B53_VLAN_CTRL3 0x03
+#define B53_VLAN_CTRL3_63XX 0x04
+#define VC3_MAXSIZE_1532 BIT(6) /* 5325 only */
+#define VC3_HIGH_8BIT_EN BIT(7) /* 5325 only */
+
+/* VLAN Control 4 (8 bit) */
+#define B53_VLAN_CTRL4 0x05
+#define B53_VLAN_CTRL4_25 0x04
+#define B53_VLAN_CTRL4_63XX 0x06
+#define VC4_ING_VID_CHECK_S 6
+#define VC4_ING_VID_CHECK_MASK (0x3 << VC4_ING_VID_CHECK_S)
+#define VC4_ING_VID_VIO_FWD 0 /* forward, but do not learn */
+#define VC4_ING_VID_VIO_DROP 1 /* drop VID violations */
+#define VC4_NO_ING_VID_CHK 2 /* do not check */
+#define VC4_ING_VID_VIO_TO_IMP 3 /* redirect to MII port */
+
+/* VLAN Control 5 (8 bit) */
+#define B53_VLAN_CTRL5 0x06
+#define B53_VLAN_CTRL5_25 0x05
+#define B53_VLAN_CTRL5_63XX 0x07
+#define VC5_VID_FFF_EN BIT(2)
+#define VC5_DROP_VTABLE_MISS BIT(3)
+
+/* VLAN Control 6 (8 bit) */
+#define B53_VLAN_CTRL6 0x07
+#define B53_VLAN_CTRL6_63XX 0x08
+
+/* VLAN Table Access Register (16 bit) */
+#define B53_VLAN_TABLE_ACCESS_25 0x06 /* BCM5325E/5350 */
+#define B53_VLAN_TABLE_ACCESS_65 0x08 /* BCM5365 */
+#define VTA_VID_LOW_MASK_25 0xf
+#define VTA_VID_LOW_MASK_65 0xff
+#define VTA_VID_HIGH_S_25 4
+#define VTA_VID_HIGH_S_65 8
+#define  VTA_VID_HIGH_MASK_25 (0xff << VTA_VID_HIGH_S_25)
+#define VTA_VID_HIGH_MASK_65 (0xf << VTA_VID_HIGH_S_65)
+#define VTA_RW_STATE BIT(12)
+#define VTA_RW_STATE_RD 0
+#define VTA_RW_STATE_WR BIT(12)
+#define VTA_RW_OP_EN BIT(13)
+
+/* VLAN Read/Write Registers (16/32 bit) */
+#define B53_VLAN_WRITE_25 0x08
+#define B53_VLAN_WRITE_65 0x0a
+#define B53_VLAN_READ 0x0c
+#define VA_MEMBER_MASK 0x3f
+#define VA_UNTAG_S_25 6
+#define VA_UNTAG_MASK_25 0x3f
+#define VA_UNTAG_S_65 7
+#define VA_UNTAG_MASK_65 0x1f
+#define VA_VID_HIGH_S 12
+#define VA_VID_HIGH_MASK (0xffff << VA_VID_HIGH_S)
+#define VA_VALID_25 BIT(20)
+#define VA_VALID_25_R4 BIT(24)
+#define VA_VALID_65 BIT(14)
+
+/* VLAN Port Default Tag (16 bit) */
+#define B53_VLAN_PORT_DEF_TAG(i) (0x10 + 2 * (i))
+
+/*************************************************************************
+ * Jumbo Frame Page Registers
+ *************************************************************************/
+
+/* Jumbo Enable Port Mask (bit i == port i enabled) (32 bit) */
+#define B53_JUMBO_PORT_MASK 0x01
+#define B53_JUMBO_PORT_MASK_63XX 0x04
+#define JPM_10_100_JUMBO_EN BIT(24) /* GigE always enabled */
+
+/* Good Frame Max Size without 802.1Q TAG (16 bit) */
+#define B53_JUMBO_MAX_SIZE 0x05
+#define B53_JUMBO_MAX_SIZE_63XX 0x08
+#define JMS_MIN_SIZE 1518
+#define JMS_MAX_SIZE 9724
+
+/*************************************************************************
+ * CFP Configuration Page Registers
+ *************************************************************************/
+
+/* CFP Control Register with ports map (8 bit) */
+#define B53_CFP_CTRL 0x00
+
+#endif /* !__B53_REGS_H */
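
As a worked example of the VLAN table entry encoding, the sketch below packs a membership/untag pair with the VTE_* fields; it is illustrative only and assumes BIT() from <linux/bitops.h>.

/* Build a VLAN table entry where ports 0, 1 and the IMP port (8) are
 * members and the IMP port egresses untagged.
 */
static u32 b53_example_vlan_entry(void)
{
	u16 members = BIT(0) | BIT(1) | BIT(8);
	u16 untag = BIT(8);

	return ((untag << VTE_UNTAG_S) & VTE_UNTAG) |
	       (members & VTE_MEMBERS);
}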
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
new file mode 100644
index 000000000000..2bda0b5f1578
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -0,0 +1,331 @@
+/*
+ * B53 register access through SPI
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_data/b53.h>
+
+#include "b53_priv.h"
+
+#define B53_SPI_DATA 0xf0
+
+#define B53_SPI_STATUS 0xfe
+#define B53_SPI_CMD_SPIF BIT(7)
+#define B53_SPI_CMD_RACK BIT(5)
+
+#define B53_SPI_CMD_READ 0x00
+#define B53_SPI_CMD_WRITE 0x01
+#define B53_SPI_CMD_NORMAL 0x60
+#define B53_SPI_CMD_FAST 0x10
+
+#define B53_SPI_PAGE_SELECT 0xff
+
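+/* Every transfer is framed as a command byte (access mode | read/write)
+ * followed by a register offset; changing pages is an ordinary write to
+ * the 0xff page-select register with the new page as payload.
+ */
+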
+static inline int b53_spi_read_reg(struct spi_device *spi, u8 reg, u8 *val,
+ unsigned int len)
+{
+ u8 txbuf[2];
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_READ;
+ txbuf[1] = reg;
+
+ return spi_write_then_read(spi, txbuf, 2, val, len);
+}
+
+static inline int b53_spi_clear_status(struct spi_device *spi)
+{
+ unsigned int i;
+ u8 rxbuf;
+ int ret;
+
+ for (i = 0; i < 10; i++) {
+ ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1);
+ if (ret)
+ return ret;
+
+ if (!(rxbuf & B53_SPI_CMD_SPIF))
+ break;
+
+ mdelay(1);
+ }
+
+ if (i == 10)
+ return -EIO;
+
+ return 0;
+}
+
+static inline int b53_spi_set_page(struct spi_device *spi, u8 page)
+{
+ u8 txbuf[3];
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = B53_SPI_PAGE_SELECT;
+ txbuf[2] = page;
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static inline int b53_prepare_reg_access(struct spi_device *spi, u8 page)
+{
+ int ret = b53_spi_clear_status(spi);
+
+ if (ret)
+ return ret;
+
+ return b53_spi_set_page(spi, page);
+}
+
+static int b53_spi_prepare_reg_read(struct spi_device *spi, u8 reg)
+{
+ u8 rxbuf;
+ int retry_count;
+ int ret;
+
+ ret = b53_spi_read_reg(spi, reg, &rxbuf, 1);
+ if (ret)
+ return ret;
+
+ for (retry_count = 0; retry_count < 10; retry_count++) {
+ ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1);
+ if (ret)
+ return ret;
+
+ if (rxbuf & B53_SPI_CMD_RACK)
+ break;
+
+ mdelay(1);
+ }
+
+ if (retry_count == 10)
+ return -EIO;
+
+ return 0;
+}
+
+static int b53_spi_read(struct b53_device *dev, u8 page, u8 reg, u8 *data,
+ unsigned int len)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ ret = b53_spi_prepare_reg_read(spi, reg);
+ if (ret)
+ return ret;
+
+ return b53_spi_read_reg(spi, B53_SPI_DATA, data, len);
+}
+
+static int b53_spi_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ return b53_spi_read(dev, page, reg, val, 1);
+}
+
+static int b53_spi_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ int ret = b53_spi_read(dev, page, reg, (u8 *)val, 2);
+
+ if (!ret)
+ *val = le16_to_cpu(*val);
+
+ return ret;
+}
+
+static int b53_spi_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ int ret = b53_spi_read(dev, page, reg, (u8 *)val, 4);
+
+ if (!ret)
+ *val = le32_to_cpu(*val);
+
+ return ret;
+}
+
+static int b53_spi_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ int ret;
+
+ *val = 0;
+ ret = b53_spi_read(dev, page, reg, (u8 *)val, 6);
+ if (!ret)
+ *val = le64_to_cpu(*val);
+
+ return ret;
+}
+
+static int b53_spi_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ int ret = b53_spi_read(dev, page, reg, (u8 *)val, 8);
+
+ if (!ret)
+ *val = le64_to_cpu(*val);
+
+ return ret;
+}
+
+static int b53_spi_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[3];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ txbuf[2] = value;
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static int b53_spi_write16(struct b53_device *dev, u8 page, u8 reg, u16 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[4];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le16(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static int b53_spi_write32(struct b53_device *dev, u8 page, u8 reg, u32 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[6];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le32(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static int b53_spi_write48(struct b53_device *dev, u8 page, u8 reg, u64 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[10];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le64(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf) - 2);
+}
+
+static int b53_spi_write64(struct b53_device *dev, u8 page, u8 reg, u64 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[10];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le64(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static struct b53_io_ops b53_spi_ops = {
+ .read8 = b53_spi_read8,
+ .read16 = b53_spi_read16,
+ .read32 = b53_spi_read32,
+ .read48 = b53_spi_read48,
+ .read64 = b53_spi_read64,
+ .write8 = b53_spi_write8,
+ .write16 = b53_spi_write16,
+ .write32 = b53_spi_write32,
+ .write48 = b53_spi_write48,
+ .write64 = b53_spi_write64,
+};
+
+static int b53_spi_probe(struct spi_device *spi)
+{
+ struct b53_device *dev;
+ int ret;
+
+ dev = b53_switch_alloc(&spi->dev, &b53_spi_ops, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+ ret = b53_switch_register(dev);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static int b53_spi_remove(struct spi_device *spi)
+{
+ struct b53_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ b53_switch_remove(dev);
+
+ return 0;
+}
+
+static struct spi_driver b53_spi_driver = {
+ .driver = {
+ .name = "b53-switch",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = b53_spi_probe,
+ .remove = b53_spi_remove,
+};
+
+module_spi_driver(b53_spi_driver);
+
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("B53 SPI access driver");
+MODULE_LICENSE("Dual BSD/GPL");
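
A board describing the switch statically would register a matching SPI device; a sketch, with bus number, chip select, speed and mode as placeholder values rather than anything mandated by this patch.

#include <linux/spi/spi.h>

static struct spi_board_info my_b53_spi_info __initdata = {
	.modalias	= "b53-switch",	/* matches b53_spi_driver above */
	.max_speed_hz	= 2000000,
	.bus_num	= 0,
	.chip_select	= 1,
	.mode		= SPI_MODE_0,
};

static int __init my_board_init(void)
{
	return spi_register_board_info(&my_b53_spi_info, 1);
}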
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
new file mode 100644
index 000000000000..70fd47284535
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -0,0 +1,415 @@
+/*
+ * B53 register access through Switch Register Access Bridge Registers
+ *
+ * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/b53.h>
+
+#include "b53_priv.h"
+
+/* command and status register of the SRAB */
+#define B53_SRAB_CMDSTAT 0x2c
+#define B53_SRAB_CMDSTAT_RST BIT(2)
+#define B53_SRAB_CMDSTAT_WRITE BIT(1)
+#define B53_SRAB_CMDSTAT_GORDYN BIT(0)
+#define B53_SRAB_CMDSTAT_PAGE 24
+#define B53_SRAB_CMDSTAT_REG 16
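+/* An access is started by writing (page << 24) | (reg << 16) | GORDYN
+ * (plus WRITE for stores) to CMDSTAT; GORDYN self-clears once the
+ * switch has completed the operation.
+ */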
+
+/* high order word of write data to switch register */
+#define B53_SRAB_WD_H 0x30
+
+/* low order word of write data to switch register */
+#define B53_SRAB_WD_L 0x34
+
+/* high order word of read data from switch register */
+#define B53_SRAB_RD_H 0x38
+
+/* low order word of read data from switch register */
+#define B53_SRAB_RD_L 0x3c
+
+/* general control and status register of the SRAB */
+#define B53_SRAB_CTRLS 0x40
+#define B53_SRAB_CTRLS_RCAREQ BIT(3)
+#define B53_SRAB_CTRLS_RCAGNT BIT(4)
+#define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6)
+
+/* the register captures interrupt pulses from the switch */
+#define B53_SRAB_INTR 0x44
+#define B53_SRAB_INTR_P(x) BIT(x)
+#define B53_SRAB_SWITCH_PHY BIT(8)
+#define B53_SRAB_1588_SYNC BIT(9)
+#define B53_SRAB_IMP1_SLEEP_TIMER BIT(10)
+#define B53_SRAB_P7_SLEEP_TIMER BIT(11)
+#define B53_SRAB_IMP0_SLEEP_TIMER BIT(12)
+
+struct b53_srab_priv {
+ void __iomem *regs;
+};
+
+static int b53_srab_request_grant(struct b53_device *dev)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ u32 ctrls;
+ int i;
+
+ ctrls = readl(regs + B53_SRAB_CTRLS);
+ ctrls |= B53_SRAB_CTRLS_RCAREQ;
+ writel(ctrls, regs + B53_SRAB_CTRLS);
+
+ for (i = 0; i < 20; i++) {
+ ctrls = readl(regs + B53_SRAB_CTRLS);
+ if (ctrls & B53_SRAB_CTRLS_RCAGNT)
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (WARN_ON(i == 20))
+ return -EIO;
+
+ return 0;
+}
+
+static void b53_srab_release_grant(struct b53_device *dev)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ u32 ctrls;
+
+ ctrls = readl(regs + B53_SRAB_CTRLS);
+ ctrls &= ~B53_SRAB_CTRLS_RCAREQ;
+ writel(ctrls, regs + B53_SRAB_CTRLS);
+}
+
+static int b53_srab_op(struct b53_device *dev, u8 page, u8 reg, u32 op)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int i;
+ u32 cmdstat;
+
+ /* set register address */
+ cmdstat = (page << B53_SRAB_CMDSTAT_PAGE) |
+ (reg << B53_SRAB_CMDSTAT_REG) |
+ B53_SRAB_CMDSTAT_GORDYN |
+ op;
+ writel(cmdstat, regs + B53_SRAB_CMDSTAT);
+
+ /* check if operation completed */
+ for (i = 0; i < 5; ++i) {
+ cmdstat = readl(regs + B53_SRAB_CMDSTAT);
+ if (!(cmdstat & B53_SRAB_CMDSTAT_GORDYN))
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (WARN_ON(i == 5))
+ return -EIO;
+
+ return 0;
+}
+
+static int b53_srab_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L) & 0xff;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L) & 0xffff;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L);
+ *val += ((u64)readl(regs + B53_SRAB_RD_H) & 0xffff) << 32;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L);
+ *val += (u64)readl(regs + B53_SRAB_RD_H) << 32;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel(value, regs + B53_SRAB_WD_L);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel(value, regs + B53_SRAB_WD_L);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel(value, regs + B53_SRAB_WD_L);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel((u32)value, regs + B53_SRAB_WD_L);
+ writel((u16)(value >> 32), regs + B53_SRAB_WD_H);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel((u32)value, regs + B53_SRAB_WD_L);
+ writel((u32)(value >> 32), regs + B53_SRAB_WD_H);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static struct b53_io_ops b53_srab_ops = {
+ .read8 = b53_srab_read8,
+ .read16 = b53_srab_read16,
+ .read32 = b53_srab_read32,
+ .read48 = b53_srab_read48,
+ .read64 = b53_srab_read64,
+ .write8 = b53_srab_write8,
+ .write16 = b53_srab_write16,
+ .write32 = b53_srab_write32,
+ .write48 = b53_srab_write48,
+ .write64 = b53_srab_write64,
+};
+
+static int b53_srab_probe(struct platform_device *pdev)
+{
+ struct b53_srab_priv *priv;
+ struct b53_device *dev;
+ struct resource *r;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv);
+ if (!dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dev);
+
+ return b53_switch_register(dev);
+}
+
+static int b53_srab_remove(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (dev)
+ b53_switch_remove(dev);
+
+ return 0;
+}
+
+static const struct of_device_id b53_srab_of_match[] = {
+ { .compatible = "brcm,bcm53010-srab" },
+ { .compatible = "brcm,bcm53011-srab" },
+ { .compatible = "brcm,bcm53012-srab" },
+ { .compatible = "brcm,bcm53018-srab" },
+ { .compatible = "brcm,bcm53019-srab" },
+ { .compatible = "brcm,bcm5301x-srab" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver b53_srab_driver = {
+ .probe = b53_srab_probe,
+ .remove = b53_srab_remove,
+ .driver = {
+ .name = "b53-srab-switch",
+ .of_match_table = b53_srab_of_match,
+ },
+};
+
+module_platform_driver(b53_srab_driver);
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_DESCRIPTION("B53 Switch Register Access Bridge Registers (SRAB) access driver");
+MODULE_LICENSE("Dual BSD/GPL");
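
The open-coded polling loops above could equally be written with the helpers from <linux/iopoll.h>, assuming that header is available on the target kernel; a sketch of the grant wait, not part of the patch.

#include <linux/iopoll.h>

/* Poll CTRLS until the access is granted, sleeping ~10us between reads,
 * with a 2ms timeout; returns 0 or -ETIMEDOUT.
 */
static int b53_srab_wait_grant(u8 __iomem *regs)
{
	u32 ctrls;

	return readl_poll_timeout(regs + B53_SRAB_CTRLS, ctrls,
				  ctrls & B53_SRAB_CTRLS_RCAGNT, 10, 2000);
}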
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 73df91bb0466..cd1d630ae3a9 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
+#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
@@ -460,19 +461,13 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
return 0;
}
-/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
- * flush for that port.
- */
-static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
+static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
unsigned int timeout = 1000;
u32 reg;
- core_writel(priv, port, CORE_FAST_AGE_PORT);
-
reg = core_readl(priv, CORE_FAST_AGE_CTRL);
- reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
+ reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
core_writel(priv, reg, CORE_FAST_AGE_CTRL);
do {
@@ -491,13 +486,98 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
return 0;
}
+/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
+ * flush for that port.
+ */
+static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+
+ core_writel(priv, port, CORE_FAST_AGE_PORT);
+
+ return bcm_sf2_fast_age_op(priv);
+}
+
+static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid)
+{
+ core_writel(priv, vid, CORE_FAST_AGE_VID);
+
+ return bcm_sf2_fast_age_op(priv);
+}
+
+static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
+{
+ unsigned int timeout = 10;
+ u32 reg;
+
+ do {
+ reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
+ if (!(reg & ARLA_VTBL_STDN))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
+{
+ core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
+
+ return bcm_sf2_vlan_op_wait(priv);
+}
+
+static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
+ struct bcm_sf2_vlan *vlan)
+{
+ int ret;
+
+ core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
+ core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members,
+ CORE_ARLA_VTBL_ENTRY);
+
+ ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE);
+ if (ret)
+ pr_err("failed to write VLAN entry\n");
+}
+
+static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
+ struct bcm_sf2_vlan *vlan)
+{
+ u32 entry;
+ int ret;
+
+ core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
+
+ ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
+ if (ret)
+ return ret;
+
+ entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);
+ vlan->members = entry & FWD_MAP_MASK;
+ vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;
+
+ return 0;
+}
+
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ s8 cpu_port = ds->dst->cpu_port;
unsigned int i;
u32 reg, p_ctl;
+ /* Make this port leave the "join all VLANs" group, since we will
+ * have proper VLAN entries from now on
+ */
+ reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
+ reg &= ~BIT(port);
+ if ((reg & BIT(cpu_port)) == BIT(cpu_port))
+ reg &= ~BIT(cpu_port);
+ core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
+
priv->port_sts[port].bridge_dev = bridge;
p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
@@ -529,6 +609,7 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
struct net_device *bridge = priv->port_sts[port].bridge_dev;
+ s8 cpu_port = ds->dst->cpu_port;
unsigned int i;
u32 reg, p_ctl;
@@ -552,6 +633,13 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
priv->port_sts[port].vlan_ctl_mask = p_ctl;
priv->port_sts[port].bridge_dev = NULL;
+
+ /* Make this port join all VLANs without VLAN entries */
+ reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
+ reg |= BIT(port);
+ if (!(reg & BIT(cpu_port)))
+ reg |= BIT(cpu_port);
+ core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
}
static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
@@ -836,6 +924,66 @@ static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
+static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
+ int regnum, u16 val)
+{
+ int ret = 0;
+ u32 reg;
+
+ reg = reg_readl(priv, REG_SWITCH_CNTRL);
+ reg |= MDIO_MASTER_SEL;
+ reg_writel(priv, reg, REG_SWITCH_CNTRL);
+
+ /* Page << 8 | offset */
+ reg = 0x70;
+ reg <<= 2;
+ core_writel(priv, addr, reg);
+
+ /* Page << 8 | offset */
+ reg = 0x80 << 8 | regnum << 1;
+ reg <<= 2;
+
+ if (op)
+ ret = core_readl(priv, reg);
+ else
+ core_writel(priv, val, reg);
+
+ reg = reg_readl(priv, REG_SWITCH_CNTRL);
+ reg &= ~MDIO_MASTER_SEL;
+ reg_writel(priv, reg, REG_SWITCH_CNTRL);
+
+ return ret & 0xffff;
+}
+
+static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct bcm_sf2_priv *priv = bus->priv;
+
+ /* Intercept reads from the Broadcom pseudo-PHY address; otherwise,
+ * send them to our master MDIO bus controller
+ */
+ if (addr == BRCM_PSEUDO_PHY_ADDR && (priv->indir_phy_mask & BIT(addr)))
+ return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
+ else
+ return mdiobus_read(priv->master_mii_bus, addr, regnum);
+}
+
+static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct bcm_sf2_priv *priv = bus->priv;
+
+ /* Intercept writes to the Broadcom pseudo-PHY address; otherwise,
+ * send them to our master MDIO bus controller
+ */
+ if (addr == BRCM_PSEUDO_PHY_ADDR && (priv->indir_phy_mask & BIT(addr)))
+ bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
+ else
+ mdiobus_write(priv->master_mii_bus, addr, regnum, val);
+
+ return 0;
+}
+
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
struct bcm_sf2_priv *priv = dev_id;
@@ -932,133 +1080,70 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
}
}
-static int bcm_sf2_sw_setup(struct dsa_switch *ds)
+static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
- const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
struct bcm_sf2_priv *priv = ds_to_priv(ds);
struct device_node *dn;
- void __iomem **base;
- unsigned int port;
- unsigned int i;
- u32 reg, rev;
- int ret;
-
- spin_lock_init(&priv->indir_lock);
- mutex_init(&priv->stats_mutex);
-
- /* All the interesting properties are at the parent device_node
- * level
- */
- dn = ds->cd->of_node->parent;
- bcm_sf2_identify_ports(priv, ds->cd->of_node);
-
- priv->irq0 = irq_of_parse_and_map(dn, 0);
- priv->irq1 = irq_of_parse_and_map(dn, 1);
-
- base = &priv->core;
- for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
- *base = of_iomap(dn, i);
- if (*base == NULL) {
- pr_err("unable to find register: %s\n", reg_names[i]);
- ret = -ENOMEM;
- goto out_unmap;
- }
- base++;
- }
-
- ret = bcm_sf2_sw_rst(priv);
- if (ret) {
- pr_err("unable to software reset switch: %d\n", ret);
- goto out_unmap;
- }
-
- /* Disable all interrupts and request them */
- bcm_sf2_intr_disable(priv);
-
- ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
- "switch_0", priv);
- if (ret < 0) {
- pr_err("failed to request switch_0 IRQ\n");
- goto out_unmap;
- }
-
- ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
- "switch_1", priv);
- if (ret < 0) {
- pr_err("failed to request switch_1 IRQ\n");
- goto out_free_irq0;
- }
-
- /* Reset the MIB counters */
- reg = core_readl(priv, CORE_GMNCFGCFG);
- reg |= RST_MIB_CNT;
- core_writel(priv, reg, CORE_GMNCFGCFG);
- reg &= ~RST_MIB_CNT;
- core_writel(priv, reg, CORE_GMNCFGCFG);
-
- /* Get the maximum number of ports for this switch */
- priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
- if (priv->hw_params.num_ports > DSA_MAX_PORTS)
- priv->hw_params.num_ports = DSA_MAX_PORTS;
-
- /* Assume a single GPHY setup if we can't read that property */
- if (of_property_read_u32(dn, "brcm,num-gphy",
- &priv->hw_params.num_gphy))
- priv->hw_params.num_gphy = 1;
-
- /* Enable all valid ports and disable those unused */
- for (port = 0; port < priv->hw_params.num_ports; port++) {
- /* IMP port receives special treatment */
- if ((1 << port) & ds->enabled_port_mask)
- bcm_sf2_port_setup(ds, port, NULL);
- else if (dsa_is_cpu_port(ds, port))
- bcm_sf2_imp_setup(ds, port);
- else
- bcm_sf2_port_disable(ds, port, NULL);
- }
-
- /* Include the pseudo-PHY address and the broadcast PHY address to
- * divert reads towards our workaround. This is only required for
- * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
- * that we can use the regular SWITCH_MDIO master controller instead.
+ static int index;
+ int err;
+
+ /* Find our integrated MDIO bus node */
+ dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
+ priv->master_mii_bus = of_mdio_find_bus(dn);
+ if (!priv->master_mii_bus)
+ return -EPROBE_DEFER;
+
+ get_device(&priv->master_mii_bus->dev);
+ priv->master_mii_dn = dn;
+
+ priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ if (!priv->slave_mii_bus)
+ return -ENOMEM;
+
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "sf2 slave mii";
+ priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
+ priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
+ snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
+ index++);
+ priv->slave_mii_bus->dev.of_node = dn;
+
+ /* Include the pseudo-PHY address to divert reads towards our
+ * workaround. This is only required for 7445D0, since 7445E0
+ * disconnects the internal switch pseudo-PHY such that we can use the
+ * regular SWITCH_MDIO master controller instead.
*
- * By default, DSA initializes ds->phys_mii_mask to
- * ds->enabled_port_mask to have a 1:1 mapping between Port address
- * and PHY address in order to utilize the slave_mii_bus instance to
- * read from Port PHYs. This is not what we want here, so we
- * initialize phys_mii_mask 0 to always utilize the "master" MDIO
- * bus backed by the "mdio-unimac" driver.
+	 * Here we flag the pseudo-PHY as needing special treatment; all
+	 * other PHY reads and writes go through the master MDIO bus
+	 * controller that comes with this switch, backed by the
+	 * "mdio-unimac" driver.
*/
if (of_machine_is_compatible("brcm,bcm7445d0"))
- ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+ priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
else
- ds->phys_mii_mask = 0;
+ priv->indir_phy_mask = 0;
- rev = reg_readl(priv, REG_SWITCH_REVISION);
- priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
- SWITCH_TOP_REV_MASK;
- priv->hw_params.core_rev = (rev & SF2_REV_MASK);
+ ds->phys_mii_mask = priv->indir_phy_mask;
+ ds->slave_mii_bus = priv->slave_mii_bus;
+ priv->slave_mii_bus->parent = ds->dev->parent;
+ priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
- rev = reg_readl(priv, REG_PHY_REVISION);
- priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
+ if (dn)
+ err = of_mdiobus_register(priv->slave_mii_bus, dn);
+ else
+ err = mdiobus_register(priv->slave_mii_bus);
- pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
- priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
- priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
- priv->core, priv->irq0, priv->irq1);
+ if (err)
+ of_node_put(dn);
- return 0;
+ return err;
+}
-out_free_irq0:
- free_irq(priv->irq0, priv);
-out_unmap:
- base = &priv->core;
- for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
- if (*base)
- iounmap(*base);
- base++;
- }
- return ret;
+static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
+{
+ mdiobus_unregister(priv->slave_mii_bus);
+ if (priv->master_mii_dn)
+ of_node_put(priv->master_mii_dn);
}
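
For context, the slave MII bus callbacks installed above (bcm_sf2_sw_mdio_read/write) are defined elsewhere in this patch; they are expected to demultiplex on indir_phy_mask, steering flagged addresses through the indirect pseudo-PHY access path and everything else to the master "mdio-unimac" bus. A minimal sketch of that shape, assuming a bcm_sf2_indir_rw() helper for the indirect path:

    /* Sketch only, not the exact driver code: demultiplex reads on
     * indir_phy_mask; bcm_sf2_indir_rw() is a stand-in name for the
     * indirect pseudo-PHY accessor. */
    static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
    {
            struct bcm_sf2_priv *priv = bus->priv;

            if (priv->indir_phy_mask & BIT(addr))
                    return bcm_sf2_indir_rw(priv, 1, addr, regnum, 0);

            return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
    }
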
static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
@@ -1078,68 +1163,6 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
return priv->hw_params.gphy_rev;
}
-static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr,
- int regnum, u16 val)
-{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
- int ret = 0;
- u32 reg;
-
- reg = reg_readl(priv, REG_SWITCH_CNTRL);
- reg |= MDIO_MASTER_SEL;
- reg_writel(priv, reg, REG_SWITCH_CNTRL);
-
- /* Page << 8 | offset */
- reg = 0x70;
- reg <<= 2;
- core_writel(priv, addr, reg);
-
- /* Page << 8 | offset */
- reg = 0x80 << 8 | regnum << 1;
- reg <<= 2;
-
- if (op)
- ret = core_readl(priv, reg);
- else
- core_writel(priv, val, reg);
-
- reg = reg_readl(priv, REG_SWITCH_CNTRL);
- reg &= ~MDIO_MASTER_SEL;
- reg_writel(priv, reg, REG_SWITCH_CNTRL);
-
- return ret & 0xffff;
-}
-
-static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
-{
- /* Intercept reads from the MDIO broadcast address or Broadcom
- * pseudo-PHY address
- */
- switch (addr) {
- case 0:
- case BRCM_PSEUDO_PHY_ADDR:
- return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
- default:
- return 0xffff;
- }
-}
-
-static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
- u16 val)
-{
- /* Intercept writes to the MDIO broadcast address or Broadcom
- * pseudo-PHY address
- */
- switch (addr) {
- case 0:
- case BRCM_PSEUDO_PHY_ADDR:
- bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
- break;
- }
-
- return 0;
-}
-
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
@@ -1370,14 +1393,309 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
return p->ethtool_ops->set_wol(p, wol);
}
+static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
+{
+ u32 mgmt, vc0, vc1, vc4, vc5;
+
+ mgmt = core_readl(priv, CORE_SWMODE);
+ vc0 = core_readl(priv, CORE_VLAN_CTRL0);
+ vc1 = core_readl(priv, CORE_VLAN_CTRL1);
+ vc4 = core_readl(priv, CORE_VLAN_CTRL4);
+ vc5 = core_readl(priv, CORE_VLAN_CTRL5);
+
+ mgmt &= ~SW_FWDG_MODE;
+
+ if (enable) {
+ vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
+ vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
+ vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
+ vc4 |= INGR_VID_CHK_DROP;
+ vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
+ } else {
+ vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
+ vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
+ vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
+ vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
+ vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
+ }
+
+ core_writel(priv, vc0, CORE_VLAN_CTRL0);
+ core_writel(priv, vc1, CORE_VLAN_CTRL1);
+ core_writel(priv, 0, CORE_VLAN_CTRL3);
+ core_writel(priv, vc4, CORE_VLAN_CTRL4);
+ core_writel(priv, vc5, CORE_VLAN_CTRL5);
+ core_writel(priv, mgmt, CORE_SWMODE);
+}
+
+static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ unsigned int port;
+
+ /* Clear all VLANs */
+ bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);
+
+ for (port = 0; port < priv->hw_params.num_ports; port++) {
+ if (!((1 << port) & ds->enabled_port_mask))
+ continue;
+
+ core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
+ }
+}
+
+static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ return 0;
+}
+
+static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+
+ bcm_sf2_enable_vlan(priv, true);
+
+ return 0;
+}
+
+static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ s8 cpu_port = ds->dst->cpu_port;
+ struct bcm_sf2_vlan *vl;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &priv->vlans[vid];
+
+ bcm_sf2_get_vlan_entry(priv, vid, vl);
+
+ vl->members |= BIT(port) | BIT(cpu_port);
+ if (untagged)
+ vl->untag |= BIT(port) | BIT(cpu_port);
+ else
+ vl->untag &= ~(BIT(port) | BIT(cpu_port));
+
+ bcm_sf2_set_vlan_entry(priv, vid, vl);
+ bcm_sf2_sw_fast_age_vlan(priv, vid);
+ }
+
+ if (pvid) {
+ core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port));
+ core_writel(priv, vlan->vid_end,
+ CORE_DEFAULT_1Q_TAG_P(cpu_port));
+		bcm_sf2_sw_fast_age_vlan(priv, vlan->vid_end);
+ }
+}
+
+static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ s8 cpu_port = ds->dst->cpu_port;
+ struct bcm_sf2_vlan *vl;
+ u16 vid, pvid;
+ int ret;
+
+ pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &priv->vlans[vid];
+
+ ret = bcm_sf2_get_vlan_entry(priv, vid, vl);
+ if (ret)
+ return ret;
+
+ vl->members &= ~BIT(port);
+ if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
+ vl->members = 0;
+ if (pvid == vid)
+ pvid = 0;
+ if (untagged) {
+ vl->untag &= ~BIT(port);
+			if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
+ vl->untag = 0;
+ }
+
+ bcm_sf2_set_vlan_entry(priv, vid, vl);
+ bcm_sf2_sw_fast_age_vlan(priv, vid);
+ }
+
+ core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port));
+ core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port));
+	bcm_sf2_sw_fast_age_vlan(priv, pvid);
+
+ return 0;
+}
+
+static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_vlan *vl;
+ u16 vid, pvid;
+ int err = 0;
+
+ pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
+
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
+ vl = &priv->vlans[vid];
+
+ if (!(vl->members & BIT(port)))
+ continue;
+
+ vlan->vid_begin = vlan->vid_end = vid;
+ vlan->flags = 0;
+
+ if (vl->untag & BIT(port))
+ vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+		if (pvid == vid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ }
+
+ return err;
+}
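
To make the bitmap bookkeeping above concrete, here is a worked example rather than driver code: adding VID 10 untagged on user port 1, with the CPU port at index 8, as bcm_sf2_sw_vlan_add() does:

    struct bcm_sf2_vlan vl = { 0 };
    int port = 1, cpu_port = 8;

    vl.members |= BIT(port) | BIT(cpu_port);  /* 0x102: ports 1 and 8 */
    vl.untag   |= BIT(port) | BIT(cpu_port);  /* both egress untagged */
    /* The entry is then written back with bcm_sf2_set_vlan_entry() and
     * the VLAN fast-aged so stale ARL entries do not linger. */
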
+
+static int bcm_sf2_sw_setup(struct dsa_switch *ds)
+{
+ const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct device_node *dn;
+ void __iomem **base;
+ unsigned int port;
+ unsigned int i;
+ u32 reg, rev;
+ int ret;
+
+ spin_lock_init(&priv->indir_lock);
+ mutex_init(&priv->stats_mutex);
+
+ /* All the interesting properties are at the parent device_node
+ * level
+ */
+ dn = ds->cd->of_node->parent;
+ bcm_sf2_identify_ports(priv, ds->cd->of_node);
+
+ priv->irq0 = irq_of_parse_and_map(dn, 0);
+ priv->irq1 = irq_of_parse_and_map(dn, 1);
+
+ base = &priv->core;
+ for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
+ *base = of_iomap(dn, i);
+ if (*base == NULL) {
+ pr_err("unable to find register: %s\n", reg_names[i]);
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+ base++;
+ }
+
+ ret = bcm_sf2_sw_rst(priv);
+ if (ret) {
+ pr_err("unable to software reset switch: %d\n", ret);
+ goto out_unmap;
+ }
+
+ ret = bcm_sf2_mdio_register(ds);
+ if (ret) {
+ pr_err("failed to register MDIO bus\n");
+ goto out_unmap;
+ }
+
+ /* Disable all interrupts and request them */
+ bcm_sf2_intr_disable(priv);
+
+ ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
+ "switch_0", priv);
+ if (ret < 0) {
+ pr_err("failed to request switch_0 IRQ\n");
+		goto out_mdio;
+ }
+
+ ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
+ "switch_1", priv);
+ if (ret < 0) {
+ pr_err("failed to request switch_1 IRQ\n");
+ goto out_free_irq0;
+ }
+
+ /* Reset the MIB counters */
+ reg = core_readl(priv, CORE_GMNCFGCFG);
+ reg |= RST_MIB_CNT;
+ core_writel(priv, reg, CORE_GMNCFGCFG);
+ reg &= ~RST_MIB_CNT;
+ core_writel(priv, reg, CORE_GMNCFGCFG);
+
+ /* Get the maximum number of ports for this switch */
+ priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
+ if (priv->hw_params.num_ports > DSA_MAX_PORTS)
+ priv->hw_params.num_ports = DSA_MAX_PORTS;
+
+ /* Assume a single GPHY setup if we can't read that property */
+ if (of_property_read_u32(dn, "brcm,num-gphy",
+ &priv->hw_params.num_gphy))
+ priv->hw_params.num_gphy = 1;
+
+ /* Enable all valid ports and disable those unused */
+ for (port = 0; port < priv->hw_params.num_ports; port++) {
+ /* IMP port receives special treatment */
+ if ((1 << port) & ds->enabled_port_mask)
+ bcm_sf2_port_setup(ds, port, NULL);
+ else if (dsa_is_cpu_port(ds, port))
+ bcm_sf2_imp_setup(ds, port);
+ else
+ bcm_sf2_port_disable(ds, port, NULL);
+ }
+
+ bcm_sf2_sw_configure_vlan(ds);
+
+ rev = reg_readl(priv, REG_SWITCH_REVISION);
+ priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
+ SWITCH_TOP_REV_MASK;
+ priv->hw_params.core_rev = (rev & SF2_REV_MASK);
+
+ rev = reg_readl(priv, REG_PHY_REVISION);
+ priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
+
+ pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
+ priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
+ priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
+ priv->core, priv->irq0, priv->irq1);
+
+ return 0;
+
+out_free_irq0:
+	free_irq(priv->irq0, priv);
+out_mdio:
+	bcm_sf2_mdio_unregister(priv);
+out_unmap:
+	base = &priv->core;
+	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
+		if (*base)
+			iounmap(*base);
+		base++;
+	}
+	return ret;
+}
+
static struct dsa_switch_driver bcm_sf2_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_BRCM,
.probe = bcm_sf2_sw_drv_probe,
.setup = bcm_sf2_sw_setup,
.set_addr = bcm_sf2_sw_set_addr,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
- .phy_read = bcm_sf2_sw_phy_read,
- .phy_write = bcm_sf2_sw_phy_write,
.get_strings = bcm_sf2_sw_get_strings,
.get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
.get_sset_count = bcm_sf2_sw_get_sset_count,
@@ -1398,6 +1716,11 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = {
.port_fdb_add = bcm_sf2_sw_fdb_add,
.port_fdb_del = bcm_sf2_sw_fdb_del,
.port_fdb_dump = bcm_sf2_sw_fdb_dump,
+ .port_vlan_filtering = bcm_sf2_sw_vlan_filtering,
+ .port_vlan_prepare = bcm_sf2_sw_vlan_prepare,
+ .port_vlan_add = bcm_sf2_sw_vlan_add,
+ .port_vlan_del = bcm_sf2_sw_vlan_del,
+ .port_vlan_dump = bcm_sf2_sw_vlan_dump,
};
static int __init bcm_sf2_init(void)
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 200b1f5fdb56..463bed8cbe4c 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -21,6 +21,7 @@
#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/if_vlan.h>
#include <net/dsa.h>
@@ -50,6 +51,7 @@ struct bcm_sf2_port_status {
struct ethtool_eee eee;
u32 vlan_ctl_mask;
+ u16 pvid;
struct net_device *bridge_dev;
};
@@ -63,6 +65,11 @@ struct bcm_sf2_arl_entry {
u8 is_static:1;
};
+struct bcm_sf2_vlan {
+ u16 members;
+ u16 untag;
+};
+
static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst)
{
unsigned int i;
@@ -142,6 +149,15 @@ struct bcm_sf2_priv {
/* Bitmask of ports having an integrated PHY */
unsigned int int_phy_mask;
+
+ /* Master and slave MDIO bus controller */
+ unsigned int indir_phy_mask;
+ struct device_node *master_mii_dn;
+ struct mii_bus *slave_mii_bus;
+ struct mii_bus *master_mii_bus;
+
+ /* Cache of programmed VLANs */
+ struct bcm_sf2_vlan vlans[VLAN_N_VID];
};
struct bcm_sf2_hw_stats {
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 97780d43b5c0..9f2a9cb42074 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -274,6 +274,23 @@
#define CORE_ARLA_SRCH_RSLT_MACVID(x) (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40))
#define CORE_ARLA_SRCH_RSLT(x) (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40))
+#define CORE_ARLA_VTBL_RWCTRL 0x1600
+#define ARLA_VTBL_CMD_WRITE 0
+#define ARLA_VTBL_CMD_READ 1
+#define ARLA_VTBL_CMD_CLEAR 2
+#define ARLA_VTBL_STDN (1 << 7)
+
+#define CORE_ARLA_VTBL_ADDR 0x1604
+#define VTBL_ADDR_INDEX_MASK 0xfff
+
+#define CORE_ARLA_VTBL_ENTRY 0x160c
+#define FWD_MAP_MASK 0x1ff
+#define UNTAG_MAP_MASK 0x1ff
+#define UNTAG_MAP_SHIFT 9
+#define MSTP_INDEX_MASK 0x7
+#define MSTP_INDEX_SHIFT 18
+#define FWD_MODE (1 << 21)
+
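The VLAN table accessors used earlier in this patch (bcm_sf2_vlan_op(), bcm_sf2_get_vlan_entry(), bcm_sf2_set_vlan_entry(); their definitions fall outside the hunks shown here) drive these registers with the usual command/poll pattern. A simplified sketch, assuming the start/done bit self-clears on completion:

    /* Sketch, not the exact driver code: kick a VLAN table command and
     * poll ARLA_VTBL_STDN until the hardware clears it. */
    static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
    {
            unsigned int timeout = 10;
            u32 cmd;

            core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
            do {
                    cmd = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
                    if (!(cmd & ARLA_VTBL_STDN))
                            return 0;
                    usleep_range(1000, 2000);
            } while (timeout--);

            return -ETIMEDOUT;
    }
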
#define CORE_MEM_PSM_VDD_CTRL 0x2380
#define P_TXQ_PSM_VDD_SHIFT 2
#define P_TXQ_PSM_VDD_MASK 0x3
@@ -287,6 +304,59 @@
#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8))
#define PORT_VLAN_CTRL_MASK 0x1ff
+#define CORE_VLAN_CTRL0 0xd000
+#define CHANGE_1P_VID_INNER (1 << 0)
+#define CHANGE_1P_VID_OUTER (1 << 1)
+#define CHANGE_1Q_VID (1 << 3)
+#define VLAN_LEARN_MODE_SVL (0 << 5)
+#define VLAN_LEARN_MODE_IVL (3 << 5)
+#define VLAN_EN (1 << 7)
+
+#define CORE_VLAN_CTRL1 0xd004
+#define EN_RSV_MCAST_FWDMAP (1 << 2)
+#define EN_RSV_MCAST_UNTAG (1 << 3)
+#define EN_IPMC_BYPASS_FWDMAP (1 << 5)
+#define EN_IPMC_BYPASS_UNTAG (1 << 6)
+
+#define CORE_VLAN_CTRL2 0xd008
+#define EN_MIIM_BYPASS_V_FWDMAP (1 << 2)
+#define EN_GMRP_GVRP_V_FWDMAP (1 << 5)
+#define EN_GMRP_GVRP_UNTAG_MAP (1 << 6)
+
+#define CORE_VLAN_CTRL3 0xd00c
+#define EN_DROP_NON1Q_MASK 0x1ff
+
+#define CORE_VLAN_CTRL4 0xd014
+#define RESV_MCAST_FLOOD (1 << 1)
+#define EN_DOUBLE_TAG_MASK 0x3
+#define EN_DOUBLE_TAG_SHIFT 2
+#define EN_MGE_REV_GMRP (1 << 4)
+#define EN_MGE_REV_GVRP (1 << 5)
+#define INGR_VID_CHK_SHIFT 6
+#define INGR_VID_CHK_MASK 0x3
+#define INGR_VID_CHK_FWD (0 << INGR_VID_CHK_SHIFT)
+#define INGR_VID_CHK_DROP (1 << INGR_VID_CHK_SHIFT)
+#define INGR_VID_CHK_NO_CHK (2 << INGR_VID_CHK_SHIFT)
+#define INGR_VID_CHK_VID_VIOL_IMP (3 << INGR_VID_CHK_SHIFT)
+
+#define CORE_VLAN_CTRL5 0xd018
+#define EN_CPU_RX_BYP_INNER_CRCCHCK (1 << 0)
+#define EN_VID_FFF_FWD (1 << 2)
+#define DROP_VTABLE_MISS (1 << 3)
+#define EGRESS_DIR_FRM_BYP_TRUNK_EN (1 << 4)
+#define PRESV_NON1Q (1 << 6)
+
+#define CORE_VLAN_CTRL6 0xd01c
+#define STRICT_SFD_DETECT (1 << 0)
+#define DIS_ARL_BUST_LMIT (1 << 4)
+
+#define CORE_DEFAULT_1Q_TAG_P(x) (0xd040 + ((x) * 8))
+#define CFI_SHIFT 12
+#define PRI_SHIFT 13
+#define PRI_MASK 0x7
+
+#define CORE_JOIN_ALL_VLAN_EN 0xd140
+
#define CORE_EEE_EN_CTRL 0x24800
#define CORE_EEE_LPI_INDICATE 0x24810
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
new file mode 100644
index 000000000000..490bc06f993e
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -0,0 +1,7 @@
+config NET_DSA_MV88E6XXX
+ tristate "Marvell 88E6xxx Ethernet switch fabric support"
+ depends on NET_DSA
+ select NET_DSA_TAG_EDSA
+ help
+ This driver adds support for most of the Marvell 88E6xxx models of
+ Ethernet switch chips, except 88E6060.
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
new file mode 100644
index 000000000000..6e29a75ee2f7
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += chip.o
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx/chip.c
index ee06055444f9..5cb06f7673af 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1,5 +1,6 @@
/*
- * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
+ * Marvell 88e6xxx Ethernet switch single-chip support
+ *
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2015 CMC Electronics, Inc.
@@ -21,6 +22,7 @@
#include <linux/list.h>
#include <linux/mdio.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/netdevice.h>
#include <linux/gpio/consumer.h>
@@ -29,29 +31,82 @@
#include <net/switchdev.h>
#include "mv88e6xxx.h"
-static void assert_smi_lock(struct mv88e6xxx_priv_state *ps)
+static void assert_reg_lock(struct mv88e6xxx_chip *chip)
{
- if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
- dev_err(ps->dev, "SMI lock not held!\n");
+ if (unlikely(!mutex_is_locked(&chip->reg_lock))) {
+ dev_err(chip->dev, "Switch registers lock not held!\n");
dump_stack();
}
}
-/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
- * use all 32 SMI bus addresses on its SMI bus, and all switch registers
- * will be directly accessible on some {device address,register address}
- * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
- * will only respond to SMI transactions to that specific address, and
- * an indirect addressing mechanism needs to be used to access its
- * registers.
+/* The switch ADDR[4:1] configuration pins define the chip SMI device address
+ * (ADDR[0] is always zero, thus only even SMI addresses can be strapped).
+ *
+ * When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it
+ * is the only device connected to the SMI master. In this mode it responds to
+ * all 32 possible SMI addresses, and thus directly maps the internal devices.
+ *
+ * When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing
+ * multiple devices to share the SMI interface. In this mode it responds to only
+ * 2 registers, used to indirectly access the internal SMI devices.
*/
-static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
+
+static int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 *val)
+{
+ if (!chip->smi_ops)
+ return -EOPNOTSUPP;
+
+ return chip->smi_ops->read(chip, addr, reg, val);
+}
+
+static int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 val)
+{
+ if (!chip->smi_ops)
+ return -EOPNOTSUPP;
+
+ return chip->smi_ops->write(chip, addr, reg, val);
+}
+
+static int mv88e6xxx_smi_single_chip_read(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 *val)
+{
+ int ret;
+
+ ret = mdiobus_read_nested(chip->bus, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret & 0xffff;
+
+ return 0;
+}
+
+static int mv88e6xxx_smi_single_chip_write(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 val)
+{
+ int ret;
+
+ ret = mdiobus_write_nested(chip->bus, addr, reg, val);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct mv88e6xxx_ops mv88e6xxx_smi_single_chip_ops = {
+ .read = mv88e6xxx_smi_single_chip_read,
+ .write = mv88e6xxx_smi_single_chip_write,
+};
+
+static int mv88e6xxx_smi_multi_chip_wait(struct mv88e6xxx_chip *chip)
{
int ret;
int i;
for (i = 0; i < 16; i++) {
- ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
+ ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_CMD);
if (ret < 0)
return ret;
@@ -62,143 +117,168 @@ static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
return -ETIMEDOUT;
}
-static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
- int reg)
+static int mv88e6xxx_smi_multi_chip_read(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 *val)
{
int ret;
- if (sw_addr == 0)
- return mdiobus_read_nested(bus, addr, reg);
-
/* Wait for the bus to become free. */
- ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ ret = mv88e6xxx_smi_multi_chip_wait(chip);
if (ret < 0)
return ret;
/* Transmit the read command. */
- ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
+ ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD,
SMI_CMD_OP_22_READ | (addr << 5) | reg);
if (ret < 0)
return ret;
/* Wait for the read command to complete. */
- ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ ret = mv88e6xxx_smi_multi_chip_wait(chip);
if (ret < 0)
return ret;
/* Read the data. */
- ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
- if (ret < 0)
- return ret;
-
- return ret & 0xffff;
-}
-
-static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps,
- int addr, int reg)
-{
- int ret;
-
- assert_smi_lock(ps);
-
- ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg);
+ ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_DATA);
if (ret < 0)
return ret;
- dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
- addr, reg, ret);
+ *val = ret & 0xffff;
- return ret;
-}
-
-int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg)
-{
- int ret;
-
- mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_reg_read(ps, addr, reg);
- mutex_unlock(&ps->smi_mutex);
-
- return ret;
+ return 0;
}
-static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
- int reg, u16 val)
+static int mv88e6xxx_smi_multi_chip_write(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 val)
{
int ret;
- if (sw_addr == 0)
- return mdiobus_write_nested(bus, addr, reg, val);
-
/* Wait for the bus to become free. */
- ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ ret = mv88e6xxx_smi_multi_chip_wait(chip);
if (ret < 0)
return ret;
/* Transmit the data to write. */
- ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
+ ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_DATA, val);
if (ret < 0)
return ret;
/* Transmit the write command. */
- ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
+ ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD,
SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
if (ret < 0)
return ret;
/* Wait for the write command to complete. */
- ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ ret = mv88e6xxx_smi_multi_chip_wait(chip);
if (ret < 0)
return ret;
return 0;
}
-static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
- int reg, u16 val)
+static const struct mv88e6xxx_ops mv88e6xxx_smi_multi_chip_ops = {
+ .read = mv88e6xxx_smi_multi_chip_read,
+ .write = mv88e6xxx_smi_multi_chip_write,
+};
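
The SMI_CMD word used by the multi-chip sequence packs a 5-bit internal device address and a 5-bit register number beneath the opcode bits. A worked example (values illustrative): reading the switch ID register 0x03 of the internal device at address 0x10 encodes as:

    /* Illustrative only: low bits come out as (0x10 << 5) | 0x03 = 0x0203;
     * the word is written to SMI_CMD and the result is read back from
     * SMI_DATA once the busy bit clears. */
    u16 cmd = SMI_CMD_OP_22_READ | (0x10 << 5) | 0x03;
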
+
+static int mv88e6xxx_read(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 *val)
{
- assert_smi_lock(ps);
+ int err;
+
+ assert_reg_lock(chip);
+
+ err = mv88e6xxx_smi_read(chip, addr, reg, val);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+ addr, reg, *val);
+
+ return 0;
+}
+
+static int mv88e6xxx_write(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 val)
+{
+ int err;
- dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+ assert_reg_lock(chip);
+
+ err = mv88e6xxx_smi_write(chip, addr, reg, val);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
addr, reg, val);
- return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val);
+ return 0;
}
-int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
- int reg, u16 val)
+static int _mv88e6xxx_reg_read(struct mv88e6xxx_chip *chip, int addr, int reg)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_read(chip, addr, reg, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+static int mv88e6xxx_reg_read(struct mv88e6xxx_chip *chip, int addr, int reg)
{
int ret;
- mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_reg_write(ps, addr, reg, val);
- mutex_unlock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
+ ret = _mv88e6xxx_reg_read(chip, addr, reg);
+ mutex_unlock(&chip->reg_lock);
+
+ return ret;
+}
+
+static int _mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr,
+ int reg, u16 val)
+{
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+static int mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr,
+ int reg, u16 val)
+{
+ int ret;
+
+ mutex_lock(&chip->reg_lock);
+ ret = _mv88e6xxx_reg_write(chip, addr, reg, val);
+ mutex_unlock(&chip->reg_lock);
return ret;
}
static int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int err;
- err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01,
+ err = mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MAC_01,
(addr[0] << 8) | addr[1]);
if (err)
return err;
- err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23,
+ err = mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MAC_23,
(addr[2] << 8) | addr[3]);
if (err)
return err;
- return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45,
+ return mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MAC_45,
(addr[4] << 8) | addr[5]);
}
static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int ret;
int i;
@@ -206,7 +286,7 @@ static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
int j;
/* Write the MAC address byte. */
- ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
+ ret = mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
GLOBAL2_SWITCH_MAC_BUSY |
(i << 8) | addr[i]);
if (ret)
@@ -214,7 +294,7 @@ static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
/* Wait for the write to complete. */
for (j = 0; j < 16; j++) {
- ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2,
+ ret = mv88e6xxx_reg_read(chip, REG_GLOBAL2,
GLOBAL2_SWITCH_MAC);
if (ret < 0)
return ret;
@@ -229,49 +309,49 @@ static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
return 0;
}
-int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
+static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SWITCH_MAC))
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SWITCH_MAC))
return mv88e6xxx_set_addr_indirect(ds, addr);
else
return mv88e6xxx_set_addr_direct(ds, addr);
}
-static int mv88e6xxx_mdio_read_direct(struct mv88e6xxx_priv_state *ps,
+static int mv88e6xxx_mdio_read_direct(struct mv88e6xxx_chip *chip,
int addr, int regnum)
{
if (addr >= 0)
- return _mv88e6xxx_reg_read(ps, addr, regnum);
+ return _mv88e6xxx_reg_read(chip, addr, regnum);
return 0xffff;
}
-static int mv88e6xxx_mdio_write_direct(struct mv88e6xxx_priv_state *ps,
+static int mv88e6xxx_mdio_write_direct(struct mv88e6xxx_chip *chip,
int addr, int regnum, u16 val)
{
if (addr >= 0)
- return _mv88e6xxx_reg_write(ps, addr, regnum, val);
+ return _mv88e6xxx_reg_write(chip, addr, regnum, val);
return 0;
}
-static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps)
+static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
{
int ret;
unsigned long timeout;
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL,
ret & ~GLOBAL_CONTROL_PPU_ENABLE);
if (ret)
return ret;
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS);
if (ret < 0)
return ret;
@@ -284,23 +364,23 @@ static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps)
return -ETIMEDOUT;
}
-static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
+static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip)
{
int ret, err;
unsigned long timeout;
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL);
if (ret < 0)
return ret;
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL,
ret | GLOBAL_CONTROL_PPU_ENABLE);
if (err)
return err;
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS);
if (ret < 0)
return ret;
@@ -315,148 +395,148 @@ static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
{
- struct mv88e6xxx_priv_state *ps;
+ struct mv88e6xxx_chip *chip;
- ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
+ chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work);
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- if (mutex_trylock(&ps->ppu_mutex)) {
- if (mv88e6xxx_ppu_enable(ps) == 0)
- ps->ppu_disabled = 0;
- mutex_unlock(&ps->ppu_mutex);
+ if (mutex_trylock(&chip->ppu_mutex)) {
+ if (mv88e6xxx_ppu_enable(chip) == 0)
+ chip->ppu_disabled = 0;
+ mutex_unlock(&chip->ppu_mutex);
}
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
}
static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
{
- struct mv88e6xxx_priv_state *ps = (void *)_ps;
+ struct mv88e6xxx_chip *chip = (void *)_ps;
- schedule_work(&ps->ppu_work);
+ schedule_work(&chip->ppu_work);
}
-static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps)
+static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_chip *chip)
{
int ret;
- mutex_lock(&ps->ppu_mutex);
+ mutex_lock(&chip->ppu_mutex);
/* If the PHY polling unit is enabled, disable it so that
* we can access the PHY registers. If it was already
* disabled, cancel the timer that is going to re-enable
* it.
*/
- if (!ps->ppu_disabled) {
- ret = mv88e6xxx_ppu_disable(ps);
+ if (!chip->ppu_disabled) {
+ ret = mv88e6xxx_ppu_disable(chip);
if (ret < 0) {
- mutex_unlock(&ps->ppu_mutex);
+ mutex_unlock(&chip->ppu_mutex);
return ret;
}
- ps->ppu_disabled = 1;
+ chip->ppu_disabled = 1;
} else {
- del_timer(&ps->ppu_timer);
+ del_timer(&chip->ppu_timer);
ret = 0;
}
return ret;
}
-static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps)
+static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_chip *chip)
{
/* Schedule a timer to re-enable the PHY polling unit. */
- mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
- mutex_unlock(&ps->ppu_mutex);
+ mod_timer(&chip->ppu_timer, jiffies + msecs_to_jiffies(10));
+ mutex_unlock(&chip->ppu_mutex);
}
-void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps)
+static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip)
{
- mutex_init(&ps->ppu_mutex);
- INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
- init_timer(&ps->ppu_timer);
- ps->ppu_timer.data = (unsigned long)ps;
- ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
+ mutex_init(&chip->ppu_mutex);
+ INIT_WORK(&chip->ppu_work, mv88e6xxx_ppu_reenable_work);
+ init_timer(&chip->ppu_timer);
+ chip->ppu_timer.data = (unsigned long)chip;
+ chip->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
}
-static int mv88e6xxx_mdio_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
+static int mv88e6xxx_mdio_read_ppu(struct mv88e6xxx_chip *chip, int addr,
int regnum)
{
int ret;
- ret = mv88e6xxx_ppu_access_get(ps);
+ ret = mv88e6xxx_ppu_access_get(chip);
if (ret >= 0) {
- ret = _mv88e6xxx_reg_read(ps, addr, regnum);
- mv88e6xxx_ppu_access_put(ps);
+ ret = _mv88e6xxx_reg_read(chip, addr, regnum);
+ mv88e6xxx_ppu_access_put(chip);
}
return ret;
}
-static int mv88e6xxx_mdio_write_ppu(struct mv88e6xxx_priv_state *ps, int addr,
+static int mv88e6xxx_mdio_write_ppu(struct mv88e6xxx_chip *chip, int addr,
int regnum, u16 val)
{
int ret;
- ret = mv88e6xxx_ppu_access_get(ps);
+ ret = mv88e6xxx_ppu_access_get(chip);
if (ret >= 0) {
- ret = _mv88e6xxx_reg_write(ps, addr, regnum, val);
- mv88e6xxx_ppu_access_put(ps);
+ ret = _mv88e6xxx_reg_write(chip, addr, regnum, val);
+ mv88e6xxx_ppu_access_put(chip);
}
return ret;
}
-static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps)
+static bool mv88e6xxx_6065_family(struct mv88e6xxx_chip *chip)
{
- return ps->info->family == MV88E6XXX_FAMILY_6065;
+ return chip->info->family == MV88E6XXX_FAMILY_6065;
}
-static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps)
+static bool mv88e6xxx_6095_family(struct mv88e6xxx_chip *chip)
{
- return ps->info->family == MV88E6XXX_FAMILY_6095;
+ return chip->info->family == MV88E6XXX_FAMILY_6095;
}
-static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps)
+static bool mv88e6xxx_6097_family(struct mv88e6xxx_chip *chip)
{
- return ps->info->family == MV88E6XXX_FAMILY_6097;
+ return chip->info->family == MV88E6XXX_FAMILY_6097;
}
-static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps)
+static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip)
{
- return ps->info->family == MV88E6XXX_FAMILY_6165;
+ return chip->info->family == MV88E6XXX_FAMILY_6165;
}
-static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps)
+static bool mv88e6xxx_6185_family(struct mv88e6xxx_chip *chip)
{
- return ps->info->family == MV88E6XXX_FAMILY_6185;
+ return chip->info->family == MV88E6XXX_FAMILY_6185;
}
-static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps)
+static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip)
{
- return ps->info->family == MV88E6XXX_FAMILY_6320;
+ return chip->info->family == MV88E6XXX_FAMILY_6320;
}
-static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps)
+static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip)
{
- return ps->info->family == MV88E6XXX_FAMILY_6351;
+ return chip->info->family == MV88E6XXX_FAMILY_6351;
}
-static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps)
+static bool mv88e6xxx_6352_family(struct mv88e6xxx_chip *chip)
{
- return ps->info->family == MV88E6XXX_FAMILY_6352;
+ return chip->info->family == MV88E6XXX_FAMILY_6352;
}
-static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps)
+static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
{
- return ps->info->num_databases;
+ return chip->info->num_databases;
}
-static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps)
+static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_chip *chip)
{
/* Does the device have dedicated FID registers for ATU and VTU ops? */
- if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
- mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
+ if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
+ mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip))
return true;
return false;
@@ -469,16 +549,16 @@ static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps)
static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
u32 reg;
int ret;
if (!phy_is_pseudo_fixed_link(phydev))
return;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL);
if (ret < 0)
goto out;
@@ -490,9 +570,9 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
reg |= PORT_PCS_CTRL_FORCE_LINK;
if (phydev->link)
- reg |= PORT_PCS_CTRL_LINK_UP;
+ reg |= PORT_PCS_CTRL_LINK_UP;
- if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100)
+ if (mv88e6xxx_6065_family(chip) && phydev->speed > SPEED_100)
goto out;
switch (phydev->speed) {
@@ -514,8 +594,8 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
if (phydev->duplex == DUPLEX_FULL)
reg |= PORT_PCS_CTRL_DUPLEX_FULL;
- if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) &&
- (port >= ps->info->num_ports - 2)) {
+ if ((mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip)) &&
+ (port >= chip->info->num_ports - 2)) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
@@ -524,19 +604,19 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
}
- _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg);
+ _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_PCS_CTRL, reg);
out:
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
}
-static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps)
+static int _mv88e6xxx_stats_wait(struct mv88e6xxx_chip *chip)
{
int ret;
int i;
for (i = 0; i < 10; i++) {
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_OP);
if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
return 0;
}
@@ -544,30 +624,29 @@ static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps)
return -ETIMEDOUT;
}
-static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps,
- int port)
+static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
{
int ret;
- if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
+ if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
port = (port + 1) << 5;
/* Snapshot the hardware statistics counters for this port. */
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
GLOBAL_STATS_OP_CAPTURE_PORT |
GLOBAL_STATS_OP_HIST_RX_TX | port);
if (ret < 0)
return ret;
/* Wait for the snapshotting to complete. */
- ret = _mv88e6xxx_stats_wait(ps);
+ ret = _mv88e6xxx_stats_wait(chip);
if (ret < 0)
return ret;
return 0;
}
-static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps,
+static void _mv88e6xxx_stats_read(struct mv88e6xxx_chip *chip,
int stat, u32 *val)
{
u32 _val;
@@ -575,23 +654,23 @@ static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps,
*val = 0;
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
GLOBAL_STATS_OP_READ_CAPTURED |
GLOBAL_STATS_OP_HIST_RX_TX | stat);
if (ret < 0)
return;
- ret = _mv88e6xxx_stats_wait(ps);
+ ret = _mv88e6xxx_stats_wait(chip);
if (ret < 0)
return;
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
if (ret < 0)
return;
_val = ret << 16;
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
if (ret < 0)
return;
@@ -660,26 +739,26 @@ static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
{ "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
};
-static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps,
+static bool mv88e6xxx_has_stat(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_hw_stat *stat)
{
switch (stat->type) {
case BANK0:
return true;
case BANK1:
- return mv88e6xxx_6320_family(ps);
+ return mv88e6xxx_6320_family(chip);
case PORT:
- return mv88e6xxx_6095_family(ps) ||
- mv88e6xxx_6185_family(ps) ||
- mv88e6xxx_6097_family(ps) ||
- mv88e6xxx_6165_family(ps) ||
- mv88e6xxx_6351_family(ps) ||
- mv88e6xxx_6352_family(ps);
+ return mv88e6xxx_6095_family(chip) ||
+ mv88e6xxx_6185_family(chip) ||
+ mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6165_family(chip) ||
+ mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6352_family(chip);
}
return false;
}
-static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
+static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_hw_stat *s,
int port)
{
@@ -690,13 +769,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
switch (s->type) {
case PORT:
- ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg);
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), s->reg);
if (ret < 0)
return UINT64_MAX;
low = ret;
if (s->sizeof_stat == 4) {
- ret = _mv88e6xxx_reg_read(ps, REG_PORT(port),
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port),
s->reg + 1);
if (ret < 0)
return UINT64_MAX;
@@ -705,9 +784,9 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
break;
case BANK0:
case BANK1:
- _mv88e6xxx_stats_read(ps, s->reg, &low);
+ _mv88e6xxx_stats_read(chip, s->reg, &low);
if (s->sizeof_stat == 8)
- _mv88e6xxx_stats_read(ps, s->reg + 1, &high);
+ _mv88e6xxx_stats_read(chip, s->reg + 1, &high);
}
value = (((u64)high) << 16) | low;
return value;
@@ -716,13 +795,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
uint8_t *data)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (mv88e6xxx_has_stat(ps, stat)) {
+ if (mv88e6xxx_has_stat(chip, stat)) {
memcpy(data + j * ETH_GSTRING_LEN, stat->string,
ETH_GSTRING_LEN);
j++;
@@ -732,13 +811,13 @@ static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (mv88e6xxx_has_stat(ps, stat))
+ if (mv88e6xxx_has_stat(chip, stat))
j++;
}
return j;
@@ -747,27 +826,27 @@ static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
struct mv88e6xxx_hw_stat *stat;
int ret;
int i, j;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_stats_snapshot(ps, port);
+ ret = _mv88e6xxx_stats_snapshot(chip, port);
if (ret < 0) {
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return;
}
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (mv88e6xxx_has_stat(ps, stat)) {
- data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port);
+ if (mv88e6xxx_has_stat(chip, stat)) {
+ data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port);
j++;
}
}
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
}
static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
@@ -778,7 +857,7 @@ static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *_p)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
u16 *p = _p;
int i;
@@ -786,20 +865,20 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
memset(p, 0xff, 32 * sizeof(u16));
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
for (i = 0; i < 32; i++) {
int ret;
- ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), i);
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), i);
if (ret >= 0)
p[i] = ret;
}
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
}
-static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
+static int _mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int reg, int offset,
u16 mask)
{
unsigned long timeout = jiffies + HZ / 10;
@@ -807,7 +886,7 @@ static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
while (time_before(jiffies, timeout)) {
int ret;
- ret = _mv88e6xxx_reg_read(ps, reg, offset);
+ ret = _mv88e6xxx_reg_read(chip, reg, offset);
if (ret < 0)
return ret;
if (!(ret & mask))
@@ -818,48 +897,48 @@ static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
return -ETIMEDOUT;
}
-static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg,
+static int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int reg,
int offset, u16 mask)
{
int ret;
- mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_wait(ps, reg, offset, mask);
- mutex_unlock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
+ ret = _mv88e6xxx_wait(chip, reg, offset, mask);
+ mutex_unlock(&chip->reg_lock);
return ret;
}
-static int mv88e6xxx_mdio_wait(struct mv88e6xxx_priv_state *ps)
+static int mv88e6xxx_mdio_wait(struct mv88e6xxx_chip *chip)
{
- return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
GLOBAL2_SMI_OP_BUSY);
}
static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
GLOBAL2_EEPROM_OP_LOAD);
}
static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
GLOBAL2_EEPROM_OP_BUSY);
}
static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int ret;
- mutex_lock(&ps->eeprom_mutex);
+ mutex_lock(&chip->eeprom_mutex);
- ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ ret = mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
GLOBAL2_EEPROM_OP_READ |
(addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
if (ret < 0)
@@ -869,18 +948,18 @@ static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
if (ret < 0)
goto error;
- ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
+ ret = mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
error:
- mutex_unlock(&ps->eeprom_mutex);
+ mutex_unlock(&chip->eeprom_mutex);
return ret;
}
static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
- return ps->eeprom_len;
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM))
+ return chip->eeprom_len;
return 0;
}
@@ -888,12 +967,12 @@ static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
struct ethtool_eeprom *eeprom, u8 *data)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int offset;
int len;
int ret;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM))
return -EOPNOTSUPP;
offset = eeprom->offset;
@@ -954,10 +1033,10 @@ static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int ret;
- ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
+ ret = mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
if (ret < 0)
return ret;
@@ -970,16 +1049,16 @@ static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
u16 data)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int ret;
- mutex_lock(&ps->eeprom_mutex);
+ mutex_lock(&chip->eeprom_mutex);
- ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
+ ret = mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
if (ret < 0)
goto error;
- ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ ret = mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
GLOBAL2_EEPROM_OP_WRITE |
(addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
if (ret < 0)
@@ -987,19 +1066,19 @@ static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
ret = mv88e6xxx_eeprom_busy_wait(ds);
error:
- mutex_unlock(&ps->eeprom_mutex);
+ mutex_unlock(&chip->eeprom_mutex);
return ret;
}
static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
struct ethtool_eeprom *eeprom, u8 *data)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int offset;
int ret;
int len;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM))
return -EOPNOTSUPP;
if (eeprom->magic != 0xc3ec4951)
@@ -1071,67 +1150,67 @@ static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
return 0;
}
-static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps)
+static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip)
{
- return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP,
+ return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP,
GLOBAL_ATU_OP_BUSY);
}
-static int mv88e6xxx_mdio_read_indirect(struct mv88e6xxx_priv_state *ps,
+static int mv88e6xxx_mdio_read_indirect(struct mv88e6xxx_chip *chip,
int addr, int regnum)
{
int ret;
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
GLOBAL2_SMI_OP_22_READ | (addr << 5) |
regnum);
if (ret < 0)
return ret;
- ret = mv88e6xxx_mdio_wait(ps);
+ ret = mv88e6xxx_mdio_wait(chip);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA);
return ret;
}
-static int mv88e6xxx_mdio_write_indirect(struct mv88e6xxx_priv_state *ps,
+static int mv88e6xxx_mdio_write_indirect(struct mv88e6xxx_chip *chip,
int addr, int regnum, u16 val)
{
int ret;
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
regnum);
- return mv88e6xxx_mdio_wait(ps);
+ return mv88e6xxx_mdio_wait(chip);
}
static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int reg;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE))
return -EOPNOTSUPP;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- reg = mv88e6xxx_mdio_read_indirect(ps, port, 16);
+ reg = mv88e6xxx_mdio_read_indirect(chip, port, 16);
if (reg < 0)
goto out;
e->eee_enabled = !!(reg & 0x0200);
e->tx_lpi_enabled = !!(reg & 0x0100);
- reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
+ reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS);
if (reg < 0)
goto out;
@@ -1139,23 +1218,23 @@ static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
reg = 0;
out:
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return reg;
}
static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
struct phy_device *phydev, struct ethtool_eee *e)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int reg;
int ret;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE))
return -EOPNOTSUPP;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- ret = mv88e6xxx_mdio_read_indirect(ps, port, 16);
+ ret = mv88e6xxx_mdio_read_indirect(chip, port, 16);
if (ret < 0)
goto out;
@@ -1165,28 +1244,29 @@ static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
if (e->tx_lpi_enabled)
reg |= 0x0100;
- ret = mv88e6xxx_mdio_write_indirect(ps, port, 16, reg);
+ ret = mv88e6xxx_mdio_write_indirect(chip, port, 16, reg);
out:
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return ret;
}
-static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd)
+static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd)
{
int ret;
- if (mv88e6xxx_has_fid_reg(ps)) {
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid);
+ if (mv88e6xxx_has_fid_reg(chip)) {
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_FID,
+ fid);
if (ret < 0)
return ret;
- } else if (mv88e6xxx_num_databases(ps) == 256) {
+ } else if (mv88e6xxx_num_databases(chip) == 256) {
/* ATU DBNum[7:4] are located in ATU Control 15:12 */
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL,
(ret & 0xfff) |
((fid << 8) & 0xf000));
if (ret < 0)
@@ -1196,14 +1276,14 @@ static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd)
cmd |= fid & 0xf;
}
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
if (ret < 0)
return ret;
- return _mv88e6xxx_atu_wait(ps);
+ return _mv88e6xxx_atu_wait(chip);
}
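
The FID placement above varies by chip generation; on a 256-database chip without dedicated FID registers, the FID is split across two locations. A worked example with fid = 0x25 (prev_ctrl here stands for the ATU Control value just read back):

    u16 fid = 0x25;
    u16 ctrl = (prev_ctrl & 0xfff) | ((fid << 8) & 0xf000); /* FID[7:4] -> ATU Control bits 15:12 */
    cmd |= fid & 0xf;                                       /* FID[3:0] -> low bits of the ATU op */
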
-static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_atu_entry *entry)
{
u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
@@ -1223,21 +1303,21 @@ static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps,
data |= (entry->portv_trunkid << shift) & mask;
}
- return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data);
+ return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_DATA, data);
}
-static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_atu_entry *entry,
bool static_too)
{
int op;
int err;
- err = _mv88e6xxx_atu_wait(ps);
+ err = _mv88e6xxx_atu_wait(chip);
if (err)
return err;
- err = _mv88e6xxx_atu_data_write(ps, entry);
+ err = _mv88e6xxx_atu_data_write(chip, entry);
if (err)
return err;
@@ -1249,10 +1329,10 @@ static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps,
GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
}
- return _mv88e6xxx_atu_cmd(ps, entry->fid, op);
+ return _mv88e6xxx_atu_cmd(chip, entry->fid, op);
}
-static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_atu_flush(struct mv88e6xxx_chip *chip,
u16 fid, bool static_too)
{
struct mv88e6xxx_atu_entry entry = {
@@ -1260,10 +1340,10 @@ static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps,
.state = 0, /* EntryState bits must be 0 */
};
- return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
+ return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
}
-static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid,
+static int _mv88e6xxx_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
int from_port, int to_port, bool static_too)
{
struct mv88e6xxx_atu_entry entry = {
@@ -1278,14 +1358,14 @@ static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid,
entry.portv_trunkid = (to_port & 0x0f) << 4;
entry.portv_trunkid |= from_port & 0x0f;
- return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
+ return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
}
-static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state *ps, u16 fid,
+static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid,
int port, bool static_too)
{
/* Destination port 0xF means remove the entries */
- return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too);
+ return _mv88e6xxx_atu_move(chip, fid, port, 0x0f, static_too);
}
static const char * const mv88e6xxx_port_state_names[] = {
@@ -1295,14 +1375,14 @@ static const char * const mv88e6xxx_port_state_names[] = {
[PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
};
-static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
+static int _mv88e6xxx_port_state(struct mv88e6xxx_chip *chip, int port,
u8 state)
{
- struct dsa_switch *ds = ps->ds;
+ struct dsa_switch *ds = chip->ds;
int reg, ret = 0;
u8 oldstate;
- reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL);
+ reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL);
if (reg < 0)
return reg;
@@ -1314,16 +1394,16 @@ static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
* Blocking or Listening state.
*/
if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
- oldstate == PORT_CONTROL_STATE_FORWARDING)
- && (state == PORT_CONTROL_STATE_DISABLED ||
- state == PORT_CONTROL_STATE_BLOCKING)) {
- ret = _mv88e6xxx_atu_remove(ps, 0, port, false);
+ oldstate == PORT_CONTROL_STATE_FORWARDING) &&
+ (state == PORT_CONTROL_STATE_DISABLED ||
+ state == PORT_CONTROL_STATE_BLOCKING)) {
+ ret = _mv88e6xxx_atu_remove(chip, 0, port, false);
if (ret)
return ret;
}
reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL,
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL,
reg);
if (ret)
return ret;
@@ -1336,12 +1416,11 @@ static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
return ret;
}
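
The fast-ageing rule enforced above only fires on transitions that can strand stale ATU entries: leaving Learning or Forwarding for Disabled or Blocking. A standalone sketch of the predicate (the enum values are stand-ins for the PORT_CONTROL_STATE_* constants, not the real register values):

    #include <stdbool.h>
    #include <stdio.h>

    enum port_state { DISABLED, BLOCKING, LEARNING, FORWARDING };

    /* True when the port stops learning/forwarding, so its dynamic
     * ATU entries must be fast-aged before they go stale.
     */
    static bool needs_fast_ageing(enum port_state old, enum port_state new)
    {
            return (old == LEARNING || old == FORWARDING) &&
                   (new == DISABLED || new == BLOCKING);
    }

    int main(void)
    {
            printf("%d\n", needs_fast_ageing(FORWARDING, BLOCKING)); /* 1 */
            printf("%d\n", needs_fast_ageing(BLOCKING, FORWARDING)); /* 0 */
            return 0;
    }
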
-static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
- int port)
+static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
{
- struct net_device *bridge = ps->ports[port].bridge_dev;
- const u16 mask = (1 << ps->info->num_ports) - 1;
- struct dsa_switch *ds = ps->ds;
+ struct net_device *bridge = chip->ports[port].bridge_dev;
+ const u16 mask = (1 << chip->info->num_ports) - 1;
+ struct dsa_switch *ds = chip->ds;
u16 output_ports = 0;
int reg;
int i;
@@ -1350,9 +1429,9 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
output_ports = mask;
} else {
- for (i = 0; i < ps->info->num_ports; ++i) {
+ for (i = 0; i < chip->info->num_ports; ++i) {
/* allow sending frames to every group member */
- if (bridge && ps->ports[i].bridge_dev == bridge)
+ if (bridge && chip->ports[i].bridge_dev == bridge)
output_ports |= BIT(i);
/* allow sending frames to CPU port and DSA link(s) */
@@ -1364,24 +1443,24 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
/* prevent frames from going back out of the port they came in on */
output_ports &= ~BIT(port);
- reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
+ reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN);
if (reg < 0)
return reg;
reg &= ~mask;
reg |= output_ports & mask;
- return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg);
+ return _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN, reg);
}
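
The VLANTable remap above reduces to a mask computation: a port may reach every member of its own bridge group but never loop back out of itself. A self-contained sketch under those assumptions (the bridge[] array holding a group id is hypothetical, and the CPU/DSA special cases handled by the driver are omitted):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_PORTS 7

    /* Recompute one port's output mask: all ports sharing its bridge
     * group (0 = unbridged), minus the port itself (no hairpin).
     */
    static uint16_t port_vlan_mask(const int bridge[NUM_PORTS], int port)
    {
            uint16_t out = 0;
            int i;

            for (i = 0; i < NUM_PORTS; i++)
                    if (bridge[port] && bridge[i] == bridge[port])
                            out |= 1 << i;

            return out & ~(1 << port);
    }

    int main(void)
    {
            int bridge[NUM_PORTS] = { 1, 1, 0, 1, 0, 0, 0 };

            /* ports 0, 1 and 3 share a bridge; port 0 reaches 1 and 3 */
            printf("0x%02x\n", port_vlan_mask(bridge, 0)); /* 0x0a */
            return 0;
    }
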
static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int stp_state;
int err;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_PORTSTATE))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_PORTSTATE))
return;
switch (state) {
@@ -1401,9 +1480,9 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
break;
}
- mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_port_state(ps, port, stp_state);
- mutex_unlock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
+ err = _mv88e6xxx_port_state(chip, port, stp_state);
+ mutex_unlock(&chip->reg_lock);
if (err)
netdev_err(ds->ports[port].netdev,
@@ -1411,14 +1490,14 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
mv88e6xxx_port_state_names[stp_state]);
}
-static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
+static int _mv88e6xxx_port_pvid(struct mv88e6xxx_chip *chip, int port,
u16 *new, u16 *old)
{
- struct dsa_switch *ds = ps->ds;
+ struct dsa_switch *ds = chip->ds;
u16 pvid;
int ret;
- ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN);
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_DEFAULT_VLAN);
if (ret < 0)
return ret;
@@ -1428,7 +1507,7 @@ static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
ret &= ~PORT_DEFAULT_VLAN_MASK;
ret |= *new & PORT_DEFAULT_VLAN_MASK;
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
PORT_DEFAULT_VLAN, ret);
if (ret < 0)
return ret;
@@ -1443,47 +1522,47 @@ static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
return 0;
}
-static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_chip *chip,
int port, u16 *pvid)
{
- return _mv88e6xxx_port_pvid(ps, port, NULL, pvid);
+ return _mv88e6xxx_port_pvid(chip, port, NULL, pvid);
}
-static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_chip *chip,
int port, u16 pvid)
{
- return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL);
+ return _mv88e6xxx_port_pvid(chip, port, &pvid, NULL);
}
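
_mv88e6xxx_port_pvid() above follows the read-modify-write accessor pattern this driver uses throughout: one function in which NULL-able new/old pointers select set, get, or both, with thin wrappers on top. A stripped-down model of the idea (reg_read/reg_write are stub stand-ins for the SMI register accessors; the 12-bit mask mirrors PORT_DEFAULT_VLAN_MASK):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t fake_reg = 0x0001; /* stand-in for the PVID register */

    static int reg_read(uint16_t *val) { *val = fake_reg; return 0; }
    static int reg_write(uint16_t val) { fake_reg = val; return 0; }

    /* One accessor for both directions: pass new to update the field,
     * pass old to fetch the previous value; either may be NULL.
     */
    static int port_pvid(const uint16_t *new, uint16_t *old)
    {
            uint16_t reg;
            int err = reg_read(&reg);

            if (err)
                    return err;
            if (old)
                    *old = reg & 0x0fff;
            if (new) {
                    reg = (reg & ~0x0fff) | (*new & 0x0fff);
                    err = reg_write(reg);
            }
            return err;
    }

    int main(void)
    {
            uint16_t pvid = 42, prev;

            port_pvid(&pvid, &prev);
            printf("old %u new %u\n", prev, fake_reg & 0x0fff); /* 1, 42 */
            return 0;
    }
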
-static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps)
+static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip)
{
- return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP,
+ return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP,
GLOBAL_VTU_OP_BUSY);
}
-static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op)
+static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op)
{
int ret;
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op);
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_OP, op);
if (ret < 0)
return ret;
- return _mv88e6xxx_vtu_wait(ps);
+ return _mv88e6xxx_vtu_wait(chip);
}
-static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps)
+static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_chip *chip)
{
int ret;
- ret = _mv88e6xxx_vtu_wait(ps);
+ ret = _mv88e6xxx_vtu_wait(chip);
if (ret < 0)
return ret;
- return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL);
+ return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_FLUSH_ALL);
}
-static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_stu_entry *entry,
unsigned int nibble_offset)
{
@@ -1492,7 +1571,7 @@ static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
int ret;
for (i = 0; i < 3; ++i) {
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
GLOBAL_VTU_DATA_0_3 + i);
if (ret < 0)
return ret;
@@ -1500,7 +1579,7 @@ static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
regs[i] = ret;
}
- for (i = 0; i < ps->info->num_ports; ++i) {
+ for (i = 0; i < chip->info->num_ports; ++i) {
unsigned int shift = (i % 4) * 4 + nibble_offset;
u16 reg = regs[i / 4];
@@ -1510,19 +1589,19 @@ static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
return 0;
}
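
The VTU/STU data helpers above share one register layout: each port owns a 4-bit nibble, four ports per 16-bit register, three registers in all, with nibble_offset (0 or 2) selecting the VTU member-tag bits or the STU port-state bits inside the nibble. An illustrative unpack, assuming the 2-bit fields the driver masks out:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_PORTS 11

    /* Pull each port's 2-bit field out of the three packed registers;
     * nibble_offset 0 reads member tags, 2 reads port states.
     */
    static void unpack(const uint16_t regs[3], uint8_t data[NUM_PORTS],
                       unsigned int nibble_offset)
    {
            int i;

            for (i = 0; i < NUM_PORTS; i++) {
                    unsigned int shift = (i % 4) * 4 + nibble_offset;

                    data[i] = (regs[i / 4] >> shift) & 3;
            }
    }

    int main(void)
    {
            uint16_t regs[3] = { 0x3210, 0x0000, 0x0000 };
            uint8_t tags[NUM_PORTS];

            unpack(regs, tags, 0);
            printf("port1 tag %d, port1 state %d\n",
                   tags[1], (regs[0] >> 6) & 3); /* tag 1, state 0 */
            return 0;
    }
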
-static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_priv_state *ps,
+static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_stu_entry *entry)
{
- return _mv88e6xxx_vtu_stu_data_read(ps, entry, 0);
+ return _mv88e6xxx_vtu_stu_data_read(chip, entry, 0);
}
-static int mv88e6xxx_stu_data_read(struct mv88e6xxx_priv_state *ps,
+static int mv88e6xxx_stu_data_read(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_stu_entry *entry)
{
- return _mv88e6xxx_vtu_stu_data_read(ps, entry, 2);
+ return _mv88e6xxx_vtu_stu_data_read(chip, entry, 2);
}
-static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_stu_entry *entry,
unsigned int nibble_offset)
{
@@ -1530,7 +1609,7 @@ static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
int i;
int ret;
- for (i = 0; i < ps->info->num_ports; ++i) {
+ for (i = 0; i < chip->info->num_ports; ++i) {
unsigned int shift = (i % 4) * 4 + nibble_offset;
u8 data = entry->data[i];
@@ -1538,7 +1617,7 @@ static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
}
for (i = 0; i < 3; ++i) {
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL,
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL,
GLOBAL_VTU_DATA_0_3 + i, regs[i]);
if (ret < 0)
return ret;
@@ -1547,39 +1626,39 @@ static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
return 0;
}
-static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_priv_state *ps,
+static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_stu_entry *entry)
{
- return _mv88e6xxx_vtu_stu_data_write(ps, entry, 0);
+ return _mv88e6xxx_vtu_stu_data_write(chip, entry, 0);
}
-static int mv88e6xxx_stu_data_write(struct mv88e6xxx_priv_state *ps,
+static int mv88e6xxx_stu_data_write(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_stu_entry *entry)
{
- return _mv88e6xxx_vtu_stu_data_write(ps, entry, 2);
+ return _mv88e6xxx_vtu_stu_data_write(chip, entry, 2);
}
-static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid)
+static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_chip *chip, u16 vid)
{
- return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID,
+ return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID,
vid & GLOBAL_VTU_VID_MASK);
}
-static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_stu_entry *entry)
{
struct mv88e6xxx_vtu_stu_entry next = { 0 };
int ret;
- ret = _mv88e6xxx_vtu_wait(ps);
+ ret = _mv88e6xxx_vtu_wait(chip);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT);
+ ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID);
if (ret < 0)
return ret;
@@ -1587,22 +1666,22 @@ static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
if (next.valid) {
- ret = mv88e6xxx_vtu_data_read(ps, &next);
+ ret = mv88e6xxx_vtu_data_read(chip, &next);
if (ret < 0)
return ret;
- if (mv88e6xxx_has_fid_reg(ps)) {
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
+ if (mv88e6xxx_has_fid_reg(chip)) {
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
GLOBAL_VTU_FID);
if (ret < 0)
return ret;
next.fid = ret & GLOBAL_VTU_FID_MASK;
- } else if (mv88e6xxx_num_databases(ps) == 256) {
+ } else if (mv88e6xxx_num_databases(chip) == 256) {
/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
* VTU DBNum[3:0] are located in VTU Operation 3:0
*/
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
GLOBAL_VTU_OP);
if (ret < 0)
return ret;
@@ -1611,8 +1690,8 @@ static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
next.fid |= ret & 0xf;
}
- if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) {
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
GLOBAL_VTU_SID);
if (ret < 0)
return ret;
@@ -1629,26 +1708,26 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
int (*cb)(struct switchdev_obj *obj))
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
struct mv88e6xxx_vtu_stu_entry next;
u16 pvid;
int err;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
return -EOPNOTSUPP;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
+ err = _mv88e6xxx_port_pvid_get(chip, port, &pvid);
if (err)
goto unlock;
- err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
+ err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK);
if (err)
goto unlock;
do {
- err = _mv88e6xxx_vtu_getnext(ps, &next);
+ err = _mv88e6xxx_vtu_getnext(chip, &next);
if (err)
break;
@@ -1659,7 +1738,8 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
continue;
/* reinit and dump this VLAN obj */
- vlan->vid_begin = vlan->vid_end = next.vid;
+ vlan->vid_begin = next.vid;
+ vlan->vid_end = next.vid;
vlan->flags = 0;
if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
@@ -1674,19 +1754,19 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
} while (next.vid < GLOBAL_VTU_VID_MASK);
unlock:
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return err;
}
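
The dump above relies on the VTU iteration idiom that recurs throughout this file: seed the VID register with all ones so the GetNext operation wraps around to the lowest entry, then loop until an invalid entry or the VID mask itself comes back. A schematic model of that loop (vtu_getnext and its canned VID list are hypothetical stand-ins for the hardware operation):

    #include <stdbool.h>
    #include <stdio.h>

    #define VID_MASK 0xfff

    struct vtu_entry {
            unsigned int vid;
            bool valid;
    };

    /* Hypothetical GetNext: first valid entry with a VID strictly
     * greater than *vid; seeding with VID_MASK wraps to the lowest.
     */
    static int vtu_getnext(unsigned int *vid, struct vtu_entry *e)
    {
            static const unsigned int vids[] = { 10, 20, 4094 };
            unsigned int from = (*vid == VID_MASK) ? 0 : *vid;
            unsigned int i;

            e->vid = VID_MASK;
            e->valid = false;
            for (i = 0; i < sizeof(vids) / sizeof(vids[0]); i++) {
                    if (vids[i] > from) {
                            e->vid = vids[i];
                            e->valid = true;
                            break;
                    }
            }
            *vid = e->vid;
            return 0;
    }

    int main(void)
    {
            struct vtu_entry e;
            unsigned int vid = VID_MASK; /* all ones: wrap to lowest VID */

            do {
                    if (vtu_getnext(&vid, &e) || !e.valid)
                            break;
                    printf("vid %u\n", e.vid); /* 10, 20, 4094 */
            } while (e.vid < VID_MASK);
            return 0;
    }
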
-static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_stu_entry *entry)
{
u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
u16 reg = 0;
int ret;
- ret = _mv88e6xxx_vtu_wait(ps);
+ ret = _mv88e6xxx_vtu_wait(chip);
if (ret < 0)
return ret;
@@ -1694,23 +1774,25 @@ static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
goto loadpurge;
/* Write port member tags */
- ret = mv88e6xxx_vtu_data_write(ps, entry);
+ ret = mv88e6xxx_vtu_data_write(chip, entry);
if (ret < 0)
return ret;
- if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) {
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
reg = entry->sid & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID,
+ reg);
if (ret < 0)
return ret;
}
- if (mv88e6xxx_has_fid_reg(ps)) {
+ if (mv88e6xxx_has_fid_reg(chip)) {
reg = entry->fid & GLOBAL_VTU_FID_MASK;
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg);
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_FID,
+ reg);
if (ret < 0)
return ret;
- } else if (mv88e6xxx_num_databases(ps) == 256) {
+ } else if (mv88e6xxx_num_databases(chip) == 256) {
/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
* VTU DBNum[3:0] are located in VTU Operation 3:0
*/
@@ -1721,46 +1803,46 @@ static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
reg = GLOBAL_VTU_VID_VALID;
loadpurge:
reg |= entry->vid & GLOBAL_VTU_VID_MASK;
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg);
if (ret < 0)
return ret;
- return _mv88e6xxx_vtu_cmd(ps, op);
+ return _mv88e6xxx_vtu_cmd(chip, op);
}
-static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid,
+static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid,
struct mv88e6xxx_vtu_stu_entry *entry)
{
struct mv88e6xxx_vtu_stu_entry next = { 0 };
int ret;
- ret = _mv88e6xxx_vtu_wait(ps);
+ ret = _mv88e6xxx_vtu_wait(chip);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID,
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID,
sid & GLOBAL_VTU_SID_MASK);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT);
+ ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_SID);
if (ret < 0)
return ret;
next.sid = ret & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID);
if (ret < 0)
return ret;
next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
if (next.valid) {
- ret = mv88e6xxx_stu_data_read(ps, &next);
+ ret = mv88e6xxx_stu_data_read(chip, &next);
if (ret < 0)
return ret;
}
@@ -1769,13 +1851,13 @@ static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid,
return 0;
}
-static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_stu_entry *entry)
{
u16 reg = 0;
int ret;
- ret = _mv88e6xxx_vtu_wait(ps);
+ ret = _mv88e6xxx_vtu_wait(chip);
if (ret < 0)
return ret;
@@ -1783,41 +1865,41 @@ static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps,
goto loadpurge;
/* Write port states */
- ret = mv88e6xxx_stu_data_write(ps, entry);
+ ret = mv88e6xxx_stu_data_write(chip, entry);
if (ret < 0)
return ret;
reg = GLOBAL_VTU_VID_VALID;
loadpurge:
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg);
if (ret < 0)
return ret;
reg = entry->sid & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, reg);
if (ret < 0)
return ret;
- return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+ return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}
-static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
+static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port,
u16 *new, u16 *old)
{
- struct dsa_switch *ds = ps->ds;
+ struct dsa_switch *ds = chip->ds;
u16 upper_mask;
u16 fid;
int ret;
- if (mv88e6xxx_num_databases(ps) == 4096)
+ if (mv88e6xxx_num_databases(chip) == 4096)
upper_mask = 0xff;
- else if (mv88e6xxx_num_databases(ps) == 256)
+ else if (mv88e6xxx_num_databases(chip) == 256)
upper_mask = 0xf;
else
return -EOPNOTSUPP;
/* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
- ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN);
if (ret < 0)
return ret;
@@ -1827,14 +1909,14 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN,
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN,
ret);
if (ret < 0)
return ret;
}
/* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
- ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1);
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_1);
if (ret < 0)
return ret;
@@ -1844,7 +1926,7 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
ret &= ~upper_mask;
ret |= (*new >> 4) & upper_mask;
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1,
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1,
ret);
if (ret < 0)
return ret;
@@ -1859,19 +1941,19 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
return 0;
}
-static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_chip *chip,
int port, u16 *fid)
{
- return _mv88e6xxx_port_fid(ps, port, NULL, fid);
+ return _mv88e6xxx_port_fid(chip, port, NULL, fid);
}
-static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_chip *chip,
int port, u16 fid)
{
- return _mv88e6xxx_port_fid(ps, port, &fid, NULL);
+ return _mv88e6xxx_port_fid(chip, port, &fid, NULL);
}
-static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
+static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
{
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
struct mv88e6xxx_vtu_stu_entry vlan;
@@ -1880,8 +1962,8 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
/* Set every FID bit used by the (un)bridged ports */
- for (i = 0; i < ps->info->num_ports; ++i) {
- err = _mv88e6xxx_port_fid_get(ps, i, fid);
+ for (i = 0; i < chip->info->num_ports; ++i) {
+ err = _mv88e6xxx_port_fid_get(chip, i, fid);
if (err)
return err;
@@ -1889,12 +1971,12 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
}
/* Set every FID bit used by the VLAN entries */
- err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
+ err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK);
if (err)
return err;
do {
- err = _mv88e6xxx_vtu_getnext(ps, &vlan);
+ err = _mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
return err;
@@ -1908,35 +1990,35 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
 * databases are not needed. Return the next available positive FID.
*/
*fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
- if (unlikely(*fid >= mv88e6xxx_num_databases(ps)))
+ if (unlikely(*fid >= mv88e6xxx_num_databases(chip)))
return -ENOSPC;
/* Clear the database */
- return _mv88e6xxx_atu_flush(ps, *fid, true);
+ return _mv88e6xxx_atu_flush(chip, *fid, true);
}
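
The allocator above records every FID already claimed by a port or a VTU entry in a bitmap, then takes the first clear bit above zero, since FID 0 is reserved for the shared default database. A self-contained sketch of the same idea, with find_next_zero_bit() replaced by a naive scan:

    #include <stdbool.h>
    #include <stdio.h>

    #define N_FID 4096

    static bool fid_used[N_FID];

    /* First free FID >= 1; FID 0 stays reserved for the ports'
     * common default database.
     */
    static int fid_new(unsigned int *fid)
    {
            unsigned int i;

            for (i = 1; i < N_FID; i++) {
                    if (!fid_used[i]) {
                            fid_used[i] = true;
                            *fid = i;
                            return 0;
                    }
            }
            return -1; /* the driver returns -ENOSPC here */
    }

    int main(void)
    {
            unsigned int fid;

            fid_used[1] = fid_used[2] = true; /* 1 and 2 already in use */
            if (!fid_new(&fid))
                    printf("allocated FID %u\n", fid); /* 3 */
            return 0;
    }
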
-static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
+static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
struct mv88e6xxx_vtu_stu_entry *entry)
{
- struct dsa_switch *ds = ps->ds;
+ struct dsa_switch *ds = chip->ds;
struct mv88e6xxx_vtu_stu_entry vlan = {
.valid = true,
.vid = vid,
};
int i, err;
- err = _mv88e6xxx_fid_new(ps, &vlan.fid);
+ err = _mv88e6xxx_fid_new(chip, &vlan.fid);
if (err)
return err;
/* exclude all ports except the CPU and DSA ports */
- for (i = 0; i < ps->info->num_ports; ++i)
+ for (i = 0; i < chip->info->num_ports; ++i)
vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
- if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
- mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) {
+ if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
+ mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) {
struct mv88e6xxx_vtu_stu_entry vstp;
/* Adding a VTU entry requires a valid STU entry. As VSTP is not
@@ -1944,7 +2026,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
* entries. Thus, validate the SID 0.
*/
vlan.sid = 0;
- err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp);
+ err = _mv88e6xxx_stu_getnext(chip, GLOBAL_VTU_SID_MASK, &vstp);
if (err)
return err;
@@ -1953,7 +2035,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
vstp.valid = true;
vstp.sid = vlan.sid;
- err = _mv88e6xxx_stu_loadpurge(ps, &vstp);
+ err = _mv88e6xxx_stu_loadpurge(chip, &vstp);
if (err)
return err;
}
@@ -1963,7 +2045,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
return 0;
}
-static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
+static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
{
int err;
@@ -1971,11 +2053,11 @@ static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
if (!vid)
return -EINVAL;
- err = _mv88e6xxx_vtu_vid_write(ps, vid - 1);
+ err = _mv88e6xxx_vtu_vid_write(chip, vid - 1);
if (err)
return err;
- err = _mv88e6xxx_vtu_getnext(ps, entry);
+ err = _mv88e6xxx_vtu_getnext(chip, entry);
if (err)
return err;
@@ -1986,7 +2068,7 @@ static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
 * -EOPNOTSUPP to inform the bridge about a possible software VLAN.
*/
- err = _mv88e6xxx_vtu_new(ps, vid, entry);
+ err = _mv88e6xxx_vtu_new(chip, vid, entry);
}
return err;
@@ -1995,21 +2077,21 @@ static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
struct mv88e6xxx_vtu_stu_entry vlan;
int i, err;
if (!vid_begin)
return -EOPNOTSUPP;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1);
+ err = _mv88e6xxx_vtu_vid_write(chip, vid_begin - 1);
if (err)
goto unlock;
do {
- err = _mv88e6xxx_vtu_getnext(ps, &vlan);
+ err = _mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
goto unlock;
@@ -2019,7 +2101,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (vlan.vid > vid_end)
break;
- for (i = 0; i < ps->info->num_ports; ++i) {
+ for (i = 0; i < chip->info->num_ports; ++i) {
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
@@ -2027,21 +2109,21 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
- if (ps->ports[i].bridge_dev ==
- ps->ports[port].bridge_dev)
+ if (chip->ports[i].bridge_dev ==
+ chip->ports[port].bridge_dev)
break; /* same bridge, check next VLAN */
netdev_warn(ds->ports[port].netdev,
"hardware VLAN %d already used by %s\n",
vlan.vid,
- netdev_name(ps->ports[i].bridge_dev));
+ netdev_name(chip->ports[i].bridge_dev));
err = -EOPNOTSUPP;
goto unlock;
}
} while (vlan.vid < vid_end);
unlock:
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return err;
}
@@ -2056,17 +2138,17 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
PORT_CONTROL_2_8021Q_DISABLED;
int ret;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
return -EOPNOTSUPP;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2);
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_2);
if (ret < 0)
goto unlock;
@@ -2076,7 +2158,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
ret &= ~PORT_CONTROL_2_8021Q_MASK;
ret |= new & PORT_CONTROL_2_8021Q_MASK;
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2,
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_2,
ret);
if (ret < 0)
goto unlock;
@@ -2088,19 +2170,20 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
ret = 0;
unlock:
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return ret;
}
-static int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+static int
+mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int err;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
return -EOPNOTSUPP;
/* If the requested port doesn't belong to the same bridge as the VLAN
@@ -2117,13 +2200,13 @@ static int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
return 0;
}
-static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port,
+static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
u16 vid, bool untagged)
{
struct mv88e6xxx_vtu_stu_entry vlan;
int err;
- err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true);
+ err = _mv88e6xxx_vtu_get(chip, vid, &vlan, true);
if (err)
return err;
@@ -2131,44 +2214,44 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port,
GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
- return _mv88e6xxx_vtu_loadpurge(ps, &vlan);
+ return _mv88e6xxx_vtu_loadpurge(chip, &vlan);
}
static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
u16 vid;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
return;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged))
+ if (_mv88e6xxx_port_vlan_add(chip, port, vid, untagged))
netdev_err(ds->ports[port].netdev,
"failed to add VLAN %d%c\n",
vid, untagged ? 'u' : 't');
- if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end))
+ if (pvid && _mv88e6xxx_port_pvid_set(chip, port, vlan->vid_end))
netdev_err(ds->ports[port].netdev, "failed to set PVID %d\n",
vlan->vid_end);
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
}
-static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
int port, u16 vid)
{
- struct dsa_switch *ds = ps->ds;
+ struct dsa_switch *ds = chip->ds;
struct mv88e6xxx_vtu_stu_entry vlan;
int i, err;
- err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
+ err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false);
if (err)
return err;
@@ -2180,7 +2263,7 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
/* keep the VLAN unless all ports are excluded */
vlan.valid = false;
- for (i = 0; i < ps->info->num_ports; ++i) {
+ for (i = 0; i < chip->info->num_ports; ++i) {
if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
continue;
@@ -2190,55 +2273,55 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
}
}
- err = _mv88e6xxx_vtu_loadpurge(ps, &vlan);
+ err = _mv88e6xxx_vtu_loadpurge(chip, &vlan);
if (err)
return err;
- return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false);
+ return _mv88e6xxx_atu_remove(chip, vlan.fid, port, false);
}
static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
u16 pvid, vid;
int err = 0;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
return -EOPNOTSUPP;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
+ err = _mv88e6xxx_port_pvid_get(chip, port, &pvid);
if (err)
goto unlock;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = _mv88e6xxx_port_vlan_del(ps, port, vid);
+ err = _mv88e6xxx_port_vlan_del(chip, port, vid);
if (err)
goto unlock;
if (vid == pvid) {
- err = _mv88e6xxx_port_pvid_set(ps, port, 0);
+ err = _mv88e6xxx_port_pvid_set(chip, port, 0);
if (err)
goto unlock;
}
}
unlock:
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return err;
}
-static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip,
const unsigned char *addr)
{
int i, ret;
for (i = 0; i < 3; i++) {
ret = _mv88e6xxx_reg_write(
- ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
+ chip, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
(addr[i * 2] << 8) | addr[i * 2 + 1]);
if (ret < 0)
return ret;
@@ -2247,13 +2330,13 @@ static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps,
return 0;
}
-static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip,
unsigned char *addr)
{
int i, ret;
for (i = 0; i < 3; i++) {
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
GLOBAL_ATU_MAC_01 + i);
if (ret < 0)
return ret;
@@ -2264,27 +2347,27 @@ static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps,
return 0;
}
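
The two MAC helpers above split a 6-byte address across three 16-bit ATU registers, high byte first. A round-trip sketch of that packing (user-space, illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Two address bytes per register, most significant byte first. */
    static void mac_to_regs(const uint8_t mac[6], uint16_t regs[3])
    {
            int i;

            for (i = 0; i < 3; i++)
                    regs[i] = (mac[i * 2] << 8) | mac[i * 2 + 1];
    }

    static void regs_to_mac(const uint16_t regs[3], uint8_t mac[6])
    {
            int i;

            for (i = 0; i < 3; i++) {
                    mac[i * 2] = regs[i] >> 8;
                    mac[i * 2 + 1] = regs[i] & 0xff;
            }
    }

    int main(void)
    {
            uint8_t mac[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e }, out[6];
            uint16_t regs[3];

            mac_to_regs(mac, regs);
            regs_to_mac(regs, out);
            printf("0x%04x 0x%04x 0x%04x\n", regs[0], regs[1], regs[2]);
            return 0;
    }
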
-static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_atu_entry *entry)
{
int ret;
- ret = _mv88e6xxx_atu_wait(ps);
+ ret = _mv88e6xxx_atu_wait(chip);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_mac_write(ps, entry->mac);
+ ret = _mv88e6xxx_atu_mac_write(chip, entry->mac);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_data_write(ps, entry);
+ ret = _mv88e6xxx_atu_data_write(chip, entry);
if (ret < 0)
return ret;
- return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
+ return _mv88e6xxx_atu_cmd(chip, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
}
-static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
+static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_chip *chip, int port,
const unsigned char *addr, u16 vid,
u8 state)
{
@@ -2294,9 +2377,9 @@ static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
/* Null VLAN ID corresponds to the port private database */
if (vid == 0)
- err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid);
+ err = _mv88e6xxx_port_fid_get(chip, port, &vlan.fid);
else
- err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
+ err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false);
if (err)
return err;
@@ -2308,16 +2391,16 @@ static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
entry.portv_trunkid = BIT(port);
}
- return _mv88e6xxx_atu_load(ps, &entry);
+ return _mv88e6xxx_atu_load(chip, &entry);
}
static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_ATU))
return -EOPNOTSUPP;
/* We don't need any dynamic resource from the kernel (yet),
@@ -2333,36 +2416,36 @@ static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
int state = is_multicast_ether_addr(fdb->addr) ?
GLOBAL_ATU_DATA_STATE_MC_STATIC :
GLOBAL_ATU_DATA_STATE_UC_STATIC;
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_ATU))
return;
- mutex_lock(&ps->smi_mutex);
- if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state))
+ mutex_lock(&chip->reg_lock);
+ if (_mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid, state))
netdev_err(ds->ports[port].netdev,
"failed to load MAC address\n");
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
}
static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int ret;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_ATU))
return -EOPNOTSUPP;
- mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid,
+ mutex_lock(&chip->reg_lock);
+ ret = _mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid,
GLOBAL_ATU_DATA_STATE_UNUSED);
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return ret;
}
-static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
+static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
struct mv88e6xxx_atu_entry *entry)
{
struct mv88e6xxx_atu_entry next = { 0 };
@@ -2370,19 +2453,19 @@ static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
next.fid = fid;
- ret = _mv88e6xxx_atu_wait(ps);
+ ret = _mv88e6xxx_atu_wait(chip);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+ ret = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_mac_read(ps, next.mac);
+ ret = _mv88e6xxx_atu_mac_read(chip, next.mac);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_DATA);
if (ret < 0)
return ret;
@@ -2407,7 +2490,7 @@ static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
return 0;
}
-static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_chip *chip,
u16 fid, u16 vid, int port,
struct switchdev_obj_port_fdb *fdb,
int (*cb)(struct switchdev_obj *obj))
@@ -2417,12 +2500,12 @@ static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps,
};
int err;
- err = _mv88e6xxx_atu_mac_write(ps, addr.mac);
+ err = _mv88e6xxx_atu_mac_write(chip, addr.mac);
if (err)
return err;
do {
- err = _mv88e6xxx_atu_getnext(ps, fid, &addr);
+ err = _mv88e6xxx_atu_getnext(chip, fid, &addr);
if (err)
break;
@@ -2452,48 +2535,48 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
int (*cb)(struct switchdev_obj *obj))
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
struct mv88e6xxx_vtu_stu_entry vlan = {
.vid = GLOBAL_VTU_VID_MASK, /* all ones */
};
u16 fid;
int err;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_ATU))
return -EOPNOTSUPP;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
/* Dump port's default Filtering Information Database (VLAN ID 0) */
- err = _mv88e6xxx_port_fid_get(ps, port, &fid);
+ err = _mv88e6xxx_port_fid_get(chip, port, &fid);
if (err)
goto unlock;
- err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb);
+ err = _mv88e6xxx_port_fdb_dump_one(chip, fid, 0, port, fdb, cb);
if (err)
goto unlock;
/* Dump VLANs' Filtering Information Databases */
- err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid);
+ err = _mv88e6xxx_vtu_vid_write(chip, vlan.vid);
if (err)
goto unlock;
do {
- err = _mv88e6xxx_vtu_getnext(ps, &vlan);
+ err = _mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
break;
if (!vlan.valid)
break;
- err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port,
- fdb, cb);
+ err = _mv88e6xxx_port_fdb_dump_one(chip, vlan.fid, vlan.vid,
+ port, fdb, cb);
if (err)
break;
} while (vlan.vid < GLOBAL_VTU_VID_MASK);
unlock:
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return err;
}
@@ -2501,101 +2584,101 @@ unlock:
static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int i, err = 0;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VLANTABLE))
return -EOPNOTSUPP;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
/* Assign the bridge and remap each port's VLANTable */
- ps->ports[port].bridge_dev = bridge;
+ chip->ports[port].bridge_dev = bridge;
- for (i = 0; i < ps->info->num_ports; ++i) {
- if (ps->ports[i].bridge_dev == bridge) {
- err = _mv88e6xxx_port_based_vlan_map(ps, i);
+ for (i = 0; i < chip->info->num_ports; ++i) {
+ if (chip->ports[i].bridge_dev == bridge) {
+ err = _mv88e6xxx_port_based_vlan_map(chip, i);
if (err)
break;
}
}
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return err;
}
static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- struct net_device *bridge = ps->ports[port].bridge_dev;
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct net_device *bridge = chip->ports[port].bridge_dev;
int i;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VLANTABLE))
return;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
/* Unassign the bridge and remap each port's VLANTable */
- ps->ports[port].bridge_dev = NULL;
+ chip->ports[port].bridge_dev = NULL;
- for (i = 0; i < ps->info->num_ports; ++i)
- if (i == port || ps->ports[i].bridge_dev == bridge)
- if (_mv88e6xxx_port_based_vlan_map(ps, i))
+ for (i = 0; i < chip->info->num_ports; ++i)
+ if (i == port || chip->ports[i].bridge_dev == bridge)
+ if (_mv88e6xxx_port_based_vlan_map(chip, i))
netdev_warn(ds->ports[i].netdev,
"failed to remap\n");
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
}
-static int _mv88e6xxx_mdio_page_write(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_mdio_page_write(struct mv88e6xxx_chip *chip,
int port, int page, int reg, int val)
{
int ret;
- ret = mv88e6xxx_mdio_write_indirect(ps, port, 0x16, page);
+ ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page);
if (ret < 0)
goto restore_page_0;
- ret = mv88e6xxx_mdio_write_indirect(ps, port, reg, val);
+ ret = mv88e6xxx_mdio_write_indirect(chip, port, reg, val);
restore_page_0:
- mv88e6xxx_mdio_write_indirect(ps, port, 0x16, 0x0);
+ mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0);
return ret;
}
-static int _mv88e6xxx_mdio_page_read(struct mv88e6xxx_priv_state *ps,
+static int _mv88e6xxx_mdio_page_read(struct mv88e6xxx_chip *chip,
int port, int page, int reg)
{
int ret;
- ret = mv88e6xxx_mdio_write_indirect(ps, port, 0x16, page);
+ ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page);
if (ret < 0)
goto restore_page_0;
- ret = mv88e6xxx_mdio_read_indirect(ps, port, reg);
+ ret = mv88e6xxx_mdio_read_indirect(chip, port, reg);
restore_page_0:
- mv88e6xxx_mdio_write_indirect(ps, port, 0x16, 0x0);
+ mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0);
return ret;
}
-static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps)
+static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
{
- bool ppu_active = mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE);
+ bool ppu_active = mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE);
u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
- struct gpio_desc *gpiod = ps->reset;
+ struct gpio_desc *gpiod = chip->reset;
unsigned long timeout;
int ret;
int i;
/* Set all ports to the disabled state. */
- for (i = 0; i < ps->info->num_ports; i++) {
- ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL);
+ for (i = 0; i < chip->info->num_ports; i++) {
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(i), PORT_CONTROL);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL,
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(i), PORT_CONTROL,
ret & 0xfffc);
if (ret)
return ret;
@@ -2617,16 +2700,16 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps)
* through global registers 0x18 and 0x19.
*/
if (ppu_active)
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000);
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc000);
else
- ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400);
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc400);
if (ret)
return ret;
/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00);
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, 0x00);
if (ret < 0)
return ret;
@@ -2642,18 +2725,18 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps)
return ret;
}
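
The reset path above is a bounded polling loop: kick the reset, then poll the global status word until the expected ready bits appear or a one-second deadline passes. A schematic user-space model (read_status() is a fake that reports completion after a few polls; in the driver the expected value depends on whether the PPU is active):

    #include <stdio.h>
    #include <time.h>

    /* Fake status source: flips to "reset done" after three polls. */
    static unsigned int read_status(void)
    {
            static int calls;

            return ++calls > 3 ? 0xc800 : 0;
    }

    /* Poll for the ready bits with a one-second deadline, the same
     * shape as the jiffies-based loop in the reset handler above.
     */
    static int wait_for_reset(unsigned int is_reset)
    {
            time_t deadline = time(NULL) + 1;

            while (time(NULL) <= deadline) {
                    if ((read_status() & is_reset) == is_reset)
                            return 0;
            }
            return -1; /* the driver returns -ETIMEDOUT here */
    }

    int main(void)
    {
            printf("%s\n", wait_for_reset(0xc800) ?
                   "timeout" : "reset complete");
            return 0;
    }
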
-static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps)
+static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_chip *chip)
{
int ret;
- ret = _mv88e6xxx_mdio_page_read(ps, REG_FIBER_SERDES,
+ ret = _mv88e6xxx_mdio_page_read(chip, REG_FIBER_SERDES,
PAGE_FIBER_SERDES, MII_BMCR);
if (ret < 0)
return ret;
if (ret & BMCR_PDOWN) {
ret &= ~BMCR_PDOWN;
- ret = _mv88e6xxx_mdio_page_write(ps, REG_FIBER_SERDES,
+ ret = _mv88e6xxx_mdio_page_write(chip, REG_FIBER_SERDES,
PAGE_FIBER_SERDES, MII_BMCR,
ret);
}
@@ -2661,30 +2744,30 @@ static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps)
return ret;
}
-static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
+static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
- struct dsa_switch *ds = ps->ds;
+ struct dsa_switch *ds = chip->ds;
int ret;
u16 reg;
- if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
- mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
- mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
- mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) {
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
+ mv88e6xxx_6065_family(chip) || mv88e6xxx_6320_family(chip)) {
/* MAC Forcing register: don't force link, speed,
* duplex or flow control state to any particular
* values on physical ports, but force the CPU port
* and all DSA ports to their maximum bandwidth and
* full duplex.
*/
- reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
+ reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL);
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
reg &= ~PORT_PCS_CTRL_UNFORCED;
reg |= PORT_PCS_CTRL_FORCE_LINK |
PORT_PCS_CTRL_LINK_UP |
PORT_PCS_CTRL_DUPLEX_FULL |
PORT_PCS_CTRL_FORCE_DUPLEX;
- if (mv88e6xxx_6065_family(ps))
+ if (mv88e6xxx_6065_family(chip))
reg |= PORT_PCS_CTRL_100;
else
reg |= PORT_PCS_CTRL_1000;
@@ -2692,7 +2775,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
reg |= PORT_PCS_CTRL_UNFORCED;
}
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
PORT_PCS_CTRL, reg);
if (ret)
return ret;
@@ -2713,37 +2796,46 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
* forwarding of unknown unicasts and multicasts.
*/
reg = 0;
- if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
- mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
- mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
- mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps))
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6095_family(chip) || mv88e6xxx_6065_family(chip) ||
+ mv88e6xxx_6185_family(chip) || mv88e6xxx_6320_family(chip))
reg = PORT_CONTROL_IGMP_MLD_SNOOP |
PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
PORT_CONTROL_STATE_FORWARDING;
if (dsa_is_cpu_port(ds, port)) {
- if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
+ if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip))
reg |= PORT_CONTROL_DSA_TAG;
- if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
- mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
- mv88e6xxx_6320_family(ps)) {
+ if (mv88e6xxx_6352_family(chip) ||
+ mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) ||
+ mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA |
PORT_CONTROL_FORWARD_UNKNOWN |
PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
- if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
- mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
- mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
- mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) {
- reg |= PORT_CONTROL_EGRESS_ADD_TAG;
+ if (mv88e6xxx_6352_family(chip) ||
+ mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) ||
+ mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6095_family(chip) ||
+ mv88e6xxx_6065_family(chip) ||
+ mv88e6xxx_6185_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
+ reg |= PORT_CONTROL_EGRESS_ADD_TAG;
}
}
if (dsa_is_dsa_port(ds, port)) {
- if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
+ if (mv88e6xxx_6095_family(chip) ||
+ mv88e6xxx_6185_family(chip))
reg |= PORT_CONTROL_DSA_TAG;
- if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
- mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
- mv88e6xxx_6320_family(ps)) {
+ if (mv88e6xxx_6352_family(chip) ||
+ mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) ||
+ mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
reg |= PORT_CONTROL_FRAME_MODE_DSA;
}
@@ -2752,7 +2844,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
if (reg) {
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
PORT_CONTROL, reg);
if (ret)
return ret;
@@ -2761,15 +2853,15 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
/* If this port is connected to a SerDes, make sure the SerDes is not
* powered down.
*/
- if (mv88e6xxx_6352_family(ps)) {
- ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
+ if (mv88e6xxx_6352_family(chip)) {
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS);
if (ret < 0)
return ret;
ret &= PORT_STATUS_CMODE_MASK;
if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
(ret == PORT_STATUS_CMODE_1000BASE_X) ||
(ret == PORT_STATUS_CMODE_SGMII)) {
- ret = mv88e6xxx_power_on_serdes(ps);
+ ret = mv88e6xxx_power_on_serdes(chip);
if (ret < 0)
return ret;
}
@@ -2782,17 +2874,17 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
* copy of all transmitted/received frames on this port to the CPU.
*/
reg = 0;
- if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
- mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
- mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) ||
- mv88e6xxx_6185_family(ps))
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) ||
+ mv88e6xxx_6185_family(chip))
reg = PORT_CONTROL_2_MAP_DA;
- if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
- mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps))
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6320_family(chip))
reg |= PORT_CONTROL_2_JUMBO_10240;
- if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) {
+ if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) {
/* Set the upstream port this port should use */
reg |= dsa_upstream_port(ds);
/* enable forwarding of unknown multicast addresses to
@@ -2805,7 +2897,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
reg |= PORT_CONTROL_2_8021Q_DISABLED;
if (reg) {
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
PORT_CONTROL_2, reg);
if (ret)
return ret;
@@ -2821,24 +2913,25 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
if (dsa_is_cpu_port(ds, port))
reg = 0;
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_ASSOC_VECTOR,
+ reg);
if (ret)
return ret;
/* Egress rate control 2: disable egress rate control. */
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2,
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_RATE_CONTROL_2,
0x0000);
if (ret)
return ret;
- if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
- mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
- mv88e6xxx_6320_family(ps)) {
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
/* Do not limit the period of time that this port can
* be paused for by the remote end or the period of
* time that this port can pause the remote end.
*/
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
PORT_PAUSE_CTRL, 0x0000);
if (ret)
return ret;
@@ -2847,12 +2940,12 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
* address database entries that this port is allowed
* to use.
*/
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
PORT_ATU_CONTROL, 0x0000);
/* Priority Override: disable DA, SA and VTU priority
* override.
*/
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
PORT_PRI_OVERRIDE, 0x0000);
if (ret)
return ret;
@@ -2860,14 +2953,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
/* Port Ethertype: use the Ethertype DSA Ethertype
* value.
*/
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
PORT_ETH_TYPE, ETH_P_EDSA);
if (ret)
return ret;
/* Tag Remap: use an identity 802.1p prio -> switch
* prio mapping.
*/
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
PORT_TAG_REGMAP_0123, 0x3210);
if (ret)
return ret;
@@ -2875,18 +2968,18 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
/* Tag Remap 2: use an identity 802.1p prio -> switch
* prio mapping.
*/
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
PORT_TAG_REGMAP_4567, 0x7654);
if (ret)
return ret;
}
- if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
- mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
- mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
- mv88e6xxx_6320_family(ps)) {
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
/* Rate Control: disable ingress rate limiting. */
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
PORT_RATE_CONTROL, 0x0001);
if (ret)
return ret;
@@ -2895,7 +2988,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
/* Port Control 1: disable trunking, disable sending
* learning messages to this port.
*/
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000);
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1,
+ 0x0000);
if (ret)
return ret;
@@ -2903,18 +2997,18 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
- ret = _mv88e6xxx_port_fid_set(ps, port, 0);
+ ret = _mv88e6xxx_port_fid_set(chip, port, 0);
if (ret)
return ret;
- ret = _mv88e6xxx_port_based_vlan_map(ps, port);
+ ret = _mv88e6xxx_port_based_vlan_map(chip, port);
if (ret)
return ret;
/* Default VLAN ID and priority: don't set a default VLAN
* ID, and set the default packet priority to zero.
*/
- ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN,
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_DEFAULT_VLAN,
0x0000);
if (ret)
return ret;
@@ -2922,9 +3016,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
return 0;
}
-static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
+static int mv88e6xxx_setup_global(struct mv88e6xxx_chip *chip)
{
- struct dsa_switch *ds = ps->ds;
+ struct dsa_switch *ds = chip->ds;
u32 upstream_port = dsa_upstream_port(ds);
u16 reg;
int err;
@@ -2934,11 +3028,11 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
* and mask all interrupt sources.
*/
reg = 0;
- if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU) ||
- mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE))
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU) ||
+ mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE))
reg |= GLOBAL_CONTROL_PPU_ENABLE;
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, reg);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, reg);
if (err)
return err;
@@ -2948,12 +3042,13 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MONITOR_CONTROL,
+ reg);
if (err)
return err;
/* Disable remote management, and set the switch's DSA device number. */
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2,
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL_2,
GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
(ds->index & 0x1f));
if (err)
@@ -2963,46 +3058,47 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
* enable address learn messages to be sent to all message
* ports.
*/
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL,
0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
if (err)
return err;
/* Configure the IP ToS mapping registers. */
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
if (err)
return err;
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
if (err)
return err;
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
if (err)
return err;
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
if (err)
return err;
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
if (err)
return err;
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
if (err)
return err;
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
if (err)
return err;
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
if (err)
return err;
/* Configure the IEEE 802.1p priority mapping register. */
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
if (err)
return err;
/* Send all frames with destination addresses matching
* 01:80:c2:00:00:0x to the CPU port.
*/
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X,
+ 0xffff);
if (err)
return err;
@@ -3011,7 +3107,7 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
* highest, and send all special multicast frames to the CPU
* port at the highest priority.
*/
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
if (err)
@@ -3025,7 +3121,7 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
nexthop = ds->rtable[i] & 0x1f;
err = _mv88e6xxx_reg_write(
- ps, REG_GLOBAL2,
+ chip, REG_GLOBAL2,
GLOBAL2_DEVICE_MAPPING,
GLOBAL2_DEVICE_MAPPING_UPDATE |
(i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop);
@@ -3035,10 +3131,11 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
/* Clear all trunk masks. */
for (i = 0; i < 8; i++) {
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2,
+ GLOBAL2_TRUNK_MASK,
0x8000 |
(i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
- ((1 << ps->info->num_ports) - 1));
+ ((1 << chip->info->num_ports) - 1));
if (err)
return err;
}
@@ -3046,7 +3143,7 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
/* Clear all trunk mappings. */
for (i = 0; i < 16; i++) {
err = _mv88e6xxx_reg_write(
- ps, REG_GLOBAL2,
+ chip, REG_GLOBAL2,
GLOBAL2_TRUNK_MAPPING,
GLOBAL2_TRUNK_MAPPING_UPDATE |
(i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
@@ -3054,13 +3151,13 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
return err;
}
- if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
- mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
- mv88e6xxx_6320_family(ps)) {
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
/* Send all frames with destination addresses matching
* 01:80:c2:00:00:2x to the CPU port.
*/
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2,
GLOBAL2_MGMT_EN_2X, 0xffff);
if (err)
return err;
@@ -3068,14 +3165,14 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
/* Initialise cross-chip port VLAN table to reset
* defaults.
*/
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2,
GLOBAL2_PVT_ADDR, 0x9000);
if (err)
return err;
/* Clear the priority override table. */
for (i = 0; i < 16; i++) {
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2,
GLOBAL2_PRIO_OVERRIDE,
0x8000 | (i << 8));
if (err)
@@ -3083,16 +3180,16 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
}
}
- if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
- mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
- mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
- mv88e6xxx_6320_family(ps)) {
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
/* Disable ingress rate limiting by resetting all
* ingress rate limit registers to their initial
* state.
*/
- for (i = 0; i < ps->info->num_ports; i++) {
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ for (i = 0; i < chip->info->num_ports; i++) {
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2,
GLOBAL2_INGRESS_OP,
0x9000 | (i << 8));
if (err)
@@ -3101,23 +3198,23 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
}
/* Clear the statistics counters for all ports */
- err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
GLOBAL_STATS_OP_FLUSH_ALL);
if (err)
return err;
/* Wait for the flush to complete. */
- err = _mv88e6xxx_stats_wait(ps);
+ err = _mv88e6xxx_stats_wait(chip);
if (err)
return err;
/* Clear all ATU entries */
- err = _mv88e6xxx_atu_flush(ps, 0, true);
+ err = _mv88e6xxx_atu_flush(chip, 0, true);
if (err)
return err;
/* Clear all the VTU and STU entries */
- err = _mv88e6xxx_vtu_stu_flush(ps);
+ err = _mv88e6xxx_vtu_stu_flush(chip);
if (err < 0)
return err;
@@ -3126,134 +3223,134 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
static int mv88e6xxx_setup(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int err;
int i;
- ps->ds = ds;
- ds->slave_mii_bus = ps->mdio_bus;
+ chip->ds = ds;
+ ds->slave_mii_bus = chip->mdio_bus;
- if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
- mutex_init(&ps->eeprom_mutex);
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM))
+ mutex_init(&chip->eeprom_mutex);
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_switch_reset(ps);
+ err = mv88e6xxx_switch_reset(chip);
if (err)
goto unlock;
- err = mv88e6xxx_setup_global(ps);
+ err = mv88e6xxx_setup_global(chip);
if (err)
goto unlock;
- for (i = 0; i < ps->info->num_ports; i++) {
- err = mv88e6xxx_setup_port(ps, i);
+ for (i = 0; i < chip->info->num_ports; i++) {
+ err = mv88e6xxx_setup_port(chip, i);
if (err)
goto unlock;
}
unlock:
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return err;
}
-int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, int reg)
+static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page,
+ int reg)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int ret;
- mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_mdio_page_read(ps, port, page, reg);
- mutex_unlock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
+ ret = _mv88e6xxx_mdio_page_read(chip, port, page, reg);
+ mutex_unlock(&chip->reg_lock);
return ret;
}
-int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page,
- int reg, int val)
+static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page,
+ int reg, int val)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int ret;
- mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_mdio_page_write(ps, port, page, reg, val);
- mutex_unlock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
+ ret = _mv88e6xxx_mdio_page_write(chip, port, page, reg, val);
+ mutex_unlock(&chip->reg_lock);
return ret;
}
-static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_priv_state *ps,
- int port)
+static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port)
{
- if (port >= 0 && port < ps->info->num_ports)
+ if (port >= 0 && port < chip->info->num_ports)
return port;
return -EINVAL;
}
static int mv88e6xxx_mdio_read(struct mii_bus *bus, int port, int regnum)
{
- struct mv88e6xxx_priv_state *ps = bus->priv;
- int addr = mv88e6xxx_port_to_mdio_addr(ps, port);
+ struct mv88e6xxx_chip *chip = bus->priv;
+ int addr = mv88e6xxx_port_to_mdio_addr(chip, port);
int ret;
if (addr < 0)
return 0xffff;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
- ret = mv88e6xxx_mdio_read_ppu(ps, addr, regnum);
- else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
- ret = mv88e6xxx_mdio_read_indirect(ps, addr, regnum);
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
+ ret = mv88e6xxx_mdio_read_ppu(chip, addr, regnum);
+ else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY))
+ ret = mv88e6xxx_mdio_read_indirect(chip, addr, regnum);
else
- ret = mv88e6xxx_mdio_read_direct(ps, addr, regnum);
+ ret = mv88e6xxx_mdio_read_direct(chip, addr, regnum);
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return ret;
}
static int mv88e6xxx_mdio_write(struct mii_bus *bus, int port, int regnum,
u16 val)
{
- struct mv88e6xxx_priv_state *ps = bus->priv;
- int addr = mv88e6xxx_port_to_mdio_addr(ps, port);
+ struct mv88e6xxx_chip *chip = bus->priv;
+ int addr = mv88e6xxx_port_to_mdio_addr(chip, port);
int ret;
if (addr < 0)
return 0xffff;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
- ret = mv88e6xxx_mdio_write_ppu(ps, addr, regnum, val);
- else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
- ret = mv88e6xxx_mdio_write_indirect(ps, addr, regnum, val);
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
+ ret = mv88e6xxx_mdio_write_ppu(chip, addr, regnum, val);
+ else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY))
+ ret = mv88e6xxx_mdio_write_indirect(chip, addr, regnum, val);
else
- ret = mv88e6xxx_mdio_write_direct(ps, addr, regnum, val);
+ ret = mv88e6xxx_mdio_write_direct(chip, addr, regnum, val);
- mutex_unlock(&ps->smi_mutex);
+ mutex_unlock(&chip->reg_lock);
return ret;
}
-static int mv88e6xxx_mdio_register(struct mv88e6xxx_priv_state *ps,
+static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
struct device_node *np)
{
static int index;
struct mii_bus *bus;
int err;
- if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
- mv88e6xxx_ppu_state_init(ps);
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
+ mv88e6xxx_ppu_state_init(chip);
if (np)
- ps->mdio_np = of_get_child_by_name(np, "mdio");
+ chip->mdio_np = of_get_child_by_name(np, "mdio");
- bus = devm_mdiobus_alloc(ps->dev);
+ bus = devm_mdiobus_alloc(chip->dev);
if (!bus)
return -ENOMEM;
- bus->priv = (void *)ps;
+ bus->priv = (void *)chip;
if (np) {
bus->name = np->full_name;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name);
@@ -3264,89 +3361,89 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_priv_state *ps,
bus->read = mv88e6xxx_mdio_read;
bus->write = mv88e6xxx_mdio_write;
- bus->parent = ps->dev;
+ bus->parent = chip->dev;
- if (ps->mdio_np)
- err = of_mdiobus_register(bus, ps->mdio_np);
+ if (chip->mdio_np)
+ err = of_mdiobus_register(bus, chip->mdio_np);
else
err = mdiobus_register(bus);
if (err) {
- dev_err(ps->dev, "Cannot register MDIO bus (%d)\n", err);
+ dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
goto out;
}
- ps->mdio_bus = bus;
+ chip->mdio_bus = bus;
return 0;
out:
- if (ps->mdio_np)
- of_node_put(ps->mdio_np);
+ if (chip->mdio_np)
+ of_node_put(chip->mdio_np);
return err;
}
-static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_priv_state *ps)
+static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip)
{
- struct mii_bus *bus = ps->mdio_bus;
+ struct mii_bus *bus = chip->mdio_bus;
mdiobus_unregister(bus);
- if (ps->mdio_np)
- of_node_put(ps->mdio_np);
+ if (chip->mdio_np)
+ of_node_put(chip->mdio_np);
}
#ifdef CONFIG_NET_DSA_HWMON
static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
int ret;
int val;
*temp = 0;
- mutex_lock(&ps->smi_mutex);
+ mutex_lock(&chip->reg_lock);
- ret = mv88e6xxx_mdio_write_direct(ps, 0x0, 0x16, 0x6);
+ ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x6);
if (ret < 0)
goto error;
/* Enable temperature sensor */
- ret = mv88e6xxx_mdio_read_direct(ps, 0x0, 0x1a);
+ ret = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a);
if (ret < 0)
goto error;
- ret = mv88e6xxx_mdio_write_direct(ps, 0x0, 0x1a, ret | (1 << 5));
+ ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret | (1 << 5));
if (ret < 0)
goto error;
/* Wait for temperature to stabilize */
usleep_range(10000, 12000);
- val = mv88e6xxx_mdio_read_direct(ps, 0x0, 0x1a);
+ val = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a);
if (val < 0) {
ret = val;
goto error;
}
/* Disable temperature sensor */
- ret = mv88e6xxx_mdio_write_direct(ps, 0x0, 0x1a, ret & ~(1 << 5));
+ ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret & ~(1 << 5));
if (ret < 0)
goto error;
*temp = ((val & 0x1f) - 5) * 5;
error:
- mv88e6xxx_mdio_write_direct(ps, 0x0, 0x16, 0x0);
- mutex_unlock(&ps->smi_mutex);
+ mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x0);
+ mutex_unlock(&chip->reg_lock);
return ret;
}
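For reference, the 5-bit raw reading above decodes linearly to degrees Celsius; a small illustration (the helper name is hypothetical, mirroring the *temp computation in the function above):

/* Hypothetical helper: a raw reading of 25 decodes to (25 - 5) * 5 = 100 C */
static int mv88e61xx_decode_temp(int raw)
{
	return ((raw & 0x1f) - 5) * 5;
}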
static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
int ret;
*temp = 0;
@@ -3362,12 +3459,12 @@ static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP))
return -EOPNOTSUPP;
- if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
+ if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
return mv88e63xx_get_temp(ds, temp);
return mv88e61xx_get_temp(ds, temp);
@@ -3375,11 +3472,11 @@ static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
int ret;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
return -EOPNOTSUPP;
*temp = 0;
@@ -3395,11 +3492,11 @@ static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
int ret;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
return -EOPNOTSUPP;
ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
@@ -3412,11 +3509,11 @@ static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
int ret;
- if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
return -EOPNOTSUPP;
*alarm = false;
@@ -3438,6 +3535,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6085",
.num_databases = 4096,
.num_ports = 10,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6097,
},
@@ -3447,6 +3545,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6095/88E6095F",
.num_databases = 256,
.num_ports = 11,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6095,
},
@@ -3456,6 +3555,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6123",
.num_databases = 4096,
.num_ports = 3,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
},
@@ -3465,6 +3565,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6131",
.num_databases = 256,
.num_ports = 8,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
},
@@ -3474,6 +3575,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6161",
.num_databases = 4096,
.num_ports = 6,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
},
@@ -3483,6 +3585,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6165",
.num_databases = 4096,
.num_ports = 6,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
},
@@ -3492,6 +3595,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6171",
.num_databases = 4096,
.num_ports = 7,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
},
@@ -3501,6 +3605,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6172",
.num_databases = 4096,
.num_ports = 7,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
},
@@ -3510,6 +3615,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6175",
.num_databases = 4096,
.num_ports = 7,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
},
@@ -3519,6 +3625,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6176",
.num_databases = 4096,
.num_ports = 7,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
},
@@ -3528,6 +3635,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6185",
.num_databases = 256,
.num_ports = 10,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
},
@@ -3537,6 +3645,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6240",
.num_databases = 4096,
.num_ports = 7,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
},
@@ -3546,6 +3655,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6320",
.num_databases = 4096,
.num_ports = 7,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
},
@@ -3555,6 +3665,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6321",
.num_databases = 4096,
.num_ports = 7,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
},
@@ -3564,6 +3675,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6350",
.num_databases = 4096,
.num_ports = 7,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
},
@@ -3573,6 +3685,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6351",
.num_databases = 4096,
.num_ports = 7,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
},
@@ -3582,75 +3695,124 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6352",
.num_databases = 4096,
.num_ports = 7,
+ .port_base_addr = 0x10,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
},
};
-static const struct mv88e6xxx_info *
-mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table,
- unsigned int num)
+static const struct mv88e6xxx_info *mv88e6xxx_lookup_info(unsigned int prod_num)
{
int i;
- for (i = 0; i < num; ++i)
- if (table[i].prod_num == prod_num)
- return &table[i];
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_table); ++i)
+ if (mv88e6xxx_table[i].prod_num == prod_num)
+ return &mv88e6xxx_table[i];
return NULL;
}
-static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
- struct device *host_dev, int sw_addr,
- void **priv)
+static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
{
const struct mv88e6xxx_info *info;
- struct mv88e6xxx_priv_state *ps;
- struct mii_bus *bus;
- const char *name;
int id, prod_num, rev;
- int err;
-
- bus = dsa_host_dev_to_mii_bus(host_dev);
- if (!bus)
- return NULL;
- id = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
+ id = mv88e6xxx_reg_read(chip, chip->info->port_base_addr,
+ PORT_SWITCH_ID);
if (id < 0)
- return NULL;
+ return id;
prod_num = (id & 0xfff0) >> 4;
rev = id & 0x000f;
- info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
- ARRAY_SIZE(mv88e6xxx_table));
+ info = mv88e6xxx_lookup_info(prod_num);
if (!info)
+ return -ENODEV;
+
+ /* Update the compatible info with the probed one */
+ chip->info = info;
+
+ dev_info(chip->dev, "switch 0x%x detected: %s, revision %u\n",
+ chip->info->prod_num, chip->info->name, rev);
+
+ return 0;
+}
+
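The switch ID register packs the product number into bits 15:4 and the revision into bits 3:0, as the decode above shows; a hypothetical worked example (function name is illustrative only):

/* Hypothetical: an 88E6352 at revision 1 would report id = 0x3521 */
static void mv88e6xxx_decode_id_example(void)
{
	u16 id = 0x3521;

	pr_info("prod_num=0x%x rev=%u\n",
		(id & 0xfff0) >> 4,	/* 0x352 */
		id & 0x000f);		/* 1 */
}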
+static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
+{
+ struct mv88e6xxx_chip *chip;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
return NULL;
- name = info->name;
+ chip->dev = dev;
+
+ mutex_init(&chip->reg_lock);
+
+ return chip;
+}
+
+static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int sw_addr)
+{
+ /* ADDR[0] pin is unavailable externally and considered zero */
+ if (sw_addr & 0x1)
+ return -EINVAL;
+
+ if (sw_addr == 0)
+ chip->smi_ops = &mv88e6xxx_smi_single_chip_ops;
+ else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_MULTI_CHIP))
+ chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops;
+ else
+ return -EINVAL;
+
+ chip->bus = bus;
+ chip->sw_addr = sw_addr;
+
+ return 0;
+}
+
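The single- and multi-chip smi_ops selected here are defined earlier in the driver. As a minimal sketch of the indirect (multi-chip) read path, using the SMI_CMD/SMI_DATA register names from mv88e6xxx.h and eliding the SMI_CMD_BUSY polling the real code performs:

static int mv88e6xxx_smi_multi_chip_read_sketch(struct mv88e6xxx_chip *chip,
						int addr, int reg, u16 *val)
{
	int ret;

	/* Point the switch at the (device, register) pair to read;
	 * the real implementation busy-waits around this step.
	 */
	ret = mdiobus_write(chip->bus, chip->sw_addr, SMI_CMD,
			    SMI_CMD_OP_22_READ | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* The result is latched into SMI_DATA */
	ret = mdiobus_read(chip->bus, chip->sw_addr, SMI_DATA);
	if (ret < 0)
		return ret;

	*val = ret & 0xffff;
	return 0;
}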
+static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
+ struct device *host_dev, int sw_addr,
+ void **priv)
+{
+ struct mv88e6xxx_chip *chip;
+ struct mii_bus *bus;
+ int err;
+
+ bus = dsa_host_dev_to_mii_bus(host_dev);
+ if (!bus)
+ return NULL;
- ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
- if (!ps)
+ chip = mv88e6xxx_alloc_chip(dsa_dev);
+ if (!chip)
return NULL;
- ps->bus = bus;
- ps->sw_addr = sw_addr;
- ps->info = info;
- ps->dev = dsa_dev;
- mutex_init(&ps->smi_mutex);
+ /* Legacy SMI probing will only support chips similar to 88E6085 */
+ chip->info = &mv88e6xxx_table[MV88E6085];
- err = mv88e6xxx_mdio_register(ps, NULL);
+ err = mv88e6xxx_smi_init(chip, bus, sw_addr);
if (err)
- return NULL;
+ goto free;
+
+ err = mv88e6xxx_detect(chip);
+ if (err)
+ goto free;
+
+ err = mv88e6xxx_mdio_register(chip, NULL);
+ if (err)
+ goto free;
- *priv = ps;
+ *priv = chip;
- dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n",
- prod_num, name, rev);
+ return chip->info->name;
+free:
+ devm_kfree(dsa_dev, chip);
- return name;
+ return NULL;
}
-struct dsa_switch_driver mv88e6xxx_switch_driver = {
+static struct dsa_switch_driver mv88e6xxx_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_EDSA,
.probe = mv88e6xxx_drv_probe,
.setup = mv88e6xxx_setup,
@@ -3686,93 +3848,92 @@ struct dsa_switch_driver mv88e6xxx_switch_driver = {
.port_fdb_dump = mv88e6xxx_port_fdb_dump,
};
-int mv88e6xxx_probe(struct mdio_device *mdiodev)
+static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip,
+ struct device_node *np)
{
- struct device *dev = &mdiodev->dev;
- struct device_node *np = dev->of_node;
- struct mv88e6xxx_priv_state *ps;
- int id, prod_num, rev;
+ struct device *dev = chip->dev;
struct dsa_switch *ds;
- u32 eeprom_len;
- int err;
- ds = devm_kzalloc(dev, sizeof(*ds) + sizeof(*ps), GFP_KERNEL);
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
- ps = (struct mv88e6xxx_priv_state *)(ds + 1);
- ds->priv = ps;
ds->dev = dev;
- ps->dev = dev;
- ps->ds = ds;
- ps->bus = mdiodev->bus;
- ps->sw_addr = mdiodev->addr;
- mutex_init(&ps->smi_mutex);
+ ds->priv = chip;
+ ds->drv = &mv88e6xxx_switch_driver;
- get_device(&ps->bus->dev);
+ dev_set_drvdata(dev, ds);
- ds->drv = &mv88e6xxx_switch_driver;
+ return dsa_register_switch(ds, np);
+}
- id = mv88e6xxx_reg_read(ps, REG_PORT(0), PORT_SWITCH_ID);
- if (id < 0)
- return id;
+static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip)
+{
+ dsa_unregister_switch(chip->ds);
+}
- prod_num = (id & 0xfff0) >> 4;
- rev = id & 0x000f;
+static int mv88e6xxx_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct device_node *np = dev->of_node;
+ const struct mv88e6xxx_info *compat_info;
+ struct mv88e6xxx_chip *chip;
+ u32 eeprom_len;
+ int err;
- ps->info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
- ARRAY_SIZE(mv88e6xxx_table));
- if (!ps->info)
- return -ENODEV;
+ compat_info = of_device_get_match_data(dev);
+ if (!compat_info)
+ return -EINVAL;
- ps->reset = devm_gpiod_get(&mdiodev->dev, "reset", GPIOD_ASIS);
- if (IS_ERR(ps->reset)) {
- err = PTR_ERR(ps->reset);
- if (err == -ENOENT) {
- /* Optional, so not an error */
- ps->reset = NULL;
- } else {
- return err;
- }
- }
+ chip = mv88e6xxx_alloc_chip(dev);
+ if (!chip)
+ return -ENOMEM;
- if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM) &&
- !of_property_read_u32(np, "eeprom-length", &eeprom_len))
- ps->eeprom_len = eeprom_len;
+ chip->info = compat_info;
- err = mv88e6xxx_mdio_register(ps, mdiodev->dev.of_node);
+ err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
if (err)
return err;
- ds->slave_mii_bus = ps->mdio_bus;
+ err = mv88e6xxx_detect(chip);
+ if (err)
+ return err;
- dev_set_drvdata(dev, ds);
+ chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(chip->reset))
+ return PTR_ERR(chip->reset);
- err = dsa_register_switch(ds, mdiodev->dev.of_node);
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM) &&
+ !of_property_read_u32(np, "eeprom-length", &eeprom_len))
+ chip->eeprom_len = eeprom_len;
+
+ err = mv88e6xxx_mdio_register(chip, np);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_register_switch(chip, np);
if (err) {
- mv88e6xxx_mdio_unregister(ps);
+ mv88e6xxx_mdio_unregister(chip);
return err;
}
- dev_info(dev, "switch 0x%x probed: %s, revision %u\n",
- prod_num, ps->info->name, rev);
-
return 0;
}
static void mv88e6xxx_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- dsa_unregister_switch(ds);
- put_device(&ps->bus->dev);
-
- mv88e6xxx_mdio_unregister(ps);
+ mv88e6xxx_unregister_switch(chip);
+ mv88e6xxx_mdio_unregister(chip);
}
static const struct of_device_id mv88e6xxx_of_match[] = {
- { .compatible = "marvell,mv88e6085" },
+ {
+ .compatible = "marvell,mv88e6085",
+ .data = &mv88e6xxx_table[MV88E6085],
+ },
{ /* sentinel */ },
};
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index 8221c3c7ec5a..83f06620133d 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -1,5 +1,6 @@
/*
- * net/dsa/mv88e6xxx.h - Marvell 88e6xxx switch chip support
+ * Marvell 88e6xxx common definitions
+ *
* Copyright (c) 2008 Marvell Semiconductor
*
* This program is free software; you can redistribute it and/or modify
@@ -387,6 +388,12 @@ enum mv88e6xxx_cap {
*/
MV88E6XXX_CAP_EEPROM,
+ /* Multi-chip Addressing Mode.
+ * Some chips require an indirect SMI access when their SMI device
+ * address is not zero. See SMI_CMD and SMI_DATA.
+ */
+ MV88E6XXX_CAP_MULTI_CHIP,
+
/* Port State Filtering for 802.1D Spanning Tree.
* See PORT_CONTROL_STATE_* values in the PORT_CONTROL register.
*/
@@ -439,6 +446,7 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAG_ATU BIT(MV88E6XXX_CAP_ATU)
#define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE)
#define MV88E6XXX_FLAG_EEPROM BIT(MV88E6XXX_CAP_EEPROM)
+#define MV88E6XXX_FLAG_MULTI_CHIP BIT(MV88E6XXX_CAP_MULTI_CHIP)
#define MV88E6XXX_FLAG_PORTSTATE BIT(MV88E6XXX_CAP_PORTSTATE)
#define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU)
#define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE)
@@ -452,25 +460,29 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAGS_FAMILY_6095 \
(MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
MV88E6XXX_FLAG_PPU | \
MV88E6XXX_FLAG_VLANTABLE | \
MV88E6XXX_FLAG_VTU)
#define MV88E6XXX_FLAGS_FAMILY_6097 \
(MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
MV88E6XXX_FLAG_PPU | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_VLANTABLE | \
MV88E6XXX_FLAG_VTU)
#define MV88E6XXX_FLAGS_FAMILY_6165 \
- (MV88E6XXX_FLAG_STU | \
+ (MV88E6XXX_FLAG_MULTI_CHIP | \
+ MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_SWITCH_MAC | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_VTU)
#define MV88E6XXX_FLAGS_FAMILY_6185 \
(MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
MV88E6XXX_FLAG_PPU | \
MV88E6XXX_FLAG_VLANTABLE | \
MV88E6XXX_FLAG_VTU)
@@ -479,6 +491,7 @@ enum mv88e6xxx_cap {
(MV88E6XXX_FLAG_ATU | \
MV88E6XXX_FLAG_EEE | \
MV88E6XXX_FLAG_EEPROM | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
MV88E6XXX_FLAG_PORTSTATE | \
MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_SMI_PHY | \
@@ -490,6 +503,7 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAGS_FAMILY_6351 \
(MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
MV88E6XXX_FLAG_PORTSTATE | \
MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_SMI_PHY | \
@@ -503,6 +517,7 @@ enum mv88e6xxx_cap {
(MV88E6XXX_FLAG_ATU | \
MV88E6XXX_FLAG_EEE | \
MV88E6XXX_FLAG_EEPROM | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
MV88E6XXX_FLAG_PORTSTATE | \
MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_SMI_PHY | \
@@ -519,6 +534,7 @@ struct mv88e6xxx_info {
const char *name;
unsigned int num_databases;
unsigned int num_ports;
+ unsigned int port_base_addr;
unsigned long flags;
};
@@ -541,11 +557,13 @@ struct mv88e6xxx_vtu_stu_entry {
u8 data[DSA_MAX_PORTS];
};
+struct mv88e6xxx_ops;
+
struct mv88e6xxx_priv_port {
struct net_device *bridge_dev;
};
-struct mv88e6xxx_priv_state {
+struct mv88e6xxx_chip {
const struct mv88e6xxx_info *info;
/* The dsa_switch this private structure is related to */
@@ -554,15 +572,13 @@ struct mv88e6xxx_priv_state {
/* The device this structure is associated to */
struct device *dev;
- /* When using multi-chip addressing, this mutex protects
- * access to the indirect access registers. (In single-chip
- * mode, this mutex is effectively useless.)
- */
- struct mutex smi_mutex;
+ /* This mutex protects the access to the switch registers */
+ struct mutex reg_lock;
/* The MII bus and the address on the bus that is used to
 * communicate with the switch
 */
+ const struct mv88e6xxx_ops *smi_ops;
struct mii_bus *bus;
int sw_addr;
@@ -608,6 +624,11 @@ struct mv88e6xxx_priv_state {
struct mii_bus *mdio_bus;
};
+struct mv88e6xxx_ops {
+ int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
+ int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
+};
+
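With read/write split into an ops structure, the top-level register accessors can dispatch to whichever backend mv88e6xxx_smi_init() selected; a minimal sketch (the driver's real accessor also checks locking and logs the access):

static int mv88e6xxx_read_sketch(struct mv88e6xxx_chip *chip,
				 int addr, int reg, u16 *val)
{
	/* chip->smi_ops is either the single- or multi-chip variant */
	return chip->smi_ops->read(chip, addr, reg, val);
}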
enum stat_type {
BANK0,
BANK1,
@@ -621,10 +642,10 @@ struct mv88e6xxx_hw_stat {
enum stat_type type;
};
-static inline bool mv88e6xxx_has(struct mv88e6xxx_priv_state *ps,
+static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip,
unsigned long flags)
{
- return (ps->info->flags & flags) == flags;
+ return (chip->info->flags & flags) == flags;
}
#endif
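Note that mv88e6xxx_has() requires *all* bits in the mask to be set, so multi-flag queries are conjunctive; a small illustration (the helper name is hypothetical):

static bool chip_supports_eeprom_and_multichip(struct mv88e6xxx_chip *chip)
{
	/* True only if both capability flags are present */
	return mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM |
				   MV88E6XXX_FLAG_MULTI_CHIP);
}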
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index c89b9aeeceb6..5698f5354c0b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -84,7 +84,6 @@ static u32 ax_msg_enable;
struct ax_device {
struct mii_bus *mii_bus;
struct mdiobb_ctrl bb_ctrl;
- struct phy_device *phy_dev;
void __iomem *addr_memr;
u8 reg_memr;
int link;
@@ -320,7 +319,7 @@ static void ax_block_output(struct net_device *dev, int count,
static void ax_handle_link_change(struct net_device *dev)
{
struct ax_device *ax = to_ax_dev(dev);
- struct phy_device *phy_dev = ax->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
int status_change = 0;
if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
@@ -369,8 +368,6 @@ static int ax_mii_probe(struct net_device *dev)
phy_dev->supported &= PHY_BASIC_FEATURES;
phy_dev->advertising = phy_dev->supported;
- ax->phy_dev = phy_dev;
-
netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq);
@@ -410,7 +407,7 @@ static int ax_open(struct net_device *dev)
ret = ax_mii_probe(dev);
if (ret)
goto failed_mii_probe;
- phy_start(ax->phy_dev);
+ phy_start(dev->phydev);
ret = ax_ei_open(dev);
if (ret)
@@ -421,7 +418,7 @@ static int ax_open(struct net_device *dev)
return 0;
failed_ax_ei_open:
- phy_disconnect(ax->phy_dev);
+ phy_disconnect(dev->phydev);
failed_mii_probe:
ax_phy_switch(dev, 0);
free_irq(dev->irq, dev);
@@ -442,7 +439,7 @@ static int ax_close(struct net_device *dev)
/* turn the phy off */
ax_phy_switch(dev, 0);
- phy_disconnect(ax->phy_dev);
+ phy_disconnect(dev->phydev);
free_irq(dev->irq, dev);
return 0;
@@ -450,8 +447,7 @@ static int ax_close(struct net_device *dev)
static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
- struct ax_device *ax = to_ax_dev(dev);
- struct phy_device *phy_dev = ax->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -474,28 +470,6 @@ static void ax_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
-static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ax_device *ax = to_ax_dev(dev);
- struct phy_device *phy_dev = ax->phy_dev;
-
- if (!phy_dev)
- return -ENODEV;
-
- return phy_ethtool_gset(phy_dev, cmd);
-}
-
-static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ax_device *ax = to_ax_dev(dev);
- struct phy_device *phy_dev = ax->phy_dev;
-
- if (!phy_dev)
- return -ENODEV;
-
- return phy_ethtool_sset(phy_dev, cmd);
-}
-
static u32 ax_get_msglevel(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
@@ -512,12 +486,12 @@ static void ax_set_msglevel(struct net_device *dev, u32 v)
static const struct ethtool_ops ax_ethtool_ops = {
.get_drvinfo = ax_get_drvinfo,
- .get_settings = ax_get_settings,
- .set_settings = ax_set_settings,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_msglevel = ax_get_msglevel,
.set_msglevel = ax_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
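This conversion repeats across the drivers below: the private phy_dev pointer and the get/set_settings callbacks are dropped in favor of the generic phy_ethtool_{get,set}_link_ksettings helpers, which reach the PHY through dev->phydev. Roughly, the generic getter amounts to the following (see drivers/net/phy/phy.c):

int phy_ethtool_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	/* This is why each converted driver must populate ndev->phydev */
	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_get(phydev, cmd);
}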
#ifdef CONFIG_AX88796_93CX6
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 30defe6c81f2..cd7e2e5f496b 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -440,7 +440,6 @@ struct et131x_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct mii_bus *mii_bus;
- struct phy_device *phydev;
struct napi_struct napi;
/* Flags that indicate current state of the adapter */
@@ -864,7 +863,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
int32_t delay = 0;
struct mac_regs __iomem *mac = &adapter->regs->mac;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
u32 cfg1;
u32 cfg2;
u32 ifctrl;
@@ -1035,7 +1034,7 @@ static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
u32 sa_lo;
u32 sa_hi = 0;
u32 pf_ctrl = 0;
@@ -1230,7 +1229,7 @@ out:
static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
if (!phydev)
return -EIO;
@@ -1311,7 +1310,7 @@ static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
if (phydev->duplex == DUPLEX_HALF) {
adapter->flow = FLOW_NONE;
@@ -1456,7 +1455,7 @@ static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
{
u16 data;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
et131x_mii_read(adapter, MII_BMCR, &data);
data &= ~BMCR_PDOWN;
@@ -1469,7 +1468,7 @@ static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
u16 lcr2;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
/* Set the LED behavior such that LED 1 indicates speed (off =
* 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
@@ -2111,7 +2110,7 @@ static int et131x_init_recv(struct et131x_adapter *adapter)
/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
/* For version B silicon, we do not use the RxDMA timer for 10 and 100
* Mbits/s line rates. We do not enable any RxDMA interrupt coalescing.
@@ -2426,7 +2425,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
struct sk_buff *skb = tcb->skb;
u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
dma_addr_t dma_addr;
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -2791,22 +2790,6 @@ static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
-static int et131x_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
- return phy_ethtool_gset(adapter->phydev, cmd);
-}
-
-static int et131x_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
- return phy_ethtool_sset(adapter->phydev, cmd);
-}
-
static int et131x_get_regs_len(struct net_device *netdev)
{
#define ET131X_REGS_LEN 256
@@ -2979,12 +2962,12 @@ static void et131x_get_drvinfo(struct net_device *netdev,
}
static struct ethtool_ops et131x_ethtool_ops = {
- .get_settings = et131x_get_settings,
- .set_settings = et131x_set_settings,
.get_drvinfo = et131x_get_drvinfo,
.get_regs_len = et131x_get_regs_len,
.get_regs = et131x_get_regs,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/* et131x_hwaddr_init - set up the MAC Address */
@@ -3098,7 +3081,7 @@ err_out:
static void et131x_error_timer_handler(unsigned long data)
{
struct et131x_adapter *adapter = (struct et131x_adapter *)data;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) {
/* Bring the device immediately out of coma, to
@@ -3168,7 +3151,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
static void et131x_adjust_link(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = netdev->phydev;
if (!phydev)
return;
@@ -3287,7 +3270,6 @@ static int et131x_mii_probe(struct net_device *netdev)
phydev->advertising = phydev->supported;
phydev->autoneg = AUTONEG_ENABLE;
- adapter->phydev = phydev;
phy_attached_info(phydev);
@@ -3323,7 +3305,7 @@ static void et131x_pci_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
netif_napi_del(&adapter->napi);
- phy_disconnect(adapter->phydev);
+ phy_disconnect(netdev->phydev);
mdiobus_unregister(adapter->mii_bus);
mdiobus_free(adapter->mii_bus);
@@ -3338,20 +3320,16 @@ static void et131x_pci_remove(struct pci_dev *pdev)
static void et131x_up(struct net_device *netdev)
{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
et131x_enable_txrx(netdev);
- phy_start(adapter->phydev);
+ phy_start(netdev->phydev);
}
static void et131x_down(struct net_device *netdev)
{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
/* Save the timestamp for the TX watchdog to prevent a timeout */
netif_trans_update(netdev);
- phy_stop(adapter->phydev);
+ phy_stop(netdev->phydev);
et131x_disable_txrx(netdev);
}
@@ -3684,12 +3662,10 @@ static int et131x_close(struct net_device *netdev)
static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
int cmd)
{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
- if (!adapter->phydev)
+ if (!netdev->phydev)
return -EINVAL;
- return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
+ return phy_mii_ioctl(netdev->phydev, reqbuf, cmd);
}
/* et131x_set_packet_filter - Configures the Rx Packet filtering */
@@ -4073,7 +4049,7 @@ out:
return rc;
err_phy_disconnect:
- phy_disconnect(adapter->phydev);
+ phy_disconnect(netdev->phydev);
err_mdio_unregister:
mdiobus_unregister(adapter->mii_bus);
err_mdio_free:
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index de2c4bf5fac4..6ffdff68bfc4 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -77,7 +77,6 @@ struct emac_board_info {
int emacrx_completed_flag;
- struct phy_device *phy_dev;
struct device_node *phy_node;
unsigned int link;
unsigned int speed;
@@ -115,7 +114,7 @@ static void emac_update_duplex(struct net_device *dev)
static void emac_handle_link_change(struct net_device *dev)
{
struct emac_board_info *db = netdev_priv(dev);
- struct phy_device *phydev = db->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int status_change = 0;
@@ -154,21 +153,22 @@ static void emac_handle_link_change(struct net_device *dev)
static int emac_mdio_probe(struct net_device *dev)
{
struct emac_board_info *db = netdev_priv(dev);
+ struct phy_device *phydev;
/* to-do: PHY interrupts are currently not supported */
/* attach the mac to the phy */
- db->phy_dev = of_phy_connect(db->ndev, db->phy_node,
- &emac_handle_link_change, 0,
- db->phy_interface);
- if (!db->phy_dev) {
+ phydev = of_phy_connect(db->ndev, db->phy_node,
+ &emac_handle_link_change, 0,
+ db->phy_interface);
+ if (!phydev) {
netdev_err(db->ndev, "could not find the PHY\n");
return -ENODEV;
}
/* mask with MAC supported features */
- db->phy_dev->supported &= PHY_BASIC_FEATURES;
- db->phy_dev->advertising = db->phy_dev->supported;
+ phydev->supported &= PHY_BASIC_FEATURES;
+ phydev->advertising = phydev->supported;
db->link = 0;
db->speed = 0;
@@ -179,10 +179,7 @@ static int emac_mdio_probe(struct net_device *dev)
static void emac_mdio_remove(struct net_device *dev)
{
- struct emac_board_info *db = netdev_priv(dev);
-
- phy_disconnect(db->phy_dev);
- db->phy_dev = NULL;
+ phy_disconnect(dev->phydev);
}
static void emac_reset(struct emac_board_info *db)
@@ -208,8 +205,7 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct emac_board_info *dm = netdev_priv(dev);
- struct phy_device *phydev = dm->phy_dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -229,33 +225,11 @@ static void emac_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
-static int emac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct emac_board_info *dm = netdev_priv(dev);
- struct phy_device *phydev = dm->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int emac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct emac_board_info *dm = netdev_priv(dev);
- struct phy_device *phydev = dm->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static const struct ethtool_ops emac_ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
- .get_settings = emac_get_settings,
- .set_settings = emac_set_settings,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static unsigned int emac_setup(struct net_device *ndev)
@@ -744,7 +718,7 @@ static int emac_open(struct net_device *dev)
return ret;
}
- phy_start(db->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
return 0;
@@ -781,7 +755,7 @@ static int emac_stop(struct net_device *ndev)
netif_stop_queue(ndev);
netif_carrier_off(ndev);
- phy_stop(db->phy_dev);
+ phy_stop(ndev->phydev);
emac_mdio_remove(ndev);
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 103c30ddddf7..e0052003d16f 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -473,7 +473,6 @@ struct altera_tse_private {
int phy_addr; /* PHY's MDIO address, -1 for autodetection */
phy_interface_t phy_iface;
struct mii_bus *mdio;
- struct phy_device *phydev;
int oldspeed;
int oldduplex;
int oldlink;
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index be72e1e64525..7c367713c3e6 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -233,40 +233,18 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
buf[i] = csrrd32(priv->mac_dev, i * 4);
}
-static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
-
- if (phydev == NULL)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
-
- if (phydev == NULL)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static const struct ethtool_ops tse_ethtool_ops = {
.get_drvinfo = tse_get_drvinfo,
.get_regs_len = tse_reglen,
.get_regs = tse_get_regs,
.get_link = ethtool_op_get_link,
- .get_settings = tse_get_settings,
- .set_settings = tse_set_settings,
.get_strings = tse_gstrings,
.get_sset_count = tse_sset_count,
.get_ethtool_stats = tse_fill_stats,
.get_msglevel = tse_get_msglevel,
.set_msglevel = tse_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
void altera_tse_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index f749e4d389eb..49025e99fb0e 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -625,7 +625,7 @@ out:
static void altera_tse_adjust_link(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
int new_state = 0;
/* only change config if there is a link */
@@ -845,7 +845,6 @@ static int init_phy(struct net_device *dev)
netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
phydev->mdio.addr, phydev->phy_id, phydev->link);
- priv->phydev = phydev;
return 0;
}
@@ -1172,8 +1171,8 @@ static int tse_open(struct net_device *dev)
spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (dev->phydev)
+ phy_start(dev->phydev);
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1205,8 +1204,8 @@ static int tse_shutdown(struct net_device *dev)
unsigned long int flags;
/* Stop the PHY */
- if (priv->phydev)
- phy_stop(priv->phydev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1545,10 +1544,9 @@ err_free_netdev:
static int altera_tse_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
- struct altera_tse_private *priv = netdev_priv(ndev);
- if (priv->phydev)
- phy_disconnect(priv->phydev);
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index e0fb0f1122db..20760e10211a 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -509,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev)
* on the current MAC's MII bus
*/
for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
- if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) {
- phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
+ if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
+ phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
if (!aup->phy_search_highest_addr)
/* break out with first one found */
break;
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index ca562bc034c3..e4feb712d4f2 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -134,7 +134,6 @@ struct arc_emac_priv {
/* Devices */
struct device *dev;
- struct phy_device *phy_dev;
struct mii_bus *bus;
struct arc_emac_mdio_bus_data bus_data;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index a3a9392a4954..586bedac457d 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -47,7 +47,7 @@ static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
static void arc_emac_adjust_link(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = priv->phy_dev;
+ struct phy_device *phy_dev = ndev->phydev;
unsigned int reg, state_changed = 0;
if (priv->link != phy_dev->link) {
@@ -80,46 +80,6 @@ static void arc_emac_adjust_link(struct net_device *ndev)
}
/**
- * arc_emac_get_settings - Get PHY settings.
- * @ndev: Pointer to net_device structure.
- * @cmd: Pointer to ethtool_cmd structure.
- *
- * This implements ethtool command for getting PHY settings. If PHY could
- * not be found, the function returns -ENODEV. This function calls the
- * relevant PHY ethtool API to get the PHY settings.
- * Issue "ethtool ethX" under linux prompt to execute this function.
- */
-static int arc_emac_get_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct arc_emac_priv *priv = netdev_priv(ndev);
-
- return phy_ethtool_gset(priv->phy_dev, cmd);
-}
-
-/**
- * arc_emac_set_settings - Set PHY settings as passed in the argument.
- * @ndev: Pointer to net_device structure.
- * @cmd: Pointer to ethtool_cmd structure.
- *
- * This implements ethtool command for setting various PHY settings. If PHY
- * could not be found, the function returns -ENODEV. This function calls the
- * relevant PHY ethtool API to set the PHY.
- * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
- * function.
- */
-static int arc_emac_set_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct arc_emac_priv *priv = netdev_priv(ndev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- return phy_ethtool_sset(priv->phy_dev, cmd);
-}
-
-/**
* arc_emac_get_drvinfo - Get EMAC driver information.
* @ndev: Pointer to net_device structure.
* @info: Pointer to ethtool_drvinfo structure.
@@ -137,10 +97,10 @@ static void arc_emac_get_drvinfo(struct net_device *ndev,
}
static const struct ethtool_ops arc_emac_ethtool_ops = {
- .get_settings = arc_emac_get_settings,
- .set_settings = arc_emac_set_settings,
.get_drvinfo = arc_emac_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
#define FIRST_OR_LAST_MASK (FIRST_MASK | LAST_MASK)
@@ -403,7 +363,7 @@ static void arc_emac_poll_controller(struct net_device *dev)
static int arc_emac_open(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = priv->phy_dev;
+ struct phy_device *phy_dev = ndev->phydev;
int i;
phy_dev->autoneg = AUTONEG_ENABLE;
@@ -474,7 +434,7 @@ static int arc_emac_open(struct net_device *ndev)
/* Enable EMAC */
arc_reg_or(priv, R_CTRL, EN_MASK);
- phy_start_aneg(priv->phy_dev);
+ phy_start_aneg(ndev->phydev);
netif_start_queue(ndev);
@@ -772,6 +732,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
struct device *dev = ndev->dev.parent;
struct resource res_regs;
struct device_node *phy_node;
+ struct phy_device *phydev = NULL;
struct arc_emac_priv *priv;
const char *mac_addr;
unsigned int id, clock_frequency, irq;
@@ -887,16 +848,16 @@ int arc_emac_probe(struct net_device *ndev, int interface)
goto out_clken;
}
- priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
- interface);
- if (!priv->phy_dev) {
+ phydev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
+ interface);
+ if (!phydev) {
dev_err(dev, "of_phy_connect() failed\n");
err = -ENODEV;
goto out_mdio;
}
dev_info(dev, "connected to %s phy with id 0x%x\n",
- priv->phy_dev->drv->name, priv->phy_dev->phy_id);
+ phydev->drv->name, phydev->phy_id);
netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);
@@ -910,8 +871,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
out_netif_api:
netif_napi_del(&priv->napi);
- phy_disconnect(priv->phy_dev);
- priv->phy_dev = NULL;
+ phy_disconnect(phydev);
out_mdio:
arc_mdio_remove(priv);
out_clken:
@@ -925,8 +885,7 @@ int arc_emac_remove(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- phy_disconnect(priv->phy_dev);
- priv->phy_dev = NULL;
+ phy_disconnect(ndev->phydev);
arc_mdio_remove(priv);
unregister_netdev(ndev);
netif_napi_del(&priv->napi);
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index d02c4240b7df..8fc93c5f6abc 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -96,10 +96,6 @@ struct alx_priv {
unsigned int rx_ringsz;
unsigned int rxbuf_size;
- struct page *rx_page;
- unsigned int rx_page_offset;
- unsigned int rx_frag_size;
-
struct napi_struct napi;
struct alx_tx_queue txq;
struct alx_rx_queue rxq;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c98acdc0d14f..e708e360a9e3 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -70,35 +70,6 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
}
}
-static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
-{
- struct sk_buff *skb;
- struct page *page;
-
- if (alx->rx_frag_size > PAGE_SIZE)
- return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
-
- page = alx->rx_page;
- if (!page) {
- alx->rx_page = page = alloc_page(gfp);
- if (unlikely(!page))
- return NULL;
- alx->rx_page_offset = 0;
- }
-
- skb = build_skb(page_address(page) + alx->rx_page_offset,
- alx->rx_frag_size);
- if (likely(skb)) {
- alx->rx_page_offset += alx->rx_frag_size;
- if (alx->rx_page_offset >= PAGE_SIZE)
- alx->rx_page = NULL;
- else
- get_page(page);
- }
- return skb;
-}
-
-
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
struct alx_rx_queue *rxq = &alx->rxq;
@@ -115,9 +86,22 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
while (!cur_buf->skb && next != rxq->read_idx) {
struct alx_rfd *rfd = &rxq->rfd[cur];
- skb = alx_alloc_skb(alx, gfp);
+ /*
+ * When the DMA RX address is set to something like
+ * 0x....fc0 (the last 64 bytes before a 4 KiB
+ * boundary), it is very likely to cause a DMA RFD
+ * overflow.
+ *
+ * To work around it, allocate the RX skb with 64
+ * bytes of extra space, and offset the data pointer
+ * past the boundary whenever 0x....fc0 is detected.
+ */
+ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
if (!skb)
break;
+
+ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+ skb_reserve(skb, 64);
+
dma = dma_map_single(&alx->hw.pdev->dev,
skb->data, alx->rxbuf_size,
DMA_FROM_DEVICE);
@@ -153,7 +137,6 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
}
-
return count;
}
@@ -622,11 +605,6 @@ static void alx_free_rings(struct alx_priv *alx)
kfree(alx->txq.bufs);
kfree(alx->rxq.bufs);
- if (alx->rx_page) {
- put_page(alx->rx_page);
- alx->rx_page = NULL;
- }
-
dma_free_coherent(&alx->hw.pdev->dev,
alx->descmem.size,
alx->descmem.virt,
@@ -681,7 +659,6 @@ static int alx_request_irq(struct alx_priv *alx)
alx->dev->name, alx);
if (!err)
goto out;
-
/* fall back to legacy interrupt */
pci_disable_msi(alx->hw.pdev);
}
@@ -725,7 +702,6 @@ static int alx_init_sw(struct alx_priv *alx)
struct pci_dev *pdev = alx->hw.pdev;
struct alx_hw *hw = &alx->hw;
int err;
- unsigned int head_size;
err = alx_identify_hw(alx);
if (err) {
@@ -741,12 +717,7 @@ static int alx_init_sw(struct alx_priv *alx)
hw->smb_timer = 400;
hw->mtu = alx->dev->mtu;
-
alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
- head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- alx->rx_frag_size = roundup_pow_of_two(head_size);
-
alx->tx_ringsz = 256;
alx->rx_ringsz = 512;
hw->imt = 200;
@@ -848,7 +819,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
{
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
- unsigned int head_size;
if ((max_frame < ALX_MIN_FRAME_SIZE) ||
(max_frame > ALX_MAX_FRAME_SIZE))
@@ -860,9 +830,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
netdev->mtu = mtu;
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
- head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- alx->rx_frag_size = roundup_pow_of_two(head_size);
netdev_update_features(netdev);
if (netif_running(netdev))
alx_reinit(alx);
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 08a23e6b60e9..dc2c35dce216 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -631,7 +631,7 @@ static void nb8800_mac_config(struct net_device *dev)
static void nb8800_pause_config(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
u32 rxcr;
if (priv->pause_aneg) {
@@ -664,7 +664,7 @@ static void nb8800_pause_config(struct net_device *dev)
static void nb8800_link_reconfigure(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
int change = 0;
if (phydev->link) {
@@ -690,7 +690,7 @@ static void nb8800_link_reconfigure(struct net_device *dev)
}
if (change)
- phy_print_status(priv->phydev);
+ phy_print_status(phydev);
}
static void nb8800_update_mac_addr(struct net_device *dev)
@@ -935,9 +935,10 @@ static int nb8800_dma_stop(struct net_device *dev)
static void nb8800_pause_adv(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
u32 adv = 0;
- if (!priv->phydev)
+ if (!phydev)
return;
if (priv->pause_rx)
@@ -945,13 +946,14 @@ static void nb8800_pause_adv(struct net_device *dev)
if (priv->pause_tx)
adv ^= ADVERTISED_Asym_Pause;
- priv->phydev->supported |= adv;
- priv->phydev->advertising |= adv;
+ phydev->supported |= adv;
+ phydev->advertising |= adv;
}
static int nb8800_open(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
int err;
/* clear any pending interrupts */
@@ -969,10 +971,10 @@ static int nb8800_open(struct net_device *dev)
nb8800_mac_rx(dev, true);
nb8800_mac_tx(dev, true);
- priv->phydev = of_phy_connect(dev, priv->phy_node,
- nb8800_link_reconfigure, 0,
- priv->phy_mode);
- if (!priv->phydev)
+ phydev = of_phy_connect(dev, priv->phy_node,
+ nb8800_link_reconfigure, 0,
+ priv->phy_mode);
+ if (!phydev)
goto err_free_irq;
nb8800_pause_adv(dev);
@@ -982,7 +984,7 @@ static int nb8800_open(struct net_device *dev)
netif_start_queue(dev);
nb8800_start_rx(dev);
- phy_start(priv->phydev);
+ phy_start(phydev);
return 0;
@@ -997,8 +999,9 @@ err_free_dma:
static int nb8800_stop(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
- phy_stop(priv->phydev);
+ phy_stop(phydev);
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1007,8 +1010,7 @@ static int nb8800_stop(struct net_device *dev)
nb8800_mac_rx(dev, false);
nb8800_mac_tx(dev, false);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ phy_disconnect(phydev);
free_irq(dev->irq, dev);
@@ -1019,9 +1021,7 @@ static int nb8800_stop(struct net_device *dev)
static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static const struct net_device_ops nb8800_netdev_ops = {
@@ -1035,34 +1035,14 @@ static const struct net_device_ops nb8800_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
static int nb8800_nway_reset(struct net_device *dev)
{
- struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
- if (!priv->phydev)
+ if (!phydev)
return -ENODEV;
- return genphy_restart_aneg(priv->phydev);
+ return genphy_restart_aneg(phydev);
}
static void nb8800_get_pauseparam(struct net_device *dev,
@@ -1079,6 +1059,7 @@ static int nb8800_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pp)
{
struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
priv->pause_aneg = pp->autoneg;
priv->pause_rx = pp->rx_pause;
@@ -1088,8 +1069,8 @@ static int nb8800_set_pauseparam(struct net_device *dev,
if (!priv->pause_aneg)
nb8800_pause_config(dev);
- else if (priv->phydev)
- phy_start_aneg(priv->phydev);
+ else if (phydev)
+ phy_start_aneg(phydev);
return 0;
}
@@ -1182,8 +1163,6 @@ static void nb8800_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops nb8800_ethtool_ops = {
- .get_settings = nb8800_get_settings,
- .set_settings = nb8800_set_settings,
.nway_reset = nb8800_nway_reset,
.get_link = ethtool_op_get_link,
.get_pauseparam = nb8800_get_pauseparam,
@@ -1191,6 +1170,8 @@ static const struct ethtool_ops nb8800_ethtool_ops = {
.get_sset_count = nb8800_get_sset_count,
.get_strings = nb8800_get_strings,
.get_ethtool_stats = nb8800_get_ethtool_stats,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int nb8800_hw_init(struct net_device *dev)
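
The nb8800 conversion above follows the pattern used throughout this series: drop the hand-rolled get_settings/set_settings wrappers and point ethtool at the generic phylib helpers, which operate on dev->phydev directly. The converted shape, reduced to a sketch (the ops instance name is illustrative):

#include <linux/ethtool.h>
#include <linux/phy.h>

/* No per-driver wrappers needed: the phylib helpers use dev->phydev. */
static const struct ethtool_ops example_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
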
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
index e5adbc2aac9f..6ec4a956e1e5 100644
--- a/drivers/net/ethernet/aurora/nb8800.h
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -284,7 +284,6 @@ struct nb8800_priv {
struct mii_bus *mii_bus;
struct device_node *phy_node;
- struct phy_device *phydev;
/* PHY connection type from DT */
int phy_mode;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 18042c2460bd..d74a92e1c27d 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -139,26 +139,6 @@ config BNX2X_SRIOV
Virtualization support in the 578xx and 57712 products. This
allows for virtual function acceleration in virtual environments.
-config BNX2X_VXLAN
- bool "Virtual eXtensible Local Area Network support"
- default n
- depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m)
- ---help---
- This enables hardward offload support for VXLAN protocol over the
- NetXtremeII series adapters.
- Say Y here if you want to enable hardware offload support for
- Virtual eXtensible Local Area Network (VXLAN) in the driver.
-
-config BNX2X_GENEVE
- bool "Generic Network Virtualization Encapsulation (GENEVE) support"
- depends on BNX2X && GENEVE && !(BNX2X=y && GENEVE=m)
- ---help---
- This allows one to create GENEVE virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. GENEVE is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to enable hardware offload support for
- Generic Network Virtualization Encapsulation (GENEVE) in the driver.
-
config BGMAC
tristate "BCMA bus GBit core support"
depends on BCMA && BCMA_HOST_SOC
@@ -186,7 +166,6 @@ config SYSTEMPORT
config BNXT
tristate "Broadcom NetXtreme-C/E support"
depends on PCI
- depends on VXLAN || VXLAN=n
select FW_LOADER
select LIBCRC32C
---help---
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 543bf38105c9..834afbb51aff 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -96,28 +96,6 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
}
/* Ethtool operations */
-static int bcm_sysport_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct bcm_sysport_priv *priv = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EINVAL;
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
-static int bcm_sysport_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct bcm_sysport_priv *priv = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EINVAL;
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
static int bcm_sysport_set_rx_csum(struct net_device *dev,
netdev_features_t wanted)
{
@@ -1127,7 +1105,7 @@ static void bcm_sysport_tx_timeout(struct net_device *dev)
static void bcm_sysport_adj_link(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
unsigned int changed = 0;
u32 cmd_bits = 0, reg;
@@ -1182,7 +1160,7 @@ static void bcm_sysport_adj_link(struct net_device *dev)
umac_writel(priv, reg, UMAC_CMD);
}
- phy_print_status(priv->phydev);
+ phy_print_status(phydev);
}
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
@@ -1525,7 +1503,7 @@ static void bcm_sysport_netif_start(struct net_device *dev)
/* Enable RX interrupt and TX ring full interrupt */
intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
- phy_start(priv->phydev);
+ phy_start(dev->phydev);
/* Enable TX interrupts for the 32 TXQs */
intrl2_1_mask_clear(priv, 0xffffffff);
@@ -1546,6 +1524,7 @@ static void rbuf_init(struct bcm_sysport_priv *priv)
static int bcm_sysport_open(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
unsigned int i;
int ret;
@@ -1570,9 +1549,9 @@ static int bcm_sysport_open(struct net_device *dev)
/* Read CRC forward */
priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
- priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
- 0, priv->phy_interface);
- if (!priv->phydev) {
+ phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+ 0, priv->phy_interface);
+ if (!phydev) {
netdev_err(dev, "could not attach to PHY\n");
return -ENODEV;
}
@@ -1650,7 +1629,7 @@ out_free_tx_ring:
out_free_irq0:
free_irq(priv->irq0, dev);
out_phy_disconnect:
- phy_disconnect(priv->phydev);
+ phy_disconnect(phydev);
return ret;
}
@@ -1661,7 +1640,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
/* stop all software from updating hardware */
netif_tx_stop_all_queues(dev);
napi_disable(&priv->napi);
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
/* mask all interrupts */
intrl2_0_mask_set(priv, 0xffffffff);
@@ -1708,14 +1687,12 @@ static int bcm_sysport_stop(struct net_device *dev)
free_irq(priv->irq1, dev);
/* Disconnect from PHY */
- phy_disconnect(priv->phydev);
+ phy_disconnect(dev->phydev);
return 0;
}
static struct ethtool_ops bcm_sysport_ethtool_ops = {
- .get_settings = bcm_sysport_get_settings,
- .set_settings = bcm_sysport_set_settings,
.get_drvinfo = bcm_sysport_get_drvinfo,
.get_msglevel = bcm_sysport_get_msglvl,
.set_msglevel = bcm_sysport_set_msglvl,
@@ -1727,6 +1704,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {
.set_wol = bcm_sysport_set_wol,
.get_coalesce = bcm_sysport_get_coalesce,
.set_coalesce = bcm_sysport_set_coalesce,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops bcm_sysport_netdev_ops = {
@@ -1929,7 +1908,7 @@ static int bcm_sysport_suspend(struct device *d)
bcm_sysport_netif_stop(dev);
- phy_suspend(priv->phydev);
+ phy_suspend(dev->phydev);
netif_device_detach(dev);
@@ -2055,7 +2034,7 @@ static int bcm_sysport_resume(struct device *d)
goto out_free_rx_ring;
}
- phy_resume(priv->phydev);
+ phy_resume(dev->phydev);
bcm_sysport_netif_start(dev);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index f28bf545d7f4..1c82e3da69a7 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -670,7 +670,6 @@ struct bcm_sysport_priv {
/* PHY device */
struct device_node *phy_dn;
- struct phy_device *phydev;
phy_interface_t phy_interface;
int old_pause;
int old_link;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index ee5f431ab32a..b045dc072c40 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -246,6 +246,8 @@ err_dma_head:
err_drop:
dev_kfree_skb(skb);
+ net_dev->stats.tx_dropped++;
+ net_dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
@@ -267,15 +269,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
while (ring->start != ring->end) {
int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[slot_idx];
- u32 ctl1;
+ u32 ctl0, ctl1;
int len;
if (slot_idx == empty_slot)
break;
+ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
len = ctl1 & BGMAC_DESC_CTL1_LEN;
- if (ctl1 & BGMAC_DESC_CTL0_SOF)
+ if (ctl0 & BGMAC_DESC_CTL0_SOF)
/* Unmap no longer used buffer */
dma_unmap_single(dma_dev, slot->dma_addr, len,
DMA_TO_DEVICE);
@@ -284,6 +287,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
DMA_TO_DEVICE);
if (slot->skb) {
+ bgmac->net_dev->stats.tx_bytes += slot->skb->len;
+ bgmac->net_dev->stats.tx_packets++;
bytes_compl += slot->skb->len;
pkts_compl++;
@@ -464,6 +469,7 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
ring->start);
put_page(virt_to_head_page(buf));
+ bgmac->net_dev->stats.rx_errors++;
break;
}
@@ -471,6 +477,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
ring->start);
put_page(virt_to_head_page(buf));
+ bgmac->net_dev->stats.rx_length_errors++;
+ bgmac->net_dev->stats.rx_errors++;
break;
}
@@ -481,6 +489,7 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
if (unlikely(!skb)) {
bgmac_err(bgmac, "build_skb failed\n");
put_page(virt_to_head_page(buf));
+ bgmac->net_dev->stats.rx_errors++;
break;
}
skb_put(skb, BGMAC_RX_FRAME_OFFSET +
@@ -490,6 +499,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+ bgmac->net_dev->stats.rx_bytes += len;
+ bgmac->net_dev->stats.rx_packets++;
napi_gro_receive(&bgmac->napi, skb);
handled++;
} while (0);
@@ -1310,9 +1321,10 @@ static int bgmac_open(struct net_device *net_dev)
}
napi_enable(&bgmac->napi);
- phy_start(bgmac->phy_dev);
+ phy_start(net_dev->phydev);
+
+ netif_start_queue(net_dev);
- netif_carrier_on(net_dev);
return 0;
}
@@ -1322,7 +1334,7 @@ static int bgmac_stop(struct net_device *net_dev)
netif_carrier_off(net_dev);
- phy_stop(bgmac->phy_dev);
+ phy_stop(net_dev->phydev);
napi_disable(&bgmac->napi);
bgmac_chip_intrs_off(bgmac);
@@ -1360,12 +1372,10 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
- struct bgmac *bgmac = netdev_priv(net_dev);
-
if (!netif_running(net_dev))
return -EINVAL;
- return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
+ return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
}
static const struct net_device_ops bgmac_netdev_ops = {
@@ -1382,20 +1392,125 @@ static const struct net_device_ops bgmac_netdev_ops = {
* ethtool_ops
**************************************************/
-static int bgmac_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+struct bgmac_stat {
+ u8 size;
+ u32 offset;
+ const char *name;
+};
+
+static struct bgmac_stat bgmac_get_strings_stats[] = {
+ { 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
+ { 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
+ { 8, BGMAC_TX_OCTETS, "tx_octets" },
+ { 4, BGMAC_TX_PKTS, "tx_pkts" },
+ { 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
+ { 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
+ { 4, BGMAC_TX_LEN_64, "tx_64" },
+ { 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
+ { 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
+ { 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
+ { 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
+ { 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
+ { 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
+ { 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
+ { 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
+ { 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
+ { 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
+ { 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
+ { 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
+ { 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
+ { 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
+ { 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
+ { 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
+ { 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
+ { 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
+ { 4, BGMAC_TX_DEFERED, "tx_defered" },
+ { 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
+ { 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
+ { 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
+ { 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
+ { 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
+ { 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
+ { 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
+ { 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
+ { 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
+ { 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
+ { 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
+ { 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
+ { 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
+ { 8, BGMAC_RX_OCTETS, "rx_octets" },
+ { 4, BGMAC_RX_PKTS, "rx_pkts" },
+ { 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
+ { 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
+ { 4, BGMAC_RX_LEN_64, "rx_64" },
+ { 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
+ { 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
+ { 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
+ { 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
+ { 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
+ { 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
+ { 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
+ { 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
+ { 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
+ { 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
+ { 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
+ { 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
+ { 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
+ { 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
+ { 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
+ { 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
+ { 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
+ { 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
+ { 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
+ { 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
+ { 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
+ { 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
+};
+
+#define BGMAC_STATS_LEN ARRAY_SIZE(bgmac_get_strings_stats)
+
+static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
- struct bgmac *bgmac = netdev_priv(net_dev);
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BGMAC_STATS_LEN;
+ }
- return phy_ethtool_gset(bgmac->phy_dev, cmd);
+ return -EOPNOTSUPP;
}
-static int bgmac_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static void bgmac_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
{
- struct bgmac *bgmac = netdev_priv(net_dev);
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
- return phy_ethtool_sset(bgmac->phy_dev, cmd);
+ for (i = 0; i < BGMAC_STATS_LEN; i++)
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
+}
+
+static void bgmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *ss, uint64_t *data)
+{
+ struct bgmac *bgmac = netdev_priv(dev);
+ const struct bgmac_stat *s;
+ unsigned int i;
+ u64 val;
+
+ if (!netif_running(dev))
+ return;
+
+ for (i = 0; i < BGMAC_STATS_LEN; i++) {
+ s = &bgmac_get_strings_stats[i];
+ val = 0;
+ if (s->size == 8)
+ val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
+ val |= bgmac_read(bgmac, s->offset);
+ data[i] = val;
+ }
}
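
The 8-byte counters in the table above are exposed as two 32-bit registers with the high word at offset + 4, which is what the high-then-low composition in bgmac_get_ethtool_stats() reads. Note that this simple sequence can tear if the counter carries between the two reads; a defensive variant (a sketch, not what the driver does) re-reads the high word until it is stable:

/* Sketch: torn-read-safe 64-bit counter read; bgmac_read() is the
 * driver's 32-bit MMIO accessor.
 */
static u64 read_stat64(struct bgmac *bgmac, u16 offset)
{
	u32 hi, lo;

	do {
		hi = bgmac_read(bgmac, offset + 4);
		lo = bgmac_read(bgmac, offset);
	} while (hi != bgmac_read(bgmac, offset + 4));

	return ((u64)hi << 32) | lo;
}
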
static void bgmac_get_drvinfo(struct net_device *net_dev,
@@ -1406,9 +1521,12 @@ static void bgmac_get_drvinfo(struct net_device *net_dev,
}
static const struct ethtool_ops bgmac_ethtool_ops = {
- .get_settings = bgmac_get_settings,
- .set_settings = bgmac_set_settings,
+ .get_strings = bgmac_get_strings,
+ .get_sset_count = bgmac_get_sset_count,
+ .get_ethtool_stats = bgmac_get_ethtool_stats,
.get_drvinfo = bgmac_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**************************************************
@@ -1429,7 +1547,7 @@ static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
static void bgmac_adjust_link(struct net_device *net_dev)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- struct phy_device *phy_dev = bgmac->phy_dev;
+ struct phy_device *phy_dev = net_dev->phydev;
bool update = false;
if (phy_dev->link) {
@@ -1473,8 +1591,6 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac)
return err;
}
- bgmac->phy_dev = phy_dev;
-
return err;
}
@@ -1519,7 +1635,6 @@ static int bgmac_mii_register(struct bgmac *bgmac)
err = PTR_ERR(phy_dev);
goto err_unregister_bus;
}
- bgmac->phy_dev = phy_dev;
return err;
@@ -1588,6 +1703,7 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->net_dev = net_dev;
bgmac->core = core;
bcma_set_drvdata(core, bgmac);
+ SET_NETDEV_DEV(net_dev, &core->dev);
/* Defaults */
memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 9a03c142b742..99beb181f577 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -123,7 +123,7 @@
#define BGMAC_TX_LEN_1024_TO_1522 0x334
#define BGMAC_TX_LEN_1523_TO_2047 0x338
#define BGMAC_TX_LEN_2048_TO_4095 0x33c
-#define BGMAC_TX_LEN_4095_TO_8191 0x340
+#define BGMAC_TX_LEN_4096_TO_8191 0x340
#define BGMAC_TX_LEN_8192_TO_MAX 0x344
#define BGMAC_TX_JABBER_PKTS 0x348 /* Error */
#define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */
@@ -166,7 +166,7 @@
#define BGMAC_RX_LEN_1024_TO_1522 0x3e4
#define BGMAC_RX_LEN_1523_TO_2047 0x3e8
#define BGMAC_RX_LEN_2048_TO_4095 0x3ec
-#define BGMAC_RX_LEN_4095_TO_8191 0x3f0
+#define BGMAC_RX_LEN_4096_TO_8191 0x3f0
#define BGMAC_RX_LEN_8192_TO_MAX 0x3f4
#define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */
#define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */
@@ -441,7 +441,6 @@ struct bgmac {
struct net_device *net_dev;
struct napi_struct napi;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
/* DMA */
struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c5fe915870ad..97e892511666 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -59,9 +59,6 @@
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
-#include <net/geneve.h>
-#endif
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
@@ -10076,7 +10073,6 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
}
}
-#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
static int bnx2x_udp_port_update(struct bnx2x *bp)
{
struct bnx2x_func_switch_update_params *switch_update_params;
@@ -10177,47 +10173,42 @@ static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
type, port);
}
-#endif
-
-#ifdef CONFIG_BNX2X_VXLAN
-static void bnx2x_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
-{
- struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(port);
-
- __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
-}
-
-static void bnx2x_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
-{
- struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(port);
-
- __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
-}
-#endif
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
-static void bnx2x_add_geneve_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+static void bnx2x_udp_tunnel_add(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(port);
+ u16 t_port = ntohs(ti->port);
- __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+ break;
+ default:
+ break;
+ }
}
-static void bnx2x_del_geneve_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+static void bnx2x_udp_tunnel_del(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(port);
+ u16 t_port = ntohs(ti->port);
- __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+ break;
+ default:
+ break;
+ }
}
-#endif
static int bnx2x_close(struct net_device *dev);
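
Both conversions in this patch (here and in bnxt below) replace the protocol-specific ndo_add_vxlan_port/ndo_add_geneve_port callbacks with a single ndo_udp_tunnel_add/ndo_udp_tunnel_del pair taking a struct udp_tunnel_info, so one handler switches on ti->type. The skeleton, with a hypothetical handler name:

#include <net/udp_tunnel.h>

static void example_udp_tunnel_add(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	u16 port = ntohs(ti->port);	/* ti->port is __be16 */

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		/* program 'port' as the VXLAN dst port in the NIC */
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		/* program 'port' as the GENEVE dst port in the NIC */
		break;
	default:
		break;
	}
}
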
@@ -10325,7 +10316,6 @@ sp_rtnl_not_reset:
&bp->sp_rtnl_state))
bnx2x_update_mng_version(bp);
-#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
&bp->sp_rtnl_state)) {
if (bnx2x_udp_port_update(bp)) {
@@ -10335,20 +10325,14 @@ sp_rtnl_not_reset:
BNX2X_UDP_PORT_MAX);
} else {
/* Since we don't store additional port information,
- * if no port is configured for any feature ask for
+ * if no ports are configured for any feature ask for
* information about currently configured ports.
*/
-#ifdef CONFIG_BNX2X_VXLAN
- if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count)
- vxlan_get_rx_port(bp->dev);
-#endif
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
- if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
- geneve_get_rx_port(bp->dev);
-#endif
+ if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count &&
+ !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
+ udp_tunnel_get_rx_info(bp->dev);
}
}
-#endif
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
@@ -12551,14 +12535,8 @@ static int bnx2x_open(struct net_device *dev)
if (rc)
return rc;
-#ifdef CONFIG_BNX2X_VXLAN
- if (IS_PF(bp))
- vxlan_get_rx_port(dev);
-#endif
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
if (IS_PF(bp))
- geneve_get_rx_port(dev);
-#endif
+ udp_tunnel_get_rx_info(dev);
return 0;
}
@@ -12895,52 +12873,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
return rc;
}
-int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
{
struct bnx2x_vlan_entry *vlan;
int rc = 0;
- if (!bp->vlan_cnt) {
- DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
- return 0;
- }
-
+ /* Configure all non-configured entries */
list_for_each_entry(vlan, &bp->vlan_reg, link) {
- /* Prepare for cleanup in case of errors */
- if (rc) {
- vlan->hw = false;
- continue;
- }
-
- if (!vlan->hw)
+ if (vlan->hw)
continue;
- DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+ if (bp->vlan_cnt >= bp->vlan_credit)
+ return -ENOBUFS;
rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
if (rc) {
- BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
- vlan->hw = false;
- rc = -EINVAL;
- continue;
+ BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+ return rc;
}
+
+ DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+ vlan->hw = true;
+ bp->vlan_cnt++;
}
- return rc;
+ return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+ bool need_accept_any_vlan;
+
+ need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+ if (bp->accept_any_vlan != need_accept_any_vlan) {
+ bp->accept_any_vlan = need_accept_any_vlan;
+ DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+ bp->accept_any_vlan ? "raised" : "cleared");
+ if (set_rx_mode) {
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+ }
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* The hw forgets all entries after a reload */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+ bp->vlan_cnt = 0;
+
+ /* Don't set rx mode here. Our caller will do it. */
+ bnx2x_vlan_configure(bp, false);
+
+ return 0;
}
static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
- bool hw = false;
- int rc = 0;
-
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
@@ -12948,93 +12945,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
if (!vlan)
return -ENOMEM;
- bp->vlan_cnt++;
- if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
- DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
- bp->accept_any_vlan = true;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- } else if (bp->vlan_cnt <= bp->vlan_credit) {
- rc = __bnx2x_vlan_configure_vid(bp, vid, true);
- hw = true;
- }
-
vlan->vid = vid;
- vlan->hw = hw;
-
- if (!rc) {
- list_add(&vlan->link, &bp->vlan_reg);
- } else {
- bp->vlan_cnt--;
- kfree(vlan);
- }
+ vlan->hw = false;
+ list_add_tail(&vlan->link, &bp->vlan_reg);
- DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
- return rc;
+ return 0;
}
static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
+ bool found = false;
int rc = 0;
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
-
DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
- if (!bp->vlan_cnt) {
- BNX2X_ERR("Unable to kill VLAN %d\n", vid);
- return -EINVAL;
- }
-
list_for_each_entry(vlan, &bp->vlan_reg, link)
- if (vlan->vid == vid)
+ if (vlan->vid == vid) {
+ found = true;
break;
+ }
- if (vlan->vid != vid) {
+ if (!found) {
BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
return -EINVAL;
}
- if (vlan->hw)
+ if (netif_running(dev) && vlan->hw) {
rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+ DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+ bp->vlan_cnt--;
+ }
list_del(&vlan->link);
kfree(vlan);
- bp->vlan_cnt--;
-
- if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
- /* Configure all non-configured entries */
- list_for_each_entry(vlan, &bp->vlan_reg, link) {
- if (vlan->hw)
- continue;
-
- rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
- if (rc) {
- BNX2X_ERR("Unable to config VLAN %d\n",
- vlan->vid);
- continue;
- }
- DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
- vlan->vid);
- vlan->hw = true;
- }
- DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
- bp->accept_any_vlan = false;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- }
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
@@ -13072,14 +13023,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
-#ifdef CONFIG_BNX2X_VXLAN
- .ndo_add_vxlan_port = bnx2x_add_vxlan_port,
- .ndo_del_vxlan_port = bnx2x_del_vxlan_port,
-#endif
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
- .ndo_add_geneve_port = bnx2x_add_geneve_port,
- .ndo_del_geneve_port = bnx2x_del_geneve_port,
-#endif
+ .ndo_udp_tunnel_add = bnx2x_udp_tunnel_add,
+ .ndo_udp_tunnel_del = bnx2x_udp_tunnel_del,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 72a2efff8e49..70b148a10ec8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -37,9 +37,7 @@
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
-#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
-#include <net/vxlan.h>
-#endif
+#include <net/udp_tunnel.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
@@ -75,12 +73,22 @@ enum board_idx {
BCM57301,
BCM57302,
BCM57304,
+ BCM57311,
+ BCM57312,
BCM57402,
BCM57404,
BCM57406,
+ BCM57404_NPAR,
+ BCM57412,
+ BCM57414,
+ BCM57416,
+ BCM57417,
+ BCM57414_NPAR,
BCM57314,
BCM57304_VF,
BCM57404_VF,
+ BCM57414_VF,
+ BCM57314_VF,
};
/* indexed by enum above */
@@ -90,25 +98,45 @@ static const struct {
{ "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
{ "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" },
+ { "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57412 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57414 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom BCM57414 NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom BCM57314 NetXtreme-E Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+ { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
+ { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+ { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57404_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
+ { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
+ { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
+ { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
+ { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = BCM57414_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = BCM57314_VF },
#endif
{ 0 }
};
@@ -125,12 +153,14 @@ static const u16 bnxt_async_events_arr[] = {
HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};
static bool bnxt_vf_pciid(enum board_idx idx)
{
- return (idx == BCM57304_VF || idx == BCM57404_VF);
+ return (idx == BCM57304_VF || idx == BCM57404_VF ||
+ idx == BCM57314_VF || idx == BCM57414_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -286,7 +316,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
txr->tx_prod = prod;
+ tx_buf->is_push = 1;
netdev_tx_sent_queue(txq, skb->len);
+ wmb(); /* Sync is_push and byte queue before pushing data */
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
@@ -298,7 +330,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
push_len);
}
- tx_buf->is_push = 1;
goto tx_done;
}
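
Setting tx_buf->is_push before netdev_tx_sent_queue() and adding the wmb() enforces producer-side ordering: the flag and the BQL accounting must be visible before the descriptor is pushed to the hardware, or the completion path could observe the push before the state it depends on. The idiom in general form (push_to_device() is hypothetical and stands for the MMIO copy that hands the packet to the NIC):

tx_buf->is_push = 1;			/* consumed by the TX completion path */
netdev_tx_sent_queue(txq, skb->len);	/* BQL accounting */
wmb();					/* make both writes visible before... */
push_to_device(txr, tx_push, push_len);	/* ...the hardware sees the packet */
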
@@ -919,6 +950,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+ tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
rxr->rx_prod = NEXT_RX(prod);
cons = NEXT_RX(cons);
@@ -937,32 +969,102 @@ static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}
+static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
+ int payload_off, int tcp_ts,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ struct tcphdr *th;
+ int len, nw_off;
+ u16 outer_ip_off, inner_ip_off, inner_mac_off;
+ u32 hdr_info = tpa_info->hdr_info;
+ bool loopback = false;
+
+ inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
+ inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
+ outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
+
+ /* If the packet is an internal loopback packet, the offsets will
+ * have an extra 4 bytes.
+ */
+ if (inner_mac_off == 4) {
+ loopback = true;
+ } else if (inner_mac_off > 4) {
+ __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
+ ETH_HLEN - 2));
+
+ /* We only support inner IPv4/IPv6. If we don't see the
+ * correct protocol ID, it must be a loopback packet where
+ * the offsets are off by 4.
+ */
+ if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
+ loopback = true;
+ }
+ if (loopback) {
+ /* internal loopback packet, subtract 4 from all offsets */
+ inner_ip_off -= 4;
+ inner_mac_off -= 4;
+ outer_ip_off -= 4;
+ }
+
+ nw_off = inner_ip_off - ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+
+ skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+ } else {
+ struct iphdr *iph = ip_hdr(skb);
+
+ skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+ }
+
+ if (inner_mac_off) { /* tunnel */
+ struct udphdr *uh = NULL;
+ __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
+ ETH_HLEN - 2));
+
+ if (proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |=
+ SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+ }
+#endif
+ return skb;
+}
+
#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
-static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
- struct rx_tpa_end_cmp *tpa_end,
- struct rx_tpa_end_cmp_ext *tpa_end1,
+static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
+ int payload_off, int tcp_ts,
struct sk_buff *skb)
{
#ifdef CONFIG_INET
struct tcphdr *th;
- int payload_off, tcp_opt_len = 0;
- int len, nw_off;
- u16 segs;
+ int len, nw_off, tcp_opt_len;
- segs = TPA_END_TPA_SEGS(tpa_end);
- if (segs == 1)
- return skb;
-
- NAPI_GRO_CB(skb)->count = segs;
- skb_shinfo(skb)->gso_size =
- le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
- skb_shinfo(skb)->gso_type = tpa_info->gso_type;
- payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
- RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
- if (TPA_END_GRO_TS(tpa_end))
+ if (tcp_ts)
tcp_opt_len = 12;
if (tpa_info->gso_type == SKB_GSO_TCPV4) {
@@ -1019,6 +1121,32 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
return skb;
}
+static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
+ struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ int payload_off;
+ u16 segs;
+
+ segs = TPA_END_TPA_SEGS(tpa_end);
+ if (segs == 1)
+ return skb;
+
+ NAPI_GRO_CB(skb)->count = segs;
+ skb_shinfo(skb)->gso_size =
+ le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+ skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+ payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+ skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
+#endif
+ return skb;
+}
+
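
bnxt_gro_skb() is now a thin wrapper: it computes what is common to all chips (segment count, gso_size, payload offset) and dispatches the chip-specific header fix-ups through a function pointer selected once at probe time, so the hot path pays a single indirect call instead of per-packet chip checks. The selection, from bnxt_init_one() further down:

bp->gro_func = bnxt_gro_func_5730x;
if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
	bp->gro_func = bnxt_gro_func_5731x;
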
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
struct bnxt_napi *bnapi,
u32 *raw_cons,
@@ -1112,19 +1240,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
- if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
- netdev_features_t features = skb->dev->features;
+ if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vlan_proto = tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD)) {
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- tpa_info->metadata &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
- }
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
@@ -1135,7 +1257,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (TPA_END_GRO(tpa_end))
- skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
+ skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
return skb;
}
@@ -1277,19 +1399,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
skb->protocol = eth_type_trans(skb, dev);
- if (rxcmp1->rx_cmp_flags2 &
- cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
- netdev_features_t features = skb->dev->features;
+ if ((rxcmp1->rx_cmp_flags2 &
+ cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD))
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- meta_data &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
@@ -1368,6 +1485,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
break;
}
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+ if (BNXT_PF(bp))
+ goto async_event_process_exit;
+ set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
+ break;
default:
netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
event_id);
@@ -2272,7 +2394,7 @@ static void bnxt_set_tpa_flags(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_TPA;
if (bp->dev->features & NETIF_F_LRO)
bp->flags |= BNXT_FLAG_LRO;
- if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ if (bp->dev->features & NETIF_F_GRO)
bp->flags |= BNXT_FLAG_GRO;
}
@@ -3287,11 +3409,13 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
unsigned int ring = 0, grp_idx;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_cfg_input req = {0};
+ u16 def_vlan = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
/* Only RSS support for now TBD: COS & LB */
req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
- VNIC_CFG_REQ_ENABLES_RSS_RULE);
+ VNIC_CFG_REQ_ENABLES_RSS_RULE |
+ VNIC_CFG_REQ_ENABLES_MRU);
req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
req.cos_rule = cpu_to_le16(0xffff);
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
@@ -3307,7 +3431,11 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
VLAN_HLEN);
- if (bp->flags & BNXT_FLAG_STRIP_VLAN)
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp))
+ def_vlan = bp->vf.vlan;
+#endif
+ if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -3824,7 +3952,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
- req.update_period_ms = cpu_to_le32(1000);
+ req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -3846,6 +3974,39 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
return 0;
}
+static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto func_qcfg_exit;
+
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp)) {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ }
+#endif
+ switch (resp->port_partition_type) {
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
+ bp->port_partition_type = resp->port_partition_type;
+ break;
+ }
+
+func_qcfg_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
@@ -3865,6 +4026,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->fw_fid = le16_to_cpu(resp->fid);
pf->port_id = le16_to_cpu(resp->port_id);
+ bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
@@ -4000,6 +4162,8 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
if (resp->hwrm_intf_maj >= 1)
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+ bp->chip_num = le16_to_cpu(resp->chip_num);
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4153,6 +4317,16 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
#endif
}
+/* Allow PF and VF with default VLAN to be in promiscuous mode */
+static bool bnxt_promisc_ok(struct bnxt *bp)
+{
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp) && !bp->vf.vlan)
+ return false;
+#endif
+ return true;
+}
+
static int bnxt_cfg_rx_mode(struct bnxt *);
static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
@@ -4218,7 +4392,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
- if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
if (bp->dev->flags & IFF_ALLMULTI) {
@@ -4240,6 +4414,11 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
rc);
+ if (BNXT_VF(bp)) {
+ bnxt_hwrm_func_qcfg(bp);
+ netdev_update_features(bp->dev);
+ }
+
return 0;
err_out:
@@ -4654,6 +4833,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
int rc = 0;
struct hwrm_port_phy_qcaps_input req = {0};
struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_link_info *link_info = &bp->link_info;
if (bp->hwrm_spec_code < 0x10201)
return 0;
@@ -4676,6 +4856,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
}
+ link_info->support_auto_speeds =
+ le16_to_cpu(resp->supported_speeds_auto_mode);
hwrm_phy_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -4933,7 +5115,7 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
{
struct hwrm_port_phy_cfg_input req = {0};
- if (BNXT_VF(bp))
+ if (!BNXT_SINGLE_PF(bp))
return 0;
if (pci_num_vf(bp->pdev))
@@ -5083,15 +5265,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
netdev_warn(bp->dev, "failed to update phy settings\n");
}
- if (irq_re_init) {
-#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
- vxlan_get_rx_port(bp->dev);
-#endif
- if (!bnxt_hwrm_tunnel_dst_port_alloc(
- bp, htons(0x17c1),
- TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
- bp->nge_port_cnt = 1;
- }
+ if (irq_re_init)
+ udp_tunnel_get_rx_info(bp->dev);
set_bit(BNXT_STATE_OPEN, &bp->state);
bnxt_enable_int(bp);
@@ -5132,12 +5307,19 @@ static int bnxt_open(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rc = bnxt_hwrm_func_reset(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
- rc);
- rc = -1;
- return rc;
+ if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
+ rc);
+ rc = -EBUSY;
+ return rc;
+ }
+ /* Do func_reset during the 1st PF open only to prevent killing
+ * the VFs when the PF is brought down and up.
+ */
+ if (BNXT_PF(bp))
+ set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
}
return __bnxt_open_nic(bp, true, true);
}
@@ -5357,8 +5539,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
- /* Only allow PF to be in promiscuous mode */
- if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
uc_update = bnxt_uc_list_updated(bp);
@@ -5466,6 +5647,27 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if (!bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
+
+ /* Both CTAG and STAG VLAN acceleration on the RX side have to be
+ * turned on or off together.
+ */
+ if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ else
+ features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+ }
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp)) {
+ if (bp->vf.vlan) {
+ features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ }
+ }
+#endif
return features;
}
@@ -5581,9 +5783,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
}
}
-static void bnxt_reset_task(struct bnxt *bp)
+static void bnxt_reset_task(struct bnxt *bp, bool silent)
{
- bnxt_dbg_dump_states(bp);
+ if (!silent)
+ bnxt_dbg_dump_states(bp);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
bnxt_open_nic(bp, false, false);
@@ -5634,6 +5837,23 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+ /* bnxt_reset_task() calls bnxt_close_nic() which waits
+ * for BNXT_STATE_IN_SP_TASK to clear.
+ * If there is a parallel dev_close(), bnxt_close() may be holding
+ * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
+ * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
+ */
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_lock();
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_reset_task(bp, silent);
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_unlock();
+}
+
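
The comment in bnxt_reset() describes a classic ABBA deadlock: bnxt_close() runs under rtnl and waits for BNXT_STATE_IN_SP_TASK to clear, while the sp_task would wait on rtnl_lock() with IN_SP_TASK still set. Clearing the bit before taking rtnl breaks the cycle, and the BNXT_STATE_OPEN re-check under rtnl guards the window in which the device may have been closed. The ordering, reduced to its essentials:

clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);	/* never hold this across rtnl */
rtnl_lock();
if (test_bit(BNXT_STATE_OPEN, &bp->state))	/* re-validate under rtnl */
	bnxt_reset_task(bp, silent);
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_unlock();
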
static void bnxt_cfg_ntp_filters(struct bnxt *);
static void bnxt_sp_task(struct work_struct *work)
@@ -5670,16 +5890,20 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
}
- if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
- /* bnxt_reset_task() calls bnxt_close_nic() which waits
- * for BNXT_STATE_IN_SP_TASK to clear.
- */
- clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
- bnxt_reset_task(bp);
- set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_unlock();
+ if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_alloc(
+ bp, bp->nge_port,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
+ if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+ }
+ if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+ bnxt_reset(bp, false);
+
+ if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
+ bnxt_reset(bp, true);
if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
bnxt_get_port_module_status(bp);
@@ -5770,6 +5994,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->tx_coal_ticks_irq = 2;
bp->tx_coal_bufs_irq = 2;
+ bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
+
init_timer(&bp->timer);
bp->timer.data = (unsigned long)bp;
bp->timer.function = bnxt_timer;
@@ -5835,7 +6061,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
- if (new_mtu < 60 || new_mtu > 9000)
+ if (new_mtu < 60 || new_mtu > 9500)
return -EINVAL;
if (netif_running(dev))
@@ -6044,47 +6270,83 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
#endif /* CONFIG_RFS_ACCEL */
-static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
- __be16 port)
+static void bnxt_udp_tunnel_add(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(dev);
- if (!netif_running(dev))
+ if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
return;
- if (sa_family != AF_INET6 && sa_family != AF_INET)
+ if (!netif_running(dev))
return;
- if (bp->vxlan_port_cnt && bp->vxlan_port != port)
- return;
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
+ return;
- bp->vxlan_port_cnt++;
- if (bp->vxlan_port_cnt == 1) {
- bp->vxlan_port = port;
- set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bp->vxlan_port_cnt++;
+ if (bp->vxlan_port_cnt == 1) {
+ bp->vxlan_port = ti->port;
+ set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (bp->nge_port_cnt && bp->nge_port != ti->port)
+ return;
+
+ bp->nge_port_cnt++;
+ if (bp->nge_port_cnt == 1) {
+ bp->nge_port = ti->port;
+ set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
+ }
+ break;
+ default:
+ return;
}
+
+ schedule_work(&bp->sp_task);
}
-static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
- __be16 port)
+static void bnxt_udp_tunnel_del(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(dev);
- if (!netif_running(dev))
+ if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
return;
- if (sa_family != AF_INET6 && sa_family != AF_INET)
+ if (!netif_running(dev))
return;
- if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
+ return;
bp->vxlan_port_cnt--;
- if (bp->vxlan_port_cnt == 0) {
- set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
- }
+ if (bp->vxlan_port_cnt != 0)
+ return;
+
+ set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!bp->nge_port_cnt || bp->nge_port != ti->port)
+ return;
+ bp->nge_port_cnt--;
+
+ if (bp->nge_port_cnt != 0)
+ return;
+
+ set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
+ break;
+ default:
+ return;
}
+
+ schedule_work(&bp->sp_task);
}
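
Neither tunnel callback talks to the firmware directly: the ndo hooks only record the port, set an event bit and kick the driver workqueue, and bnxt_sp_task() (earlier in this patch) performs the sleeping HWRM tunnel_dst_port_alloc/free exchange. The deferral pattern, condensed:

/* ndo callback: record intent, defer the slow firmware call */
bp->nge_port = ti->port;
set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
schedule_work(&bp->sp_task);

/* later, in bnxt_sp_task() workqueue context: */
if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event))
	bnxt_hwrm_tunnel_dst_port_alloc(bp, bp->nge_port,
			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
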
static const struct net_device_ops bnxt_netdev_ops = {
@@ -6115,8 +6377,8 @@ static const struct net_device_ops bnxt_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
- .ndo_add_vxlan_port = bnxt_add_vxlan_port,
- .ndo_del_vxlan_port = bnxt_del_vxlan_port,
+ .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
+ .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnxt_busy_poll,
#endif
@@ -6165,6 +6427,12 @@ static int bnxt_probe_phy(struct bnxt *bp)
return rc;
}
+ /* Older firmware does not report support_auto_speeds, so assume
+ * that all supported speeds can be autonegotiated.
+ */
+ if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
+ link_info->support_auto_speeds = link_info->support_speeds;
+
 /* Initialize the ethtool settings copy with NVM settings */
if (BNXT_AUTO_MODE(link_info->auto_mode)) {
link_info->autoneg = BNXT_AUTONEG_SPEED;
@@ -6338,7 +6606,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err;
mutex_init(&bp->hwrm_cmd_lock);
- bnxt_hwrm_ver_get(bp);
+ rc = bnxt_hwrm_ver_get(bp);
+ if (rc)
+ goto init_err;
+
+ bp->gro_func = bnxt_gro_func_5730x;
+ if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
+ bp->gro_func = bnxt_gro_func_5731x;
rc = bnxt_hwrm_func_drv_rgtr(bp);
if (rc)
@@ -6361,6 +6635,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err;
}
+ bnxt_hwrm_func_qcfg(bp);
+
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
if (BNXT_PF(bp))
@@ -6420,6 +6696,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
netdev_info(netdev, "PCI I/O error detected\n");
@@ -6434,6 +6711,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
bnxt_close(netdev);
+ /* So that func_reset will be done during slot_reset */
+ clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
pci_disable_device(pdev);
rtnl_unlock();
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2824d65b2e35..2313e37e6eb5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -11,10 +11,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.2.0"
+#define DRV_MODULE_VERSION "1.3.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 0
+#define DRV_VER_MIN 3
#define DRV_VER_UPD 0
struct tx_bd {
@@ -298,13 +298,14 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
+ #define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8)
__le32 rx_tpa_start_cmp_metadata;
__le32 rx_tpa_start_cmp_cfa_code_v2;
#define RX_TPA_START_CMP_V2 (0x1 << 0)
#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
- __le32 rx_tpa_start_cmp_unused5;
+ __le32 rx_tpa_start_cmp_hdr_info;
};
struct rx_tpa_end_cmp {
@@ -358,7 +359,8 @@ struct rx_tpa_end_cmp {
RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
#define TPA_END_GRO_TS(rx_tpa_end) \
- ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS))
+ (!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & \
+ cpu_to_le32(RX_TPA_END_GRO_TS)))
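
The !! added above is not cosmetic: the masked expression has __le32 type, and the flag bit can sit high in the word, so storing the raw result in a narrower flag (or comparing it with 1) could silently drop it. A standalone illustration of the hazard (the value and bit position are examples only):

    #include <stdio.h>

    int main(void)
    {
            unsigned int tsdelta = 0x80000000; /* example: flag bit 31 set */
            unsigned int mask    = 0x80000000;
            unsigned char flag;

            flag = (unsigned char)(tsdelta & mask);   /* truncates to 0 */
            printf("raw: %u\n", flag);
            flag = (unsigned char)!!(tsdelta & mask); /* normalized to 1 */
            printf("normalized: %u\n", flag);
            return 0;
    }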
struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_dup_acks;
@@ -584,6 +586,19 @@ struct bnxt_tpa_info {
u32 metadata;
enum pkt_hash_types hash_type;
u32 rss_hash;
+ u32 hdr_info;
+
+#define BNXT_TPA_L4_SIZE(hdr_info) \
+ (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32)
+
+#define BNXT_TPA_INNER_L3_OFF(hdr_info) \
+ (((hdr_info) >> 18) & 0x1ff)
+
+#define BNXT_TPA_INNER_L2_OFF(hdr_info) \
+ (((hdr_info) >> 9) & 0x1ff)
+
+#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
+ ((hdr_info) & 0x1ff)
};
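
Per the four macros above, hdr_info packs: bits 31:27 the L4 size (a zero field decoding as 32), bits 26:18 the inner L3 offset, bits 17:9 the inner L2 offset, and bits 8:0 the outer L3 offset. A quick decode of a made-up value to check the shifts:

    #include <stdio.h>

    #define BNXT_TPA_L4_SIZE(hdr_info) \
            (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32)
    #define BNXT_TPA_INNER_L3_OFF(hdr_info) (((hdr_info) >> 18) & 0x1ff)
    #define BNXT_TPA_INNER_L2_OFF(hdr_info) (((hdr_info) >> 9) & 0x1ff)
    #define BNXT_TPA_OUTER_L3_OFF(hdr_info) ((hdr_info) & 0x1ff)

    int main(void)
    {
            /* made-up sample: l4=20, inner l3=50, inner l2=36, outer l3=14 */
            unsigned int hdr_info = (20u << 27) | (50u << 18) | (36u << 9) | 14u;

            printf("l4=%u in_l3=%u in_l2=%u out_l3=%u\n",
                   BNXT_TPA_L4_SIZE(hdr_info),
                   BNXT_TPA_INNER_L3_OFF(hdr_info),
                   BNXT_TPA_INNER_L2_OFF(hdr_info),
                   BNXT_TPA_OUTER_L3_OFF(hdr_info));
            /* prints: l4=20 in_l3=50 in_l2=36 out_l3=14 */
            return 0;
    }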
struct bnxt_rx_ring_info {
@@ -739,8 +754,8 @@ struct bnxt_vf_info {
struct bnxt_pf_info {
#define BNXT_FIRST_PF_FID 1
#define BNXT_FIRST_VF_FID 128
- u32 fw_fid;
- u8 port_id;
+ u16 fw_fid;
+ u16 port_id;
u8 mac_addr[ETH_ALEN];
u16 max_rsscos_ctxs;
u16 max_cp_rings;
@@ -835,6 +850,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+ u16 support_auto_speeds;
u16 lp_auto_link_speeds;
u16 force_link_speed;
u32 preemphasis;
@@ -873,6 +889,44 @@ struct bnxt {
void __iomem *bar2;
u32 reg_base;
+ u16 chip_num;
+#define CHIP_NUM_57301 0x16c8
+#define CHIP_NUM_57302 0x16c9
+#define CHIP_NUM_57304 0x16ca
+#define CHIP_NUM_57402 0x16d0
+#define CHIP_NUM_57404 0x16d1
+#define CHIP_NUM_57406 0x16d2
+
+#define CHIP_NUM_57311 0x16ce
+#define CHIP_NUM_57312 0x16cf
+#define CHIP_NUM_57314 0x16df
+#define CHIP_NUM_57412 0x16d6
+#define CHIP_NUM_57414 0x16d7
+#define CHIP_NUM_57416 0x16d8
+#define CHIP_NUM_57417 0x16d9
+
+#define BNXT_CHIP_NUM_5730X(chip_num) \
+ ((chip_num) >= CHIP_NUM_57301 && \
+ (chip_num) <= CHIP_NUM_57304)
+
+#define BNXT_CHIP_NUM_5740X(chip_num) \
+ ((chip_num) >= CHIP_NUM_57402 && \
+ (chip_num) <= CHIP_NUM_57406)
+
+#define BNXT_CHIP_NUM_5731X(chip_num) \
+ ((chip_num) == CHIP_NUM_57311 || \
+ (chip_num) == CHIP_NUM_57312 || \
+ (chip_num) == CHIP_NUM_57314)
+
+#define BNXT_CHIP_NUM_5741X(chip_num) \
+ ((chip_num) >= CHIP_NUM_57412 && \
+ (chip_num) <= CHIP_NUM_57417)
+
+#define BNXT_CHIP_NUM_57X0X(chip_num) \
+ (BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
+
+#define BNXT_CHIP_NUM_57X1X(chip_num) \
+ (BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num))
struct net_device *dev;
struct pci_dev *pdev;
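
These family predicates are what the bnxt_init_one() hunk earlier in this patch consults to pick between bnxt_gro_func_5730x and bnxt_gro_func_5731x. A standalone sanity check of the ranges (the asserts mirror that selection):

    #include <assert.h>

    #define CHIP_NUM_57301 0x16c8
    #define CHIP_NUM_57311 0x16ce
    #define CHIP_NUM_57312 0x16cf
    #define CHIP_NUM_57314 0x16df
    #define CHIP_NUM_57412 0x16d6
    #define CHIP_NUM_57417 0x16d9

    #define BNXT_CHIP_NUM_5731X(n) \
            ((n) == CHIP_NUM_57311 || (n) == CHIP_NUM_57312 || \
             (n) == CHIP_NUM_57314)
    #define BNXT_CHIP_NUM_5741X(n) \
            ((n) >= CHIP_NUM_57412 && (n) <= CHIP_NUM_57417)
    #define BNXT_CHIP_NUM_57X1X(n) \
            (BNXT_CHIP_NUM_5731X(n) || BNXT_CHIP_NUM_5741X(n))

    int main(void)
    {
            assert(BNXT_CHIP_NUM_57X1X(CHIP_NUM_57312));  /* -> gro_func_5731x */
            assert(!BNXT_CHIP_NUM_57X1X(CHIP_NUM_57301)); /* -> gro_func_5730x */
            return 0;
    }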
@@ -907,12 +961,17 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#define BNXT_NPAR(bp) ((bp)->port_partition_type)
+#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp))
struct bnxt_napi **bnapi;
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_tx_ring_info *tx_ring;
+ struct sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, int,
+ struct sk_buff *);
+
u32 rx_buf_size;
 u32 rx_buf_use_size; /* usable size */
u32 rx_ring_size;
@@ -959,6 +1018,7 @@ struct bnxt {
unsigned long state;
#define BNXT_STATE_OPEN 0
#define BNXT_STATE_IN_SP_TASK 1
+#define BNXT_STATE_FN_RST_DONE 2
struct bnxt_irq *irq_tbl;
u8 mac_addr[ETH_ALEN];
@@ -991,8 +1051,10 @@ struct bnxt {
__be16 vxlan_port;
u8 vxlan_port_cnt;
__le16 vxlan_fw_dst_port_id;
+ __be16 nge_port;
u8 nge_port_cnt;
__le16 nge_fw_dst_port_id;
+ u8 port_partition_type;
u16 rx_coal_ticks;
u16 rx_coal_ticks_irq;
@@ -1005,6 +1067,11 @@ struct bnxt {
#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
+ u32 stats_coal_ticks;
+#define BNXT_DEF_STATS_COAL_TICKS 1000000
+#define BNXT_MIN_STATS_COAL_TICKS 250000
+#define BNXT_MAX_STATS_COAL_TICKS 1000000
+
struct work_struct sp_task;
unsigned long sp_event;
#define BNXT_RX_MASK_SP_EVENT 0
@@ -1018,6 +1085,9 @@ struct bnxt {
#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
#define BNXT_PERIODIC_STATS_SP_EVENT 9
#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10
+#define BNXT_RESET_TASK_SILENT_SP_EVENT 11
+#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12
+#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index a38cb047b540..0f7dd861ab4d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -56,6 +56,8 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
+ coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
+
return 0;
}
@@ -63,6 +65,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct bnxt *bp = netdev_priv(dev);
+ bool update_stats = false;
int rc = 0;
bp->rx_coal_ticks = coal->rx_coalesce_usecs;
@@ -76,8 +79,26 @@ static int bnxt_set_coalesce(struct net_device *dev,
bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
- if (netif_running(dev))
- rc = bnxt_hwrm_set_coal(bp);
+ if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
+ u32 stats_ticks = coal->stats_block_coalesce_usecs;
+
+ stats_ticks = clamp_t(u32, stats_ticks,
+ BNXT_MIN_STATS_COAL_TICKS,
+ BNXT_MAX_STATS_COAL_TICKS);
+ stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
+ bp->stats_coal_ticks = stats_ticks;
+ update_stats = true;
+ }
+
+ if (netif_running(dev)) {
+ if (update_stats) {
+ rc = bnxt_close_nic(bp, true, false);
+ if (!rc)
+ rc = bnxt_open_nic(bp, true, false);
+ } else {
+ rc = bnxt_hwrm_set_coal(bp);
+ }
+ }
return rc;
}
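
Two things are worth noting in the hunk above: the accepted stats periods collapse to 250000, 500000, 750000 or 1000000 us (clamp to the min/max, then round down to a multiple of the minimum), and unlike the ring coalescing values the stats period is only picked up at open time, hence the bnxt_close_nic()/bnxt_open_nic() bounce. A userspace re-implementation of the arithmetic:

    #include <stdio.h>

    #define BNXT_MIN_STATS_COAL_TICKS 250000
    #define BNXT_MAX_STATS_COAL_TICKS 1000000

    /* Re-implements the clamp_t() + rounddown() sequence above. */
    static unsigned int stats_ticks_fixup(unsigned int req)
    {
            if (req < BNXT_MIN_STATS_COAL_TICKS)
                    req = BNXT_MIN_STATS_COAL_TICKS;
            if (req > BNXT_MAX_STATS_COAL_TICKS)
                    req = BNXT_MAX_STATS_COAL_TICKS;
            return req - (req % BNXT_MIN_STATS_COAL_TICKS);
    }

    int main(void)
    {
            printf("%u\n", stats_ticks_fixup(600000));  /* 500000 */
            printf("%u\n", stats_ticks_fixup(100000));  /* 250000 */
            printf("%u\n", stats_ticks_fixup(2000000)); /* 1000000 */
            return 0;
    }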
@@ -628,7 +649,66 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
return speed_mask;
}
-static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
+#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
+{ \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 100baseT_Full); \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 1000baseT_Full); \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 10000baseT_Full); \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 25000baseCR_Full); \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 40000baseCR4_Full);\
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 50000baseCR2_Full);\
+ if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ Pause); \
+ if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \
+ ethtool_link_ksettings_add_link_mode( \
+ lk_ksettings, name, Asym_Pause);\
+ } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ Asym_Pause); \
+ } \
+}
+
+#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \
+{ \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 100baseT_Full) || \
+ ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 100baseT_Half)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 1000baseT_Full) || \
+ ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 1000baseT_Half)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 10000baseT_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 25000baseCR_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 40000baseCR4_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 50000baseCR2_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \
+}
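
These two helpers are macros rather than functions because the link-mode set is selected by name: ethtool_link_ksettings_add_link_mode() pastes its second argument into a struct member at compile time, so "supported", "advertising" and "lp_advertising" cannot travel through an ordinary function parameter. Roughly (a paraphrase of the upstream helper; verify against include/linux/ethtool.h of the tree you build with):

    #define ethtool_link_ksettings_add_link_mode(ptr, name, mode) \
            __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT,         \
                      (ptr)->link_modes.name)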
+
+static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
{
u16 fw_speeds = link_info->auto_link_speeds;
u8 fw_pause = 0;
@@ -636,10 +716,11 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
fw_pause = link_info->auto_pause_setting;
- return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause);
+ BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
}
-static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info)
+static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
{
u16 fw_speeds = link_info->lp_auto_link_speeds;
u8 fw_pause = 0;
@@ -647,16 +728,24 @@ static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info)
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
fw_pause = link_info->lp_pause;
- return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause);
+ BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
+ lp_advertising);
}
-static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
{
u16 fw_speeds = link_info->support_speeds;
- u32 supported;
- supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
- return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
+
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Asym_Pause);
+
+ if (link_info->support_auto_speeds)
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Autoneg);
}
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
@@ -683,65 +772,62 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
}
}
-static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnxt_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *lk_ksettings)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- u16 ethtool_speed;
-
- cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
+ struct ethtool_link_settings *base = &lk_ksettings->base;
+ u32 ethtool_speed;
- if (link_info->auto_link_speeds)
- cmd->supported |= SUPPORTED_Autoneg;
+ ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+ bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
+ ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
if (link_info->autoneg) {
- cmd->advertising =
- bnxt_fw_to_ethtool_advertised_spds(link_info);
- cmd->advertising |= ADVERTISED_Autoneg;
- cmd->autoneg = AUTONEG_ENABLE;
+ bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings,
+ advertising, Autoneg);
+ base->autoneg = AUTONEG_ENABLE;
if (link_info->phy_link_status == BNXT_LINK_LINK)
- cmd->lp_advertising =
- bnxt_fw_to_ethtool_lp_adv(link_info);
+ bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
if (!netif_carrier_ok(dev))
- cmd->duplex = DUPLEX_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
- cmd->duplex = DUPLEX_FULL;
+ base->duplex = DUPLEX_FULL;
else
- cmd->duplex = DUPLEX_HALF;
+ base->duplex = DUPLEX_HALF;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->advertising = 0;
+ base->autoneg = AUTONEG_DISABLE;
ethtool_speed =
bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
- cmd->duplex = DUPLEX_HALF;
+ base->duplex = DUPLEX_HALF;
if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
- cmd->duplex = DUPLEX_FULL;
+ base->duplex = DUPLEX_FULL;
}
- ethtool_cmd_speed_set(cmd, ethtool_speed);
+ base->speed = ethtool_speed;
- cmd->port = PORT_NONE;
+ base->port = PORT_NONE;
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
- cmd->port = PORT_TP;
- cmd->supported |= SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_TP;
+ base->port = PORT_TP;
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ TP);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
+ TP);
} else {
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ FIBRE);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
+ FIBRE);
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
- cmd->port = PORT_DA;
+ base->port = PORT_DA;
else if (link_info->media_type ==
PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
- cmd->port = PORT_FIBRE;
+ base->port = PORT_FIBRE;
}
-
- if (link_info->transceiver ==
- PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL)
- cmd->transceiver = XCVR_INTERNAL;
- else
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->phy_address = link_info->phy_addr;
+ base->phy_address = link_info->phy_addr;
return 0;
}
@@ -815,37 +901,25 @@ u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
return fw_speed_mask;
}
-static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnxt_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *lk_ksettings)
{
- int rc = 0;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
+ const struct ethtool_link_settings *base = &lk_ksettings->base;
u32 speed, fw_advertising = 0;
bool set_pause = false;
+ int rc = 0;
- if (BNXT_VF(bp))
- return rc;
-
- if (cmd->autoneg == AUTONEG_ENABLE) {
- u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info);
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
- if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg |
- ADVERTISED_TP | ADVERTISED_FIBRE)) {
- netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
- cmd->advertising);
- rc = -EINVAL;
- goto set_setting_exit;
- }
- fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
- if (fw_advertising & ~link_info->support_speeds) {
- netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
- cmd->advertising);
- rc = -EINVAL;
- goto set_setting_exit;
- }
+ if (base->autoneg == AUTONEG_ENABLE) {
+ BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
+ advertising);
link_info->autoneg |= BNXT_AUTONEG_SPEED;
if (!fw_advertising)
- link_info->advertising = link_info->support_speeds;
+ link_info->advertising = link_info->support_auto_speeds;
else
link_info->advertising = fw_advertising;
/* any change to autoneg will cause link change, therefore the
@@ -863,16 +937,12 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
rc = -EINVAL;
goto set_setting_exit;
}
- /* TODO: currently don't support half duplex */
- if (cmd->duplex == DUPLEX_HALF) {
+ if (base->duplex == DUPLEX_HALF) {
netdev_err(dev, "HALF DUPLEX is not supported!\n");
rc = -EINVAL;
goto set_setting_exit;
}
- /* If received a request for an unknown duplex, assume full*/
- if (cmd->duplex == DUPLEX_UNKNOWN)
- cmd->duplex = DUPLEX_FULL;
- speed = ethtool_cmd_speed(cmd);
+ speed = base->speed;
fw_speed = bnxt_get_fw_speed(dev, speed);
if (!fw_speed) {
rc = -EINVAL;
@@ -911,8 +981,8 @@ static int bnxt_set_pauseparam(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (BNXT_VF(bp))
- return rc;
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
if (epause->autoneg) {
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
@@ -1010,6 +1080,8 @@ static int bnxt_firmware_reset(struct net_device *dev,
case BNX_DIR_TYPE_APE_FW:
case BNX_DIR_TYPE_APE_PATCH:
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
+ /* Self-reset APE upon next PCIe reset: */
+ req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_KONG_FW:
case BNX_DIR_TYPE_KONG_PATCH:
@@ -1043,9 +1115,27 @@ static int bnxt_flash_firmware(struct net_device *dev,
case BNX_DIR_TYPE_BOOTCODE_2:
code_type = CODE_BOOT;
break;
+ case BNX_DIR_TYPE_CHIMP_PATCH:
+ code_type = CODE_CHIMP_PATCH;
+ break;
case BNX_DIR_TYPE_APE_FW:
code_type = CODE_MCTP_PASSTHRU;
break;
+ case BNX_DIR_TYPE_APE_PATCH:
+ code_type = CODE_APE_PATCH;
+ break;
+ case BNX_DIR_TYPE_KONG_FW:
+ code_type = CODE_KONG_FW;
+ break;
+ case BNX_DIR_TYPE_KONG_PATCH:
+ code_type = CODE_KONG_PATCH;
+ break;
+ case BNX_DIR_TYPE_BONO_FW:
+ code_type = CODE_BONO_FW;
+ break;
+ case BNX_DIR_TYPE_BONO_PATCH:
+ code_type = CODE_BONO_PATCH;
+ break;
default:
netdev_err(dev, "Unsupported directory entry type: %u\n",
dir_type);
@@ -1100,6 +1190,8 @@ static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
case BNX_DIR_TYPE_APE_PATCH:
case BNX_DIR_TYPE_KONG_FW:
case BNX_DIR_TYPE_KONG_PATCH:
+ case BNX_DIR_TYPE_BONO_FW:
+ case BNX_DIR_TYPE_BONO_PATCH:
return true;
}
@@ -1137,7 +1229,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
const struct firmware *fw;
int rc;
- if (bnxt_dir_type_is_executable(dir_type) == false)
+ if (dir_type != BNX_DIR_TYPE_UPDATE &&
+ bnxt_dir_type_is_executable(dir_type) == false)
return -EINVAL;
rc = request_firmware(&fw, filename, &dev->dev);
@@ -1433,8 +1526,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
int rc = 0;
- if (BNXT_VF(bp))
- return 0;
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
if (!(bp->flags & BNXT_FLAG_EEE_CAP))
return -EOPNOTSUPP;
@@ -1618,8 +1711,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
}
const struct ethtool_ops bnxt_ethtool_ops = {
- .get_settings = bnxt_get_settings,
- .set_settings = bnxt_set_settings,
+ .get_link_ksettings = bnxt_get_link_ksettings,
+ .set_link_ksettings = bnxt_set_link_ksettings,
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
index 461675caaacd..82bf44ab811b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -70,6 +70,7 @@ enum SUPPORTED_CODE {
CODE_KONG_PATCH, /* 18 - KONG Patch firmware */
CODE_BONO_FW, /* 19 - BONO firmware */
CODE_BONO_PATCH, /* 20 - BONO Patch firmware */
+ CODE_CHIMP_PATCH, /* 21 - ChiMP Patch firmware */
MAX_CODE_TYPE,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 05e3c49a7677..517567f6d651 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -105,6 +105,7 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE (0x7UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
@@ -484,12 +485,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
};
-/* HW Resource Manager Specification 1.2.2 */
+/* HW Resource Manager Specification 1.3.0 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 2
-#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_MINOR 3
+#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_STR "1.2.2"
+#define HWRM_VERSION_STR "1.3.0"
/*
 * Following is the signature for an HWRM message field that indicates not
 * applicable (all F's). Need to cast it to the size of the field if needed.
@@ -611,6 +612,9 @@ struct cmd_nums {
#define HWRM_FWD_RESP (0xd2UL)
#define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
#define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
+ #define HWRM_WOL_FILTER_ALLOC (0xf0UL)
+ #define HWRM_WOL_FILTER_FREE (0xf1UL)
+ #define HWRM_WOL_FILTER_QCFG (0xf2UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
@@ -1020,6 +1024,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
#define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1066,8 +1074,9 @@ struct hwrm_func_qcfg_output {
__le16 fid;
__le16 port_id;
__le16 vlan;
- u8 unused_0;
- u8 unused_1;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1086,23 +1095,23 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0)
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0)
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0)
- u8 unused_2;
+ u8 unused_0;
__le16 dflt_vnic_id;
- u8 unused_3;
- u8 unused_4;
+ u8 unused_1;
+ u8 unused_2;
__le32 min_bw;
__le32 max_bw;
u8 evb_mode;
#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0)
#define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0)
#define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0)
- u8 unused_5;
- __le16 unused_6;
+ u8 unused_3;
+ __le16 unused_4;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
+ u8 unused_5;
+ u8 unused_6;
u8 unused_7;
- u8 unused_8;
- u8 unused_9;
u8 valid;
};
@@ -1410,8 +1419,8 @@ struct hwrm_func_buf_rgtr_input {
#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0)
#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0)
#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x16UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x17UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x15UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x16UL << 0)
#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0)
__le16 req_buf_len;
__le16 resp_buf_len;
@@ -1499,6 +1508,12 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
__le32 enables;
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
@@ -1815,13 +1830,22 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- __le32 unused_1;
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ u8 unused_1;
+ u8 unused_2;
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
- __le32 unused_2;
- u8 unused_3;
+ __le32 unused_3;
u8 unused_4;
u8 unused_5;
+ u8 unused_6;
u8 valid;
};
@@ -1842,6 +1866,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -2127,6 +2153,7 @@ struct hwrm_port_phy_i2c_read_output {
u8 valid;
};
+/* hwrm_queue_qportcfg */
/* Input (24 bytes) */
struct hwrm_queue_qportcfg_input {
__le16 req_type;
@@ -2382,7 +2409,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
u8 queue_id1;
@@ -2392,7 +2419,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id1_pri_lvl;
u8 queue_id1_bw_weight;
u8 queue_id2;
@@ -2402,7 +2429,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id2_pri_lvl;
u8 queue_id2_bw_weight;
u8 queue_id3;
@@ -2412,7 +2439,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id3_pri_lvl;
u8 queue_id3_bw_weight;
u8 queue_id4;
@@ -2422,7 +2449,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id4_pri_lvl;
u8 queue_id4_bw_weight;
u8 queue_id5;
@@ -2432,7 +2459,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id5_pri_lvl;
u8 queue_id5_bw_weight;
u8 queue_id6;
@@ -2442,7 +2469,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id6_pri_lvl;
u8 queue_id6_bw_weight;
u8 queue_id7;
@@ -2452,7 +2479,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id7_pri_lvl;
u8 queue_id7_bw_weight;
u8 unused_1[5];
@@ -3150,7 +3177,7 @@ struct hwrm_cfa_l2_filter_cfg_output {
};
/* hwrm_cfa_l2_set_rx_mask */
-/* Input (40 bytes) */
+/* Input (56 bytes) */
struct hwrm_cfa_l2_set_rx_mask_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3165,9 +3192,15 @@ struct hwrm_cfa_l2_set_rx_mask_input {
#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
#define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
__le64 mc_tbl_addr;
__le32 num_mc_entries;
__le32 unused_0;
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ __le32 unused_1;
};
/* Output (16 bytes) */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 40a7b0e09612..73f2249555b5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -13,6 +13,7 @@
enum bnxt_nvm_directory_type {
BNX_DIR_TYPE_UNUSED = 0,
BNX_DIR_TYPE_PKG_LOG = 1,
+ BNX_DIR_TYPE_UPDATE = 2,
BNX_DIR_TYPE_CHIMP_PATCH = 3,
BNX_DIR_TYPE_BOOTCODE = 4,
BNX_DIR_TYPE_VPD = 5,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 363884dd9e8a..50d2007a2640 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -143,6 +143,9 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
u16 vlan_tag;
int rc;
+ if (bp->hwrm_spec_code < 0x10201)
+ return -ENOTSUPP;
+
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
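
The version gate above relies on bp->hwrm_spec_code packing the HWRM interface version as (maj << 16 | min << 8 | upd); that packing is an assumption drawn from bnxt_hwrm_ver_get() and is consistent with the HWRM_VERSION_* bump to 1.3.0 earlier in this series. So 0x10201 reads as spec 1.2.1, the first revision that can set a VF VLAN. A quick decode:

    #include <stdio.h>

    int main(void)
    {
            unsigned int spec = 0x10201;

            printf("HWRM spec %u.%u.%u\n",
                   spec >> 16, (spec >> 8) & 0xff, spec & 0xff); /* 1.2.1 */
            return 0;
    }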
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 541456398dfb..76ed6df0fe53 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -450,34 +450,6 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
-static int bcmgenet_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int bcmgenet_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
static int bcmgenet_set_rx_csum(struct net_device *dev,
netdev_features_t wanted)
{
@@ -941,7 +913,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
e->eee_active = p->eee_active;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
- return phy_ethtool_get_eee(priv->phydev, e);
+ return phy_ethtool_get_eee(dev->phydev, e);
}
static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
@@ -958,7 +930,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
if (!p->eee_enabled) {
bcmgenet_eee_enable_set(dev, false);
} else {
- ret = phy_init_eee(priv->phydev, 0);
+ ret = phy_init_eee(dev->phydev, 0);
if (ret) {
netif_err(priv, hw, dev, "EEE initialization failed\n");
return ret;
@@ -968,14 +940,12 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
bcmgenet_eee_enable_set(dev, true);
}
- return phy_ethtool_set_eee(priv->phydev, e);
+ return phy_ethtool_set_eee(dev->phydev, e);
}
static int bcmgenet_nway_reset(struct net_device *dev)
{
- struct bcmgenet_priv *priv = netdev_priv(dev);
-
- return genphy_restart_aneg(priv->phydev);
+ return genphy_restart_aneg(dev->phydev);
}
/* standard ethtool support functions. */
@@ -983,8 +953,6 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
.get_strings = bcmgenet_get_strings,
.get_sset_count = bcmgenet_get_sset_count,
.get_ethtool_stats = bcmgenet_get_ethtool_stats,
- .get_settings = bcmgenet_get_settings,
- .set_settings = bcmgenet_set_settings,
.get_drvinfo = bcmgenet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_msglevel = bcmgenet_get_msglevel,
@@ -996,18 +964,21 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
.nway_reset = bcmgenet_nway_reset,
.get_coalesce = bcmgenet_get_coalesce,
.set_coalesce = bcmgenet_set_coalesce,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
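
The two generic callbacks work because phylib stores the attached PHY in net_device->phydev, which is also what lets this patch delete priv->phydev everywhere else. Their shape is roughly the following (paraphrased from drivers/net/phy/phy.c of this era, not copied from this patch):

    int phy_ethtool_get_link_ksettings(struct net_device *ndev,
                                       struct ethtool_link_ksettings *cmd)
    {
            struct phy_device *phydev = ndev->phydev;

            if (!phydev)
                    return -ENODEV;

            return phy_ethtool_ksettings_get(phydev, cmd);
    }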
/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
+ struct net_device *ndev = priv->dev;
int ret = 0;
u32 reg;
switch (mode) {
case GENET_POWER_CABLE_SENSE:
- phy_detach(priv->phydev);
+ phy_detach(ndev->phydev);
break;
case GENET_POWER_WOL_MAGIC:
@@ -1068,7 +1039,6 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct bcmgenet_priv *priv = netdev_priv(dev);
int val = 0;
if (!netif_running(dev))
@@ -1078,10 +1048,10 @@ static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- if (!priv->phydev)
+ if (!dev->phydev)
val = -ENODEV;
else
- val = phy_mii_ioctl(priv->phydev, rq, cmd);
+ val = phy_mii_ioctl(dev->phydev, rq, cmd);
break;
default:
@@ -2464,6 +2434,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
{
struct bcmgenet_priv *priv = container_of(
work, struct bcmgenet_priv, bcmgenet_irq_work);
+ struct net_device *ndev = priv->dev;
netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
@@ -2476,7 +2447,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
/* Link UP/DOWN event */
if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
- phy_mac_interrupt(priv->phydev,
+ phy_mac_interrupt(ndev->phydev,
!!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
}
@@ -2838,7 +2809,7 @@ static void bcmgenet_netif_start(struct net_device *dev)
/* Monitor link interrupts now */
bcmgenet_link_intr_enable(priv);
- phy_start(priv->phydev);
+ phy_start(dev->phydev);
}
static int bcmgenet_open(struct net_device *dev)
@@ -2937,7 +2908,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
netif_tx_stop_all_queues(dev);
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
bcmgenet_intr_disable(priv);
bcmgenet_disable_rx_napi(priv);
bcmgenet_disable_tx_napi(priv);
@@ -2963,7 +2934,7 @@ static int bcmgenet_close(struct net_device *dev)
bcmgenet_netif_stop(dev);
/* Really kill the PHY state machine and disconnect from it */
- phy_disconnect(priv->phydev);
+ phy_disconnect(dev->phydev);
/* Disable MAC receive */
umac_enable_set(priv, CMD_RX_EN, false);
@@ -3522,7 +3493,7 @@ static int bcmgenet_suspend(struct device *d)
bcmgenet_netif_stop(dev);
- phy_suspend(priv->phydev);
+ phy_suspend(dev->phydev);
netif_device_detach(dev);
@@ -3586,7 +3557,7 @@ static int bcmgenet_resume(struct device *d)
if (priv->wolopts)
clk_disable_unprepare(priv->clk_wol);
- phy_init_hw(priv->phydev);
+ phy_init_hw(dev->phydev);
/* Speed settings must be restored */
bcmgenet_mii_config(priv->dev);
@@ -3619,7 +3590,7 @@ static int bcmgenet_resume(struct device *d)
netif_device_attach(dev);
- phy_resume(priv->phydev);
+ phy_resume(dev->phydev);
if (priv->eee.eee_enabled)
bcmgenet_eee_enable_set(dev, true);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1e2dc34d331a..0f0868c56f05 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -597,7 +597,6 @@ struct bcmgenet_priv {
/* MDIO bus variables */
wait_queue_head_t wq;
- struct phy_device *phydev;
bool internal_phy;
struct device_node *phy_dn;
struct device_node *mdio_dn;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 457c3bc8cfff..e907acd81da9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -86,7 +86,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
void bcmgenet_mii_setup(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
u32 reg, cmd_bits = 0;
bool status_changed = false;
@@ -183,9 +183,9 @@ void bcmgenet_mii_reset(struct net_device *dev)
if (GENET_IS_V4(priv))
return;
- if (priv->phydev) {
- phy_init_hw(priv->phydev);
- phy_start_aneg(priv->phydev);
+ if (dev->phydev) {
+ phy_init_hw(dev->phydev);
+ phy_start_aneg(dev->phydev);
}
}
@@ -236,6 +236,7 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
+ struct net_device *ndev = priv->dev;
u32 reg;
/* Speed settings are set in bcmgenet_mii_setup() */
@@ -244,14 +245,14 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
- fixed_phy_set_link_update(priv->phydev,
+ fixed_phy_set_link_update(ndev->phydev,
bcmgenet_fixed_phy_link_update);
}
int bcmgenet_mii_config(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
struct device *kdev = &priv->pdev->dev;
const char *phy_name = NULL;
u32 id_mode_dis = 0;
@@ -302,7 +303,7 @@ int bcmgenet_mii_config(struct net_device *dev)
* capabilities, use that knowledge to also configure the
* Reverse MII interface correctly.
*/
- if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
+ if ((phydev->supported & PHY_BASIC_FEATURES) ==
PHY_BASIC_FEATURES)
port_ctrl = PORT_MODE_EXT_RVMII_25;
else
@@ -371,7 +372,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
return -ENODEV;
}
} else {
- phydev = priv->phydev;
+ phydev = dev->phydev;
phydev->dev_flags = phy_flags;
ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
@@ -382,8 +383,6 @@ int bcmgenet_mii_probe(struct net_device *dev)
}
}
- priv->phydev = phydev;
-
 /* Configure the port multiplexer based on the probed PHY device, since
 * reading the 'max-speed' property determines the maximum supported
 * PHY speed, which bcmgenet_mii_config() needs in order to configure
@@ -391,7 +390,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
*/
ret = bcmgenet_mii_config(dev);
if (ret) {
- phy_disconnect(priv->phydev);
+ phy_disconnect(phydev);
return ret;
}
@@ -401,7 +400,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
* Ethernet MAC ISRs
*/
if (priv->internal_phy)
- priv->phydev->irq = PHY_IGNORE_INTERRUPT;
+ phydev->irq = PHY_IGNORE_INTERRUPT;
return 0;
}
@@ -606,7 +605,6 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
}
- priv->phydev = phydev;
priv->phy_interface = pd->phy_interface;
return 0;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index cb07d95e3dd9..89c0cfa9719f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -304,7 +304,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
static void macb_handle_link_change(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int status_change = 0;
@@ -414,7 +414,6 @@ static int macb_mii_probe(struct net_device *dev)
bp->link = 0;
bp->speed = 0;
bp->duplex = -1;
- bp->phy_dev = phydev;
return 0;
}
@@ -1886,7 +1885,7 @@ static int macb_open(struct net_device *dev)
netif_carrier_off(dev);
 /* if the PHY is not yet registered, retry later */
- if (!bp->phy_dev)
+ if (!dev->phydev)
return -EAGAIN;
/* RX buffers initialization */
@@ -1905,7 +1904,7 @@ static int macb_open(struct net_device *dev)
macb_init_hw(bp);
/* schedule a link state check */
- phy_start(bp->phy_dev);
+ phy_start(dev->phydev);
netif_tx_start_all_queues(dev);
@@ -1920,8 +1919,8 @@ static int macb_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
napi_disable(&bp->napi);
- if (bp->phy_dev)
- phy_stop(bp->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -2092,28 +2091,6 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
return nstat;
}
-static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static int macb_get_regs_len(struct net_device *netdev)
{
return MACB_GREGS_NBR * sizeof(u32);
@@ -2186,19 +2163,17 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops macb_ethtool_ops = {
- .get_settings = macb_get_settings,
- .set_settings = macb_set_settings,
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct ethtool_ops gem_ethtool_ops = {
- .get_settings = macb_get_settings,
- .set_settings = macb_set_settings,
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
@@ -2206,12 +2181,13 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -2570,7 +2546,7 @@ static int at91ether_open(struct net_device *dev)
MACB_BIT(HRESP));
/* schedule a link state check */
- phy_start(lp->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
@@ -3010,7 +2986,7 @@ static int macb_probe(struct platform_device *pdev)
if (err)
goto err_out_free_netdev;
- phydev = bp->phy_dev;
+ phydev = dev->phydev;
netif_carrier_off(dev);
@@ -3029,7 +3005,7 @@ static int macb_probe(struct platform_device *pdev)
return 0;
err_out_unregister_mdio:
- phy_disconnect(bp->phy_dev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
@@ -3057,8 +3033,8 @@ static int macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
- if (bp->phy_dev)
- phy_disconnect(bp->phy_dev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 8a13824ef802..36893d8958d4 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -823,7 +823,6 @@ struct macb {
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
int link;
int speed;
int duplex;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index 8ad7425f89bf..c03d37016a48 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -19,26 +19,16 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
@@ -74,9 +64,9 @@ void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
u32 val;
pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
- if (val & 0x000f0000) {
+ if (val & 0x000c0000) {
dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
- val & 0x000f0000);
+ val & 0x000c0000);
}
val |= 0xf; /* Enable Link error reporting */
@@ -229,7 +219,7 @@ void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
 /* Select packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
- /* / Select ES,RO,NS setting from register for Output Queue Packet
+ /* Select ES, RO, NS setting from register for Output Queue Packet
* Address
*/
octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);
@@ -367,7 +357,8 @@ void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
{
- u32 mask, i, loop = HZ;
+ int i;
+ u32 mask, loop = HZ;
u32 d32;
/* Reset the Enable bits for Input Queues. */
@@ -376,7 +367,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
/* Wait until hardware indicates that the queues are out of reset. */
- mask = oct->io_qmask.iq;
+ mask = (u32)oct->io_qmask.iq;
d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
while (((d32 & mask) != mask) && loop--) {
d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
@@ -384,8 +375,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
}
/* Reset the doorbell register for each Input queue. */
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct->io_qmask.iq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
@@ -398,7 +389,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
/* Wait until hardware indicates that the queues are out of reset. */
loop = HZ;
- mask = oct->io_qmask.oq;
+ mask = (u32)oct->io_qmask.oq;
d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
while (((d32 & mask) != mask) && loop--) {
d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
@@ -408,8 +399,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
/* Reset the doorbell register for each Output queue. */
/* for (i = 0; i < oct->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));
@@ -429,16 +420,16 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
{
- u32 i;
+ int i;
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct->io_qmask.iq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
oct->fn_list.setup_iq_regs(oct, i);
}
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
oct->fn_list.setup_oq_regs(oct, i);
}
@@ -450,8 +441,8 @@ void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
oct->fn_list.enable_io_queues(oct);
/* for (i = 0; i < oct->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
}
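
The 1UL -> 1ULL changes in these loops matter because the io_qmask fields appear to have been widened to 64-bit masks elsewhere in this series (hence the explicit (u32) casts above) and the queue index can now exceed 31; on a 32-bit kernel, 1UL << 40 is undefined behavior. A minimal demonstration:

    #include <assert.h>

    int main(void)
    {
            unsigned long long qmask = 1ULL << 40; /* example: queue 40 enabled */
            int i = 40;

            assert(qmask & (1ULL << i)); /* correct regardless of word size */
            /* (qmask & (1UL << i)) would be UB where long is 32 bits wide */
            return 0;
    }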
@@ -495,8 +486,7 @@ u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
}
u32
-lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
- struct octeon_instr_queue *iq)
+lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
u32 new_idx = readl(iq->inst_cnt_reg);
@@ -547,17 +537,18 @@ static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}
-void
+static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
CVM_CAST64(intr64));
}
-int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
+static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
struct octeon_droq *droq;
- u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb;
+ int oq_no;
+ u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
u32 droq_cnt_enb, droq_cnt_mask;
droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
@@ -573,12 +564,12 @@ int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
oct->droq_intr = 0;
/* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */
- for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
- if (!(droq_mask & (1 << oq_no)))
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
+ if (!(droq_mask & (1ULL << oq_no)))
continue;
droq = oct->droq[oq_no];
- pkt_count = octeon_droq_check_hw_for_pkts(oct, droq);
+ pkt_count = octeon_droq_check_hw_for_pkts(droq);
if (pkt_count) {
oct->droq_intr |= (1ULL << oq_no);
if (droq->ops.poll_mode) {
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index f77918779355..28c47224221a 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -82,8 +82,6 @@ void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
-void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64);
-int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct);
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
@@ -91,8 +89,7 @@ void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
u32
-lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
- struct octeon_instr_queue *iq);
+lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
void lio_cn6xxx_enable_interrupt(void *chip);
void lio_cn6xxx_disable_interrupt(void *chip);
void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 8e830d0c0754..29755bc68f12 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -19,28 +19,17 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
@@ -129,7 +118,7 @@ static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
}
-int lio_is_210nv(struct octeon_device *oct)
+static int lio_is_210nv(struct octeon_device *oct)
{
u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
index d4e1c9fb0bf2..ea7bdcce6044 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
@@ -28,6 +28,5 @@
#define __CN68XX_DEVICE_H__
int lio_setup_cn68xx_octeon_device(struct octeon_device *oct);
-int lio_is_210nv(struct octeon_device *oct);
#endif
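
Making lio_is_210nv() static in the previous hunk and deleting its prototype here go together: a helper used by a single translation unit should carry internal linkage, which keeps it out of the module symbol table and lets the compiler inline or discard it. A minimal sketch of the shape, with an invented name and an invented config bit:

	#include <linux/types.h>

	/* File-local helper: no header prototype, no exported symbol.
	 * Both the name and the tested bit are illustrative only.
	 */
	static bool demo_board_is_210nv(u64 qlm_cfg)
	{
		return (qlm_cfg >> 18) & 1;	/* hypothetical config bit */
	}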
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
index 38cddbd107b6..d45a0f4aaf1f 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
@@ -29,7 +29,6 @@
#ifndef __CN68XX_REGS_H__
#define __CN68XX_REGS_H__
-#include "cn66xx_regs.h"
/*###################### REQUEST QUEUE #########################*/
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 245c063ed4db..289eb8907922 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -19,13 +19,9 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
-#include <linux/ethtool.h>
-#include <linux/dma-mapping.h>
#include <linux/pci.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -36,9 +32,8 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
+
+static int octnet_get_link_stats(struct net_device *netdev);
struct oct_mdio_cmd_context {
int octeon_id;
@@ -71,34 +66,126 @@ enum {
INTERFACE_MODE_RXAUI,
INTERFACE_MODE_QSGMII,
INTERFACE_MODE_AGL,
+ INTERFACE_MODE_XLAUI,
+ INTERFACE_MODE_XFI,
+ INTERFACE_MODE_10G_KR,
+ INTERFACE_MODE_40G_KR4,
+ INTERFACE_MODE_MIXED,
};
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#define OCT_ETHTOOL_REGDUMP_LEN 4096
#define OCT_ETHTOOL_REGSVER 1
+/* statistics of PF */
+static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_errors", /*jabber_err+l2_err+frame_err */
+ "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */
+ "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd
+ *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop
+ */
+ "tx_dropped",
+
+ "tx_total_sent",
+ "tx_total_fwd",
+ "tx_err_pko",
+ "tx_err_link",
+ "tx_err_drop",
+
+ "tx_tso",
+ "tx_tso_packets",
+ "tx_tso_err",
+ "tx_vxlan",
+
+ "mac_tx_total_pkts",
+ "mac_tx_total_bytes",
+ "mac_tx_mcast_pkts",
+ "mac_tx_bcast_pkts",
+ "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */
+ "mac_tx_total_collisions",
+ "mac_tx_one_collision",
+	"mac_tx_multi_collision",
+	"mac_tx_max_collision_fail",
+	"mac_tx_max_deferral_fail",
+ "mac_tx_fifo_err",
+ "mac_tx_runts",
+
+ "rx_total_rcvd",
+ "rx_total_fwd",
+ "rx_jabber_err",
+ "rx_l2_err",
+ "rx_frame_err",
+ "rx_err_pko",
+ "rx_err_link",
+ "rx_err_drop",
+
+ "rx_vxlan",
+ "rx_vxlan_err",
+
+ "rx_lro_pkts",
+ "rx_lro_bytes",
+ "rx_total_lro",
+
+ "rx_lro_aborts",
+ "rx_lro_aborts_port",
+ "rx_lro_aborts_seq",
+ "rx_lro_aborts_tsval",
+ "rx_lro_aborts_timer",
+ "rx_fwd_rate",
+
+ "mac_rx_total_rcvd",
+ "mac_rx_bytes",
+ "mac_rx_total_bcst",
+ "mac_rx_total_mcst",
+ "mac_rx_runts",
+ "mac_rx_ctl_packets",
+ "mac_rx_fifo_err",
+ "mac_rx_dma_drop",
+ "mac_rx_fcs_err",
+
+ "link_state_changes",
+};
+
+/* statistics of host tx queue */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
- "Instr posted",
- "Instr processed",
- "Instr dropped",
- "Bytes Sent",
- "Sgentry_sent",
- "Inst cntreg",
- "Tx done",
- "Tx Iq busy",
- "Tx dropped",
- "Tx bytes",
+ "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/
+ "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
+ "dropped",
+ "iq_busy",
+ "sgentry_sent",
+
+ "fw_instr_posted",
+ "fw_instr_processed",
+ "fw_instr_dropped",
+ "fw_bytes_sent",
+
+ "tso",
+ "vxlan",
+ "txq_restart",
};
+/* statistics of host rx queue */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
- "OQ Pkts Received",
- "OQ Bytes Received",
- "Dropped no dispatch",
- "Dropped nomem",
- "Dropped toomany",
- "Stack RX cnt",
- "Stack RX Bytes",
- "RX dropped",
+ "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */
+ "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */
+ "dropped", /*oct->droq[oq_no]->stats.rx_dropped+
+ *oct->droq[oq_no]->stats.dropped_nodispatch+
+ *oct->droq[oq_no]->stats.dropped_toomany+
+ *oct->droq[oq_no]->stats.dropped_nomem
+ */
+ "dropped_nomem",
+ "dropped_toomany",
+ "fw_dropped",
+ "fw_pkts_received",
+ "fw_bytes_received",
+ "fw_dropped_nodispatch",
+
+ "vxlan",
+ "buffer_alloc_failure",
};
#define OCTNIC_NCMD_AUTONEG_ON 0x1
@@ -112,8 +199,9 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
linfo = &lio->linfo;
- if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
- linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
+ if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
ecmd->port = PORT_FIBRE;
ecmd->supported =
(SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
@@ -124,10 +212,11 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->autoneg = AUTONEG_DISABLE;
} else {
- dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n");
+ dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
+ linfo->link.s.if_mode);
}
- if (linfo->link.s.status) {
+ if (linfo->link.s.link_up) {
ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
ecmd->duplex = linfo->link.s.duplex;
} else {
@@ -222,23 +311,20 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
int ret = 0;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = addr;
- nctrl.ncmd.s.param3 = val;
+ nctrl.ncmd.s.param1 = addr;
+ nctrl.ncmd.s.param2 = val;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- nparams.resp_order = OCTEON_RESP_ORDERED;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
return -EINVAL;
@@ -253,20 +339,18 @@ static void octnet_mdio_resp_callback(struct octeon_device *oct,
u32 status,
void *buf)
{
- struct oct_mdio_cmd_resp *mdio_cmd_rsp;
struct oct_mdio_cmd_context *mdio_cmd_ctx;
struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
oct = lio_get_device(mdio_cmd_ctx->octeon_id);
if (status) {
		dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
CVM_CAST64(status));
- ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
+ WRITE_ONCE(mdio_cmd_ctx->cond, -1);
} else {
- ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
+ WRITE_ONCE(mdio_cmd_ctx->cond, 1);
}
wake_up_interruptible(&mdio_cmd_ctx->wc);
}
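
ACCESS_ONCE() was being phased out of the kernel around this time (it can miscompile on non-scalar types); WRITE_ONCE()/READ_ONCE() are the replacements and make the direction of each marked access explicit. The flag-plus-waitqueue handshake used in this callback reduces to the following sketch (the context struct is illustrative, not the driver's layout):

	#include <linux/compiler.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/wait.h>

	struct demo_ctx {
		wait_queue_head_t wc;
		int cond;		/* 0 = pending, 1 = ok, -1 = failed */
	};

	/* Completion side: publish the result with one non-torn store. */
	static void demo_done(struct demo_ctx *ctx, bool ok)
	{
		WRITE_ONCE(ctx->cond, ok ? 1 : -1);
		wake_up_interruptible(&ctx->wc);
	}

	/* Waiting side: marked loads pair with the marked store above. */
	static int demo_wait(struct demo_ctx *ctx)
	{
		wait_event_interruptible_timeout(ctx->wc, READ_ONCE(ctx->cond),
						 msecs_to_jiffies(100));
		return READ_ONCE(ctx->cond) == 1 ? 0 : -EIO;
	}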
@@ -297,15 +381,16 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
- ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
+ WRITE_ONCE(mdio_cmd_ctx->cond, 0);
mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
mdio_cmd->op = op;
mdio_cmd->mdio_addr = loc;
if (op)
mdio_cmd->value1 = *value;
- mdio_cmd->value2 = lio->linfo.ifidx;
octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
0, 0, 0);
@@ -317,7 +402,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
retval = octeon_send_soft_command(oct_dev, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
dev_err(&oct_dev->pci_dev->dev,
"octnet_mdio45_access instruction failed status: %x\n",
retval);
@@ -335,7 +420,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
sizeof(struct oct_mdio_cmd) / 8);
- if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
+ if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
if (!op)
*value = mdio_cmd_rsp->resp.value1;
} else {
@@ -379,18 +464,16 @@ static int lio_set_phys_id(struct net_device *netdev,
/* Configure Beacon values */
value = LIO68XX_LED_BEACON_CFGON;
- ret =
- octnet_mdio45_access(lio, 1,
- LIO68XX_LED_BEACON_ADDR,
- &value);
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_BEACON_ADDR,
+ &value);
if (ret)
return ret;
value = LIO68XX_LED_CTRL_CFGON;
- ret =
- octnet_mdio45_access(lio, 1,
- LIO68XX_LED_CTRL_ADDR,
- &value);
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_CTRL_ADDR,
+ &value);
if (ret)
return ret;
} else {
@@ -469,7 +552,7 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
}
- if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
+ if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
ering->rx_pending = 0;
ering->rx_max_pending = 0;
ering->rx_mini_pending = 0;
@@ -503,10 +586,10 @@ static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
if (msglvl & NETIF_MSG_HW)
liquidio_set_feature(netdev,
- OCTNET_CMD_VERBOSE_ENABLE);
+ OCTNET_CMD_VERBOSE_ENABLE, 0);
else
liquidio_set_feature(netdev,
- OCTNET_CMD_VERBOSE_DISABLE);
+ OCTNET_CMD_VERBOSE_DISABLE, 0);
}
lio->msg_enable = msglvl;
@@ -518,61 +601,279 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
/* Notes: Not supporting any auto negotiation in these
* drivers. Just report pause frame support.
*/
- pause->tx_pause = 1;
- pause->rx_pause = 1; /* TODO: Need to support RX pause frame!!. */
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ pause->autoneg = 0;
+
+ pause->tx_pause = oct->tx_pause;
+ pause->rx_pause = oct->rx_pause;
}
static void
lio_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *stats __attribute__((unused)),
+ u64 *data)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
+ struct net_device_stats *netstats = &netdev->stats;
int i = 0, j;
- for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) {
- if (!(oct_dev->io_qmask.iq & (1UL << j)))
+ netdev->netdev_ops->ndo_get_stats(netdev);
+ octnet_get_link_stats(netdev);
+
+ /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
+ data[i++] = CVM_CAST64(netstats->rx_packets);
+ /*sum of oct->instr_queue[iq_no]->stats.tx_done */
+ data[i++] = CVM_CAST64(netstats->tx_packets);
+ /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
+ data[i++] = CVM_CAST64(netstats->rx_bytes);
+ /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
+ data[i++] = CVM_CAST64(netstats->tx_bytes);
+ data[i++] = CVM_CAST64(netstats->rx_errors);
+ data[i++] = CVM_CAST64(netstats->tx_errors);
+ /*sum of oct->droq[oq_no]->stats->rx_dropped +
+ *oct->droq[oq_no]->stats->dropped_nodispatch +
+ *oct->droq[oq_no]->stats->dropped_toomany +
+ *oct->droq[oq_no]->stats->dropped_nomem
+ */
+ data[i++] = CVM_CAST64(netstats->rx_dropped);
+ /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
+ data[i++] = CVM_CAST64(netstats->tx_dropped);
+
+ /*data[i++] = CVM_CAST64(stats->multicast); */
+ /*data[i++] = CVM_CAST64(stats->collisions); */
+
+ /* firmware tx stats */
+ /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
+ *fromhost.fw_total_sent
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
+ /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
+ /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
+ /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_err_drop
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
+
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_tso_fwd
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_err_tso
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_tx_vxlan
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
+
+ /* mac tx statistics */
+ /*CVMX_BGXX_CMRX_TX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT4 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT15 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT14 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT17 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
+ /*CVMX_BGXX_CMRX_TX_STAT3 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT2 */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
+ /*CVMX_BGXX_CMRX_TX_STAT1 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
+ /*CVMX_BGXX_CMRX_TX_STAT16 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
+ /*CVMX_BGXX_CMRX_TX_STAT6 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
+
+ /* RX firmware stats */
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_total_rcvd
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_total_fwd
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
+ /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
+ /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
+ /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_err_pko
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
+ /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
+ *fromwire.fw_err_drop
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
+
+ /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
+ *fromwire.fw_rx_vxlan
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
+ *fromwire.fw_rx_vxlan_err
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
+
+ /* LRO */
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_pkts
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_octs
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
+ /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
+ /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_port
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_seq
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_tsval
+ */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_timer
+ */
+	data[i++] =
+		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
+	/* intrmod: packet forward rate */
+	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
+
+ /* mac: link-level stats */
+ /*CVMX_BGXX_CMRX_RX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
+ /*CVMX_BGXX_CMRX_RX_STAT1 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
+ /*CVMX_PKI_STATX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
+ /*CVMX_PKI_STATX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
+ /*wqe->word2.err_code or wqe->word2.err_level */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
+ /*CVMX_BGXX_CMRX_RX_STAT2 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
+ /*CVMX_BGXX_CMRX_RX_STAT6 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
+ /*CVMX_BGXX_CMRX_RX_STAT4 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
+ /*wqe->word2.err_code or wqe->word2.err_level */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
+ /*lio->link_changes*/
+ data[i++] = CVM_CAST64(lio->link_changes);
+
+ /* TX -- lio_update_stats(lio); */
+ for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
+ if (!(oct_dev->io_qmask.iq & (1ULL << j)))
continue;
+ /*packets to network port*/
+ /*# of packets tx to network */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
+ /*# of bytes tx to network */
data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
- data[i++] =
- CVM_CAST64(
- oct_dev->instr_queue[j]->stats.instr_processed);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ /*# of packets dropped */
data[i++] =
- CVM_CAST64(
- oct_dev->instr_queue[j]->stats.instr_dropped);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+ /*# of tx fails due to queue full */
data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+		/*# of gather entries sent */
data[i++] =
CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
+
+ /*instruction to firmware: data and control */
+ /*# of instructions to the queue */
data[i++] =
- readl(oct_dev->instr_queue[j]->inst_cnt_reg);
- data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
- data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
+ /*# of instructions processed */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
+ stats.instr_processed);
+ /*# of instructions could not be processed */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
+ stats.instr_dropped);
+ /*bytes sent through the queue */
data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+
+ /*tso request*/
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
+ /*vxlan request*/
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
+ /*txq restart*/
data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
}
- /* for (j = 0; j < oct_dev->num_oqs; j++){ */
- for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) {
- if (!(oct_dev->io_qmask.oq & (1UL << j)))
+ /* RX */
+ /* for (j = 0; j < oct_dev->num_oqs; j++) { */
+ for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
+ if (!(oct_dev->io_qmask.oq & (1ULL << j)))
continue;
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
- data[i++] =
- CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
+
+ /*packets send to TCP/IP network stack */
+ /*# of packets to network stack */
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
+ /*# of bytes to network stack */
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
+ /*# of packets dropped */
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
+ oct_dev->droq[j]->stats.dropped_toomany +
+ oct_dev->droq[j]->stats.rx_dropped);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
+
+ /*control and data path*/
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
+
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
}
}
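
The reworked stats path holds together only because three ethtool callbacks stay in lockstep: lio_get_sset_count() sizes the buffer, lio_get_strings() labels slot k, and the function above fills slot k, walking the same queue mask in the same order. ethtool has no other mechanism for pairing names with values, so any drift misattributes every counter after the first mismatch. The contract in miniature (names illustrative):

	#include <linux/ethtool.h>
	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	static const char demo_strings[][ETH_GSTRING_LEN] = {
		"rx_packets", "tx_packets",
	};

	static int demo_sset_count(struct net_device *dev, int sset)
	{
		return sset == ETH_SS_STATS ?
		       ARRAY_SIZE(demo_strings) : -EOPNOTSUPP;
	}

	static void demo_get_strings(struct net_device *dev, u32 sset, u8 *data)
	{
		if (sset == ETH_SS_STATS)
			memcpy(data, demo_strings, sizeof(demo_strings));
	}

	/* Slot k must hold the counter that slot k above names. */
	static void demo_get_stats(struct net_device *dev,
				   struct ethtool_stats *st, u64 *data)
	{
		data[0] = dev->stats.rx_packets;	/* "rx_packets" */
		data[1] = dev->stats.tx_packets;	/* "tx_packets" */
	}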
@@ -581,26 +882,43 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
int num_iq_stats, num_oq_stats, i, j;
+ int num_stats;
- num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct_dev->io_qmask.iq & (1UL << i)))
- continue;
- for (j = 0; j < num_iq_stats; j++) {
- sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]);
+ switch (stringset) {
+ case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(oct_stats_strings);
+ for (j = 0; j < num_stats; j++) {
+ sprintf(data, "%s", oct_stats_strings[j]);
data += ETH_GSTRING_LEN;
}
- }
- num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
- /* for (i = 0; i < oct_dev->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct_dev->io_qmask.oq & (1UL << i)))
- continue;
- for (j = 0; j < num_oq_stats; j++) {
- sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]);
- data += ETH_GSTRING_LEN;
+ num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.iq & (1ULL << i)))
+ continue;
+ for (j = 0; j < num_iq_stats; j++) {
+ sprintf(data, "tx-%d-%s", i,
+ oct_iq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
}
+
+ num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
+ /* for (i = 0; i < oct_dev->num_oqs; i++) { */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.oq & (1ULL << i)))
+ continue;
+ for (j = 0; j < num_oq_stats; j++) {
+ sprintf(data, "rx-%d-%s", i,
+ oct_droq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
+ break;
}
}
@@ -609,8 +927,14 @@ static int lio_get_sset_count(struct net_device *netdev, int sset)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
- return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) +
- (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (ARRAY_SIZE(oct_stats_strings) +
+ ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
+ ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static int lio_get_intr_coalesce(struct net_device *netdev,
@@ -618,50 +942,49 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
struct octeon_instr_queue *iq;
struct oct_intrmod_cfg *intrmod_cfg;
intrmod_cfg = &oct->intrmod;
switch (oct->chip_id) {
- /* case OCTEON_CN73XX: Todo */
- /* break; */
case OCTEON_CN68XX:
- case OCTEON_CN66XX:
- if (!intrmod_cfg->intrmod_enable) {
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+
+ if (!intrmod_cfg->rx_enable) {
intr_coal->rx_coalesce_usecs =
CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
intr_coal->rx_max_coalesced_frames =
CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
- } else {
- intr_coal->use_adaptive_rx_coalesce =
- intrmod_cfg->intrmod_enable;
- intr_coal->rate_sample_interval =
- intrmod_cfg->intrmod_check_intrvl;
- intr_coal->pkt_rate_high =
- intrmod_cfg->intrmod_maxpkt_ratethr;
- intr_coal->pkt_rate_low =
- intrmod_cfg->intrmod_minpkt_ratethr;
- intr_coal->rx_max_coalesced_frames_high =
- intrmod_cfg->intrmod_maxcnt_trigger;
- intr_coal->rx_coalesce_usecs_high =
- intrmod_cfg->intrmod_maxtmr_trigger;
- intr_coal->rx_coalesce_usecs_low =
- intrmod_cfg->intrmod_mintmr_trigger;
- intr_coal->rx_max_coalesced_frames_low =
- intrmod_cfg->intrmod_mincnt_trigger;
}
-
- iq = oct->instr_queue[lio->linfo.txpciq[0]];
+ iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
break;
-
+ }
default:
netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
return -EINVAL;
}
-
+ if (intrmod_cfg->rx_enable) {
+ intr_coal->use_adaptive_rx_coalesce =
+ intrmod_cfg->rx_enable;
+ intr_coal->rate_sample_interval =
+ intrmod_cfg->check_intrvl;
+ intr_coal->pkt_rate_high =
+ intrmod_cfg->maxpkt_ratethr;
+ intr_coal->pkt_rate_low =
+ intrmod_cfg->minpkt_ratethr;
+ intr_coal->rx_max_coalesced_frames_high =
+ intrmod_cfg->rx_maxcnt_trigger;
+ intr_coal->rx_coalesce_usecs_high =
+ intrmod_cfg->rx_maxtmr_trigger;
+ intr_coal->rx_coalesce_usecs_low =
+ intrmod_cfg->rx_mintmr_trigger;
+ intr_coal->rx_max_coalesced_frames_low =
+ intrmod_cfg->rx_mincnt_trigger;
+ }
return 0;
}
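
When adaptive moderation is off, the companion setter oct_cfg_rx_intrtime() (further down) converts the user's rx_coalesce_usecs into output-queue timer ticks via lio_cn6xxx_get_oq_ticks() before writing CN6XXX_SLI_OQ_INT_LEVEL_TIME. The conversion is ordinary wall-time-to-clock scaling; a hedged sketch with an illustrative clock rate:

	#include <linux/types.h>

	/* usecs -> interrupt-timer ticks. The rate below is illustrative;
	 * the driver derives the real rate from the chip configuration.
	 */
	static u32 demo_usecs_to_ticks(u32 usecs, u64 clk_hz)
	{
		return (u32)((clk_hz / 1000000ULL) * usecs);
	}
	/* e.g. demo_usecs_to_ticks(64, 800000000ULL) == 51200 */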
@@ -681,19 +1004,20 @@ static void octnet_intrmod_callback(struct octeon_device *oct_dev,
else
dev_info(&oct_dev->pci_dev->dev,
"Rx-Adaptive Interrupt moderation enabled:%llx\n",
- oct_dev->intrmod.intrmod_enable);
+ oct_dev->intrmod.rx_enable);
octeon_free_soft_command(oct_dev, sc);
}
/* Configure interrupt moderation parameters */
-static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
+static int octnet_set_intrmod_cfg(struct lio *lio,
+ struct oct_intrmod_cfg *intr_cfg)
{
struct octeon_soft_command *sc;
struct oct_intrmod_cmd *cmd;
struct oct_intrmod_cfg *cfg;
int retval;
- struct octeon_device *oct_dev = (struct octeon_device *)oct;
+ struct octeon_device *oct_dev = lio->oct_dev;
/* Alloc soft command */
sc = (struct octeon_soft_command *)
@@ -714,6 +1038,8 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
cmd->cfg = cfg;
cmd->oct_dev = oct_dev;
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
@@ -722,17 +1048,171 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
sc->wait_time = 1000;
retval = octeon_send_soft_command(oct_dev, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+octnet_nic_stats_callback(struct octeon_device *oct_dev,
+ u32 status, void *ptr)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
+ sc->virtrptr;
+ struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
+ sc->ctxptr;
+ struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
+ struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
+
+ struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
+ struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
+
+ if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
+ octeon_swap_8B_data((u64 *)&resp->stats,
+ (sizeof(struct oct_link_stats)) >> 3);
+
+ /* RX link-level stats */
+ rstats->total_rcvd = rsp_rstats->total_rcvd;
+ rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
+ rstats->total_bcst = rsp_rstats->total_bcst;
+ rstats->total_mcst = rsp_rstats->total_mcst;
+ rstats->runts = rsp_rstats->runts;
+ rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
+ /* Accounts for over/under-run of buffers */
+ rstats->fifo_err = rsp_rstats->fifo_err;
+ rstats->dmac_drop = rsp_rstats->dmac_drop;
+ rstats->fcs_err = rsp_rstats->fcs_err;
+ rstats->jabber_err = rsp_rstats->jabber_err;
+ rstats->l2_err = rsp_rstats->l2_err;
+ rstats->frame_err = rsp_rstats->frame_err;
+
+ /* RX firmware stats */
+ rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
+ rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
+ rstats->fw_err_pko = rsp_rstats->fw_err_pko;
+ rstats->fw_err_link = rsp_rstats->fw_err_link;
+ rstats->fw_err_drop = rsp_rstats->fw_err_drop;
+ rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
+ rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
+
+ /* Number of packets that are LROed */
+ rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
+ /* Number of octets that are LROed */
+ rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
+ /* Number of LRO packets formed */
+ rstats->fw_total_lro = rsp_rstats->fw_total_lro;
+		/* Number of times LRO of a packet was aborted */
+ rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
+ rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
+ rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
+ rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
+ rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
+ /* intrmod: packet forward rate */
+ rstats->fwd_rate = rsp_rstats->fwd_rate;
+
+ /* TX link-level stats */
+ tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
+ tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
+ tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
+ tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
+ tstats->ctl_sent = rsp_tstats->ctl_sent;
+ /* Packets sent after one collision*/
+ tstats->one_collision_sent = rsp_tstats->one_collision_sent;
+ /* Packets sent after multiple collision*/
+ tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
+ /* Packets not sent due to max collisions */
+ tstats->max_collision_fail = rsp_tstats->max_collision_fail;
+ /* Packets not sent due to max deferrals */
+ tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
+ /* Accounts for over/under-run of buffers */
+ tstats->fifo_err = rsp_tstats->fifo_err;
+ tstats->runts = rsp_tstats->runts;
+ /* Total number of collisions detected */
+ tstats->total_collisions = rsp_tstats->total_collisions;
+
+ /* firmware stats */
+ tstats->fw_total_sent = rsp_tstats->fw_total_sent;
+ tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
+ tstats->fw_err_pko = rsp_tstats->fw_err_pko;
+ tstats->fw_err_link = rsp_tstats->fw_err_link;
+ tstats->fw_err_drop = rsp_tstats->fw_err_drop;
+ tstats->fw_tso = rsp_tstats->fw_tso;
+ tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
+ tstats->fw_err_tso = rsp_tstats->fw_err_tso;
+ tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
+
+ resp->status = 1;
+ } else {
+ resp->status = -1;
+ }
+ complete(&ctrl->complete);
+}
+
+/* Request link-level and firmware port statistics from the NIC */
+static int octnet_get_link_stats(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ struct octeon_soft_command *sc;
+ struct oct_nic_stats_ctrl *ctrl;
+ struct oct_nic_stats_resp *resp;
+
+ int retval;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ 0,
+ sizeof(struct oct_nic_stats_resp),
+ sizeof(struct octnic_ctrl_pkt));
+
+ if (!sc)
+ return -ENOMEM;
+
+ resp = (struct oct_nic_stats_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_stats_resp));
+
+ ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
+ memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
+ ctrl->netdev = netdev;
+ init_completion(&ctrl->complete);
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_PORT_STATS, 0, 0, 0);
+
+ sc->callback = octnet_nic_stats_callback;
+ sc->callback_arg = sc;
+	sc->wait_time = 500;	/* in milliseconds */
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
+
+ if (resp->status != 1) {
octeon_free_soft_command(oct_dev, sc);
+
return -EINVAL;
}
+ octeon_free_soft_command(oct_dev, sc);
+
return 0;
}
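
octnet_get_link_stats() above is the usual synchronous-over-asynchronous shape: allocate a command with an embedded context, arm a completion, submit, then sleep with a timeout longer than the command's own wait_time so the response manager's callback (success, error, or OCTEON_REQUEST_TIMEOUT) always fires before the buffer is freed. Stripped to its skeleton (hw_submit() is a hypothetical send hook, not a driver function):

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	struct demo_req {
		struct completion done;
		int status;		/* 0 = pending, 1 = ok, -1 = failed */
	};

	int hw_submit(struct demo_req *r);	/* hypothetical */

	static void demo_callback(struct demo_req *r, u32 hw_status)
	{
		r->status = hw_status ? -1 : 1;
		complete(&r->done);
	}

	static int demo_get(struct demo_req *r)
	{
		init_completion(&r->done);
		if (hw_submit(r))
			return -EINVAL;	/* never queued; safe to free now */
		wait_for_completion_timeout(&r->done, msecs_to_jiffies(1000));
		return r->status == 1 ? 0 : -EINVAL;
	}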
/* Enable/Disable auto interrupt Moderation */
static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
- *intr_coal, int adaptive)
+ *intr_coal)
{
int ret = 0;
struct octeon_device *oct = lio->oct_dev;
@@ -740,59 +1220,73 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
intrmod_cfg = &oct->intrmod;
- if (adaptive) {
+ if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
if (intr_coal->rate_sample_interval)
- intrmod_cfg->intrmod_check_intrvl =
+ intrmod_cfg->check_intrvl =
intr_coal->rate_sample_interval;
else
- intrmod_cfg->intrmod_check_intrvl =
+ intrmod_cfg->check_intrvl =
LIO_INTRMOD_CHECK_INTERVAL;
if (intr_coal->pkt_rate_high)
- intrmod_cfg->intrmod_maxpkt_ratethr =
+ intrmod_cfg->maxpkt_ratethr =
intr_coal->pkt_rate_high;
else
- intrmod_cfg->intrmod_maxpkt_ratethr =
+ intrmod_cfg->maxpkt_ratethr =
LIO_INTRMOD_MAXPKT_RATETHR;
if (intr_coal->pkt_rate_low)
- intrmod_cfg->intrmod_minpkt_ratethr =
+ intrmod_cfg->minpkt_ratethr =
intr_coal->pkt_rate_low;
else
- intrmod_cfg->intrmod_minpkt_ratethr =
+ intrmod_cfg->minpkt_ratethr =
LIO_INTRMOD_MINPKT_RATETHR;
-
+ }
+ if (oct->intrmod.rx_enable) {
if (intr_coal->rx_max_coalesced_frames_high)
- intrmod_cfg->intrmod_maxcnt_trigger =
+ intrmod_cfg->rx_maxcnt_trigger =
intr_coal->rx_max_coalesced_frames_high;
else
- intrmod_cfg->intrmod_maxcnt_trigger =
- LIO_INTRMOD_MAXCNT_TRIGGER;
+ intrmod_cfg->rx_maxcnt_trigger =
+ LIO_INTRMOD_RXMAXCNT_TRIGGER;
if (intr_coal->rx_coalesce_usecs_high)
- intrmod_cfg->intrmod_maxtmr_trigger =
+ intrmod_cfg->rx_maxtmr_trigger =
intr_coal->rx_coalesce_usecs_high;
else
- intrmod_cfg->intrmod_maxtmr_trigger =
- LIO_INTRMOD_MAXTMR_TRIGGER;
+ intrmod_cfg->rx_maxtmr_trigger =
+ LIO_INTRMOD_RXMAXTMR_TRIGGER;
if (intr_coal->rx_coalesce_usecs_low)
- intrmod_cfg->intrmod_mintmr_trigger =
+ intrmod_cfg->rx_mintmr_trigger =
intr_coal->rx_coalesce_usecs_low;
else
- intrmod_cfg->intrmod_mintmr_trigger =
- LIO_INTRMOD_MINTMR_TRIGGER;
+ intrmod_cfg->rx_mintmr_trigger =
+ LIO_INTRMOD_RXMINTMR_TRIGGER;
if (intr_coal->rx_max_coalesced_frames_low)
- intrmod_cfg->intrmod_mincnt_trigger =
+ intrmod_cfg->rx_mincnt_trigger =
intr_coal->rx_max_coalesced_frames_low;
else
- intrmod_cfg->intrmod_mincnt_trigger =
- LIO_INTRMOD_MINCNT_TRIGGER;
+ intrmod_cfg->rx_mincnt_trigger =
+ LIO_INTRMOD_RXMINCNT_TRIGGER;
+ }
+ if (oct->intrmod.tx_enable) {
+ if (intr_coal->tx_max_coalesced_frames_high)
+ intrmod_cfg->tx_maxcnt_trigger =
+ intr_coal->tx_max_coalesced_frames_high;
+ else
+ intrmod_cfg->tx_maxcnt_trigger =
+ LIO_INTRMOD_TXMAXCNT_TRIGGER;
+ if (intr_coal->tx_max_coalesced_frames_low)
+ intrmod_cfg->tx_mincnt_trigger =
+ intr_coal->tx_max_coalesced_frames_low;
+ else
+ intrmod_cfg->tx_mincnt_trigger =
+ LIO_INTRMOD_TXMINCNT_TRIGGER;
}
- intrmod_cfg->intrmod_enable = adaptive;
- ret = octnet_set_intrmod_cfg(oct, intrmod_cfg);
+ ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
return ret;
}
@@ -800,54 +1294,82 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
static int
oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
{
- int ret;
struct octeon_device *oct = lio->oct_dev;
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
u32 rx_max_coalesced_frames;
- if (!intr_coal->rx_max_coalesced_frames)
- rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
- else
- rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames;
-
- /* Disable adaptive interrupt modulation */
- ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
- if (ret)
- return ret;
-
/* Config Cnt based interrupt values */
- octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
- rx_max_coalesced_frames);
- CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
+ else
+ rx_max_coalesced_frames =
+ intr_coal->rx_max_coalesced_frames;
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+ rx_max_coalesced_frames);
+ CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
return 0;
}
static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
*intr_coal)
{
- int ret;
struct octeon_device *oct = lio->oct_dev;
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
u32 time_threshold, rx_coalesce_usecs;
- if (!intr_coal->rx_coalesce_usecs)
- rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
- else
- rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+ /* Config Time based interrupt values */
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
- /* Disable adaptive interrupt modulation */
- ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
- if (ret)
- return ret;
+ time_threshold = lio_cn6xxx_get_oq_ticks(oct,
+ rx_coalesce_usecs);
+ octeon_write_csr(oct,
+ CN6XXX_SLI_OQ_INT_LEVEL_TIME,
+ time_threshold);
- /* Config Time based interrupt values */
- time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs);
- octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
- CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
+ CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
return 0;
}
+static int
+oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
+ __attribute__((unused)))
+{
+ struct octeon_device *oct = lio->oct_dev;
+
+ /* Config Cnt based interrupt values */
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int lio_set_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *intr_coal)
{
@@ -855,59 +1377,48 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
int ret;
struct octeon_device *oct = lio->oct_dev;
u32 j, q_no;
+ int db_max, db_min;
- if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) &&
- (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) {
- for (j = 0; j < lio->linfo.num_txpciq; j++) {
- q_no = lio->linfo.txpciq[j];
- oct->instr_queue[q_no]->fill_threshold =
- intr_coal->tx_max_coalesced_frames;
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ db_min = CN6XXX_DB_MIN;
+ db_max = CN6XXX_DB_MAX;
+ if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
+ (intr_coal->tx_max_coalesced_frames <= db_max)) {
+ for (j = 0; j < lio->linfo.num_txpciq; j++) {
+ q_no = lio->linfo.txpciq[j].s.q_no;
+ oct->instr_queue[q_no]->fill_threshold =
+ intr_coal->tx_max_coalesced_frames;
+ }
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
+ intr_coal->tx_max_coalesced_frames, db_min,
+ db_max);
+ return -EINVAL;
}
- } else {
- dev_err(&oct->pci_dev->dev,
- "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
- intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN,
- CN6XXX_DB_MAX);
+ break;
+ default:
return -EINVAL;
}
- /* User requested adaptive-rx on */
- if (intr_coal->use_adaptive_rx_coalesce) {
- ret = oct_cfg_adaptive_intr(lio, intr_coal, 1);
- if (ret)
- goto ret_intrmod;
- }
+ oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
+ oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
- /* User requested adaptive-rx off and rx coalesce */
- if ((intr_coal->rx_coalesce_usecs) &&
- (!intr_coal->use_adaptive_rx_coalesce)) {
+ ret = oct_cfg_adaptive_intr(lio, intr_coal);
+
+ if (!intr_coal->use_adaptive_rx_coalesce) {
ret = oct_cfg_rx_intrtime(lio, intr_coal);
if (ret)
goto ret_intrmod;
- }
- /* User requested adaptive-rx off and rx coalesce */
- if ((intr_coal->rx_max_coalesced_frames) &&
- (!intr_coal->use_adaptive_rx_coalesce)) {
ret = oct_cfg_rx_intrcnt(lio, intr_coal);
if (ret)
goto ret_intrmod;
}
-
- /* User requested adaptive-rx off, so use default coalesce params */
- if ((!intr_coal->rx_max_coalesced_frames) &&
- (!intr_coal->use_adaptive_rx_coalesce) &&
- (!intr_coal->rx_coalesce_usecs)) {
- dev_info(&oct->pci_dev->dev,
- "Turning off adaptive-rx interrupt moderation\n");
- dev_info(&oct->pci_dev->dev,
- "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n",
- CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT);
- ret = oct_cfg_rx_intrtime(lio, intr_coal);
- if (ret)
- goto ret_intrmod;
-
- ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+ if (!intr_coal->use_adaptive_tx_coalesce) {
+ ret = oct_cfg_tx_intrcnt(lio, intr_coal);
if (ret)
goto ret_intrmod;
}
@@ -923,23 +1434,28 @@ static int lio_get_ts_info(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
info->so_timestamping =
+#ifdef PTP_HARDWARE_TIMESTAMPING
SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+#endif
SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_SOFTWARE;
if (lio->ptp_clock)
info->phc_index = ptp_clock_index(lio->ptp_clock);
else
info->phc_index = -1;
+#ifdef PTP_HARDWARE_TIMESTAMPING
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+#endif
return 0;
}
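
With PTP_HARDWARE_TIMESTAMPING compiled out, the driver now advertises only software timestamping instead of claiming RAW_HARDWARE support it cannot deliver; userspace tools such as ptp4l probe exactly this callback to choose a clock. The software-only degenerate case is tiny (a sketch, not the driver's code):

	#include <linux/ethtool.h>
	#include <linux/net_tstamp.h>

	static int demo_get_ts_info(struct net_device *netdev,
				    struct ethtool_ts_info *info)
	{
		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;	/* no PTP hardware clock */
		return 0;
	}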
@@ -950,7 +1466,6 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct octeon_device *oct = lio->oct_dev;
struct oct_link_info *linfo;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
int ret = 0;
/* get the link info */
@@ -965,12 +1480,14 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->duplex != DUPLEX_FULL)))
return -EINVAL;
- /* Ethtool Support is not provided for XAUI and RXAUI Interfaces
+ /* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces
* as they operate at fixed Speed and Duplex settings
*/
- if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
- linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
- dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n");
+ if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
+ dev_info(&oct->pci_dev->dev,
+ "Autonegotiation, duplex and speed settings cannot be modified.\n");
return -EINVAL;
}
@@ -978,9 +1495,9 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 1000;
nctrl.netpndev = (u64)netdev;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
/* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
@@ -990,19 +1507,17 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
/* Autoneg ON */
nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
OCTNIC_NCMD_AUTONEG_ON;
- nctrl.ncmd.s.param2 = ecmd->advertising;
+ nctrl.ncmd.s.param1 = ecmd->advertising;
} else {
/* Autoneg OFF */
nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
- nctrl.ncmd.s.param3 = ecmd->duplex;
+ nctrl.ncmd.s.param2 = ecmd->duplex;
- nctrl.ncmd.s.param2 = ecmd->speed;
+ nctrl.ncmd.s.param1 = ecmd->speed;
}
- nparams.resp_order = OCTEON_RESP_ORDERED;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
return -1;
@@ -1026,7 +1541,7 @@ static int lio_nway_reset(struct net_device *netdev)
}
/* Return register dump len. */
-static int lio_get_regs_len(struct net_device *dev)
+static int lio_get_regs_len(struct net_device *dev __attribute__((unused)))
{
return OCT_ETHTOOL_REGDUMP_LEN;
}
@@ -1170,13 +1685,12 @@ static void lio_get_regs(struct net_device *dev,
int len = 0;
struct octeon_device *oct = lio->oct_dev;
- memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
regs->version = OCT_ETHTOOL_REGSVER;
switch (oct->chip_id) {
- /* case OCTEON_CN73XX: Todo */
case OCTEON_CN68XX:
case OCTEON_CN66XX:
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
len += cn6xxx_read_csr_reg(regbuf + len, oct);
len += cn6xxx_read_config_reg(regbuf + len, oct);
break;
@@ -1186,6 +1700,23 @@ static void lio_get_regs(struct net_device *dev,
}
}
+static u32 lio_get_priv_flags(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ return lio->oct_dev->priv_flags;
+}
+
+static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct lio *lio = GET_LIO(netdev);
+ bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
+
+ lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
+ intr_by_tx_bytes);
+ return 0;
+}
+
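These two callbacks plug into ethtool's private-flags mechanism, where each bit of the u32 is a driver-defined toggle. Assuming the flag's name is also exported under ETH_SS_PRIV_FLAGS (not visible in this hunk), standard tooling drives them directly: `ethtool --show-priv-flags <iface>` reads lio_get_priv_flags(), and `ethtool --set-priv-flags <iface> <flag> on` lands in lio_set_priv_flags(), where the OCT_PRIV_FLAG_TX_BYTES bit is extracted into intr_by_tx_bytes.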
static const struct ethtool_ops lio_ethtool_ops = {
.get_settings = lio_get_settings,
.get_link = ethtool_op_get_link,
@@ -1207,6 +1738,8 @@ static const struct ethtool_ops lio_ethtool_ops = {
.set_settings = lio_set_settings,
.get_coalesce = lio_get_intr_coalesce,
.set_coalesce = lio_set_intr_coalesce,
+ .get_priv_flags = lio_get_priv_flags,
+ .set_priv_flags = lio_set_priv_flags,
.get_ts_info = lio_get_ts_info,
};
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8de79ae63231..20d6942edf40 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -20,24 +20,12 @@
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/crc32.h>
-#include <linux/dma-mapping.h>
#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <linux/ipv6.h>
#include <linux/net_tstamp.h>
#include <linux/if_vlan.h>
#include <linux/firmware.h>
-#include <linux/ethtool.h>
#include <linux/ptp_clock_kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include "octeon_config.h"
+#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -48,7 +36,6 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
@@ -72,6 +59,9 @@ MODULE_PARM_DESC(console_bitmask,
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
+ (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
+
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
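
INCR_INSTRQUEUE_PKT_COUNT() above is a plain field accessor: `field` is substituted as a token, so one macro can update any counter in a queue's stats structure. An expansion example (illustrative call):

	INCR_INSTRQUEUE_PKT_COUNT(oct, 3, tx_restart, 1);
	/* expands to: (oct->instr_queue[3]->stats.tx_restart += 1); */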
@@ -84,6 +74,8 @@ static int conf_type;
module_param(conf_type, int, 0);
MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs");
+static int ptp_enable = 1;
+
/* Bit mask values for lio->ifstate */
#define LIO_IFSTATE_DROQ_OPS 0x01
#define LIO_IFSTATE_REGISTERED 0x02
@@ -166,6 +158,8 @@ struct octnic_gather {
* received from the IP layer.
*/
struct octeon_sg_entry *sg;
+
+ u64 sg_dma_ptr;
};
/** This structure is used by NIC driver to store information required
@@ -220,8 +214,8 @@ static void octeon_droq_bh(unsigned long pdev)
(struct octeon_device_priv *)oct->priv;
/* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
- for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) {
- if (!(oct->io_qmask.oq & (1UL << q_no)))
+ for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
+ if (!(oct->io_qmask.oq & (1ULL << q_no)))
continue;
reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
MAX_PACKET_BUDGET);
@@ -241,11 +235,10 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
do {
pending_pkts = 0;
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
- pkt_cnt += octeon_droq_check_hw_for_pkts(oct,
- oct->droq[i]);
+ pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
}
if (pkt_cnt > 0) {
pending_pkts += pkt_cnt;
@@ -361,7 +354,7 @@ static int wait_for_pending_requests(struct octeon_device *oct)
[OCTEON_ORDERED_SC_LIST].pending_req_count);
if (pcount)
schedule_timeout_uninterruptible(HZ / 10);
- else
+ else
break;
}
@@ -392,10 +385,10 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
dev_err(&oct->pci_dev->dev, "There were pending requests\n");
/* Force all requests waiting to be fetched by OCTEON to complete. */
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
struct octeon_instr_queue *iq;
- if (!(oct->io_qmask.iq & (1UL << i)))
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
iq = oct->instr_queue[i];
@@ -405,7 +398,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
iq->octeon_read_index = iq->host_write_index;
iq->stats.instr_processed +=
atomic_read(&iq->instr_pending);
- lio_process_iq_request_list(oct, iq);
+ lio_process_iq_request_list(oct, iq, 0);
spin_unlock_bh(&iq->lock);
}
}
@@ -500,7 +493,8 @@ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
* \brief mmio handler
* @param pdev Pointer to PCI device
*/
-static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
+static pci_ers_result_t liquidio_pcie_mmio_enabled(
+ struct pci_dev *pdev __attribute__((unused)))
{
/* We should never hit this since we never ask for a reset for a Fatal
* Error. We always return DISCONNECT in io_error above.
@@ -516,7 +510,8 @@ static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
* Restart the card from scratch, as if from a cold-boot. Implementation
* resembles the first-half of the octeon_resume routine.
*/
-static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t liquidio_pcie_slot_reset(
+ struct pci_dev *pdev __attribute__((unused)))
{
/* We should never hit this since we never ask for a reset for a Fatal
* Error. We always return DISCONNECT in io_error above.
@@ -533,7 +528,7 @@ static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
* its OK to resume normal operation. Implementation resembles the
* second-half of the octeon_resume routine.
*/
-static void liquidio_pcie_resume(struct pci_dev *pdev)
+static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
/* Nothing to be done here. */
}
@@ -544,7 +539,8 @@ static void liquidio_pcie_resume(struct pci_dev *pdev)
* @param pdev Pointer to PCI device
* @param state state to suspend to
*/
-static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
+static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
+ pm_message_t state __attribute__((unused)))
{
return 0;
}
@@ -553,7 +549,7 @@ static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
* \brief called when resuming
* @param pdev Pointer to PCI device
*/
-static int liquidio_resume(struct pci_dev *pdev)
+static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
return 0;
}
@@ -678,12 +674,24 @@ static inline void txqs_start(struct net_device *netdev)
*/
static inline void txqs_wake(struct net_device *netdev)
{
+ struct lio *lio = GET_LIO(netdev);
+
if (netif_is_multiqueue(netdev)) {
int i;
- for (i = 0; i < netdev->num_tx_queues; i++)
- netif_wake_subqueue(netdev, i);
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ int qno = lio->linfo.txpciq[i %
+ (lio->linfo.num_txpciq)].s.q_no;
+
+ if (__netif_subqueue_stopped(netdev, i)) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
+ tx_restart, 1);
+ netif_wake_subqueue(netdev, i);
+ }
+ }
} else {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+ tx_restart, 1);
netif_wake_queue(netdev);
}
}
@@ -705,7 +713,7 @@ static void start_txq(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
- if (lio->linfo.link.s.status) {
+ if (lio->linfo.link.s.link_up) {
txqs_start(netdev);
return;
}
@@ -752,16 +760,23 @@ static inline int check_txq_status(struct lio *lio)
/* check each sub-queue state */
for (q = 0; q < numqs; q++) {
- iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)];
+ iq = lio->linfo.txpciq[q %
+ (lio->linfo.num_txpciq)].s.q_no;
if (octnet_iq_is_full(lio->oct_dev, iq))
continue;
- wake_q(lio->netdev, q);
- ret_val++;
+ if (__netif_subqueue_stopped(lio->netdev, q)) {
+ wake_q(lio->netdev, q);
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
+ tx_restart, 1);
+ ret_val++;
+ }
}
} else {
if (octnet_iq_is_full(lio->oct_dev, lio->txq))
return 0;
wake_q(lio->netdev, lio->txq);
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+ tx_restart, 1);
ret_val = 1;
}
return ret_val;
@@ -787,64 +802,116 @@ static inline struct list_head *list_delete_head(struct list_head *root)
}
/**
- * \brief Delete gather list
+ * \brief Delete gather lists
* @param lio per-network private data
*/
-static void delete_glist(struct lio *lio)
+static void delete_glists(struct lio *lio)
{
struct octnic_gather *g;
+ int i;
- do {
- g = (struct octnic_gather *)
- list_delete_head(&lio->glist);
- if (g) {
- if (g->sg)
- kfree((void *)((unsigned long)g->sg -
- g->adjust));
- kfree(g);
- }
- } while (g);
+ if (!lio->glist)
+ return;
+
+ for (i = 0; i < lio->linfo.num_txpciq; i++) {
+ do {
+ g = (struct octnic_gather *)
+ list_delete_head(&lio->glist[i]);
+ if (g) {
+ if (g->sg) {
+ dma_unmap_single(&lio->oct_dev->
+ pci_dev->dev,
+ g->sg_dma_ptr,
+ g->sg_size,
+ DMA_TO_DEVICE);
+ kfree((void *)((unsigned long)g->sg -
+ g->adjust));
+ }
+ kfree(g);
+ }
+ } while (g);
+ }
+
+ kfree((void *)lio->glist);
}
/**
- * \brief Setup gather list
+ * \brief Setup gather lists
* @param lio per-network private data
*/
-static int setup_glist(struct lio *lio)
+static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
- int i;
+ int i, j;
struct octnic_gather *g;
- INIT_LIST_HEAD(&lio->glist);
+ lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
+ GFP_KERNEL);
+ if (!lio->glist_lock)
+ return 1;
- for (i = 0; i < lio->tx_qsize; i++) {
- g = kzalloc(sizeof(*g), GFP_KERNEL);
- if (!g)
- break;
+ lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
+ GFP_KERNEL);
+ if (!lio->glist) {
+ kfree((void *)lio->glist_lock);
+ return 1;
+ }
- g->sg_size =
- ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+ for (i = 0; i < num_iqs; i++) {
+ int numa_node = cpu_to_node(i % num_online_cpus());
- g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
- if (!g->sg) {
- kfree(g);
- break;
+ spin_lock_init(&lio->glist_lock[i]);
+
+ INIT_LIST_HEAD(&lio->glist[i]);
+
+ for (j = 0; j < lio->tx_qsize; j++) {
+ g = kzalloc_node(sizeof(*g), GFP_KERNEL,
+ numa_node);
+ if (!g)
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ break;
+
+ g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
+ OCT_SG_ENTRY_SIZE);
+
+ g->sg = kmalloc_node(g->sg_size + 8,
+ GFP_KERNEL, numa_node);
+ if (!g->sg)
+ g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
+ if (!g->sg) {
+ kfree(g);
+ break;
+ }
+
+ /* The gather component should be aligned on a 64-bit
+ * boundary
+ */
+ if (((unsigned long)g->sg) & 7) {
+ g->adjust = 8 - (((unsigned long)g->sg) & 7);
+ g->sg = (struct octeon_sg_entry *)
+ ((unsigned long)g->sg + g->adjust);
+ }
+ g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
+ g->sg, g->sg_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev,
+ g->sg_dma_ptr)) {
+ kfree((void *)((unsigned long)g->sg -
+ g->adjust));
+ kfree(g);
+ break;
+ }
+
+ list_add_tail(&g->list, &lio->glist[i]);
}
- /* The gather component should be aligned on 64-bit boundary */
- if (((unsigned long)g->sg) & 7) {
- g->adjust = 8 - (((unsigned long)g->sg) & 7);
- g->sg = (struct octeon_sg_entry *)
- ((unsigned long)g->sg + g->adjust);
+ if (j != lio->tx_qsize) {
+ delete_glists(lio);
+ return 1;
}
- list_add_tail(&g->list, &lio->glist);
}
- if (i == lio->tx_qsize)
- return 0;
-
- delete_glist(lio);
- return 1;
+ return 0;
}
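/* Minimal user-space sketch (illustrative only) of the 64-bit alignment
 * adjustment setup_glists() applies to each gather component: allocate
 * size + 8, bump the pointer to the next 8-byte boundary, and remember the
 * adjustment so the original allocation can still be freed later.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t sg_size = 100;
	unsigned char *sg = malloc(sg_size + 8);
	unsigned long adjust = 0;

	if (!sg)
		return 1;

	if ((uintptr_t)sg & 7) {
		adjust = 8 - ((uintptr_t)sg & 7);
		sg += adjust;		/* now 8-byte aligned */
	}
	printf("sg=%p adjust=%lu\n", (void *)sg, adjust);

	/* mirrors kfree((void *)((unsigned long)g->sg - g->adjust)) */
	free(sg - adjust);
	return 0;
}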
/**
@@ -858,7 +925,7 @@ static void print_link_info(struct net_device *netdev)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
struct oct_link_info *linfo = &lio->linfo;
- if (linfo->link.s.status) {
+ if (linfo->link.s.link_up) {
netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
linfo->link.s.speed,
(linfo->link.s.duplex) ? "Full" : "Half");
@@ -880,13 +947,15 @@ static inline void update_link_status(struct net_device *netdev,
union oct_link_status *ls)
{
struct lio *lio = GET_LIO(netdev);
+ int changed = (lio->linfo.link.u64 != ls->u64);
- if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
- lio->linfo.link.u64 = ls->u64;
+ lio->linfo.link.u64 = ls->u64;
+ if ((lio->intf_open) && (changed)) {
print_link_info(netdev);
+ lio->link_changes++;
- if (lio->linfo.link.s.status) {
+ if (lio->linfo.link.s.link_up) {
netif_carrier_on(netdev);
/* start_txq(netdev); */
txqs_wake(netdev);
@@ -897,6 +966,42 @@ static inline void update_link_status(struct net_device *netdev,
}
}
+/* Runs in interrupt context. */
+static void update_txq_status(struct octeon_device *oct, int iq_num)
+{
+ struct net_device *netdev;
+ struct lio *lio;
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
+
+ /*octeon_update_iq_read_idx(oct, iq);*/
+
+ netdev = oct->props[iq->ifidx].netdev;
+
+ /* This is needed because the first IQ does not have
+ * a netdev associated with it.
+ */
+ if (!netdev)
+ return;
+
+ lio = GET_LIO(netdev);
+ if (netif_is_multiqueue(netdev)) {
+ if (__netif_subqueue_stopped(netdev, iq->q_index) &&
+ lio->linfo.link.s.link_up &&
+ (!octnet_iq_is_full(oct, iq_num))) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
+ tx_restart, 1);
+ netif_wake_subqueue(netdev, iq->q_index);
+ } else {
+ if (!octnet_iq_is_full(oct, lio->txq)) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+ lio->txq,
+ tx_restart, 1);
+ wake_q(netdev, lio->txq);
+ }
+ }
+ }
+}
+
/**
 * \brief Droq packet processor scheduler
* @param oct octeon device
@@ -910,8 +1015,9 @@ void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
struct octeon_droq *droq;
if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
- for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
- if (!(oct->droq_intr & (1 << oq_no)))
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
+ oq_no++) {
+ if (!(oct->droq_intr & (1ULL << oq_no)))
continue;
droq = oct->droq[oq_no];
@@ -987,7 +1093,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
* @param pdev PCI device structure
* @param ent unused
*/
-static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int
+liquidio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent __attribute__((unused)))
{
struct octeon_device *oct_dev = NULL;
struct handshake *hs;
@@ -1022,6 +1130,9 @@ static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
}
+ oct_dev->rx_pause = 1;
+ oct_dev->tx_pause = 1;
+
dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
return 0;
@@ -1087,19 +1198,13 @@ static void octeon_destroy_resources(struct octeon_device *oct)
if (oct->flags & LIO_FLAG_MSI_ENABLED)
pci_disable_msi(oct->pci_dev);
- /* Soft reset the octeon device before exiting */
- oct->fn_list.soft_reset(oct);
-
- /* Disable the device, releasing the PCI INT */
- pci_disable_device(oct->pci_dev);
-
/* fallthrough */
case OCT_DEV_IN_RESET:
case OCT_DEV_DROQ_INIT_DONE:
/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
mdelay(100);
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
octeon_delete_droq(oct, i);
}
@@ -1126,8 +1231,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_INSTR_QUEUE_INIT_DONE:
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct->io_qmask.iq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
octeon_delete_instr_queue(oct, i);
}
@@ -1139,14 +1244,21 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_PCI_MAP_DONE:
+
+ /* Soft reset the octeon device before exiting */
+ oct->fn_list.soft_reset(oct);
+
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
/* fallthrough */
case OCT_DEV_BEGIN_STATE:
+ /* Disable the device, releasing the PCI INT */
+ pci_disable_device(oct->pci_dev);
+
/* Nothing to be done here either */
break;
- } /* end switch(oct->status) */
+ } /* end switch (oct->status) */
tasklet_kill(&oct_priv->droq_tasklet);
}
@@ -1159,18 +1271,15 @@ static void octeon_destroy_resources(struct octeon_device *oct)
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = start_stop;
+ nctrl.ncmd.s.param1 = start_stop;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)lio->netdev;
- nparams.resp_order = OCTEON_RESP_NORESPONSE;
-
- if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0)
+ if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
}
@@ -1186,6 +1295,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
struct net_device *netdev = oct->props[ifidx].netdev;
struct lio *lio;
+ struct napi_struct *napi, *n;
if (!netdev) {
dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
@@ -1202,13 +1312,22 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
txqs_stop(netdev);
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+ }
+
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
- delete_glist(lio);
+ delete_glists(lio);
free_netdev(netdev);
+ oct->props[ifidx].gmxport = -1;
+
oct->props[ifidx].netdev = NULL;
}
@@ -1227,10 +1346,15 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
return 1;
}
+ spin_lock_bh(&oct->cmd_resp_wqlock);
+ oct->cmd_resp_state = OCT_DRV_OFFLINE;
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+
for (i = 0; i < oct->ifcount; i++) {
lio = GET_LIO(oct->props[i].netdev);
for (j = 0; j < lio->linfo.num_rxpciq; j++)
- octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]);
+ octeon_unregister_droq_ops(oct,
+ lio->linfo.rxpciq[j].s.q_no);
}
for (i = 0; i < oct->ifcount; i++)
@@ -1274,6 +1398,7 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
{
u32 dev_id, rev_id;
int ret = 1;
+ char *s;
pci_read_config_dword(oct->pci_dev, 0, &dev_id);
pci_read_config_dword(oct->pci_dev, 8, &rev_id);
@@ -1283,22 +1408,27 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
case OCTEON_CN68XX_PCIID:
oct->chip_id = OCTEON_CN68XX;
ret = lio_setup_cn68xx_octeon_device(oct);
+ s = "CN68XX";
break;
case OCTEON_CN66XX_PCIID:
oct->chip_id = OCTEON_CN66XX;
ret = lio_setup_cn66xx_octeon_device(oct);
+ s = "CN66XX";
break;
+
default:
+ s = "?";
dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
dev_id);
}
if (!ret)
- dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n",
+ dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
OCTEON_MAJOR_REV(oct),
OCTEON_MINOR_REV(oct),
- octeon_get_conf(oct)->card_name);
+ octeon_get_conf(oct)->card_name,
+ LIQUIDIO_VERSION);
return ret;
}
@@ -1326,6 +1456,16 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
return 0;
}
+static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
+{
+ int q = 0;
+
+ if (netif_is_multiqueue(lio->netdev))
+ q = skb->queue_mapping % lio->linfo.num_txpciq;
+
+ return q;
+}
+
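/* Quick stand-alone check (not driver code) of why this patch replaces
 * "q & (num_txpciq - 1)" with "q % num_txpciq": the mask form only matches
 * the modulo when the queue count is a power of two. With three queues the
 * mask is 2, so "q & 2" can only ever yield 0 or 2 and queue 1 is never
 * selected, while "q % 3" cycles through all three queues.
 */
#include <stdio.h>

int main(void)
{
	int num_txpciq = 3;	/* a non-power-of-two queue count */
	int q;

	for (q = 0; q < 6; q++)
		printf("q=%d mask=%d mod=%d\n",
		       q, q & (num_txpciq - 1), q % num_txpciq);
	return 0;
}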
/**
* \brief Check Tx queue state for a given network buffer
* @param lio per-network private data
@@ -1337,14 +1477,19 @@ static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
if (netif_is_multiqueue(lio->netdev)) {
q = skb->queue_mapping;
- iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))];
+ iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
} else {
iq = lio->txq;
+ q = iq;
}
if (octnet_iq_is_full(lio->oct_dev, iq))
return 0;
- wake_q(lio->netdev, q);
+
+ if (__netif_subqueue_stopped(lio->netdev, q)) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
+ wake_q(lio->netdev, q);
+ }
return 1;
}
@@ -1367,7 +1512,7 @@ static void free_netbuf(void *buf)
check_txq_state(lio, skb);
- recv_buffer_free((struct sk_buff *)skb);
+ tx_buffer_free(skb);
}
/**
@@ -1380,7 +1525,7 @@ static void free_netsgbuf(void *buf)
struct sk_buff *skb;
struct lio *lio;
struct octnic_gather *g;
- int i, frags;
+ int i, frags, iq;
finfo = (struct octnet_buf_free_info *)buf;
skb = finfo->skb;
@@ -1402,17 +1547,17 @@ static void free_netsgbuf(void *buf)
i++;
}
- dma_unmap_single(&lio->oct_dev->pci_dev->dev,
- finfo->dptr, g->sg_size,
- DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
+ g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
- spin_lock(&lio->lock);
- list_add_tail(&g->list, &lio->glist);
- spin_unlock(&lio->lock);
+ iq = skb_iq(lio, skb);
+ spin_lock(&lio->glist_lock[iq]);
+ list_add_tail(&g->list, &lio->glist[iq]);
+ spin_unlock(&lio->glist_lock[iq]);
check_txq_state(lio, skb); /* mq support: sub-queue state check */
- recv_buffer_free((struct sk_buff *)skb);
+ tx_buffer_free(skb);
}
/**
@@ -1426,7 +1571,7 @@ static void free_netsgbuf_with_resp(void *buf)
struct sk_buff *skb;
struct lio *lio;
struct octnic_gather *g;
- int i, frags;
+ int i, frags, iq;
sc = (struct octeon_soft_command *)buf;
skb = (struct sk_buff *)sc->callback_arg;
@@ -1450,13 +1595,14 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- dma_unmap_single(&lio->oct_dev->pci_dev->dev,
- finfo->dptr, g->sg_size,
- DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
+ g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
+
+ iq = skb_iq(lio, skb);
- spin_lock(&lio->lock);
- list_add_tail(&g->list, &lio->glist);
- spin_unlock(&lio->lock);
+ spin_lock(&lio->glist_lock[iq]);
+ list_add_tail(&g->list, &lio->glist[iq]);
+ spin_unlock(&lio->glist_lock[iq]);
/* Don't free the skb yet */
@@ -1569,8 +1715,10 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
* @param rq request
* @param on is it on
*/
-static int liquidio_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int
+liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
+ struct ptp_clock_request *rq __attribute__((unused)),
+ int on __attribute__((unused)))
{
return -EOPNOTSUPP;
}
@@ -1657,6 +1805,7 @@ static int load_firmware(struct octeon_device *oct)
if (ret) {
dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
fw_name);
+ release_firmware(fw);
return ret;
}
@@ -1710,7 +1859,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
* @param buf pointer to resp structure
*/
static void if_cfg_callback(struct octeon_device *oct,
- u32 status,
+ u32 status __attribute__((unused)),
void *buf)
{
struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
@@ -1724,7 +1873,10 @@ static void if_cfg_callback(struct octeon_device *oct,
if (resp->status)
dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
CVM_CAST64(resp->status));
- ACCESS_ONCE(ctx->cond) = 1;
+ WRITE_ONCE(ctx->cond, 1);
+
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
+ resp->cfg_info.liquidio_firmware_version);
/* This barrier is required to be sure that the response has been
* written fully before waking up the handler
@@ -1741,16 +1893,16 @@ static void if_cfg_callback(struct octeon_device *oct,
* @returns selected queue number
*/
static u16 select_q(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ void *accel_priv __attribute__((unused)),
+ select_queue_fallback_t fallback __attribute__((unused)))
{
- int qindex;
+ u32 qindex = 0;
struct lio *lio;
lio = GET_LIO(dev);
- /* select queue on chosen queue_mapping or core */
- qindex = skb_rx_queue_recorded(skb) ?
- skb_get_rx_queue(skb) : smp_processor_id();
- return (u16)(qindex & (lio->linfo.num_txpciq - 1));
+ qindex = skb_tx_hash(dev, skb);
+
+ return (u16)(qindex % (lio->linfo.num_txpciq));
}
/** Routine to push packets arriving on Octeon interface up to the network layer.
@@ -1759,26 +1911,28 @@ static u16 select_q(struct net_device *dev, struct sk_buff *skb,
* @param len - size of total data received.
* @param rh - Control header associated with the packet
* @param param - additional control data with the packet
+ * @param arg - farg registered in droq_ops
*/
static void
-liquidio_push_packet(u32 octeon_id,
+liquidio_push_packet(u32 octeon_id __attribute__((unused)),
void *skbuff,
u32 len,
union octeon_rh *rh,
- void *param)
+ void *param,
+ void *arg)
{
struct napi_struct *napi = param;
- struct octeon_device *oct = lio_get_device(octeon_id);
struct sk_buff *skb = (struct sk_buff *)skbuff;
struct skb_shared_hwtstamps *shhwtstamps;
u64 ns;
- struct net_device *netdev =
- (struct net_device *)oct->props[rh->r_dh.link].netdev;
+ u16 vtag = 0;
+ struct net_device *netdev = (struct net_device *)arg;
struct octeon_droq *droq = container_of(param, struct octeon_droq,
napi);
if (netdev) {
int packet_was_received;
struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
/* Do not proceed if the interface is not in RUNNING state. */
if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
@@ -1789,32 +1943,86 @@ liquidio_push_packet(u32 octeon_id,
skb->dev = netdev;
- if (rh->r_dh.has_hwtstamp) {
- /* timestamp is included from the hardware at the
- * beginning of the packet.
- */
- if (ifstate_check(lio,
- LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
- /* Nanoseconds are in the first 64-bits
- * of the packet.
+ skb_record_rx_queue(skb, droq->q_no);
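+ /* Copy-break: pull the first MIN_SKB_SIZE bytes into the
+ * linear area and attach the rest of the receive page as a
+ * fragment, avoiding a full-length copy for large packets.
+ */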
+ if (likely(len > MIN_SKB_SIZE)) {
+ struct octeon_skb_page_info *pg_info;
+ unsigned char *va;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ if (pg_info->page) {
+ /* For Paged allocation use the frags */
+ va = page_address(pg_info->page) +
+ pg_info->page_offset;
+ memcpy(skb->data, va, MIN_SKB_SIZE);
+ skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset +
+ MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
+ }
+ } else {
+ struct octeon_skb_page_info *pg_info =
+ ((struct octeon_skb_page_info *)(skb->cb));
+ skb_copy_to_linear_data(skb, page_address(pg_info->page)
+ + pg_info->page_offset, len);
+ skb_put(skb, len);
+ put_page(pg_info->page);
+ }
+
+ if (((oct->chip_id == OCTEON_CN66XX) ||
+ (oct->chip_id == OCTEON_CN68XX)) &&
+ ptp_enable) {
+ if (rh->r_dh.has_hwtstamp) {
+ /* timestamp is included from the hardware at
+ * the beginning of the packet.
*/
- memcpy(&ns, (skb->data), sizeof(ns));
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp =
- ns_to_ktime(ns + lio->ptp_adjust);
+ if (ifstate_check
+ (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
+ /* Nanoseconds are in the first 64-bits
+ * of the packet.
+ */
+ memcpy(&ns, (skb->data), sizeof(ns));
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(ns +
+ lio->ptp_adjust);
+ }
+ skb_pull(skb, sizeof(ns));
}
- skb_pull(skb, sizeof(ns));
}
skb->protocol = eth_type_trans(skb, skb->dev);
-
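+ /* Accept the firmware checksum verdict only when the flag
+ * matching the packet type is set: CNNIC_TUN_CSUM_VERIFIED
+ * for encapsulated packets, CNNIC_CSUM_VERIFIED otherwise.
+ */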
if ((netdev->features & NETIF_F_RXCSUM) &&
- (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED))
+ (((rh->r_dh.encap_on) &&
+ (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
+ (!(rh->r_dh.encap_on) &&
+ (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
/* checksum has already been verified */
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
+ /* Setting Encapsulation field on basis of status received
+ * from the firmware
+ */
+ if (rh->r_dh.encap_on) {
+ skb->encapsulation = 1;
+ skb->csum_level = 1;
+ droq->stats.rx_vxlan++;
+ }
+
+ /* inbound VLAN tag */
+ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (rh->r_dh.vlan != 0)) {
+ u16 vid = rh->r_dh.vlan;
+ u16 priority = rh->r_dh.priority;
+
+ vtag = priority << 13 | vid;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+ }
+
packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
if (packet_was_received) {
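/* Stand-alone illustration (assumed 802.1Q layout, not from the patch) of
 * the vtag packing above: the tag control information carries the 3-bit
 * priority (PCP) in bits 15..13 and the 12-bit VLAN ID in bits 11..0,
 * hence "priority << 13 | vid".
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t priority = 5;			/* PCP, 0..7 */
	uint16_t vid = 100;			/* VLAN ID, 0..4095 */
	uint16_t vtag = priority << 13 | vid;

	printf("vtag=0x%04x pcp=%u vid=%u\n",
	       vtag, vtag >> 13, vtag & 0xfff);
	return 0;
}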
@@ -1869,39 +2077,6 @@ static void liquidio_napi_drv_callback(void *arg)
}
/**
- * \brief Main NAPI poll function
- * @param droq octeon output queue
- * @param budget maximum number of items to process
- */
-static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget)
-{
- int work_done;
- struct lio *lio = GET_LIO(droq->napi.dev);
- struct octeon_device *oct = lio->oct_dev;
-
- work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
- POLL_EVENT_PROCESS_PKTS,
- budget);
- if (work_done < 0) {
- netif_info(lio, rx_err, lio->netdev,
- "Receive work_done < 0, rxq:%d\n", droq->q_no);
- goto octnet_napi_finish;
- }
-
- if (work_done > budget)
- dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n",
- __func__, work_done, budget);
-
- return work_done;
-
-octnet_napi_finish:
- napi_complete(&droq->napi);
- octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR,
- 0);
- return 0;
-}
-
-/**
* \brief Entry point for NAPI polling
* @param napi NAPI structure
* @param budget maximum number of items to process
@@ -1910,35 +2085,57 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
struct octeon_droq *droq;
int work_done;
+ int tx_done = 0, iq_no;
+ struct octeon_instr_queue *iq;
+ struct octeon_device *oct;
droq = container_of(napi, struct octeon_droq, napi);
+ oct = droq->oct_dev;
+ iq_no = droq->q_no;
+ /* Handle Droq descriptors */
+ work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
+ POLL_EVENT_PROCESS_PKTS,
+ budget);
- work_done = liquidio_napi_do_rx(droq, budget);
+ /* Flush the instruction queue */
+ iq = oct->instr_queue[iq_no];
+ if (iq) {
+ /* Process iq buffers within the budget limits */
+ tx_done = octeon_flush_iq(oct, iq, 1, budget);
+ /* Update iq read-index rather than waiting for next interrupt.
+ * Keep polling (return the full budget) if tx_done is false.
+ */
+ update_txq_status(oct, iq_no);
+ /*tx_done = (iq->flush_index == iq->octeon_read_index);*/
+ } else {
+ dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
+ __func__, iq_no);
+ }
- if (work_done < budget) {
+ if ((work_done < budget) && (tx_done)) {
napi_complete(napi);
octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
POLL_EVENT_ENABLE_INTR, 0);
return 0;
}
- return work_done;
+ return (!tx_done) ? (budget) : (work_done);
}
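/* A small model (not kernel code) of the combined RX/TX completion decision
 * in liquidio_napi_poll() above: polling stays scheduled (the full budget is
 * returned) while TX work remains, the context completes only when RX work
 * is under budget and TX is drained, and otherwise the RX work done is
 * reported as-is.
 */
#include <stdio.h>

static int napi_poll_ret(int work_done, int budget, int tx_done)
{
	if (work_done < budget && tx_done)
		return 0;	/* napi_complete() + re-enable interrupts */
	return !tx_done ? budget : work_done;
}

int main(void)
{
	printf("%d\n", napi_poll_ret(10, 64, 1));	/* 0: complete */
	printf("%d\n", napi_poll_ret(64, 64, 1));	/* 64: RX budget exhausted */
	printf("%d\n", napi_poll_ret(10, 64, 0));	/* 64: TX still pending */
	return 0;
}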
/**
* \brief Setup input and output queues
* @param octeon_dev octeon device
- * @param net_device Net device
+ * @param ifidx Interface Index
*
* Note: Queues are with respect to the octeon device. Thus
* an input queue is for egress packets, and output queues
* are for ingress packets.
*/
static inline int setup_io_queues(struct octeon_device *octeon_dev,
- struct net_device *net_device)
+ int ifidx)
{
- static int first_time = 1;
- static struct octeon_droq_ops droq_ops;
+ struct octeon_droq_ops droq_ops;
+ struct net_device *netdev;
static int cpu_id;
static int cpu_id_modulus;
struct octeon_droq *droq;
@@ -1947,23 +2144,26 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
struct lio *lio;
int num_tx_descs;
- lio = GET_LIO(net_device);
- if (first_time) {
- first_time = 0;
- memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+ netdev = octeon_dev->props[ifidx].netdev;
- droq_ops.fptr = liquidio_push_packet;
+ lio = GET_LIO(netdev);
- droq_ops.poll_mode = 1;
- droq_ops.napi_fn = liquidio_napi_drv_callback;
- cpu_id = 0;
- cpu_id_modulus = num_present_cpus();
- }
+ memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+
+ droq_ops.fptr = liquidio_push_packet;
+ droq_ops.farg = (void *)netdev;
+
+ droq_ops.poll_mode = 1;
+ droq_ops.napi_fn = liquidio_napi_drv_callback;
+ cpu_id = 0;
+ cpu_id_modulus = num_present_cpus();
/* set up DROQs. */
for (q = 0; q < lio->linfo.num_rxpciq; q++) {
- q_no = lio->linfo.rxpciq[q];
-
+ q_no = lio->linfo.rxpciq[q].s.q_no;
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
+ q, q_no);
retval = octeon_setup_droq(octeon_dev, q_no,
CFG_GET_NUM_RX_DESCS_NIC_IF
(octeon_get_conf(octeon_dev),
@@ -1980,7 +2180,11 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
droq = octeon_dev->droq[q_no];
napi = &droq->napi;
- netif_napi_add(net_device, napi, liquidio_napi_poll, 64);
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "netif_napi_add netdev:%llx oct:%llx\n",
+ (u64)netdev,
+ (u64)octeon_dev);
+ netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
/* designate a CPU for this droq */
droq->cpu_id = cpu_id;
@@ -1996,9 +2200,9 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
(octeon_dev),
lio->ifidx);
- retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q],
- num_tx_descs,
- netdev_get_tx_queue(net_device, q));
+ retval = octeon_setup_iq(octeon_dev, ifidx, q,
+ lio->linfo.txpciq[q], num_tx_descs,
+ netdev_get_tx_queue(netdev, q));
if (retval) {
dev_err(&octeon_dev->pci_dev->dev,
" %s : Runtime IQ(TxQ) creation failed.\n",
@@ -2036,7 +2240,8 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- lio->txq_status_wq.wq = create_workqueue("txq-status");
+ lio->txq_status_wq.wq = alloc_workqueue("txq-status",
+ WQ_MEM_RECLAIM, 0);
if (!lio->txq_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
return;
@@ -2048,6 +2253,14 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
&lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}
+static inline void cleanup_tx_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+ destroy_workqueue(lio->txq_status_wq.wq);
+}
+
/**
* \brief Net device open for LiquidIO
* @param netdev network device
@@ -2058,17 +2271,22 @@ static int liquidio_open(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
struct napi_struct *napi, *n;
- list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
- napi_enable(napi);
+ if (oct->props[lio->ifidx].napi_enabled == 0) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_enable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 1;
+ }
oct_ptp_open(netdev);
ifstate_set(lio, LIO_IFSTATE_RUNNING);
+
setup_tx_poll_fn(netdev);
+
start_txq(netdev);
netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
- try_module_get(THIS_MODULE);
/* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1);
@@ -2088,41 +2306,36 @@ static int liquidio_open(struct net_device *netdev)
*/
static int liquidio_stop(struct net_device *netdev)
{
- struct napi_struct *napi, *n;
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
+ ifstate_reset(lio, LIO_IFSTATE_RUNNING);
+
+ netif_tx_disable(netdev);
+
/* Inform that netif carrier is down */
+ netif_carrier_off(netdev);
lio->intf_open = 0;
- lio->linfo.link.s.status = 0;
+ lio->linfo.link.s.link_up = 0;
+ lio->link_changes++;
- netif_carrier_off(netdev);
+ /* Pause for a moment and wait for Octeon to flush out (to the wire) any
+ * egress packets that are in-flight.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(100));
- /* tell Octeon to stop forwarding packets to host */
+ /* Now it should be safe to tell Octeon that nic interface is down. */
send_rx_ctrl_cmd(lio, 0);
- cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
- flush_workqueue(lio->txq_status_wq.wq);
- destroy_workqueue(lio->txq_status_wq.wq);
+ cleanup_tx_poll_fn(netdev);
if (lio->ptp_clock) {
ptp_clock_unregister(lio->ptp_clock);
lio->ptp_clock = NULL;
}
- ifstate_reset(lio, LIO_IFSTATE_RUNNING);
-
- /* This is a hack that allows DHCP to continue working. */
- set_bit(__LINK_STATE_START, &lio->netdev->state);
-
- list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
- napi_disable(napi);
-
- txqs_stop(netdev);
-
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
- module_put(THIS_MODULE);
return 0;
}
@@ -2133,6 +2346,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
struct net_device *netdev = (struct net_device *)nctrl->netpndev;
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ u8 *mac;
switch (nctrl->ncmd.s.cmd) {
case OCTNET_CMD_CHANGE_DEVFLAGS:
@@ -2140,22 +2354,24 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
break;
case OCTNET_CMD_CHANGE_MACADDR:
- /* If command is successful, change the MACADDR. */
- netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n",
- CVM_CAST64(nctrl->udd[0]));
- dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n",
- netdev->name, CVM_CAST64(nctrl->udd[0]));
- memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN);
+ mac = ((u8 *)&nctrl->udd[0]) + 2;
+ netif_info(lio, probe, lio->netdev,
+ "%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ "MACAddr changed to", mac[0], mac[1],
+ mac[2], mac[3], mac[4], mac[5]);
break;
case OCTNET_CMD_CHANGE_MTU:
/* If command is successful, change the MTU. */
netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
- netdev->mtu, nctrl->ncmd.s.param2);
+ netdev->mtu, nctrl->ncmd.s.param1);
dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
netdev->name, netdev->mtu,
- nctrl->ncmd.s.param2);
- netdev->mtu = nctrl->ncmd.s.param2;
+ nctrl->ncmd.s.param1);
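+ /* Writing netdev->mtu and raising NETDEV_CHANGEMTU must be
+ * done under RTNL, hence the lock around the update below.
+ */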
+ rtnl_lock();
+ netdev->mtu = nctrl->ncmd.s.param1;
+ call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
+ rtnl_unlock();
break;
case OCTNET_CMD_GPIO_ACCESS:
@@ -2181,11 +2397,79 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
netdev->name);
break;
+ case OCTNET_CMD_ENABLE_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_ADD_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
+ case OCTNET_CMD_DEL_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
case OCTNET_CMD_SET_SETTINGS:
dev_info(&oct->pci_dev->dev, "%s settings changed\n",
netdev->name);
break;
+ /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_RX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "%s RX Checksum Offload Enabled\n",
+ netdev->name);
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_RXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "%s RX Checksum Offload Disabled\n",
+ netdev->name);
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_TX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "%s TX Checksum Offload Enabled\n",
+ netdev->name);
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_TXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "%s TX Checksum Offload Disabled\n",
+ netdev->name);
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_VXLAN_PORT_CONFIG:
+ if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
+ netif_info(lio, probe, lio->netdev,
+ "%s VxLAN Destination UDP PORT:%d ADDED\n",
+ netdev->name,
+ nctrl->ncmd.s.param1);
+ } else if (nctrl->ncmd.s.more ==
+ OCTNET_CMD_VXLAN_PORT_DEL) {
+ netif_info(lio, probe, lio->netdev,
+ "%s VxLAN Destination UDP PORT:%d DELETED\n",
+ netdev->name,
+ nctrl->ncmd.s.param1);
+ }
+ break;
+
+ case OCTNET_CMD_SET_FLOW_CTL:
+ netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
+ break;
default:
dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
@@ -2235,10 +2519,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
struct netdev_hw_addr *ha;
u64 *mc;
- int ret, i;
+ int ret;
int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
@@ -2246,15 +2529,14 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
/* Create a ctrl pkt command to be sent to core app. */
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = get_new_flags(netdev);
- nctrl.ncmd.s.param3 = mc_count;
+ nctrl.ncmd.s.param1 = get_new_flags(netdev);
+ nctrl.ncmd.s.param2 = mc_count;
nctrl.ncmd.s.more = mc_count;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
/* copy all the addresses into the udd */
- i = 0;
mc = &nctrl.udd[0];
netdev_for_each_mc_addr(ha, netdev) {
*mc = 0;
@@ -2270,9 +2552,7 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
*/
nctrl.wait_time = 0;
- nparams.resp_order = OCTEON_RESP_NORESPONSE;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
ret);
@@ -2290,19 +2570,17 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
struct octeon_device *oct = lio->oct_dev;
struct sockaddr *addr = (struct sockaddr *)p;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
- if ((!is_valid_ether_addr(addr->sa_data)) ||
- (ifstate_check(lio, LIO_IFSTATE_RUNNING)))
+ if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = 0;
+ nctrl.ncmd.s.param1 = 0;
nctrl.ncmd.s.more = 1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
nctrl.wait_time = 100;
@@ -2311,9 +2589,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
/* The MAC Address is presented in network byte order. */
memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
- nparams.resp_order = OCTEON_RESP_ORDERED;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
return -ENOMEM;
@@ -2341,7 +2617,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
oct = lio->oct_dev;
for (i = 0; i < lio->linfo.num_txpciq; i++) {
- iq_no = lio->linfo.txpciq[i];
+ iq_no = lio->linfo.txpciq[i].s.q_no;
iq_stats = &oct->instr_queue[iq_no]->stats;
pkts += iq_stats->tx_done;
drop += iq_stats->tx_dropped;
@@ -2357,7 +2633,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
bytes = 0;
for (i = 0; i < lio->linfo.num_rxpciq; i++) {
- oq_no = lio->linfo.rxpciq[i];
+ oq_no = lio->linfo.rxpciq[i].s.q_no;
oq_stats = &oct->droq[oq_no]->stats;
pkts += oq_stats->rx_pkts_received;
drop += (oq_stats->rx_dropped +
@@ -2383,19 +2659,16 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
- int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE;
int ret = 0;
- /* Limit the MTU to make sure the ethernet packets are between 64 bytes
- * and 65535 bytes
+ /* Limit the MTU to make sure the ethernet packets are between 68 bytes
+ * and 16000 bytes
*/
- if ((max_frm_size < OCTNET_MIN_FRM_SIZE) ||
- (max_frm_size > OCTNET_MAX_FRM_SIZE)) {
+ if ((new_mtu < LIO_MIN_MTU_SIZE) ||
+ (new_mtu > LIO_MAX_MTU_SIZE)) {
dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
- (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE),
- (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE));
+ LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
return -EINVAL;
}
@@ -2403,15 +2676,13 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = new_mtu;
+ nctrl.ncmd.s.param1 = new_mtu;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- nparams.resp_order = OCTEON_RESP_ORDERED;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
return -1;
@@ -2428,7 +2699,7 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
* @param ifr interface request
* @param cmd command
*/
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config conf;
struct lio *lio = GET_LIO(netdev);
@@ -2489,7 +2760,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCSHWTSTAMP:
- return hwtstamp_ioctl(netdev, ifr, cmd);
+ return hwtstamp_ioctl(netdev, ifr);
default:
return -EOPNOTSUPP;
}
@@ -2536,7 +2807,7 @@ static void handle_timestamp(struct octeon_device *oct,
}
octeon_free_soft_command(oct, sc);
- recv_buffer_free(skb);
+ tx_buffer_free(skb);
}
/* \brief Send a data packet that will be timestamped
@@ -2551,10 +2822,9 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
{
int retval;
struct octeon_soft_command *sc;
- struct octeon_instr_ih *ih;
- struct octeon_instr_rdp *rdp;
struct lio *lio;
int ring_doorbell;
+ u32 len;
lio = finfo->lio;
@@ -2576,14 +2846,13 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
sc->callback_arg = finfo->skb;
sc->iq_no = ndata->q_no;
- ih = (struct octeon_instr_ih *)&sc->cmd.ih;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
ring_doorbell = !xmit_more;
retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
- sc, ih->dlengsz, ndata->reqtype);
+ sc, len, ndata->reqtype);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
retval);
octeon_free_soft_command(oct, sc);
@@ -2594,68 +2863,6 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
return retval;
}
-static inline int is_ipv4(struct sk_buff *skb)
-{
- return (skb->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->version == 4);
-}
-
-static inline int is_vlan(struct sk_buff *skb)
-{
- return skb->protocol == htons(ETH_P_8021Q);
-}
-
-static inline int is_ip_fragmented(struct sk_buff *skb)
-{
- /* The Don't fragment and Reserved flag fields are ignored.
- * IP is fragmented if
- * - the More fragments bit is set (indicating this IP is a fragment
- * with more to follow; the current offset could be 0 ).
- * - ths offset field is non-zero.
- */
- return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0;
-}
-
-static inline int is_ipv6(struct sk_buff *skb)
-{
- return (skb->protocol == htons(ETH_P_IPV6)) &&
- (ipv6_hdr(skb)->version == 6);
-}
-
-static inline int is_with_extn_hdr(struct sk_buff *skb)
-{
- return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) &&
- (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP);
-}
-
-static inline int is_tcpudp(struct sk_buff *skb)
-{
- return (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
- (ip_hdr(skb)->protocol == IPPROTO_UDP);
-}
-
-static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb)
-{
- u32 tag;
- struct iphdr *iphdr = ip_hdr(skb);
-
- tag = crc32(0, &iphdr->protocol, 1);
- tag = crc32(tag, (u8 *)&iphdr->saddr, 8);
- tag = crc32(tag, skb_transport_header(skb), 4);
- return tag;
-}
-
-static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb)
-{
- u32 tag;
- struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
-
- tag = crc32(0, &ipv6hdr->nexthdr, 1);
- tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32);
- tag = crc32(tag, skb_transport_header(skb), 4);
- return tag;
-}
-
/** \brief Transmit network packets to the Octeon interface
* @param skbuff skbuff struct to be passed to network layer.
* @param netdev pointer to network device
@@ -2670,18 +2877,22 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
struct octnic_data_pkt ndata;
struct octeon_device *oct;
struct oct_iq_stats *stats;
- int cpu = 0, status = 0;
+ struct octeon_instr_irh *irh;
+ union tx_info *tx_info;
+ int status = 0;
int q_idx = 0, iq_no = 0;
- int xmit_more;
+ int xmit_more, j;
+ u64 dptr = 0;
u32 tag = 0;
lio = GET_LIO(netdev);
oct = lio->oct_dev;
if (netif_is_multiqueue(netdev)) {
- cpu = skb->queue_mapping;
- q_idx = (cpu & (lio->linfo.num_txpciq - 1));
- iq_no = lio->linfo.txpciq[q_idx];
+ q_idx = skb->queue_mapping;
+ q_idx = (q_idx % (lio->linfo.num_txpciq));
+ tag = q_idx;
+ iq_no = lio->linfo.txpciq[q_idx].s.q_no;
} else {
iq_no = lio->txq;
}
@@ -2692,11 +2903,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
* transmitted.
*/
if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
- (!lio->linfo.link.s.status) ||
+ (!lio->linfo.link.s.link_up) ||
(skb->len <= 0)) {
netif_info(lio, tx_err, lio->netdev,
"Transmit failed link_status : %d\n",
- lio->linfo.link.s.status);
+ lio->linfo.link.s.link_up);
goto lio_xmit_failed;
}
@@ -2728,62 +2939,25 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
/* defer sending if queue is full */
stats->tx_iq_busy++;
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
- ndata.q_no);
+ lio->txq);
return NETDEV_TX_BUSY;
}
}
/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
- * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no );
+ * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
*/
ndata.datasize = skb->len;
cmdsetup.u64 = 0;
- cmdsetup.s.ifidx = lio->linfo.ifidx;
+ cmdsetup.s.iq_no = iq_no;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) {
- tag = get_ipv4_5tuple_tag(skb);
-
- cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
-
- if (ip_hdr(skb)->ihl > 5)
- cmdsetup.s.ipv4opts_ipv6exthdr =
- OCT_PKT_PARAM_IPV4OPTS;
-
- } else if (is_ipv6(skb)) {
- tag = get_ipv6_5tuple_tag(skb);
-
- cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
-
- if (is_with_extn_hdr(skb))
- cmdsetup.s.ipv4opts_ipv6exthdr =
- OCT_PKT_PARAM_IPV6EXTHDR;
-
- } else if (is_vlan(skb)) {
- if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
- == htons(ETH_P_IP) &&
- !is_ip_fragmented(skb) && is_tcpudp(skb)) {
- tag = get_ipv4_5tuple_tag(skb);
-
- cmdsetup.s.cksum_offset =
- sizeof(struct vlan_ethhdr) + 1;
-
- if (ip_hdr(skb)->ihl > 5)
- cmdsetup.s.ipv4opts_ipv6exthdr =
- OCT_PKT_PARAM_IPV4OPTS;
-
- } else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
- == htons(ETH_P_IPV6)) {
- tag = get_ipv6_5tuple_tag(skb);
-
- cmdsetup.s.cksum_offset =
- sizeof(struct vlan_ethhdr) + 1;
-
- if (is_with_extn_hdr(skb))
- cmdsetup.s.ipv4opts_ipv6exthdr =
- OCT_PKT_PARAM_IPV6EXTHDR;
- }
+ if (skb->encapsulation) {
+ cmdsetup.s.tnl_csum = 1;
+ stats->tx_vxlan++;
+ } else {
+ cmdsetup.s.transport_csum = 1;
}
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
@@ -2793,20 +2967,21 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (skb_shinfo(skb)->nr_frags == 0) {
cmdsetup.s.u.datasize = skb->len;
- octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
+
/* Offload checksum calculation for TCP/UDP packets */
- ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
- skb->data,
- skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+ dptr = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
__func__);
return NETDEV_TX_BUSY;
}
- finfo->dptr = ndata.cmd.dptr;
-
+ ndata.cmd.cmd2.dptr = dptr;
+ finfo->dptr = dptr;
ndata.reqtype = REQTYPE_NORESP_NET;
} else {
@@ -2814,9 +2989,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
struct skb_frag_struct *frag;
struct octnic_gather *g;
- spin_lock(&lio->lock);
- g = (struct octnic_gather *)list_delete_head(&lio->glist);
- spin_unlock(&lio->lock);
+ spin_lock(&lio->glist_lock[q_idx]);
+ g = (struct octnic_gather *)
+ list_delete_head(&lio->glist[q_idx]);
+ spin_unlock(&lio->glist_lock[q_idx]);
if (!g) {
netif_info(lio, tx_err, lio->netdev,
@@ -2826,7 +3002,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
cmdsetup.s.gather = 1;
cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
- octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
memset(g->sg, 0, g->sg_size);
@@ -2853,36 +3029,52 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag->size,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev,
+ g->sg[i >> 2].ptr[i & 3])) {
+ dma_unmap_single(&oct->pci_dev->dev,
+ g->sg[0].ptr[0],
+ skb->len - skb->data_len,
+ DMA_TO_DEVICE);
+ for (j = 1; j < i; j++) {
+ frag = &skb_shinfo(skb)->frags[j - 1];
+ dma_unmap_page(&oct->pci_dev->dev,
+ g->sg[j >> 2].ptr[j & 3],
+ frag->size,
+ DMA_TO_DEVICE);
+ }
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+
add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
i++;
}
- ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
- g->sg, g->sg_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
- dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
- __func__);
- dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
- skb->len - skb->data_len,
- DMA_TO_DEVICE);
- return NETDEV_TX_BUSY;
- }
+ dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
+ g->sg_size, DMA_TO_DEVICE);
+ dptr = g->sg_dma_ptr;
- finfo->dptr = ndata.cmd.dptr;
+ ndata.cmd.cmd2.dptr = dptr;
+ finfo->dptr = dptr;
finfo->g = g;
ndata.reqtype = REQTYPE_NORESP_NET_SG;
}
- if (skb_shinfo(skb)->gso_size) {
- struct octeon_instr_irh *irh =
- (struct octeon_instr_irh *)&ndata.cmd.irh;
- union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0];
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
- irh->len = 1; /* to indicate that ossp[0] contains tx_info */
+ if (skb_shinfo(skb)->gso_size) {
tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+ stats->tx_gso++;
+ }
+
+ /* HW insert VLAN tag */
+ if (skb_vlan_tag_present(skb)) {
+ irh->priority = skb_vlan_tag_get(skb) >> 13;
+ irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
}
xmit_more = skb->xmit_more;
@@ -2901,7 +3093,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_trans_update(netdev);
- stats->tx_done++;
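+	/* For GSO, count every resulting segment as a completed packet
+	 * rather than the single superframe handed to the driver.
+	 */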
+ if (skb_shinfo(skb)->gso_size)
+ stats->tx_done += skb_shinfo(skb)->gso_segs;
+ else
+ stats->tx_done++;
stats->tx_tot_bytes += skb->len;
return NETDEV_TX_OK;
@@ -2910,9 +3105,10 @@ lio_xmit_failed:
stats->tx_dropped++;
netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
iq_no, stats->tx_dropped);
- dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
- ndata.datasize, DMA_TO_DEVICE);
- recv_buffer_free(skb);
+ if (dptr)
+ dma_unmap_single(&oct->pci_dev->dev, dptr,
+ ndata.datasize, DMA_TO_DEVICE);
+ tx_buffer_free(skb);
return NETDEV_TX_OK;
}
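/* User-space model (illustrative only) of the unwind added to the gather
 * path of liquidio_xmit() above: if mapping fragment i fails, the linear
 * part in slot 0 and fragments 1..i-1 that were already mapped must be
 * unmapped before returning NETDEV_TX_BUSY, or those mappings would leak.
 */
#include <stdio.h>

#define NFRAGS	4
#define FAIL_AT	3	/* pretend mapping fragment 3 fails */

static int map_one(int i)	{ return i != FAIL_AT; }
static void unmap_one(int i)	{ printf("unmapped slot %d\n", i); }

int main(void)
{
	int i, j;

	map_one(0);	/* slot 0: linear skb data, assumed to succeed */
	for (i = 1; i <= NFRAGS; i++) {
		if (map_one(i))
			continue;
		unmap_one(0);			/* the linear mapping */
		for (j = 1; j < i; j++)		/* fragments mapped so far */
			unmap_one(j);
		printf("map of slot %d failed, unwound %d mappings\n", i, i);
		return 1;
	}
	printf("all %d slots mapped\n", NFRAGS + 1);
	return 0;
}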
@@ -2932,27 +3128,145 @@ static void liquidio_tx_timeout(struct net_device *netdev)
txqs_wake(netdev);
}
-int liquidio_set_feature(struct net_device *netdev, int cmd)
+static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto __attribute__((unused)),
+ u16 vid)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
int ret = 0;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
- nctrl.ncmd.s.cmd = cmd;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
+ nctrl.ncmd.s.param1 = vid;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto __attribute__((unused)),
+ u16 vid)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
+ nctrl.ncmd.s.param1 = vid;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
+/** Sending command to enable/disable RX checksum offload
+ * @param netdev pointer to network device
+ * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
+ * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/
+ * OCTNET_CMD_RXCSUM_DISABLE
+ * @returns SUCCESS or FAILURE
+ */
+int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
+ u8 rx_cmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = command;
+ nctrl.ncmd.s.param1 = rx_cmd;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- nparams.resp_order = OCTEON_RESP_NORESPONSE;
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev,
+ "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
+/** Sending command to add/delete VxLAN UDP port to firmware
+ * @param netdev pointer to network device
+ * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
+ * @param vxlan_port VxLAN port to be added or deleted
+ * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
+ * OCTNET_CMD_VXLAN_PORT_DEL
+ * @returns SUCCESS or FAILURE
+ */
+static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
+ u16 vxlan_port, u8 vxlan_cmd_bit)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = command;
+ nctrl.ncmd.s.more = vxlan_cmd_bit;
+ nctrl.ncmd.s.param1 = vxlan_port;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev,
+ "VxLAN port add/delete failed in core (ret:0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = cmd;
+ nctrl.ncmd.s.param1 = param1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
ret);
@@ -3008,14 +3322,55 @@ static int liquidio_set_features(struct net_device *netdev,
return 0;
if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
- liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
else if (!(features & NETIF_F_LRO) &&
(lio->dev_capability & NETIF_F_LRO))
- liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE);
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+
+ /* Sending command to firmware to enable/disable RX checksum
+ * offload settings using ethtool
+ */
+ if (!(netdev->features & NETIF_F_RXCSUM) &&
+ (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
+ (features & NETIF_F_RXCSUM))
+ liquidio_set_rxcsum_command(netdev,
+ OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_ENABLE);
+ else if ((netdev->features & NETIF_F_RXCSUM) &&
+ (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
+ !(features & NETIF_F_RXCSUM))
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_DISABLE);
return 0;
}
+static void liquidio_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ liquidio_vxlan_port_command(netdev,
+ OCTNET_CMD_VXLAN_PORT_CONFIG,
+ htons(ti->port),
+ OCTNET_CMD_VXLAN_PORT_ADD);
+}
+
+static void liquidio_del_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ liquidio_vxlan_port_command(netdev,
+ OCTNET_CMD_VXLAN_PORT_CONFIG,
+ htons(ti->port),
+ OCTNET_CMD_VXLAN_PORT_DEL);
+}
+
static struct net_device_ops lionetdevops = {
.ndo_open = liquidio_open,
.ndo_stop = liquidio_stop,
@@ -3024,10 +3379,15 @@ static struct net_device_ops lionetdevops = {
.ndo_set_mac_address = liquidio_set_mac,
.ndo_set_rx_mode = liquidio_set_mcast_list,
.ndo_tx_timeout = liquidio_tx_timeout,
+
+ .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
+ .ndo_udp_tunnel_add = liquidio_add_vxlan_port,
+ .ndo_udp_tunnel_del = liquidio_del_vxlan_port,
};
/** \brief Entry point for the liquidio module
@@ -3082,24 +3442,27 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
struct octeon_device *oct = (struct octeon_device *)buf;
struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
- int ifidx = 0;
+ int gmxport = 0;
union oct_link_status *ls;
int i;
- if ((recv_pkt->buffer_size[0] != sizeof(*ls)) ||
- (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) {
+ if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
recv_pkt->buffer_size[0],
- recv_pkt->rh.r_nic_info.ifidx);
+ recv_pkt->rh.r_nic_info.gmxport);
goto nic_info_err;
}
- ifidx = recv_pkt->rh.r_nic_info.ifidx;
+ gmxport = recv_pkt->rh.r_nic_info.gmxport;
ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
-
- update_link_status(oct->props[ifidx].netdev, ls);
+ for (i = 0; i < oct->ifcount; i++) {
+ if (oct->props[i].gmxport == gmxport) {
+ update_link_status(oct->props[i].netdev, ls);
+ break;
+ }
+ }
nic_info_err:
for (i = 0; i < recv_pkt->buffer_count; i++)
@@ -3125,13 +3488,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
struct liquidio_if_cfg_context *ctx;
struct liquidio_if_cfg_resp *resp;
struct octdev_props *props;
- int retval, num_iqueues, num_oqueues, q_no;
- u64 q_mask;
- int num_cpus = num_online_cpus();
+ int retval, num_iqueues, num_oqueues;
union oct_nic_if_cfg if_cfg;
unsigned int base_queue;
unsigned int gmx_port_id;
u32 resp_size, ctx_size;
+ u32 ifidx_or_pfnum;
/* This is to handle link status changes */
octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
@@ -3167,14 +3529,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
gmx_port_id =
CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
- if (num_iqueues > num_cpus)
- num_iqueues = num_cpus;
- if (num_oqueues > num_cpus)
- num_oqueues = num_cpus;
+ ifidx_or_pfnum = i;
+
dev_dbg(&octeon_dev->pci_dev->dev,
"requesting config for interface %d, iqs %d, oqs %d\n",
- i, num_iqueues, num_oqueues);
- ACCESS_ONCE(ctx->cond) = 0;
+ ifidx_or_pfnum, num_iqueues, num_oqueues);
+ WRITE_ONCE(ctx->cond, 0);
ctx->octeon_id = lio_get_device_id(octeon_dev);
init_waitqueue_head(&ctx->wc);
@@ -3183,16 +3543,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if_cfg.s.num_oqueues = num_oqueues;
if_cfg.s.base_queue = base_queue;
if_cfg.s.gmx_port_id = gmx_port_id;
+
+ sc->iq_no = 0;
+
octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
- OPCODE_NIC_IF_CFG, i,
+ OPCODE_NIC_IF_CFG, 0,
if_cfg.u64, 0);
sc->callback = if_cfg_callback;
sc->callback_arg = sc;
- sc->wait_time = 1000;
+ sc->wait_time = 3000;
retval = octeon_send_soft_command(octeon_dev, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
dev_err(&octeon_dev->pci_dev->dev,
"iq/oq config failed status: %x\n",
retval);
@@ -3234,8 +3597,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
goto setup_nic_dev_fail;
}
- props = &octeon_dev->props[i];
- props->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
if (num_iqueues > 1)
lionetdevops.ndo_select_queue = select_q;
@@ -3249,23 +3611,21 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
memset(lio, 0, sizeof(struct lio));
- lio->linfo.ifidx = resp->cfg_info.ifidx;
- lio->ifidx = resp->cfg_info.ifidx;
+ lio->ifidx = ifidx_or_pfnum;
+
+ props = &octeon_dev->props[i];
+ props->gmxport = resp->cfg_info.linfo.gmxport;
+ props->netdev = netdev;
lio->linfo.num_rxpciq = num_oqueues;
lio->linfo.num_txpciq = num_iqueues;
- q_mask = resp->cfg_info.oqmask;
- /* q_mask is 0-based and already verified mask is nonzero */
for (j = 0; j < num_oqueues; j++) {
- q_no = __ffs64(q_mask);
- q_mask &= (~(1UL << q_no));
- lio->linfo.rxpciq[j] = q_no;
+ lio->linfo.rxpciq[j].u64 =
+ resp->cfg_info.linfo.rxpciq[j].u64;
}
- q_mask = resp->cfg_info.iqmask;
for (j = 0; j < num_iqueues; j++) {
- q_no = __ffs64(q_mask);
- q_mask &= (~(1UL << q_no));
- lio->linfo.txpciq[j] = q_no;
+ lio->linfo.txpciq[j].u64 =
+ resp->cfg_info.linfo.txpciq[j].u64;
}
lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
@@ -3274,16 +3634,41 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
lio->dev_capability = NETIF_F_HIGHDMA
- | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_SG | NETIF_F_RXCSUM
- | NETIF_F_TSO | NETIF_F_TSO6
- | NETIF_F_LRO;
+ | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_SG | NETIF_F_RXCSUM
+ | NETIF_F_GRO
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
- netdev->features = lio->dev_capability;
+ /* Copy of transmit encapsulation capabilities:
+ * TSO, TSO6, Checksums for this device
+ */
+ lio->enc_dev_capability = NETIF_F_IP_CSUM
+ | NETIF_F_IPV6_CSUM
+ | NETIF_F_GSO_UDP_TUNNEL
+ | NETIF_F_HW_CSUM | NETIF_F_SG
+ | NETIF_F_RXCSUM
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+
+ netdev->hw_enc_features = (lio->enc_dev_capability &
+ ~NETIF_F_LRO);
+
+ lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
+
netdev->vlan_features = lio->dev_capability;
+ /* Add any unchangeable hw features */
+ lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
netdev->hw_features = lio->dev_capability;
+ /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
+ netdev->hw_features = netdev->hw_features &
+ ~NETIF_F_HW_VLAN_CTAG_RX;
/* Point to the properties for octeon device to which this
* interface belongs.
@@ -3291,7 +3676,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->oct_dev = octeon_dev;
lio->octprops = props;
lio->netdev = netdev;
- spin_lock_init(&lio->lock);
dev_dbg(&octeon_dev->pci_dev->dev,
"if%d gmx: %d hw_addr: 0x%llx\n", i,
@@ -3306,23 +3690,22 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
ether_addr_copy(netdev->dev_addr, mac);
- if (setup_io_queues(octeon_dev, netdev)) {
+ /* By default all interfaces on a single Octeon use the same
+ * tx and rx queues
+ */
+ lio->txq = lio->linfo.txpciq[0].s.q_no;
+ lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+ if (setup_io_queues(octeon_dev, i)) {
dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
goto setup_nic_dev_fail;
}
ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
- /* By default all interfaces on a single Octeon uses the same
- * tx and rx queues
- */
- lio->txq = lio->linfo.txpciq[0];
- lio->rxq = lio->linfo.rxpciq[0];
-
lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
- if (setup_glist(lio)) {
+ if (setup_glists(octeon_dev, lio, num_iqueues)) {
dev_err(&octeon_dev->pci_dev->dev,
"Gather list allocation failed\n");
goto setup_nic_dev_fail;
@@ -3330,11 +3713,17 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Register ethtool support */
liquidio_set_ethtool_ops(netdev);
+ octeon_dev->priv_flags = 0x0;
- liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+ if (netdev->features & NETIF_F_LRO)
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+
+ liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
if ((debug != -1) && (debug & NETIF_MSG_HW))
- liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE);
+ liquidio_set_feature(netdev,
+ OCTNET_CMD_VERBOSE_ENABLE, 0);
/* Register the network device with the OS */
if (register_netdev(netdev)) {
@@ -3346,16 +3735,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
netif_carrier_off(netdev);
-
- if (lio->linfo.link.s.status) {
- netif_carrier_on(netdev);
- start_txq(netdev);
- } else {
- netif_carrier_off(netdev);
- }
+ lio->link_changes++;
ifstate_set(lio, LIO_IFSTATE_REGISTERED);
+ /* Send a command to the firmware to enable Rx checksum offload
+ * by default when the LiquidIO driver is set up for this device
+ */
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_ENABLE);
+ liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
+ OCTNET_CMD_TXCSUM_ENABLE);
+
dev_dbg(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup successful\n", i);
@@ -3386,7 +3778,7 @@ setup_nic_dev_fail:
static int liquidio_init_nic_module(struct octeon_device *oct)
{
struct oct_intrmod_cfg *intrmod_cfg;
- int retval = 0;
+ int i, retval = 0;
int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
@@ -3400,6 +3792,9 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
memset(oct->props, 0,
sizeof(struct octdev_props) * num_nic_ports);
+ for (i = 0; i < MAX_OCTEON_LINKS; i++)
+ oct->props[i].gmxport = -1;
+
retval = setup_nic_devices(oct);
if (retval) {
dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
@@ -3410,15 +3805,19 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
/* Initialize interrupt moderation params */
intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
- intrmod_cfg->intrmod_enable = 1;
- intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
- intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
- intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
- intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER;
- intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER;
- intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER;
- intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER;
-
+ intrmod_cfg->rx_enable = 1;
+ intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+ intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
+ intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
+ intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
+ intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
+ intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
+ intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
+ intrmod_cfg->tx_enable = 1;
+ intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
+ intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
+ intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
+ intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
return retval;
@@ -3481,6 +3880,7 @@ static void nic_starter(struct work_struct *work)
static int octeon_device_init(struct octeon_device *octeon_dev)
{
int j, ret;
+ char bootcmd[] = "\n";
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)octeon_dev->priv;
atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
@@ -3558,6 +3958,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Release any previously allocated queues */
for (j = 0; j < octeon_dev->num_oqs; j++)
octeon_delete_droq(octeon_dev, j);
+ return 1;
}
atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
@@ -3580,7 +3981,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Setup the interrupt handler and record the INT SUM register address
*/
- octeon_setup_interrupt(octeon_dev);
+ if (octeon_setup_interrupt(octeon_dev))
+ return 1;
/* Enable Octeon device interrupts */
octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
@@ -3592,14 +3994,19 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
- if (ddr_timeout == 0) {
- dev_info(&octeon_dev->pci_dev->dev,
- "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
- }
+ if (ddr_timeout == 0)
+ dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
/* Wait for the octeon to initialize DDR after the soft-reset. */
+ while (ddr_timeout == 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ / 10)) {
+ /* user probably pressed Control-C */
+ return 1;
+ }
+ }
ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
if (ret) {
dev_err(&octeon_dev->pci_dev->dev,
@@ -3613,6 +4020,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
return 1;
}
+ /* Divert U-Boot to take commands from the host instead. */
+ ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
+
dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
ret = octeon_init_consoles(octeon_dev);
if (ret) {
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 0ac347ccc8ba..199a8b9c7dc5 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -30,10 +30,10 @@
#include "octeon_config.h"
-#define LIQUIDIO_VERSION "1.1.9"
-#define LIQUIDIO_MAJOR_VERSION 1
-#define LIQUIDIO_MINOR_VERSION 1
-#define LIQUIDIO_MICRO_VERSION 9
+#define LIQUIDIO_BASE_VERSION "1.4"
+#define LIQUIDIO_MICRO_VERSION ".1"
+#define LIQUIDIO_PACKAGE ""
+#define LIQUIDIO_VERSION "1.4.1"
#define CONTROL_IQ 0
/** Tag types used by Octeon cores in its work. */
@@ -174,9 +174,11 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
/*------------------------- End Scatter/Gather ---------------------------*/
#define OCTNET_FRM_PTP_HEADER_SIZE 8
-#define OCTNET_FRM_HEADER_SIZE 30 /* PTP timestamp + VLAN + Ethernet */
-#define OCTNET_MIN_FRM_SIZE (64 + OCTNET_FRM_PTP_HEADER_SIZE)
+#define OCTNET_FRM_HEADER_SIZE 22 /* VLAN + Ethernet */
+
+#define OCTNET_MIN_FRM_SIZE 64
+
#define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE)
#define OCTNET_DEFAULT_FRM_SIZE (1500 + OCTNET_FRM_HEADER_SIZE)
@@ -212,6 +214,17 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_VERBOSE_ENABLE 0x14
#define OCTNET_CMD_VERBOSE_DISABLE 0x15
+#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16
+#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
+#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
+#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
+#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
+#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
+#define OCTNET_CMD_RXCSUM_ENABLE 0x0
+#define OCTNET_CMD_RXCSUM_DISABLE 0x1
+#define OCTNET_CMD_TXCSUM_ENABLE 0x0
+#define OCTNET_CMD_TXCSUM_DISABLE 0x1
+
/* RX(packets coming from wire) Checksum verification flags */
/* TCP/UDP csum */
#define CNNIC_L4SUM_VERIFIED 0x1
@@ -258,19 +271,19 @@ union octnet_cmd {
u64 more:6; /* How many udd words follow the command */
- u64 param1:29;
+ u64 reserved:29;
- u64 param2:16;
+ u64 param1:16;
- u64 param3:8;
+ u64 param2:8;
#else
- u64 param3:8;
+ u64 param2:8;
- u64 param2:16;
+ u64 param1:16;
- u64 param1:29;
+ u64 reserved:29;
u64 more:6;
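The hunk above renumbers the command parameters: the old 29-bit param1 becomes reserved, and payloads now travel in a 16-bit param1 plus an 8-bit param2, enough for a UDP port plus a sub-command as the VXLAN defines earlier in this header require. A minimal standalone sketch of the little-endian view (plain C, not driver code; the 5-bit pad is a hypothetical stand-in for the opcode bits this hunk does not show, and the sample values are invented):

#include <stdint.h>
#include <stdio.h>

union octnet_cmd_sketch {
	uint64_t u64;
	struct {
		uint64_t param2:8;
		uint64_t param1:16;
		uint64_t reserved:29;
		uint64_t more:6;
		uint64_t pad:5;	/* hypothetical: opcode bits not shown in the hunk */
	} s;
};

int main(void)
{
	union octnet_cmd_sketch c = { .u64 = 0 };

	c.s.param1 = 4789;	/* e.g. a VXLAN UDP port */
	c.s.param2 = 0x0;	/* e.g. an add/del sub-command */
	printf("cmd word: 0x%016llx\n", (unsigned long long)c.u64);
	return 0;
}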
@@ -283,8 +296,140 @@ union octnet_cmd {
#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
+/* Instruction Header (DPI) - for OCTEON-III models */
+struct octeon_instr_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+ /** Reserved3 */
+ u64 reserved3:1;
+
+ /** Gather indicator 1=gather */
+ u64 gather:1;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Reserved2 */
+ u64 reserved2:4;
+
+ /** PKI port kind - PKIND */
+ u64 pkind:6;
+
+ /** Reserved1 */
+ u64 reserved1:32;
+
+#else
+ /** Reserved1 */
+ u64 reserved1:32;
+
+ /** PKI port kind - PKIND */
+ u64 pkind:6;
+
+ /** Reserved2 */
+ u64 reserved2:4;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Gather indicator 1=gather */
+ u64 gather:1;
+
+ /** Reserved3 */
+ u64 reserved3:1;
+
+#endif
+};
+
+/* Optional PKI Instruction Header (PKI IH) - for OCTEON-III models */
+/** BIG ENDIAN format. */
+struct octeon_instr_pki_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+ /** Wider bit */
+ u64 w:1;
+
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+
+ /** Use Tag */
+ u64 utag:1;
+
+ /** Use QPG */
+ u64 uqpg:1;
+
+ /** Reserved2 */
+ u64 reserved2:1;
+
+ /** Parse Mode */
+ u64 pm:3;
+
+ /** Skip Length */
+ u64 sl:8;
+
+ /** Use Tag Type */
+ u64 utt:1;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Reserved1 */
+ u64 reserved1:2;
+
+ /** QPG Value */
+ u64 qpg:11;
+
+ /** Tag Value */
+ u64 tag:32;
+
+#else
+
+ /** Tag Value */
+ u64 tag:32;
+
+ /** QPG Value */
+ u64 qpg:11;
+
+ /** Reserved1 */
+ u64 reserved1:2;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Use Tag Type */
+ u64 utt:1;
+
+ /** Skip Length */
+ u64 sl:8;
+
+ /** Parse Mode */
+ u64 pm:3;
+
+ /** Reserved2 */
+ u64 reserved2:1;
+
+ /** Use QPG */
+ u64 uqpg:1;
+
+ /** Use Tag */
+ u64 utag:1;
+
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+
+ /** Wider bit */
+ u64 w:1;
+#endif
+
+};
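These instruction-header structs follow the header's mirrored-declaration convention: the same 64-bit wire word is declared twice, most-significant field first under __BIG_ENDIAN_BITFIELD and in reverse order otherwise, so a given field name selects the same bits on either kind of host. A standalone sketch of the idea (SKETCH_BIG_ENDIAN is a hypothetical stand-in for the kernel's config macro):

#include <stdint.h>
#include <stdio.h>

union wire_word {
	uint64_t u64;
	struct {
#ifdef SKETCH_BIG_ENDIAN	/* hypothetical stand-in macro */
		uint64_t tag:32;	/* most-significant field first */
		uint64_t rest:32;
#else
		uint64_t rest:32;	/* reversed for little-endian hosts */
		uint64_t tag:32;
#endif
	} s;
};

int main(void)
{
	union wire_word w = { .u64 = 0 };

	w.s.tag = 0xabcd;	/* lands in the same wire bits either way */
	printf("0x%016llx\n", (unsigned long long)w.u64);
	return 0;
}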
+
/** Instruction Header */
-struct octeon_instr_ih {
+struct octeon_instr_ih2 {
#ifdef __BIG_ENDIAN_BITFIELD
/** Raw mode indicator 1 = RAW */
u64 raw:1;
@@ -348,15 +493,15 @@ struct octeon_instr_irh {
u64 opcode:4;
u64 rflag:1;
u64 subcode:7;
- u64 len:3;
- u64 rid:13;
- u64 reserved:4;
+ u64 vlan:12;
+ u64 priority:3;
+ u64 reserved:5;
u64 ossp:32; /* opcode/subcode specific parameters */
#else
u64 ossp:32; /* opcode/subcode specific parameters */
- u64 reserved:4;
- u64 rid:13;
- u64 len:3;
+ u64 reserved:5;
+ u64 priority:3;
+ u64 vlan:12;
u64 subcode:7;
u64 rflag:1;
u64 opcode:4;
@@ -383,75 +528,77 @@ union octeon_rh {
struct {
u64 opcode:4;
u64 subcode:8;
- u64 len:3; /** additional 64-bit words */
- u64 rid:13; /** request id in response to pkt sent by host */
- u64 reserved:4;
- u64 ossp:32; /** opcode/subcode specific parameters */
+ u64 len:3; /** additional 64-bit words */
+ u64 reserved:17;
+ u64 ossp:32; /** opcode/subcode specific parameters */
} r;
struct {
u64 opcode:4;
u64 subcode:8;
- u64 len:3; /** additional 64-bit words */
- u64 rid:13; /** request id in response to pkt sent by host */
- u64 extra:24;
- u64 link:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 extra:28;
+ u64 vlan:12;
+ u64 priority:3;
u64 csum_verified:3; /** checksum verified. */
u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */
+ u64 encap_on:1;
+ u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
} r_dh;
struct {
u64 opcode:4;
u64 subcode:8;
- u64 len:3; /** additional 64-bit words */
- u64 rid:13; /** request id in response to pkt sent by host */
+ u64 len:3; /** additional 64-bit words */
+ u64 reserved:11;
u64 num_gmx_ports:8;
- u64 max_nic_ports:8;
+ u64 max_nic_ports:10;
u64 app_cap_flags:4;
- u64 app_mode:16;
+ u64 app_mode:8;
+ u64 pkind:8;
} r_core_drv_init;
struct {
u64 opcode:4;
u64 subcode:8;
u64 len:3; /** additional 64-bit words */
- u64 rid:13;
- u64 reserved:4;
+ u64 reserved:8;
u64 extra:25;
- u64 ifidx:7;
+ u64 gmxport:16;
} r_nic_info;
#else
u64 u64;
struct {
u64 ossp:32; /** opcode/subcode specific parameters */
- u64 reserved:4;
- u64 rid:13; /** req id in response to pkt sent by host */
+ u64 reserved:17;
u64 len:3; /** additional 64-bit words */
u64 subcode:8;
u64 opcode:4;
} r;
struct {
+ u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
+ u64 encap_on:1;
u64 has_hwtstamp:1; /** 1 = has hwtstamp */
u64 csum_verified:3; /** checksum verified. */
- u64 link:8;
- u64 extra:24;
- u64 rid:13; /** req id in response to pkt sent by host */
+ u64 priority:3;
+ u64 vlan:12;
+ u64 extra:28;
u64 len:3; /** additional 64-bit words */
u64 subcode:8;
u64 opcode:4;
} r_dh;
struct {
- u64 app_mode:16;
+ u64 pkind:8;
+ u64 app_mode:8;
u64 app_cap_flags:4;
- u64 max_nic_ports:8;
+ u64 max_nic_ports:10;
u64 num_gmx_ports:8;
- u64 rid:13;
+ u64 reserved:11;
u64 len:3; /** additional 64-bit words */
u64 subcode:8;
u64 opcode:4;
} r_core_drv_init;
struct {
- u64 ifidx:7;
+ u64 gmxport:16;
u64 extra:25;
- u64 reserved:4;
- u64 rid:13;
+ u64 reserved:8;
u64 len:3; /** additional 64-bit words */
u64 subcode:8;
u64 opcode:4;
@@ -461,30 +608,25 @@ union octeon_rh {
#define OCT_RH_SIZE (sizeof(union octeon_rh))
-#define OCT_PKT_PARAM_IPV4OPTS 1
-#define OCT_PKT_PARAM_IPV6EXTHDR 2
-
union octnic_packet_params {
u32 u32;
struct {
#ifdef __BIG_ENDIAN_BITFIELD
- u32 reserved:6;
+ u32 reserved:24;
+ u32 ip_csum:1; /* Perform IP header checksum(s) */
+ /* Perform Outer transport header checksum */
+ u32 transport_csum:1;
+ /* Find tunnel, and perform transport csum. */
u32 tnl_csum:1;
- u32 ip_csum:1;
- u32 ipv4opts_ipv6exthdr:2;
- u32 ipsec_ops:4;
- u32 tsflag:1;
- u32 csoffset:9;
- u32 ifidx:8;
+ u32 tsflag:1; /* Timestamp this packet */
+ u32 ipsec_ops:4; /* IPsec operation */
#else
- u32 ifidx:8;
- u32 csoffset:9;
- u32 tsflag:1;
u32 ipsec_ops:4;
- u32 ipv4opts_ipv6exthdr:2;
- u32 ip_csum:1;
+ u32 tsflag:1;
u32 tnl_csum:1;
- u32 reserved:6;
+ u32 transport_csum:1;
+ u32 ip_csum:1;
+ u32 reserved:24;
#endif
} s;
};
@@ -496,56 +638,96 @@ union oct_link_status {
struct {
#ifdef __BIG_ENDIAN_BITFIELD
u64 duplex:8;
- u64 status:8;
u64 mtu:16;
u64 speed:16;
+ u64 link_up:1;
u64 autoneg:1;
- u64 interface:4;
+ u64 if_mode:5;
u64 pause:1;
- u64 reserved:10;
+ u64 flashing:1;
+ u64 reserved:15;
#else
- u64 reserved:10;
+ u64 reserved:15;
+ u64 flashing:1;
u64 pause:1;
- u64 interface:4;
+ u64 if_mode:5;
u64 autoneg:1;
+ u64 link_up:1;
u64 speed:16;
u64 mtu:16;
- u64 status:8;
u64 duplex:8;
#endif
} s;
};
+/** The txpciq info passed to the host from the firmware */
+
+union oct_txpciq {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 q_no:8;
+ u64 port:8;
+ u64 pkind:6;
+ u64 use_qpg:1;
+ u64 qpg:11;
+ u64 reserved:30;
+#else
+ u64 reserved:30;
+ u64 qpg:11;
+ u64 use_qpg:1;
+ u64 pkind:6;
+ u64 port:8;
+ u64 q_no:8;
+#endif
+ } s;
+};
+
+/** The rxpciq info passed to the host from the firmware */
+
+union oct_rxpciq {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 q_no:8;
+ u64 reserved:56;
+#else
+ u64 reserved:56;
+ u64 q_no:8;
+#endif
+ } s;
+};
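With these unions, the firmware hands back each queue as a packed u64 and the host picks fields out through the overlay, which is why the setup code earlier in this patch switched from carrying bare queue numbers to copying rxpciq[j].u64 and txpciq[j].u64 wholesale and reading q_no through the .s view. A standalone decode sketch (plain C, little-endian view only; the values are invented):

#include <stdint.h>
#include <stdio.h>

union txpciq_sketch {
	uint64_t u64;
	struct {
		uint64_t reserved:30;
		uint64_t qpg:11;
		uint64_t use_qpg:1;
		uint64_t pkind:6;
		uint64_t port:8;
		uint64_t q_no:8;
	} s;
};

int main(void)
{
	union txpciq_sketch t = { .u64 = 0 };

	t.s.q_no = 3;	/* queue number */
	t.s.port = 1;	/* PCI port */
	t.s.pkind = 12;	/* PKI port kind */

	printf("raw: 0x%016llx q_no=%u port=%u\n",
	       (unsigned long long)t.u64,
	       (unsigned)t.s.q_no, (unsigned)t.s.port);
	return 0;
}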
+
/** Information for a OCTEON ethernet interface shared between core & host. */
struct oct_link_info {
union oct_link_status link;
u64 hw_addr;
#ifdef __BIG_ENDIAN_BITFIELD
- u16 gmxport;
- u8 rsvd[3];
- u8 num_txpciq;
- u8 num_rxpciq;
- u8 ifidx;
+ u64 gmxport:16;
+ u64 rsvd:32;
+ u64 num_txpciq:8;
+ u64 num_rxpciq:8;
#else
- u8 ifidx;
- u8 num_rxpciq;
- u8 num_txpciq;
- u8 rsvd[3];
- u16 gmxport;
+ u64 num_rxpciq:8;
+ u64 num_txpciq:8;
+ u64 rsvd:32;
+ u64 gmxport:16;
#endif
- u8 txpciq[MAX_IOQS_PER_NICIF];
- u8 rxpciq[MAX_IOQS_PER_NICIF];
+ union oct_txpciq txpciq[MAX_IOQS_PER_NICIF];
+ union oct_rxpciq rxpciq[MAX_IOQS_PER_NICIF];
};
#define OCT_LINK_INFO_SIZE (sizeof(struct oct_link_info))
struct liquidio_if_cfg_info {
- u64 ifidx;
u64 iqmask; /** mask for IQs enabled for the port */
u64 oqmask; /** mask for OQs enabled for the port */
struct oct_link_info linfo; /** initial link information */
+ char liquidio_firmware_version[32];
};
/** Stats for each NIC port in RX direction. */
@@ -570,10 +752,18 @@ struct nic_rx_stats {
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
+ u64 fw_rx_vxlan;
+ u64 fw_rx_vxlan_err;
+
+ /* LRO */
u64 fw_lro_pkts; /* Number of packets that are LROed */
u64 fw_lro_octs; /* Number of octets that are LROed */
u64 fw_total_lro; /* Number of LRO packets formed */
u64 fw_lro_aborts; /* Number of times lRO of packet aborted */
+ u64 fw_lro_aborts_port;
+ u64 fw_lro_aborts_seq;
+ u64 fw_lro_aborts_tsval;
+ u64 fw_lro_aborts_timer;
/* intrmod: packet forward rate */
u64 fwd_rate;
};
@@ -597,9 +787,14 @@ struct nic_tx_stats {
/* firmware stats */
u64 fw_total_sent;
u64 fw_total_fwd;
+ u64 fw_total_fwd_bytes;
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
+ u64 fw_err_tso;
+ u64 fw_tso; /* number of tso requests */
+ u64 fw_tso_fwd; /* number of packets segmented in tso */
+ u64 fw_tx_vxlan;
};
struct oct_link_stats {
@@ -630,23 +825,44 @@ struct oct_mdio_cmd {
#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats))
+/* intrmod: max. packet rate threshold */
+#define LIO_INTRMOD_MAXPKT_RATETHR 196608
+/* intrmod: min. packet rate threshold */
+#define LIO_INTRMOD_MINPKT_RATETHR 9216
+/* intrmod: max. packets to trigger interrupt */
+#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384
+/* intrmod: min. packets to trigger interrupt */
+#define LIO_INTRMOD_RXMINCNT_TRIGGER 1
+/* intrmod: max. time to trigger interrupt */
+#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128
+/* 66xx:intrmod: min. time to trigger interrupt
+ * (value of 1 is optimum for TCP_RR)
+ */
+#define LIO_INTRMOD_RXMINTMR_TRIGGER 1
+
+/* intrmod: max. packets to trigger interrupt */
+#define LIO_INTRMOD_TXMAXCNT_TRIGGER 64
+/* intrmod: min. packets to trigger interrupt */
+#define LIO_INTRMOD_TXMINCNT_TRIGGER 0
+
+/* intrmod: poll interval in seconds */
#define LIO_INTRMOD_CHECK_INTERVAL 1
-#define LIO_INTRMOD_MAXPKT_RATETHR 196608 /* max pkt rate threshold */
-#define LIO_INTRMOD_MINPKT_RATETHR 9216 /* min pkt rate threshold */
-#define LIO_INTRMOD_MAXCNT_TRIGGER 384 /* max pkts to trigger interrupt */
-#define LIO_INTRMOD_MINCNT_TRIGGER 1 /* min pkts to trigger interrupt */
-#define LIO_INTRMOD_MAXTMR_TRIGGER 128 /* max time to trigger interrupt */
-#define LIO_INTRMOD_MINTMR_TRIGGER 32 /* min time to trigger interrupt */
struct oct_intrmod_cfg {
- u64 intrmod_enable;
- u64 intrmod_check_intrvl;
- u64 intrmod_maxpkt_ratethr;
- u64 intrmod_minpkt_ratethr;
- u64 intrmod_maxcnt_trigger;
- u64 intrmod_maxtmr_trigger;
- u64 intrmod_mincnt_trigger;
- u64 intrmod_mintmr_trigger;
+ u64 rx_enable;
+ u64 tx_enable;
+ u64 check_intrvl;
+ u64 maxpkt_ratethr;
+ u64 minpkt_ratethr;
+ u64 rx_maxcnt_trigger;
+ u64 rx_mincnt_trigger;
+ u64 rx_maxtmr_trigger;
+ u64 rx_mintmr_trigger;
+ u64 tx_mincnt_trigger;
+ u64 tx_maxcnt_trigger;
+ u64 rx_frames;
+ u64 tx_frames;
+ u64 rx_usecs;
};
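The renamed fields split interrupt moderation into independent rx and tx sides, with the packet-rate thresholds steering how aggressively events are batched. The real adaptation logic is not in this hunk; the sketch below is only one plausible shape for it, interpolating the rx trigger count between the bounds defined above:

#include <stdint.h>
#include <stdio.h>

struct intrmod_state {
	uint64_t maxpkt_ratethr, minpkt_ratethr;	/* packets/sec thresholds */
	uint64_t rx_maxcnt_trigger, rx_mincnt_trigger;	/* packets per interrupt */
};

/* Illustrative only: linearly interpolate the rx trigger between the
 * configured bounds; assumes maxpkt_ratethr > minpkt_ratethr.
 */
static uint64_t pick_rx_trigger(const struct intrmod_state *c, uint64_t rate)
{
	if (rate >= c->maxpkt_ratethr)
		return c->rx_maxcnt_trigger;	/* batch hard under load */
	if (rate <= c->minpkt_ratethr)
		return c->rx_mincnt_trigger;	/* stay responsive when idle */
	return c->rx_mincnt_trigger +
	       (c->rx_maxcnt_trigger - c->rx_mincnt_trigger) *
	       (rate - c->minpkt_ratethr) /
	       (c->maxpkt_ratethr - c->minpkt_ratethr);
}

int main(void)
{
	struct intrmod_state c = {
		.maxpkt_ratethr = 196608, .minpkt_ratethr = 9216,
		.rx_maxcnt_trigger = 384, .rx_mincnt_trigger = 1,
	};

	printf("trigger at 100000 pps: %llu\n",
	       (unsigned long long)pick_rx_trigger(&c, 100000));
	return 0;
}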
#define BASE_QUEUE_NOT_REQUESTED 65535
@@ -659,9 +875,9 @@ union oct_nic_if_cfg {
u64 num_iqueues:16;
u64 num_oqueues:16;
u64 gmx_port_id:8;
- u64 reserved:8;
+ u64 vf_id:8;
#else
- u64 reserved:8;
+ u64 vf_id:8;
u64 gmx_port_id:8;
u64 num_oqueues:16;
u64 num_iqueues:16;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index 62a8dd5cd3dc..b3396e3a8bab 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -37,7 +37,7 @@
/* Maximum octeon devices defined as MAX_OCTEON_NICIF to support
* multiple(<= MAX_OCTEON_NICIF) Miniports
*/
-#define MAX_OCTEON_NICIF 32
+#define MAX_OCTEON_NICIF 128
#define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF
#define MAX_OCTEON_LINKS MAX_OCTEON_NICIF
#define MAX_OCTEON_MULTICAST_ADDR 32
@@ -135,7 +135,7 @@
#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp)
/* Max IOQs per OCTEON Link */
-#define MAX_IOQS_PER_NICIF 32
+#define MAX_IOQS_PER_NICIF 64
enum lio_card_type {
LIO_210SV = 0, /* Two port, 66xx */
@@ -226,7 +226,7 @@ struct octeon_oq_config {
*/
u64 refill_threshold:16;
- /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ /** If set, the Output queue uses info-pointer mode. (Default: 1) */
u64 info_ptr:32;
/* Max number of OQs available */
@@ -236,7 +236,7 @@ struct octeon_oq_config {
/* Max number of OQs available */
u64 max_oqs:8;
- /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ /** If set, the Output queue uses info-pointer mode. (Default: 1) */
u64 info_ptr:32;
/** The number of buffers that were consumed during packet processing by
@@ -416,9 +416,11 @@ struct octeon_config {
#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS)
/* Maximum number of Octeon Instruction (command) queues */
-#define MAX_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
+#define MAX_OCTEON_INSTR_QUEUES(oct) CN6XXX_MAX_INPUT_QUEUES
+/* Maximum number of Octeon Output queues */
+#define MAX_OCTEON_OUTPUT_QUEUES(oct) CN6XXX_MAX_OUTPUT_QUEUES
-/* Maximum number of Octeon Instruction (command) queues */
-#define MAX_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
#endif /* __OCTEON_CONFIG_H__ */
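Turning the queue-count limits into function-like macros that take (and for now ignore) the device is a forward-compatibility move: the call sites are rewritten once, and a later chip can supply a per-device limit without touching them again, while the MAX_POSSIBLE_* constants stay compile-time so arrays can still be statically sized. A standalone sketch of the call-site pattern, paired with the qmask test used throughout this patch (names simplified):

#include <stdint.h>
#include <stdio.h>

#define CN6XXX_MAX_INPUT_QUEUES 32
/* Parameterized form: ignores "oct" today, but a later chip can key the
 * limit off the device without rewriting the call sites.
 */
#define MAX_OCTEON_INSTR_QUEUES(oct) CN6XXX_MAX_INPUT_QUEUES

struct oct_sketch {
	uint64_t iq_mask;	/* one bit per allocated instruction queue */
};

int main(void)
{
	struct oct_sketch oct = { .iq_mask = 0x5ULL };	/* queues 0 and 2 */
	int i;

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(&oct); i++)
		if (oct.iq_mask & (1ULL << i))
			printf("queue %d allocated\n", i);
	return 0;
}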
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 466147e409c9..bbb50ea66f16 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -23,27 +23,14 @@
/**
* @file octeon_console.c
*/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
-#include "cn66xx_regs.h"
-#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
#include "octeon_mem_ops.h"
static void octeon_remote_lock(void);
@@ -51,6 +38,8 @@ static void octeon_remote_unlock(void);
static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
const char *name,
u32 flags);
+static int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size);
#define MIN(a, b) min((a), (b))
#define CAST_ULL(v) ((u64)(v))
@@ -170,8 +159,8 @@ struct octeon_pci_console_desc {
offsetof(struct cvmx_bootmem_desc, field), \
SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
-#define __cvmx_bootmem_lock(flags)
-#define __cvmx_bootmem_unlock(flags)
+#define __cvmx_bootmem_lock(flags) (flags = flags)
+#define __cvmx_bootmem_unlock(flags) (flags = flags)
/**
* This macro returns a member of the
@@ -234,7 +223,7 @@ static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
u32 len)
{
addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
- octeon_pci_read_core_mem(oct, addr, str, len);
+ octeon_pci_read_core_mem(oct, addr, (u8 *)str, len);
str[len] = 0;
}
@@ -323,6 +312,9 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
if (name && named_size) {
char *name_tmp =
kmalloc(name_length + 1, GFP_KERNEL);
+ if (!name_tmp)
+ break;
+
CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr,
name_tmp,
name_length);
@@ -383,7 +375,7 @@ static void octeon_remote_unlock(void)
int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
u32 wait_hundredths)
{
- u32 len = strlen(cmd_str);
+ u32 len = (u32)strlen(cmd_str);
dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str);
@@ -440,8 +432,7 @@ int octeon_wait_for_bootloader(struct octeon_device *oct,
}
static void octeon_console_handle_result(struct octeon_device *oct,
- size_t console_num,
- char *buffer, s32 bytes_read)
+ size_t console_num)
{
struct octeon_console *console;
@@ -492,7 +483,7 @@ static void check_console(struct work_struct *work)
struct octeon_console *console;
struct cavium_wk *wk = (struct cavium_wk *)work;
struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
- size_t console_num = wk->ctxul;
+ u32 console_num = (u32)wk->ctxul;
u32 delay;
console = &oct->console[console_num];
@@ -505,20 +496,17 @@ static void check_console(struct work_struct *work)
*/
bytes_read =
octeon_console_read(oct, console_num, console_buffer,
- sizeof(console_buffer) - 1, 0);
+ sizeof(console_buffer) - 1);
if (bytes_read > 0) {
total_read += bytes_read;
- if (console->waiting) {
- octeon_console_handle_result(oct, console_num,
- console_buffer,
- bytes_read);
- }
+ if (console->waiting)
+ octeon_console_handle_result(oct, console_num);
if (octeon_console_debug_enabled(console_num)) {
output_console_line(oct, console, console_num,
console_buffer, bytes_read);
}
} else if (bytes_read < 0) {
- dev_err(&oct->pci_dev->dev, "Error reading console %lu, ret=%d\n",
+ dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n",
console_num, bytes_read);
}
@@ -530,7 +518,7 @@ static void check_console(struct work_struct *work)
*/
if (octeon_console_debug_enabled(console_num) &&
(total_read == 0) && (console->leftover[0])) {
- dev_info(&oct->pci_dev->dev, "%lu: %s\n",
+ dev_info(&oct->pci_dev->dev, "%u: %s\n",
console_num, console->leftover);
console->leftover[0] = '\0';
}
@@ -675,8 +663,8 @@ static inline int octeon_console_avail_bytes(u32 buffer_size,
octeon_console_free_bytes(buffer_size, wr_idx, rd_idx);
}
-int octeon_console_read(struct octeon_device *oct, u32 console_num,
- char *buffer, u32 buf_size, u32 flags)
+static int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size)
{
int bytes_to_read;
u32 rd_idx, wr_idx;
@@ -712,7 +700,7 @@ int octeon_console_read(struct octeon_device *oct, u32 console_num,
bytes_to_read = console->buffer_size - rd_idx;
octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx,
- buffer, bytes_to_read);
+ (u8 *)buffer, bytes_to_read);
octeon_write_device_mem32(oct, console->addr +
offsetof(struct octeon_pci_console,
output_read_index),
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 8e23e3fad662..0eb504a4379a 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -19,28 +19,19 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/crc32.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
@@ -449,10 +440,10 @@ static struct octeon_config_ptr {
};
static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
- "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
+ "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
"DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
- "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
+ "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
"INVALID"
};
@@ -550,17 +541,19 @@ static char *get_oct_app_string(u32 app_mode)
return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
}
+u8 fbuf[4 * 1024 * 1024];
+
int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
size_t size)
{
int ret = 0;
- u8 *p;
- u8 *buffer;
+ u8 *p = fbuf;
u32 crc32_result;
u64 load_addr;
u32 image_len;
struct octeon_firmware_file_header *h;
- u32 i;
+ u32 i, rem, base_len = strlen(LIQUIDIO_BASE_VERSION);
+ char *base;
if (size < sizeof(struct octeon_firmware_file_header)) {
dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
@@ -576,19 +569,26 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
return -EINVAL;
}
- crc32_result =
- crc32(~0, data,
- sizeof(struct octeon_firmware_file_header) -
- sizeof(u32)) ^ ~0U;
+ crc32_result = crc32((unsigned int)~0, data,
+ sizeof(struct octeon_firmware_file_header) -
+ sizeof(u32)) ^ ~0U;
if (crc32_result != be32_to_cpu(h->crc32)) {
dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
crc32_result, be32_to_cpu(h->crc32));
return -EINVAL;
}
- if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) {
- dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n",
- LIQUIDIO_VERSION, h->version);
+ if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
+ LIQUIDIO_PACKAGE, h->version);
+ return -EINVAL;
+ }
+
+ base = h->version + strlen(LIQUIDIO_PACKAGE);
+ ret = memcmp(LIQUIDIO_BASE_VERSION, base, base_len);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
+ LIQUIDIO_BASE_VERSION, base);
return -EINVAL;
}
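The version check is now two-stage: the package prefix must match exactly, then only the base version ("1.4") is compared, so firmware micro releases no longer require a driver rebuild. A standalone sketch of the same matching logic (plain C; the macros are reproduced from the header hunk above):

#include <stdio.h>
#include <string.h>

#define LIQUIDIO_PACKAGE ""
#define LIQUIDIO_BASE_VERSION "1.4"

/* Package prefix first, then the base version; the micro version after
 * "1.4" is intentionally not compared.
 */
static int fw_version_ok(const char *fw_version)
{
	const char *base;

	if (strncmp(LIQUIDIO_PACKAGE, fw_version, strlen(LIQUIDIO_PACKAGE)))
		return 0;
	base = fw_version + strlen(LIQUIDIO_PACKAGE);
	return memcmp(LIQUIDIO_BASE_VERSION, base,
		      strlen(LIQUIDIO_BASE_VERSION)) == 0;
}

int main(void)
{
	printf("%d\n", fw_version_ok("1.4.1"));	/* 1: micro may differ */
	printf("%d\n", fw_version_ok("1.3.9"));	/* 0: base mismatch */
	return 0;
}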
@@ -602,58 +602,58 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
h->version);
- buffer = kmemdup(data, size, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- p = buffer + sizeof(struct octeon_firmware_file_header);
+ data += sizeof(struct octeon_firmware_file_header);
+ dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
+ be32_to_cpu(h->num_images));
/* load all images */
for (i = 0; i < be32_to_cpu(h->num_images); i++) {
load_addr = be64_to_cpu(h->desc[i].addr);
image_len = be32_to_cpu(h->desc[i].len);
- /* validate the image */
- crc32_result = crc32(~0, p, image_len) ^ ~0U;
- if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
- dev_err(&oct->pci_dev->dev,
- "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
- i, crc32_result,
- be32_to_cpu(h->desc[i].crc32));
- ret = -EINVAL;
- goto done_downloading;
- }
+ dev_info(&oct->pci_dev->dev, "Loading firmware: %d bytes at 0x%llx\n",
+ image_len, load_addr);
- /* download the image */
- octeon_pci_write_core_mem(oct, load_addr, p, image_len);
+ /* Write in 4MB chunks*/
+ rem = image_len;
- p += image_len;
- dev_dbg(&oct->pci_dev->dev,
- "Downloaded image %d (%d bytes) to address 0x%016llx\n",
- i, image_len, load_addr);
+ while (rem) {
+ if (rem < (4 * 1024 * 1024))
+ size = rem;
+ else
+ size = 4 * 1024 * 1024;
+
+ memcpy(p, data, size);
+
+ /* download the image */
+ octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
+
+ data += size;
+ rem -= (u32)size;
+ load_addr += size;
+ }
}
+ dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
+ h->bootcmd);
/* Invoke the bootcmd */
ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
-done_downloading:
- kfree(buffer);
-
- return ret;
+ return 0;
}
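The rewritten loader streams each image through a fixed 4 MB staging buffer instead of duplicating the whole firmware with kmemdup, advancing the source pointer, the remaining byte count and the target load address in lock step. A standalone sketch of just the chunking arithmetic (the print stands in for the actual core-memory write):

#include <stdint.h>
#include <stdio.h>

#define CHUNK (4 * 1024 * 1024)

static void download_sketch(uint64_t load_addr, uint32_t image_len)
{
	uint32_t rem = image_len, size;

	while (rem) {
		size = rem < CHUNK ? rem : CHUNK;
		/* memcpy(staging, data, size); then write at load_addr */
		printf("write %u bytes at 0x%llx\n", (unsigned)size,
		       (unsigned long long)load_addr);
		load_addr += size;
		rem -= size;
	}
}

int main(void)
{
	download_sketch(0x20000000ULL, 9 * 1024 * 1024 + 5);
	return 0;
}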
void octeon_free_device_mem(struct octeon_device *oct)
{
- u32 i;
+ int i;
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- /* could check mask as well */
- vfree(oct->droq[i]);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (oct->io_qmask.oq & (1ULL << i))
+ vfree(oct->droq[i]);
}
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- /* could check mask as well */
- vfree(oct->instr_queue[i]);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (oct->io_qmask.iq & (1ULL << i))
+ vfree(oct->instr_queue[i]);
}
i = oct->octeon_id;
@@ -735,55 +735,61 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
octeon_device[oct_idx] = oct;
oct->octeon_id = oct_idx;
- snprintf((oct->device_name), sizeof(oct->device_name),
+ snprintf(oct->device_name, sizeof(oct->device_name),
"LiquidIO%d", (oct->octeon_id));
return oct;
}
+/* This function only sets up the first queue. */
int octeon_setup_instr_queues(struct octeon_device *oct)
{
- u32 i, num_iqs = 0;
u32 num_descs = 0;
+ u32 iq_no = 0;
+ union oct_txpciq txpciq;
+ int numa_node = cpu_to_node(iq_no % num_online_cpus());
/* this causes queue 0 to be default queue */
- if (OCTEON_CN6XXX(oct)) {
- num_iqs = 1;
+ if (OCTEON_CN6XXX(oct))
num_descs =
CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
- }
oct->num_iqs = 0;
- for (i = 0; i < num_iqs; i++) {
- oct->instr_queue[i] =
+ oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]),
+ numa_node);
+ if (!oct->instr_queue[0])
+ oct->instr_queue[0] =
vmalloc(sizeof(struct octeon_instr_queue));
- if (!oct->instr_queue[i])
- return 1;
-
- memset(oct->instr_queue[i], 0,
- sizeof(struct octeon_instr_queue));
-
- oct->instr_queue[i]->app_ctx = (void *)(size_t)i;
- if (octeon_init_instr_queue(oct, i, num_descs))
- return 1;
-
- oct->num_iqs++;
+ if (!oct->instr_queue[0])
+ return 1;
+ memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
+ oct->instr_queue[0]->q_index = 0;
+ oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
+ oct->instr_queue[0]->ifidx = 0;
+ txpciq.u64 = 0;
+ txpciq.s.q_no = iq_no;
+ txpciq.s.use_qpg = 0;
+ txpciq.s.qpg = 0;
+ if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
+ /* prevent memory leak */
+ vfree(oct->instr_queue[0]);
+ return 1;
}
+ oct->num_iqs++;
return 0;
}
int octeon_setup_output_queues(struct octeon_device *oct)
{
- u32 i, num_oqs = 0;
u32 num_descs = 0;
u32 desc_size = 0;
+ u32 oq_no = 0;
+ int numa_node = cpu_to_node(oq_no % num_online_cpus());
/* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct)) {
- /* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
- num_oqs = 1;
num_descs =
CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
desc_size =
@@ -791,19 +797,15 @@ int octeon_setup_output_queues(struct octeon_device *oct)
}
oct->num_oqs = 0;
+ oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
+ if (!oct->droq[0])
+ oct->droq[0] = vmalloc(sizeof(*oct->droq[0]));
+ if (!oct->droq[0])
+ return 1;
- for (i = 0; i < num_oqs; i++) {
- oct->droq[i] = vmalloc(sizeof(*oct->droq[i]));
- if (!oct->droq[i])
- return 1;
-
- memset(oct->droq[i], 0, sizeof(struct octeon_droq));
-
- if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
- return 1;
-
- oct->num_oqs++;
- }
+ if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL))
+ return 1;
+ oct->num_oqs++;
return 0;
}
@@ -1005,79 +1007,6 @@ octeon_register_dispatch_fn(struct octeon_device *oct,
return 0;
}
-/* octeon_unregister_dispatch_fn
- * Parameters:
- * oct - octeon device
- * opcode - driver should unregister the function for this opcode
- * subcode - driver should unregister the function for this subcode
- * Description:
- * Unregister the function set for this opcode+subcode.
- * Returns:
- * Success: 0
- * Failure: 1
- * Locks:
- * No locks are held.
- */
-int
-octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
- u16 subcode)
-{
- int retval = 0;
- u32 idx;
- struct list_head *dispatch, *dfree = NULL, *tmp2;
- u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
-
- idx = combined_opcode & OCTEON_OPCODE_MASK;
-
- spin_lock_bh(&oct->dispatch.lock);
-
- if (oct->dispatch.count == 0) {
- spin_unlock_bh(&oct->dispatch.lock);
- dev_err(&oct->pci_dev->dev,
- "No dispatch functions registered for this device\n");
- return 1;
- }
-
- if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
- dispatch = &oct->dispatch.dlist[idx].list;
- if (dispatch->next != dispatch) {
- dispatch = dispatch->next;
- oct->dispatch.dlist[idx].opcode =
- ((struct octeon_dispatch *)dispatch)->opcode;
- oct->dispatch.dlist[idx].dispatch_fn =
- ((struct octeon_dispatch *)
- dispatch)->dispatch_fn;
- oct->dispatch.dlist[idx].arg =
- ((struct octeon_dispatch *)dispatch)->arg;
- list_del(dispatch);
- dfree = dispatch;
- } else {
- oct->dispatch.dlist[idx].opcode = 0;
- oct->dispatch.dlist[idx].dispatch_fn = NULL;
- oct->dispatch.dlist[idx].arg = NULL;
- }
- } else {
- retval = 1;
- list_for_each_safe(dispatch, tmp2,
- &(oct->dispatch.dlist[idx].
- list)) {
- if (((struct octeon_dispatch *)dispatch)->opcode ==
- combined_opcode) {
- list_del(dispatch);
- dfree = dispatch;
- retval = 0;
- }
- }
- }
-
- if (!retval)
- oct->dispatch.count--;
-
- spin_unlock_bh(&oct->dispatch.lock);
- vfree(dfree);
- return retval;
-}
-
int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
{
u32 i;
@@ -1152,8 +1081,8 @@ core_drv_init_err:
int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
{
- if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) &&
- (oct->io_qmask.iq & (1UL << q_no)))
+ if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
+ (oct->io_qmask.iq & (1ULL << q_no)))
return oct->instr_queue[q_no]->max_count;
return -1;
@@ -1161,8 +1090,8 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
{
- if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) &&
- (oct->io_qmask.oq & (1UL << q_no)))
+ if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
+ (oct->io_qmask.oq & (1ULL << q_no)))
return oct->droq[q_no]->max_count;
return -1;
}
@@ -1253,10 +1182,10 @@ void lio_pci_writeq(struct octeon_device *oct,
int octeon_mem_access_ok(struct octeon_device *oct)
{
u64 access_okay = 0;
+ u64 lmc0_reset_ctl;
/* Check to make sure a DDR interface is enabled */
- u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
-
+ lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
return access_okay ? 0 : 1;
@@ -1270,9 +1199,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
if (!timeout)
return ret;
- while (*timeout == 0)
- schedule_timeout_uninterruptible(HZ / 10);
-
for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
ms += HZ / 10) {
ret = octeon_mem_access_ok(oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 36e1f85df8c4..01edfb404346 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -152,9 +152,9 @@ struct octeon_mmio {
#define MAX_OCTEON_MAPS 32
struct octeon_io_enable {
- u32 iq;
- u32 oq;
- u32 iq64B;
+ u64 iq;
+ u64 oq;
+ u64 iq64B;
};
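Widening io_qmask from u32 to u64 is what the 1ULL shifts elsewhere in this patch rely on: with up to 64 queues, "1UL << q_no" is undefined for q_no >= 32 on 32-bit hosts, where unsigned long is 32 bits. A standalone sketch of the pitfall being avoided:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t oq = 0;
	unsigned int q_no = 40;

	oq |= (1ULL << q_no);	/* well-defined for any q_no < 64 */
	printf("queue %u enabled: %d\n", q_no, (int)((oq >> q_no) & 1));
	return 0;
}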
struct octeon_reg_list {
@@ -204,8 +204,7 @@ struct octeon_fn_list {
void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
void (*bar1_idx_write)(struct octeon_device *, u32, u32);
u32 (*bar1_idx_read)(struct octeon_device *, u32);
- u32 (*update_iq_read_idx)(struct octeon_device *,
- struct octeon_instr_queue *);
+ u32 (*update_iq_read_idx)(struct octeon_instr_queue *);
void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
@@ -222,7 +221,7 @@ struct octeon_fn_list {
/* Structure for named memory blocks
* Number of descriptors
- * available can be changed without affecting compatiblity,
+ * available can be changed without affecting compatibility,
* but name length changes require a bump in the bootmem
* descriptor version
* Note: This structure must be naturally 64 bit aligned, as a single
@@ -255,7 +254,7 @@ struct oct_fw_info {
struct cavium_wk {
struct delayed_work work;
void *ctxptr;
- size_t ctxul;
+ u64 ctxul;
};
struct cavium_wq {
@@ -267,6 +266,8 @@ struct octdev_props {
/* Each interface in the Octeon device has a network
* device pointer (used for OS specific calls).
*/
+ int napi_enabled;
+ int gmxport;
struct net_device *netdev;
};
@@ -324,7 +325,8 @@ struct octeon_device {
struct octeon_sc_buffer_pool sc_buf_pool;
/** The input instruction queues */
- struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES];
+ struct octeon_instr_queue *instr_queue
+ [MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
/** The doubly-linked list of instruction response */
struct octeon_response_list response_list[MAX_RESPONSE_LISTS];
@@ -332,7 +334,7 @@ struct octeon_device {
u32 num_oqs;
/** The DROQ output queues */
- struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES];
+ struct octeon_droq *droq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];
struct octeon_io_enable io_qmask;
@@ -381,15 +383,29 @@ struct octeon_device {
struct cavium_wq dma_comp_wq;
- struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES];
+ /** Lock for dma response list */
+ spinlock_t cmd_resp_wqlock;
+ u32 cmd_resp_state;
+
+ struct cavium_wq check_db_wq[MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
struct cavium_wk nic_poll_work;
struct cavium_wk console_poll_work[MAX_OCTEON_MAPS];
void *priv;
+
+ int rx_pause;
+ int tx_pause;
+
+ struct oct_link_stats link_stats; /* statistics from firmware */
+
+ /* private flags to control driver-specific features through ethtool */
+ u32 priv_flags;
};
+#define OCT_DRV_ONLINE 1
+#define OCT_DRV_OFFLINE 2
#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \
(oct->chip_id == OCTEON_CN68XX))
#define CHIP_FIELD(oct, TYPE, field) \
@@ -569,8 +585,7 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num);
int octeon_console_write(struct octeon_device *oct, u32 console_num,
char *buffer, u32 write_request_size, u32 flags);
int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
-int octeon_console_read(struct octeon_device *oct, u32 console_num,
- char *buffer, u32 buf_size, u32 flags);
+
int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
/** Removes all attached consoles. */
@@ -646,4 +661,17 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
*/
struct octeon_config *octeon_get_conf(struct octeon_device *oct);
+/* LiquidIO driver private flags */
+enum {
+ OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */
+};
+
+static inline void lio_set_priv_flag(struct octeon_device *octdev, u32 flag,
+ u32 val)
+{
+ if (val)
+ octdev->priv_flags |= (0x1 << flag);
+ else
+ octdev->priv_flags &= ~(0x1 << flag);
+}
#endif
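The new helper keeps one bit per ethtool private flag in priv_flags. A standalone mirror of it, exercised with the single flag defined so far:

#include <stdint.h>
#include <stdio.h>

enum { OCT_PRIV_FLAG_TX_BYTES = 0 };

struct octdev_sketch {
	uint32_t priv_flags;
};

/* Same set/clear logic as lio_set_priv_flag() above. */
static void set_priv_flag(struct octdev_sketch *d, uint32_t flag, uint32_t val)
{
	if (val)
		d->priv_flags |= (0x1 << flag);
	else
		d->priv_flags &= ~(0x1 << flag);
}

int main(void)
{
	struct octdev_sketch dev = { .priv_flags = 0 };

	set_priv_flag(&dev, OCT_PRIV_FLAG_TX_BYTES, 1);
	printf("flags=0x%x\n", dev.priv_flags);
	set_priv_flag(&dev, OCT_PRIV_FLAG_TX_BYTES, 0);
	printf("flags=0x%x\n", dev.priv_flags);
	return 0;
}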
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 174072b3740b..e0afe4c1fd01 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -19,30 +19,18 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
-
-/* #define CAVIUM_ONLY_PERF_MODE */
#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
@@ -104,8 +92,12 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
return fn_arg;
}
-u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
- struct octeon_droq *droq)
+/** Check for packets on Droq. This function should be called with
+ * lock held.
+ * @param droq - Droq on which count is checked.
+ * @return Returns packet count.
+ */
+u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
u32 pkt_count = 0;
@@ -151,22 +143,26 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
struct octeon_droq *droq)
{
u32 i;
+ struct octeon_skb_page_info *pg_info;
for (i = 0; i < droq->max_count; i++) {
- if (droq->recv_buf_list[i].buffer) {
- if (droq->desc_ring) {
- lio_unmap_ring_info(oct->pci_dev,
- (u64)droq->
- desc_ring[i].info_ptr,
- OCT_DROQ_INFO_SIZE);
- lio_unmap_ring(oct->pci_dev,
- (u64)droq->desc_ring[i].
- buffer_ptr,
- droq->buffer_size);
- }
- recv_buffer_free(droq->recv_buf_list[i].buffer);
- droq->recv_buf_list[i].buffer = NULL;
- }
+ pg_info = &droq->recv_buf_list[i].pg_info;
+
+ if (pg_info->dma)
+ lio_unmap_ring(oct->pci_dev,
+ (u64)pg_info->dma);
+ pg_info->dma = 0;
+
+ if (pg_info->page)
+ recv_buffer_destroy(droq->recv_buf_list[i].buffer,
+ pg_info);
+
+ if (droq->desc_ring && droq->desc_ring[i].info_ptr)
+ lio_unmap_ring_info(oct->pci_dev,
+ (u64)droq->
+ desc_ring[i].info_ptr,
+ OCT_DROQ_INFO_SIZE);
+ droq->recv_buf_list[i].buffer = NULL;
}
octeon_droq_reset_indices(droq);
@@ -181,25 +177,23 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct,
struct octeon_droq_desc *desc_ring = droq->desc_ring;
for (i = 0; i < droq->max_count; i++) {
- buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size);
+ buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);
if (!buf) {
dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
__func__);
+ droq->stats.rx_alloc_failure++;
return -ENOMEM;
}
droq->recv_buf_list[i].buffer = buf;
droq->recv_buf_list[i].data = get_rbd(buf);
-
droq->info_list[i].length = 0;
/* map ring buffers into memory */
desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
desc_ring[i].buffer_ptr =
- lio_map_ring(oct->pci_dev,
- droq->recv_buf_list[i].buffer,
- droq->buffer_size);
+ lio_map_ring(droq->recv_buf_list[i].buffer);
}
octeon_droq_reset_indices(droq);
@@ -242,6 +236,8 @@ int octeon_init_droq(struct octeon_device *oct,
struct octeon_droq *droq;
u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
+ int orig_node = dev_to_node(&oct->pci_dev->dev);
+ int numa_node = cpu_to_node(q_no % num_online_cpus());
dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
@@ -261,15 +257,23 @@ int octeon_init_droq(struct octeon_device *oct,
struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
- c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ c_refill_threshold =
+ (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ } else {
+ return 1;
}
droq->max_count = c_num_descs;
droq->buffer_size = c_buf_size;
desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
+ set_dev_node(&oct->pci_dev->dev, numa_node);
droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
(dma_addr_t *)&droq->desc_ring_dma);
+ set_dev_node(&oct->pci_dev->dev, orig_node);
+ if (!droq->desc_ring)
+ droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
+ (dma_addr_t *)&droq->desc_ring_dma);
if (!droq->desc_ring) {
dev_err(&oct->pci_dev->dev,
@@ -283,12 +287,11 @@ int octeon_init_droq(struct octeon_device *oct,
droq->max_count);
droq->info_list =
- cnnic_alloc_aligned_dma(oct->pci_dev,
- (droq->max_count * OCT_DROQ_INFO_SIZE),
- &droq->info_alloc_size,
- &droq->info_base_addr,
- &droq->info_list_dma);
-
+ cnnic_numa_alloc_aligned_dma((droq->max_count *
+ OCT_DROQ_INFO_SIZE),
+ &droq->info_alloc_size,
+ &droq->info_base_addr,
+ numa_node);
if (!droq->info_list) {
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -297,7 +300,12 @@ int octeon_init_droq(struct octeon_device *oct,
}
droq->recv_buf_list = (struct octeon_recv_buffer *)
- vmalloc(droq->max_count *
+ vmalloc_node(droq->max_count *
+ OCT_DROQ_RECVBUF_SIZE,
+ numa_node);
+ if (!droq->recv_buf_list)
+ droq->recv_buf_list = (struct octeon_recv_buffer *)
+ vmalloc(droq->max_count *
OCT_DROQ_RECVBUF_SIZE);
if (!droq->recv_buf_list) {
dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
@@ -320,7 +328,7 @@ int octeon_init_droq(struct octeon_device *oct,
/* For 56xx Pass1, this function won't be called, so no checks. */
oct->fn_list.setup_oq_regs(oct, q_no);
- oct->io_qmask.oq |= (1 << q_no);
+ oct->io_qmask.oq |= (1ULL << q_no);
return 0;
@@ -358,6 +366,7 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
struct octeon_recv_pkt *recv_pkt;
struct octeon_recv_info *recv_info;
u32 i, bytes_left;
+ struct octeon_skb_page_info *pg_info;
info = &droq->info_list[idx];
@@ -375,9 +384,14 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
bytes_left = (u32)info->length;
while (buf_cnt) {
- lio_unmap_ring(octeon_dev->pci_dev,
- (u64)droq->desc_ring[idx].buffer_ptr,
- droq->buffer_size);
+ {
+ pg_info = &droq->recv_buf_list[idx].pg_info;
+
+ lio_unmap_ring(octeon_dev->pci_dev,
+ (u64)pg_info->dma);
+ pg_info->page = NULL;
+ pg_info->dma = 0;
+ }
recv_pkt->buffer_size[i] =
(bytes_left >=
@@ -449,6 +463,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
void *buf = NULL;
u8 *data;
u32 desc_refilled = 0;
+ struct octeon_skb_page_info *pg_info;
desc_ring = droq->desc_ring;
@@ -458,13 +473,22 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
* the buffer, else allocate.
*/
if (!droq->recv_buf_list[droq->refill_idx].buffer) {
- buf = recv_buffer_alloc(octeon_dev, droq->q_no,
- droq->buffer_size);
+ pg_info =
+ &droq->recv_buf_list[droq->refill_idx].pg_info;
+ /* Either recycle the existing page or allocate
+ * a new one
+ */
+ if (pg_info->page)
+ buf = recv_buffer_reuse(octeon_dev, pg_info);
+ else
+ buf = recv_buffer_alloc(octeon_dev, pg_info);
/* If a buffer could not be allocated, no point in
* continuing
*/
- if (!buf)
+ if (!buf) {
+ droq->stats.rx_alloc_failure++;
break;
+ }
droq->recv_buf_list[droq->refill_idx].buffer =
buf;
data = get_rbd(buf);
@@ -476,11 +500,8 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
droq->recv_buf_list[droq->refill_idx].data = data;
desc_ring[droq->refill_idx].buffer_ptr =
- lio_map_ring(octeon_dev->pci_dev,
- droq->recv_buf_list[droq->
- refill_idx].buffer,
- droq->buffer_size);
-
+ lio_map_ring(droq->recv_buf_list[droq->
+ refill_idx].buffer);
/* Reset any previous values in the length field. */
droq->info_list[droq->refill_idx].length = 0;
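The refill path above now prefers recycling: if the descriptor's pg_info still owns a page it is reused, and only otherwise is a fresh page allocated, with failures counted in rx_alloc_failure. A standalone sketch of that decision; every name is a stand-in, and malloc stands in for the driver's page allocation:

#include <stdio.h>
#include <stdlib.h>

struct pg_info_sketch {
	void *page;		/* non-NULL: a recyclable page is still owned */
	unsigned long dma;
};

static void *refill_buf(struct pg_info_sketch *pg)
{
	if (pg->page)
		return pg->page;	/* recycle: skip allocation */
	pg->page = malloc(4096);	/* slow path: fresh "page" */
	return pg->page;
}

int main(void)
{
	struct pg_info_sketch pg = { .page = NULL, .dma = 0 };

	printf("first refill %p (allocated)\n", refill_buf(&pg));
	printf("second refill %p (recycled)\n", refill_buf(&pg));
	free(pg.page);
	return 0;
}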
@@ -539,7 +560,9 @@ octeon_droq_dispatch_pkt(struct octeon_device *oct,
droq->stats.dropped_nomem++;
}
} else {
- dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n");
+ dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
+ (unsigned int)rh->r.opcode,
+ (unsigned int)rh->r.subcode);
droq->stats.dropped_nodispatch++;
} /* else (dispatch_fn ... */
@@ -586,6 +609,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
for (pkt = 0; pkt < pkt_count; pkt++) {
u32 pkt_len = 0;
struct sk_buff *nicbuf = NULL;
+ struct octeon_skb_page_info *pg_info;
+ void *buf;
info = &droq->info_list[droq->read_idx];
octeon_swap_8B_data((u64 *)info, 2);
@@ -605,7 +630,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
rh = &info->rh;
total_len += (u32)info->length;
-
if (OPCODE_SLOW_PATH(rh)) {
u32 buf_cnt;
@@ -614,50 +638,45 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
droq->refill_count += buf_cnt;
} else {
if (info->length <= droq->buffer_size) {
- lio_unmap_ring(oct->pci_dev,
- (u64)droq->desc_ring[
- droq->read_idx].buffer_ptr,
- droq->buffer_size);
pkt_len = (u32)info->length;
nicbuf = droq->recv_buf_list[
droq->read_idx].buffer;
+ pg_info = &droq->recv_buf_list[
+ droq->read_idx].pg_info;
+ if (recv_buffer_recycle(oct, pg_info))
+ pg_info->page = NULL;
droq->recv_buf_list[droq->read_idx].buffer =
NULL;
+
INCR_INDEX_BY1(droq->read_idx, droq->max_count);
- skb_put(nicbuf, pkt_len);
droq->refill_count++;
} else {
- nicbuf = octeon_fast_packet_alloc(oct, droq,
- droq->q_no,
- (u32)
+ nicbuf = octeon_fast_packet_alloc((u32)
info->length);
pkt_len = 0;
/* nicbuf allocation can fail. We'll handle it
* inside the loop.
*/
while (pkt_len < info->length) {
- int cpy_len;
+ int cpy_len, idx = droq->read_idx;
- cpy_len = ((pkt_len +
- droq->buffer_size) >
- info->length) ?
+ cpy_len = ((pkt_len + droq->buffer_size)
+ > info->length) ?
((u32)info->length - pkt_len) :
droq->buffer_size;
if (nicbuf) {
- lio_unmap_ring(oct->pci_dev,
- (u64)
- droq->desc_ring
- [droq->read_idx].
- buffer_ptr,
- droq->
- buffer_size);
octeon_fast_packet_next(droq,
nicbuf,
cpy_len,
- droq->
- read_idx
- );
+ idx);
+ buf = droq->recv_buf_list[idx].
+ buffer;
+ recv_buffer_fast_free(buf);
+ droq->recv_buf_list[idx].buffer
+ = NULL;
+ } else {
+ droq->stats.rx_alloc_failure++;
}
pkt_len += cpy_len;
@@ -668,12 +687,14 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
}
if (nicbuf) {
- if (droq->ops.fptr)
+ if (droq->ops.fptr) {
droq->ops.fptr(oct->octeon_id,
- nicbuf, pkt_len,
- rh, &droq->napi);
- else
+ nicbuf, pkt_len,
+ rh, &droq->napi,
+ droq->ops.farg);
+ } else {
recv_buffer_free(nicbuf);
+ }
}
}
@@ -681,16 +702,16 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
int desc_refilled = octeon_droq_refill(oct, droq);
/* Flush the droq descriptor data to memory to be sure
- * that when we update the credits the data in memory
- * is accurate.
- */
+ * that when we update the credits the data in memory
+ * is accurate.
+ */
wmb();
writel((desc_refilled), droq->pkts_credit_reg);
/* make sure mmio write completes */
mmiowb();
}
- } /* for ( each packet )... */
+ } /* for (each packet)... */
/* Increment refill_count by the number of buffers processed. */
droq->stats.pkts_received += pkt;
@@ -721,7 +742,7 @@ octeon_droq_process_packets(struct octeon_device *oct,
if (pkt_count > budget)
pkt_count = budget;
- /* Grab the lock */
+ /* Grab the droq lock */
spin_lock(&droq->lock);
pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
@@ -783,7 +804,7 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
total_pkts_processed += pkts_processed;
- octeon_droq_check_hw_for_pkts(oct, droq);
+ octeon_droq_check_hw_for_pkts(droq);
}
spin_unlock(&droq->lock);
@@ -807,18 +828,6 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
u32 arg)
{
struct octeon_droq *droq;
- struct octeon_config *oct_cfg = NULL;
-
- oct_cfg = octeon_get_conf(oct);
-
- if (!oct_cfg)
- return -EINVAL;
-
- if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
- dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
- __func__, q_no, (oct->num_oqs - 1));
- return -EINVAL;
- }
droq = oct->droq[q_no];
@@ -937,6 +946,7 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
spin_lock_irqsave(&droq->lock, flags);
droq->ops.fptr = NULL;
+ droq->ops.farg = NULL;
droq->ops.drop_on_max = 0;
spin_unlock_irqrestore(&droq->lock, flags);
@@ -949,6 +959,7 @@ int octeon_create_droq(struct octeon_device *oct,
u32 desc_size, void *app_ctx)
{
struct octeon_droq *droq;
+ int numa_node = cpu_to_node(q_no % num_online_cpus());
if (oct->droq[q_no]) {
dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
@@ -957,7 +968,9 @@ int octeon_create_droq(struct octeon_device *oct,
}
/* Allocate the DS for the new droq. */
- droq = vmalloc(sizeof(*droq));
+ droq = vmalloc_node(sizeof(*droq), numa_node);
+ if (!droq)
+ droq = vmalloc(sizeof(*droq));
if (!droq)
goto create_droq_fail;
memset(droq, 0, sizeof(struct octeon_droq));
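A pattern this series leans on repeatedly: try a NUMA-local allocation first and fall back to an unconstrained one, since remote memory beats a failed allocation. A minimal sketch of the idiom (the helper name is illustrative, not part of the driver):

    /* Sketch: NUMA-local allocation with graceful fallback. */
    static void *alloc_local_or_any(size_t size, int numa_node)
    {
            void *p = vmalloc_node(size, numa_node);

            if (!p)                 /* node-local pool exhausted */
                    p = vmalloc(size);
            return p;
    }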
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 7940ccee12d9..5a6fb9113bbd 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -65,6 +65,17 @@ struct octeon_droq_info {
#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info))
+struct octeon_skb_page_info {
+ /* DMA address for the page */
+ dma_addr_t dma;
+
+	/* Page for the rx DMA */
+	struct page *page;
+
+	/** Offset into the page */
+ unsigned int page_offset;
+};
+
/** Pointer to data buffer.
* Driver keeps a pointer to the data buffer that it made available to
* the Octeon device. Since the descriptor ring keeps physical (bus)
@@ -77,6 +88,9 @@ struct octeon_recv_buffer {
/** Data in the packet buffer. */
u8 *data;
+
+	/** Page bookkeeping info for this receive buffer */
+ struct octeon_skb_page_info pg_info;
};
#define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer))
@@ -106,6 +120,13 @@ struct oct_droq_stats {
/** Num of Packets dropped due to receive path failures. */
u64 rx_dropped;
+
+	/** Num of vxlan packets received. */
+ u64 rx_vxlan;
+
+ /** Num of failures of recv_buffer_alloc() */
+ u64 rx_alloc_failure;
+
};
#define POLL_EVENT_INTR_ARRIVED 1
@@ -213,7 +234,8 @@ struct octeon_droq_ops {
* data in the buffer. The receive header gives the port
* number to the caller. Function pointer is set by caller.
*/
- void (*fptr)(u32, void *, u32, union octeon_rh *, void *);
+ void (*fptr)(u32, void *, u32, union octeon_rh *, void *, void *);
+ void *farg;
/* This function will be called by the driver for all NAPI related
* events. The first param is the octeon id. The second param is the
@@ -394,24 +416,9 @@ int octeon_register_dispatch_fn(struct octeon_device *oct,
u16 subcode,
octeon_dispatch_fn_t fn, void *fn_arg);
-/** Remove registration for an opcode/subcode. This will delete the mapping for
- * an opcode/subcode. The dispatch function will be unregistered and will no
- * longer be called if a packet with the opcode/subcode arrives in the driver
- * output queues.
- * @param oct - the octeon device to unregister from.
- * @param opcode - the opcode to be unregistered.
- * @param subcode - the subcode to be unregistered.
- *
- * @return Success: 0; Failure: 1
- */
-int octeon_unregister_dispatch_fn(struct octeon_device *oct,
- u16 opcode,
- u16 subcode);
-
void octeon_droq_print_stats(void);
-u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
- struct octeon_droq *droq);
+u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
int octeon_create_droq(struct octeon_device *oct, u32 q_no,
u32 num_descs, u32 desc_size, void *app_ctx);
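With the widened callback signature, per-interface state can ride along in droq->ops.farg instead of being recovered from globals. A hedged sketch of a consumer (the handler and priv names are hypothetical):

    static void my_rx_handler(u32 octeon_id, void *skb, u32 len,
                              union octeon_rh *rh, void *napi, void *arg)
    {
            struct my_priv *priv = arg;     /* delivered from ops.farg */
            /* ... push the packet up the stack ... */
    }

    struct octeon_droq_ops ops = {
            .fptr        = my_rx_handler,
            .farg        = priv,            /* sixth argument at dispatch time */
            .drop_on_max = 1,
    };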
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 592fe49b589d..ff4b1d6f007b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -65,6 +65,11 @@ struct oct_iq_stats {
u64 tx_iq_busy; /**< Num of times this iq was found to be full. */
u64 tx_dropped; /**< Num of pkts dropped due to xmit path errors. */
u64 tx_tot_bytes; /**< Total count of bytes sent to network. */
+ u64 tx_gso; /* count of TSO packets */
+ u64 tx_vxlan; /* count of VXLAN tunnel packets */
+ u64 tx_dmamap_fail;
+ u64 tx_restart;
+ /*u64 tx_timeout_count;*/
};
#define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats))
@@ -75,18 +80,26 @@ struct oct_iq_stats {
* a Octeon device has one such structure to represent it.
*/
struct octeon_instr_queue {
+ struct octeon_device *oct_dev;
+
/** A spinlock to protect access to the input ring. */
spinlock_t lock;
+ /** A spinlock to protect while posting on the ring. */
+ spinlock_t post_lock;
+
+ /** A spinlock to serialize the IQ flush operation. */
+ spinlock_t iq_flush_running_lock;
+
/** Flag that indicates if the queue uses 64 byte commands. */
u32 iqcmd_64B:1;
- /** Queue Number. */
- u32 iq_no:5;
+ /** Queue info. */
+ union oct_txpciq txpciq;
u32 rsvd:17;
- /* Controls the periodic flushing of iq */
+ /* Controls whether extra flushing of IQ is done on Tx */
u32 do_auto_flush:1;
u32 status:8;
@@ -147,6 +160,13 @@ struct octeon_instr_queue {
/** Application context */
void *app_ctx;
+
+ /* network stack queue index */
+ int q_index;
+
+ /* OS ifidx associated with this queue */
+ int ifidx;
+
};
/*---------------------- INSTRUCTION FORMAT ----------------------------*/
@@ -176,12 +196,12 @@ struct octeon_instr_32B {
/** 64-byte instruction format.
* Format of instruction for a 64-byte mode input queue.
*/
-struct octeon_instr_64B {
+struct octeon_instr2_64B {
/** Pointer where the input data is available. */
u64 dptr;
/** Instruction Header. */
- u64 ih;
+ u64 ih2;
/** Input Request Header. */
u64 irh;
@@ -198,14 +218,44 @@ struct octeon_instr_64B {
u64 rptr;
u64 reserved;
+};
+
+struct octeon_instr3_64B {
+ /** Pointer where the input data is available. */
+ u64 dptr;
+
+ /** Instruction Header. */
+ u64 ih3;
+
+ /** PKI Instruction Header. */
+ u64 pki_ih3;
+
+ /** Input Request Header. */
+ u64 irh;
+ /** opcode/subcode specific parameters */
+ u64 ossp[2];
+
+ /** Return Data Parameters */
+ u64 rdp;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+};
+
+union octeon_instr_64B {
+ struct octeon_instr2_64B cmd2;
+ struct octeon_instr3_64B cmd3;
};
-#define OCT_64B_INSTR_SIZE (sizeof(struct octeon_instr_64B))
+#define OCT_64B_INSTR_SIZE (sizeof(union octeon_instr_64B))
/** The size of each buffer in soft command buffer pool
*/
-#define SOFT_COMMAND_BUFFER_SIZE 1024
+#define SOFT_COMMAND_BUFFER_SIZE 1536
struct octeon_soft_command {
/** Soft command buffer info. */
@@ -214,7 +264,8 @@ struct octeon_soft_command {
u32 size;
/** Command and return status */
- struct octeon_instr_64B cmd;
+ union octeon_instr_64B cmd;
+
#define COMPLETION_WORD_INIT 0xffffffffffffffffULL
u64 *status_word;
@@ -242,7 +293,7 @@ struct octeon_soft_command {
/** Maximum number of buffers to allocate into soft command buffer pool
*/
-#define MAX_SOFT_COMMAND_BUFFERS 16
+#define MAX_SOFT_COMMAND_BUFFERS 256
/** Head of a soft command buffer pool.
*/
@@ -268,14 +319,15 @@ void octeon_free_soft_command(struct octeon_device *oct,
/**
* octeon_init_instr_queue()
* @param octeon_dev - pointer to the octeon device structure.
- * @param iq_no - queue to be initialized (0 <= q_no <= 3).
+ * @param txpciq - queue to be initialized (0 <= q_no <= 3).
*
* Called at driver init time for each input queue. iq_conf has the
* configuration parameters for the queue.
*
* @return Success: 0 Failure: 1
*/
-int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no,
+int octeon_init_instr_queue(struct octeon_device *octeon_dev,
+ union oct_txpciq txpciq,
u32 num_descs);
/**
@@ -298,7 +350,7 @@ octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
int
lio_process_iq_request_list(struct octeon_device *oct,
- struct octeon_instr_queue *iq);
+ struct octeon_instr_queue *iq, u32 napi_budget);
int octeon_send_command(struct octeon_device *oct, u32 iq_no,
u32 force_db, void *cmd, void *buf,
@@ -313,7 +365,10 @@ void octeon_prepare_soft_command(struct octeon_device *oct,
int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc);
-int octeon_setup_iq(struct octeon_device *oct, u32 iq_no,
- u32 num_descs, void *app_ctx);
-
+int octeon_setup_iq(struct octeon_device *oct, int ifidx,
+ int q_index, union oct_txpciq iq_no, u32 num_descs,
+ void *app_ctx);
+int
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+ u32 pending_thresh, u32 napi_budget);
#endif /* __OCTEON_IQ_H__ */
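Keeping both layouts in one 64-byte union means allocation and copying code stays generation-agnostic; only the field encoders need to know which view applies. A sketch of the dispatch (the chip test is the one used elsewhere in this series):

    union octeon_instr_64B cmd;

    memset(&cmd, 0, sizeof(cmd));           /* 64 bytes whichever view is used */
    if (OCTEON_CN6XXX(oct))
            cmd.cmd2.dptr = dma_addr;       /* ih2/irh/rdp layout */
    else
            cmd.cmd3.dptr = dma_addr;       /* ih3 + extra pki_ih3 word */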
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index cbd081981180..bc14e4c27332 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -126,22 +126,27 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct,
}
static inline void *
-cnnic_alloc_aligned_dma(struct pci_dev *pci_dev,
- u32 size,
- u32 *alloc_size,
- size_t *orig_ptr,
- size_t *dma_addr __attribute__((unused)))
+cnnic_numa_alloc_aligned_dma(u32 size,
+ u32 *alloc_size,
+ size_t *orig_ptr,
+ int numa_node)
{
int retries = 0;
void *ptr = NULL;
#define OCTEON_MAX_ALLOC_RETRIES 1
do {
- ptr =
- (void *)__get_free_pages(GFP_KERNEL,
- get_order(size));
+ struct page *page = NULL;
+
+ page = alloc_pages_node(numa_node,
+ GFP_KERNEL,
+ get_order(size));
+ if (!page)
+ page = alloc_pages(GFP_KERNEL,
+ get_order(size));
+ if (!page)
+ break; /* both node-local and fallback allocs failed */
+ ptr = (void *)page_address(page);
if ((unsigned long)ptr & 0x07) {
- free_pages((unsigned long)ptr, get_order(size));
+ __free_pages(page, get_order(size));
ptr = NULL;
/* Increment the size required if the first
* attempt failed.
@@ -169,7 +174,7 @@ sleep_cond(wait_queue_head_t *wait_queue, int *condition)
init_waitqueue_entry(&we, current);
add_wait_queue(wait_queue, &we);
- while (!(ACCESS_ONCE(*condition))) {
+ while (!(READ_ONCE(*condition))) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current))
goto out;
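READ_ONCE() supersedes the deprecated ACCESS_ONCE() and forces a fresh load of *condition on each pass, so the compiler cannot hoist the test out of the loop. The full wait pattern, sketched with the teardown steps the hunk elides:

    add_wait_queue(wait_queue, &we);
    while (!READ_ONCE(*condition)) {        /* re-read every iteration */
            set_current_state(TASK_INTERRUPTIBLE);
            if (signal_pending(current))
                    break;                  /* caller re-checks *condition */
            schedule();                     /* sleep until woken */
    }
    set_current_state(TASK_RUNNING);
    remove_wait_queue(wait_queue, &we);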
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 5aecef870377..95a4bbedf557 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -19,43 +19,29 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
-#include "octeon_main.h"
-#include "octeon_network.h"
-#include "cn66xx_regs.h"
-#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
#define MEMOPS_IDX MAX_BAR1_MAP_INDEX
+#ifdef __BIG_ENDIAN_BITFIELD
static inline void
-octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)),
- u32 idx __attribute__((unused)))
+octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
{
-#ifdef __BIG_ENDIAN_BITFIELD
u32 mask;
mask = oct->fn_list.bar1_idx_read(oct, idx);
mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
oct->fn_list.bar1_idx_write(oct, idx, mask);
-#endif
}
+#else
+#define octeon_toggle_bar1_swapmode(oct, idx) (oct = oct)
+#endif
static void
octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index b3abe5818fd3..fb820dc7fcb7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -30,6 +30,20 @@
#include <linux/dma-mapping.h>
#include <linux/ptp_clock_kernel.h>
+#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
+#define LIO_MIN_MTU_SIZE 68
+
+struct oct_nic_stats_resp {
+ u64 rh;
+ struct oct_link_stats stats;
+ u64 status;
+};
+
+struct oct_nic_stats_ctrl {
+ struct completion complete;
+ struct net_device *netdev;
+};
+
/** LiquidIO per-interface network private data */
struct lio {
/** State of the interface. Rx/Tx happens only in the RUNNING state. */
@@ -48,11 +62,11 @@ struct lio {
*/
int rxq;
- /** Guards the glist */
- spinlock_t lock;
+ /** Guards each glist */
+ spinlock_t *glist_lock;
- /** Linked list of gather components */
- struct list_head glist;
+ /** Array of gather component linked lists */
+ struct list_head *glist;
/** Pointer to the NIC properties for the Octeon device this network
* interface is associated with.
@@ -67,6 +81,9 @@ struct lio {
/** Link information sent by the core application for this interface. */
struct oct_link_info linfo;
+ /** counter of link changes */
+ u64 link_changes;
+
/** Size of Tx queue for this octeon device. */
u32 tx_qsize;
@@ -82,6 +99,12 @@ struct lio {
/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
u64 dev_capability;
+ /* Copy of transmit encapsulation capabilities:
+ * TSO, TSO6 and checksums for this device
+ * (kernel 3.10.0 onwards)
+ */
+ u64 enc_dev_capability;
+
/** Copy of beacon reg in phy */
u32 phy_beacon_val;
@@ -101,7 +124,6 @@ struct lio {
/* work queue for txq status */
struct cavium_wq txq_status_wq;
-
};
#define LIO_SIZE (sizeof(struct lio))
@@ -111,8 +133,9 @@ struct lio {
* \brief Enable or disable feature
* @param netdev pointer to network device
* @param cmd Command that just requires acknowledgment
+ * @param param1 Parameter to command
*/
-int liquidio_set_feature(struct net_device *netdev, int cmd);
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
/**
* \brief Link control command completion callback
@@ -131,14 +154,30 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
*/
void liquidio_set_ethtool_ops(struct net_device *netdev);
-static inline void
-*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)),
- u32 q_no __attribute__((unused)), u32 size)
-{
#define SKB_ADJ_MASK 0x3F
#define SKB_ADJ (SKB_ADJ_MASK + 1)
- struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ);
+#define MIN_SKB_SIZE 256 /* smallest skb; leaves room for the 8-byte PTP header */
+#define LIO_RXBUFFER_SZ 2048
+
+static inline void
+*recv_buffer_alloc(struct octeon_device *oct,
+ struct octeon_skb_page_info *pg_info)
+{
+ struct page *page;
+ struct sk_buff *skb;
+ struct octeon_skb_page_info *skb_pg_info;
+
+ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ if (unlikely(!page))
+ return NULL;
+
+ skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
+ if (unlikely(!skb)) {
+ __free_page(page);
+ pg_info->page = NULL;
+ return NULL;
+ }
if ((unsigned long)skb->data & SKB_ADJ_MASK) {
u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
@@ -146,11 +185,151 @@ static inline void
skb_reserve(skb, r);
}
+ skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ /* Get DMA info */
+ pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* Mapping failed */
+ if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
+ __free_page(page);
+ dev_kfree_skb_any((struct sk_buff *)skb);
+ pg_info->page = NULL;
+ return NULL;
+ }
+
+ pg_info->page = page;
+ pg_info->page_offset = 0;
+ skb_pg_info->page = page;
+ skb_pg_info->page_offset = 0;
+ skb_pg_info->dma = pg_info->dma;
+
return (void *)skb;
}
+static inline void
+*recv_buffer_fast_alloc(u32 size)
+{
+ struct sk_buff *skb;
+ struct octeon_skb_page_info *skb_pg_info;
+
+ skb = dev_alloc_skb(size + SKB_ADJ);
+ if (unlikely(!skb))
+ return NULL;
+
+ if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+ u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+ skb_reserve(skb, r);
+ }
+
+ skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ skb_pg_info->page = NULL;
+ skb_pg_info->page_offset = 0;
+ skb_pg_info->dma = 0;
+
+ return skb;
+}
+
+static inline int
+recv_buffer_recycle(struct octeon_device *oct, void *buf)
+{
+ struct octeon_skb_page_info *pg_info = buf;
+
+ if (!pg_info->page) {
+ dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (unlikely(page_count(pg_info->page) != 1) ||
+ unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
+ dma_unmap_page(&oct->pci_dev->dev,
+ pg_info->dma, (PAGE_SIZE << 0),
+ DMA_FROM_DEVICE);
+ pg_info->dma = 0;
+ pg_info->page = NULL;
+ pg_info->page_offset = 0;
+ return -ENOMEM;
+ }
+
+ /* Flip to other half of the buffer */
+ if (pg_info->page_offset == 0)
+ pg_info->page_offset = LIO_RXBUFFER_SZ;
+ else
+ pg_info->page_offset = 0;
+ page_ref_inc(pg_info->page);
+
+ return 0;
+}
+
+static inline void
+*recv_buffer_reuse(struct octeon_device *oct, void *buf)
+{
+ struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
+ if (unlikely(!skb)) {
+ dma_unmap_page(&oct->pci_dev->dev,
+ pg_info->dma, (PAGE_SIZE << 0),
+ DMA_FROM_DEVICE);
+ return NULL;
+ }
+
+ if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+ u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+ skb_reserve(skb, r);
+ }
+
+ skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ skb_pg_info->page = pg_info->page;
+ skb_pg_info->page_offset = pg_info->page_offset;
+ skb_pg_info->dma = pg_info->dma;
+
+ return skb;
+}
+
+static inline void
+recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
+{
+ struct sk_buff *skb = (struct sk_buff *)buffer;
+
+ put_page(pg_info->page);
+ pg_info->dma = 0;
+ pg_info->page = NULL;
+ pg_info->page_offset = 0;
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+}
+
static inline void recv_buffer_free(void *buffer)
{
+ struct sk_buff *skb = (struct sk_buff *)buffer;
+ struct octeon_skb_page_info *pg_info;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+
+ if (pg_info->page) {
+ put_page(pg_info->page);
+ pg_info->dma = 0;
+ pg_info->page = NULL;
+ pg_info->page_offset = 0;
+ }
+
+ dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+static inline void
+recv_buffer_fast_free(void *buffer)
+{
+ dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+static inline void tx_buffer_free(void *buffer)
+{
dev_kfree_skb_any((struct sk_buff *)buffer);
}
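The allocator family above implements a classic rx buffer economy: each receive buffer is half of a DMA-mapped page, and recv_buffer_recycle() flips page_offset between the two halves as long as the driver holds the only reference and the page is still node-local. A condensed sketch of the flip (pg stands in for the pg_info pointer; assumes 4 KiB pages so two LIO_RXBUFFER_SZ buffers fit):

    /* Sketch: one mapped page backs two 2 KB receive buffers. */
    if (page_count(pg->page) != 1 ||
        page_to_nid(pg->page) != numa_node_id())
            return -ENOMEM;                 /* stack owns it, or page went remote */

    pg->page_offset ^= LIO_RXBUFFER_SZ;     /* flip to the other half */
    page_ref_inc(pg->page);                 /* a reference per half handed out */
    return 0;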
@@ -159,7 +338,17 @@ static inline void recv_buffer_free(void *buffer)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)
-#define get_rbd(ptr) (((struct sk_buff *)(ptr))->data)
+static inline
+void *get_rbd(struct sk_buff *skb)
+{
+ struct octeon_skb_page_info *pg_info;
+ unsigned char *va;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ va = page_address(pg_info->page) + pg_info->page_offset;
+
+ return va;
+}
static inline u64
lio_map_ring_info(struct octeon_droq *droq, u32 i)
@@ -170,7 +359,7 @@ lio_map_ring_info(struct octeon_droq *droq, u32 i)
dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
- BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
+ WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
return (u64)dma_addr;
}
@@ -183,33 +372,44 @@ lio_unmap_ring_info(struct pci_dev *pci_dev,
}
static inline u64
-lio_map_ring(struct pci_dev *pci_dev,
- void *buf, u32 size)
+lio_map_ring(void *buf)
{
dma_addr_t dma_addr;
- dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size,
- DMA_FROM_DEVICE);
+ struct sk_buff *skb = (struct sk_buff *)buf;
+ struct octeon_skb_page_info *pg_info;
- BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr));
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ if (!pg_info->page) {
+ pr_err("%s: pg_info->page NULL\n", __func__);
+ WARN_ON(1);
+ }
+
+ /* Get DMA info */
+ dma_addr = pg_info->dma;
+ if (!pg_info->dma) {
+ pr_err("%s: DMA mapping should already be available\n",
+ __func__);
+ WARN_ON(1);
+ }
+ dma_addr += pg_info->page_offset;
return (u64)dma_addr;
}
static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
- u64 buf_ptr, u32 size)
+ u64 buf_ptr)
+
{
- dma_unmap_single(&pci_dev->dev,
- buf_ptr, size,
- DMA_FROM_DEVICE);
+ dma_unmap_page(&pci_dev->dev,
+ buf_ptr, (PAGE_SIZE << 0),
+ DMA_FROM_DEVICE);
}
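Because the page is mapped once at allocation, posting a buffer to the ring is now pure arithmetic; the old per-refill dma_map_single()/BUG_ON pair disappears. A sketch of the fast path (the ring index i is illustrative):

    /* Sketch: descriptor address = page mapping + half-page offset. */
    dma_addr_t buf_dma = pg_info->dma + pg_info->page_offset;

    droq->desc_ring[i].buffer_ptr = (u64)buf_dma;   /* no dma_map_single() here */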
-static inline void *octeon_fast_packet_alloc(struct octeon_device *oct,
- struct octeon_droq *droq,
- u32 q_no, u32 size)
+static inline void *octeon_fast_packet_alloc(u32 size)
{
- return recv_buffer_alloc(oct, q_no, size);
+ return recv_buffer_fast_alloc(size);
}
static inline void octeon_fast_packet_next(struct octeon_droq *droq,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 1a0191549cb3..166727be928f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -19,14 +19,9 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -34,21 +29,14 @@
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
-#include "cn66xx_regs.h"
-#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
void *
octeon_alloc_soft_command_resp(struct octeon_device *oct,
- struct octeon_instr_64B *cmd,
- size_t rdatasize)
+ union octeon_instr_64B *cmd,
+ u32 rdatasize)
{
struct octeon_soft_command *sc;
- struct octeon_instr_ih *ih;
+ struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
@@ -59,24 +47,25 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
return NULL;
/* Copy existing command structure into the soft command */
- memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B));
+ memcpy(&sc->cmd, cmd, sizeof(union octeon_instr_64B));
/* Add in the response related fields. Opcode and Param are already
* there.
*/
- ih = (struct octeon_instr_ih *)&sc->cmd.ih;
- ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ ih2->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
- irh = (struct octeon_instr_irh *)&sc->cmd.irh;
irh->rflag = 1; /* a response is required */
- irh->len = 4; /* means four 64-bit words immediately follow irh */
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
rdp->pcie_port = oct->pcie_port;
rdp->rlen = rdatasize;
*sc->status_word = COMPLETION_WORD_INIT;
+ sc->cmd.cmd2.rptr = sc->dmarptr;
+
sc->wait_time = 1000;
sc->timeout = jiffies + sc->wait_time;
@@ -119,12 +108,11 @@ static void octnet_link_ctrl_callback(struct octeon_device *oct,
static inline struct octeon_soft_command
*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
- struct octnic_ctrl_pkt *nctrl,
- struct octnic_ctrl_params nparams)
+ struct octnic_ctrl_pkt *nctrl)
{
struct octeon_soft_command *sc = NULL;
u8 *data;
- size_t rdatasize;
+ u32 rdatasize;
u32 uddsize = 0, datasize = 0;
uddsize = (u32)(nctrl->ncmd.s.more * 8);
@@ -143,7 +131,7 @@ static inline struct octeon_soft_command
data = (u8 *)sc->virtdptr;
- memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
+ memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3));
@@ -152,6 +140,8 @@ static inline struct octeon_soft_command
memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize);
}
+ sc->iq_no = (u32)nctrl->iq_no;
+
octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
0, 0, 0);
@@ -164,26 +154,41 @@ static inline struct octeon_soft_command
int
octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
- struct octnic_ctrl_pkt *nctrl,
- struct octnic_ctrl_params nparams)
+ struct octnic_ctrl_pkt *nctrl)
{
int retval;
struct octeon_soft_command *sc = NULL;
- sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams);
+ spin_lock_bh(&oct->cmd_resp_wqlock);
+ /* Allow only rx ctrl command to stop traffic on the chip
+ * during offline operations
+ */
+ if ((oct->cmd_resp_state == OCT_DRV_OFFLINE) &&
+ (nctrl->ncmd.s.cmd != OCTNET_CMD_RX_CTL)) {
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+ dev_err(&oct->pci_dev->dev,
+ "%s cmd:%d not processed since driver offline\n",
+ __func__, nctrl->ncmd.s.cmd);
+ return -1;
+ }
+
+ sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl);
if (!sc) {
dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
__func__);
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
return -1;
}
retval = octeon_send_soft_command(oct, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
octeon_free_soft_command(oct, sc);
- dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
- __func__, retval);
+ dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n",
+ __func__, nctrl->ncmd.s.cmd, retval);
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
return -1;
}
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
return retval;
}
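The counterpart of this gate, not shown in the hunk, is the teardown path flipping cmd_resp_state; once it reads OCT_DRV_OFFLINE, everything except OCTNET_CMD_RX_CTL is refused. A sketch of that flip (its exact placement in the teardown sequence is an assumption):

    /* Sketch: stop accepting control commands before going offline. */
    spin_lock_bh(&oct->cmd_resp_wqlock);
    oct->cmd_resp_state = OCT_DRV_OFFLINE;  /* rx-ctl still passes the gate */
    spin_unlock_bh(&oct->cmd_resp_wqlock);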
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 0238857c8105..b71a2bbe4bee 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -52,6 +52,9 @@ struct octnic_ctrl_pkt {
/** Additional data that may be needed by some commands. */
u64 udd[MAX_NCTRL_UDD];
+ /** Input queue to use to send this command. */
+ u64 iq_no;
+
/** Time to wait for Octeon software to respond to this control command.
* If wait_time is 0, OSI assumes no response is expected.
*/
@@ -82,7 +85,7 @@ struct octnic_data_pkt {
u32 datasize;
/** Command to be passed to the Octeon device software. */
- struct octeon_instr_64B cmd;
+ union octeon_instr_64B cmd;
/** Input queue to use to send this command. */
u32 q_no;
@@ -94,15 +97,14 @@ struct octnic_data_pkt {
*/
union octnic_cmd_setup {
struct {
- u32 ifidx:8;
- u32 cksum_offset:7;
+ u32 iq_no:8;
u32 gather:1;
u32 timestamp:1;
- u32 ipv4opts_ipv6exthdr:2;
u32 ip_csum:1;
+ u32 transport_csum:1;
u32 tnl_csum:1;
+ u32 rsvd:19;
- u32 rsvd:11;
union {
u32 datasize;
u32 gatherptrs;
@@ -113,79 +115,146 @@ union octnic_cmd_setup {
};
-struct octnic_ctrl_params {
- u32 resp_order;
-};
-
static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
{
return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)
>= (oct->instr_queue[q_no]->max_count - 2));
}
-/** Utility function to prepare a 64B NIC instruction based on a setup command
- * @param cmd - pointer to instruction to be filled in.
- * @param setup - pointer to the setup structure
- * @param q_no - which queue for back pressure
- *
- * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
- */
static inline void
-octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
- union octnic_cmd_setup *setup, u32 tag)
+octnet_prepare_pci_cmd_o2(struct octeon_device *oct,
+ union octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
{
- struct octeon_instr_ih *ih;
+ struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
union octnic_packet_params packet_params;
+ int port;
- memset(cmd, 0, sizeof(struct octeon_instr_64B));
+ memset(cmd, 0, sizeof(union octeon_instr_64B));
- ih = (struct octeon_instr_ih *)&cmd->ih;
+ ih2 = (struct octeon_instr_ih2 *)&cmd->cmd2.ih2;
/* assume that rflag is cleared so therefore front data will only have
- * irh and ossp[1] and ossp[2] for a total of 24 bytes
+ * irh and ossp[0], ossp[1] for a total of 24 bytes
*/
- ih->fsz = 24;
+ ih2->fsz = 24;
+
+ ih2->tagtype = ORDERED_TAG;
+ ih2->grp = DEFAULT_POW_GRP;
- ih->tagtype = ORDERED_TAG;
- ih->grp = DEFAULT_POW_GRP;
+ port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
if (tag)
- ih->tag = tag;
+ ih2->tag = tag;
else
- ih->tag = LIO_DATA(setup->s.ifidx);
+ ih2->tag = LIO_DATA(port);
- ih->raw = 1;
- ih->qos = (setup->s.ifidx & 3) + 4; /* map qos based on interface */
+ ih2->raw = 1;
+ ih2->qos = (port & 3) + 4; /* map qos based on interface */
if (!setup->s.gather) {
- ih->dlengsz = setup->s.u.datasize;
+ ih2->dlengsz = setup->s.u.datasize;
} else {
- ih->gather = 1;
- ih->dlengsz = setup->s.u.gatherptrs;
+ ih2->gather = 1;
+ ih2->dlengsz = setup->s.u.gatherptrs;
}
- irh = (struct octeon_instr_irh *)&cmd->irh;
+ irh = (struct octeon_instr_irh *)&cmd->cmd2.irh;
irh->opcode = OPCODE_NIC;
irh->subcode = OPCODE_NIC_NW_DATA;
packet_params.u32 = 0;
- if (setup->s.cksum_offset) {
- packet_params.s.csoffset = setup->s.cksum_offset;
- packet_params.s.ipv4opts_ipv6exthdr =
- setup->s.ipv4opts_ipv6exthdr;
+ packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.transport_csum = setup->s.transport_csum;
+ packet_params.s.tnl_csum = setup->s.tnl_csum;
+ packet_params.s.tsflag = setup->s.timestamp;
+
+ irh->ossp = packet_params.u32;
+}
+
+static inline void
+octnet_prepare_pci_cmd_o3(struct octeon_device *oct,
+ union octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
+{
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ union octnic_packet_params packet_params;
+ int port;
+
+ memset(cmd, 0, sizeof(union octeon_instr_64B));
+
+ ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;
+
+ /* assume that rflag is cleared so therefore front data will only have
+ * irh, ossp[0], ossp[1] and the pki_ih3 word for a total of 32 bytes
+ */
+ ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
+ /*PKI IH*/
+ ih3->fsz = 24 + 8;
+
+ if (!setup->s.gather) {
+ ih3->dlengsz = setup->s.u.datasize;
+ } else {
+ ih3->gather = 1;
+ ih3->dlengsz = setup->s.u.gatherptrs;
}
+ pki_ih3->w = 1;
+ pki_ih3->raw = 1;
+ pki_ih3->utag = 1;
+ pki_ih3->utt = 1;
+ pki_ih3->uqpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;
+
+ port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
+
+ if (tag)
+ pki_ih3->tag = tag;
+ else
+ pki_ih3->tag = LIO_DATA(port);
+
+ pki_ih3->tagtype = ORDERED_TAG;
+ pki_ih3->qpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
+ pki_ih3->pm = 0x7; /* 0x7: parse nothing, packet is uninterpreted */
+ pki_ih3->sl = 8; /* sl is sizeof(pki_ih3) */
+
+ irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;
+
+ irh->opcode = OPCODE_NIC;
+ irh->subcode = OPCODE_NIC_NW_DATA;
+
+ packet_params.u32 = 0;
+
packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.transport_csum = setup->s.transport_csum;
packet_params.s.tnl_csum = setup->s.tnl_csum;
- packet_params.s.ifidx = setup->s.ifidx;
packet_params.s.tsflag = setup->s.timestamp;
irh->ossp = packet_params.u32;
}
+/** Utility function to prepare a 64B NIC instruction based on a setup command
+ * @param cmd - pointer to instruction to be filled in.
+ * @param setup - pointer to the setup structure
+ * @param oct - pointer to the octeon device
+ * @param tag - tag for the instruction; 0 selects a default based on the port
+ *
+ * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
+ */
+static inline void
+octnet_prepare_pci_cmd(struct octeon_device *oct, union octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
+{
+ if (OCTEON_CN6XXX(oct))
+ octnet_prepare_pci_cmd_o2(oct, cmd, setup, tag);
+ else
+ octnet_prepare_pci_cmd_o3(oct, cmd, setup, tag);
+}
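Callers fill union octnic_cmd_setup and let the wrapper choose the per-generation encoder. A hedged usage sketch (the field values and the ndata name are illustrative):

    union octnic_cmd_setup setup;

    memset(&setup, 0, sizeof(setup));
    setup.s.iq_no          = iq_no;         /* queue picked by the caller */
    setup.s.ip_csum        = 1;             /* offload the L3 checksum */
    setup.s.transport_csum = 1;             /* offload the L4 checksum */
    setup.s.u.datasize     = skb->len;      /* linear, non-gather case */

    octnet_prepare_pci_cmd(oct, &ndata.cmd, &setup, 0 /* default tag */);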
+
/** Allocate a soft command with space for a response immediately following
 * the command.
* @param oct - octeon device pointer
@@ -198,8 +267,8 @@ octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
*/
void *
octeon_alloc_soft_command_resp(struct octeon_device *oct,
- struct octeon_instr_64B *cmd,
- size_t rdatasize);
+ union octeon_instr_64B *cmd,
+ u32 rdatasize);
/** Send a NIC data packet to the device
* @param oct - octeon device pointer
@@ -214,14 +283,11 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
 * @param nctrl - control structure with command, timeout, and callback info
- * @param nparams - response control structure
- *
 * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int
octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
- struct octnic_ctrl_pkt *nctrl,
- struct octnic_ctrl_params nparams);
+ struct octnic_ctrl_pkt *nctrl);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index a2a24652c8f3..d32492f185ff 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -19,28 +19,17 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
-#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
@@ -51,7 +40,7 @@ struct iq_post_status {
};
static void check_db_timeout(struct work_struct *work);
-static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);
+static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);
static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
@@ -69,12 +58,16 @@ static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
- u32 iq_no, u32 num_descs)
+ union oct_txpciq txpciq,
+ u32 num_descs)
{
struct octeon_instr_queue *iq;
struct octeon_iq_config *conf = NULL;
+ u32 iq_no = (u32)txpciq.s.q_no;
u32 q_size;
struct cavium_wq *db_wq;
+ int orig_node = dev_to_node(&oct->pci_dev->dev);
+ int numa_node = cpu_to_node(iq_no % num_online_cpus());
if (OCTEON_CN6XXX(oct))
conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
@@ -95,9 +88,15 @@ int octeon_init_instr_queue(struct octeon_device *oct,
q_size = (u32)conf->instr_type * num_descs;
iq = oct->instr_queue[iq_no];
+ iq->oct_dev = oct;
+ set_dev_node(&oct->pci_dev->dev, numa_node);
iq->base_addr = lio_dma_alloc(oct, q_size,
(dma_addr_t *)&iq->base_addr_dma);
+ set_dev_node(&oct->pci_dev->dev, orig_node);
+ if (!iq->base_addr)
+ iq->base_addr = lio_dma_alloc(oct, q_size,
+ (dma_addr_t *)&iq->base_addr_dma);
if (!iq->base_addr) {
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
iq_no);
@@ -109,7 +108,11 @@ int octeon_init_instr_queue(struct octeon_device *oct,
/* Initialize a list that holds requests that have been posted to Octeon
 * but have yet to be fetched by Octeon
*/
- iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
+ iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
+ numa_node);
+ if (!iq->request_list)
+ iq->request_list = vmalloc(sizeof(*iq->request_list) *
+ num_descs);
if (!iq->request_list) {
lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
@@ -122,7 +125,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
- iq->iq_no = iq_no;
+ iq->txpciq.u64 = txpciq.u64;
iq->fill_threshold = (u32)conf->db_min;
iq->fill_cnt = 0;
iq->host_write_index = 0;
@@ -135,8 +138,11 @@ int octeon_init_instr_queue(struct octeon_device *oct,
/* Initialize the spinlock for this instruction queue */
spin_lock_init(&iq->lock);
+ spin_lock_init(&iq->post_lock);
- oct->io_qmask.iq |= (1 << iq_no);
+ spin_lock_init(&iq->iq_flush_running_lock);
+
+ oct->io_qmask.iq |= (1ULL << iq_no);
/* Set the 32B/64B mode for each input queue */
oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
@@ -144,7 +150,9 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->fn_list.setup_iq_regs(oct, iq_no);
- oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
+ oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
+ WQ_MEM_RECLAIM,
+ 0);
if (!oct->check_db_wq[iq_no].wq) {
lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
@@ -168,7 +176,6 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
- flush_workqueue(oct->check_db_wq[iq_no].wq);
destroy_workqueue(oct->check_db_wq[iq_no].wq);
if (OCTEON_CN6XXX(oct))
@@ -188,26 +195,38 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
- u32 iq_no,
+ int ifidx,
+ int q_index,
+ union oct_txpciq txpciq,
u32 num_descs,
void *app_ctx)
{
+ u32 iq_no = (u32)txpciq.s.q_no;
+ int numa_node = cpu_to_node(iq_no % num_online_cpus());
+
if (oct->instr_queue[iq_no]) {
dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
iq_no);
+ oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
oct->instr_queue[iq_no]->app_ctx = app_ctx;
return 0;
}
oct->instr_queue[iq_no] =
- vmalloc(sizeof(struct octeon_instr_queue));
+ vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
+ if (!oct->instr_queue[iq_no])
+ oct->instr_queue[iq_no] =
+ vmalloc(sizeof(struct octeon_instr_queue));
if (!oct->instr_queue[iq_no])
return 1;
memset(oct->instr_queue[iq_no], 0,
sizeof(struct octeon_instr_queue));
+ oct->instr_queue[iq_no]->q_index = q_index;
oct->instr_queue[iq_no]->app_ctx = app_ctx;
- if (octeon_init_instr_queue(oct, iq_no, num_descs)) {
+ oct->instr_queue[iq_no]->ifidx = ifidx;
+
+ if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
vfree(oct->instr_queue[iq_no]);
oct->instr_queue[iq_no] = NULL;
return 1;
@@ -226,8 +245,8 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct)
instr_cnt = 0;
/*for (i = 0; i < oct->num_iqs; i++) {*/
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct->io_qmask.iq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
pending =
atomic_read(&oct->
@@ -271,40 +290,8 @@ static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
memcpy(iqptr, cmd, cmdsize);
}
-static inline int
-__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
- struct octeon_instr_queue *iq,
- u32 force_db __attribute__((unused)), u8 *cmd)
-{
- u32 index = -1;
-
- /* This ensures that the read index does not wrap around to the same
- * position if queue gets full before Octeon could fetch any instr.
- */
- if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
- return -1;
-
- __copy_cmd_into_iq(iq, cmd);
-
- /* "index" is returned, host_write_index is modified. */
- index = iq->host_write_index;
- INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
- iq->fill_cnt++;
-
- /* Flush the command into memory. We need to be sure the data is in
- * memory before indicating that the instruction is pending.
- */
- wmb();
-
- atomic_inc(&iq->instr_pending);
-
- return index;
-}
-
static inline struct iq_post_status
-__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
- struct octeon_instr_queue *iq,
- u32 force_db __attribute__((unused)), u8 *cmd)
+__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
struct iq_post_status st;
@@ -362,17 +349,19 @@ __add_to_request_list(struct octeon_instr_queue *iq,
iq->request_list[idx].reqtype = reqtype;
}
+/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
- struct octeon_instr_queue *iq)
+ struct octeon_instr_queue *iq, u32 napi_budget)
{
int reqtype;
void *buf;
u32 old = iq->flush_index;
u32 inst_count = 0;
- unsigned pkts_compl = 0, bytes_compl = 0;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
struct octeon_soft_command *sc;
struct octeon_instr_irh *irh;
+ unsigned long flags;
while (old != iq->octeon_read_index) {
reqtype = iq->request_list[old].reqtype;
@@ -394,7 +383,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
case REQTYPE_SOFT_COMMAND:
sc = buf;
- irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
if (irh->rflag) {
/* We're expecting a response from Octeon.
* It's up to lio_process_ordered_list() to
@@ -402,17 +391,22 @@ lio_process_iq_request_list(struct octeon_device *oct,
* command response list because we expect
* a response from Octeon.
*/
- spin_lock_bh(&oct->response_list
- [OCTEON_ORDERED_SC_LIST].lock);
+ spin_lock_irqsave
+ (&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock,
+ flags);
atomic_inc(&oct->response_list
[OCTEON_ORDERED_SC_LIST].
pending_req_count);
list_add_tail(&sc->node, &oct->response_list
[OCTEON_ORDERED_SC_LIST].head);
- spin_unlock_bh(&oct->response_list
- [OCTEON_ORDERED_SC_LIST].lock);
+ spin_unlock_irqrestore
+ (&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock,
+ flags);
} else {
if (sc->callback) {
+ /* This callback must not sleep */
sc->callback(oct, OCTEON_REQUEST_DONE,
sc->callback_arg);
}
@@ -430,6 +424,9 @@ lio_process_iq_request_list(struct octeon_device *oct,
skip_this:
inst_count++;
INCR_INDEX_BY1(old, iq->max_count);
+
+ if ((napi_budget) && (inst_count >= napi_budget))
+ break;
}
if (bytes_compl)
octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
@@ -439,38 +436,63 @@ lio_process_iq_request_list(struct octeon_device *oct,
return inst_count;
}
-static inline void
-update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
+/* Can only be called from process context */
+int
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+ u32 pending_thresh, u32 napi_budget)
{
u32 inst_processed = 0;
+ u32 tot_inst_processed = 0;
+ int tx_done = 1;
- /* Calculate how many commands Octeon has read and move the read index
- * accordingly.
- */
- iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);
+ if (!spin_trylock(&iq->iq_flush_running_lock))
+ return tx_done;
- /* Move the NORESPONSE requests to the per-device completion list. */
- if (iq->flush_index != iq->octeon_read_index)
- inst_processed = lio_process_iq_request_list(oct, iq);
+ spin_lock_bh(&iq->lock);
- if (inst_processed) {
- atomic_sub(inst_processed, &iq->instr_pending);
- iq->stats.instr_processed += inst_processed;
- }
-}
+ iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);
-static void
-octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
- u32 pending_thresh)
-{
if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
- spin_lock_bh(&iq->lock);
- update_iq_indices(oct, iq);
- spin_unlock_bh(&iq->lock);
+ do {
+ /* Process any outstanding IQ packets. */
+ if (iq->flush_index == iq->octeon_read_index)
+ break;
+
+ if (napi_budget)
+ inst_processed = lio_process_iq_request_list
+ (oct, iq,
+ napi_budget - tot_inst_processed);
+ else
+ inst_processed =
+ lio_process_iq_request_list(oct, iq, 0);
+
+ if (inst_processed) {
+ atomic_sub(inst_processed, &iq->instr_pending);
+ iq->stats.instr_processed += inst_processed;
+ }
+
+ tot_inst_processed += inst_processed;
+ inst_processed = 0;
+
+ } while (tot_inst_processed < napi_budget);
+
+ if (napi_budget && (tot_inst_processed >= napi_budget))
+ tx_done = 0;
}
+
+ iq->last_db_time = jiffies;
+
+ spin_unlock_bh(&iq->lock);
+
+ spin_unlock(&iq->iq_flush_running_lock);
+
+ return tx_done;
}
-static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
+/* Process instruction queue after timeout.
+ * This routine gets called from a workqueue or when removing the module.
+ */
+static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
struct octeon_instr_queue *iq;
u64 next_time;
@@ -481,24 +503,17 @@ static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
if (!iq)
return;
+ /* Return immediately if no work is pending */
+ if (!atomic_read(&iq->instr_pending))
+ return;
/* If jiffies - last_db_time < db_timeout do nothing */
next_time = iq->last_db_time + iq->db_timeout;
if (!time_after(jiffies, (unsigned long)next_time))
return;
iq->last_db_time = jiffies;
- /* Get the lock and prevent tasklets. This routine gets called from
- * the poll thread. Instructions can now be posted in tasklet context
- */
- spin_lock_bh(&iq->lock);
- if (iq->fill_cnt != 0)
- ring_doorbell(oct, iq);
-
- spin_unlock_bh(&iq->lock);
-
/* Flush the instruction queue */
- if (iq->do_auto_flush)
- octeon_flush_iq(oct, iq, 1);
+ octeon_flush_iq(oct, iq, 1, 0);
}
/* Called by the Poll thread at regular intervals to check the instruction
@@ -508,11 +523,12 @@ static void check_db_timeout(struct work_struct *work)
{
struct cavium_wk *wk = (struct cavium_wk *)work;
struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
- unsigned long iq_no = wk->ctxul;
+ u64 iq_no = wk->ctxul;
struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
+ u32 delay = 10;
__check_db_timeout(oct, iq_no);
- queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+ queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}
int
@@ -523,9 +539,12 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
struct iq_post_status st;
struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
- spin_lock_bh(&iq->lock);
+ /* Get the lock and prevent other tasks and the tx interrupt handler
+ * from running.
+ */
+ spin_lock_bh(&iq->post_lock);
- st = __post_command2(oct, iq, force_db, cmd);
+ st = __post_command2(iq, cmd);
if (st.status != IQ_SEND_FAILED) {
octeon_report_sent_bytes_to_bql(buf, reqtype);
@@ -533,16 +552,19 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
- if (iq->fill_cnt >= iq->fill_threshold || force_db)
+ if (force_db)
ring_doorbell(oct, iq);
} else {
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
}
- spin_unlock_bh(&iq->lock);
+ spin_unlock_bh(&iq->post_lock);
- if (iq->do_auto_flush)
- octeon_flush_iq(oct, iq, 2);
+ /* This is only done here to expedite packets being flushed
+ * for cases where there are no IQ completion interrupts.
+ */
+ /*if (iq->do_auto_flush)*/
+ /* octeon_flush_iq(oct, iq, 2, 0);*/
return st.status;
}
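Note the doorbell policy change: the fill_threshold test is gone, so the doorbell rings only when the caller asks. Batching callers are expected to post several commands and ring once; a sketch (the REQTYPE value is illustrative):

    /* Sketch: post a burst, ring the doorbell on the last command only. */
    for (i = 0; i < n; i++)
            octeon_send_command(oct, iq_no, (i == n - 1) /* force_db */,
                                &cmds[i], bufs[i], sizes[i],
                                REQTYPE_NORESP_NET);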
@@ -557,82 +579,75 @@ octeon_prepare_soft_command(struct octeon_device *oct,
u64 ossp1)
{
struct octeon_config *oct_cfg;
- struct octeon_instr_ih *ih;
+ struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
- BUG_ON(opcode > 15);
- BUG_ON(subcode > 127);
+ WARN_ON(opcode > 15);
+ WARN_ON(subcode > 127);
oct_cfg = octeon_get_conf(oct);
- ih = (struct octeon_instr_ih *)&sc->cmd.ih;
- ih->tagtype = ATOMIC_TAG;
- ih->tag = LIO_CONTROL;
- ih->raw = 1;
- ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ ih2->tagtype = ATOMIC_TAG;
+ ih2->tag = LIO_CONTROL;
+ ih2->raw = 1;
+ ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
if (sc->datasize) {
- ih->dlengsz = sc->datasize;
- ih->rs = 1;
+ ih2->dlengsz = sc->datasize;
+ ih2->rs = 1;
}
- irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
irh->opcode = opcode;
irh->subcode = subcode;
/* opcode/subcode specific parameters (ossp) */
irh->ossp = irh_ossp;
- sc->cmd.ossp[0] = ossp0;
- sc->cmd.ossp[1] = ossp1;
+ sc->cmd.cmd2.ossp[0] = ossp0;
+ sc->cmd.cmd2.ossp[1] = ossp1;
if (sc->rdatasize) {
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
rdp->pcie_port = oct->pcie_port;
rdp->rlen = sc->rdatasize;
irh->rflag = 1;
- irh->len = 4;
- ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
+ ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
} else {
irh->rflag = 0;
- irh->len = 2;
- ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
+ ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
}
-
- while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
- sc->iq_no++;
}
int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
- struct octeon_instr_ih *ih;
+ struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
- struct octeon_instr_rdp *rdp;
+ u32 len;
- ih = (struct octeon_instr_ih *)&sc->cmd.ih;
- if (ih->dlengsz) {
- BUG_ON(!sc->dmadptr);
- sc->cmd.dptr = sc->dmadptr;
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ if (ih2->dlengsz) {
+ WARN_ON(!sc->dmadptr);
+ sc->cmd.cmd2.dptr = sc->dmadptr;
}
-
- irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
if (irh->rflag) {
- BUG_ON(!sc->dmarptr);
- BUG_ON(!sc->status_word);
+ WARN_ON(!sc->dmarptr);
+ WARN_ON(!sc->status_word);
*sc->status_word = COMPLETION_WORD_INIT;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
-
- sc->cmd.rptr = sc->dmarptr;
+ sc->cmd.cmd2.rptr = sc->dmarptr;
}
+ len = (u32)ih2->dlengsz;
if (sc->wait_time)
sc->timeout = jiffies + sc->wait_time;
- return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
- (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND);
+ return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
+ len, REQTYPE_SOFT_COMMAND));
}
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
@@ -667,7 +682,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
struct list_head *tmp, *tmp2;
struct octeon_soft_command *sc;
- spin_lock(&oct->sc_buf_pool.lock);
+ spin_lock_bh(&oct->sc_buf_pool.lock);
list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
list_del(tmp);
@@ -679,7 +694,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
INIT_LIST_HEAD(&oct->sc_buf_pool.head);
- spin_unlock(&oct->sc_buf_pool.lock);
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
return 0;
}
@@ -695,13 +710,13 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc = NULL;
struct list_head *tmp;
- BUG_ON((offset + datasize + rdatasize + ctxsize) >
+ WARN_ON((offset + datasize + rdatasize + ctxsize) >
SOFT_COMMAND_BUFFER_SIZE);
- spin_lock(&oct->sc_buf_pool.lock);
+ spin_lock_bh(&oct->sc_buf_pool.lock);
if (list_empty(&oct->sc_buf_pool.head)) {
- spin_unlock(&oct->sc_buf_pool.lock);
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
return NULL;
}
@@ -712,7 +727,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
- spin_unlock(&oct->sc_buf_pool.lock);
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
sc = (struct octeon_soft_command *)tmp;
@@ -742,7 +757,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
offset = (offset + datasize + 127) & 0xffffff80;
if (rdatasize) {
- BUG_ON(rdatasize < 16);
+ WARN_ON(rdatasize < 16);
sc->virtrptr = (u8 *)sc + offset;
sc->dmarptr = dma_addr + offset;
sc->rdatasize = rdatasize;
@@ -755,11 +770,11 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
void octeon_free_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
- spin_lock(&oct->sc_buf_pool.lock);
+ spin_lock_bh(&oct->sc_buf_pool.lock);
list_add_tail(&sc->node, &oct->sc_buf_pool.head);
atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
- spin_unlock(&oct->sc_buf_pool.lock);
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 091f537a946e..709049e36627 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -19,28 +19,14 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
-#include "cn66xx_regs.h"
-#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
static void oct_poll_req_completion(struct work_struct *work);
@@ -54,8 +40,9 @@ int octeon_setup_response_list(struct octeon_device *oct)
spin_lock_init(&oct->response_list[i].lock);
atomic_set(&oct->response_list[i].pending_req_count, 0);
}
+ spin_lock_init(&oct->cmd_resp_wqlock);
- oct->dma_comp_wq.wq = create_workqueue("dma-comp");
+ oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
if (!oct->dma_comp_wq.wq) {
dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
return -ENOMEM;
@@ -64,7 +51,8 @@ int octeon_setup_response_list(struct octeon_device *oct)
cwq = &oct->dma_comp_wq;
INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
cwq->wk.ctxptr = oct;
- queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+ oct->cmd_resp_state = OCT_DRV_ONLINE;
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
return ret;
}
@@ -72,7 +60,6 @@ int octeon_setup_response_list(struct octeon_device *oct)
void octeon_delete_response_list(struct octeon_device *oct)
{
cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
- flush_workqueue(oct->dma_comp_wq.wq);
destroy_workqueue(oct->dma_comp_wq.wq);
}
@@ -86,6 +73,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
u32 status;
u64 status64;
struct octeon_instr_rdp *rdp;
+ u64 rptr;
ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
@@ -103,7 +91,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
sc = (struct octeon_soft_command *)ordered_sc_list->
head.next;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ rptr = sc->cmd.cmd2.rptr;
status = OCTEON_REQUEST_PENDING;
@@ -111,7 +100,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
* to where rptr is pointing to
*/
dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
- sc->cmd.rptr, rdp->rlen,
+ rptr, rdp->rlen,
DMA_FROM_DEVICE);
status64 = *sc->status_word;
@@ -173,6 +162,5 @@ static void oct_poll_req_completion(struct work_struct *work)
struct cavium_wq *cwq = &oct->dma_comp_wq;
lio_process_ordered_list(oct, 0);
-
- queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
}
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 388cd799d9ed..e8bc15bcde70 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -146,7 +146,6 @@ struct octeon_mgmt {
struct device *dev;
struct napi_struct napi;
struct tasklet_struct tx_clean_tasklet;
- struct phy_device *phydev;
struct device_node *phy_np;
resource_size_t mix_phys;
resource_size_t mix_size;
@@ -787,14 +786,12 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
static int octeon_mgmt_ioctl(struct net_device *netdev,
struct ifreq *rq, int cmd)
{
- struct octeon_mgmt *p = netdev_priv(netdev);
-
switch (cmd) {
case SIOCSHWTSTAMP:
return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
default:
- if (p->phydev)
- return phy_mii_ioctl(p->phydev, rq, cmd);
+ if (netdev->phydev)
+ return phy_mii_ioctl(netdev->phydev, rq, cmd);
return -EINVAL;
}
}
@@ -836,16 +833,18 @@ static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
+ struct net_device *ndev = p->netdev;
+ struct phy_device *phydev = ndev->phydev;
union cvmx_agl_gmx_prtx_cfg prtx_cfg;
prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
- if (!p->phydev->link)
+ if (!phydev->link)
prtx_cfg.s.duplex = 1;
else
- prtx_cfg.s.duplex = p->phydev->duplex;
+ prtx_cfg.s.duplex = phydev->duplex;
- switch (p->phydev->speed) {
+ switch (phydev->speed) {
case 10:
prtx_cfg.s.speed = 0;
prtx_cfg.s.slottime = 0;
@@ -871,7 +870,7 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p)
prtx_cfg.s.speed_msb = 0;
/* Only matters for half-duplex */
prtx_cfg.s.slottime = 1;
- prtx_cfg.s.burst = p->phydev->duplex;
+ prtx_cfg.s.burst = phydev->duplex;
}
break;
case 0: /* No link */
@@ -894,9 +893,9 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p)
/* MII (both speeds) and RGMII 1000 speed. */
agl_clk.s.clk_cnt = 1;
if (prtx_ctl.s.mode == 0) { /* RGMII mode */
- if (p->phydev->speed == 10)
+ if (phydev->speed == 10)
agl_clk.s.clk_cnt = 50;
- else if (p->phydev->speed == 100)
+ else if (phydev->speed == 100)
agl_clk.s.clk_cnt = 5;
}
cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
@@ -906,39 +905,40 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p)
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
unsigned long flags;
int link_changed = 0;
- if (!p->phydev)
+ if (!phydev)
return;
spin_lock_irqsave(&p->lock, flags);
- if (!p->phydev->link && p->last_link)
+ if (!phydev->link && p->last_link)
link_changed = -1;
- if (p->phydev->link
- && (p->last_duplex != p->phydev->duplex
- || p->last_link != p->phydev->link
- || p->last_speed != p->phydev->speed)) {
+ if (phydev->link &&
+ (p->last_duplex != phydev->duplex ||
+ p->last_link != phydev->link ||
+ p->last_speed != phydev->speed)) {
octeon_mgmt_disable_link(p);
link_changed = 1;
octeon_mgmt_update_link(p);
octeon_mgmt_enable_link(p);
}
- p->last_link = p->phydev->link;
- p->last_speed = p->phydev->speed;
- p->last_duplex = p->phydev->duplex;
+ p->last_link = phydev->link;
+ p->last_speed = phydev->speed;
+ p->last_duplex = phydev->duplex;
spin_unlock_irqrestore(&p->lock, flags);
if (link_changed != 0) {
if (link_changed > 0) {
pr_info("%s: Link is up - %d/%s\n", netdev->name,
- p->phydev->speed,
- DUPLEX_FULL == p->phydev->duplex ?
+ phydev->speed,
+ phydev->duplex == DUPLEX_FULL ?
"Full" : "Half");
} else {
pr_info("%s: Link is down\n", netdev->name);
@@ -949,6 +949,7 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev)
static int octeon_mgmt_init_phy(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
+ struct phy_device *phydev = NULL;
if (octeon_is_simulation() || p->phy_np == NULL) {
/* No PHYs in the simulator. */
@@ -956,11 +957,11 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
return 0;
}
- p->phydev = of_phy_connect(netdev, p->phy_np,
- octeon_mgmt_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
+ phydev = of_phy_connect(netdev, p->phy_np,
+ octeon_mgmt_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
- if (!p->phydev)
+ if (!phydev)
return -ENODEV;
return 0;
@@ -1080,9 +1081,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
}
/* Set the mode of the interface, RGMII/MII. */
- if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
union cvmx_agl_prtx_ctl agl_prtx_ctl;
- int rgmii_mode = (p->phydev->supported &
+ int rgmii_mode = (netdev->phydev->supported &
(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
@@ -1205,7 +1206,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* Configure the port duplex, speed and enables */
octeon_mgmt_disable_link(p);
- if (p->phydev)
+ if (netdev->phydev)
octeon_mgmt_update_link(p);
octeon_mgmt_enable_link(p);
@@ -1214,9 +1215,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* PHY is not present in simulator. The carrier is enabled
* while initializing the phy for simulator, leave it enabled.
*/
- if (p->phydev) {
+ if (netdev->phydev) {
netif_carrier_off(netdev);
- phy_start_aneg(p->phydev);
+ phy_start_aneg(netdev->phydev);
}
netif_wake_queue(netdev);
@@ -1244,9 +1245,8 @@ static int octeon_mgmt_stop(struct net_device *netdev)
napi_disable(&p->napi);
netif_stop_queue(netdev);
- if (p->phydev)
- phy_disconnect(p->phydev);
- p->phydev = NULL;
+ if (netdev->phydev)
+ phy_disconnect(netdev->phydev);
netif_carrier_off(netdev);
@@ -1346,50 +1346,23 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
-static int octeon_mgmt_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct octeon_mgmt *p = netdev_priv(netdev);
-
- if (p->phydev)
- return phy_ethtool_gset(p->phydev, cmd);
-
- return -EOPNOTSUPP;
-}
-
-static int octeon_mgmt_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct octeon_mgmt *p = netdev_priv(netdev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (p->phydev)
- return phy_ethtool_sset(p->phydev, cmd);
-
- return -EOPNOTSUPP;
-}
-
static int octeon_mgmt_nway_reset(struct net_device *dev)
{
- struct octeon_mgmt *p = netdev_priv(dev);
-
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (p->phydev)
- return phy_start_aneg(p->phydev);
+ if (dev->phydev)
+ return phy_start_aneg(dev->phydev);
return -EOPNOTSUPP;
}
static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
.get_drvinfo = octeon_mgmt_get_drvinfo,
- .get_settings = octeon_mgmt_get_settings,
- .set_settings = octeon_mgmt_set_settings,
.nway_reset = octeon_mgmt_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops octeon_mgmt_ops = {
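
[Note: the octeon_mgmt conversion above follows a pattern that recurs in this series (the dnet conversion below does the same): the driver-private phy_device pointer is deleted in favour of the netdev->phydev pointer maintained by the PHY core, which in turn lets the open-coded get_settings/set_settings ethtool hooks be replaced by the generic link_ksettings helpers. A minimal sketch, assuming a driver with a phy_np device-tree handle and a my_adjust_link callback (both hypothetical names):

	struct phy_device *phydev;

	phydev = of_phy_connect(netdev, phy_np, my_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);
	if (!phydev)
		return -ENODEV;
	/* of_phy_connect() has stored the pointer in netdev->phydev, so
	 * ethtool_ops can simply point at the generic helpers:
	 *	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	 *	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	 */
]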
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 95f17f8cadac..16ed20357c5c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -499,6 +499,7 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
u32 rr_quantum;
u8 sq_idx = sq->sq_num;
u8 pqs_vnic;
+ int svf;
if (sq->sqs_mode)
pqs_vnic = nic->pqs_vf[vnic];
@@ -511,10 +512,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
- tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ if (!sq->sqs_mode) {
+ tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ } else {
+ for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+ if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+ break;
+ }
+ tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
+ tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
+ tl4 += (svf * NIC_TL4_PER_LMAC);
+ tl4 += (bgx * NIC_TL4_PER_BGX);
+ }
tl4 += sq_idx;
- if (sq->sqs_mode)
- tl4 += vnic * 8;
tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
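
[Note on the TL4 change above: secondary-Qset (sqs_mode) transmit-limiter indices are moved out of the primary per-LMAC range. The driver first finds the secondary VF's slot (svf) in the primary VF's vf_sqs[] table, then indexes a region that starts after all primary per-LMAC blocks. A standalone illustration of the indexing, using placeholder constants rather than the driver's real values:

enum {	/* hypothetical sizes, for illustration only */
	TL4_PER_LMAC = 64, TL4_PER_BGX = 512,
	LMAC_PER_BGX = 4, SQS_PER_VF = 2,
};

static int tl4_index(int bgx, int lmac, int svf, int sq_idx, bool sqs_mode)
{
	int tl4;

	if (!sqs_mode)		/* primary Qset: per-LMAC block */
		tl4 = lmac * TL4_PER_LMAC + bgx * TL4_PER_BGX;
	else			/* secondary Qset: region after all primary
				 * blocks, keyed by (lmac, svf)
				 */
		tl4 = LMAC_PER_BGX * TL4_PER_LMAC +
		      lmac * TL4_PER_LMAC * SQS_PER_VF +
		      svf * TL4_PER_LMAC +
		      bgx * TL4_PER_BGX;
	return tl4 + sq_idx;
}
]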

diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 3ed21988626b..63a39ac97d53 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -551,7 +551,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
}
/* Clear rcvflt bit (latching high) and read it back */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
if (bgx->use_training) {
@@ -570,13 +572,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- /* Wait for MAC RX to be ready */
- if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
- SMU_RX_CTL_STATUS, true)) {
- dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
- return -1;
- }
-
/* Wait for BGX RX to be idle */
if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -589,29 +584,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
- dev_err(&bgx->pdev->dev, "Receive fault\n");
- return -1;
- }
-
- /* Receive link is latching low. Force it high and verify it */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
- if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
- SPU_STATUS1_RCV_LNK, false)) {
- dev_err(&bgx->pdev->dev, "SPU receive link down\n");
- return -1;
- }
-
+ /* Clear receive packet disable */
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
cfg &= ~SPU_MISC_CTL_RX_DIS;
bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
- return 0;
+
+ /* Check for MAC RX faults */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+ /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+ cfg &= SMU_RX_CTL_STATUS;
+ if (!cfg)
+ return 0;
+
+ /* Rx local/remote fault seen.
+ * Do lmac reinit to see if condition recovers
+ */
+ bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+ return -1;
}
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
- u64 link;
+ u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
@@ -621,8 +617,11 @@ static void bgx_poll_for_link(struct work_struct *work)
bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
SPU_STATUS1_RCV_LNK, false);
- link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
- if (link & SPU_STATUS1_RCV_LNK) {
+ spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+ if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+ !(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
@@ -636,9 +635,15 @@ static void bgx_poll_for_link(struct work_struct *work)
}
if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up) {
+ if (bgx_xaui_check_link(lmac)) {
+ /* Errors, clear link_up state */
+ lmac->link_up = 0;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ }
+ }
lmac->last_link = lmac->link_up;
- if (lmac->link_up)
- bgx_xaui_check_link(lmac);
}
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
@@ -710,7 +715,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
- u64 cmrx_cfg;
+ u64 cfg;
lmac = &bgx->lmac[lmacid];
if (lmac->check_link) {
@@ -719,9 +724,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
destroy_workqueue(lmac->check_link);
}
- cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
- cmrx_cfg &= ~(1 << 15);
- bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ /* Disable packet reception */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_RX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Give chance for Rx/Tx FIFO to get drained */
+	/* Give the Rx/Tx FIFOs a chance to drain */
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+ /* Disable packet transmission */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_TX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Disable serdes lanes */
+ if (!lmac->is_sgmii)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ else
+ bgx_reg_modify(bgx, lmacid,
+ BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
bgx_flush_dmac_addrs(bgx, lmacid);
if ((bgx->lmac_type != BGX_MODE_XFI) &&
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 149e179363a1..42010d2e5ddf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -41,6 +41,7 @@
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMRX_RX_FIFO_LEN 0x108
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
@@ -50,6 +51,7 @@
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_FIFO_LEN 0x518
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 477db477b133..c45de49dc963 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -64,6 +64,7 @@
#include <net/bonding.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>
+#include <linux/crash_dump.h>
#include "cxgb4.h"
#include "t4_regs.h"
@@ -206,7 +207,7 @@ static int rx_dma_offset = 2;
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
module_param_array(num_vf, uint, NULL, 0644);
-MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
+MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface.");
#endif
/* TX Queue select used to determine what algorithm to use for selecting TX
@@ -460,11 +461,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- if (!(dev->flags & IFF_PROMISC)) {
- __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
- if (!(dev->flags & IFF_ALLMULTI))
- __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
- }
+ __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
+ __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
(dev->flags & IFF_PROMISC) ? 1 : 0,
@@ -3735,7 +3733,8 @@ static int adap_init0(struct adapter *adap)
return ret;
/* Contact FW, advertising Master capability */
- ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
+ ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
+ is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
if (ret < 0) {
dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
ret);
@@ -4366,6 +4365,11 @@ static void cfg_queues(struct adapter *adap)
if (q10g > netif_get_num_default_rss_queues())
q10g = netif_get_num_default_rss_queues();
+	/* Reduce memory usage in the kdump environment by disabling all offloads.
+ */
+ if (is_kdump_kernel())
+ adap->params.offload = 0;
+
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
@@ -4829,6 +4833,60 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
return -EINVAL;
}
+#ifdef CONFIG_PCI_IOV
+static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ int err = 0;
+ int current_vfs = pci_num_vf(pdev);
+ u32 pcie_fw;
+ void __iomem *regs;
+
+ regs = pci_ioremap_bar(pdev, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ return -ENOMEM;
+ }
+
+ pcie_fw = readl(regs + PCIE_FW_A);
+ iounmap(regs);
+ /* Check if cxgb4 is the MASTER and fw is initialized */
+ if (!(pcie_fw & PCIE_FW_INIT_F) ||
+ !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
+ PCIE_FW_MASTER_G(pcie_fw) != 4) {
+ dev_warn(&pdev->dev,
+ "cxgb4 driver needs to be MASTER to support SRIOV\n");
+ return -EOPNOTSUPP;
+ }
+
+	/* If any of the VFs is already assigned to a guest OS, then
+ * SRIOV for the same cannot be modified
+ */
+ if (current_vfs && pci_vfs_assigned(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot modify SR-IOV while VFs are assigned\n");
+ num_vfs = current_vfs;
+ return num_vfs;
+ }
+
+ /* Disable SRIOV when zero is passed.
+ * One needs to disable SRIOV before modifying it, else
+ * stack throws the below warning:
+ * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
+ */
+ if (!num_vfs) {
+ pci_disable_sriov(pdev);
+ return num_vfs;
+ }
+
+ if (num_vfs != current_vfs) {
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err)
+ return err;
+ }
+ return num_vfs;
+}
+#endif
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int func, i, err, s_qpp, qpp, num_seg;
@@ -5162,11 +5220,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
sriov:
#ifdef CONFIG_PCI_IOV
- if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
+ if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) {
+ dev_warn(&pdev->dev,
+ "Enabling SR-IOV VFs using the num_vf module "
+ "parameter is deprecated - please use the pci sysfs "
+ "interface instead.\n");
if (pci_enable_sriov(pdev, num_vf[func]) == 0)
dev_info(&pdev->dev,
"instantiated %u virtual functions\n",
num_vf[func]);
+ }
#endif
return 0;
@@ -5259,6 +5322,9 @@ static struct pci_driver cxgb4_driver = {
.probe = init_one,
.remove = remove_one,
.shutdown = remove_one,
+#ifdef CONFIG_PCI_IOV
+ .sriov_configure = cxgb4_iov_configure,
+#endif
.err_handler = &cxgb4_eeh,
};
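
[Note: with the .sriov_configure hook registered above, the PCI core calls cxgb4_iov_configure() whenever userspace writes a VF count to the adapter's sriov_numvfs sysfs attribute — for example, echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs, with <BDF> a placeholder for the PF's bus address, and echo 0 to the same file to disable the VFs again. This is the interface the deprecation warning on the num_vf module parameter points users toward.]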
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a2cdfc1261dc..50812a1d67bd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 734dd776c22f..109bc630408b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -353,6 +353,10 @@ struct hash_mac_addr {
u8 addr[ETH_ALEN];
};
+struct mbox_list {
+ struct list_head list;
+};
+
/*
* Per-"adapter" (Virtual Function) information.
*/
@@ -387,6 +391,10 @@ struct adapter {
/* various locks */
spinlock_t stats_lock;
+ /* lock for mailbox cmd list */
+ spinlock_t mbox_lock;
+ struct mbox_list mlist;
+
/* support for mailbox command/reply logging */
#define T4VF_OS_LOG_MBOX_CMDS 256
struct mbox_cmd_log *mbox_log;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 04fc6f6d1e25..9f5526478d2f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -937,12 +937,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
struct port_info *pi = netdev_priv(dev);
- if (!(dev->flags & IFF_PROMISC)) {
- __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
- if (!(dev->flags & IFF_ALLMULTI))
- __dev_mc_sync(dev, cxgb4vf_mac_sync,
- cxgb4vf_mac_unsync);
- }
+ __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
+ __dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
(dev->flags & IFF_PROMISC) != 0,
(dev->flags & IFF_ALLMULTI) != 0,
@@ -2778,6 +2774,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* Initialize SMP data synchronization resources.
*/
spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->mbox_lock);
+ INIT_LIST_HEAD(&adapter->mlist.list);
/*
* Map our I/O registers in BAR0.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 955ff7c61f1b..61bfe86da86d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -139,6 +139,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
__be64 cmd_rpl[MBOX_LEN / 8];
+ struct mbox_list entry;
/* In T6, mailbox size is changed to 128 bytes to avoid
* invalidating the entire prefetch buffer.
@@ -156,6 +157,51 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
return -EINVAL;
+ /* Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ spin_lock(&adapter->mbox_lock);
+ list_add_tail(&entry.list, &adapter->mlist.list);
+ spin_unlock(&adapter->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /* If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+		 * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ...
+ */
+ if (i > FW_CMD_MAX_TIMEOUT) {
+ spin_lock(&adapter->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adapter->mbox_lock);
+ ret = -EBUSY;
+ t4vf_record_mbox(adapter, cmd, size, access, ret);
+ return ret;
+ }
+
+ /* If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (list_first_entry(&adapter->mlist.list, struct mbox_list,
+ list) == &entry)
+ break;
+
+ /* Delay for a bit before checking again ... */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ mdelay(ms);
+ }
+ }
+
/*
* Loop trying to get ownership of the mailbox. Return an error
* if we can't gain ownership.
@@ -164,6 +210,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
if (v != MBOX_OWNER_DRV) {
+ spin_lock(&adapter->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adapter->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
t4vf_record_mbox(adapter, cmd, size, access, ret);
return ret;
@@ -248,6 +297,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
if (cmd_op != FW_VI_STATS_CMD)
t4vf_record_mbox(adapter, cmd_rpl, size, access,
execute);
+ spin_lock(&adapter->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adapter->mbox_lock);
return -FW_CMD_RETVAL_G(v);
}
}
@@ -255,6 +307,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
/* We timed out. Return the error ... */
ret = -ETIMEDOUT;
t4vf_record_mbox(adapter, cmd, size, access, ret);
+ spin_lock(&adapter->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adapter->mbox_lock);
return ret;
}
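
[Note: the t4vf_hw.c additions above serialize VF mailbox access with an on-stack list entry acting as a FIFO ticket: a waiter appends itself under mbox_lock, spins (or sleeps) until it reaches the head of the list, and must unlink itself on every exit path — timeout, lost ownership, completion, or error. A minimal sketch of the pattern under those assumptions (names hypothetical; the real code's timeout and ownership handling are trimmed):

struct waiter {
	struct list_head list;
};

static void mbox_access_serialized(spinlock_t *lock, struct list_head *queue)
{
	struct waiter me;		/* lives on this thread's stack */

	spin_lock(lock);
	list_add_tail(&me.list, queue);
	spin_unlock(lock);

	/* As in the patch, peek at the head without the lock; only the
	 * head entry's owner proceeds, everyone else keeps waiting.
	 */
	while (list_first_entry(queue, struct waiter, list) != &me)
		msleep(1);

	/* ... serialized hardware access goes here ... */

	spin_lock(lock);
	list_del(&me.list);		/* must happen on every exit path */
	spin_unlock(lock);
}
]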
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 60383040d6c6..c363b58552e9 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -53,6 +53,8 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -1895,9 +1897,17 @@ static int cs89x0_platform_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id cs89x0_match[] __maybe_unused = {
+ { .compatible = "cirrus,cs8900", },
+ { .compatible = "cirrus,cs8920", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cs89x0_match);
+
static struct platform_driver cs89x0_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(cs89x0_match),
},
.remove = cs89x0_platform_remove,
};
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f44a39c40642..fd3980cc1e34 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -103,25 +103,29 @@ static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
}
}
-static int enic_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int enic_get_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
{
struct enic *enic = netdev_priv(netdev);
+ struct ethtool_link_settings *base = &ecmd->base;
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ ethtool_link_ksettings_add_link_mode(ecmd, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
+ base->port = PORT_FIBRE;
if (netif_carrier_ok(netdev)) {
- ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
- ecmd->duplex = DUPLEX_FULL;
+ base->speed = vnic_dev_port_speed(enic->vdev);
+ base->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ base->speed = SPEED_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = AUTONEG_DISABLE;
+ base->autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -500,7 +504,6 @@ static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
}
static const struct ethtool_ops enic_ethtool_ops = {
- .get_settings = enic_get_settings,
.get_drvinfo = enic_get_drvinfo,
.get_msglevel = enic_get_msglevel,
.set_msglevel = enic_set_msglevel,
@@ -516,6 +519,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
+ .get_link_ksettings = enic_get_ksettings,
};
void enic_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index b69a9eacc531..c3b64cdd0dec 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -173,7 +173,7 @@ static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
static void dnet_handle_link_change(struct net_device *dev)
{
struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
u32 mode_reg, ctl_reg;
@@ -295,7 +295,6 @@ static int dnet_mii_probe(struct net_device *dev)
bp->link = 0;
bp->speed = 0;
bp->duplex = -1;
- bp->phy_dev = phydev;
return 0;
}
@@ -629,16 +628,16 @@ static int dnet_open(struct net_device *dev)
struct dnet *bp = netdev_priv(dev);
/* if the phy is not yet register, retry later */
- if (!bp->phy_dev)
+ if (!dev->phydev)
return -EAGAIN;
napi_enable(&bp->napi);
dnet_init_hw(bp);
- phy_start_aneg(bp->phy_dev);
+ phy_start_aneg(dev->phydev);
/* schedule a link state check */
- phy_start(bp->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
@@ -652,8 +651,8 @@ static int dnet_close(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&bp->napi);
- if (bp->phy_dev)
- phy_stop(bp->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
dnet_reset_hw(bp);
netif_carrier_off(dev);
@@ -731,32 +730,9 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
return nstat;
}
-static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -776,11 +752,11 @@ static void dnet_get_drvinfo(struct net_device *dev,
}
static const struct ethtool_ops dnet_ethtool_ops = {
- .get_settings = dnet_get_settings,
- .set_settings = dnet_set_settings,
.get_drvinfo = dnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops dnet_netdev_ops = {
@@ -875,7 +851,7 @@ static int dnet_probe(struct platform_device *pdev)
(bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
(bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
(bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
- phydev = bp->phy_dev;
+ phydev = dev->phydev;
phy_attached_info(phydev);
return 0;
@@ -899,8 +875,8 @@ static int dnet_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
- if (bp->phy_dev)
- phy_disconnect(bp->phy_dev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h
index 37f5b30fa78b..d985080bbd5d 100644
--- a/drivers/net/ethernet/dnet.h
+++ b/drivers/net/ethernet/dnet.h
@@ -216,7 +216,6 @@ struct dnet {
/* PHY stuff */
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
unsigned int link;
unsigned int speed;
unsigned int duplex;
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index 7108563260ae..b4853ec9de8d 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -13,11 +13,3 @@ config BE2NET_HWMON
---help---
Say Y here if you want to expose thermal sensor data on
be2net network adapter.
-
-config BE2NET_VXLAN
- bool "VXLAN offload support on be2net driver"
- default y
- depends on BE2NET && VXLAN && !(BE2NET=y && VXLAN=m)
- ---help---
- Say Y here if you want to enable VXLAN offload support on
- be2net driver.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index fe3763df3f13..4555e041ef69 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -97,7 +97,8 @@
* SURF/DPDK
*/
-#define MAX_RSS_IFACES 15
+#define MAX_PORT_RSS_TABLES 15
+#define MAX_NIC_FUNCS 16
#define MAX_RX_QS 32
#define MAX_EVT_QS 32
#define MAX_TX_QS 32
@@ -442,8 +443,20 @@ struct be_resources {
u16 max_iface_count;
u16 max_mcc_count;
u16 max_evt_qs;
+ u16 max_nic_evt_qs; /* NIC's share of evt qs */
u32 if_cap_flags;
u32 vf_if_cap_flags; /* VF if capability flags */
+ u32 flags;
+ /* Calculated PF Pool's share of RSS Tables. This is not enforced by
+ * the FW, but is a self-imposed driver limitation.
+ */
+ u16 max_rss_tables;
+};
+
+/* These are port-wide values */
+struct be_port_resources {
+ u16 max_vfs;
+ u16 nic_pfs;
};
#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
@@ -513,7 +526,8 @@ struct be_adapter {
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
- u16 cfg_num_qs; /* configured via set-channels */
+ u16 cfg_num_rx_irqs; /* configured via set-channels */
+ u16 cfg_num_tx_irqs; /* configured via set-channels */
u16 num_evt_qs;
u16 num_msix_vec;
struct be_eq_obj eq_obj[MAX_EVT_QS];
@@ -632,16 +646,42 @@ struct be_adapter {
#define be_max_txqs(adapter) (adapter->res.max_tx_qs)
#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs)
#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
-#define be_max_eqs(adapter) (adapter->res.max_evt_qs)
+/* Max number of EQs available for the function (NIC + RoCE (if enabled)) */
+#define be_max_func_eqs(adapter) (adapter->res.max_evt_qs)
+/* Max number of EQs available only for NIC */
+#define be_max_nic_eqs(adapter) (adapter->res.max_nic_evt_qs)
#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
-
-static inline u16 be_max_qs(struct be_adapter *adapter)
+#define be_max_pf_pool_rss_tables(adapter) \
+ (adapter->pool_res.max_rss_tables)
+/* Max irqs available for NIC */
+#define be_max_irqs(adapter) \
+ (min_t(u16, be_max_nic_eqs(adapter), num_online_cpus()))
+
+/* Max irqs *needed* for RX queues */
+static inline u16 be_max_rx_irqs(struct be_adapter *adapter)
{
- /* If no RSS, need atleast the one def RXQ */
+	/* If no RSS, need at least one irq for def-RXQ */
u16 num = max_t(u16, be_max_rss(adapter), 1);
- num = min(num, be_max_eqs(adapter));
- return min_t(u16, num, num_online_cpus());
+ return min_t(u16, num, be_max_irqs(adapter));
+}
+
+/* Max irqs *needed* for TX queues */
+static inline u16 be_max_tx_irqs(struct be_adapter *adapter)
+{
+ return min_t(u16, be_max_txqs(adapter), be_max_irqs(adapter));
+}
+
+/* Max irqs *needed* for combined queues */
+static inline u16 be_max_qp_irqs(struct be_adapter *adapter)
+{
+ return min(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
+}
+
+/* Max irqs *needed* for RX and TX queues together */
+static inline u16 be_max_any_irqs(struct be_adapter *adapter)
+{
+ return max(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
}
/* Is BE in pvid_tagging mode */
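
[Note: to make the new irq-sizing helpers above concrete with illustrative numbers — on a function with 16 NIC event queues, 8 online CPUs, 6 RSS queues and 12 TX queues: be_max_irqs() is min(16, 8) = 8, be_max_rx_irqs() is min(max(6, 1), 8) = 6, be_max_tx_irqs() is min(12, 8) = 8, so be_max_qp_irqs() = min(8, 6) = 6 and be_max_any_irqs() = max(8, 6) = 8.]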
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 22402db275f2..2cc11756859f 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -87,6 +87,11 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
CMD_SUBSYSTEM_LOWLEVEL,
BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
},
+ {
+ OPCODE_COMMON_SET_HSW_CONFIG,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_DEVCFG | BE_PRIV_VHADM
+ },
};
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
@@ -3850,6 +3855,10 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
void *ctxt;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_HSW_CONFIG,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -3871,7 +3880,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
}
- if (!BEx_chip(adapter) && hsw_mode) {
+ if (hsw_mode) {
AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
ctxt, adapter->hba_port_num);
AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
@@ -4023,7 +4032,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
adapter->wol_cap = resp->wol_settings;
- if (adapter->wol_cap & BE_WOL_CAP)
+
+ /* Non-zero macaddr indicates WOL is enabled */
+ if (adapter->wol_cap & BE_WOL_CAP &&
+ !is_zero_ether_addr(resp->magic_mac))
adapter->wol_en = true;
}
err:
@@ -4360,9 +4372,35 @@ err:
return status;
}
+/* This routine returns a list of all the NIC PF_nums in the adapter */
+u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ struct be_pcie_res_desc *pcie = NULL;
+ int i;
+ u16 nic_pf_count = 0;
+
+ for (i = 0; i < desc_count; i++) {
+ if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
+ pcie = (struct be_pcie_res_desc *)hdr;
+ if (pcie->pf_state && (pcie->pf_type == MISSION_NIC ||
+ pcie->pf_type == MISSION_RDMA)) {
+ nic_pf_nums[nic_pf_count++] = pcie->pf_num;
+ }
+ }
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
+ return nic_pf_count;
+}
+
/* Will use MBOX only if MCCQ has not been created */
int be_cmd_get_profile_config(struct be_adapter *adapter,
- struct be_resources *res, u8 query, u8 domain)
+ struct be_resources *res,
+ struct be_port_resources *port_res,
+ u8 profile_type, u8 query, u8 domain)
{
struct be_cmd_resp_get_profile_config *resp;
struct be_cmd_req_get_profile_config *req;
@@ -4389,7 +4427,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
if (!lancer_chip(adapter))
req->hdr.version = 1;
- req->type = ACTIVE_PROFILE_TYPE;
+ req->type = profile_type;
req->hdr.domain = domain;
/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
@@ -4406,6 +4444,28 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
resp = cmd.va;
desc_count = le16_to_cpu(resp->desc_count);
+ if (port_res) {
+ u16 nic_pf_cnt = 0, i;
+ u16 nic_pf_num_list[MAX_NIC_FUNCS];
+
+ nic_pf_cnt = be_get_nic_pf_num_list(resp->func_param,
+ desc_count,
+ nic_pf_num_list);
+
+ for (i = 0; i < nic_pf_cnt; i++) {
+ nic = be_get_func_nic_desc(resp->func_param, desc_count,
+ nic_pf_num_list[i]);
+ if (nic->link_param == adapter->port_num) {
+ port_res->nic_pfs++;
+ pcie = be_get_pcie_desc(resp->func_param,
+ desc_count,
+ nic_pf_num_list[i]);
+ port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
+ }
+ }
+ return status;
+ }
+
pcie = be_get_pcie_desc(resp->func_param, desc_count,
adapter->pf_num);
if (pcie)
@@ -4465,7 +4525,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
}
/* Mark all fields invalid */
-static void be_reset_nic_desc(struct be_nic_res_desc *nic)
+void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
memset(nic, 0, sizeof(*nic));
nic->unicast_mac_count = 0xFFFF;
@@ -4534,73 +4594,9 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
1, version, domain);
}
-static void be_fill_vf_res_template(struct be_adapter *adapter,
- struct be_resources pool_res,
- u16 num_vfs, u16 num_vf_qs,
- struct be_nic_res_desc *nic_vft)
-{
- u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
- struct be_resources res_mod = {0};
-
- /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
- * which are modifiable using SET_PROFILE_CONFIG cmd.
- */
- be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
-
- /* If RSS IFACE capability flags are modifiable for a VF, set the
- * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
- * more than 1 RSSQ is available for a VF.
- * Otherwise, provision only 1 queue pair for VF.
- */
- if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
- nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
- if (num_vf_qs > 1) {
- vf_if_cap_flags |= BE_IF_FLAGS_RSS;
- if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
- vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
- } else {
- vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
- BE_IF_FLAGS_DEFQ_RSS);
- }
- } else {
- num_vf_qs = 1;
- }
-
- if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
- nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
- vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
- }
-
- nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
- nic_vft->rq_count = cpu_to_le16(num_vf_qs);
- nic_vft->txq_count = cpu_to_le16(num_vf_qs);
- nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
- nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
- (num_vfs + 1));
-
- /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
- * among the PF and it's VFs, if the fields are changeable
- */
- if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
- nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
- (num_vfs + 1));
-
- if (res_mod.max_vlans == FIELD_MODIFIABLE)
- nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
- (num_vfs + 1));
-
- if (res_mod.max_iface_count == FIELD_MODIFIABLE)
- nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
- (num_vfs + 1));
-
- if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
- nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
- (num_vfs + 1));
-}
-
int be_cmd_set_sriov_config(struct be_adapter *adapter,
struct be_resources pool_res, u16 num_vfs,
- u16 num_vf_qs)
+ struct be_resources *vft_res)
{
struct {
struct be_pcie_res_desc pcie;
@@ -4620,12 +4616,26 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter,
be_reset_nic_desc(&desc.nic_vft);
desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
- desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
+ desc.nic_vft.flags = vft_res->flags | BIT(VFT_SHIFT) |
+ BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
desc.nic_vft.pf_num = adapter->pdev->devfn;
desc.nic_vft.vf_num = 0;
-
- be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
- &desc.nic_vft);
+ desc.nic_vft.cap_flags = cpu_to_le32(vft_res->vf_if_cap_flags);
+ desc.nic_vft.rq_count = cpu_to_le16(vft_res->max_rx_qs);
+ desc.nic_vft.txq_count = cpu_to_le16(vft_res->max_tx_qs);
+ desc.nic_vft.rssq_count = cpu_to_le16(vft_res->max_rss_qs);
+ desc.nic_vft.cq_count = cpu_to_le16(vft_res->max_cq_count);
+
+ if (vft_res->max_uc_mac)
+ desc.nic_vft.unicast_mac_count =
+ cpu_to_le16(vft_res->max_uc_mac);
+ if (vft_res->max_vlans)
+ desc.nic_vft.vlan_count = cpu_to_le16(vft_res->max_vlans);
+ if (vft_res->max_iface_count)
+ desc.nic_vft.iface_count =
+ cpu_to_le16(vft_res->max_iface_count);
+ if (vft_res->max_mcc_count)
+ desc.nic_vft.mcc_count = cpu_to_le16(vft_res->max_mcc_count);
return be_cmd_set_profile_config(adapter, &desc,
2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d8540ae95e5a..0d6be224a787 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -1556,7 +1556,9 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 {
u8 rsvd0[2];
u8 wol_settings;
u8 rsvd1[5];
- u32 rsvd2[295];
+ u32 rsvd2[288];
+ u8 magic_mac[6];
+ u8 rsvd3[22];
} __packed;
#define BE_GET_WOL_CAP 2
@@ -2128,6 +2130,9 @@ struct be_cmd_req_set_ext_fat_caps {
#define IMM_SHIFT 6 /* Immediate */
#define NOSV_SHIFT 7 /* No save */
+#define MISSION_NIC 1
+#define MISSION_RDMA 8
+
struct be_res_desc_hdr {
u8 desc_type;
u8 desc_len;
@@ -2244,6 +2249,7 @@ struct be_cmd_req_get_profile_config {
struct be_cmd_req_hdr hdr;
u8 rsvd;
#define ACTIVE_PROFILE_TYPE 0x2
+#define SAVED_PROFILE_TYPE 0x0
#define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3)
u8 type;
u16 rsvd1;
@@ -2449,7 +2455,9 @@ int be_cmd_query_port_name(struct be_adapter *adapter);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
int be_cmd_get_profile_config(struct be_adapter *adapter,
- struct be_resources *res, u8 query, u8 domain);
+ struct be_resources *res,
+ struct be_port_resources *port_res,
+ u8 profile_type, u8 query, u8 domain);
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
int vf_num);
@@ -2461,4 +2469,4 @@ int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
int be_cmd_set_sriov_config(struct be_adapter *adapter,
struct be_resources res, u16 num_vfs,
- u16 num_vf_qs);
+ struct be_resources *vft_res);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 2ff691636dac..50e7be5da50c 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -793,6 +793,11 @@ static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ struct be_dma_mem cmd;
+ u8 mac[ETH_ALEN];
+ bool enable;
+ int status;
if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
@@ -802,12 +807,32 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
}
- if (wol->wolopts & WAKE_MAGIC)
- adapter->wol_en = true;
- else
- adapter->wol_en = false;
+ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
+ cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
+ if (!cmd.va)
+ return -ENOMEM;
- return 0;
+ eth_zero_addr(mac);
+
+ enable = wol->wolopts & WAKE_MAGIC;
+ if (enable)
+ ether_addr_copy(mac, adapter->netdev->dev_addr);
+
+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+ if (status) {
+ dev_err(dev, "Could not set Wake-on-lan mac address\n");
+ status = be_cmd_status(status);
+ goto err;
+ }
+
+ pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
+
+	adapter->wol_en = enable;
+
+err:
+ dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
+ return status;
}
static int be_test_ddr_dma(struct be_adapter *adapter)
@@ -1171,9 +1196,17 @@ static void be_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ u16 num_rx_irqs = max_t(u16, adapter->num_rss_qs, 1);
- ch->combined_count = adapter->num_evt_qs;
- ch->max_combined = be_max_qs(adapter);
+	/* num_tx_qs is always the same as the number of irqs used for TX */
+ ch->combined_count = min(adapter->num_tx_qs, num_rx_irqs);
+ ch->rx_count = num_rx_irqs - ch->combined_count;
+ ch->tx_count = adapter->num_tx_qs - ch->combined_count;
+
+ ch->max_combined = be_max_qp_irqs(adapter);
+	/* The user must create at least one combined channel */
+ ch->max_rx = be_max_rx_irqs(adapter) - 1;
+ ch->max_tx = be_max_tx_irqs(adapter) - 1;
}
static int be_set_channels(struct net_device *netdev,
@@ -1182,11 +1215,22 @@ static int be_set_channels(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
int status;
- if (ch->rx_count || ch->tx_count || ch->other_count ||
- !ch->combined_count || ch->combined_count > be_max_qs(adapter))
+ /* we support either only combined channels or a combination of
+ * combined and either RX-only or TX-only channels.
+ */
+ if (ch->other_count || !ch->combined_count ||
+ (ch->rx_count && ch->tx_count))
+ return -EINVAL;
+
+ if (ch->combined_count > be_max_qp_irqs(adapter) ||
+ (ch->rx_count &&
+ (ch->rx_count + ch->combined_count) > be_max_rx_irqs(adapter)) ||
+ (ch->tx_count &&
+ (ch->tx_count + ch->combined_count) > be_max_tx_irqs(adapter)))
return -EINVAL;
- adapter->cfg_num_qs = ch->combined_count;
+ adapter->cfg_num_rx_irqs = ch->combined_count + ch->rx_count;
+ adapter->cfg_num_tx_irqs = ch->combined_count + ch->tx_count;
status = be_update_queues(adapter);
return be_cmd_status(status);
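
[Note: under the reworked be_set_channels() rules above, a request must include at least one combined channel and may add RX-only or TX-only channels, but not both. With illustrative limits of be_max_qp_irqs() = 8 and be_max_rx_irqs() = 10, "ethtool -L <iface> combined 6 rx 3" is accepted (6 <= 8 and 6 + 3 <= 10), while any request naming both rx and tx counts is rejected outright.]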
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ed98ef1ecac3..1f16e73f6d0c 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -2620,8 +2620,10 @@ static int be_evt_queues_create(struct be_adapter *adapter)
struct be_aic_obj *aic;
int i, rc;
+ /* need enough EQs to service both RX and TX queues */
adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
- adapter->cfg_num_qs);
+ max(adapter->cfg_num_rx_irqs,
+ adapter->cfg_num_tx_irqs));
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
@@ -2726,7 +2728,7 @@ static int be_tx_qs_create(struct be_adapter *adapter)
struct be_eq_obj *eqo;
int status, i;
- adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
+ adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
for_all_tx_queues(adapter, txo, i) {
cq = &txo->cq;
@@ -2784,11 +2786,11 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
struct be_rx_obj *rxo;
int rc, i;
- /* We can create as many RSS rings as there are EQs. */
- adapter->num_rss_qs = adapter->num_evt_qs;
+ adapter->num_rss_qs =
+ min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
/* We'll use RSS only if atleast 2 RSS rings are supported. */
- if (adapter->num_rss_qs <= 1)
+ if (adapter->num_rss_qs < 2)
adapter->num_rss_qs = 0;
adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
@@ -3249,18 +3251,23 @@ static void be_msix_disable(struct be_adapter *adapter)
static int be_msix_enable(struct be_adapter *adapter)
{
- int i, num_vec;
+ unsigned int i, max_roce_eqs;
struct device *dev = &adapter->pdev->dev;
+ int num_vec;
- /* If RoCE is supported, program the max number of NIC vectors that
- * may be configured via set-channels, along with vectors needed for
- * RoCe. Else, just program the number we'll use initially.
+ /* If RoCE is supported, program the max number of vectors that
+ * could be used for NIC and RoCE, else, just program the number
+ * we'll use initially.
*/
- if (be_roce_supported(adapter))
- num_vec = min_t(int, 2 * be_max_eqs(adapter),
- 2 * num_online_cpus());
- else
- num_vec = adapter->cfg_num_qs;
+ if (be_roce_supported(adapter)) {
+ max_roce_eqs =
+ be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
+ max_roce_eqs = min(max_roce_eqs, num_online_cpus());
+ num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
+ } else {
+ num_vec = max(adapter->cfg_num_rx_irqs,
+ adapter->cfg_num_tx_irqs);
+ }
for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
@@ -3625,10 +3632,8 @@ static int be_open(struct net_device *netdev)
be_link_status_update(adapter, link_status);
netif_tx_start_all_queues(netdev);
-#ifdef CONFIG_BE2NET_VXLAN
if (skyhawk_chip(adapter))
- vxlan_get_rx_port(netdev);
-#endif
+ udp_tunnel_get_rx_info(netdev);
return 0;
err:
@@ -3636,40 +3641,6 @@ err:
return -EIO;
}
-static int be_setup_wol(struct be_adapter *adapter, bool enable)
-{
- struct device *dev = &adapter->pdev->dev;
- struct be_dma_mem cmd;
- u8 mac[ETH_ALEN];
- int status;
-
- eth_zero_addr(mac);
-
- cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
- if (!cmd.va)
- return -ENOMEM;
-
- if (enable) {
- status = pci_write_config_dword(adapter->pdev,
- PCICFG_PM_CONTROL_OFFSET,
- PCICFG_PM_CONTROL_MASK);
- if (status) {
- dev_err(dev, "Could not enable Wake-on-lan\n");
- goto err;
- }
- } else {
- ether_addr_copy(mac, adapter->netdev->dev_addr);
- }
-
- status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
- pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
- pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
-err:
- dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
- return status;
-}
-
static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
u32 addr;
@@ -3759,6 +3730,11 @@ static void be_vf_clear(struct be_adapter *adapter)
be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
}
+
+ if (BE3_chip(adapter))
+ be_cmd_set_hsw_config(adapter, 0, 0,
+ adapter->if_handle,
+ PORT_FWD_TYPE_PASSTHRU, 0);
done:
kfree(adapter->vf_cfg);
adapter->num_vfs = 0;
@@ -3789,7 +3765,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
}
}
-#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3808,37 +3783,87 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
-#endif
-static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
+static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
+ struct be_resources *vft_res)
{
struct be_resources res = adapter->pool_res;
+ u32 vf_if_cap_flags = res.vf_if_cap_flags;
+ struct be_resources res_mod = {0};
u16 num_vf_qs = 1;
- /* Distribute the queue resources among the PF and it's VFs
- * Do not distribute queue resources in multi-channel configuration.
- */
- if (num_vfs && !be_is_mc(adapter)) {
- /* Divide the qpairs evenly among the VFs and the PF, capped
- * at VF-EQ-count. Any remainder qpairs belong to the PF.
- */
+	/* Distribute the queue resources among the PF and its VFs */
+ if (num_vfs) {
+ /* Divide the rx queues evenly among the VFs and the PF, capped
+ * at VF-EQ-count. Any remainder queues belong to the PF.
+ */
num_vf_qs = min(SH_VF_MAX_NIC_EQS,
res.max_rss_qs / (num_vfs + 1));
- /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
- * interfaces per port. Provide RSS on VFs, only if number
- * of VFs requested is less than MAX_RSS_IFACES limit.
+ /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
+	 * RSS Tables per port. Provide RSS on VFs only if the number of
+	 * VFs requested is below the PF pool's RSS Tables limit.
*/
- if (num_vfs >= MAX_RSS_IFACES)
+ if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
num_vf_qs = 1;
}
- return num_vf_qs;
+
+ /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
+ * which are modifiable using SET_PROFILE_CONFIG cmd.
+ */
+ be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
+ RESOURCE_MODIFIABLE, 0);
+
+ /* If RSS IFACE capability flags are modifiable for a VF, set the
+ * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
+ * more than 1 RSSQ is available for a VF.
+ * Otherwise, provision only 1 queue pair for VF.
+ */
+ if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+ vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+ if (num_vf_qs > 1) {
+ vf_if_cap_flags |= BE_IF_FLAGS_RSS;
+ if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
+ vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
+ } else {
+ vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
+ BE_IF_FLAGS_DEFQ_RSS);
+ }
+ } else {
+ num_vf_qs = 1;
+ }
+
+ if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+ vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+ vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ }
+
+ vft_res->vf_if_cap_flags = vf_if_cap_flags;
+ vft_res->max_rx_qs = num_vf_qs;
+ vft_res->max_rss_qs = num_vf_qs;
+ vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
+ vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
+
+ /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
+	 * among the PF and its VFs, if the fields are changeable
+ */
+ if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
+ vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
+
+ if (res_mod.max_vlans == FIELD_MODIFIABLE)
+ vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
+
+ if (res_mod.max_iface_count == FIELD_MODIFIABLE)
+ vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
+
+ if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
+ vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
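
[Note: as a worked example of the distribution above (numbers illustrative): with 64 RSS queues in the PF pool and 15 VFs requested, each of the 16 functions gets num_vf_qs = 64 / 16 = 4 RX/RSS queues, subject to the SH_VF_MAX_NIC_EQS cap; if the requested VF count reaches the PF pool's RSS-table limit, every VF instead falls back to a single queue pair with the RSS IFACE flags cleared.]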
static int be_clear(struct be_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- u16 num_vf_qs;
+ struct be_resources vft_res = {0};
be_cancel_worker(adapter);
@@ -3850,16 +3875,15 @@ static int be_clear(struct be_adapter *adapter)
*/
if (skyhawk_chip(adapter) && be_physfn(adapter) &&
!pci_vfs_assigned(pdev)) {
- num_vf_qs = be_calculate_vf_qs(adapter,
- pci_sriov_get_totalvfs(pdev));
+ be_calculate_vf_res(adapter,
+ pci_sriov_get_totalvfs(pdev),
+ &vft_res);
be_cmd_set_sriov_config(adapter, adapter->pool_res,
pci_sriov_get_totalvfs(pdev),
- num_vf_qs);
+ &vft_res);
}
-#ifdef CONFIG_BE2NET_VXLAN
be_disable_vxlan_offloads(adapter);
-#endif
kfree(adapter->pmac_id);
adapter->pmac_id = NULL;
@@ -3884,7 +3908,8 @@ static int be_vfs_if_create(struct be_adapter *adapter)
for_all_vfs(adapter, vf_cfg, vf) {
if (!BE3_chip(adapter)) {
- status = be_cmd_get_profile_config(adapter, &res,
+ status = be_cmd_get_profile_config(adapter, &res, NULL,
+ ACTIVE_PROFILE_TYPE,
RESOURCE_LIMITS,
vf + 1);
if (!status) {
@@ -4000,6 +4025,15 @@ static int be_vf_setup(struct be_adapter *adapter)
}
}
+ if (BE3_chip(adapter)) {
+ /* On BE3, enable VEB only when SRIOV is enabled */
+ status = be_cmd_set_hsw_config(adapter, 0, 0,
+ adapter->if_handle,
+ PORT_FWD_TYPE_VEB, 0);
+ if (status)
+ goto err;
+ }
+
adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
return 0;
err:
@@ -4069,8 +4103,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
/* On a SuperNIC profile, the driver needs to use the
* GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
*/
- be_cmd_get_profile_config(adapter, &super_nic_res,
- RESOURCE_LIMITS, 0);
+ be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
+ ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
+ 0);
/* Some old versions of BE3 FW don't report max_tx_qs value */
res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
} else {
@@ -4109,12 +4144,38 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->cmd_privileges = MIN_PRIVILEGES;
}
+/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
+ * However, this HW limitation is not exposed to the host via any SLI cmd.
+ * As a result, in the case of SRIOV and, in particular, multi-partition
+ * configs, the driver needs to calculate a proportional share of RSS Tables
+ * per PF-pool for distribution between the VFs. This self-imposed limit
+ * determines the number of VFs for which RSS can be enabled.
+ */
+void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
+{
+ struct be_port_resources port_res = {0};
+ u8 rss_tables_on_port;
+ u16 max_vfs = be_max_vfs(adapter);
+
+ be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
+ RESOURCE_LIMITS, 0);
+
+ rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
+
+ /* Each PF Pool's RSS Tables limit =
+ * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
+ */
+ adapter->pool_res.max_rss_tables =
+ max_vfs * rss_tables_on_port / port_res.max_vfs;
+}
+
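As a worked example of the comment's formula (all numbers assumed for illustration, including MAX_PORT_RSS_TABLES = 15): with one NIC PF on the port (port_res.nic_pfs = 1), port_res.max_vfs = 64 and this PF's max_vfs = 32, the pool is granted:

    /* Illustrative only -- inputs come from the SAVED_PROFILE_TYPE
     * query above; integer division rounds the share down.
     */
    max_rss_tables = 32 * (15 - 1) / 64;    /* = 7 RSS tables */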
static int be_get_sriov_config(struct be_adapter *adapter)
{
struct be_resources res = {0};
int max_vfs, old_vfs;
- be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
+ be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
+ RESOURCE_LIMITS, 0);
/* Some old versions of BE3 FW don't report max_vfs value */
if (BE3_chip(adapter) && !res.max_vfs) {
@@ -4138,13 +4199,19 @@ static int be_get_sriov_config(struct be_adapter *adapter)
adapter->num_vfs = old_vfs;
}
+ if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
+ be_calculate_pf_pool_rss_tables(adapter);
+ dev_info(&adapter->pdev->dev,
+ "RSS can be enabled for all VFs if num_vfs <= %d\n",
+ be_max_pf_pool_rss_tables(adapter));
+ }
return 0;
}
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
int old_vfs = pci_num_vf(adapter->pdev);
- u16 num_vf_qs;
+ struct be_resources vft_res = {0};
int status;
be_get_sriov_config(adapter);
@@ -4158,9 +4225,9 @@ static void be_alloc_sriov_res(struct be_adapter *adapter)
* Also, this is done by FW in Lancer chip.
*/
if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
- num_vf_qs = be_calculate_vf_qs(adapter, 0);
+ be_calculate_vf_res(adapter, 0, &vft_res);
status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
- num_vf_qs);
+ &vft_res);
if (status)
dev_err(&adapter->pdev->dev,
"Failed to optimize SRIOV resources\n");
@@ -4173,16 +4240,13 @@ static int be_get_resources(struct be_adapter *adapter)
struct be_resources res = {0};
int status;
- if (BEx_chip(adapter)) {
- BEx_get_resources(adapter, &res);
- adapter->res = res;
- }
-
/* For Lancer, SH etc read per-function resource limits from FW.
* GET_FUNC_CONFIG returns per function guaranteed limits.
* GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
*/
- if (!BEx_chip(adapter)) {
+ if (BEx_chip(adapter)) {
+ BEx_get_resources(adapter, &res);
+ } else {
status = be_cmd_get_func_config(adapter, &res);
if (status)
return status;
@@ -4191,13 +4255,13 @@ static int be_get_resources(struct be_adapter *adapter)
if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
!(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
res.max_rss_qs -= 1;
-
- /* If RoCE may be enabled stash away half the EQs for RoCE */
- if (be_roce_supported(adapter))
- res.max_evt_qs /= 2;
- adapter->res = res;
}
+ /* If RoCE is supported, stash away half the EQs for RoCE */
+ res.max_nic_evt_qs = be_roce_supported(adapter) ?
+ res.max_evt_qs / 2 : res.max_evt_qs;
+ adapter->res = res;
+
/* If FW supports RSS default queue, then skip creating non-RSS
* queue for non-IP traffic.
*/
@@ -4206,15 +4270,17 @@ static int be_get_resources(struct be_adapter *adapter)
dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
be_max_txqs(adapter), be_max_rxqs(adapter),
- be_max_rss(adapter), be_max_eqs(adapter),
+ be_max_rss(adapter), be_max_nic_eqs(adapter),
be_max_vfs(adapter));
dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
be_max_uc(adapter), be_max_mc(adapter),
be_max_vlans(adapter));
- /* Sanitize cfg_num_qs based on HW and platform limits */
- adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
- be_max_qs(adapter));
+ /* Ensure RX and TX queues are created in pairs at init time */
+ adapter->cfg_num_rx_irqs =
+ min_t(u16, netif_get_num_default_rss_queues(),
+ be_max_qp_irqs(adapter));
+ adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
return 0;
}
@@ -4241,6 +4307,8 @@ static int be_get_config(struct be_adapter *adapter)
}
be_cmd_get_acpi_wol_cap(adapter);
+ pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
be_cmd_query_port_name(adapter);
@@ -4251,15 +4319,6 @@ static int be_get_config(struct be_adapter *adapter)
"Using profile 0x%x\n", profile_id);
}
- status = be_get_resources(adapter);
- if (status)
- return status;
-
- adapter->pmac_id = kcalloc(be_max_uc(adapter),
- sizeof(*adapter->pmac_id), GFP_KERNEL);
- if (!adapter->pmac_id)
- return -ENOMEM;
-
return 0;
}
@@ -4334,7 +4393,7 @@ static int be_if_create(struct be_adapter *adapter)
u32 cap_flags = be_if_cap_flags(adapter);
int status;
- if (adapter->cfg_num_qs == 1)
+ if (adapter->cfg_num_rx_irqs == 1)
cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
en_flags &= cap_flags;
@@ -4460,13 +4519,22 @@ static int be_setup(struct be_adapter *adapter)
return status;
}
+ status = be_get_config(adapter);
+ if (status)
+ goto err;
+
if (!BE2_chip(adapter) && be_physfn(adapter))
be_alloc_sriov_res(adapter);
- status = be_get_config(adapter);
+ status = be_get_resources(adapter);
if (status)
goto err;
+ adapter->pmac_id = kcalloc(be_max_uc(adapter),
+ sizeof(*adapter->pmac_id), GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
+
status = be_msix_enable(adapter);
if (status)
goto err;
@@ -4511,6 +4579,15 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_logical_link_config(adapter,
IFLA_VF_LINK_STATE_AUTO, 0);
+ /* The BE3 EVB echoes broadcast/multicast packets back to the PF's
+ * vport, confusing any Linux bridge or OVS it may be connected to.
+ * When SRIOV is not enabled, set the EVB to PASSTHRU mode, which
+ * effectively disables it.
+ */
+ if (BE3_chip(adapter))
+ be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
+ PORT_FWD_TYPE_PASSTHRU, 0);
+
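Read together with the be_vf_setup() hunk earlier in this patch, the two call sites give BE3 a single forwarding-mode policy (condensed view, not driver code):

    /* BE3 hardware-switch mode after this patch:
     *   be_setup()    -> PORT_FWD_TYPE_PASSTHRU  (SRIOV off: EVB disabled)
     *   be_vf_setup() -> PORT_FWD_TYPE_VEB       (SRIOV on:  EVB enabled)
     */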
if (adapter->num_vfs)
be_vf_setup(adapter);
@@ -4651,7 +4728,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
0, 0, nlflags, filter_mask, NULL);
}
-#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
*
* The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
@@ -4666,13 +4742,17 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
* adds more than one port, disable offloads and don't re-enable them again
* until after all the tunnels are removed.
*/
-static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
- __be16 port)
+static void be_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct device *dev = &adapter->pdev->dev;
+ __be16 port = ti->port;
int status;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
@@ -4720,10 +4800,14 @@ err:
be_disable_vxlan_offloads(adapter);
}
-static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
- __be16 port)
+static void be_del_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ __be16 port = ti->port;
+
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
@@ -4785,7 +4869,6 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
return features;
}
-#endif
static int be_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
@@ -4833,11 +4916,9 @@ static const struct net_device_ops be_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = be_busy_poll,
#endif
-#ifdef CONFIG_BE2NET_VXLAN
- .ndo_add_vxlan_port = be_add_vxlan_port,
- .ndo_del_vxlan_port = be_del_vxlan_port,
+ .ndo_udp_tunnel_add = be_add_vxlan_port,
+ .ndo_udp_tunnel_del = be_del_vxlan_port,
.ndo_features_check = be_features_check,
-#endif
.ndo_get_phys_port_id = be_get_phys_port_id,
};
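be_add_vxlan_port()/be_del_vxlan_port() now hang off the generic UDP tunnel hooks, which hand the driver a struct udp_tunnel_info rather than a bare port, so every handler must filter on ti->type itself. A minimal sketch of the pattern (my_program_vxlan_port() is a hypothetical helper):

    #include <linux/netdevice.h>
    #include <net/udp_tunnel.h>

    static void my_program_vxlan_port(struct net_device *netdev, __be16 port);

    static void my_udp_tunnel_add(struct net_device *netdev,
                                  struct udp_tunnel_info *ti)
    {
        /* the core reports all tunnel types; ignore non-VxLAN ones */
        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
            return;

        my_program_vxlan_port(netdev, ti->port);
    }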
@@ -5410,9 +5491,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- if (adapter->wol_en)
- be_setup_wol(adapter, true);
-
be_intr_set(adapter, false);
be_cancel_err_detection(adapter);
@@ -5441,9 +5519,6 @@ static int be_pci_resume(struct pci_dev *pdev)
be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
- if (adapter->wol_en)
- be_setup_wol(adapter, false);
-
return 0;
}
@@ -5552,7 +5627,7 @@ err:
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- u16 num_vf_qs;
+ struct be_resources vft_res = {0};
int status;
if (!num_vfs)
@@ -5575,9 +5650,10 @@ static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
* Also, this is done by FW in Lancer chip.
*/
if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
- num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
+ be_calculate_vf_res(adapter, adapter->num_vfs,
+ &vft_res);
status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
- adapter->num_vfs, num_vf_qs);
+ adapter->num_vfs, &vft_res);
if (status)
dev_err(&pdev->dev,
"Failed to optimize SR-IOV resources\n");
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 4089156a7f5e..2b62841c4c63 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index fde609789483..e51719a7307f 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 41b010645100..4edb98c3c6c7 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1195,7 +1195,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
ret = -ENOMEM;
- goto free;
+ goto free2;
}
priv->mdio->name = "ethoc-mdio";
@@ -1208,7 +1208,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free;
+ goto free2;
}
ret = ethoc_mdio_probe(netdev);
@@ -1241,9 +1241,10 @@ error2:
error:
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
-free:
+free2:
if (priv->clk)
clk_disable_unprepare(priv->clk);
+free:
free_netdev(netdev);
out:
return ret;
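The relabeling restores the usual unwind ladder: labels release resources in reverse order of acquisition, and each failure site jumps to the label that undoes exactly what is already held. A simplified sketch with hypothetical acquire/release pairs:

    static int example_probe(void)
    {
        int ret;

        ret = acquire_a();          /* e.g. alloc_etherdev() */
        if (ret)
            goto out;
        ret = acquire_b();          /* e.g. clk_prepare_enable() */
        if (ret)
            goto free;              /* only A held here */
        ret = acquire_c();          /* e.g. mdiobus_alloc() */
        if (ret)
            goto free2;             /* A and B held here */
        return 0;

    free2:
        release_b();                /* falls through to undo A too */
    free:
        release_a();
    out:
        return ret;
    }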
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index f58f9ea51639..92fd5c0bf4df 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -442,6 +442,8 @@ struct bufdesc_ex {
#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
/* Controller supports RACC register */
#define FEC_QUIRK_HAS_RACC (1 << 12)
+/* Controller supports interrupt coalescing */
+#define FEC_QUIRK_HAS_COALESCE (1 << 13)
struct bufdesc_prop {
int qid;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 3c0255e98535..4040003a74f9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -111,7 +111,13 @@ static struct platform_device_id fec_devtype[] = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ }, {
+ .name = "imx6ul-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
}, {
/* sentinel */
}
@@ -125,6 +131,7 @@ enum imx_fec_type {
IMX6Q_FEC,
MVF600_FEC,
IMX6SX_FEC,
+ IMX6UL_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -134,6 +141,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
+ { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -2358,9 +2366,6 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int rx_itr, tx_itr;
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
- return;
-
/* Must be greater than zero to avoid unpredictable behavior */
if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
!fep->tx_time_itr || !fep->tx_pkts_itr)
@@ -2383,10 +2388,12 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
writel(tx_itr, fep->hwp + FEC_TXIC0);
writel(rx_itr, fep->hwp + FEC_RXIC0);
- writel(tx_itr, fep->hwp + FEC_TXIC1);
- writel(rx_itr, fep->hwp + FEC_RXIC1);
- writel(tx_itr, fep->hwp + FEC_TXIC2);
- writel(rx_itr, fep->hwp + FEC_RXIC2);
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ writel(tx_itr, fep->hwp + FEC_TXIC1);
+ writel(rx_itr, fep->hwp + FEC_RXIC1);
+ writel(tx_itr, fep->hwp + FEC_TXIC2);
+ writel(rx_itr, fep->hwp + FEC_RXIC2);
+ }
}
static int
@@ -2394,7 +2401,7 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
return -EOPNOTSUPP;
ec->rx_coalesce_usecs = fep->rx_time_itr;
@@ -2412,28 +2419,28 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int cycle;
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) {
- pr_err("Rx coalesced frames exceed hardware limiation");
+ pr_err("Rx coalesced frames exceed hardware limitation\n");
return -EINVAL;
}
if (ec->tx_max_coalesced_frames > 255) {
- pr_err("Tx coalesced frame exceed hardware limiation");
+ pr_err("Tx coalesced frame exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
@@ -3191,7 +3198,12 @@ static void fec_reset_phy(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
return;
}
- msleep(msec);
+
+ if (msec > 20)
+ msleep(msec);
+ else
+ usleep_range(msec * 1000, msec * 1000 + 1000);
+
gpio_set_value_cansleep(phy_reset, !active_high);
}
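The msleep()/usleep_range() split follows the kernel's timer guidance (Documentation/timers/timers-howto.txt): msleep() is jiffies-based and can oversleep small values by a full tick or more, so sub-20ms delays are better served by usleep_range(), whose upper bound also gives the scheduler slack to coalesce wakeups. The pattern in isolation (a sketch, not driver code):

    #include <linux/delay.h>

    /* sleep roughly 'msec' milliseconds, allowing ~1ms of slack for
     * short delays so hrtimer wakeups can be batched
     */
    static void sleep_approx_ms(unsigned int msec)
    {
        if (msec > 20)
            msleep(msec);
        else
            usleep_range(msec * 1000, msec * 1000 + 1000);
    }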
#else /* CONFIG_OF */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7615e0668acb..d20935dc8399 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2275,7 +2275,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
fcb->flags = flags;
}
-void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
+static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
fcb->flags |= TXFCB_VLN;
fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
@@ -2440,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->tx_ring_size);
if (likely(!nr_frags)) {
- lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ if (likely(!do_tstamp))
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
u32 lstatus_start = lstatus;
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 4ccc032633c4..2e2566230e27 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_HISILICON
bool "Hisilicon devices"
default y
- depends on OF && HAS_DMA
+ depends on (OF || ACPI) && HAS_DMA
depends on ARM || ARM64 || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index b9f2ea59308a..275618bb4646 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -218,7 +218,6 @@ struct hix5hd2_priv {
struct device *dev;
struct net_device *netdev;
- struct phy_device *phy;
struct device_node *phy_node;
phy_interface_t phy_mode;
@@ -402,7 +401,7 @@ static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
static void hix5hd2_adjust_link(struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
- struct phy_device *phy = priv->phy;
+ struct phy_device *phy = dev->phydev;
if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
hix5hd2_config_port(dev, phy->speed, phy->duplex);
@@ -679,6 +678,7 @@ static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
static int hix5hd2_net_open(struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct phy_device *phy;
int ret;
ret = clk_prepare_enable(priv->clk);
@@ -687,12 +687,12 @@ static int hix5hd2_net_open(struct net_device *dev)
return ret;
}
- priv->phy = of_phy_connect(dev, priv->phy_node,
- &hix5hd2_adjust_link, 0, priv->phy_mode);
- if (!priv->phy)
+ phy = of_phy_connect(dev, priv->phy_node,
+ &hix5hd2_adjust_link, 0, priv->phy_mode);
+ if (!phy)
return -ENODEV;
- phy_start(priv->phy);
+ phy_start(phy);
hix5hd2_hw_init(priv);
hix5hd2_rx_refill(priv);
@@ -716,9 +716,9 @@ static int hix5hd2_net_close(struct net_device *dev)
netif_stop_queue(dev);
hix5hd2_free_dma_desc_rings(priv);
- if (priv->phy) {
- phy_stop(priv->phy);
- phy_disconnect(priv->phy);
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
}
clk_disable_unprepare(priv->clk);
@@ -750,32 +750,10 @@ static const struct net_device_ops hix5hd2_netdev_ops = {
.ndo_set_mac_address = hix5hd2_net_set_mac_address,
};
-static int hix5hd2_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
-{
- struct hix5hd2_priv *priv = netdev_priv(net_dev);
-
- if (!priv->phy)
- return -ENODEV;
-
- return phy_ethtool_gset(priv->phy, cmd);
-}
-
-static int hix5hd2_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
-{
- struct hix5hd2_priv *priv = netdev_priv(net_dev);
-
- if (!priv->phy)
- return -ENODEV;
-
- return phy_ethtool_sset(priv->phy, cmd);
-}
-
static struct ethtool_ops hix5hd2_ethtools_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = hix5hd2_get_settings,
- .set_settings = hix5hd2_set_settings,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 5d3047cc8380..c54c6fac0d1d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -400,7 +400,6 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
if (!hdev->ops || !hdev->ops->get_handle ||
!hdev->ops->toggle_ring_irq ||
- !hdev->ops->toggle_queue_status ||
!hdev->ops->get_status || !hdev->ops->adjust_link)
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 529cb1341270..e093cbf26c8c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -363,6 +363,14 @@ enum hnae_port_type {
HNAE_PORT_DEBUG
};
+/* mac media type */
+enum hnae_media_type {
+ HNAE_MEDIA_TYPE_UNKNOWN = 0,
+ HNAE_MEDIA_TYPE_FIBER,
+ HNAE_MEDIA_TYPE_COPPER,
+ HNAE_MEDIA_TYPE_BACKPLANE,
+};
+
/* This struct defines the operation on the handle.
*
* get_handle(): (mandatory)
@@ -454,7 +462,6 @@ struct hnae_ae_ops {
int (*get_info)(struct hnae_handle *handle,
u8 *auto_neg, u16 *speed, u8 *duplex);
void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
- void (*toggle_queue_status)(struct hnae_queue *queue, u32 val);
void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
int (*set_loopback)(struct hnae_handle *handle,
enum hnae_loop loop_mode, int en);
@@ -473,6 +480,11 @@ struct hnae_ae_ops {
int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
int (*set_coalesce_frames)(struct hnae_handle *handle,
u32 coalesce_frames);
+ void (*get_coalesce_range)(struct hnae_handle *handle,
+ u32 *tx_frames_low, u32 *rx_frames_low,
+ u32 *tx_frames_high, u32 *rx_frames_high,
+ u32 *tx_usecs_low, u32 *rx_usecs_low,
+ u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
int (*get_mac_addr)(struct hnae_handle *handle, void **p);
int (*set_mac_addr)(struct hnae_handle *handle, void *p);
@@ -521,6 +533,7 @@ struct hnae_handle {
u32 eport_id;
u32 dport_id; /* v2 tx bd should fill the dport_id */
enum hnae_port_type port_type;
+ enum hnae_media_type media_type;
struct list_head node; /* list to hnae_ae_dev->handle_list */
struct hnae_buf_ops *bops; /* operation for the buffer */
struct hnae_queue **qs; /* array base of all queues */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index d37b778b4a1b..e28d960997af 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -134,6 +134,7 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
ae_handle->phy_dev = vf_cb->mac_cb->phy_dev;
ae_handle->if_support = vf_cb->mac_cb->if_support;
ae_handle->port_type = vf_cb->mac_cb->mac_type;
+ ae_handle->media_type = vf_cb->mac_cb->media_type;
ae_handle->dport_id = port_id;
return ae_handle;
@@ -247,12 +248,21 @@ static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
static int hns_ae_start(struct hnae_handle *handle)
{
int ret;
+ int k;
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
ret = hns_mac_vm_config_bc_en(mac_cb, 0, true);
if (ret)
return ret;
+ for (k = 0; k < handle->q_num; k++) {
+ if (AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver))
+ hns_rcb_int_clr_hw(handle->qs[k],
+ RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
+ else
+ hns_rcbv2_int_clr_hw(handle->qs[k],
+ RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
+ }
hns_ae_ring_enable_all(handle, 1);
msleep(100);
@@ -313,18 +323,6 @@ static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
hns_rcbv2_int_ctrl_hw(ring->q, flag, mask);
}
-static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val)
-{
- struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(queue->dev);
-
- if (AE_IS_VER1(dsaf_dev->dsaf_ver))
- hns_rcb_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
- else
- hns_rcbv2_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
-
- hns_rcb_start(queue, val);
-}
-
static int hns_ae_get_link_status(struct hnae_handle *handle)
{
u32 link_status;
@@ -465,6 +463,30 @@ static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
ring_pair->port_id_in_comm, coalesce_frames);
}
+static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
+ u32 *tx_frames_low, u32 *rx_frames_low,
+ u32 *tx_frames_high, u32 *rx_frames_high,
+ u32 *tx_usecs_low, u32 *rx_usecs_low,
+ u32 *tx_usecs_high, u32 *rx_usecs_high)
+{
+ struct dsaf_device *dsaf_dev;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+
+ *tx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
+ *rx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
+ *tx_frames_high =
+ (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
+ HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
+ *rx_frames_high =
+ (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
+ HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
+ *tx_usecs_low = 0;
+ *rx_usecs_low = 0;
+ *tx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
+ *rx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
+}
+
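The new range callback lets the ethtool path validate user input against what the rings can actually store; note the frame ceiling is min(desc_num - 1, HNS_RCB_MAX_COALESCED_FRAMES) as computed above. A hedged sketch of the consuming side (hypothetical helper):

    /* reject a requested coalesce value outside the window advertised
     * by get_coalesce_range()
     */
    static int check_coalesce_param(u32 req, u32 lo, u32 hi)
    {
        return (req < lo || req > hi) ? -EINVAL : 0;
    }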
void hns_ae_update_stats(struct hnae_handle *handle,
struct net_device_stats *net_stats)
{
@@ -587,6 +609,7 @@ void hns_ae_get_strings(struct hnae_handle *handle,
int idx;
struct hns_mac_cb *mac_cb;
struct hns_ppe_cb *ppe_cb;
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
u8 *p = data;
struct hnae_vf_cb *vf_cb;
@@ -609,13 +632,14 @@ void hns_ae_get_strings(struct hnae_handle *handle,
p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
if (mac_cb->mac_type == HNAE_PORT_SERVICE)
- hns_dsaf_get_strings(stringset, p, port);
+ hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
}
int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
{
u32 sset_count = 0;
struct hns_mac_cb *mac_cb;
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
assert(handle);
@@ -626,7 +650,7 @@ int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
sset_count += hns_mac_get_sset_count(mac_cb, stringset);
if (mac_cb->mac_type == HNAE_PORT_SERVICE)
- sset_count += hns_dsaf_get_sset_count(stringset);
+ sset_count += hns_dsaf_get_sset_count(dsaf_dev, stringset);
return sset_count;
}
@@ -782,7 +806,6 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.stop = hns_ae_stop,
.reset = hns_ae_reset,
.toggle_ring_irq = hns_ae_toggle_ring_irq,
- .toggle_queue_status = hns_ae_toggle_queue_status,
.get_status = hns_ae_get_link_status,
.get_info = hns_ae_get_mac_info,
.adjust_link = hns_ae_adjust_link,
@@ -796,6 +819,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
.set_coalesce_usecs = hns_ae_set_coalesce_usecs,
.set_coalesce_frames = hns_ae_set_coalesce_frames,
+ .get_coalesce_range = hns_ae_get_coalesce_range,
.set_promisc_mode = hns_ae_set_promisc_mode,
.set_mac_addr = hns_ae_set_mac_address,
.set_mc_addr = hns_ae_set_multicast_one,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index c526558e6367..3fb87e233c49 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -56,20 +56,6 @@ static const enum mac_mode g_mac_mode_1000[] = {
[PHY_INTERFACE_MODE_RTBI] = MAC_MODE_RTBI_1000
};
-static enum mac_mode hns_mac_dev_to_enet_if(const struct hns_mac_cb *mac_cb)
-{
- switch (mac_cb->max_speed) {
- case MAC_SPEED_100:
- return g_mac_mode_100[mac_cb->phy_if];
- case MAC_SPEED_1000:
- return g_mac_mode_1000[mac_cb->phy_if];
- case MAC_SPEED_10000:
- return MAC_MODE_XGMII_10000;
- default:
- return MAC_MODE_MII_100;
- }
-}
-
static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
{
switch (mac_cb->max_speed) {
@@ -134,7 +120,6 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
mac_cb->speed = speed;
mac_cb->half_duplex = !duplex;
- mac_ctrl_drv->mac_mode = hns_mac_dev_to_enet_if(mac_cb);
if (mac_ctrl_drv->adjust_link) {
ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv,
@@ -748,6 +733,18 @@ static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
mac_cb->mac_id, addr);
}
+#define MAC_MEDIA_TYPE_MAX_LEN 16
+
+static const struct {
+ enum hnae_media_type value;
+ const char *name;
+} media_type_defs[] = {
+ {HNAE_MEDIA_TYPE_UNKNOWN, "unknown" },
+ {HNAE_MEDIA_TYPE_FIBER, "fiber" },
+ {HNAE_MEDIA_TYPE_COPPER, "copper" },
+ {HNAE_MEDIA_TYPE_BACKPLANE, "backplane" },
+};
+
/**
*hns_mac_get_info - get mac information from device node
*@mac_cb: mac device
@@ -759,10 +756,13 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
struct device_node *np;
struct regmap *syscon;
struct of_phandle_args cpld_args;
+ const char *media_type;
+ u32 i;
u32 ret;
mac_cb->link = false;
mac_cb->half_duplex = false;
+ mac_cb->media_type = HNAE_MEDIA_TYPE_UNKNOWN;
mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
mac_cb->max_speed = mac_cb->speed;
@@ -864,6 +864,17 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
mac_cb->mac_id);
}
+ if (!fwnode_property_read_string(mac_cb->fw_port, "media-type",
+ &media_type)) {
+ for (i = 0; i < ARRAY_SIZE(media_type_defs); i++) {
+ if (!strncmp(media_type_defs[i].name, media_type,
+ MAC_MEDIA_TYPE_MAX_LEN)) {
+ mac_cb->media_type = media_type_defs[i].value;
+ break;
+ }
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 05a6e8f7a419..4cbdf14f5c16 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -335,6 +335,7 @@ struct hns_mac_cb {
u64 txpkt_for_led;
u64 rxpkt_for_led;
enum hnae_port_type mac_type;
+ enum hnae_media_type media_type;
phy_interface_t phy_if;
enum hnae_loop loop_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ac03c4a53e08..2ef4277d00b3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -114,9 +114,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev,
res);
- if (!dsaf_dev->sc_base) {
+ if (IS_ERR(dsaf_dev->sc_base)) {
dev_err(dsaf_dev->dev, "subctrl can not map!\n");
- return -ENOMEM;
+ return PTR_ERR(dsaf_dev->sc_base);
}
res = platform_get_resource(pdev, IORESOURCE_MEM,
@@ -128,9 +128,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev,
res);
- if (!dsaf_dev->sds_base) {
+ if (IS_ERR(dsaf_dev->sds_base)) {
dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
- return -ENOMEM;
+ return PTR_ERR(dsaf_dev->sds_base);
}
} else {
dsaf_dev->sub_ctrl = syscon;
@@ -146,9 +146,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
}
}
dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res);
- if (!dsaf_dev->ppe_base) {
+ if (IS_ERR(dsaf_dev->ppe_base)) {
dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n");
- return -ENOMEM;
+ return PTR_ERR(dsaf_dev->ppe_base);
}
dsaf_dev->ppe_paddr = res->start;
@@ -165,9 +165,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
}
}
dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
- if (!dsaf_dev->io_base) {
+ if (IS_ERR(dsaf_dev->io_base)) {
dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n");
- return -ENOMEM;
+ return PTR_ERR(dsaf_dev->io_base);
}
}
@@ -176,7 +176,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
desc_num > HNS_DSAF_MAX_DESC_CNT) {
dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n",
desc_num, ret);
- goto unmap_base_addr;
+ return -EINVAL;
}
dsaf_dev->desc_num = desc_num;
@@ -192,7 +192,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
if (ret < 0) {
dev_err(dsaf_dev->dev,
"get buf-size fail, ret=%d!\r\n", ret);
- goto unmap_base_addr;
+ return ret;
}
dsaf_dev->buf_size = buf_size;
@@ -200,7 +200,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
if (dsaf_dev->buf_size_type < 0) {
dev_err(dsaf_dev->dev,
"buf_size(%d) is wrong!\n", buf_size);
- goto unmap_base_addr;
+ return -EINVAL;
}
dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev);
@@ -213,32 +213,6 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n");
return 0;
-
-unmap_base_addr:
- if (dsaf_dev->io_base)
- iounmap(dsaf_dev->io_base);
- if (dsaf_dev->ppe_base)
- iounmap(dsaf_dev->ppe_base);
- if (dsaf_dev->sds_base)
- iounmap(dsaf_dev->sds_base);
- if (dsaf_dev->sc_base)
- iounmap(dsaf_dev->sc_base);
- return ret;
-}
-
-static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev)
-{
- if (dsaf_dev->io_base)
- iounmap(dsaf_dev->io_base);
-
- if (dsaf_dev->ppe_base)
- iounmap(dsaf_dev->ppe_base);
-
- if (dsaf_dev->sds_base)
- iounmap(dsaf_dev->sds_base);
-
- if (dsaf_dev->sc_base)
- iounmap(dsaf_dev->sc_base);
}
/**
@@ -542,10 +516,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110);
+ DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 48);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160);
+ DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 80);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
/* for no enable pfc mode */
@@ -553,29 +527,39 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128);
+ DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 192);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192);
+ DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 240);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
}
/* PPE */
- reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
- o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
- dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M,
- DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 10);
- dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M,
- DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 12);
- dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ for (i = 0; i < DSAFV2_SBM_PPE_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S, 3);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S, 52);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+
/* RoCEE */
for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) {
reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i;
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
- dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M,
- DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 2);
- dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M,
- DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 4);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S, 4);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
}
}
@@ -886,6 +870,8 @@ static void hns_dsaf_single_line_tbl_cfg(
struct dsaf_device *dsaf_dev,
u32 address, struct dsaf_tbl_line_cfg *ptbl_line)
{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address);
@@ -894,6 +880,8 @@ static void hns_dsaf_single_line_tbl_cfg(
/*Write Plus*/
hns_dsaf_tbl_line_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -907,6 +895,8 @@ static void hns_dsaf_tcam_uc_cfg(
struct dsaf_tbl_tcam_data *ptbl_tcam_data,
struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
/*Write Tcam Data*/
@@ -915,6 +905,8 @@ static void hns_dsaf_tcam_uc_cfg(
hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast);
/*Write Plus*/
hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -929,6 +921,8 @@ static void hns_dsaf_tcam_mc_cfg(
struct dsaf_tbl_tcam_data *ptbl_tcam_data,
struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
/*Write Tcam Data*/
@@ -937,6 +931,8 @@ static void hns_dsaf_tcam_mc_cfg(
hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast);
/*Write Plus*/
hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -946,6 +942,8 @@ static void hns_dsaf_tcam_mc_cfg(
*/
static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
@@ -958,6 +956,8 @@ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
/*Write Plus*/
hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -975,6 +975,8 @@ static void hns_dsaf_tcam_uc_get(
u32 tcam_read_data0;
u32 tcam_read_data4;
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
@@ -983,9 +985,9 @@ static void hns_dsaf_tcam_uc_get(
/*read tcam data*/
ptbl_tcam_data->tbl_tcam_data_high
- = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
- ptbl_tcam_data->tbl_tcam_data_low
= dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low
+ = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
/*read tcam mcast*/
tcam_read_data0 = dsaf_read_dev(dsaf_dev,
@@ -1007,6 +1009,8 @@ static void hns_dsaf_tcam_uc_get(
DSAF_TBL_UCAST_CFG1_OUT_PORT_S);
ptbl_tcam_ucast->tbl_ucast_dvc
= dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -1023,6 +1027,8 @@ static void hns_dsaf_tcam_mc_get(
{
u32 data_tmp;
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
@@ -1031,9 +1037,9 @@ static void hns_dsaf_tcam_mc_get(
/*read tcam data*/
ptbl_tcam_data->tbl_tcam_data_high =
- dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
- ptbl_tcam_data->tbl_tcam_data_low =
dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
/*read tcam mcast*/
ptbl_tcam_mcast->tbl_mcast_port_msk[0] =
@@ -1053,6 +1059,8 @@ static void hns_dsaf_tcam_mc_get(
ptbl_tcam_mcast->tbl_mcast_port_msk[4] =
dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M,
DSAF_TBL_MCAST_CFG4_VM128_112_S);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -1114,10 +1122,10 @@ int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
u32 en)
{
if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
- if (!en)
+ if (!en) {
dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n");
-
- return -EINVAL;
+ return -EINVAL;
+ }
}
dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
@@ -1377,6 +1385,7 @@ static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
if (HNS_DSAF_IS_DEBUG(dsaf_dev))
return 0;
+ spin_lock_init(&dsaf_dev->tcam_lock);
ret = hns_dsaf_init_hw(dsaf_dev);
if (ret)
return ret;
@@ -2122,11 +2131,24 @@ void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb)
hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode);
}
+static u32 hns_dsaf_get_inode_prio_reg(int index)
+{
+ int base_index, offset;
+ u32 base_addr = DSAF_INODE_IN_PRIO_PAUSE_BASE_REG;
+
+ base_index = (index + 1) / DSAF_REG_PER_ZONE;
+ offset = (index + 1) % DSAF_REG_PER_ZONE;
+
+ return base_addr + DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET * base_index +
+ DSAF_INODE_IN_PRIO_PAUSE_OFFSET * offset;
+}
+
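With the register defines added elsewhere in this patch (base 0x1C00, zone stride 0x100, register stride 0x50, DSAF_REG_PER_ZONE = 3), the (index + 1) bias skips slot 0 of the first zone. Working the math out for the eight priorities:

    /* prio 0 -> 0x1C50   prio 1 -> 0x1CA0   prio 2 -> 0x1D00
     * prio 3 -> 0x1D50   prio 4 -> 0x1DA0   prio 5 -> 0x1E00
     * prio 6 -> 0x1E50   prio 7 -> 0x1EA0
     * e.g. prio 4: (4 + 1) / 3 = 1 zone, (4 + 1) % 3 = 2 regs ->
     *      0x1C00 + 1 * 0x100 + 2 * 0x50 = 0x1DA0
     */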
void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
{
struct dsaf_hw_stats *hw_stats
= &dsaf_dev->hw_stats[node_num];
bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ int i;
u32 reg_tmp;
hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
@@ -2161,6 +2183,18 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+ /* PFC pause frame statistics stored in dsaf inode */
+ if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ reg_tmp = hns_dsaf_get_inode_prio_reg(i);
+ hw_stats->rx_pfc[i] += dsaf_read_dev(dsaf_dev,
+ reg_tmp + 0x4 * (u64)node_num);
+ hw_stats->tx_pfc[i] += dsaf_read_dev(dsaf_dev,
+ DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG +
+ DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET * i +
+ 0xF0 * (u64)node_num);
+ }
+ }
hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev,
DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num);
}
@@ -2498,38 +2532,53 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
p[i] = 0xdddddddd;
}
-static char *hns_dsaf_get_node_stats_strings(char *data, int node)
+static char *hns_dsaf_get_node_stats_strings(char *data, int node,
+ struct dsaf_device *dsaf_dev)
{
char *buff = data;
+ int i;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
+ if (node < DSAF_SERVICE_NW_NUM && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
+ ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts",
+ node, i);
+ snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
+ ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts",
+ node, i);
+ buff += ETH_GSTRING_LEN;
+ }
+ buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN;
+ }
snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
return buff;
}
@@ -2538,7 +2587,9 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
int node_num)
{
u64 *p = data;
+ int i;
struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num];
+ bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
p[0] = hw_stats->pad_drop;
p[1] = hw_stats->man_pkts;
@@ -2553,8 +2604,16 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
p[10] = hw_stats->local_addr_false;
p[11] = hw_stats->vlan_drop;
p[12] = hw_stats->stp_drop;
- p[13] = hw_stats->tx_pkts;
+ if (node_num < DSAF_SERVICE_NW_NUM && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ p[13 + i + 0 * DSAF_PRIO_NR] = hw_stats->rx_pfc[i];
+ p[13 + i + 1 * DSAF_PRIO_NR] = hw_stats->tx_pfc[i];
+ }
+ p[29] = hw_stats->tx_pkts;
+ return &p[30];
+ }
+ p[13] = hw_stats->tx_pkts;
return &p[14];
}
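The layout arithmetic matches the header change later in this patch: a v2 service node now exports 13 base counters + 2 * DSAF_PRIO_NR PFC counters + tx_pkts = 30 values, and the PPE node appended per port keeps its original 14:

    /* per-port stat count, v2 service port:
     *   node stats : 13 + 2 * DSAF_PRIO_NR + 1 = 30
     *   ppe stats  : 14
     *   total      : 44 == DSAF_V2_STATIC_NUM   (v1: 14 + 14 = 28)
     */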
@@ -2582,11 +2641,16 @@ void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port)
*@stringset: type of values in data
*return dsaf string name count
*/
-int hns_dsaf_get_sset_count(int stringset)
+int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset)
{
- if (stringset == ETH_SS_STATS)
- return DSAF_STATIC_NUM;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ if (stringset == ETH_SS_STATS) {
+ if (is_ver1)
+ return DSAF_STATIC_NUM;
+ else
+ return DSAF_V2_STATIC_NUM;
+ }
return 0;
}
@@ -2596,7 +2660,8 @@ int hns_dsaf_get_sset_count(int stringset)
*@data:strings name value
*@port:port index
*/
-void hns_dsaf_get_strings(int stringset, u8 *data, int port)
+void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+ struct dsaf_device *dsaf_dev)
{
char *buff = (char *)data;
int node = port;
@@ -2605,11 +2670,11 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port)
return;
/* for ge/xge node info */
- buff = hns_dsaf_get_node_stats_strings(buff, node);
+ buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
/* for ppe node info */
node = port + DSAF_PPE_INODE_BASE;
- (void)hns_dsaf_get_node_stats_strings(buff, node);
+ (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
}
/**
@@ -2645,7 +2710,7 @@ static int hns_dsaf_probe(struct platform_device *pdev)
ret = hns_dsaf_init(dsaf_dev);
if (ret)
- goto free_cfg;
+ goto free_dev;
ret = hns_mac_init(dsaf_dev);
if (ret)
@@ -2670,9 +2735,6 @@ uninit_mac:
uninit_dsaf:
hns_dsaf_free(dsaf_dev);
-free_cfg:
- hns_dsaf_free_cfg(dsaf_dev);
-
free_dev:
hns_dsaf_free_dev(dsaf_dev);
@@ -2695,8 +2757,6 @@ static int hns_dsaf_remove(struct platform_device *pdev)
hns_dsaf_free(dsaf_dev);
- hns_dsaf_free_cfg(dsaf_dev);
-
hns_dsaf_free_dev(dsaf_dev);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 2e55b3cc4515..1daf018d9071 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -39,6 +39,9 @@ struct hns_mac_cb;
#define DSAF_DUMP_REGS_NUM 504
#define DSAF_STATIC_NUM 28
+#define DSAF_V2_STATIC_NUM 44
+#define DSAF_PRIO_NR 8
+#define DSAF_REG_PER_ZONE 3
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
@@ -176,6 +179,8 @@ struct dsaf_hw_stats {
u64 local_addr_false;
u64 vlan_drop;
u64 stp_drop;
+ u64 rx_pfc[DSAF_PRIO_NR];
+ u64 tx_pfc[DSAF_PRIO_NR];
u64 tx_pkts;
};
@@ -317,6 +322,8 @@ struct dsaf_device {
struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
struct dsaf_int_stat int_stat;
+ /* spinlock serializing tcam table config */
+ spinlock_t tcam_lock;
};
static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev)
@@ -417,9 +424,10 @@ void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
-int hns_dsaf_get_sset_count(int stringset);
+int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset);
void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
-void hns_dsaf_get_strings(int stringset, u8 *data, int port);
+void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+ struct dsaf_device *dsaf_dev);
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
int hns_dsaf_get_regs_count(void);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 96cb628a7c5f..611b67b6f450 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -86,9 +86,10 @@ static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
mac_cb->cpld_led_value = value;
}
} else {
- dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
- CPLD_LED_DEFAULT_VALUE);
- mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+ value = (mac_cb->cpld_led_value) & (0x1 << DSAF_LED_ANCHOR_B);
+ dsaf_write_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg, value);
+ mac_cb->cpld_led_value = value;
}
}
@@ -114,7 +115,7 @@ static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
CPLD_LED_ON_VALUE);
dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
mac_cb->cpld_led_value);
- return 2;
+ break;
case HNAE_LED_INACTIVE:
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
CPLD_LED_DEFAULT_VALUE);
@@ -122,7 +123,8 @@ static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
mac_cb->cpld_led_value);
break;
default:
- break;
+ dev_err(mac_cb->dev, "invalid led state: %d!\n", status);
+ return -EINVAL;
}
return 0;
@@ -251,10 +253,9 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
reg_val_1 = 0x1 << port;
port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
/* there is difference between V1 and V2 in register.*/
- if (AE_IS_VER1(dsaf_dev->dsaf_ver))
- reg_val_2 = 0x1041041 << port_rst_off;
- else
- reg_val_2 = 0x2082082 << port_rst_off;
+ reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ?
+ 0x1041041 : 0x2082082;
+ reg_val_2 <<= port_rst_off;
if (!dereset) {
dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
@@ -270,8 +271,11 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
reg_val_1);
}
} else {
- reg_val_1 = 0x15540 << dsaf_dev->reset_offset;
- reg_val_2 = 0x100 << dsaf_dev->reset_offset;
+ reg_val_1 = 0x15540;
+ reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ? 0x100 : 0x40;
+
+ reg_val_1 <<= dsaf_dev->reset_offset;
+ reg_val_2 <<= dsaf_dev->reset_offset;
if (!dereset) {
dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
@@ -431,11 +435,6 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
*/
static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
{
- /* port 0-3 hilink4 base is serdes_vaddr + 0x00280000
- * port 4-7 hilink3 base is serdes_vaddr + 0x00200000
- */
- u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
- (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
const u8 lane_id[] = {
0, /* mac 0 -> lane 0 */
1, /* mac 1 -> lane 1 */
@@ -461,11 +460,30 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
}
if (mac_cb->serdes_ctrl) {
- u32 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+ u32 origin;
+
+ if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
+#define HILINK_ACCESS_SEL_CFG 0x40008
+ /* hilink4 & hilink3 use the same xge training and
+ * xge u adaptor. There is a hilink access sel cfg
+ * register to select which one to be configed
+ */
+ if ((!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) &&
+ (mac_cb->mac_id <= 3))
+ dsaf_write_syscon(mac_cb->serdes_ctrl,
+ HILINK_ACCESS_SEL_CFG, 0);
+ else
+ dsaf_write_syscon(mac_cb->serdes_ctrl,
+ HILINK_ACCESS_SEL_CFG, 3);
+ }
+
+ origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
dsaf_set_field(origin, 1ull << 10, 10, en);
dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
} else {
+ u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+ (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 3ce24097fcf4..ef1107777c08 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -540,7 +540,7 @@ int hns_rcb_set_coalesce_usecs(
}
if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
dev_err(rcb_common->dsaf_dev->dev,
- "error: not support coalesce %dus!\n", timeout);
+ "error: coalesce_usecs setting supports 0~1023us\n");
return -EINVAL;
}
hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index bd54dac82ee0..99b4e1ba0a94 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -40,7 +40,7 @@ struct rcb_common_cb;
#define HNS_RCB_DEF_COALESCED_FRAMES 50
#define HNS_RCB_CLK_FREQ_MHZ 350
#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
-#define HNS_RCB_DEF_COALESCED_USECS 3
+#define HNS_RCB_DEF_COALESCED_USECS 50
#define HNS_RCB_COMMON_ENDIAN 1
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 7c3b5103d151..235f74444b1d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -32,7 +32,7 @@
#define DSAFV2_SBM_NUM 8
#define DSAFV2_SBM_XGE_CHN 6
#define DSAFV2_SBM_PPE_CHN 1
-#define DASFV2_ROCEE_CRD_NUM 8
+#define DASFV2_ROCEE_CRD_NUM 1
#define DSAF_VOQ_NUM DSAF_NODE_NUM
#define DSAF_INODE_NUM DSAF_NODE_NUM
@@ -166,6 +166,9 @@
#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00
+#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00
+#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100
+#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50
#define DSAF_SBM_CFG_REG_0_REG 0x2000
#define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG 0x2004
@@ -175,7 +178,7 @@
#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C
#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C
#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C
-#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C
+#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C
#define DSAF_SBM_FREE_CNT_0_0_REG 0x2010
#define DSAF_SBM_FREE_CNT_1_0_REG 0x2014
#define DSAF_SBM_BP_CNT_0_0_REG 0x2018
@@ -232,6 +235,8 @@
#define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG 0x3074
#define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG 0x3078
#define DSAF_XOD_FIFO_STATUS_0_REG 0x307C
+#define DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG 0x3A00
+#define DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET 0x4
#define DSAF_VOQ_ECC_INVERT_EN_0_REG 0x4004
#define DSAF_VOQ_SRAM_PKT_NUM_0_REG 0x4008
@@ -791,6 +796,18 @@
#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9
#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9)
+#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0
+#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0)
+#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8
+#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M (((1ULL << 8) - 1) << 8)
+
+#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S (0)
+#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M (((1ULL << 6) - 1) << 0)
+#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S (6)
+#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M (((1ULL << 6) - 1) << 6)
+#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S (12)
+#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M (((1ULL << 6) - 1) << 12)
+
#define DSAF_TBL_TCAM_ADDR_S 0
#define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ad742a6f5879..d7e1f8c7ae92 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -600,6 +600,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
+ skb_reset_mac_header(skb);
prefetchw(skb->data);
length = le16_to_cpu(desc->rx.pkt_len);
@@ -761,16 +762,16 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
recv_pkts = 0, recv_bds = 0, clean_count = 0;
recv:
while (recv_pkts < budget && recv_bds < num) {
- /* reuse or realloc buffers*/
+ /* reuse or realloc buffers */
if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
hns_nic_alloc_rx_buffers(ring_data, clean_count);
clean_count = 0;
}
- /* poll one pkg*/
+ /* poll one pkt */
err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
if (unlikely(!skb)) /* this fault cannot be repaired */
- break;
+ goto out;
recv_bds += bnum;
clean_count += bnum;
@@ -796,6 +797,7 @@ recv:
}
}
+out:
/* make sure all data has been written before submit */
if (clean_count > 0)
hns_nic_alloc_rx_buffers(ring_data, clean_count);
@@ -990,8 +992,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
+ int state = 1;
+
+ if (priv->phy) {
+ h->dev->ops->adjust_link(h, ndev->phydev->speed,
+ ndev->phydev->duplex);
+ state = priv->phy->link;
+ }
+ state = state && h->dev->ops->get_status(h);
- h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
+ if (state != priv->link) {
+ if (state) {
+ netif_carrier_on(ndev);
+ netif_tx_wake_all_queues(ndev);
+ netdev_info(ndev, "link up\n");
+ } else {
+ netif_carrier_off(ndev);
+ netdev_info(ndev, "link down\n");
+ }
+ priv->link = state;
+ }
}
/**
@@ -1181,7 +1201,7 @@ static int hns_nic_net_up(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
- int i, j, k;
+ int i, j;
int ret;
ret = hns_nic_init_irq(priv);
@@ -1196,9 +1216,6 @@ static int hns_nic_net_up(struct net_device *ndev)
goto out_has_some_queues;
}
- for (k = 0; k < h->q_num; k++)
- h->dev->ops->toggle_queue_status(h->qs[k], 1);
-
ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
if (ret)
goto out_set_mac_addr_err;
@@ -1218,8 +1235,6 @@ static int hns_nic_net_up(struct net_device *ndev)
out_start_err:
netif_stop_queue(ndev);
out_set_mac_addr_err:
- for (k = 0; k < h->q_num; k++)
- h->dev->ops->toggle_queue_status(h->qs[k], 0);
out_has_some_queues:
for (j = i - 1; j >= 0; j--)
hns_nic_ring_close(ndev, j);
@@ -1426,7 +1441,6 @@ static int hns_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
- struct hnae_handle *h = priv->ae_handle;
switch (priv->enet_ver) {
case AE_VERSION_1:
@@ -1439,11 +1453,9 @@ static int hns_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
/* The chip only supports 7*4096 */
netif_set_gso_max_size(netdev, 7 * 4096);
- h->dev->ops->set_tso_stats(h, 1);
} else {
priv->ops.fill_desc = fill_v2_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
- h->dev->ops->set_tso_stats(h, 0);
}
break;
}
@@ -1576,27 +1588,14 @@ static void hns_nic_update_link_status(struct net_device *netdev)
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
- int state = 1;
- if (priv->phy) {
- if (!genphy_update_link(priv->phy))
- state = priv->phy->link;
- else
- state = 0;
- }
- state = state && h->dev->ops->get_status(h);
+ if (h->phy_dev) {
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+ return;
- if (state != priv->link) {
- if (state) {
- netif_carrier_on(netdev);
- netif_tx_wake_all_queues(netdev);
- netdev_info(netdev, "link up\n");
- } else {
- netif_carrier_off(netdev);
- netdev_info(netdev, "link down\n");
- }
- priv->link = state;
+ (void)genphy_read_status(h->phy_dev);
}
+ hns_nic_adjust_link(netdev);
}
/* for dumping key regs */
@@ -1632,7 +1631,7 @@ static void hns_nic_dump(struct hns_nic_priv *priv)
}
}
-/* for resetting suntask*/
+/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
enum hnae_port_type type = priv->ae_handle->port_type;
@@ -1802,11 +1801,14 @@ static void hns_nic_set_priv_ops(struct net_device *netdev)
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
/* This chip only supports 7*4096 */
netif_set_gso_max_size(netdev, 7 * 4096);
- h->dev->ops->set_tso_stats(h, 1);
} else {
priv->ops.fill_desc = fill_v2_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
}
+ /* enable TSO at init time;
+ * TSO on/off is then controlled per descriptor via the TSE bit in the BD
+ */
+ h->dev->ops->set_tso_stats(h, 1);
}
}
@@ -1971,7 +1973,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
dev_dbg(dev, "set mask to 64bit\n");
else
- dev_err(dev, "set mask to 32bit fail!\n");
+ dev_err(dev, "set mask to 64bit fail!\n");
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index a809f52f6c16..ab33487a5321 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -49,7 +49,7 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
h = priv->ae_handle;
if (priv->phy) {
- if (!genphy_update_link(priv->phy))
+ if (!genphy_read_status(priv->phy))
link_stat = priv->phy->link;
else
link_stat = 0;
@@ -165,13 +165,21 @@ static int hns_nic_get_settings(struct net_device *net_dev,
cmd->advertising |= ADVERTISED_10000baseKR_Full;
}
- if (h->port_type == HNAE_PORT_SERVICE) {
+ switch (h->media_type) {
+ case HNAE_MEDIA_TYPE_FIBER:
cmd->port = PORT_FIBRE;
- cmd->supported |= SUPPORTED_Pause;
- } else {
+ break;
+ case HNAE_MEDIA_TYPE_COPPER:
cmd->port = PORT_TP;
+ break;
+ case HNAE_MEDIA_TYPE_UNKNOWN:
+ default:
+ break;
}
+ if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG))
+ cmd->supported |= SUPPORTED_Pause;
+
cmd->transceiver = XCVR_EXTERNAL;
cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22);
hns_get_mdix_mode(net_dev, cmd);
@@ -242,6 +250,7 @@ static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = {
static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
{
#define COPPER_CONTROL_REG 0
+#define PHY_POWER_DOWN BIT(11)
#define PHY_LOOP_BACK BIT(14)
u16 val = 0;
@@ -252,33 +261,40 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
/* speed : 1000M */
phy_write(phy_dev, HNS_PHY_PAGE_REG, 2);
phy_write(phy_dev, 21, 0x1046);
+
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
/* Force Master */
phy_write(phy_dev, 9, 0x1F00);
+
/* Soft-reset */
phy_write(phy_dev, 0, 0x9140);
/* If autoneg is disabled, two soft-reset operations are needed */
phy_write(phy_dev, 0, 0x9140);
- phy_write(phy_dev, 22, 0xFA);
+
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
/* Default is 0x0400 */
phy_write(phy_dev, 1, 0x418);
/* Force 1000M Link, Default is 0x0200 */
phy_write(phy_dev, 7, 0x20C);
- phy_write(phy_dev, 22, 0);
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
- /* Enable MAC loop-back */
+ /* Enable PHY loop-back */
val = phy_read(phy_dev, COPPER_CONTROL_REG);
val |= PHY_LOOP_BACK;
+ val &= ~PHY_POWER_DOWN;
phy_write(phy_dev, COPPER_CONTROL_REG, val);
} else {
- phy_write(phy_dev, 22, 0xFA);
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
phy_write(phy_dev, 1, 0x400);
phy_write(phy_dev, 7, 0x200);
- phy_write(phy_dev, 22, 0);
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
+ phy_write(phy_dev, 9, 0xF00);
val = phy_read(phy_dev, COPPER_CONTROL_REG);
val &= ~PHY_LOOP_BACK;
+ val |= PHY_POWER_DOWN;
phy_write(phy_dev, COPPER_CONTROL_REG, val);
}
return 0;
@@ -339,28 +355,16 @@ static int __lb_up(struct net_device *ndev,
hns_nic_net_reset(ndev);
- if (priv->phy) {
- phy_disconnect(priv->phy);
- msleep(100);
-
- ret = hns_nic_init_phy(ndev, h);
- if (ret)
- return ret;
- }
-
ret = __lb_setup(ndev, loop_mode);
if (ret)
return ret;
- msleep(100);
+ msleep(200);
ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
if (ret)
return ret;
- if (priv->phy)
- phy_start(priv->phy);
-
/* adjust the link speed and duplex */
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
speed = 1000;
@@ -561,9 +565,6 @@ static int __lb_down(struct net_device *ndev)
__func__,
ret);
- if (priv->phy)
- phy_stop(priv->phy);
-
if (h->dev->ops->stop)
h->dev->ops->stop(h);
@@ -758,6 +759,16 @@ static int hns_get_coalesce(struct net_device *net_dev,
&ec->tx_max_coalesced_frames,
&ec->rx_max_coalesced_frames);
+ ops->get_coalesce_range(priv->ae_handle,
+ &ec->tx_max_coalesced_frames_low,
+ &ec->rx_max_coalesced_frames_low,
+ &ec->tx_max_coalesced_frames_high,
+ &ec->rx_max_coalesced_frames_high,
+ &ec->tx_coalesce_usecs_low,
+ &ec->rx_coalesce_usecs_low,
+ &ec->tx_coalesce_usecs_high,
+ &ec->rx_coalesce_usecs_high);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 761a32fceceb..33f4c483af0f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -37,9 +37,19 @@
#define MDIO_TIMEOUT 1000000
+struct hns_mdio_sc_reg {
+ u16 mdio_clk_en;
+ u16 mdio_clk_dis;
+ u16 mdio_reset_req;
+ u16 mdio_reset_dreq;
+ u16 mdio_clk_st;
+ u16 mdio_reset_st;
+};
+
struct hns_mdio_device {
void *vbase; /* mdio reg base address */
struct regmap *subctrl_vbase;
+ struct hns_mdio_sc_reg sc_reg;
};
/* mdio reg */
@@ -93,7 +103,6 @@ enum mdio_c45_op_seq {
#define MDIO_SC_CLK_DIS 0x33C
#define MDIO_SC_RESET_REQ 0xA38
#define MDIO_SC_RESET_DREQ 0xA3C
-#define MDIO_SC_CTRL 0x2010
#define MDIO_SC_CLK_ST 0x531C
#define MDIO_SC_RESET_ST 0x5A1C
@@ -353,6 +362,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
static int hns_mdio_reset(struct mii_bus *bus)
{
struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ const struct hns_mdio_sc_reg *sc_reg;
int ret;
if (dev_of_node(bus->parent)) {
@@ -361,9 +371,10 @@ static int hns_mdio_reset(struct mii_bus *bus)
return -ENODEV;
}
+ sc_reg = &mdio_dev->sc_reg;
/* 1. reset req, and read reset st check */
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1,
- MDIO_SC_RESET_ST, 0x1,
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_req,
+ 0x1, sc_reg->mdio_reset_st, 0x1,
MDIO_CHECK_SET_ST);
if (ret) {
dev_err(&bus->dev, "MDIO reset fail\n");
@@ -371,8 +382,8 @@ static int hns_mdio_reset(struct mii_bus *bus)
}
/* 2. dis clk, and read clk st check */
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS,
- 0x1, MDIO_SC_CLK_ST, 0x1,
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_dis,
+ 0x1, sc_reg->mdio_clk_st, 0x1,
MDIO_CHECK_CLR_ST);
if (ret) {
dev_err(&bus->dev, "MDIO dis clk fail\n");
@@ -380,8 +391,8 @@ static int hns_mdio_reset(struct mii_bus *bus)
}
/* 3. reset dreq, and read reset st check */
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1,
- MDIO_SC_RESET_ST, 0x1,
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_dreq,
+ 0x1, sc_reg->mdio_reset_st, 0x1,
MDIO_CHECK_CLR_ST);
if (ret) {
dev_err(&bus->dev, "MDIO dis clk fail\n");
@@ -389,8 +400,8 @@ static int hns_mdio_reset(struct mii_bus *bus)
}
/* 4. en clk, and read clk st check */
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN,
- 0x1, MDIO_SC_CLK_ST, 0x1,
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_en,
+ 0x1, sc_reg->mdio_clk_st, 0x1,
MDIO_CHECK_SET_ST);
if (ret)
dev_err(&bus->dev, "MDIO en clk fail\n");
@@ -458,13 +469,54 @@ static int hns_mdio_probe(struct platform_device *pdev)
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%s", "Mii",
dev_name(&pdev->dev));
if (dev_of_node(&pdev->dev)) {
- mdio_dev->subctrl_vbase = syscon_node_to_regmap(
- of_parse_phandle(pdev->dev.of_node,
- "subctrl-vbase", 0));
- if (IS_ERR(mdio_dev->subctrl_vbase)) {
- dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n");
+ struct of_phandle_args reg_args;
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "subctrl-vbase",
+ 4,
+ 0,
+ &reg_args);
+ if (!ret) {
+ mdio_dev->subctrl_vbase =
+ syscon_node_to_regmap(reg_args.np);
+ if (IS_ERR(mdio_dev->subctrl_vbase)) {
+ dev_warn(&pdev->dev, "syscon_node_to_regmap error\n");
+ mdio_dev->subctrl_vbase = NULL;
+ } else {
+ if (reg_args.args_count == 4) {
+ mdio_dev->sc_reg.mdio_clk_en =
+ (u16)reg_args.args[0];
+ mdio_dev->sc_reg.mdio_clk_dis =
+ (u16)reg_args.args[0] + 4;
+ mdio_dev->sc_reg.mdio_reset_req =
+ (u16)reg_args.args[1];
+ mdio_dev->sc_reg.mdio_reset_dreq =
+ (u16)reg_args.args[1] + 4;
+ mdio_dev->sc_reg.mdio_clk_st =
+ (u16)reg_args.args[2];
+ mdio_dev->sc_reg.mdio_reset_st =
+ (u16)reg_args.args[3];
+ } else {
+ /* fall back to the legacy register layout for compatibility */
+ mdio_dev->sc_reg.mdio_clk_en =
+ MDIO_SC_CLK_EN;
+ mdio_dev->sc_reg.mdio_clk_dis =
+ MDIO_SC_CLK_DIS;
+ mdio_dev->sc_reg.mdio_reset_req =
+ MDIO_SC_RESET_REQ;
+ mdio_dev->sc_reg.mdio_reset_dreq =
+ MDIO_SC_RESET_DREQ;
+ mdio_dev->sc_reg.mdio_clk_st =
+ MDIO_SC_CLK_ST;
+ mdio_dev->sc_reg.mdio_reset_st =
+ MDIO_SC_RESET_ST;
+ }
+ }
+ } else {
+ dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret);
mdio_dev->subctrl_vbase = NULL;
}
+
ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
} else if (is_acpi_node(pdev->dev.fwnode)) {
/* Clear all the IRQ properties */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 864cb21351a4..ecdb6854a898 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2121,7 +2121,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff;
+ struct ibmvnic_error_buff *error_buff, *tmp;
unsigned long flags;
bool found = false;
int i;
@@ -2133,7 +2133,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
}
spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_for_each_entry(error_buff, &adapter->errors, list)
+ list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
if (error_buff->error_id == crq->request_error_rsp.error_id) {
found = true;
list_del(&error_buff->list);
@@ -3141,14 +3141,14 @@ static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
- struct ibmvnic_inflight_cmd *inflight_cmd;
+ struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff;
+ struct ibmvnic_error_buff *error_buff, *tmp2;
unsigned long flags;
unsigned long flags2;
spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
+ list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
switch (inflight_cmd->crq.generic.cmd) {
case LOGIN:
dma_unmap_single(dev, adapter->login_buf_token,
@@ -3165,8 +3165,8 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
break;
case REQUEST_ERROR_INFO:
spin_lock_irqsave(&adapter->error_list_lock, flags2);
- list_for_each_entry(error_buff, &adapter->errors,
- list) {
+ list_for_each_entry_safe(error_buff, tmp2,
+ &adapter->errors, list) {
dma_unmap_single(dev, error_buff->dma,
error_buff->len,
DMA_FROM_DEVICE);
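The list_for_each_entry_safe() conversions above matter because the loop bodies call list_del() (and sometimes kfree()) on the cursor; the plain iterator would then read next-pointers out of an unlinked or freed node. The _safe variant caches the successor before the body runs. A runnable userspace distillation of that cache-the-next-before-unlinking idea, using plain pointers instead of <linux/list.h>:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

/* Delete matching nodes mid-walk: cache the successor before freeing,
 * which is exactly what list_for_each_entry_safe() does for kernel lists.
 */
static void delete_matching(struct node **head, int id)
{
        struct node **pp = head;

        while (*pp) {
                struct node *cur = *pp;
                struct node *next = cur->next;  /* cached before free() */

                if (cur->id == id) {
                        *pp = next;
                        free(cur);
                } else {
                        pp = &cur->next;
                }
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 5; i++) {
                struct node *n = malloc(sizeof(*n));

                n->id = i % 2;
                n->next = head;
                head = n;
        }
        delete_matching(&head, 1);
        for (struct node *n = head; n; n = n->next)
                printf("%d\n", n->id);          /* only the 0s remain */
        return 0;
}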
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 714bd1014ddb..c0e17433f623 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -167,17 +167,6 @@ config IXGBE
To compile this driver as a module, choose M here. The module
will be called ixgbe.
-config IXGBE_VXLAN
- bool "Virtual eXtensible Local Area Network Support"
- default n
- depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m)
- ---help---
- This allows one to create VXLAN virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. VXLAN is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to use Virtual eXtensible Local Area Network
- (VXLAN) in the driver.
-
config IXGBE_HWMON
bool "Intel(R) 10GbE PCI Express adapters HWMON support"
default y
@@ -236,27 +225,6 @@ config I40E
To compile this driver as a module, choose M here. The module
will be called i40e.
-config I40E_VXLAN
- bool "Virtual eXtensible Local Area Network Support"
- default n
- depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
- ---help---
- This allows one to create VXLAN virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. VXLAN is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to use Virtual eXtensible Local Area Network
- (VXLAN) in the driver.
-
-config I40E_GENEVE
- bool "Generic Network Virtualization Encapsulation (GENEVE) Support"
- depends on I40E && GENEVE && !(I40E=y && GENEVE=m)
- default n
- ---help---
- This allows one to create GENEVE virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. GENEVE is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to use GENEVE in the driver.
-
config I40E_DCB
bool "Data Center Bridging (DCB) Support"
default n
@@ -307,15 +275,4 @@ config FM10K
To compile this driver as a module, choose M here. The module
will be called fm10k. MSI-X interrupt support is required
-config FM10K_VXLAN
- bool "Virtual eXtensible Local Area Network Support"
- default n
- depends on FM10K && VXLAN && !(FM10K=y && VXLAN=m)
- ---help---
- This allows one to create VXLAN virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. VXLAN is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to use Virtual eXtensible Local Area Network
- (VXLAN) in the driver.
-
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 75e60897b7e7..8294c9a10bd2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -154,6 +154,16 @@ void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
writel(val, hw->hw_addr + reg);
}
+static bool e1000e_vlan_used(struct e1000_adapter *adapter)
+{
+ u16 vid;
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ return true;
+
+ return false;
+}
+
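e1000e_vlan_used() relies on for_each_set_bit() yielding its first iteration only if some bit is set, which turns the loop into an "any VLAN registered?" test; the kernel's !bitmap_empty(adapter->active_vlans, VLAN_N_VID) would express the same thing. A tiny runnable equivalent over a word-array bitmap (size hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define NWORDS 64       /* hypothetical map size: 64 longs = 4096 bits */

/* True as soon as any bit is set; mirrors the early-return loop above. */
static bool bitmap_any_set(const unsigned long map[NWORDS])
{
        for (int i = 0; i < NWORDS; i++)
                if (map[i])
                        return true;
        return false;
}

int main(void)
{
        unsigned long vlans[NWORDS] = { 0 };

        printf("%d\n", bitmap_any_set(vlans)); /* 0: no VLANs registered */
        vlans[3] |= 1UL << 7;                  /* register one VLAN id */
        printf("%d\n", bitmap_any_set(vlans)); /* 1: at least one set */
        return 0;
}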
/**
* e1000_regdump - register printout routine
* @hw: pointer to the HW structure
@@ -2789,7 +2799,7 @@ static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
}
/**
- * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
* @adapter: board private structure to initialize
**/
static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
@@ -3443,7 +3453,8 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
ew32(RCTL, rctl);
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX ||
+ e1000e_vlan_used(adapter))
e1000e_vlan_strip_enable(adapter);
else
e1000e_vlan_strip_disable(adapter);
@@ -4352,7 +4363,8 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
time_delta = systim_next - systim;
temp = time_delta;
- rem = do_div(temp, incvalue);
+ /* VMware users have seen incvalue of zero; avoid dividing by zero */
+ rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
systim = systim_next;
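The guard is needed because the kernel's do_div() divides its first argument in place and returns the remainder, so incvalue == 0 would fault. With the fix, a zero divisor degrades to "remainder is nonzero iff the delta is nonzero", which keeps the surrounding retry logic intact. A hedged userspace model (do_div_model() stands in for the real macro):

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the kernel's do_div(): divide n in place and
 * return the remainder (the real do_div is a macro on an lvalue).
 */
static uint32_t do_div_model(uint64_t *n, uint32_t base)
{
        uint32_t rem = (uint32_t)(*n % base);

        *n /= base;
        return rem;
}

int main(void)
{
        uint64_t time_delta = 1234567;
        uint64_t temp = time_delta;
        uint32_t incvalue = 0;          /* the pathological case seen on VMware */
        uint32_t rem;

        /* Same shape as the fix: skip the division when the base is zero. */
        rem = incvalue ? do_div_model(&temp, incvalue) : (time_delta != 0);
        printf("rem = %u\n", rem);      /* 1: "not an exact multiple" */
        return 0;
}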
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index fcf106e545c5..e98b86bf0ca1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -406,7 +406,7 @@ static inline u16 fm10k_desc_unused(struct fm10k_ring *ring)
(&(((union fm10k_rx_desc *)((R)->desc))[i]))
#define FM10K_MAX_TXD_PWR 14
-#define FM10K_MAX_DATA_PER_TXD BIT(FM10K_MAX_TXD_PWR)
+#define FM10K_MAX_DATA_PER_TXD (1u << FM10K_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 9c0d87503977..9b5195435c87 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -983,9 +983,10 @@ void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
/* generate a new table if we weren't given one */
for (j = 0; j < 4; j++) {
if (indir)
- n = indir[i + j];
+ n = indir[4 * i + j];
else
- n = ethtool_rxfh_indir_default(i + j, rss_i);
+ n = ethtool_rxfh_indir_default(4 * i + j,
+ rss_i);
table[j] = n;
}
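The indexing fix matters because each 32-bit RETA register packs four 8-bit queue indices, so the redirection-table entry for register i, byte j is 4*i + j; the old i + j both re-used and skipped entries. A runnable sketch of the packing, with a hypothetical register count:

#include <stdint.h>
#include <stdio.h>

#define RETA_REGS 32    /* hypothetical: 32 registers x 4 entries = 128 */

int main(void)
{
        uint8_t indir[RETA_REGS * 4];
        uint32_t reta[RETA_REGS];

        for (int e = 0; e < RETA_REGS * 4; e++)
                indir[e] = e % 8;               /* spread over 8 queues */

        /* Entry for register i, byte j lives at indir[4 * i + j]. */
        for (int i = 0; i < RETA_REGS; i++) {
                uint32_t table = 0;

                for (int j = 0; j < 4; j++)
                        table |= (uint32_t)indir[4 * i + j] << (8 * j);
                reta[i] = table;
        }
        printf("reta[1] = 0x%08x\n", (unsigned int)reta[1]); /* 0x07060504 */
        return 0;
}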
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 0e166e9c90c8..a9ccc1eb3ea4 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -56,7 +56,7 @@ static int __init fm10k_init_module(void)
pr_info("%s\n", fm10k_copyright);
/* create driver workqueue */
- fm10k_workqueue = create_workqueue("fm10k");
+ fm10k_workqueue = alloc_workqueue("fm10k", WQ_MEM_RECLAIM, 0);
fm10k_dbg_init();
@@ -77,7 +77,6 @@ static void __exit fm10k_exit_module(void)
fm10k_dbg_exit();
/* destroy driver workqueue */
- flush_workqueue(fm10k_workqueue);
destroy_workqueue(fm10k_workqueue);
}
module_exit(fm10k_exit_module);
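create_workqueue() is the legacy interface; alloc_workqueue() with WQ_MEM_RECLAIM guarantees a rescuer thread, so work queued by a driver on the packet-reclaim path can still make progress under memory pressure. The flush_workqueue() call is dropped because destroy_workqueue() already drains pending work. A short sketch of that pairing under those assumptions (module names hypothetical):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;     /* hypothetical driver queue */

static int example_init(void)
{
        /* Rescuer-backed queue: safe to rely on during memory reclaim. */
        example_wq = alloc_workqueue("example", WQ_MEM_RECLAIM, 0);
        if (!example_wq)
                return -ENOMEM;
        return 0;
}

static void example_exit(void)
{
        /* Drains all pending and in-flight work, then frees the queue. */
        destroy_workqueue(example_wq);
}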
@@ -272,7 +271,7 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
unsigned int truesize = FM10K_RX_BUFSZ;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = ALIGN(size, 512);
#endif
unsigned int pull_len;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2a08d3f5b6df..d00cb193da9a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -20,9 +20,7 @@
#include "fm10k.h"
#include <linux/vmalloc.h>
-#ifdef CONFIG_FM10K_VXLAN
-#include <net/vxlan.h>
-#endif /* CONFIG_FM10K_VXLAN */
+#include <net/udp_tunnel.h>
/**
* fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
@@ -436,6 +434,7 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
* @netdev: network interface device structure
* @sa_family: Address family of new port
* @port: port number used for VXLAN
+ * @ti: Tunnel endpoint information (carries the UDP encapsulation type)
*
* This function is called when a new VXLAN interface has added a new port
* number to the range that is currently in use for VXLAN. The new port
@@ -444,18 +443,21 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
* is always used as the VXLAN port number for offloads.
**/
static void fm10k_add_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port) {
+ struct udp_tunnel_info *ti)
+{
struct fm10k_intfc *interface = netdev_priv(dev);
struct fm10k_vxlan_port *vxlan_port;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
/* only the PF supports configuring tunnels */
if (interface->hw.mac.type != fm10k_mac_pf)
return;
/* existing ports are pulled out so our new entry is always last */
fm10k_vxlan_port_for_each(vxlan_port, interface) {
- if ((vxlan_port->port == port) &&
- (vxlan_port->sa_family == sa_family)) {
+ if ((vxlan_port->port == ti->port) &&
+ (vxlan_port->sa_family == ti->sa_family)) {
list_del(&vxlan_port->list);
goto insert_tail;
}
@@ -465,8 +467,8 @@ static void fm10k_add_vxlan_port(struct net_device *dev,
vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC);
if (!vxlan_port)
return;
- vxlan_port->port = port;
- vxlan_port->sa_family = sa_family;
+ vxlan_port->port = ti->port;
+ vxlan_port->sa_family = ti->sa_family;
insert_tail:
/* add new port value to list */
@@ -480,6 +482,7 @@ insert_tail:
* @netdev: network interface device structure
* @sa_family: Address family of freed port
* @port: port number used for VXLAN
+ * @ti: Tunnel endpoint information (carries the UDP encapsulation type)
*
* This function is called when a new VXLAN interface has freed a port
* number from the range that is currently in use for VXLAN. The freed
@@ -487,17 +490,20 @@ insert_tail:
* the port number for offloads.
**/
static void fm10k_del_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port) {
+ struct udp_tunnel_info *ti)
+{
struct fm10k_intfc *interface = netdev_priv(dev);
struct fm10k_vxlan_port *vxlan_port;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
if (interface->hw.mac.type != fm10k_mac_pf)
return;
/* find the port in the list and free it */
fm10k_vxlan_port_for_each(vxlan_port, interface) {
- if ((vxlan_port->port == port) &&
- (vxlan_port->sa_family == sa_family)) {
+ if ((vxlan_port->port == ti->port) &&
+ (vxlan_port->sa_family == ti->sa_family)) {
list_del(&vxlan_port->list);
kfree(vxlan_port);
break;
@@ -553,10 +559,8 @@ int fm10k_open(struct net_device *netdev)
if (err)
goto err_set_queues;
-#ifdef CONFIG_FM10K_VXLAN
/* update VXLAN port configuration */
- vxlan_get_rx_port(netdev);
-#endif
+ udp_tunnel_get_rx_info(netdev);
fm10k_up(interface);
@@ -1375,8 +1379,8 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
- .ndo_add_vxlan_port = fm10k_add_vxlan_port,
- .ndo_del_vxlan_port = fm10k_del_vxlan_port,
+ .ndo_udp_tunnel_add = fm10k_add_vxlan_port,
+ .ndo_udp_tunnel_del = fm10k_del_vxlan_port,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
#ifdef CONFIG_NET_POLL_CONTROLLER
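One subtlety in fm10k_add_vxlan_port() above: a re-announced port is unlinked and re-inserted at the tail, so the list head is always the oldest still-registered port, which is what the hardware offload tracks. A runnable miniature of that move-to-tail bookkeeping (types and names hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct port_entry {
        uint16_t port;
        struct port_entry *next;
};

static struct port_entry *head;

/* Add a port; a re-announced port is unlinked and appended at the tail,
 * so the head stays the oldest live registration.
 */
static void port_add(uint16_t port)
{
        struct port_entry **pp = &head, **tail, *e = NULL;

        while (*pp) {                   /* unlink an existing entry */
                if ((*pp)->port == port) {
                        e = *pp;
                        *pp = e->next;
                        break;
                }
                pp = &(*pp)->next;
        }
        if (!e) {                       /* not found: make a fresh entry */
                e = malloc(sizeof(*e));
                e->port = port;
        }
        for (tail = &head; *tail; tail = &(*tail)->next)
                ;                       /* append: order reflects recency */
        e->next = NULL;
        *tail = e;
}

int main(void)
{
        port_add(4789);
        port_add(8472);
        port_add(4789);                 /* re-announce moves 4789 to tail */
        printf("offloaded port: %d\n", head->port);     /* 8472 */
        return 0;
}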
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 9c44739da5e2..e83fc8afb30f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -283,6 +283,7 @@ struct i40e_pf {
#endif /* I40E_FCOE */
u16 num_lan_qps; /* num lan queues this PF has set up */
u16 num_lan_msix; /* num queue vectors for the base PF vsi */
+ u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */
u16 num_iwarp_msix; /* num of iwarp vectors for this PF */
int iwarp_base_vector;
int queues_left; /* queues left unclaimed */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 422b41d61c9a..e447dc435464 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1967,6 +1967,62 @@ aq_add_vsi_exit:
}
/**
+ * i40e_aq_set_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_clear_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = cpu_to_le16(0);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
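The two new AQ wrappers are mirror images: both issue the set-VSI-promiscuous-modes opcode with valid_flags selecting the DEFAULT bit, and only promiscuous_flags differs (bit set vs. cleared). A hedged sketch of one shared helper expressing both; this is not what the driver ships, just the common shape, assuming the AQ types visible in this file:

/* Sketch only: one helper that expresses both wrappers, assuming the AQ
 * types and i40e_asq_send_command() already visible in this file.
 */
static i40e_status i40e_aq_write_default_vsi(struct i40e_hw *hw, u16 seid,
                                             bool set,
                                             struct i40e_asq_cmd_details *cmd_details)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
                (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_set_vsi_promiscuous_modes);
        /* valid_flags says "I am updating the DEFAULT bit";
         * promiscuous_flags carries that bit's new value.
         */
        cmd->promiscuous_flags = cpu_to_le16(set ? I40E_AQC_SET_VSI_DEFAULT : 0);
        cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
        cmd->seid = cpu_to_le16(seid);

        return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
}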
+/**
* i40e_aq_set_vsi_unicast_promiscuous
* @hw: pointer to the hw struct
* @seid: vsi number
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 5e8d84ff7d5f..4962e855fbd3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -313,8 +313,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
*advertising |= ADVERTISED_Autoneg |
ADVERTISED_40000baseCR4_Full;
}
- if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
- !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+ if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
*supported |= SUPPORTED_Autoneg |
SUPPORTED_100baseT_Full;
*advertising |= ADVERTISED_Autoneg |
@@ -663,6 +662,7 @@ static int i40e_set_settings(struct net_device *netdev,
if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
+ hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5ea22008d721..2b1140563a64 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -31,12 +31,7 @@
/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
-#if IS_ENABLED(CONFIG_VXLAN)
-#include <net/vxlan.h>
-#endif
-#if IS_ENABLED(CONFIG_GENEVE)
-#include <net/geneve.h>
-#endif
+#include <net/udp_tunnel.h>
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -45,8 +40,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 5
-#define DRV_VERSION_BUILD 16
+#define DRV_VERSION_MINOR 6
+#define DRV_VERSION_BUILD 4
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1584,14 +1579,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- /* In MFP case we can have a much lower count of MSIx
- * vectors available and so we need to lower the used
- * q count.
- */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
- qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
- else
- qcount = vsi->alloc_queue_pairs;
+ qcount = vsi->alloc_queue_pairs;
+
num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
@@ -1845,8 +1834,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
struct list_head tmp_del_list, tmp_add_list;
struct i40e_mac_filter *f, *ftmp, *fclone;
+ struct i40e_hw *hw = &vsi->back->hw;
bool promisc_forced_on = false;
bool add_happened = false;
+ char vsi_name[16] = "PF";
int filter_list_len = 0;
u32 changed_flags = 0;
i40e_status aq_ret = 0;
@@ -1874,6 +1865,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
INIT_LIST_HEAD(&tmp_del_list);
INIT_LIST_HEAD(&tmp_add_list);
+ if (vsi->type == I40E_VSI_SRIOV)
+ snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
+ else if (vsi->type != I40E_VSI_MAIN)
+ snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
+
if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
@@ -1925,7 +1921,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (!list_empty(&tmp_del_list)) {
int del_list_size;
- filter_list_len = pf->hw.aq.asq_buf_size /
+ filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_remove_macvlan_element_data);
del_list_size = filter_list_len *
sizeof(struct i40e_aqc_remove_macvlan_element_data);
@@ -1957,21 +1953,21 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_del == filter_list_len) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw,
- vsi->seid,
- del_list,
- num_del,
- NULL);
- aq_err = pf->hw.aq.asq_last_status;
+ aq_ret =
+ i40e_aq_remove_macvlan(hw, vsi->seid,
+ del_list,
+ num_del, NULL);
+ aq_err = hw->aq.asq_last_status;
num_del = 0;
memset(del_list, 0, del_list_size);
if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
retval = -EIO;
dev_err(&pf->pdev->dev,
- "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ "ignoring delete macvlan error on %s, err %s, aq_err %s while flushing a full buffer\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, aq_err));
}
}
/* Release memory for MAC filter entries which were
@@ -1982,17 +1978,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
if (num_del) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
- del_list, num_del,
- NULL);
- aq_err = pf->hw.aq.asq_last_status;
+ aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
+ num_del, NULL);
+ aq_err = hw->aq.asq_last_status;
num_del = 0;
if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ "ignoring delete macvlan error on %s, err %s aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, aq_err));
}
kfree(del_list);
@@ -2003,7 +1999,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
int add_list_size;
/* do all the adds now */
- filter_list_len = pf->hw.aq.asq_buf_size /
+ filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_add_macvlan_element_data);
add_list_size = filter_list_len *
sizeof(struct i40e_aqc_add_macvlan_element_data);
@@ -2038,10 +2034,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_add == filter_list_len) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
add_list, num_add,
NULL);
- aq_err = pf->hw.aq.asq_last_status;
+ aq_err = hw->aq.asq_last_status;
num_add = 0;
if (aq_ret)
@@ -2056,9 +2052,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
if (num_add) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
add_list, num_add, NULL);
- aq_err = pf->hw.aq.asq_last_status;
+ aq_err = hw->aq.asq_last_status;
num_add = 0;
}
kfree(add_list);
@@ -2067,16 +2063,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) {
retval = i40e_aq_rc_to_posix(aq_ret, aq_err);
dev_info(&pf->pdev->dev,
- "add filter failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
+ "add filter failed on %s, err %s aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, aq_err));
+ if ((hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
!test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state)) {
promisc_forced_on = true;
set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state);
- dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
+ dev_info(&pf->pdev->dev, "promiscuous mode forced on %s\n",
+ vsi_name);
}
}
}
@@ -2098,12 +2096,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
NULL);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set multi promisc failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ "set multi promisc failed on %s, err %s aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
@@ -2122,33 +2120,58 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
*/
if (pf->cur_promisc != cur_promisc) {
pf->cur_promisc = cur_promisc;
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ if (cur_promisc)
+ aq_ret =
+ i40e_aq_set_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ else
+ aq_ret =
+ i40e_aq_clear_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ if (aq_ret) {
+ retval = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "Set default VSI failed on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
+ }
}
} else {
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
- &vsi->back->hw,
+ hw,
vsi->seid,
cur_promisc, NULL,
true);
if (aq_ret) {
retval =
i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set unicast promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "set unicast promisc failed on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
- &vsi->back->hw,
+ hw,
vsi->seid,
cur_promisc, NULL);
if (aq_ret) {
retval =
i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set multicast promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "set multicast promisc failed on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
}
}
aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
@@ -2159,9 +2182,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
pf->hw.aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
}
}
out:
@@ -3952,6 +3975,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(pf->msix_entries[vector].vector,
NULL);
+ synchronize_irq(pf->msix_entries[vector].vector);
free_irq(pf->msix_entries[vector].vector,
vsi->q_vectors[i]);
@@ -4958,7 +4982,6 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
if (pf->vsi[v]->netdev)
i40e_dcbnl_set_all(pf->vsi[v]);
}
- i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
}
}
@@ -5183,12 +5206,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
usleep_range(1000, 2000);
i40e_down(vsi);
- /* Give a VF some time to respond to the reset. The
- * two second wait is based upon the watchdog cycle in
- * the VF driver.
- */
- if (vsi->type == I40E_VSI_SRIOV)
- msleep(2000);
i40e_up(vsi);
clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
@@ -5231,6 +5248,9 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_clean_tx_ring(vsi->tx_rings[i]);
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
+
+ i40e_notify_client_of_netdev_close(vsi, false);
}
/**
@@ -5342,14 +5362,7 @@ int i40e_open(struct net_device *netdev)
TCP_FLAG_CWR) >> 16);
wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
-#ifdef CONFIG_I40E_VXLAN
- vxlan_get_rx_port(netdev);
-#endif
-#ifdef CONFIG_I40E_GENEVE
- if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)
- geneve_get_rx_port(netdev);
-#endif
-
+ udp_tunnel_get_rx_info(netdev);
i40e_notify_client_of_netdev_open(vsi);
return 0;
@@ -5716,6 +5729,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
i40e_service_event_schedule(pf);
} else {
i40e_pf_unquiesce_all_vsi(pf);
+ /* Notify the client for the DCB changes */
+ i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
}
exit:
@@ -5940,7 +5955,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
-
}
/**
@@ -7057,7 +7071,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
**/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
-#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
__be16 port;
@@ -7092,7 +7105,6 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
}
}
}
-#endif
}
/**
@@ -7174,7 +7186,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
vsi->alloc_queue_pairs = 1;
vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
I40E_REQ_DESCRIPTOR_MULTIPLE);
- vsi->num_q_vectors = 1;
+ vsi->num_q_vectors = pf->num_fdsb_msix;
break;
case I40E_VSI_VMDQ2:
@@ -7558,9 +7570,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
/* reserve one vector for sideband flow director */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
if (vectors_left) {
+ pf->num_fdsb_msix = 1;
v_budget++;
vectors_left--;
} else {
+ pf->num_fdsb_msix = 0;
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
}
}
@@ -8579,7 +8593,9 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
/* Enable filters and mark for reset */
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
need_reset = true;
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ /* enable FD_SB only if there is MSI-X vector */
+ if (pf->num_fdsb_msix > 0)
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
} else {
/* turn off filters, mark for reset and clear SW filter list */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -8628,7 +8644,6 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
-#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
/**
* i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
* @pf: board private structure
@@ -8648,21 +8663,18 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
return i;
}
-#endif
-
-#if IS_ENABLED(CONFIG_VXLAN)
/**
- * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
* @netdev: This physical port's netdev
- * @sa_family: Socket Family that VXLAN is notifying us about
- * @port: New UDP port number that VXLAN started listening to
+ * @ti: Tunnel endpoint information
**/
-static void i40e_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+static void i40e_udp_tunnel_add(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ __be16 port = ti->port;
u8 next_idx;
u8 idx;
@@ -8670,7 +8682,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "vxlan port %d already offloaded\n",
+ netdev_info(netdev, "port %d already offloaded\n",
ntohs(port));
return;
}
@@ -8679,131 +8691,75 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
next_idx = i40e_get_udp_port_idx(pf, 0);
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
- ntohs(port));
- return;
- }
-
- /* New port: add it and mark its index in the bitmap */
- pf->udp_ports[next_idx].index = port;
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
- pf->pending_udp_bitmap |= BIT_ULL(next_idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
-}
-
-/**
- * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
- * @netdev: This physical port's netdev
- * @sa_family: Socket Family that VXLAN is notifying us about
- * @port: UDP port number that VXLAN stopped listening to
- **/
-static void i40e_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- /* if port exists, set it to 0 (mark for deletion)
- * and make it pending
- */
- pf->udp_ports[idx].index = 0;
- pf->pending_udp_bitmap |= BIT_ULL(idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
- } else {
- netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
- ntohs(port));
- }
-}
-#endif
-
-#if IS_ENABLED(CONFIG_GENEVE)
-/**
- * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
- * @netdev: This physical port's netdev
- * @sa_family: Socket Family that GENEVE is notifying us about
- * @port: New UDP port number that GENEVE started listening to
- **/
-static void i40e_add_geneve_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u8 next_idx;
- u8 idx;
-
- if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
- return;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "udp port %d already offloaded\n",
+ netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
ntohs(port));
return;
}
- /* Now check if there is space to add the new port */
- next_idx = i40e_get_udp_port_idx(pf, 0);
-
- if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n",
- ntohs(port));
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
+ return;
+ pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
+ break;
+ default:
return;
}
/* New port: add it and mark its index in the bitmap */
pf->udp_ports[next_idx].index = port;
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
pf->pending_udp_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
-
- dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
}
/**
- * i40e_del_geneve_port - Get notifications about GENEVE ports that go away
+ * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
* @netdev: This physical port's netdev
- * @sa_family: Socket Family that GENEVE is notifying us about
- * @port: UDP port number that GENEVE stopped listening to
+ * @ti: Tunnel endpoint information
**/
-static void i40e_del_geneve_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+static void i40e_udp_tunnel_del(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ __be16 port = ti->port;
u8 idx;
- if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
- return;
-
idx = i40e_get_udp_port_idx(pf, port);
/* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- /* if port exists, set it to 0 (mark for deletion)
- * and make it pending
- */
- pf->udp_ports[idx].index = 0;
- pf->pending_udp_bitmap |= BIT_ULL(idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+ if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
+ goto not_found;
- dev_info(&pf->pdev->dev, "deleting geneve port %d\n",
- ntohs(port));
- } else {
- netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
- ntohs(port));
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
+ goto not_found;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
+ goto not_found;
+ break;
+ default:
+ goto not_found;
}
+
+ /* if port exists, set it to 0 (mark for deletion)
+ * and make it pending
+ */
+ pf->udp_ports[idx].index = 0;
+ pf->pending_udp_bitmap |= BIT_ULL(idx);
+ pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+
+ return;
+not_found:
+ netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
+ ntohs(port));
}
-#endif
static int i40e_get_phys_port_id(struct net_device *netdev,
struct netdev_phys_item_id *ppid)
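The consolidated delete path only clears a slot when the recorded tunnel type matches the caller's, so a GENEVE withdrawal can never free a VXLAN port that happens to share the same number. A runnable miniature of that type-checked slot table (sizes and names hypothetical):

#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 16    /* hypothetical table size */
enum tun_type { TUN_NONE, TUN_VXLAN, TUN_GENEVE };

static struct { uint16_t port; enum tun_type type; } slots[MAX_PORTS];

/* Clear a slot only if both the port and the recorded type match. */
static int tun_del(uint16_t port, enum tun_type type)
{
        for (int i = 0; i < MAX_PORTS; i++) {
                if (slots[i].port != port)
                        continue;
                if (slots[i].type != type)
                        return -1;      /* same port, wrong protocol */
                slots[i].port = 0;      /* mark the slot free */
                slots[i].type = TUN_NONE;
                return 0;
        }
        return -1;                      /* not found */
}

int main(void)
{
        slots[0].port = 4789;
        slots[0].type = TUN_VXLAN;
        printf("%d\n", tun_del(4789, TUN_GENEVE));      /* -1: refused */
        printf("%d\n", tun_del(4789, TUN_VXLAN));       /* 0: cleared */
        return 0;
}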
@@ -9033,14 +8989,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
-#if IS_ENABLED(CONFIG_VXLAN)
- .ndo_add_vxlan_port = i40e_add_vxlan_port,
- .ndo_del_vxlan_port = i40e_del_vxlan_port,
-#endif
-#if IS_ENABLED(CONFIG_GENEVE)
- .ndo_add_geneve_port = i40e_add_geneve_port,
- .ndo_del_geneve_port = i40e_del_geneve_port,
-#endif
+ .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
+ .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
.ndo_features_check = i40e_features_check,
@@ -10133,14 +10083,14 @@ void i40e_veb_release(struct i40e_veb *veb)
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
struct i40e_pf *pf = veb->pf;
- bool is_default = veb->pf->cur_promisc;
bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
int ret;
/* get a VEB from the hardware */
ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, is_default,
+ veb->enabled_tc, false,
&veb->seid, enable_stats, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't add VEB, err %s aq_err %s\n",
@@ -10689,12 +10639,8 @@ static void i40e_print_features(struct i40e_pf *pf)
}
if (pf->flags & I40E_FLAG_DCB_CAPABLE)
i += snprintf(&buf[i], REMAIN(i), " DCB");
-#if IS_ENABLED(CONFIG_VXLAN)
i += snprintf(&buf[i], REMAIN(i), " VxLAN");
-#endif
-#if IS_ENABLED(CONFIG_GENEVE)
i += snprintf(&buf[i], REMAIN(i), " Geneve");
-#endif
if (pf->flags & I40E_FLAG_PTP)
i += snprintf(&buf[i], REMAIN(i), " PTP");
#ifdef I40E_FCOE
@@ -11525,6 +11471,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
+ int retval = 0;
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
@@ -11536,10 +11483,16 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ i40e_stop_misc_vector(pf);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
pci_wake_from_d3(pdev, pf->wol_en);
pci_set_power_state(pdev, PCI_D3hot);
- return 0;
+ return retval;
}
/**
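The suspend fix snapshots PCI config space with pci_save_state() while the device is still in D0; once it drops to D3hot the config registers may no longer be readable, and a failed save now aborts the suspend rather than risking a broken resume. A minimal sketch of that ordering in a legacy .suspend handler, with the driver-specific quiescing elided:

#include <linux/pci.h>

/* Sketch of the save-before-D3 ordering; quiescing the device and
 * arming device-specific wake sources is elided.
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
        int ret;

        ret = pci_save_state(pdev);     /* must run while still in D0 */
        if (ret)
                return ret;             /* abort rather than resume broken */

        pci_wake_from_d3(pdev, true);   /* keep wake-on-LAN armed */
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}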
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 80403c6ee7f0..4660c5abc855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -98,6 +98,8 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
bool qualified_modules, bool report_init,
struct i40e_aq_get_phy_abilities_resp *abilities,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 1fcafcfa8f14..6fcbf764f32b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -665,6 +665,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
+ u64 hena = i40e_pf_get_default_rss_hena(pf);
+
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
/* If the port VLAN has been configured and then the
@@ -687,6 +689,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
vf->default_lan_addr.addr, vf->vf_id);
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
+ i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
+ (u32)hena);
+ i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(hena >> 32));
}
/* program mac filter */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 16c552952860..eac057b88055 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -37,8 +37,8 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 5
-#define DRV_VERSION_BUILD 10
+#define DRV_VERSION_MINOR 6
+#define DRV_VERSION_BUILD 4
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -825,7 +825,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
ether_addr_copy(f->macaddr, macaddr);
- list_add(&f->list, &adapter->mac_filter_list);
+ list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index f13445691507..d76c221d4c8a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -434,6 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
ether_addr_copy(veal->list[i].addr, f->macaddr);
i++;
f->add = false;
+ if (i == count)
+ break;
}
}
if (!more)
@@ -497,6 +499,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
i++;
list_del(&f->list);
kfree(f);
+ if (i == count)
+ break;
}
}
if (!more)
@@ -560,6 +564,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
vvfl->vlan_id[i] = f->vlan;
i++;
f->add = false;
+ if (i == count)
+ break;
}
}
if (!more)
@@ -623,6 +629,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
i++;
list_del(&f->list);
kfree(f);
+ if (i == count)
+ break;
}
}
if (!more)
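Each of these loops copies at most `count` entries into a buffer sized from `count`, so the added `if (i == count) break;` stops the walk the moment the buffer is full even if more flagged filters remain on the list. A runnable distillation of that bounded-fill pattern:

#include <stdio.h>

/* Copy flagged items into out[], never writing more than cap entries. */
static int fill_bounded(const int *flags, int n, int *out, int cap)
{
        int i = 0;

        for (int k = 0; k < n; k++) {
                if (!flags[k])
                        continue;
                out[i++] = k;
                if (i == cap)
                        break;          /* buffer full: stop, don't overrun */
        }
        return i;
}

int main(void)
{
        int flags[8] = { 1, 1, 1, 1, 1, 0, 1, 1 };
        int out[4];

        printf("copied %d entries\n", fill_bounded(flags, 8, out, 4)); /* 4 */
        return 0;
}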
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index b9609afa5ca3..5387b3a96489 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -445,6 +445,7 @@ struct igb_adapter {
unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
unsigned long last_rx_timestamp;
+ unsigned int ptp_flags;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
@@ -474,12 +475,15 @@ struct igb_adapter {
u16 eee_advert;
};
+/* flags controlling PTP/1588 function */
+#define IGB_PTP_ENABLED BIT(0)
+#define IGB_PTP_OVERFLOW_CHECK BIT(1)
+
#define IGB_FLAG_HAS_MSI BIT(0)
#define IGB_FLAG_DCA_ENABLED BIT(1)
#define IGB_FLAG_QUAD_PORT_A BIT(2)
#define IGB_FLAG_QUEUE_PAIRS BIT(3)
#define IGB_FLAG_DMAC BIT(4)
-#define IGB_FLAG_PTP BIT(5)
#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
#define IGB_FLAG_WOL_SUPPORTED BIT(8)
@@ -546,6 +550,7 @@ void igb_set_fw_version(struct igb_adapter *);
void igb_ptp_init(struct igb_adapter *adapter);
void igb_ptp_stop(struct igb_adapter *adapter);
void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ef3d642f5ff2..9bcba42abb91 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2027,7 +2027,8 @@ void igb_reset(struct igb_adapter *adapter)
wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
/* Re-enable PTP, where applicable. */
- igb_ptp_reset(adapter);
+ if (adapter->ptp_flags & IGB_PTP_ENABLED)
+ igb_ptp_reset(adapter);
igb_get_phy_info(hw);
}
@@ -6855,12 +6856,12 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
**/
static bool igb_add_rx_frag(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
+ unsigned int size,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IGB_RX_BUFSZ;
#else
@@ -6912,6 +6913,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
struct igb_rx_buffer *rx_buffer;
struct page *page;
@@ -6947,11 +6949,11 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- IGB_RX_BUFSZ,
+ size,
DMA_FROM_DEVICE);
/* pull page into skb */
- if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
/* hand second half of page back to the ring */
igb_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -7527,6 +7529,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (netif_running(netdev))
__igb_close(netdev, true);
+ igb_ptp_suspend(adapter);
+
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index f097c5a8ab93..e61b647f5f2a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -684,6 +684,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
unsigned long rx_event;
+ /* Other hardware uses per-packet timestamps */
if (hw->mac.type != e1000_82576)
return;
@@ -1042,6 +1043,13 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
-EFAULT : 0;
}
+/**
+ * igb_ptp_init - Initialize PTP functionality
+ * @adapter: Board private structure
+ *
+ * This function is called at device probe to initialize the PTP
+ * functionality.
+ */
void igb_ptp_init(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -1064,8 +1072,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->cc.mask = CYCLECOUNTER_MASK(64);
adapter->cc.mult = 1;
adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
- /* Dial the nominal frequency. */
- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+ adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK;
break;
case e1000_82580:
case e1000_i354:
@@ -1084,8 +1091,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
adapter->cc.shift = 0;
- /* Enable the timer functions by clearing bit 31. */
- wr32(E1000_TSAUXC, 0x0);
+ adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK;
break;
case e1000_i210:
case e1000_i211:
@@ -1110,44 +1116,24 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
adapter->ptp_caps.verify = igb_ptp_verify_pin;
- /* Enable the timer functions by clearing bit 31. */
- wr32(E1000_TSAUXC, 0x0);
break;
default:
adapter->ptp_clock = NULL;
return;
}
- wrfl();
-
spin_lock_init(&adapter->tmreg_lock);
INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
- /* Initialize the clock and overflow work for devices that need it. */
- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
- struct timespec64 ts = ktime_to_timespec64(ktime_get_real());
-
- igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
- } else {
- timecounter_init(&adapter->tc, &adapter->cc,
- ktime_to_ns(ktime_get_real()));
-
+ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
igb_ptp_overflow_check);
- schedule_delayed_work(&adapter->ptp_overflow_work,
- IGB_SYSTIM_OVERFLOW_PERIOD);
- }
-
- /* Initialize the time sync interrupts for devices that support it. */
- if (hw->mac.type >= e1000_82580) {
- wr32(E1000_TSIM, TSYNC_INTERRUPTS);
- wr32(E1000_IMS, E1000_IMS_TS);
- }
-
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ igb_ptp_reset(adapter);
+
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -1156,32 +1142,24 @@ void igb_ptp_init(struct igb_adapter *adapter)
} else {
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
- adapter->flags |= IGB_FLAG_PTP;
+ adapter->ptp_flags |= IGB_PTP_ENABLED;
}
}
/**
- * igb_ptp_stop - Disable PTP device and stop the overflow check.
- * @adapter: Board private structure.
+ * igb_ptp_suspend - Disable PTP work items and prepare for suspend
+ * @adapter: Board private structure
*
- * This function stops the PTP support and cancels the delayed work.
- **/
-void igb_ptp_stop(struct igb_adapter *adapter)
+ * This function stops the overflow check work and the PTP Tx timestamp work,
+ * and prepares the device for OS suspend.
+ */
+void igb_ptp_suspend(struct igb_adapter *adapter)
{
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- case e1000_82580:
- case e1000_i354:
- case e1000_i350:
- cancel_delayed_work_sync(&adapter->ptp_overflow_work);
- break;
- case e1000_i210:
- case e1000_i211:
- /* No delayed work to cancel. */
- break;
- default:
+ if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
return;
- }
+
+ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+ cancel_delayed_work_sync(&adapter->ptp_overflow_work);
cancel_work_sync(&adapter->ptp_tx_work);
if (adapter->ptp_tx_skb) {
@@ -1189,12 +1167,23 @@ void igb_ptp_stop(struct igb_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
+}
+
+/**
+ * igb_ptp_stop - Disable PTP device and stop the overflow check.
+ * @adapter: Board private structure.
+ *
+ * This function stops the PTP support and cancels the delayed work.
+ **/
+void igb_ptp_stop(struct igb_adapter *adapter)
+{
+ igb_ptp_suspend(adapter);
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
adapter->netdev->name);
- adapter->flags &= ~IGB_FLAG_PTP;
+ adapter->ptp_flags &= ~IGB_PTP_ENABLED;
}
}
@@ -1209,9 +1198,6 @@ void igb_ptp_reset(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
unsigned long flags;
- if (!(adapter->flags & IGB_FLAG_PTP))
- return;
-
/* reset the tstamp_config */
igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
@@ -1248,4 +1234,10 @@ void igb_ptp_reset(struct igb_adapter *adapter)
}
out:
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ wrfl();
+
+ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+ schedule_delayed_work(&adapter->ptp_overflow_work,
+ IGB_SYSTIM_OVERFLOW_PERIOD);
}
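
The reworked igb paths hinge on two private flags: IGB_PTP_ENABLED, set once the PTP clock registers, and IGB_PTP_OVERFLOW_CHECK, set for MACs whose free-running cycle counter needs periodic overflow polling. A compilable sketch of that gating, with hypothetical stand-ins for the work-queue calls:

    #include <stdio.h>

    #define PTP_ENABLED        (1u << 0)
    #define PTP_OVERFLOW_CHECK (1u << 1)

    struct adapter { unsigned int ptp_flags; };

    static void schedule_overflow_work(void) { puts("overflow work scheduled"); }
    static void cancel_overflow_work(void)   { puts("overflow work cancelled"); }

    static void ptp_reset(struct adapter *a)
    {
        /* ... reprogram timestamp registers, flush writes ... */
        if (a->ptp_flags & PTP_OVERFLOW_CHECK)
            schedule_overflow_work();  /* 82576/82580-class parts only */
    }

    static void ptp_suspend(struct adapter *a)
    {
        if (!(a->ptp_flags & PTP_ENABLED))
            return;                    /* probe never registered a clock */
        if (a->ptp_flags & PTP_OVERFLOW_CHECK)
            cancel_overflow_work();
        /* ... cancel Tx timestamp work, drop any pending skb ... */
    }

    int main(void)
    {
        struct adapter a = { .ptp_flags = PTP_ENABLED | PTP_OVERFLOW_CHECK };
        ptp_reset(&a);
        ptp_suspend(&a);
        return 0;
    }
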
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 59b771b9b354..8a8450788124 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2991,10 +2991,15 @@ static int ixgbe_get_ts_info(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ /* we always support timestamping disabled */
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
+
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
+ /* fallthrough */
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
@@ -3014,8 +3019,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
- info->rx_filters =
- BIT(HWTSTAMP_FILTER_NONE) |
+ info->rx_filters |=
BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
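
The ethtool hunk builds info->rx_filters incrementally: HWTSTAMP_FILTER_NONE is always reported, x550-class MACs add HWTSTAMP_FILTER_ALL, and the deliberate fallthrough gives every timestamp-capable MAC the PTP filters. A compilable model of the accumulation, with illustrative enum values:

    #include <stdio.h>

    #define BIT(n) (1u << (n))
    enum { FILTER_NONE, FILTER_ALL, FILTER_PTP_V1_SYNC, FILTER_PTP_V2_EVENT };
    enum mac { MAC_82599, MAC_X540, MAC_X550 };

    static unsigned int rx_filters(enum mac type)
    {
        unsigned int f = BIT(FILTER_NONE); /* disabling always supported */

        switch (type) {
        case MAC_X550:
            f |= BIT(FILTER_ALL);
            /* fall through */
        case MAC_X540:
        case MAC_82599:
            f |= BIT(FILTER_PTP_V1_SYNC) | BIT(FILTER_PTP_V2_EVENT);
            break;
        }
        return f;
    }

    int main(void)
    {
        printf("x540: %#x, x550: %#x\n",
               rx_filters(MAC_X540), rx_filters(MAC_X550));
        return 0;
    }
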
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 088c47cf27d9..fd5a761c68f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -50,7 +50,7 @@
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
-#include <net/vxlan.h>
+#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
@@ -5722,9 +5722,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
-#ifdef CONFIG_IXGBE_VXLAN
adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
-#endif
break;
default:
break;
@@ -6158,9 +6156,7 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
ixgbe_clear_vxlan_port(adapter);
-#ifdef CONFIG_IXGBE_VXLAN
- vxlan_get_rx_port(netdev);
-#endif
+ udp_tunnel_get_rx_info(netdev);
return 0;
@@ -7262,14 +7258,12 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
-#ifdef CONFIG_IXGBE_VXLAN
- rtnl_lock();
if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
+ rtnl_lock();
adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
- vxlan_get_rx_port(adapter->netdev);
+ udp_tunnel_get_rx_info(adapter->netdev);
+ rtnl_unlock();
}
- rtnl_unlock();
-#endif /* CONFIG_IXGBE_VXLAN */
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
@@ -7697,7 +7691,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
/* snag network header to get L4 type and address */
skb = first->skb;
hdr.network = skb_network_header(skb);
-#ifdef CONFIG_IXGBE_VXLAN
if (skb->encapsulation &&
first->protocol == htons(ETH_P_IP) &&
hdr.ipv4->protocol != IPPROTO_UDP) {
@@ -7708,7 +7701,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
udp_hdr(skb)->dest == adapter->vxlan_port)
hdr.network = skb_inner_network_header(skb);
}
-#endif /* CONFIG_IXGBE_VXLAN */
/* Currently only IPv4/IPv6 with TCP is supported */
switch (hdr.ipv4->version) {
@@ -8308,14 +8300,53 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
struct tc_cls_u32_offload *cls)
{
+ u32 hdl = cls->knode.handle;
u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
- u32 loc;
- int err;
+ u32 loc = cls->knode.handle & 0xfffff;
+ int err = 0, i, j;
+ struct ixgbe_jump_table *jump = NULL;
+
+ if (loc > IXGBE_MAX_HW_ENTRIES)
+ return -EINVAL;
if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
return -EINVAL;
- loc = cls->knode.handle & 0xfffff;
+ /* Clear this filter in the link data it is associated with */
+ if (uhtid != 0x800) {
+ jump = adapter->jump_tables[uhtid];
+ if (!jump)
+ return -EINVAL;
+ if (!test_bit(loc - 1, jump->child_loc_map))
+ return -EINVAL;
+ clear_bit(loc - 1, jump->child_loc_map);
+ }
+
+ /* Check if the filter being deleted is a link */
+ for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
+ jump = adapter->jump_tables[i];
+ if (jump && jump->link_hdl == hdl) {
+ /* Delete filters in the hardware in the child hash
+ * table associated with this link
+ */
+ for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
+ if (!test_bit(j, jump->child_loc_map))
+ continue;
+ spin_lock(&adapter->fdir_perfect_lock);
+ err = ixgbe_update_ethtool_fdir_entry(adapter,
+ NULL,
+ j + 1);
+ spin_unlock(&adapter->fdir_perfect_lock);
+ clear_bit(j, jump->child_loc_map);
+ }
+ /* Remove resources for this link */
+ kfree(jump->input);
+ kfree(jump->mask);
+ kfree(jump);
+ adapter->jump_tables[i] = NULL;
+ return err;
+ }
+ }
spin_lock(&adapter->fdir_perfect_lock);
err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
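
The delete path relies on each jump table tracking, in child_loc_map, which hardware filter locations its child hash table occupies: adding a filter sets bit loc - 1, deleting a filter clears it, and deleting the link itself walks the map to purge every child entry. A compilable model of that bitmap bookkeeping, with stand-in helpers (the kernel uses set_bit/clear_bit/test_bit):

    #include <stdio.h>
    #include <string.h>

    #define MAX_HW_ENTRIES 2045
    #define BPL (8 * (int)sizeof(unsigned long))
    #define MAP_LONGS ((MAX_HW_ENTRIES + BPL - 1) / BPL)

    static void set_loc(int n, unsigned long *map)
    { map[n / BPL] |= 1UL << (n % BPL); }
    static void clear_loc(int n, unsigned long *map)
    { map[n / BPL] &= ~(1UL << (n % BPL)); }
    static int test_loc(int n, const unsigned long *map)
    { return (map[n / BPL] >> (n % BPL)) & 1; }

    int main(void)
    {
        unsigned long child_loc_map[MAP_LONGS];
        unsigned int loc = 5;

        memset(child_loc_map, 0, sizeof(child_loc_map));
        set_loc(loc - 1, child_loc_map);       /* filter added at loc */
        if (!test_loc(loc - 1, child_loc_map)) /* delete must find it */
            return 1;
        clear_loc(loc - 1, child_loc_map);     /* mirrored on delete */
        printf("loc %u now %s\n", loc,
               test_loc(loc - 1, child_loc_map) ? "set" : "clear");
        return 0;
    }
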
@@ -8549,6 +8580,18 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
if (!test_bit(link_uhtid - 1, &adapter->tables))
return err;
+ /* Multiple filters as links to the same hash table are not
+ * supported. To add a new filter with the same next header
+ * but different match/jump conditions, create a new hash table
+ * and link to it.
+ */
+ if (adapter->jump_tables[link_uhtid] &&
+ (adapter->jump_tables[link_uhtid])->link_hdl) {
+ e_err(drv, "Link filter exists for link: %x\n",
+ link_uhtid);
+ return err;
+ }
+
for (i = 0; nexthdr[i].jump; i++) {
if (nexthdr[i].o != cls->knode.sel->offoff ||
nexthdr[i].s != cls->knode.sel->offshift ||
@@ -8570,6 +8613,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
}
jump->input = input;
jump->mask = mask;
+ jump->link_hdl = cls->knode.handle;
+
err = ixgbe_clsu32_build_input(input, mask, cls,
field_ptr, &nexthdr[i]);
if (!err) {
@@ -8597,6 +8642,20 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
if ((adapter->jump_tables[uhtid])->mask)
memcpy(mask, (adapter->jump_tables[uhtid])->mask,
sizeof(*mask));
+
+ /* Look up all child hash tables to check whether this location is
+ * already occupied by a filter
+ */
+ for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
+ struct ixgbe_jump_table *link = adapter->jump_tables[i];
+
+ if (link && (test_bit(loc - 1, link->child_loc_map))) {
+ e_err(drv, "Filter exists in location: %x\n",
+ loc);
+ err = -EINVAL;
+ goto err_out;
+ }
+ }
}
err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
if (err)
@@ -8628,6 +8687,9 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
spin_unlock(&adapter->fdir_perfect_lock);
+ if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
+ set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
+
kfree(mask);
return err;
err_out_w_lock:
@@ -8770,14 +8832,12 @@ static int ixgbe_set_features(struct net_device *netdev,
netdev->features = features;
-#ifdef CONFIG_IXGBE_VXLAN
if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
if (features & NETIF_F_RXCSUM)
adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
else
ixgbe_clear_vxlan_port(adapter);
}
-#endif /* CONFIG_IXGBE_VXLAN */
if (need_reset)
ixgbe_do_reset(netdev);
@@ -8788,23 +8848,27 @@ static int ixgbe_set_features(struct net_device *netdev,
return 0;
}
-#ifdef CONFIG_IXGBE_VXLAN
/**
* ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
* @dev: The port's netdev
* @sa_family: Socket Family that VXLAN is notifying us about
* @port: New UDP port number that VXLAN started listening to
+ * @type: Enumerated type specifying UDP tunnel type
**/
-static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
- __be16 port)
+static void ixgbe_add_vxlan_port(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
+ __be16 port = ti->port;
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (sa_family == AF_INET6)
+ if (ti->sa_family != AF_INET)
+ return;
+
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
return;
if (adapter->vxlan_port == port)
@@ -8826,28 +8890,31 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
* @dev: The port's netdev
* @sa_family: Socket Family that VXLAN is notifying us about
* @port: UDP port number that VXLAN stopped listening to
+ * @type: Enumerated type specifying UDP tunnel type
**/
-static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
- __be16 port)
+static void ixgbe_del_vxlan_port(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (sa_family == AF_INET6)
+ if (ti->sa_family != AF_INET)
return;
- if (adapter->vxlan_port != port) {
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ return;
+
+ if (adapter->vxlan_port != ti->port) {
netdev_info(dev, "Port %d was not found, not deleting\n",
- ntohs(port));
+ ntohs(ti->port));
return;
}
ixgbe_clear_vxlan_port(adapter);
adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
}
-#endif /* CONFIG_IXGBE_VXLAN */
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
@@ -9160,10 +9227,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
-#ifdef CONFIG_IXGBE_VXLAN
- .ndo_add_vxlan_port = ixgbe_add_vxlan_port,
- .ndo_del_vxlan_port = ixgbe_del_vxlan_port,
-#endif /* CONFIG_IXGBE_VXLAN */
+ .ndo_udp_tunnel_add = ixgbe_add_vxlan_port,
+ .ndo_udp_tunnel_del = ixgbe_del_vxlan_port,
.ndo_features_check = ixgbe_features_check,
};
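
The converted drivers in this section (ixgbe here, mlx4_en further below) use the same guard sequence in the new ndo_udp_tunnel_{add,del} handlers: reject non-VXLAN tunnel types, then non-IPv4 families, before touching any offload state. A compilable model with stand-in types (the kernel passes struct udp_tunnel_info):

    #include <stdint.h>
    #include <stdio.h>

    enum tunnel_type { TUNNEL_VXLAN, TUNNEL_GENEVE };
    enum family { FAM_INET, FAM_INET6 };

    struct tunnel_info {
        enum tunnel_type type;
        enum family sa_family;
        uint16_t port;            /* host order in this model */
    };

    static uint16_t vxlan_port;   /* models adapter->vxlan_port */

    static void add_vxlan_port(const struct tunnel_info *ti)
    {
        if (ti->type != TUNNEL_VXLAN)
            return;               /* other tunnel offloads unsupported */
        if (ti->sa_family != FAM_INET)
            return;               /* hardware parses IPv4 outer headers only */
        if (vxlan_port == ti->port)
            return;               /* already programmed */
        vxlan_port = ti->port;
        printf("programmed VXLAN port %u\n", ti->port);
    }

    int main(void)
    {
        struct tunnel_info ti = { TUNNEL_VXLAN, FAM_INET, 4789 };
        add_vxlan_port(&ti);
        return 0;
    }
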
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index a8bed3d887f7..538a1c5475b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -42,8 +42,12 @@ struct ixgbe_jump_table {
struct ixgbe_mat_field *mat;
struct ixgbe_fdir_filter *input;
union ixgbe_atr_input *mask;
+ u32 link_hdl;
+ unsigned long child_loc_map[32];
};
+#define IXGBE_MAX_HW_ENTRIES 2045
+
static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
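
One sizing observation on the new fields: child_loc_map needs one bit per hardware filter location. With 32 unsigned longs that is 2048 bits on LP64 builds, just covering IXGBE_MAX_HW_ENTRIES (2045), but only 1024 bits on ILP32, which may be worth double-checking against the table size those targets actually use. A compile-time check, as a hedged sketch:

    #include <stdio.h>

    #define IXGBE_MAX_HW_ENTRIES 2045
    #define CHILD_LOC_LONGS      32

    int main(void)
    {
        /* passes on LP64 (32 * 64 = 2048 >= 2045); would trip on ILP32 */
        _Static_assert(CHILD_LOC_LONGS * 8 * sizeof(unsigned long)
                       >= IXGBE_MAX_HW_ENTRIES,
                       "child_loc_map cannot cover every filter location");
        printf("bitmap bits: %zu\n",
               CHILD_LOC_LONGS * 8 * sizeof(unsigned long));
        return 0;
    }
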
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index c5caacdd193d..8618599dfd6f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -954,6 +954,7 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
+ hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
}
}
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index dc82b1b19574..91e09d68b7e2 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -102,7 +102,6 @@ struct ltq_etop_priv {
struct resource *res;
struct mii_bus *mii_bus;
- struct phy_device *phydev;
struct ltq_etop_chan ch[MAX_DMA_CHAN];
int tx_free[MAX_DMA_CHAN >> 1];
@@ -305,34 +304,16 @@ ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
static int
-ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ltq_etop_priv *priv = netdev_priv(dev);
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int
-ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ltq_etop_priv *priv = netdev_priv(dev);
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
-static int
ltq_etop_nway_reset(struct net_device *dev)
{
- struct ltq_etop_priv *priv = netdev_priv(dev);
-
- return phy_start_aneg(priv->phydev);
+ return phy_start_aneg(dev->phydev);
}
static const struct ethtool_ops ltq_etop_ethtool_ops = {
.get_drvinfo = ltq_etop_get_drvinfo,
- .get_settings = ltq_etop_get_settings,
- .set_settings = ltq_etop_set_settings,
.nway_reset = ltq_etop_nway_reset,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int
@@ -401,7 +382,6 @@ ltq_etop_mdio_probe(struct net_device *dev)
| SUPPORTED_TP);
phydev->advertising = phydev->supported;
- priv->phydev = phydev;
phy_attached_info(phydev);
return 0;
@@ -411,7 +391,6 @@ static int
ltq_etop_mdio_init(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
- int i;
int err;
priv->mii_bus = mdiobus_alloc();
@@ -451,7 +430,7 @@ ltq_etop_mdio_cleanup(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
- phy_disconnect(priv->phydev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
}
@@ -470,7 +449,7 @@ ltq_etop_open(struct net_device *dev)
ltq_dma_open(&ch->dma);
napi_enable(&ch->napi);
}
- phy_start(priv->phydev);
+ phy_start(dev->phydev);
netif_tx_start_all_queues(dev);
return 0;
}
@@ -482,7 +461,7 @@ ltq_etop_stop(struct net_device *dev)
int i;
netif_tx_stop_all_queues(dev);
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
for (i = 0; i < MAX_DMA_CHAN; i++) {
struct ltq_etop_chan *ch = &priv->ch[i];
@@ -557,10 +536,8 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
static int
ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct ltq_etop_priv *priv = netdev_priv(dev);
-
/* TODO: mii-tool reports "No MII transceiver present!" ?! */
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static int
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 868a957f24bb..0b047178cda1 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -699,7 +699,6 @@ struct mvpp2_port {
u16 rx_ring_size;
struct mvpp2_pcpu_stats __percpu *stats;
- struct phy_device *phy_dev;
phy_interface_t phy_interface;
struct device_node *phy_node;
unsigned int link;
@@ -4850,7 +4849,7 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id)
static void mvpp2_link_event(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
- struct phy_device *phydev = port->phy_dev;
+ struct phy_device *phydev = dev->phydev;
int status_change = 0;
u32 val;
@@ -5416,6 +5415,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
+ struct net_device *ndev = port->dev;
+
mvpp2_gmac_max_rx_size_set(port);
mvpp2_txp_max_tx_size_set(port);
@@ -5425,13 +5426,15 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
mvpp2_interrupts_enable(port);
mvpp2_port_enable(port);
- phy_start(port->phy_dev);
+ phy_start(ndev->phydev);
netif_tx_start_all_queues(port->dev);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
+ struct net_device *ndev = port->dev;
+
/* Stop new packets from arriving to RXQs */
mvpp2_ingress_disable(port);
@@ -5447,7 +5450,7 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
mvpp2_egress_disable(port);
mvpp2_port_disable(port);
- phy_stop(port->phy_dev);
+ phy_stop(ndev->phydev);
}
/* Return positive if MTU is valid */
@@ -5535,7 +5538,6 @@ static int mvpp2_phy_connect(struct mvpp2_port *port)
phy_dev->supported &= PHY_GBIT_FEATURES;
phy_dev->advertising = phy_dev->supported;
- port->phy_dev = phy_dev;
port->link = 0;
port->duplex = 0;
port->speed = 0;
@@ -5545,8 +5547,9 @@ static int mvpp2_phy_connect(struct mvpp2_port *port)
static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
- phy_disconnect(port->phy_dev);
- port->phy_dev = NULL;
+ struct net_device *ndev = port->dev;
+
+ phy_disconnect(ndev->phydev);
}
static int mvpp2_open(struct net_device *dev)
@@ -5796,13 +5799,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mvpp2_port *port = netdev_priv(dev);
int ret;
- if (!port->phy_dev)
+ if (!dev->phydev)
return -ENOTSUPP;
- ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
+ ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
if (!ret)
mvpp2_link_event(dev);
@@ -5811,28 +5813,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Ethtool methods */
-/* Get settings (phy address, speed) for ethtools */
-static int mvpp2_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct mvpp2_port *port = netdev_priv(dev);
-
- if (!port->phy_dev)
- return -ENODEV;
- return phy_ethtool_gset(port->phy_dev, cmd);
-}
-
-/* Set settings (phy address, speed) for ethtools */
-static int mvpp2_ethtool_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct mvpp2_port *port = netdev_priv(dev);
-
- if (!port->phy_dev)
- return -ENODEV;
- return phy_ethtool_sset(port->phy_dev, cmd);
-}
-
/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *c)
@@ -5967,13 +5947,13 @@ static const struct net_device_ops mvpp2_netdev_ops = {
static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = mvpp2_ethtool_get_settings,
- .set_settings = mvpp2_ethtool_set_settings,
.set_coalesce = mvpp2_ethtool_set_coalesce,
.get_coalesce = mvpp2_ethtool_get_coalesce,
.get_drvinfo = mvpp2_ethtool_get_drvinfo,
.get_ringparam = mvpp2_ethtool_get_ringparam,
.set_ringparam = mvpp2_ethtool_set_ringparam,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/* Driver initialization */
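
The lantiq_etop and mvpp2 hunks are the same refactor: drop the driver-private phydev copy, rely on the pointer phy_connect() already stores in struct net_device, and let the generic phy_ethtool_{get,set}_link_ksettings helpers replace the per-driver get_settings/set_settings callbacks. A small sketch of why the duplicate pointer was redundant (stand-in structures):

    #include <stdio.h>

    struct phy_device { int speed; };

    struct net_device {
        /* set by phy_connect(), cleared by phy_disconnect() */
        struct phy_device *phydev;
    };

    /* the old pattern kept a second phydev pointer in the driver's
     * private struct; the new pattern reads dev->phydev at each use */
    static int get_speed(struct net_device *dev)
    {
        if (!dev->phydev)
            return -1;            /* not attached yet */
        return dev->phydev->speed;
    }

    int main(void)
    {
        struct phy_device phy = { .speed = 1000 };
        struct net_device dev = { .phydev = &phy };

        printf("speed %d\n", get_speed(&dev));
        return 0;
    }
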
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c984462fad2a..682797ea5e11 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 flowctrl;
u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
@@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev)
if (mac->phy_dev->link)
mcr |= MAC_MCR_FORCE_LINK;
- if (mac->phy_dev->duplex)
+ if (mac->phy_dev->duplex) {
mcr |= MAC_MCR_FORCE_DPX;
- if (mac->phy_dev->pause)
- mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
+ if (mac->phy_dev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (mac->phy_dev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ if (mac->phy_dev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+ if (flowctrl & FLOW_CTRL_TX)
+ mcr |= MAC_MCR_FORCE_TX_FC;
+ if (flowctrl & FLOW_CTRL_RX)
+ mcr |= MAC_MCR_FORCE_RX_FC;
+
+ netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+ flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
+ flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+ }
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
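
Instead of forcing both pause directions whenever the PHY reports pause, mtk_phy_link_adjust() now resolves the local and link-partner advertisements through mii_resolve_flowctrl_fdx(). A compilable model of that truth table, mirroring the kernel helper (802.3 Annex 28B) with local names:

    #include <stdio.h>

    #define ADV_PAUSE_CAP  (1 << 0)
    #define ADV_PAUSE_ASYM (1 << 1)
    #define FLOW_TX        (1 << 0)
    #define FLOW_RX        (1 << 1)

    static unsigned int resolve_flowctrl_fdx(unsigned int lcl, unsigned int rmt)
    {
        if (lcl & rmt & ADV_PAUSE_CAP)
            return FLOW_TX | FLOW_RX;  /* both ends symmetric-capable */
        if (lcl & rmt & ADV_PAUSE_ASYM) {
            if (lcl & ADV_PAUSE_CAP)
                return FLOW_RX;        /* asymmetric: RX only on this end */
            if (rmt & ADV_PAUSE_CAP)
                return FLOW_TX;        /* asymmetric: TX only on this end */
        }
        return 0;
    }

    int main(void)
    {
        unsigned int f = resolve_flowctrl_fdx(ADV_PAUSE_CAP | ADV_PAUSE_ASYM,
                                              ADV_PAUSE_ASYM);
        printf("tx %d rx %d\n", !!(f & FLOW_TX), !!(f & FLOW_RX));
        return 0;
    }
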
@@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
u32 val, ge_mode;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
+ if (!np && of_phy_is_fixed_link(mac->of_node))
+ if (!of_phy_register_fixed_link(mac->of_node))
+ np = of_node_get(mac->of_node);
if (!np)
return -ENODEV;
switch (of_get_phy_mode(np)) {
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
ge_mode = 0;
break;
@@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac)
mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0;
- mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+ mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(mac->phy_dev);
@@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
return 0;
err_free_bus:
- kfree(eth->mii_bus);
+ mdiobus_free(eth->mii_bus);
err_put_node:
of_node_put(mii_np);
@@ -295,27 +323,29 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
mdiobus_unregister(eth->mii_bus);
of_node_put(eth->mii_bus->dev.of_node);
- kfree(eth->mii_bus);
+ mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
{
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&eth->irq_lock, flags);
val = mtk_r32(eth, MTK_QDMA_INT_MASK);
mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
- /* flush write */
- mtk_r32(eth, MTK_QDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->irq_lock, flags);
}
static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
{
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&eth->irq_lock, flags);
val = mtk_r32(eth, MTK_QDMA_INT_MASK);
mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
- /* flush write */
- mtk_r32(eth, MTK_QDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->irq_lock, flags);
}
static int mtk_set_mac_address(struct net_device *dev, void *p)
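
The new eth->irq_lock exists because mask updates are a read-modify-write of a single shared register (MTK_QDMA_INT_MASK) and the split TX/RX NAPI handlers can now race on it; an unlocked RMW could silently drop the other context's enable or disable. A compilable model with a pthread mutex standing in for spin_lock_irqsave():

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int int_mask;     /* models MTK_QDMA_INT_MASK */

    static void irq_enable(unsigned int mask)
    {
        pthread_mutex_lock(&irq_lock);
        int_mask |= mask;             /* RMW, atomic w.r.t. irq_disable() */
        pthread_mutex_unlock(&irq_lock);
    }

    static void irq_disable(unsigned int mask)
    {
        pthread_mutex_lock(&irq_lock);
        int_mask &= ~mask;
        pthread_mutex_unlock(&irq_lock);
    }

    int main(void)
    {
        irq_enable(0x3);
        irq_disable(0x1);
        printf("mask %#x\n", int_mask); /* 0x2 */
        return 0;
    }
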
@@ -453,20 +483,23 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
/* the qdma core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
- dma_addr_t phy_ring_head, phy_ring_tail;
+ dma_addr_t phy_ring_tail;
int cnt = MTK_DMA_SIZE;
dma_addr_t dma_addr;
int i;
eth->scratch_ring = dma_alloc_coherent(eth->dev,
cnt * sizeof(struct mtk_tx_dma),
- &phy_ring_head,
+ &eth->phy_scratch_ring,
GFP_ATOMIC | __GFP_ZERO);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
GFP_KERNEL);
+ if (unlikely(!eth->scratch_head))
+ return -ENOMEM;
+
dma_addr = dma_map_single(eth->dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
@@ -474,19 +507,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
return -ENOMEM;
memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
- phy_ring_tail = phy_ring_head +
+ phy_ring_tail = eth->phy_scratch_ring +
(sizeof(struct mtk_tx_dma) * (cnt - 1));
for (i = 0; i < cnt; i++) {
eth->scratch_ring[i].txd1 =
(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (phy_ring_head +
+ eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
((i + 1) * sizeof(struct mtk_tx_dma)));
eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
}
- mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+ mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
@@ -643,7 +676,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
mtk_tx_unmap(&dev->dev, tx_buf);
@@ -673,6 +706,20 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
return nfrags;
}
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ if (netif_queue_stopped(eth->netdev[i]))
+ return 1;
+ }
+
+ return 0;
+}
+
static void mtk_wake_queue(struct mtk_eth *eth)
{
int i;
@@ -738,12 +785,9 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
goto drop;
- if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
+ if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
mtk_stop_queue(eth);
- if (unlikely(atomic_read(&ring->free_count) >
- ring->thresh))
- mtk_wake_queue(eth);
- }
+
spin_unlock_irqrestore(&eth->page_lock, flags);
return NETDEV_TX_OK;
@@ -756,7 +800,7 @@ drop:
}
static int mtk_poll_rx(struct napi_struct *napi, int budget,
- struct mtk_eth *eth, u32 rx_intr)
+ struct mtk_eth *eth)
{
struct mtk_rx_ring *ring = &eth->rx_ring;
int idx = ring->calc_idx;
@@ -798,6 +842,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
goto release_desc;
}
@@ -805,6 +850,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
put_page(virt_to_head_page(new_data));
+ netdev->stats.rx_dropped++;
goto release_desc;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
@@ -842,22 +888,22 @@ release_desc:
}
if (done < budget)
- mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
return done;
}
-static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_dma *desc;
struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
- int total = 0, done[MTK_MAX_DEVS];
+ unsigned int done[MTK_MAX_DEVS];
unsigned int bytes[MTK_MAX_DEVS];
u32 cpu, dma;
static int condition;
- int i;
+ int total = 0, i;
memset(done, 0, sizeof(done));
memset(bytes, 0, sizeof(bytes));
@@ -893,7 +939,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
}
mtk_tx_unmap(eth->dev, tx_buf);
- ring->last_free->txd2 = next_cpu;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -909,64 +954,82 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
total += done[i];
}
- /* read hw index again make sure no new tx packet */
- if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
- *tx_again = true;
- else
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
-
- if (!total)
- return 0;
-
- if (atomic_read(&ring->free_count) > ring->thresh)
+ if (mtk_queue_stopped(eth) &&
+ (atomic_read(&ring->free_count) > ring->thresh))
mtk_wake_queue(eth);
return total;
}
-static int mtk_poll(struct napi_struct *napi, int budget)
+static void mtk_handle_status_irq(struct mtk_eth *eth)
{
- struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
- u32 status, status2, mask, tx_intr, rx_intr, status_intr;
- int tx_done, rx_done;
- bool tx_again = false;
+ u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- status2 = mtk_r32(eth, MTK_INT_STATUS2);
- tx_intr = MTK_TX_DONE_INT;
- rx_intr = MTK_RX_DONE_INT;
- status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
- tx_done = 0;
- rx_done = 0;
- tx_again = 0;
+ if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
+ mtk_stats_update(eth);
+ mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
+ MTK_INT_STATUS2);
+ }
+}
- if (status & tx_intr)
- tx_done = mtk_poll_tx(eth, budget, &tx_again);
+static int mtk_napi_tx(struct napi_struct *napi, int budget)
+{
+ struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+ u32 status, mask;
+ int tx_done = 0;
- if (status & rx_intr)
- rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
+ mtk_handle_status_irq(eth);
+ mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+ tx_done = mtk_poll_tx(eth, budget);
- if (unlikely(status2 & status_intr)) {
- mtk_stats_update(eth);
- mtk_w32(eth, status_intr, MTK_INT_STATUS2);
+ if (unlikely(netif_msg_intr(eth))) {
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ dev_info(eth->dev,
+ "done tx %d, intr 0x%08x/0x%x\n",
+ tx_done, status, mask);
}
+ if (tx_done == budget)
+ return budget;
+
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ if (status & MTK_TX_DONE_INT)
+ return budget;
+
+ napi_complete(napi);
+ mtk_irq_enable(eth, MTK_TX_DONE_INT);
+
+ return tx_done;
+}
+
+static int mtk_napi_rx(struct napi_struct *napi, int budget)
+{
+ struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+ u32 status, mask;
+ int rx_done = 0;
+
+ mtk_handle_status_irq(eth);
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
+ rx_done = mtk_poll_rx(napi, budget, eth);
+
if (unlikely(netif_msg_intr(eth))) {
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
- netdev_info(eth->netdev[0],
- "done tx %d, rx %d, intr 0x%08x/0x%x\n",
- tx_done, rx_done, status, mask);
+ dev_info(eth->dev,
+ "done rx %d, intr 0x%08x/0x%x\n",
+ rx_done, status, mask);
}
- if (tx_again || rx_done == budget)
+ if (rx_done == budget)
return budget;
status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- if (status & (tx_intr | rx_intr))
+ if (status & MTK_RX_DONE_INT)
return budget;
napi_complete(napi);
- mtk_irq_enable(eth, tx_intr | rx_intr);
+ mtk_irq_enable(eth, MTK_RX_DONE_INT);
return rx_done;
}
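
Both new poll functions follow the standard NAPI contract: handle at most budget completions; return budget to stay in polled mode if it was exhausted or fresh work raced in; otherwise complete and re-arm only this ring's interrupt. A compilable model of that control flow (stand-in names, no real interrupt plumbing):

    #include <stdio.h>

    static int pending = 10;           /* completions queued by "hardware" */

    static int poll_hw(int budget)     /* models mtk_poll_rx()/mtk_poll_tx() */
    {
        int done = pending < budget ? pending : budget;

        pending -= done;
        return done;
    }

    static int hw_has_work(void) { return pending > 0; }

    static int napi_poll(int budget)
    {
        int done = poll_hw(budget);

        if (done == budget)
            return budget;             /* budget exhausted: poll again */
        if (hw_has_work())
            return budget;             /* raced with new completions */
        /* napi_complete(); re-enable only this ring's DONE interrupt */
        return done;
    }

    int main(void)
    {
        int done;

        while ((done = napi_poll(4)) == 4)
            printf("polled a full budget\n");
        printf("final poll handled %d\n", done);
        return 0;
    }
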
@@ -999,9 +1062,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
- ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
- ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
- MAX_SKB_FRAGS);
+ ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+ ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
* continue
@@ -1179,6 +1241,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
for (i = 0; i < MTK_MAC_COUNT; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
+ if (eth->scratch_ring) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+ eth->scratch_ring,
+ eth->phy_scratch_ring);
+ eth->scratch_ring = NULL;
+ eth->phy_scratch_ring = 0;
+ }
mtk_tx_clean(eth);
mtk_rx_clean(eth);
kfree(eth->scratch_head);
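
This teardown works only because mtk_init_fq_dma() now stores the bus address in eth->phy_scratch_ring: dma_free_coherent() needs the same (CPU address, DMA handle) pair the allocation returned, and previously the handle lived in a stack variable, so the scratch ring could never be freed. A small sketch of the bookkeeping (malloc stands in for the coherent allocator):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct eth {
        void     *scratch_ring;       /* CPU virtual address */
        uintptr_t phy_scratch_ring;   /* models the dma_addr_t handle */
    };

    static void *alloc_coherent(size_t sz, uintptr_t *handle)
    {
        void *cpu = malloc(sz);       /* stands in for dma_alloc_coherent */

        *handle = (uintptr_t)cpu;     /* real code receives a bus address */
        return cpu;
    }

    int main(void)
    {
        struct eth eth = { 0 };

        eth.scratch_ring = alloc_coherent(4096, &eth.phy_scratch_ring);
        /* ... ring in use ... */
        if (eth.scratch_ring) {
            free(eth.scratch_ring);   /* dma_free_coherent(cpu, handle) */
            eth.scratch_ring = NULL;
            eth.phy_scratch_ring = 0;
        }
        printf("scratch ring freed\n");
        return 0;
    }
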
@@ -1195,22 +1265,26 @@ static void mtk_tx_timeout(struct net_device *dev)
schedule_work(&eth->pending_work);
}
-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
- u32 status;
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- if (unlikely(!status))
- return IRQ_NONE;
+ if (likely(napi_schedule_prep(&eth->rx_napi))) {
+ __napi_schedule(&eth->rx_napi);
+ mtk_irq_disable(eth, MTK_RX_DONE_INT);
+ }
- if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
- if (likely(napi_schedule_prep(&eth->rx_napi)))
- __napi_schedule(&eth->rx_napi);
- } else {
- mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+
+ if (likely(napi_schedule_prep(&eth->tx_napi))) {
+ __napi_schedule(&eth->tx_napi);
+ mtk_irq_disable(eth, MTK_TX_DONE_INT);
}
- mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
return IRQ_HANDLED;
}
@@ -1223,7 +1297,7 @@ static void mtk_poll_controller(struct net_device *dev)
u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
mtk_irq_disable(eth, int_mask);
- mtk_handle_irq(dev->irq, dev);
+ mtk_handle_irq_rx(eth->irq[2], dev);
mtk_irq_enable(eth, int_mask);
}
#endif
@@ -1241,7 +1315,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
mtk_w32(eth,
MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
- MTK_RX_BT_32DWORDS,
+ MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
MTK_QDMA_GLO_CFG);
return 0;
@@ -1259,6 +1333,7 @@ static int mtk_open(struct net_device *dev)
if (err)
return err;
+ napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
}
@@ -1307,6 +1382,7 @@ static int mtk_stop(struct net_device *dev)
return 0;
mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
@@ -1344,7 +1420,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
- err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
+ err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ return err;
+ err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
dev_name(eth->dev), eth);
if (err)
return err;
@@ -1355,12 +1435,16 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ mtk_irq_disable(eth, ~0);
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
/* FE int grouping */
- mtk_w32(eth, 0, MTK_FE_INT_GRP);
+ mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
+ mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+ mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
for (i = 0; i < 2; i++) {
u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
@@ -1408,7 +1492,8 @@ static void mtk_uninit(struct net_device *dev)
phy_disconnect(mac->phy_dev);
mtk_mdio_cleanup(eth);
mtk_irq_disable(eth, ~0);
- free_irq(dev->irq, dev);
+ free_irq(eth->irq[1], dev);
+ free_irq(eth->irq[2], dev);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1669,7 +1754,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
- eth->netdev[id]->watchdog_timeo = HZ;
+ eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
@@ -1682,10 +1767,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
dev_err(eth->dev, "error bringing up device\n");
goto free_netdev;
}
- eth->netdev[id]->irq = eth->irq;
+ eth->netdev[id]->irq = eth->irq[0];
netif_info(eth, probe, eth->netdev[id],
"mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[id]->base_addr, eth->netdev[id]->irq);
+ eth->netdev[id]->base_addr, eth->irq[0]);
return 0;
@@ -1702,6 +1787,7 @@ static int mtk_probe(struct platform_device *pdev)
struct mtk_soc_data *soc;
struct mtk_eth *eth;
int err;
+ int i;
match = of_match_device(of_mtk_match, &pdev->dev);
soc = (struct mtk_soc_data *)match->data;
@@ -1715,6 +1801,7 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->base);
spin_lock_init(&eth->page_lock);
+ spin_lock_init(&eth->irq_lock);
eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"mediatek,ethsys");
@@ -1736,10 +1823,12 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->rstc);
}
- eth->irq = platform_get_irq(pdev, 0);
- if (eth->irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
- return -ENXIO;
+ for (i = 0; i < 3; i++) {
+ eth->irq[i] = platform_get_irq(pdev, i);
+ if (eth->irq[i] < 0) {
+ dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+ return -ENXIO;
+ }
}
eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
@@ -1780,7 +1869,9 @@ static int mtk_probe(struct platform_device *pdev)
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
+ netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
+ MTK_NAPI_WEIGHT);
+ netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
MTK_NAPI_WEIGHT);
platform_set_drvdata(pdev, eth);
@@ -1801,6 +1892,7 @@ static int mtk_remove(struct platform_device *pdev)
clk_disable_unprepare(eth->clk_gp1);
clk_disable_unprepare(eth->clk_gp2);
+ netif_napi_del(&eth->tx_napi);
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index eed626d56ea4..f82e3acb947b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -68,6 +68,10 @@
/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
+/* PDMA Interrupt grouping registers */
+#define MTK_PDMA_INT_GRP1 0xa50
+#define MTK_PDMA_INT_GRP2 0xa54
+
/* QDMA TX Queue Configuration Registers */
#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
#define QDMA_RES_THRES 4
@@ -91,6 +95,7 @@
#define MTK_QDMA_GLO_CFG 0x1A04
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
+#define MTK_NDP_CO_PRO BIT(10)
#define MTK_TX_WB_DDONE BIT(6)
#define MTK_DMA_SIZE_16DWORDS (2 << 4)
#define MTK_RX_DMA_BUSY BIT(3)
@@ -124,6 +129,11 @@
#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
+/* QDMA Interrupt grouping registers */
+#define MTK_QDMA_INT_GRP1 0x1a20
+#define MTK_QDMA_INT_GRP2 0x1a24
+#define MTK_RLS_DONE_INT BIT(0)
+
/* QDMA Interrupt Status Register */
#define MTK_QDMA_INT_MASK 0x1A1C
@@ -355,8 +365,10 @@ struct mtk_rx_ring {
* @dma_refcnt: track how many netdevs are using the DMA engine
* @tx_ring: Pointer to the memory holding info about the TX ring
* @rx_ring: Pointer to the memory holding info about the RX ring
- * @rx_napi: The NAPI struct
+ * @tx_napi: The TX NAPI struct
+ * @rx_napi: The RX NAPI struct
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
* @clk_ethif: The ethif clock
* @clk_esw: The switch clock
@@ -371,10 +383,11 @@ struct mtk_eth {
void __iomem *base;
struct reset_control *rstc;
spinlock_t page_lock;
+ spinlock_t irq_lock;
struct net_device dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
- int irq;
+ int irq[3];
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
@@ -382,8 +395,10 @@ struct mtk_eth {
atomic_t dma_refcnt;
struct mtk_tx_ring tx_ring;
struct mtk_rx_ring rx_ring;
+ struct napi_struct tx_napi;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
+ dma_addr_t phy_scratch_ring;
void *scratch_head;
struct clk *clk_ethif;
struct clk *clk_esw;
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 9ca3734ebb6b..5098e7f21987 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -24,13 +24,6 @@ config MLX4_EN_DCB
If unsure, set to Y
-config MLX4_EN_VXLAN
- bool "VXLAN offloads Support"
- default y
- depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m)
- ---help---
- Say Y here if you want to use VXLAN offloads in the driver.
-
config MLX4_CORE
tristate
depends on PCI
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e94ca1c3fc7c..f04a423ff79d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2597,7 +2597,6 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
priv->cmd.free_head = 0;
sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
- spin_lock_init(&priv->cmd.context_lock);
for (priv->cmd.token_mask = 1;
priv->cmd.token_mask < priv->cmd.max_cmds;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index f01918c63f28..99c6bbdff501 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -37,6 +37,11 @@
#include "mlx4_en.h"
#include "fw_qos.h"
+enum {
+ MLX4_CEE_STATE_DOWN = 0,
+ MLX4_CEE_STATE_UP = 1,
+};
+
/* Definitions for QCN
*/
@@ -80,13 +85,202 @@ struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
__be32 reserved3[4];
};
+static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = priv->cee_params.dcbx_cap;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 1 << mlx4_max_tc(priv->mdev->dev);
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ return 0;
+}
+
+static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ return priv->cee_params.dcb_cfg.pfc_state;
+}
+
+static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ priv->cee_params.dcb_cfg.pfc_state = state;
+}
+
+static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
+ u8 *setting)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ *setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc;
+}
+
+static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
+ u8 setting)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting;
+ priv->cee_params.dcb_cfg.pfc_state = true;
+}
+
+static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
+ return -EINVAL;
+
+ if (tcid == DCB_NUMTCS_ATTR_PFC)
+ *num = mlx4_max_tc(priv->mdev->dev);
+ else
+ *num = 0;
+
+ return 0;
+}
+
+static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg;
+ int err = 0;
+
+ if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return -EINVAL;
+
+ if (dcb_cfg->pfc_state) {
+ int tc;
+
+ priv->prof->rx_pause = 0;
+ priv->prof->tx_pause = 0;
+ for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
+ u8 tc_mask = 1 << tc;
+
+ switch (dcb_cfg->tc_config[tc].dcb_pfc) {
+ case pfc_disabled:
+ priv->prof->tx_ppp &= ~tc_mask;
+ priv->prof->rx_ppp &= ~tc_mask;
+ break;
+ case pfc_enabled_full:
+ priv->prof->tx_ppp |= tc_mask;
+ priv->prof->rx_ppp |= tc_mask;
+ break;
+ case pfc_enabled_tx:
+ priv->prof->tx_ppp |= tc_mask;
+ priv->prof->rx_ppp &= ~tc_mask;
+ break;
+ case pfc_enabled_rx:
+ priv->prof->tx_ppp &= ~tc_mask;
+ priv->prof->rx_ppp |= tc_mask;
+ break;
+ default:
+ break;
+ }
+ }
+ en_dbg(DRV, priv, "Set pfc on\n");
+ } else {
+ priv->prof->rx_pause = 1;
+ priv->prof->tx_pause = 1;
+ en_dbg(DRV, priv, "Set pfc off\n");
+ }
+
+ err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+ priv->rx_skb_size + ETH_FCS_LEN,
+ priv->prof->tx_pause,
+ priv->prof->tx_ppp,
+ priv->prof->rx_pause,
+ priv->prof->rx_ppp);
+ if (err)
+ en_err(priv, "Failed setting pause params\n");
+ return err;
+}
+
+static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED)
+ return MLX4_CEE_STATE_UP;
+
+ return MLX4_CEE_STATE_DOWN;
+}
+
+static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int num_tcs = 0;
+
+ if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 1;
+
+ if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
+ return 0;
+
+ if (state) {
+ priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
+ num_tcs = IEEE_8021QAZ_MAX_TCS;
+ } else {
+ priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
+ }
+
+ return mlx4_en_setup_tc(dev, num_tcs);
+}
+
+/* On success, returns a non-zero 802.1p user priority bitmap;
+ * otherwise returns 0 as the invalid user priority bitmap to
+ * indicate an error.
+ */
+static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+ if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 0;
+
+ return dcb_getapp(netdev, &app);
+}
+
+static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
+ u16 id, u8 up)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct dcb_app app;
+
+ if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return -EINVAL;
+
+ memset(&app, 0, sizeof(struct dcb_app));
+ app.selector = idtype;
+ app.protocol = id;
+ app.priority = up;
+
+ return dcb_setapp(netdev, &app);
+}
+
static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct ieee_ets *my_ets = &priv->ets;
- /* No IEEE PFC settings available */
if (!my_ets)
return -EINVAL;
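
The core of mlx4_en_dcbnl_set_all() above is folding the per-TC CEE PFC settings into the tx_ppp/rx_ppp priority bitmasks while global pause is switched off. A compilable model of that fold (enum values mirror the patch; the surrounding profile structure is a stand-in):

    #include <stdio.h>

    enum pfc_setting {
        pfc_disabled, pfc_enabled_full, pfc_enabled_tx, pfc_enabled_rx
    };
    #define MAX_PRIO 8

    int main(void)
    {
        enum pfc_setting tc_cfg[MAX_PRIO] = {
            [3] = pfc_enabled_full, [5] = pfc_enabled_tx,
        };
        unsigned int tx_ppp = 0, rx_ppp = 0;

        for (int tc = 0; tc < MAX_PRIO; tc++) {
            unsigned int m = 1u << tc;

            switch (tc_cfg[tc]) {
            case pfc_enabled_full: tx_ppp |= m;  rx_ppp |= m;  break;
            case pfc_enabled_tx:   tx_ppp |= m;  rx_ppp &= ~m; break;
            case pfc_enabled_rx:   tx_ppp &= ~m; rx_ppp |= m;  break;
            case pfc_disabled:     tx_ppp &= ~m; rx_ppp &= ~m; break;
            }
        }
        /* expect tx_ppp 0x28 (prios 3 and 5), rx_ppp 0x08 (prio 3) */
        printf("tx_ppp %#x rx_ppp %#x\n", tx_ppp, rx_ppp);
        return 0;
    }
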
@@ -237,18 +431,51 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
- return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return priv->cee_params.dcbx_cap;
}
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct ieee_ets ets = {0};
+ struct ieee_pfc pfc = {0};
+
+ if (mode == priv->cee_params.dcbx_cap)
+ return 0;
+
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
- (mode & DCB_CAP_DCBX_VER_CEE) ||
- !(mode & DCB_CAP_DCBX_VER_IEEE) ||
+ ((mode & DCB_CAP_DCBX_VER_IEEE) &&
+ (mode & DCB_CAP_DCBX_VER_CEE)) ||
!(mode & DCB_CAP_DCBX_HOST))
- return 1;
+ goto err;
+
+ priv->cee_params.dcbx_cap = mode;
+
+ ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
+ pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
+
+ if (mode & DCB_CAP_DCBX_VER_IEEE) {
+ if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
+ goto err;
+ if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
+ goto err;
+ } else if (mode & DCB_CAP_DCBX_VER_CEE) {
+ if (mlx4_en_dcbnl_set_all(dev))
+ goto err;
+ } else {
+ if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
+ goto err;
+ if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
+ goto err;
+ if (mlx4_en_setup_tc(dev, 0))
+ goto err;
+ }
return 0;
+err:
+ return 1;
}
#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
@@ -463,24 +690,46 @@ static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
}
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
- .ieee_getets = mlx4_en_dcbnl_ieee_getets,
- .ieee_setets = mlx4_en_dcbnl_ieee_setets,
- .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
- .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
- .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
- .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
+ .ieee_getets = mlx4_en_dcbnl_ieee_getets,
+ .ieee_setets = mlx4_en_dcbnl_ieee_setets,
+ .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
+ .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
+ .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
+ .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
+ .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
+ .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
+
+ .getstate = mlx4_en_dcbnl_get_state,
+ .setstate = mlx4_en_dcbnl_set_state,
+ .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
+ .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
+ .setall = mlx4_en_dcbnl_set_all,
+ .getcap = mlx4_en_dcbnl_getcap,
+ .getnumtcs = mlx4_en_dcbnl_getnumtcs,
+ .getpfcstate = mlx4_en_dcbnl_getpfcstate,
+ .setpfcstate = mlx4_en_dcbnl_setpfcstate,
+ .getapp = mlx4_en_dcbnl_getapp,
+ .setapp = mlx4_en_dcbnl_setapp,
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
- .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
- .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
- .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
};
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
+ .setstate = mlx4_en_dcbnl_set_state,
+ .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
+ .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
+ .setall = mlx4_en_dcbnl_set_all,
+ .getnumtcs = mlx4_en_dcbnl_getnumtcs,
+ .getpfcstate = mlx4_en_dcbnl_getpfcstate,
+ .setpfcstate = mlx4_en_dcbnl_setpfcstate,
+ .getapp = mlx4_en_dcbnl_getapp,
+ .setapp = mlx4_en_dcbnl_setapp,
+
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fc95affaf76b..51a2e8252b82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1107,7 +1107,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- return priv->rx_ring_num;
+ return rounddown_pow_of_two(priv->rx_ring_num);
}
static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
@@ -1141,19 +1141,17 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
u8 *hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rss_map *rss_map = &priv->rss_map;
- int rss_rings;
- size_t n = priv->rx_ring_num;
+ u32 n = mlx4_en_get_rxfh_indir_size(dev);
+ u32 i, rss_rings;
int err = 0;
- rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
- rss_rings = 1 << ilog2(rss_rings);
+ rss_rings = priv->prof->rss_rings ?: n;
+ rss_rings = rounddown_pow_of_two(rss_rings);
- while (n--) {
+ for (i = 0; i < n; i++) {
if (!ring_index)
break;
- ring_index[n] = rss_map->qps[n % rss_rings].qpn -
- rss_map->base_qpn;
+ ring_index[i] = i % rss_rings;
}
if (key)
memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
@@ -1166,6 +1164,7 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
const u8 *key, const u8 hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ u32 n = mlx4_en_get_rxfh_indir_size(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int port_up = 0;
int err = 0;
@@ -1175,18 +1174,18 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
/* Calculate RSS table size and make sure flows are spread evenly
* between rings
*/
- for (i = 0; i < priv->rx_ring_num; i++) {
+ for (i = 0; i < n; i++) {
if (!ring_index)
- continue;
+ break;
if (i > 0 && !ring_index[i] && !rss_rings)
rss_rings = i;
- if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
+ if (ring_index[i] != (i % (rss_rings ?: n)))
return -EINVAL;
}
if (!rss_rings)
- rss_rings = priv->rx_ring_num;
+ rss_rings = n;
/* RSS table size must be an order of 2 */
if (!is_power_of_2(rss_rings))
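
The ethtool hunks above change the reported indirection table to rounddown_pow_of_two(rx_ring_num) entries of i % rss_rings, rather than deriving it from QP numbers, and validate writes against the same shape. A compilable model with a local approximation of the kernel's rounddown_pow_of_two():

    #include <stdio.h>

    static unsigned int rounddown_pow_of_two(unsigned int n)
    {
        unsigned int p = 1;

        while (p <= n / 2)
            p *= 2;
        return p;
    }

    int main(void)
    {
        unsigned int rx_ring_num = 10;  /* not a power of two */
        unsigned int n = rounddown_pow_of_two(rx_ring_num); /* 8 entries */
        unsigned int rss_rings = 4;     /* must itself be a power of two */

        for (unsigned int i = 0; i < n; i++)
            printf("ring_index[%u] = %u\n", i, i % rss_rings);
        return 0;
    }
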
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 19ceced6736c..6083775dae16 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -67,6 +67,17 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
offset += priv->num_tx_rings_p_up;
}
+#ifdef CONFIG_MLX4_EN_DCB
+ if (!mlx4_is_slave(priv->mdev->dev)) {
+ if (up) {
+ priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
+ } else {
+ priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
+ priv->cee_params.dcb_cfg.pfc_state = false;
+ }
+ }
+#endif /* CONFIG_MLX4_EN_DCB */
+
return 0;
}
@@ -406,14 +417,18 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
- if (err)
+ if (err) {
en_err(priv, "Failed configuring VLAN filter\n");
+ goto out;
+ }
}
- if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
- en_dbg(HW, priv, "failed adding vlan %d\n", vid);
- mutex_unlock(&mdev->state_lock);
+ err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
+ if (err)
+ en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
- return 0;
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
@@ -421,7 +436,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int err;
+ int err = 0;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
@@ -438,7 +453,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
}
mutex_unlock(&mdev->state_lock);
- return 0;
+ return err;
}
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
@@ -1197,8 +1212,8 @@ static void mlx4_en_netpoll(struct net_device *dev)
struct mlx4_en_cq *cq;
int i;
- for (i = 0; i < priv->rx_ring_num; i++) {
- cq = priv->rx_cq[i];
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ cq = priv->tx_cq[i];
napi_schedule(&cq->napi);
}
}
@@ -1692,10 +1707,9 @@ int mlx4_en_start_port(struct net_device *dev)
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->rx_mode_task);
-#ifdef CONFIG_MLX4_EN_VXLAN
if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
- vxlan_get_rx_port(dev);
-#endif
+ udp_tunnel_get_rx_info(dev);
+
priv->port_up = true;
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -2032,11 +2046,20 @@ err:
return -ENOMEM;
}
+static void mlx4_en_shutdown(struct net_device *dev)
+{
+ rtnl_lock();
+ netif_device_detach(dev);
+ mlx4_en_close(dev);
+ rtnl_unlock();
+}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ bool shutdown = mdev->dev->persist->interface_state &
+ MLX4_INTERFACE_STATE_SHUTDOWN;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
@@ -2044,7 +2067,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
if (priv->registered) {
devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
priv->port));
- unregister_netdev(dev);
+ if (shutdown)
+ mlx4_en_shutdown(dev);
+ else
+ unregister_netdev(dev);
}
if (priv->allocated)
@@ -2069,7 +2095,8 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
kfree(priv->tx_ring);
kfree(priv->tx_cq);
- free_netdev(dev);
+ if (!shutdown)
+ free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
@@ -2342,7 +2369,6 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
return 0;
}
-#ifdef CONFIG_MLX4_EN_VXLAN
static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
{
int ret;
@@ -2392,15 +2418,19 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
}
static void mlx4_en_add_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ __be16 port = ti->port;
__be16 current_port;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ if (ti->sa_family != AF_INET)
return;
- if (sa_family == AF_INET6)
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return;
current_port = priv->vxlan_port;
@@ -2415,15 +2445,19 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
}
static void mlx4_en_del_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ __be16 port = ti->port;
__be16 current_port;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ if (ti->sa_family != AF_INET)
return;
- if (sa_family == AF_INET6)
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return;
current_port = priv->vxlan_port;
@@ -2447,13 +2481,17 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
* strip that feature if this is an IPv6 encapsulated frame.
*/
if (skb->encapsulation &&
- (skb->ip_summed == CHECKSUM_PARTIAL) &&
- (ip_hdr(skb)->version != 4))
- features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (!priv->vxlan_port ||
+ (ip_hdr(skb)->version != 4) ||
+ (udp_hdr(skb)->dest != priv->vxlan_port))
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
return features;
}
-#endif
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
{
@@ -2506,11 +2544,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
-#ifdef CONFIG_MLX4_EN_VXLAN
- .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
- .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
+ .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
-#endif
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
};
@@ -2544,11 +2580,9 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
-#ifdef CONFIG_MLX4_EN_VXLAN
- .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
- .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
+ .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
-#endif
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
};
@@ -2814,6 +2848,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_priv *priv;
int i;
int err;
+#ifdef CONFIG_MLX4_EN_DCB
+ struct tc_configuration *tc;
+#endif
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
MAX_TX_RINGS, MAX_RX_RINGS);
@@ -2839,10 +2876,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
-#ifdef CONFIG_MLX4_EN_VXLAN
INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
-#endif
#ifdef CONFIG_RFS_ACCEL
INIT_LIST_HEAD(&priv->filters);
spin_lock_init(&priv->filters_lock);
@@ -2882,6 +2917,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
+ priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE |
+ DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+ priv->flags |= MLX4_EN_DCB_ENABLED;
+ priv->cee_params.dcb_cfg.pfc_state = false;
+
+ for (i = 0; i < MLX4_EN_NUM_UP; i++) {
+ tc = &priv->cee_params.dcb_cfg.tc_config[i];
+ tc->dcb_pfc = pfc_disabled;
+ }
+
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
} else {
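
With the switch from ndo_add_vxlan_port/ndo_del_vxlan_port to ndo_udp_tunnel_add/ndo_udp_tunnel_del in the hunks above, the callbacks now receive a struct udp_tunnel_info and must filter on tunnel type and address family themselves. A minimal sketch of that pattern (the driver body is illustrative, not mlx4-specific):

static void example_udp_tunnel_add(struct net_device *dev,
				   struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;			/* this hardware offloads VXLAN only */
	if (ti->sa_family != AF_INET)
		return;			/* and only over IPv4 */
	/* program ti->port (a big-endian UDP port) into the device */
}
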
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e97094598b2d..f4497cf4d06d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1128,6 +1128,7 @@ int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_c
port_cap->max_pkeys = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
port_cap->max_vl = field & 0xf;
+ port_cap->max_tc_eth = field >> 4;
MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
port_cap->log_max_macs = field & 0xf;
port_cap->log_max_vlans = field >> 4;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 7ea258af636a..cdbd76f10ced 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -53,6 +53,7 @@ struct mlx4_port_cap {
int ib_mtu;
int max_port_width;
int max_vl;
+ int max_tc_eth;
int max_gids;
int max_pkeys;
u64 def_mac;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index dec77d6f0ac9..7ae1cdad9bf0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -147,7 +147,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
if (enable) {
dev->flags |= MLX4_FLAG_BONDED;
} else {
- ret = mlx4_virt2phy_port_map(dev, 1, 2);
+ ret = mlx4_virt2phy_port_map(dev, 1, 2);
if (ret) {
mlx4_err(dev, "Fail to reset port map\n");
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12c77a70abdb..75dd2e3d3059 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -292,6 +292,7 @@ static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
dev->caps.port_width_cap[port] = port_cap->max_port_width;
dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
+ dev->caps.max_tc_eth = port_cap->max_tc_eth;
dev->caps.def_mac[port] = port_cap->def_mac;
dev->caps.supported_type[port] = port_cap->supported_port_types;
dev->caps.suggested_type[port] = port_cap->suggested_type;
@@ -2599,7 +2600,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_init_uar_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
- return err;
+ return err;
}
err = mlx4_uar_alloc(dev, &priv->driver_uar);
@@ -3222,6 +3223,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
+ spin_lock_init(&priv->cmd.context_lock);
INIT_LIST_HEAD(&priv->bf_list);
mutex_init(&priv->bf_mutex);
@@ -4134,8 +4136,11 @@ static void mlx4_shutdown(struct pci_dev *pdev)
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
- if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
+ /* Notify mlx4 clients that the kernel is being shut down */
+ persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
mlx4_unload_one(pdev);
+ }
mutex_unlock(&persist->interface_state_mutex);
}
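
In mlx4_shutdown() above, MLX4_INTERFACE_STATE_SHUTDOWN is set under interface_state_mutex before mlx4_unload_one(), and mlx4_en_destroy_netdev() (earlier in this patch) reads it to choose a lightweight detach over a full unregister. A sketch of that consumer side, with the surrounding teardown elided (illustrative only; example_teardown is not part of this patch):

static void example_teardown(struct net_device *dev, struct mlx4_dev *mdev)
{
	bool shutdown = mdev->persist->interface_state &
			MLX4_INTERFACE_STATE_SHUTDOWN;

	if (shutdown)
		mlx4_en_shutdown(dev);	/* detach and close, keep the netdev */
	else
		unregister_netdev(dev);

	/* ... release rings and other private state ... */

	if (!shutdown)
		free_netdev(dev);	/* at shutdown the memory is deliberately left */
}
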
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f2d0920018a5..94b891c118c1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -618,8 +618,8 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
err = mlx4_READ_ENTRY(dev,
entry->index,
mailbox);
- if (err)
- goto out_mailbox;
+ if (err)
+ goto out_mailbox;
members_count =
be32_to_cpu(mgm->members_count) &
0xffffff;
@@ -657,8 +657,8 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
err = mlx4_WRITE_ENTRY(dev,
entry->index,
mailbox);
- if (err)
- goto out_mailbox;
+ if (err)
+ goto out_mailbox;
}
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 467d47ed2c39..d39bf594abe4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -448,6 +448,27 @@ struct mlx4_en_frag_info {
#define MLX4_EN_TC_ETS 7
+enum dcb_pfc_type {
+ pfc_disabled = 0,
+ pfc_enabled_full,
+ pfc_enabled_tx,
+ pfc_enabled_rx
+};
+
+struct tc_configuration {
+ enum dcb_pfc_type dcb_pfc;
+};
+
+struct mlx4_en_cee_config {
+ bool pfc_state;
+ struct tc_configuration tc_config[MLX4_EN_NUM_UP];
+};
+
+struct mlx4_en_cee_params {
+ u8 dcbx_cap;
+ struct mlx4_en_cee_config dcb_cfg;
+};
+
#endif
struct ethtool_flow_id {
@@ -467,6 +488,9 @@ enum {
MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4),
MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5),
+#ifdef CONFIG_MLX4_EN_DCB
+ MLX4_EN_FLAG_DCB_ENABLED = (1 << 6),
+#endif
};
#define PORT_BEACON_MAX_LIMIT (65535)
@@ -545,10 +569,8 @@ struct mlx4_en_priv {
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
-#ifdef CONFIG_MLX4_EN_VXLAN
struct work_struct vxlan_add_task;
struct work_struct vxlan_del_task;
-#endif
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_counter_stats pf_stats;
@@ -570,9 +592,11 @@ struct mlx4_en_priv {
u32 counter_index;
#ifdef CONFIG_MLX4_EN_DCB
+#define MLX4_EN_DCB_ENABLED 0x3
struct ieee_ets ets;
u16 maxrate[IEEE_8021QAZ_MAX_TCS];
enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
+ struct mlx4_en_cee_params cee_params;
#endif
#ifdef CONFIG_RFS_ACCEL
spinlock_t filters_lock;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 93195191f45b..395b5463cfd9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -248,7 +248,7 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
offset, order);
return;
}
- __mlx4_free_mtt_range(dev, offset, order);
+ __mlx4_free_mtt_range(dev, offset, order);
}
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 087b23b320cb..3d2095e5c61c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -52,6 +52,7 @@
#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
#define MLX4_IGNORE_FCS_MASK 0x1
+#define MLNX4_TX_MAX_NUMBER 8
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
@@ -2015,3 +2016,14 @@ out:
return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);
+
+int mlx4_max_tc(struct mlx4_dev *dev)
+{
+ u8 num_tc = dev->caps.max_tc_eth;
+
+ if (!num_tc)
+ num_tc = MLNX4_TX_MAX_NUMBER;
+
+ return num_tc;
+}
+EXPORT_SYMBOL(mlx4_max_tc);
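
mlx4_max_tc() returns the firmware-reported max_tc_eth (queried in fw.c above) and falls back to MLNX4_TX_MAX_NUMBER (8) when the firmware reports zero. A hypothetical caller might use it to validate a requested TC count (example_validate_tc is illustrative, not part of this patch):

static int example_validate_tc(struct mlx4_dev *dev, u8 requested_tc)
{
	if (requested_tc > mlx4_max_tc(dev))
		return -EINVAL;
	return 0;
}
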
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index cd9b2b28df88..8b81114bdc72 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2372,16 +2372,15 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
__mlx4_mpt_release(dev, index);
break;
case RES_OP_MAP_ICM:
- index = get_param_l(&in_param);
- id = index & mpt_mask(dev);
- err = mr_res_start_move_to(dev, slave, id,
- RES_MPT_RESERVED, &mpt);
- if (err)
- return err;
-
- __mlx4_mpt_free_icm(dev, mpt->key);
- res_end_move(dev, slave, RES_MPT, id);
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_RESERVED, &mpt);
+ if (err)
return err;
+
+ __mlx4_mpt_free_icm(dev, mpt->key);
+ res_end_move(dev, slave, RES_MPT, id);
break;
default:
err = -EINVAL;
@@ -4253,9 +4252,8 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
!(dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
- mlx4_warn(dev,
- "Src check LB for slave %d isn't supported\n",
- slave);
+ mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
+ slave);
return -ENOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 1cf722eba607..aae46884bf93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,6 +4,7 @@
config MLX5_CORE
tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
+ depends on MAY_USE_DEVLINK
depends on PCI
default n
---help---
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9ea7b583096a..05cc1effc13c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -1,11 +1,13 @@
obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
- health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
- mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fs_counters.o
+ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
+ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
+ fs_counters.o rl.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
- en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
- en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
+ en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
+ en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
+ en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index dcd2df6518de..0b4986268cc9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -545,6 +545,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
default: return "unknown command opcode";
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e8a6c3325b39..1365cdc81838 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -44,6 +44,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/rhashtable.h>
+#include <net/switchdev.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
@@ -79,6 +80,7 @@
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
@@ -88,6 +90,7 @@
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
+#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_BF_BUDGET 16
@@ -143,11 +146,32 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_data_seg data;
};
+static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
+ "rx_cqe_moder",
+};
+
+enum mlx5e_priv_flag {
+ MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
+};
+
+#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \
+ do { \
+ if (enable) \
+ priv->pflags |= pflag; \
+ else \
+ priv->pflags &= ~pflag; \
+ } while (0)
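+/* Usage: MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, en)
+ * sets or clears the matching bit in priv->pflags; bit 0 pairs with the
+ * "rx_cqe_moder" string exposed through ETH_SS_PRIV_FLAGS.
+ */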
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
#endif
+struct mlx5e_cq_moder {
+ u16 usec;
+ u16 pkts;
+};
+
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@ -156,12 +180,11 @@ struct mlx5e_params {
u8 log_rq_size;
u16 num_channels;
u8 num_tc;
+ u8 rx_cq_period_mode;
bool rx_cqe_compress_admin;
bool rx_cqe_compress;
- u16 rx_cq_moderation_usec;
- u16 rx_cq_moderation_pkts;
- u16 tx_cq_moderation_usec;
- u16 tx_cq_moderation_pkts;
+ struct mlx5e_cq_moder rx_cq_moderation;
+ struct mlx5e_cq_moder tx_cq_moderation;
u16 min_rx_wqes;
bool lro_en;
u32 lro_wqe_sz;
@@ -173,6 +196,7 @@ struct mlx5e_params {
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct ieee_ets ets;
#endif
+ bool rx_am_enabled;
};
struct mlx5e_tstamp {
@@ -191,6 +215,7 @@ struct mlx5e_tstamp {
enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
+ MLX5E_RQ_STATE_AM,
};
struct mlx5e_cq {
@@ -198,6 +223,7 @@ struct mlx5e_cq {
struct mlx5_cqwq wq;
/* data path - accessed per napi poll */
+ u16 event_ctr;
struct napi_struct *napi;
struct mlx5_core_cq mcq;
struct mlx5e_channel *channel;
@@ -225,6 +251,30 @@ struct mlx5e_dma_info {
dma_addr_t addr;
};
+struct mlx5e_rx_am_stats {
+ int ppms; /* packets per msec */
+ int epms; /* events per msec */
+};
+
+struct mlx5e_rx_am_sample {
+ ktime_t time;
+ unsigned int pkt_ctr;
+ u16 event_ctr;
+};
+
+struct mlx5e_rx_am { /* Adaptive Moderation */
+ u8 state;
+ struct mlx5e_rx_am_stats prev_stats;
+ struct mlx5e_rx_am_sample start_sample;
+ struct work_struct work;
+ u8 profile_ix;
+ u8 mode;
+ u8 tune_state;
+ u8 steps_right;
+ u8 steps_left;
+ u8 tired;
+};
+
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
@@ -245,6 +295,8 @@ struct mlx5e_rq {
unsigned long state;
int ix;
+ struct mlx5e_rx_am am; /* Adaptive Moderation */
+
/* control */
struct mlx5_wq_ctrl wq_ctrl;
u8 wq_type;
@@ -354,6 +406,7 @@ struct mlx5e_sq {
struct mlx5e_channel *channel;
int tc;
struct mlx5e_ico_wqe_info *ico_wqe_info;
+ u32 rate_limit;
} ____cacheline_aligned_in_smp;
static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -401,7 +454,7 @@ enum mlx5e_traffic_types {
};
enum {
- MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+ MLX5E_STATE_ASYNC_EVENTS_ENABLED,
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
};
@@ -491,8 +544,24 @@ enum {
MLX5E_ARFS_FT_LEVEL
};
+struct mlx5e_ethtool_table {
+ struct mlx5_flow_table *ft;
+ int num_rules;
+};
+
+#define ETHTOOL_NUM_L3_L4_FTS 7
+#define ETHTOOL_NUM_L2_FTS 4
+
+struct mlx5e_ethtool_steering {
+ struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
+ struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
+ struct list_head rules;
+ int tot_num_rules;
+};
+
struct mlx5e_flow_steering {
struct mlx5_flow_namespace *ns;
+ struct mlx5e_ethtool_steering ethtool;
struct mlx5e_tc_table tc;
struct mlx5e_vlan_table vlan;
struct mlx5e_l2_table l2;
@@ -500,9 +569,15 @@ struct mlx5e_flow_steering {
struct mlx5e_arfs_tables arfs;
};
-struct mlx5e_direct_tir {
- u32 tirn;
+struct mlx5e_rqt {
u32 rqtn;
+ bool enabled;
+};
+
+struct mlx5e_tir {
+ u32 tirn;
+ struct mlx5e_rqt rqt;
+ struct list_head list;
};
enum {
@@ -510,6 +585,22 @@ enum {
MLX5E_NIC_PRIO
};
+struct mlx5e_profile {
+ void (*init)(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile, void *ppriv);
+ void (*cleanup)(struct mlx5e_priv *priv);
+ int (*init_rx)(struct mlx5e_priv *priv);
+ void (*cleanup_rx)(struct mlx5e_priv *priv);
+ int (*init_tx)(struct mlx5e_priv *priv);
+ void (*cleanup_tx)(struct mlx5e_priv *priv);
+ void (*enable)(struct mlx5e_priv *priv);
+ void (*disable)(struct mlx5e_priv *priv);
+ void (*update_stats)(struct mlx5e_priv *priv);
+ int (*max_nch)(struct mlx5_core_dev *mdev);
+ int max_tc;
+};
+
struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_sq **txq_to_sq_map;
@@ -518,18 +609,15 @@ struct mlx5e_priv {
unsigned long state;
struct mutex state_lock; /* Protects Interface state */
- struct mlx5_uar cq_uar;
- u32 pdn;
- u32 tdn;
- struct mlx5_core_mkey mkey;
struct mlx5_core_mkey umr_mkey;
struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel;
u32 tisn[MLX5E_MAX_NUM_TC];
- u32 indir_rqtn;
- u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
- struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_rqt indir_rqt;
+ struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
+ u32 tx_rates[MLX5E_MAX_NUM_SQS];
struct mlx5e_flow_steering fs;
struct mlx5e_vxlan_db vxlan;
@@ -540,11 +628,14 @@ struct mlx5e_priv {
struct work_struct set_rx_mode_work;
struct delayed_work update_stats_work;
+ u32 pflags;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_stats stats;
struct mlx5e_tstamp tstamp;
u16 q_counter;
+ const struct mlx5e_profile *profile;
+ void *ppriv;
};
enum mlx5e_link_mode {
@@ -562,6 +653,7 @@ enum mlx5e_link_mode {
MLX5E_10GBASE_ER = 14,
MLX5E_40GBASE_SR4 = 15,
MLX5E_40GBASE_LR4 = 16,
+ MLX5E_50GBASE_SR2 = 18,
MLX5E_100GBASE_CR4 = 20,
MLX5E_100GBASE_SR4 = 21,
MLX5E_100GBASE_KR4 = 22,
@@ -579,6 +671,9 @@ enum mlx5e_link_mode {
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
+void mlx5e_build_ptys2ethtool_map(void);
+
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
@@ -612,12 +707,26 @@ void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+void mlx5e_rx_am(struct mlx5e_rq *rq);
+void mlx5e_rx_am_work(struct work_struct *work);
+struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
+
void mlx5e_update_stats(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
+int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+ int location);
+int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
+int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs);
+int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
+ int location);
+void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
@@ -647,6 +756,9 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
int num_channels);
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
+ u8 cq_period_mode);
+
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
{
@@ -723,5 +835,39 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
#endif
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
+int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir, u32 *in, int inlen);
+void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir);
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
+int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev);
+
+struct mlx5_eswitch_rep;
+int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep);
+void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
+void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
+int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
+
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
+void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
+int mlx5e_create_tises(struct mlx5e_priv *priv);
+void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
+int mlx5e_close(struct net_device *netdev);
+int mlx5e_open(struct net_device *netdev);
+void mlx5e_update_stats_work(struct work_struct *work);
+void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile, void *ppriv);
+void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
+struct rtnl_link_stats64 *
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
#endif /* __MLX5_EN_H__ */
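
The new struct mlx5e_profile (declared above) factors netdev construction so the same core can drive both the regular NIC netdev and eswitch representors (en_rep.o, added in the Makefile). A sketch of one plausible ordering of the callbacks, assuming an illustrative example_profile_attach() helper; the real mlx5e_create_netdev() may sequence these differently:

static int example_profile_attach(struct mlx5_core_dev *mdev,
				  struct net_device *netdev,
				  const struct mlx5e_profile *profile,
				  void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	profile->init(mdev, netdev, profile, ppriv);

	err = profile->init_tx(priv);
	if (err)
		goto err_cleanup;

	err = profile->init_rx(priv);
	if (err)
		goto err_cleanup_tx;

	if (profile->enable)
		profile->enable(priv);

	return 0;

err_cleanup_tx:
	profile->cleanup_tx(priv);
err_cleanup:
	profile->cleanup(priv);
	return err;
}
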
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 3515e78ba68f..a8cb38789774 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -93,14 +93,14 @@ static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
static int arfs_disable(struct mlx5e_priv *priv)
{
struct mlx5_flow_destination dest;
- u32 *tirn = priv->indir_tirn;
+ struct mlx5e_tir *tir = priv->indir_tir;
int err = 0;
int tt;
int i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- dest.tir_num = tirn[i];
+ dest.tir_num = tir[i].tirn;
tt = arfs_get_tt(i);
/* Modify ttc rules destination to bypass the aRFS tables*/
err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
@@ -175,15 +175,12 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
struct mlx5_flow_destination dest;
- u8 match_criteria_enable = 0;
- u32 *tirn = priv->indir_tirn;
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5e_tir *tir = priv->indir_tir;
+ struct mlx5_flow_spec *spec;
int err = 0;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
@@ -192,24 +189,23 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
switch (type) {
case ARFS_IPV4_TCP:
- dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
+ dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn;
break;
case ARFS_IPV4_UDP:
- dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
+ dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn;
break;
case ARFS_IPV6_TCP:
- dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
+ dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn;
break;
case ARFS_IPV6_UDP:
- dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
+ dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn;
break;
default:
err = -EINVAL;
goto out;
}
- arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable,
- match_criteria, match_value,
+ arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
@@ -220,8 +216,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
__func__, type);
}
out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return err;
}
@@ -475,23 +470,20 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
struct mlx5_flow_rule *rule = NULL;
struct mlx5_flow_destination dest;
struct arfs_table *arfs_table;
- u8 match_criteria_enable = 0;
+ struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft;
- u32 *match_criteria;
- u32 *match_value;
int err = 0;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.ethertype);
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
ntohs(tuple->etype));
arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
if (!arfs_table) {
@@ -501,59 +493,58 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
ft = arfs_table->ft.t;
if (tuple->ip_proto == IPPROTO_TCP) {
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.tcp_dport);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.tcp_sport);
- MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport,
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
ntohs(tuple->dst_port));
- MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport,
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
ntohs(tuple->src_port));
} else {
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.udp_dport);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.udp_sport);
- MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport,
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
ntohs(tuple->dst_port));
- MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport,
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
ntohs(tuple->src_port));
}
if (tuple->etype == htons(ETH_P_IP)) {
- memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
&tuple->src_ipv4,
4);
- memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&tuple->dst_ipv4,
4);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
} else {
- memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
&tuple->src_ipv6,
16);
- memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&tuple->dst_ipv6,
16);
- memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
0xff,
16);
- memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
0xff,
16);
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
- rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
- match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
if (IS_ERR(rule)) {
@@ -563,8 +554,7 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
}
out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return err ? ERR_PTR(err) : rule;
}
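
The aRFS conversion above is part of a wider change collapsing the (match_criteria_enable, match_criteria, match_value) triple into a single struct mlx5_flow_spec, which mlx5_add_flow_rule() now takes directly. A self-contained sketch of the resulting pattern; the rule built here (TCP destination port steered to a TIR) is illustrative:

static struct mlx5_flow_rule *
example_add_tcp_dport_rule(struct mlx5_flow_table *ft, __be16 dport, u32 tirn)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_rule *rule;
	struct mlx5_flow_spec *spec;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec)
		return ERR_PTR(-ENOMEM);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.tcp_dport);
	MLX5_SET(fte_match_param, spec->match_value,
		 outer_headers.tcp_dport, ntohs(dport));

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				  MLX5_FS_DEFAULT_FLOW_TAG, &dest);
	kvfree(spec);	/* freed after the call, as the aRFS code above does */
	return rule;
}
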
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
new file mode 100644
index 000000000000..673043ccd76c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+/* mlx5e global resources should be placed in this file.
+ * Global resources are common to all the netdevices created on the same NIC.
+ */
+
+int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir, u32 *in, int inlen)
+{
+ int err;
+
+ err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn);
+ if (err)
+ return err;
+
+ list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+
+ return 0;
+}
+
+void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir)
+{
+ mlx5_core_destroy_tir(mdev, tir->tirn);
+ list_del(&tir->list);
+}
+
+static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+ struct mlx5_core_mkey *mkey)
+{
+ struct mlx5_create_mkey_mbox_in *in;
+ int err;
+
+ in = mlx5_vzalloc(sizeof(*in));
+ if (!in)
+ return -ENOMEM;
+
+ in->seg.flags = MLX5_PERM_LOCAL_WRITE |
+ MLX5_PERM_LOCAL_READ |
+ MLX5_ACCESS_MODE_PA;
+ in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+ err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
+ NULL);
+
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_resources *res = &mdev->mlx5e_res;
+ int err;
+
+ err = mlx5_alloc_map_uar(mdev, &res->cq_uar, false);
+ if (err) {
+ mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_core_alloc_pd(mdev, &res->pdn);
+ if (err) {
+ mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
+ goto err_unmap_free_uar;
+ }
+
+ err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn);
+ if (err) {
+ mlx5_core_err(mdev, "alloc td failed, %d\n", err);
+ goto err_dealloc_pd;
+ }
+
+ err = mlx5e_create_mkey(mdev, res->pdn, &res->mkey);
+ if (err) {
+ mlx5_core_err(mdev, "create mkey failed, %d\n", err);
+ goto err_dealloc_transport_domain;
+ }
+
+ INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+
+ return 0;
+
+err_dealloc_transport_domain:
+ mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(mdev, res->pdn);
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &res->cq_uar);
+
+ return err;
+}
+
+void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_resources *res = &mdev->mlx5e_res;
+
+ mlx5_core_destroy_mkey(mdev, &res->mkey);
+ mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
+ mlx5_core_dealloc_pd(mdev, res->pdn);
+ mlx5_unmap_free_uar(mdev, &res->cq_uar);
+}
+
+int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_tir *tir;
+ void *in;
+ int inlen;
+ int err = 0; /* stays 0 if tirs_list is empty */
+
+ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+
+ list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
+ err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
+ if (err)
+ break; /* fall through to kvfree(); returning here would leak "in" */
+ }
+
+ kvfree(in);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index b2db180ae2a5..e6883132b555 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -191,7 +191,6 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
- enum mlx5_port_status ps;
u8 curr_pfc_en;
int ret;
@@ -200,14 +199,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
if (pfc->pfc_en == curr_pfc_en)
return 0;
- mlx5_query_port_admin_status(mdev, &ps);
- if (ps == MLX5_PORT_UP)
- mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
-
ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
-
- if (ps == MLX5_PORT_UP)
- mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
+ mlx5_toggle_port_link(mdev);
return ret;
}
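
mlx5_toggle_port_link() replaces the open-coded down/up sequence removed above; its definition is not part of this hunk. Based on the sequence it replaces, a plausible shape is:

static void example_toggle_port_link(struct mlx5_core_dev *mdev)
{
	enum mlx5_port_status ps;

	mlx5_query_port_admin_status(mdev, &ps);
	if (ps == MLX5_PORT_UP) {
		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
		mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
	}
}
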
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index fc7dcc03b1de..4a3757e60441 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -48,123 +48,85 @@ static void mlx5e_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
}
-static const struct {
- u32 supported;
- u32 advertised;
+struct ptys2ethtool_config {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
u32 speed;
-} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
- [MLX5E_1000BASE_CX_SGMII] = {
- .supported = SUPPORTED_1000baseKX_Full,
- .advertised = ADVERTISED_1000baseKX_Full,
- .speed = 1000,
- },
- [MLX5E_1000BASE_KX] = {
- .supported = SUPPORTED_1000baseKX_Full,
- .advertised = ADVERTISED_1000baseKX_Full,
- .speed = 1000,
- },
- [MLX5E_10GBASE_CX4] = {
- .supported = SUPPORTED_10000baseKX4_Full,
- .advertised = ADVERTISED_10000baseKX4_Full,
- .speed = 10000,
- },
- [MLX5E_10GBASE_KX4] = {
- .supported = SUPPORTED_10000baseKX4_Full,
- .advertised = ADVERTISED_10000baseKX4_Full,
- .speed = 10000,
- },
- [MLX5E_10GBASE_KR] = {
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- [MLX5E_20GBASE_KR2] = {
- .supported = SUPPORTED_20000baseKR2_Full,
- .advertised = ADVERTISED_20000baseKR2_Full,
- .speed = 20000,
- },
- [MLX5E_40GBASE_CR4] = {
- .supported = SUPPORTED_40000baseCR4_Full,
- .advertised = ADVERTISED_40000baseCR4_Full,
- .speed = 40000,
- },
- [MLX5E_40GBASE_KR4] = {
- .supported = SUPPORTED_40000baseKR4_Full,
- .advertised = ADVERTISED_40000baseKR4_Full,
- .speed = 40000,
- },
- [MLX5E_56GBASE_R4] = {
- .supported = SUPPORTED_56000baseKR4_Full,
- .advertised = ADVERTISED_56000baseKR4_Full,
- .speed = 56000,
- },
- [MLX5E_10GBASE_CR] = {
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- [MLX5E_10GBASE_SR] = {
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- [MLX5E_10GBASE_ER] = {
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- [MLX5E_40GBASE_SR4] = {
- .supported = SUPPORTED_40000baseSR4_Full,
- .advertised = ADVERTISED_40000baseSR4_Full,
- .speed = 40000,
- },
- [MLX5E_40GBASE_LR4] = {
- .supported = SUPPORTED_40000baseLR4_Full,
- .advertised = ADVERTISED_40000baseLR4_Full,
- .speed = 40000,
- },
- [MLX5E_100GBASE_CR4] = {
- .speed = 100000,
- },
- [MLX5E_100GBASE_SR4] = {
- .speed = 100000,
- },
- [MLX5E_100GBASE_KR4] = {
- .speed = 100000,
- },
- [MLX5E_100GBASE_LR4] = {
- .speed = 100000,
- },
- [MLX5E_100BASE_TX] = {
- .speed = 100,
- },
- [MLX5E_1000BASE_T] = {
- .supported = SUPPORTED_1000baseT_Full,
- .advertised = ADVERTISED_1000baseT_Full,
- .speed = 1000,
- },
- [MLX5E_10GBASE_T] = {
- .supported = SUPPORTED_10000baseT_Full,
- .advertised = ADVERTISED_10000baseT_Full,
- .speed = 1000,
- },
- [MLX5E_25GBASE_CR] = {
- .speed = 25000,
- },
- [MLX5E_25GBASE_KR] = {
- .speed = 25000,
- },
- [MLX5E_25GBASE_SR] = {
- .speed = 25000,
- },
- [MLX5E_50GBASE_CR2] = {
- .speed = 50000,
- },
- [MLX5E_50GBASE_KR2] = {
- .speed = 50000,
- },
};
+static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER];
+
+#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \
+ ({ \
+ struct ptys2ethtool_config *cfg; \
+ const unsigned int modes[] = { __VA_ARGS__ }; \
+ unsigned int i; \
+ cfg = &ptys2ethtool_table[reg_]; \
+ cfg->speed = speed_; \
+ bitmap_zero(cfg->supported, \
+ __ETHTOOL_LINK_MODE_MASK_NBITS); \
+ bitmap_zero(cfg->advertised, \
+ __ETHTOOL_LINK_MODE_MASK_NBITS); \
+ for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
+ __set_bit(modes[i], cfg->supported); \
+ __set_bit(modes[i], cfg->advertised); \
+ } \
+ })
+
+void mlx5e_build_ptys2ethtool_map(void)
+{
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000,
+ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000,
+ ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
+}
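+/* The supported/advertised masks are __ETHTOOL_DECLARE_LINK_MODE_MASK
+ * bitmaps rather than u32 flag words, so the table can no longer be a
+ * static initializer: the macro above zeroes both bitmaps and then sets
+ * one bit per listed ETHTOOL_LINK_MODE_*_BIT in each at run time.
+ */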
+
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -177,6 +139,18 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
return err ? 0 : pfc_en_tx | pfc_en_rx;
}
+static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 rx_pause;
+ u32 tx_pause;
+ int err;
+
+ err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
+
+ return err ? false : rx_pause | tx_pause;
+}
+
#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
#define MLX5E_NUM_RQ_STATS(priv) \
(NUM_RQ_STATS * priv->params.num_channels * \
@@ -184,7 +158,9 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
#define MLX5E_NUM_SQ_STATS(priv) \
(NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
test_bit(MLX5E_STATE_OPENED, &priv->state))
-#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv))
+#define MLX5E_NUM_PFC_COUNTERS(priv) \
+ ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS)
static int mlx5e_get_sset_count(struct net_device *dev, int sset)
{
@@ -198,6 +174,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
MLX5E_NUM_RQ_STATS(priv) +
MLX5E_NUM_SQ_STATS(priv) +
MLX5E_NUM_PFC_COUNTERS(priv);
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(mlx5e_priv_flags);
/* fallthrough */
default:
return -EOPNOTSUPP;
@@ -211,42 +189,51 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
/* SW counters */
for (i = 0; i < NUM_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name);
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
/* Q counters */
for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name);
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
/* VPORT counters */
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_stats_desc[i].name);
+ vport_stats_desc[i].format);
/* PPORT counters */
for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_802_3_stats_desc[i].name);
+ pport_802_3_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2863_stats_desc[i].name);
+ pport_2863_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2819_stats_desc[i].name);
+ pport_2819_stats_desc[i].format);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
- prio,
- pport_per_prio_traffic_stats_desc[i].name);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_traffic_stats_desc[i].format, prio);
}
pfc_combined = mlx5e_query_pfc_combined(priv);
for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
- prio, pport_per_prio_pfc_stats_desc[i].name);
+ char pfc_string[ETH_GSTRING_LEN];
+
+ snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, pfc_string);
+ }
+ }
+
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, "global");
}
}
@@ -256,25 +243,27 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i,
- rq_stats_desc[j].name);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_stats_desc[j].format, i);
for (tc = 0; tc < priv->params.num_tc; tc++)
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
- "tx%d_%s",
- priv->channeltc_to_txq_map[i][tc],
- sq_stats_desc[j].name);
+ sq_stats_desc[j].format,
+ priv->channeltc_to_txq_map[i][tc]);
}
static void mlx5e_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int i;
switch (stringset) {
case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++)
+ strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]);
break;
case ETH_SS_TEST:
@@ -339,6 +328,13 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
}
}
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
+ pport_per_prio_pfc_stats_desc, 0);
+ }
+ }
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return;
@@ -519,10 +515,11 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -ENOTSUPP;
- coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
- coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
- coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
- coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts;
+ coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
+ coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
+ coal->tx_coalesce_usecs = priv->params.tx_cq_moderation.usec;
+ coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts;
+ coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
return 0;
}
@@ -533,6 +530,10 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channel *c;
+ bool restart =
+ !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled;
+ bool was_opened;
+ int err = 0;
int tc;
int i;
@@ -540,12 +541,19 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
return -ENOTSUPP;
mutex_lock(&priv->state_lock);
- priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
- priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
- priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
- priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened && restart) {
+ mlx5e_close_locked(netdev);
+ priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+ }
+
+ priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
+ priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
+ priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
+ priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
+
+ if (!was_opened || restart)
goto out;
for (i = 0; i < priv->params.num_channels; ++i) {
@@ -564,35 +572,37 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
}
out:
+ if (was_opened && restart)
+ err = mlx5e_open_locked(netdev);
+
mutex_unlock(&priv->state_lock);
- return 0;
+ return err;
}
-static u32 ptys2ethtool_supported_link(u32 eth_proto_cap)
+static void ptys2ethtool_supported_link(unsigned long *supported_modes,
+ u32 eth_proto_cap)
{
- int i;
- u32 supported_modes = 0;
+ int proto;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (eth_proto_cap & MLX5E_PROT_MASK(i))
- supported_modes |= ptys2ethtool_table[i].supported;
- }
- return supported_modes;
+ for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+ bitmap_or(supported_modes, supported_modes,
+ ptys2ethtool_table[proto].supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static u32 ptys2ethtool_adver_link(u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+ u32 eth_proto_cap)
{
- int i;
- u32 advertising_modes = 0;
+ int proto;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (eth_proto_cap & MLX5E_PROT_MASK(i))
- advertising_modes |= ptys2ethtool_table[i].advertised;
- }
- return advertising_modes;
+ for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+ bitmap_or(advertising_modes, advertising_modes,
+ ptys2ethtool_table[proto].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
+static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings,
+ u32 eth_proto_cap)
{
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
@@ -600,7 +610,7 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
| MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
- return SUPPORTED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE);
}
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
@@ -608,9 +618,8 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
| MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
| MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
- return SUPPORTED_Backplane;
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane);
}
- return 0;
}
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
@@ -634,7 +643,7 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper,
- struct ethtool_cmd *cmd)
+ struct ethtool_link_ksettings *link_ksettings)
{
int i;
u32 speed = SPEED_UNKNOWN;
@@ -651,23 +660,32 @@ static void get_speed_duplex(struct net_device *netdev,
}
}
out:
- ethtool_cmd_speed_set(cmd, speed);
- cmd->duplex = duplex;
+ link_ksettings->base.speed = speed;
+ link_ksettings->base.duplex = duplex;
}
-static void get_supported(u32 eth_proto_cap, u32 *supported)
+static void get_supported(u32 eth_proto_cap,
+ struct ethtool_link_ksettings *link_ksettings)
{
- *supported |= ptys2ethtool_supported_port(eth_proto_cap);
- *supported |= ptys2ethtool_supported_link(eth_proto_cap);
- *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ unsigned long *supported = link_ksettings->link_modes.supported;
+
+ ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
+ ptys2ethtool_supported_link(supported, eth_proto_cap);
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
}
static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
- u8 rx_pause, u32 *advertising)
+ u8 rx_pause,
+ struct ethtool_link_ksettings *link_ksettings)
{
- *advertising |= ptys2ethtool_adver_link(eth_proto_cap);
- *advertising |= tx_pause ? ADVERTISED_Pause : 0;
- *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0;
+ unsigned long *advertising = link_ksettings->link_modes.advertising;
+
+ ptys2ethtool_adver_link(advertising, eth_proto_cap);
+ if (tx_pause)
+ ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
+ if (tx_pause ^ rx_pause)
+ ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
}
static u8 get_connector_port(u32 eth_proto)
@@ -695,13 +713,16 @@ static u8 get_connector_port(u32 eth_proto)
return PORT_OTHER;
}
-static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising)
+static void get_lp_advertising(u32 eth_proto_lp,
+ struct ethtool_link_ksettings *link_ksettings)
{
- *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp);
+ unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+
+ ptys2ethtool_adver_link(lp_advertising, eth_proto_lp);
}
-static int mlx5e_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int mlx5e_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -710,6 +731,8 @@ static int mlx5e_get_settings(struct net_device *netdev,
u32 eth_proto_admin;
u32 eth_proto_lp;
u32 eth_proto_oper;
+ u8 an_disable_admin;
+ u8 an_status;
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
@@ -720,35 +743,49 @@ static int mlx5e_get_settings(struct net_device *netdev,
goto err_query_ptys;
}
- eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
- eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
- eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
- eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+ eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+ an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
+ an_status = MLX5_GET(ptys_reg, out, an_status);
- cmd->supported = 0;
- cmd->advertising = 0;
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- get_supported(eth_proto_cap, &cmd->supported);
- get_advertising(eth_proto_admin, 0, 0, &cmd->advertising);
- get_speed_duplex(netdev, eth_proto_oper, cmd);
+ get_supported(eth_proto_cap, link_ksettings);
+ get_advertising(eth_proto_admin, 0, 0, link_ksettings);
+ get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- cmd->port = get_connector_port(eth_proto_oper);
- get_lp_advertising(eth_proto_lp, &cmd->lp_advertising);
+ link_ksettings->base.port = get_connector_port(eth_proto_oper);
+ get_lp_advertising(eth_proto_lp, link_ksettings);
- cmd->transceiver = XCVR_INTERNAL;
+ if (an_status == MLX5_AN_COMPLETE)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ lp_advertising, Autoneg);
+
+ link_ksettings->base.autoneg = an_disable_admin ? AUTONEG_DISABLE :
+ AUTONEG_ENABLE;
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
+ Autoneg);
+ if (!an_disable_admin)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Autoneg);
err_query_ptys:
return err;
}
-static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes)
+static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (ptys2ethtool_table[i].advertised & link_modes)
+ if (bitmap_intersects(ptys2ethtool_table[i].advertised,
+ link_modes,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
ptys_modes |= MLX5E_PROT_MASK(i);
}
@@ -767,21 +804,25 @@ static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
return speed_links;
}
-static int mlx5e_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int mlx5e_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ u32 eth_proto_cap, eth_proto_admin;
+ bool an_changes = false;
+ u8 an_disable_admin;
+ u8 an_disable_cap;
+ bool an_disable;
u32 link_modes;
+ u8 an_status;
u32 speed;
- u32 eth_proto_cap, eth_proto_admin;
- enum mlx5_port_status ps;
int err;
- speed = ethtool_cmd_speed(cmd);
+ speed = link_ksettings->base.speed;
- link_modes = cmd->autoneg == AUTONEG_ENABLE ?
- mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
+ link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
+ mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) :
mlx5e_ethtool2ptys_speed_link(speed);
err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
@@ -806,15 +847,18 @@ static int mlx5e_set_settings(struct net_device *netdev,
goto out;
}
- if (link_modes == eth_proto_admin)
+ mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status,
+ &an_disable_cap, &an_disable_admin);
+
+ an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
+ an_changes = ((!an_disable && an_disable_admin) ||
+ (an_disable && !an_disable_admin));
+
+ if (!an_changes && link_modes == eth_proto_admin)
goto out;
- mlx5_query_port_admin_status(mdev, &ps);
- if (ps == MLX5_PORT_UP)
- mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
- mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
- if (ps == MLX5_PORT_UP)
- mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
+ mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN);
+ mlx5_toggle_port_link(mdev);
out:
return err;
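The hunks above convert mlx5e from the legacy get_settings/set_settings pair to the link_ksettings API. The practical difference: the SUPPORTED_*/ADVERTISED_* u32 masks become link-mode bitmaps of unsigned long words, so the driver's per-PTYS-entry table is matched with bitmap_intersects() instead of a plain bitwise AND, and autoneg is now reported and programmed explicitly (an_status/an_disable_admin read from the PTYS register, mlx5_set_port_ptys() plus mlx5_toggle_port_link() only when modes or the autoneg admin state actually change). A minimal userspace sketch (not driver code) of the intersection test; names and the bit count are stand-ins, and trailing partial-word masking is omitted for brevity:

	#include <stdbool.h>
	#include <stdio.h>

	#define LINK_MODE_NBITS 64	/* stand-in for __ETHTOOL_LINK_MODE_MASK_NBITS */
	#define WORD_BITS (8 * sizeof(unsigned long))
	#define WORDS(nbits) (((nbits) + WORD_BITS - 1) / WORD_BITS)

	/* same idea as the kernel's bitmap_intersects(): any common set bit? */
	static bool bitmaps_intersect(const unsigned long *a,
				      const unsigned long *b,
				      unsigned int nbits)
	{
		unsigned int i;

		for (i = 0; i < WORDS(nbits); i++)
			if (a[i] & b[i])
				return true;
		return false;
	}

	int main(void)
	{
		unsigned long table_entry[WORDS(LINK_MODE_NBITS)] = { 1UL << 5 };
		unsigned long requested[WORDS(LINK_MODE_NBITS)] = { 1UL << 5 };

		/* analogous to accumulating ptys_modes |= MLX5E_PROT_MASK(i) */
		printf("intersects: %d\n",
		       (int)bitmaps_intersect(table_entry, requested, LINK_MODE_NBITS));
		return 0;
	}

With standard ethtool syntax, `ethtool -s eth0 autoneg off speed 40000 duplex full` exercises the set path above (device name and values illustrative).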
@@ -861,7 +905,7 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
mlx5e_build_tir_ctx_hash(tirc, priv);
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
- mlx5_core_modify_tir(mdev, priv->indir_tirn[i], in, inlen);
+ mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
}
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -883,7 +927,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mutex_lock(&priv->state_lock);
if (indir) {
- u32 rqtn = priv->indir_rqtn;
+ u32 rqtn = priv->indir_rqt.rqtn;
memcpy(priv->params.indirection_rqt, indir,
sizeof(priv->params.indirection_rqt));
@@ -916,6 +960,15 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
info->data = priv->params.num_channels;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -1272,6 +1325,107 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
return 0;
}
+typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
+
+static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool rx_mode_changed;
+ u8 rx_cq_period_mode;
+ int err = 0;
+ bool reset;
+
+ rx_cq_period_mode = enable ?
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode;
+
+ if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
+ !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
+ return -ENOTSUPP;
+
+ if (!rx_mode_changed)
+ return 0;
+
+ reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (reset)
+ mlx5e_close_locked(netdev);
+
+ mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode);
+
+ if (reset)
+ err = mlx5e_open_locked(netdev);
+
+ return err;
+}
+
+static int mlx5e_handle_pflag(struct net_device *netdev,
+ u32 wanted_flags,
+ enum mlx5e_priv_flag flag,
+ mlx5e_pflag_handler pflag_handler)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ bool enable = !!(wanted_flags & flag);
+ u32 changes = wanted_flags ^ priv->pflags;
+ int err;
+
+ if (!(changes & flag))
+ return 0;
+
+ err = pflag_handler(netdev, enable);
+ if (err) {
+ netdev_err(netdev, "%s private flag 0x%x failed, err %d\n",
+ enable ? "Enable" : "Disable", flag, err);
+ return err;
+ }
+
+ MLX5E_SET_PRIV_FLAG(priv, flag, enable);
+ return 0;
+}
+
+static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ err = mlx5e_handle_pflag(netdev, pflags,
+ MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ set_pflag_rx_cqe_based_moder);
+
+ mutex_unlock(&priv->state_lock);
+ return err ? -EINVAL : 0;
+}
+
+static u32 mlx5e_get_priv_flags(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return priv->pflags;
+}
+
+static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1284,13 +1438,14 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_channels = mlx5e_set_channels,
.get_coalesce = mlx5e_get_coalesce,
.set_coalesce = mlx5e_set_coalesce,
- .get_settings = mlx5e_get_settings,
- .set_settings = mlx5e_set_settings,
+ .get_link_ksettings = mlx5e_get_link_ksettings,
+ .set_link_ksettings = mlx5e_set_link_ksettings,
.get_rxfh_key_size = mlx5e_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
.get_rxnfc = mlx5e_get_rxnfc,
+ .set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pauseparam = mlx5e_get_pauseparam,
@@ -1301,4 +1456,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_wol = mlx5e_set_wol,
.get_module_info = mlx5e_get_module_info,
.get_module_eeprom = mlx5e_get_module_eeprom,
+ .get_priv_flags = mlx5e_get_priv_flags,
+ .set_priv_flags = mlx5e_set_priv_flags
};
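The private-flags plumbing added above maps each bit of the ethtool priv-flags word to a handler; only one flag exists so far, selecting CQE-based rather than EQE-based RX completion moderation. Enabling it requires the cq_period_start_from_cqe capability and, when the interface is up, a close/reopen of the channels; mlx5e_set_priv_flags() also flattens any handler error to -EINVAL for userspace. Assuming the driver registers the flag under a name like rx_cqe_moder (the strings table is not part of these hunks), it would be driven as:

	ethtool --show-priv-flags eth0
	ethtool --set-priv-flags eth0 rx_cqe_moder on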
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index b32740092854..1587a9fd5724 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -156,19 +156,18 @@ enum mlx5e_vlan_rule_type {
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type,
- u16 vid, u32 *mc, u32 *mv)
+ u16 vid, struct mlx5_flow_spec *spec)
{
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest;
- u8 match_criteria_enable = 0;
struct mlx5_flow_rule **rule_p;
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fs.l2.ft.t;
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
@@ -176,17 +175,19 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
rule_p = &priv->fs.vlan.any_vlan_rule;
- MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
rule_p = &priv->fs.vlan.active_vlans_rule[vid];
- MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
- MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+ vid);
break;
}
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ *rule_p = mlx5_add_flow_rule(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
@@ -203,27 +204,21 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5_flow_spec *spec;
int err = 0;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
- err = -ENOMEM;
- goto add_vlan_rule_out;
+ return -ENOMEM;
}
if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
mlx5e_vport_context_update_vlans(priv);
- err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
- match_value);
+ err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
-add_vlan_rule_out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return err;
}
@@ -598,32 +593,27 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
u8 proto)
{
struct mlx5_flow_rule *rule;
- u8 match_criteria_enable = 0;
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5_flow_spec *spec;
int err = 0;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
- err = -ENOMEM;
- goto out;
+ return ERR_PTR(-ENOMEM);
}
if (proto) {
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol);
- MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
}
if (etype) {
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype);
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
}
- rule = mlx5_add_flow_rule(ft, match_criteria_enable,
- match_criteria, match_value,
+ rule = mlx5_add_flow_rule(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
dest);
@@ -631,9 +621,8 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
}
-out:
- kvfree(match_criteria);
- kvfree(match_value);
+
+ kvfree(spec);
return err ? ERR_PTR(err) : rule;
}
@@ -655,7 +644,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
if (tt == MLX5E_TT_ANY)
dest.tir_num = priv->direct_tir[0].tirn;
else
- dest.tir_num = priv->indir_tirn[tt];
+ dest.tir_num = priv->indir_tir[tt].tirn;
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_rules[tt].etype,
ttc_rules[tt].proto);
@@ -792,24 +781,20 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
{
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
struct mlx5_flow_destination dest;
- u8 match_criteria_enable = 0;
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5_flow_spec *spec;
int err = 0;
u8 *mc_dmac;
u8 *mv_dmac;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
- err = -ENOMEM;
- goto add_l2_rule_out;
+ return -ENOMEM;
}
- mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16);
- mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+ mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dmac_47_16);
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -817,13 +802,13 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
switch (type) {
case MLX5E_FULLMATCH:
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
eth_broadcast_addr(mc_dmac);
ether_addr_copy(mv_dmac, ai->addr);
break;
case MLX5E_ALLMULTI:
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
mc_dmac[0] = 0x01;
mv_dmac[0] = 0x01;
break;
@@ -832,8 +817,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
break;
}
- ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
- match_value,
+ ai->rule = mlx5_add_flow_rule(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR(ai->rule)) {
@@ -843,9 +827,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
ai->rule = NULL;
}
-add_l2_rule_out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return err;
}
@@ -1102,6 +1084,8 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
goto err_destroy_l2_table;
}
+ mlx5e_ethtool_init_steering(priv);
+
return 0;
err_destroy_l2_table:
@@ -1121,4 +1105,5 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
mlx5e_destroy_l2_table(priv);
mlx5e_destroy_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
+ mlx5e_ethtool_cleanup_steering(priv);
}
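en_fs.c is converted here from passing separate match_criteria/match_value buffers (plus a criteria-enable byte carried alongside) to a single struct mlx5_flow_spec, which removes one allocation and one kvfree per rule and collapses the error-unwind labels. A rough sketch of the consolidation; the field sizes are stand-ins, the real struct is sized by MLX5_ST_SZ_* of fte_match_param:

	/* Illustrative only -- not the kernel definition. */
	#define MATCH_PARAM_DW 0x80	/* stand-in for MLX5_ST_SZ_DW(fte_match_param) */

	struct flow_spec_sketch {
		unsigned char match_criteria_enable;		/* e.g. MLX5_MATCH_OUTER_HEADERS */
		unsigned int match_criteria[MATCH_PARAM_DW];	/* the mask */
		unsigned int match_value[MATCH_PARAM_DW];	/* the value */
	};

One mlx5_vzalloc(sizeof(*spec)) / kvfree(spec) pair now brackets each rule-build path, which is why the add_vlan_rule_out and out labels disappear.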
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
new file mode 100644
index 000000000000..d17c24227900
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/fs.h>
+#include "en.h"
+
+struct mlx5e_ethtool_rule {
+ struct list_head list;
+ struct ethtool_rx_flow_spec flow_spec;
+ struct mlx5_flow_rule *rule;
+ struct mlx5e_ethtool_table *eth_ft;
+};
+
+static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
+{
+ if (!--eth_ft->num_rules) {
+ mlx5_destroy_flow_table(eth_ft->ft);
+ eth_ft->ft = NULL;
+ }
+}
+
+#define MLX5E_ETHTOOL_L3_L4_PRIO 0
+#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
+#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
+#define MLX5E_ETHTOOL_NUM_GROUPS 10
+static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs,
+ int num_tuples)
+{
+ struct mlx5e_ethtool_table *eth_ft;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ int max_tuples;
+ int table_size;
+ int prio;
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ max_tuples = ETHTOOL_NUM_L3_L4_FTS;
+ prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
+ eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ break;
+ case IP_USER_FLOW:
+ max_tuples = ETHTOOL_NUM_L3_L4_FTS;
+ prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
+ eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ break;
+ case ETHER_FLOW:
+ max_tuples = ETHTOOL_NUM_L2_FTS;
+ prio = max_tuples - num_tuples;
+ eth_ft = &priv->fs.ethtool.l2_ft[prio];
+ prio += MLX5E_ETHTOOL_L2_PRIO;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ eth_ft->num_rules++;
+ if (eth_ft->ft)
+ return eth_ft;
+
+ ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_ETHTOOL);
+ if (!ns)
+ return ERR_PTR(-ENOTSUPP);
+
+ table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
+ flow_table_properties_nic_receive.log_max_ft_size)),
+ MLX5E_ETHTOOL_NUM_ENTRIES);
+ ft = mlx5_create_auto_grouped_flow_table(ns, prio,
+ table_size,
+ MLX5E_ETHTOOL_NUM_GROUPS, 0);
+ if (IS_ERR(ft))
+ return (void *)ft;
+
+ eth_ft->ft = ft;
+ return eth_ft;
+}
+
+static void mask_spec(u8 *mask, u8 *val, size_t size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++, mask++, val++)
+ *val &= *mask;
+}
+
+static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
+ __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
+{
+ if (ip4src_m) {
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &ip4src_v, sizeof(ip4src_v));
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ 0xff, sizeof(ip4src_m));
+ }
+ if (ip4dst_m) {
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &ip4dst_v, sizeof(ip4dst_v));
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ 0xff, sizeof(ip4dst_m));
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ ethertype, ETH_P_IP);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ ethertype, 0xffff);
+}
+
+static int set_flow_attrs(u32 *match_c, u32 *match_v,
+ struct ethtool_rx_flow_spec *fs)
+{
+ void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ struct ethtool_tcpip4_spec *l4_mask;
+ struct ethtool_tcpip4_spec *l4_val;
+ struct ethtool_usrip4_spec *l3_mask;
+ struct ethtool_usrip4_spec *l3_val;
+ struct ethhdr *eth_val;
+ struct ethhdr *eth_mask;
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ l4_mask = &fs->m_u.tcp_ip4_spec;
+ l4_val = &fs->h_u.tcp_ip4_spec;
+ set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
+ l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
+
+ if (l4_mask->psrc) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
+ ntohs(l4_val->psrc));
+ }
+ if (l4_mask->pdst) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
+ ntohs(l4_val->pdst));
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+ IPPROTO_TCP);
+ break;
+ case UDP_V4_FLOW:
+ l4_mask = &fs->m_u.tcp_ip4_spec; /* udp_ip4_spec shares this layout */
+ l4_val = &fs->h_u.tcp_ip4_spec;
+ set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
+ l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
+
+ if (l4_mask->psrc) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
+ ntohs(l4_val->psrc));
+ }
+ if (l4_mask->pdst) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
+ ntohs(l4_val->pdst));
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+ IPPROTO_UDP);
+ break;
+ case IP_USER_FLOW:
+ l3_mask = &fs->m_u.usr_ip4_spec;
+ l3_val = &fs->h_u.usr_ip4_spec;
+ set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
+ l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
+ break;
+ case ETHER_FLOW:
+ eth_mask = &fs->m_u.ether_spec;
+ eth_val = &fs->h_u.ether_spec;
+
+ mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_c, smac_47_16),
+ eth_mask->h_source);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_v, smac_47_16),
+ eth_val->h_source);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_c, dmac_47_16),
+ eth_mask->h_dest);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_v, dmac_47_16),
+ eth_val->h_dest);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
+ ntohs(eth_mask->h_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
+ ntohs(eth_val->h_proto));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ first_vid, 0xfff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ first_vid, ntohs(fs->h_ext.vlan_tci));
+ }
+ if (fs->flow_type & FLOW_MAC_EXT &&
+ !is_zero_ether_addr(fs->m_ext.h_dest)) {
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_c, dmac_47_16),
+ fs->m_ext.h_dest);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_v, dmac_47_16),
+ fs->h_ext.h_dest);
+ }
+
+ return 0;
+}
+
+static void add_rule_to_list(struct mlx5e_priv *priv,
+ struct mlx5e_ethtool_rule *rule)
+{
+ struct mlx5e_ethtool_rule *iter;
+ struct list_head *head = &priv->fs.ethtool.rules;
+
+ list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ if (iter->flow_spec.location > rule->flow_spec.location)
+ break;
+ head = &iter->list;
+ }
+ priv->fs.ethtool.tot_num_rules++;
+ list_add(&rule->list, head);
+}
+
+static bool outer_header_zero(u32 *match_criteria)
+{
+ int size = MLX5_ST_SZ_BYTES(fte_match_param);
+ char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers);
+
+ return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
+ outer_headers_c + 1,
+ size - 1);
+}
+
+static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_table *ft,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_rule *rule;
+ int err = 0;
+ u32 action;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+ err = set_flow_attrs(spec->match_criteria, spec->match_value,
+ fs);
+ if (err)
+ goto free;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+ action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ } else {
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
+ action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ }
+
+ spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
+ rule = mlx5_add_flow_rule(ft, spec, action,
+ MLX5_FS_DEFAULT_FLOW_TAG, dst);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
+ __func__, err);
+ goto free;
+ }
+free:
+ kvfree(spec);
+ kfree(dst);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static void del_ethtool_rule(struct mlx5e_priv *priv,
+ struct mlx5e_ethtool_rule *eth_rule)
+{
+ if (eth_rule->rule)
+ mlx5_del_flow_rule(eth_rule->rule);
+ list_del(&eth_rule->list);
+ priv->fs.ethtool.tot_num_rules--;
+ put_flow_table(eth_rule->eth_ft);
+ kfree(eth_rule);
+}
+
+static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
+ int location)
+{
+ struct mlx5e_ethtool_rule *iter;
+
+ list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ if (iter->flow_spec.location == location)
+ return iter;
+ }
+ return NULL;
+}
+
+static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
+ int location)
+{
+ struct mlx5e_ethtool_rule *eth_rule;
+
+ eth_rule = find_ethtool_rule(priv, location);
+ if (eth_rule)
+ del_ethtool_rule(priv, eth_rule);
+
+ eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
+ if (!eth_rule)
+ return ERR_PTR(-ENOMEM);
+
+ add_rule_to_list(priv, eth_rule);
+ return eth_rule;
+}
+
+#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
+
+#define all_ones(field) ((field) == (__force typeof(field))-1)
+#define all_zeros_or_all_ones(field) \
+ ((field) == 0 || (field) == (__force typeof(field))-1)
+
+static int validate_flow(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *l4_mask;
+ struct ethtool_usrip4_spec *l3_mask;
+ struct ethhdr *eth_mask;
+ int num_tuples = 0;
+
+ if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
+ return -EINVAL;
+
+ if (fs->ring_cookie >= priv->params.num_channels &&
+ fs->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ eth_mask = &fs->m_u.ether_spec;
+ if (!is_zero_ether_addr(eth_mask->h_dest))
+ num_tuples++;
+ if (!is_zero_ether_addr(eth_mask->h_source))
+ num_tuples++;
+ if (eth_mask->h_proto)
+ num_tuples++;
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ if (fs->m_u.tcp_ip4_spec.tos)
+ return -EINVAL;
+ l4_mask = &fs->m_u.tcp_ip4_spec;
+ if (l4_mask->ip4src) {
+ if (!all_ones(l4_mask->ip4src))
+ return -EINVAL;
+ num_tuples++;
+ }
+ if (l4_mask->ip4dst) {
+ if (!all_ones(l4_mask->ip4dst))
+ return -EINVAL;
+ num_tuples++;
+ }
+ if (l4_mask->psrc) {
+ if (!all_ones(l4_mask->psrc))
+ return -EINVAL;
+ num_tuples++;
+ }
+ if (l4_mask->pdst) {
+ if (!all_ones(l4_mask->pdst))
+ return -EINVAL;
+ num_tuples++;
+ }
+ /* Flow is TCP/UDP */
+ num_tuples++;
+ break;
+ case IP_USER_FLOW:
+ l3_mask = &fs->m_u.usr_ip4_spec;
+ if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
+ fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ return -EINVAL;
+ if (l3_mask->ip4src) {
+ if (!all_ones(l3_mask->ip4src))
+ return -EINVAL;
+ num_tuples++;
+ }
+ if (l3_mask->ip4dst) {
+ if (!all_ones(l3_mask->ip4dst))
+ return -EINVAL;
+ num_tuples++;
+ }
+ /* Flow is IPv4 */
+ num_tuples++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if ((fs->flow_type & FLOW_EXT)) {
+ if (fs->m_ext.vlan_etype ||
+ (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)))
+ return -EINVAL;
+
+ if (fs->m_ext.vlan_tci) {
+ if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
+ return -EINVAL;
+ }
+ num_tuples++;
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT &&
+ !is_zero_ether_addr(fs->m_ext.h_dest))
+ num_tuples++;
+
+ return num_tuples;
+}
+
+int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct mlx5e_ethtool_table *eth_ft;
+ struct mlx5e_ethtool_rule *eth_rule;
+ struct mlx5_flow_rule *rule;
+ int num_tuples;
+ int err;
+
+ num_tuples = validate_flow(priv, fs);
+ if (num_tuples <= 0) {
+ netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
+ return -EINVAL;
+ }
+
+ eth_ft = get_flow_table(priv, fs, num_tuples);
+ if (IS_ERR(eth_ft))
+ return PTR_ERR(eth_ft);
+
+ eth_rule = get_ethtool_rule(priv, fs->location);
+ if (IS_ERR(eth_rule)) {
+ put_flow_table(eth_ft);
+ return PTR_ERR(eth_rule);
+ }
+
+ eth_rule->flow_spec = *fs;
+ eth_rule->eth_ft = eth_ft;
+ if (!eth_ft->ft) {
+ err = -EINVAL;
+ goto del_ethtool_rule;
+ }
+ rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto del_ethtool_rule;
+ }
+
+ eth_rule->rule = rule;
+
+ return 0;
+
+del_ethtool_rule:
+ del_ethtool_rule(priv, eth_rule);
+
+ return err;
+}
+
+int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
+ int location)
+{
+ struct mlx5e_ethtool_rule *eth_rule;
+ int err = 0;
+
+ if (location >= MAX_NUM_OF_ETHTOOL_RULES)
+ return -ENOSPC;
+
+ eth_rule = find_ethtool_rule(priv, location);
+ if (!eth_rule) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ del_ethtool_rule(priv, eth_rule);
+out:
+ return err;
+}
+
+int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+ int location)
+{
+ struct mlx5e_ethtool_rule *eth_rule;
+
+ if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
+ return -EINVAL;
+
+ list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
+ if (eth_rule->flow_spec.location == location) {
+ info->fs = eth_rule->flow_spec;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
+{
+ int location = 0;
+ int idx = 0;
+ int err = 0;
+
+ while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
+ err = mlx5e_ethtool_get_flow(priv, info, location);
+ if (!err)
+ rule_locs[idx++] = location;
+ location++;
+ }
+ return err;
+}
+
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ethtool_rule *iter;
+ struct mlx5e_ethtool_rule *temp;
+
+ list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
+ del_ethtool_rule(priv, iter);
+}
+
+void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
+{
+ INIT_LIST_HEAD(&priv->fs.ethtool.rules);
+}
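The new en_fs_ethtool.c implements the receive-side NFC commands wired up earlier in en_ethtool.c (ETHTOOL_SRXCLSRLINS/SRXCLSRLDEL on the set path, GRXCLSRLCNT/GRXCLSRULE/GRXCLSRLALL on the get path). Rules are kept on a list sorted by location, flow tables are created lazily per tuple count so that more specific rules land at higher priority, and put_flow_table() destroys a table once its num_rules count drops to zero. With the standard ethtool rule syntax (device name, addresses and locations illustrative):

	ethtool -U eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 action 1 loc 0
	ethtool -u eth0              # dump rules (GRXCLSRLCNT + GRXCLSRLALL)
	ethtool -U eth0 delete 0     # remove the rule at location 0

An action of -1 corresponds to RX_CLS_FLOW_DISC, i.e. the MLX5_FLOW_CONTEXT_ACTION_DROP branch in add_ethtool_flow_rule().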
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index fd4392999eee..96ec53a6a595 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -40,8 +40,9 @@
#include "vxlan.h"
struct mlx5e_rq_param {
- u32 rqc[MLX5_ST_SZ_DW(rqc)];
- struct mlx5_wq_param wq;
+ u32 rqc[MLX5_ST_SZ_DW(rqc)];
+ struct mlx5_wq_param wq;
+ bool am_enabled;
};
struct mlx5e_sq_param {
@@ -55,6 +56,7 @@ struct mlx5e_cq_param {
u32 cqc[MLX5_ST_SZ_DW(cqc)];
struct mlx5_wq_param wq;
u16 eq_ix;
+ u8 cq_period_mode;
};
struct mlx5e_channel_param {
@@ -105,11 +107,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
- s->lro_packets += rq_stats->lro_packets;
- s->lro_bytes += rq_stats->lro_bytes;
+ s->rx_lro_packets += rq_stats->lro_packets;
+ s->rx_lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
- s->rx_csum_sw += rq_stats->csum_sw;
- s->rx_csum_inner += rq_stats->csum_inner;
+ s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
@@ -122,24 +124,23 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
- s->tso_packets += sq_stats->tso_packets;
- s->tso_bytes += sq_stats->tso_bytes;
- s->tso_inner_packets += sq_stats->tso_inner_packets;
- s->tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_tso_packets += sq_stats->tso_packets;
+ s->tx_tso_bytes += sq_stats->tso_bytes;
+ s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+ s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
- s->tx_csum_inner += sq_stats->csum_offload_inner;
- tx_offload_none += sq_stats->csum_offload_none;
+ s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+ tx_offload_none += sq_stats->csum_none;
}
}
/* Update calculated offload counters */
- s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
- s->rx_csum_good = s->rx_packets - s->rx_csum_none -
- s->rx_csum_sw;
+ s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+ s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
- s->link_down_events = MLX5_GET(ppcnt_reg,
+ s->link_down_events_phy = MLX5_GET(ppcnt_reg,
priv->stats.pport.phy_counters,
counter_set.phys_layer_cntrs.link_down_events);
}
@@ -225,14 +226,14 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
mlx5e_update_sw_counters(priv);
}
-static void mlx5e_update_stats_work(struct work_struct *work)
+void mlx5e_update_stats_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
update_stats_work);
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- mlx5e_update_stats(priv);
+ priv->profile->update_stats(priv);
queue_delayed_work(priv->wq, dwork,
msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
}
@@ -244,7 +245,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
{
struct mlx5e_priv *priv = vpriv;
- if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
switch (event) {
@@ -260,12 +261,12 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
- set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
- clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
@@ -336,6 +337,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
wqe->data.byte_count = cpu_to_be32(byte_count);
}
+ INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
+ rq->am.mode = priv->params.rx_cq_period_mode;
+
rq->wq_type = priv->params.rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
@@ -508,6 +512,9 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_disable_rq;
+ if (param->am_enabled)
+ set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
+
set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
@@ -536,6 +543,8 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
napi_synchronize(&rq->channel->napi);
+ cancel_work_sync(&rq->am.work);
+
mlx5e_disable_rq(rq);
mlx5e_destroy_rq(rq);
}
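These RQ hunks attach the adaptive-moderation state: each RQ gets an am worker and inherits rx_cq_period_mode, mlx5e_open_rq() sets MLX5E_RQ_STATE_AM only when the channel parameters enable it, and mlx5e_close_rq() cancels the work before the RQ is destroyed so the worker can never touch a freed CQ. When rx_am_enabled is set, the channel-open path further down seeds the CQ from mlx5e_am_get_def_profile() for the chosen period mode instead of the static usec/pkts pair. How the knob reaches userspace is not part of these hunks; on drivers that wire it to the generic coalescing API it would look like (assumption):

	ethtool -C eth0 adaptive-rx on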
@@ -580,7 +589,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
int err;
- err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
+ err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
if (err)
return err;
@@ -702,7 +711,8 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
return err;
}
-static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
+ int next_state, bool update_rl, int rl_index)
{
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
@@ -722,6 +732,10 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
MLX5_SET(modify_sq_in, in, sq_state, curr_state);
MLX5_SET(sqc, sqc, state, next_state);
+ if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
+ MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+ }
err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
@@ -737,6 +751,8 @@ static void mlx5e_disable_sq(struct mlx5e_sq *sq)
struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_sq(mdev, sq->sqn);
+ if (sq->rate_limit)
+ mlx5_rl_remove_rate(mdev, sq->rate_limit);
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
@@ -754,7 +770,8 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
if (err)
goto err_destroy_sq;
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+ false, 0);
if (err)
goto err_disable_sq;
@@ -793,7 +810,8 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
if (mlx5e_sq_has_room_for(sq, 1))
mlx5e_send_nop(sq, true);
- mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+ mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR,
+ false, 0);
}
while (sq->cc != sq->pc) /* wait till sq is empty */
@@ -840,7 +858,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn;
- mcq->uar = &priv->cq_uar;
+ mcq->uar = &mdev->mlx5e_res.cq_uar;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -887,6 +905,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+ MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
@@ -916,8 +935,7 @@ static void mlx5e_disable_cq(struct mlx5e_cq *cq)
static int mlx5e_open_cq(struct mlx5e_channel *c,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq,
- u16 moderation_usecs,
- u16 moderation_frames)
+ struct mlx5e_cq_moder moderation)
{
int err;
struct mlx5e_priv *priv = c->priv;
@@ -933,8 +951,8 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
if (MLX5_CAP_GEN(mdev, cq_moderation))
mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
- moderation_usecs,
- moderation_frames);
+ moderation.usec,
+ moderation.pkts);
return 0;
err_destroy_cq:
@@ -963,8 +981,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
for (tc = 0; tc < c->num_tc; tc++) {
err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
- priv->params.tx_cq_moderation_usec,
- priv->params.tx_cq_moderation_pkts);
+ priv->params.tx_cq_moderation);
if (err)
goto err_close_tx_cqs;
}
@@ -1019,19 +1036,96 @@ static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
int i;
- for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
+ for (i = 0; i < priv->profile->max_tc; i++)
priv->channeltc_to_txq_map[ix][i] =
ix + i * priv->params.num_channels;
}
+static int mlx5e_set_sq_maxrate(struct net_device *dev,
+ struct mlx5e_sq *sq, u32 rate)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 rl_index = 0;
+ int err;
+
+ if (rate == sq->rate_limit)
+ /* nothing to do */
+ return 0;
+
+ if (sq->rate_limit)
+ /* remove the current rl index to free the slot for the next ones */
+ mlx5_rl_remove_rate(mdev, sq->rate_limit);
+
+ sq->rate_limit = 0;
+
+ if (rate) {
+ err = mlx5_rl_add_rate(mdev, rate, &rl_index);
+ if (err) {
+ netdev_err(dev, "Failed configuring rate %u: %d\n",
+ rate, err);
+ return err;
+ }
+ }
+
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RDY, true, rl_index);
+ if (err) {
+ netdev_err(dev, "Failed configuring rate %u: %d\n",
+ rate, err);
+ /* remove the rate from the table */
+ if (rate)
+ mlx5_rl_remove_rate(mdev, rate);
+ return err;
+ }
+
+ sq->rate_limit = rate;
+ return 0;
+}
+
+static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
+ int err = 0;
+
+ if (!mlx5_rl_is_supported(mdev)) {
+ netdev_err(dev, "Rate limiting is not supported on this device\n");
+ return -EINVAL;
+ }
+
+ /* rate is given in Mb/sec, HW config is in Kb/sec */
+ rate = rate << 10;
+
+ /* Check whether the rate is in the valid range; 0 is always valid */
+ if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
+ netdev_err(dev, "TX rate %u, is not in range\n", rate);
+ return -ERANGE;
+ }
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ err = mlx5e_set_sq_maxrate(dev, sq, rate);
+ if (!err)
+ priv->tx_rates[index] = rate;
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
+ struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
+ struct mlx5e_cq_moder rx_cq_profile;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
+ struct mlx5e_sq *sq;
int err;
+ int i;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
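mlx5e_set_sq_maxrate()/mlx5e_set_tx_maxrate() above implement ndo_set_tx_maxrate via firmware packet pacing: rate-limit entries live in a shared table (mlx5_rl_add_rate() hands back an index, which appears to be reference-counted, and mlx5_rl_remove_rate() releases it), and MODIFY_SQ with modify_bitmask set attaches the index to a ready SQ. Rates arrive in Mb/s and are shifted left by 10 to approximate the Kb/s units the hardware expects; a configured rate is also cached in priv->tx_rates[] so mlx5e_open_channel() can reapply it after a close/open cycle. Usage goes through the standard per-queue sysfs attribute backed by ndo_set_tx_maxrate (value in Mb/s; device and queue index illustrative):

	echo 1000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate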
@@ -1042,14 +1136,19 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->cpu = cpu;
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = priv->params.num_tc;
+ if (priv->params.rx_am_enabled)
+ rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
+ else
+ rx_cq_profile = priv->params.rx_cq_moderation;
+
mlx5e_build_channeltc_to_txq_map(priv, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
- err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0);
+ err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
if (err)
goto err_napi_del;
@@ -1058,8 +1157,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
goto err_close_icosq_cq;
err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
- priv->params.rx_cq_moderation_usec,
- priv->params.rx_cq_moderation_pkts);
+ rx_cq_profile);
if (err)
goto err_close_tx_cqs;
@@ -1073,6 +1171,16 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_icosq;
+ for (i = 0; i < priv->params.num_tc; i++) {
+ u32 txq_ix = priv->channeltc_to_txq_map[ix][i];
+
+ if (priv->tx_rates[txq_ix]) {
+ sq = priv->txq_to_sq_map[txq_ix];
+ mlx5e_set_sq_maxrate(priv->netdev, sq,
+ priv->tx_rates[txq_ix]);
+ }
+ }
+
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
if (err)
goto err_close_sqs;
@@ -1144,11 +1252,13 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
- MLX5_SET(wq, wq, pd, priv->pdn);
+ MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
+
+ param->am_enabled = priv->params.rx_am_enabled;
}
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
@@ -1167,7 +1277,7 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
- MLX5_SET(wq, wq, pd, priv->pdn);
+ MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}
@@ -1189,7 +1299,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
+ MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
@@ -1214,6 +1324,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
}
mlx5e_build_common_cq_param(priv, param);
+
+ param->cq_period_mode = priv->params.rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
@@ -1224,6 +1336,8 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
mlx5e_build_common_cq_param(priv, param);
+
+ param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
@@ -1235,6 +1349,8 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
mlx5e_build_common_cq_param(priv, param);
+
+ param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
@@ -1370,7 +1486,8 @@ static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
}
-static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
+ int ix, struct mlx5e_rqt *rqt)
{
struct mlx5_core_dev *mdev = priv->mdev;
void *rqtc;
@@ -1393,34 +1510,36 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn)
else
mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
- err = mlx5_core_create_rqt(mdev, in, inlen, rqtn);
+ err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
+ if (!err)
+ rqt->enabled = true;
kvfree(in);
return err;
}
-static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn)
+void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
- mlx5_core_destroy_rqt(priv->mdev, rqtn);
+ rqt->enabled = false;
+ mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}
-static int mlx5e_create_rqts(struct mlx5e_priv *priv)
+static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
{
- int nch = mlx5e_get_max_num_channels(priv->mdev);
- u32 *rqtn;
+ struct mlx5e_rqt *rqt = &priv->indir_rqt;
+
+ return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
+}
+
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rqt *rqt;
int err;
int ix;
- /* Indirect RQT */
- rqtn = &priv->indir_rqtn;
- err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn);
- if (err)
- return err;
-
- /* Direct RQTs */
- for (ix = 0; ix < nch; ix++) {
- rqtn = &priv->direct_tir[ix].rqtn;
- err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn);
+ for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ rqt = &priv->direct_tir[ix].rqt;
+ err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt);
if (err)
goto err_destroy_rqts;
}
@@ -1429,24 +1548,11 @@ static int mlx5e_create_rqts(struct mlx5e_priv *priv)
err_destroy_rqts:
for (ix--; ix >= 0; ix--)
- mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn);
-
- mlx5e_destroy_rqt(priv, priv->indir_rqtn);
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
return err;
}
-static void mlx5e_destroy_rqts(struct mlx5e_priv *priv)
-{
- int nch = mlx5e_get_max_num_channels(priv->mdev);
- int i;
-
- for (i = 0; i < nch; i++)
- mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn);
-
- mlx5e_destroy_rqt(priv, priv->indir_rqtn);
-}
-
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1482,10 +1588,15 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
u32 rqtn;
int ix;
- rqtn = priv->indir_rqtn;
- mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+ if (priv->indir_rqt.enabled) {
+ rqtn = priv->indir_rqt.rqtn;
+ mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+ }
+
for (ix = 0; ix < priv->params.num_channels; ix++) {
- rqtn = priv->direct_tir[ix].rqtn;
+ if (!priv->direct_tir[ix].rqt.enabled)
+ continue;
+ rqtn = priv->direct_tir[ix].rqt.rqtn;
mlx5e_redirect_rqt(priv, rqtn, 1, ix);
}
}
@@ -1545,13 +1656,13 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
mlx5e_build_tir_ctx_lro(tirc, priv);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in,
+ err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
inlen);
if (err)
goto free_in;
}
- for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) {
+ for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
in, inlen);
if (err)
@@ -1564,40 +1675,6 @@ free_in:
return err;
}
-static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
-{
- void *in;
- int inlen;
- int err;
- int i;
-
- inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = mlx5_vzalloc(inlen);
- if (!in)
- return -ENOMEM;
-
- MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
-
- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
- err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in,
- inlen);
- if (err)
- return err;
- }
-
- for (i = 0; i < priv->params.num_channels; i++) {
- err = mlx5_core_modify_tir(priv->mdev,
- priv->direct_tir[i].tirn, in,
- inlen);
- if (err)
- return err;
- }
-
- kvfree(in);
-
- return 0;
-}
-
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1666,6 +1743,7 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
int num_txqs;
int err;
@@ -1688,7 +1766,7 @@ int mlx5e_open_locked(struct net_device *netdev)
goto err_clear_state_opened_flag;
}
- err = mlx5e_refresh_tirs_self_loopback_enable(priv);
+ err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev);
if (err) {
netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
__func__, err);
@@ -1701,9 +1779,14 @@ int mlx5e_open_locked(struct net_device *netdev)
#ifdef CONFIG_RFS_ACCEL
priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
#endif
+ if (priv->profile->update_stats)
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
-
+ if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ err = mlx5e_add_sqs_fwd_rules(priv);
+ if (err)
+ goto err_close_channels;
+ }
return 0;
err_close_channels:
@@ -1713,7 +1796,7 @@ err_clear_state_opened_flag:
return err;
}
-static int mlx5e_open(struct net_device *netdev)
+int mlx5e_open(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
@@ -1728,6 +1811,7 @@ static int mlx5e_open(struct net_device *netdev)
int mlx5e_close_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
/* May already be CLOSED in case a previous configuration operation
* (e.g. RX/TX queue size change) that involves close&open failed.
@@ -1737,6 +1821,9 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5e_remove_sqs_fwd_rules(priv);
+
mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
mlx5e_redirect_rqts(priv);
@@ -1745,7 +1832,7 @@ int mlx5e_close_locked(struct net_device *netdev)
return 0;
}
-static int mlx5e_close(struct net_device *netdev)
+int mlx5e_close(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
@@ -1804,7 +1891,7 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn;
- mcq->uar = &priv->cq_uar;
+ mcq->uar = &mdev->mlx5e_res.cq_uar;
cq->priv = priv;
@@ -1870,7 +1957,7 @@ static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
memset(in, 0, sizeof(in));
MLX5_SET(tisc, tisc, prio, tc << 1);
- MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}
@@ -1880,12 +1967,12 @@ static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}
-static int mlx5e_create_tises(struct mlx5e_priv *priv)
+int mlx5e_create_tises(struct mlx5e_priv *priv)
{
int err;
int tc;
- for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) {
+ for (tc = 0; tc < priv->profile->max_tc; tc++) {
err = mlx5e_create_tis(priv, tc);
if (err)
goto err_close_tises;
@@ -1900,11 +1987,11 @@ err_close_tises:
return err;
}
-static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
+void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
int tc;
- for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++)
+ for (tc = 0; tc < priv->profile->max_tc; tc++)
mlx5e_destroy_tis(priv, tc);
}
@@ -1913,7 +2000,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
{
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
- MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+ MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
@@ -1930,7 +2017,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn);
+ MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
mlx5e_build_tir_ctx_hash(tirc, priv);
switch (tt) {
@@ -2020,7 +2107,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
u32 rqtn)
{
- MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+ MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
mlx5e_build_tir_ctx_lro(tirc, priv);
@@ -2029,15 +2116,13 @@ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
-static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
- int nch = mlx5e_get_max_num_channels(priv->mdev);
+ struct mlx5e_tir *tir;
void *tirc;
int inlen;
- u32 *tirn;
int err;
u32 *in;
- int ix;
int tt;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
@@ -2045,25 +2130,51 @@ static int mlx5e_create_tirs(struct mlx5e_priv *priv)
if (!in)
return -ENOMEM;
- /* indirect tirs */
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
memset(in, 0, inlen);
- tirn = &priv->indir_tirn[tt];
+ tir = &priv->indir_tir[tt];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_indir_tir_ctx(priv, tirc, tt);
- err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+ err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
if (err)
goto err_destroy_tirs;
}
- /* direct tirs */
+ kvfree(in);
+
+ return 0;
+
+err_destroy_tirs:
+ for (tt--; tt >= 0; tt--)
+ mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
+
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
+{
+ int nch = priv->profile->max_nch(priv->mdev);
+ struct mlx5e_tir *tir;
+ void *tirc;
+ int inlen;
+ int err;
+ u32 *in;
+ int ix;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
for (ix = 0; ix < nch; ix++) {
memset(in, 0, inlen);
- tirn = &priv->direct_tir[ix].tirn;
+ tir = &priv->direct_tir[ix];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_direct_tir_ctx(priv, tirc,
- priv->direct_tir[ix].rqtn);
- err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+ priv->direct_tir[ix].rqt.rqtn);
+ err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
if (err)
goto err_destroy_ch_tirs;
}
@@ -2074,27 +2185,28 @@ static int mlx5e_create_tirs(struct mlx5e_priv *priv)
err_destroy_ch_tirs:
for (ix--; ix >= 0; ix--)
- mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn);
-
-err_destroy_tirs:
- for (tt--; tt >= 0; tt--)
- mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]);
+ mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
kvfree(in);
return err;
}
-static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
+static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
- int nch = mlx5e_get_max_num_channels(priv->mdev);
int i;
- for (i = 0; i < nch; i++)
- mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn);
-
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
- mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]);
+ mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
+}
+
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
+{
+ int nch = priv->profile->max_nch(priv->mdev);
+ int i;
+
+ for (i = 0; i < nch; i++)
+ mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
@@ -2168,7 +2280,7 @@ mqprio:
return mlx5e_setup_tc(dev, tc->tc);
}
-static struct rtnl_link_stats64 *
+struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -2520,25 +2632,31 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
}
static void mlx5e_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
if (!mlx5e_vxlan_allowed(priv->mdev))
return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
+ mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}
static void mlx5e_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
if (!mlx5e_vxlan_allowed(priv->mdev))
return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
+ mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}
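/* Editor's sketch (illustrative, not part of this patch): the generic
 * .ndo_udp_tunnel_add/.ndo_udp_tunnel_del callbacks replace the
 * VXLAN-only NDOs and funnel every UDP tunnel offload through one entry
 * point, so each driver filters on ti->type.  A driver that also
 * offloaded GENEVE would follow the same pattern, where
 * my_geneve_add_port() is a hypothetical helper:
 *
 *	if (ti->type != UDP_TUNNEL_TYPE_GENEVE)
 *		return;
 *	my_geneve_add_port(priv, be16_to_cpu(ti->port));
 */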
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2605,6 +2723,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
+ .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
@@ -2624,8 +2743,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
- .ndo_add_vxlan_port = mlx5e_add_vxlan_port,
- .ndo_del_vxlan_port = mlx5e_del_vxlan_port,
+ .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
+ .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
.ndo_features_check = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
@@ -2754,13 +2874,31 @@ static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
(pci_bw < 40000) && (pci_bw < link_speed));
}
-static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- int num_channels)
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ params->rx_cq_period_mode = cq_period_mode;
+
+ params->rx_cq_moderation.pkts =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ params->rx_cq_moderation.usec =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ params->rx_cq_moderation.usec =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
+}
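/* Illustrative usage of the helper above, mirroring the NIC and
 * representor init paths later in this patch: pick the period mode from
 * the HCA capability, then seed the moderation defaults.
 *
 *	u8 mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
 *		  MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
 *		  MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 *
 *	mlx5e_set_rx_cq_mode_params(&priv->params, mode);
 */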
+
+static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u32 link_speed = 0;
u32 pci_bw = 0;
+ u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -2806,13 +2944,13 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
BIT(priv->params.log_rq_size));
- priv->params.rx_cq_moderation_usec =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
- priv->params.rx_cq_moderation_pkts =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
- priv->params.tx_cq_moderation_usec =
+
+ priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
+
+ priv->params.tx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- priv->params.tx_cq_moderation_pkts =
+ priv->params.tx_cq_moderation.pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
priv->params.num_tc = 1;
@@ -2822,14 +2960,20 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
sizeof(priv->params.toeplitz_hash_key));
mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, num_channels);
+ MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
priv->params.lro_wqe_sz =
MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ /* Initialize pflags */
+ MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+
priv->mdev = mdev;
priv->netdev = netdev;
- priv->params.num_channels = num_channels;
+ priv->params.num_channels = profile->max_nch(mdev);
+ priv->profile = profile;
+ priv->ppriv = ppriv;
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_ets_init(priv);
@@ -2854,7 +2998,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
}
}
-static void mlx5e_build_netdev(struct net_device *netdev)
+static const struct switchdev_ops mlx5e_switchdev_ops = {
+ .switchdev_port_attr_get = mlx5e_attr_get,
+};
+
+static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -2935,31 +3083,11 @@ static void mlx5e_build_netdev(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
mlx5e_set_netdev_dev_addr(netdev);
-}
-
-static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
- struct mlx5_core_mkey *mkey)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_create_mkey_mbox_in *in;
- int err;
-
- in = mlx5_vzalloc(sizeof(*in));
- if (!in)
- return -ENOMEM;
- in->seg.flags = MLX5_PERM_LOCAL_WRITE |
- MLX5_PERM_LOCAL_READ |
- MLX5_ACCESS_MODE_PA;
- in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-
- err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
- NULL);
-
- kvfree(in);
-
- return err;
+#ifdef CONFIG_NET_SWITCHDEV
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ netdev->switchdev_ops = &mlx5e_switchdev_ops;
+#endif
}
static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
@@ -2989,7 +3117,7 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
struct mlx5_mkey_seg *mkc;
int inlen = sizeof(*in);
u64 npages =
- mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+ priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
int err;
in = mlx5_vzalloc(inlen);
@@ -3004,7 +3132,7 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
MLX5_ACCESS_MODE_MTT;
mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- mkc->flags_pd = cpu_to_be32(priv->pdn);
+ mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
mkc->log2_page_size = PAGE_SHIFT;
@@ -3017,160 +3145,233 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
return err;
}
-static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
+static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
- struct net_device *netdev;
- struct mlx5e_priv *priv;
- int nch = mlx5e_get_max_num_channels(mdev);
- int err;
-
- if (mlx5e_check_required_hca_cap(mdev))
- return NULL;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
- netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
- nch * MLX5E_MAX_NUM_TC,
- nch);
- if (!netdev) {
- mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
- return NULL;
- }
+ mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
+ mlx5e_build_nic_netdev(netdev);
+ mlx5e_vxlan_init(priv);
+}
- mlx5e_build_netdev_priv(mdev, netdev, nch);
- mlx5e_build_netdev(netdev);
+static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
- netif_carrier_off(netdev);
+ mlx5e_vxlan_cleanup(priv);
- priv = netdev_priv(netdev);
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5_eswitch_unregister_vport_rep(esw, 0);
+}
- priv->wq = create_singlethread_workqueue("mlx5e");
- if (!priv->wq)
- goto err_free_netdev;
+static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+ int i;
- err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
+ err = mlx5e_create_indirect_rqts(priv);
if (err) {
- mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
- goto err_destroy_wq;
+ mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
+ return err;
}
- err = mlx5_core_alloc_pd(mdev, &priv->pdn);
+ err = mlx5e_create_direct_rqts(priv);
if (err) {
- mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
- goto err_unmap_free_uar;
+ mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
+ goto err_destroy_indirect_rqts;
}
- err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn);
+ err = mlx5e_create_indirect_tirs(priv);
if (err) {
- mlx5_core_err(mdev, "alloc td failed, %d\n", err);
- goto err_dealloc_pd;
+ mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
+ goto err_destroy_direct_rqts;
}
- err = mlx5e_create_mkey(priv, priv->pdn, &priv->mkey);
+ err = mlx5e_create_direct_tirs(priv);
if (err) {
- mlx5_core_err(mdev, "create mkey failed, %d\n", err);
- goto err_dealloc_transport_domain;
+ mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
+ goto err_destroy_indirect_tirs;
}
- err = mlx5e_create_umr_mkey(priv);
+ err = mlx5e_create_flow_steering(priv);
if (err) {
- mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
- goto err_destroy_mkey;
+ mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
+ goto err_destroy_direct_tirs;
}
+ err = mlx5e_tc_init(priv);
+ if (err)
+ goto err_destroy_flow_steering;
+
+ return 0;
+
+err_destroy_flow_steering:
+ mlx5e_destroy_flow_steering(priv);
+err_destroy_direct_tirs:
+ mlx5e_destroy_direct_tirs(priv);
+err_destroy_indirect_tirs:
+ mlx5e_destroy_indirect_tirs(priv);
+err_destroy_direct_rqts:
+ for (i = 0; i < priv->profile->max_nch(mdev); i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+err_destroy_indirect_rqts:
+ mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ return err;
+}
+
+static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
+{
+ int i;
+
+ mlx5e_tc_cleanup(priv);
+ mlx5e_destroy_flow_steering(priv);
+ mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_indirect_tirs(priv);
+ for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+}
+
+static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
+{
+ int err;
+
err = mlx5e_create_tises(priv);
if (err) {
- mlx5_core_warn(mdev, "create tises failed, %d\n", err);
- goto err_destroy_umr_mkey;
+ mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
+ return err;
}
- err = mlx5e_open_drop_rq(priv);
- if (err) {
- mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
- goto err_destroy_tises;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
+#endif
+ return 0;
+}
+
+static void mlx5e_nic_enable(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_eswitch_rep rep;
+
+ if (mlx5e_vxlan_allowed(mdev)) {
+ rtnl_lock();
+ udp_tunnel_get_rx_info(netdev);
+ rtnl_unlock();
}
- err = mlx5e_create_rqts(priv);
- if (err) {
- mlx5_core_warn(mdev, "create rqts failed, %d\n", err);
- goto err_close_drop_rq;
+ mlx5e_enable_async_events(priv);
+ queue_work(priv->wq, &priv->set_rx_mode_work);
+
+ if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ rep.load = mlx5e_nic_rep_load;
+ rep.unload = mlx5e_nic_rep_unload;
+ rep.vport = 0;
+ rep.priv_data = priv;
+ mlx5_eswitch_register_vport_rep(esw, &rep);
}
+}
- err = mlx5e_create_tirs(priv);
- if (err) {
- mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
- goto err_destroy_rqts;
+static void mlx5e_nic_disable(struct mlx5e_priv *priv)
+{
+ queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_disable_async_events(priv);
+}
+
+static const struct mlx5e_profile mlx5e_nic_profile = {
+ .init = mlx5e_nic_init,
+ .cleanup = mlx5e_nic_cleanup,
+ .init_rx = mlx5e_init_nic_rx,
+ .cleanup_rx = mlx5e_cleanup_nic_rx,
+ .init_tx = mlx5e_init_nic_tx,
+ .cleanup_tx = mlx5e_cleanup_nic_tx,
+ .enable = mlx5e_nic_enable,
+ .disable = mlx5e_nic_disable,
+ .update_stats = mlx5e_update_stats,
+ .max_nch = mlx5e_get_max_num_channels,
+ .max_tc = MLX5E_MAX_NUM_TC,
+};
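/* Editor's sketch (assumption, not part of this patch): a minimal
 * hypothetical profile.  Per the call sites in mlx5e_create_netdev() and
 * mlx5e_destroy_netdev() below, only .enable, .disable and .cleanup are
 * NULL-checked, so they may be omitted; the remaining callbacks are
 * invoked unconditionally:
 *
 *	static const struct mlx5e_profile my_min_profile = {
 *		.init         = my_init,
 *		.init_rx      = my_init_rx,
 *		.cleanup_rx   = my_cleanup_rx,
 *		.init_tx      = my_init_tx,
 *		.cleanup_tx   = my_cleanup_tx,
 *		.update_stats = my_update_stats,
 *		.max_nch      = my_max_nch,
 *		.max_tc       = 1,
 *	};
 */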
+
+void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile, void *ppriv)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int nch = profile->max_nch(mdev);
+ int err;
+
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
+ nch * profile->max_tc,
+ nch);
+ if (!netdev) {
+ mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
+ return NULL;
}
- err = mlx5e_create_flow_steering(priv);
+ profile->init(mdev, netdev, profile, ppriv);
+
+ netif_carrier_off(netdev);
+
+ priv = netdev_priv(netdev);
+
+ priv->wq = create_singlethread_workqueue("mlx5e");
+ if (!priv->wq)
+ goto err_free_netdev;
+
+ err = mlx5e_create_umr_mkey(priv);
if (err) {
- mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
- goto err_destroy_tirs;
+ mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
+ goto err_destroy_wq;
}
- mlx5e_create_q_counter(priv);
-
- mlx5e_init_l2_addr(priv);
+ err = profile->init_tx(priv);
+ if (err)
+ goto err_destroy_umr_mkey;
- mlx5e_vxlan_init(priv);
+ err = mlx5e_open_drop_rq(priv);
+ if (err) {
+ mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+ goto err_cleanup_tx;
+ }
- err = mlx5e_tc_init(priv);
+ err = profile->init_rx(priv);
if (err)
- goto err_dealloc_q_counters;
+ goto err_close_drop_rq;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
-#endif
+ mlx5e_create_q_counter(priv);
+
+ mlx5e_init_l2_addr(priv);
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
- goto err_tc_cleanup;
- }
-
- if (mlx5e_vxlan_allowed(mdev)) {
- rtnl_lock();
- vxlan_get_rx_port(netdev);
- rtnl_unlock();
+ goto err_dealloc_q_counters;
}
- mlx5e_enable_async_events(priv);
- queue_work(priv->wq, &priv->set_rx_mode_work);
+ if (profile->enable)
+ profile->enable(priv);
return priv;
-err_tc_cleanup:
- mlx5e_tc_cleanup(priv);
-
err_dealloc_q_counters:
mlx5e_destroy_q_counter(priv);
- mlx5e_destroy_flow_steering(priv);
-
-err_destroy_tirs:
- mlx5e_destroy_tirs(priv);
-
-err_destroy_rqts:
- mlx5e_destroy_rqts(priv);
+ profile->cleanup_rx(priv);
err_close_drop_rq:
mlx5e_close_drop_rq(priv);
-err_destroy_tises:
- mlx5e_destroy_tises(priv);
+err_cleanup_tx:
+ profile->cleanup_tx(priv);
err_destroy_umr_mkey:
mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
-err_destroy_mkey:
- mlx5_core_destroy_mkey(mdev, &priv->mkey);
-
-err_dealloc_transport_domain:
- mlx5_core_dealloc_transport_domain(mdev, priv->tdn);
-
-err_dealloc_pd:
- mlx5_core_dealloc_pd(mdev, priv->pdn);
-
-err_unmap_free_uar:
- mlx5_unmap_free_uar(mdev, &priv->cq_uar);
-
err_destroy_wq:
destroy_workqueue(priv->wq);
@@ -3180,46 +3381,96 @@ err_free_netdev:
return NULL;
}
-static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
+static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
{
- struct mlx5e_priv *priv = vpriv;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ int vport;
+
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return;
+
+ for (vport = 1; vport < total_vfs; vport++) {
+ struct mlx5_eswitch_rep rep;
+
+ rep.load = mlx5e_vport_rep_load;
+ rep.unload = mlx5e_vport_rep_unload;
+ rep.vport = vport;
+ mlx5_eswitch_register_vport_rep(esw, &rep);
+ }
+}
+
+static void *mlx5e_add(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ void *ppriv = NULL;
+ void *ret;
+
+ if (mlx5e_check_required_hca_cap(mdev))
+ return NULL;
+
+ if (mlx5e_create_mdev_resources(mdev))
+ return NULL;
+
+ mlx5e_register_vport_rep(mdev);
+
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ ppriv = &esw->offloads.vport_reps[0];
+
+ ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
+ if (!ret) {
+ mlx5e_destroy_mdev_resources(mdev);
+ return NULL;
+ }
+ return ret;
+}
+
+void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
+{
+ const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (profile->disable)
+ profile->disable(priv);
- queue_work(priv->wq, &priv->set_rx_mode_work);
- mlx5e_disable_async_events(priv);
flush_workqueue(priv->wq);
if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
netif_device_detach(netdev);
- mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_close_locked(netdev);
- mutex_unlock(&priv->state_lock);
+ mlx5e_close(netdev);
} else {
unregister_netdev(netdev);
}
- mlx5e_tc_cleanup(priv);
- mlx5e_vxlan_cleanup(priv);
mlx5e_destroy_q_counter(priv);
- mlx5e_destroy_flow_steering(priv);
- mlx5e_destroy_tirs(priv);
- mlx5e_destroy_rqts(priv);
+ profile->cleanup_rx(priv);
mlx5e_close_drop_rq(priv);
- mlx5e_destroy_tises(priv);
+ profile->cleanup_tx(priv);
mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
- mlx5_core_destroy_mkey(priv->mdev, &priv->mkey);
- mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
- mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
- mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
cancel_delayed_work_sync(&priv->update_stats_work);
destroy_workqueue(priv->wq);
+ if (profile->cleanup)
+ profile->cleanup(priv);
if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
free_netdev(netdev);
}
+static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ struct mlx5e_priv *priv = vpriv;
+ int vport;
+
+ mlx5e_destroy_netdev(mdev, priv);
+
+ for (vport = 1; vport < total_vfs; vport++)
+ mlx5_eswitch_unregister_vport_rep(esw, vport);
+
+ mlx5e_destroy_mdev_resources(mdev);
+}
+
static void *mlx5e_get_netdev(void *vpriv)
{
struct mlx5e_priv *priv = vpriv;
@@ -3228,8 +3479,8 @@ static void *mlx5e_get_netdev(void *vpriv)
}
static struct mlx5_interface mlx5e_interface = {
- .add = mlx5e_create_netdev,
- .remove = mlx5e_destroy_netdev,
+ .add = mlx5e_add,
+ .remove = mlx5e_remove,
.event = mlx5e_async_event,
.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
.get_dev = mlx5e_get_netdev,
@@ -3237,6 +3488,7 @@ static struct mlx5_interface mlx5e_interface = {
void mlx5e_init(void)
{
+ mlx5e_build_ptys2ethtool_map();
mlx5_register_interface(&mlx5e_interface);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
new file mode 100644
index 000000000000..5ef02f02a1d5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <generated/utsrelease.h>
+#include <linux/mlx5/fs.h>
+#include <net/switchdev.h>
+
+#include "eswitch.h"
+#include "en.h"
+
+static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
+
+static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+}
+
+static const struct counter_desc sw_rep_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+};
+
+#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
+
+static void mlx5e_rep_get_strings(struct net_device *dev,
+ u32 stringset, uint8_t *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+ strcpy(data + (i * ETH_GSTRING_LEN),
+ sw_rep_stats_desc[i].format);
+ break;
+ }
+}
+
+static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_sw_stats *s = &priv->stats.sw;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ int i, j;
+
+ memset(s, 0, sizeof(*s));
+ for (i = 0; i < priv->params.num_channels; i++) {
+ rq_stats = &priv->channel[i]->rq.stats;
+
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
+
+ for (j = 0; j < priv->params.num_tc; j++) {
+ sq_stats = &priv->channel[i]->sq[j].stats;
+
+ s->tx_packets += sq_stats->packets;
+ s->tx_bytes += sq_stats->bytes;
+ }
+ }
+}
+
+static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (!data)
+ return;
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_sw_rep_counters(priv);
+ mutex_unlock(&priv->state_lock);
+
+ for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+ data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_rep_stats_desc, i);
+}
+
+static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return NUM_VPORT_REP_COUNTERS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
+ .get_drvinfo = mlx5e_rep_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx5e_rep_get_strings,
+ .get_sset_count = mlx5e_rep_get_sset_count,
+ .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
+};
+
+int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ u8 mac[ETH_ALEN];
+
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ mlx5_query_nic_vport_mac_address(priv->mdev, 0, mac);
+ attr->u.ppid.id_len = ETH_ALEN;
+ memcpy(&attr->u.ppid.id, &mac, ETH_ALEN);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
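/* Editor's note: the parent ID is taken from the PF (vport 0) MAC, so
 * every representor on the same device reports an identical switch ID
 * and switchdev-aware tooling can group the ports as one switch.
 */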
+
+int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
+
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_channel *c;
+ int n, tc, err, num_sqs = 0;
+ u16 *sqs;
+
+ sqs = kcalloc(priv->params.num_channels * priv->params.num_tc, sizeof(u16), GFP_KERNEL);
+ if (!sqs)
+ return -ENOMEM;
+
+ for (n = 0; n < priv->params.num_channels; n++) {
+ c = priv->channel[n];
+ for (tc = 0; tc < c->num_tc; tc++)
+ sqs[num_sqs++] = c->sq[tc].sqn;
+ }
+
+ err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+
+ kfree(sqs);
+ return err;
+}
+
+int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = rep->priv_data;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return mlx5e_add_sqs_fwd_rules(priv);
+ return 0;
+}
+
+void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+
+ mlx5_eswitch_sqs2vport_stop(esw, rep);
+}
+
+void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = rep->priv_data;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_remove_sqs_fwd_rules(priv);
+}
+
+static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ int ret;
+
+ ret = snprintf(buf, len, "%d", rep->vport - 1);
+ if (ret >= len)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
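/* Editor's note: vport 0 is the PF, so representor vports start at 1 and
 * the reported name is offset by one; e.g. the vport 1 representor
 * reports phys_port_name "0", which userspace reads via
 * IFLA_PHYS_PORT_NAME (ip -d link).
 */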
+
+static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
+ .switchdev_port_attr_get = mlx5e_attr_get,
+};
+
+static const struct net_device_ops mlx5e_netdev_ops_rep = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
+ .ndo_start_xmit = mlx5e_xmit,
+ .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
+ .ndo_get_stats64 = mlx5e_get_stats,
+};
+
+static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+
+ priv->params.log_sq_size =
+ MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
+ priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
+
+ priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
+
+ priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+ priv->params.num_tc = 1;
+
+ priv->params.lro_wqe_sz =
+ MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->params.num_channels = profile->max_nch(mdev);
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+
+ mutex_init(&priv->state_lock);
+
+ INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_build_rep_netdev(struct net_device *netdev)
+{
+ netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+
+ netdev->watchdog_timeo = 15 * HZ;
+
+ netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
+
+#ifdef CONFIG_NET_SWITCHDEV
+ netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
+#endif
+
+ netdev->features |= NETIF_F_VLAN_CHALLENGED;
+
+ eth_hw_addr_random(netdev);
+}
+
+static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv);
+ mlx5e_build_rep_netdev(netdev);
+}
+
+static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_flow_rule *flow_rule;
+ int err;
+ int i;
+
+ err = mlx5e_create_direct_rqts(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5e_create_direct_tirs(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
+ goto err_destroy_direct_rqts;
+ }
+
+ flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
+ rep->vport,
+ priv->direct_tir[0].tirn);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ goto err_destroy_direct_tirs;
+ }
+ rep->vport_rx_rule = flow_rule;
+
+ return 0;
+
+err_destroy_direct_tirs:
+ mlx5e_destroy_direct_tirs(priv);
+err_destroy_direct_rqts:
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ return err;
+}
+
+static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ int i;
+
+ mlx5_del_flow_rule(rep->vport_rx_rule);
+ mlx5e_destroy_direct_tirs(priv);
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+}
+
+static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_create_tises(priv);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
+{
+#define MLX5E_PORT_REPRESENTOR_NCH 1
+ return MLX5E_PORT_REPRESENTOR_NCH;
+}
+
+static struct mlx5e_profile mlx5e_rep_profile = {
+ .init = mlx5e_init_rep,
+ .init_rx = mlx5e_init_rep_rx,
+ .cleanup_rx = mlx5e_cleanup_rep_rx,
+ .init_tx = mlx5e_init_rep_tx,
+ .cleanup_tx = mlx5e_cleanup_nic_tx,
+ .update_stats = mlx5e_update_sw_rep_counters,
+ .max_nch = mlx5e_get_rep_max_num_channels,
+ .max_tc = 1,
+};
+
+int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
+ if (!rep->priv_data) {
+ pr_warn("Failed to create representor for vport %d\n",
+ rep->vport);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = rep->priv_data;
+
+ mlx5e_destroy_netdev(esw->dev, priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index bd947704b59c..022acc2e8922 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -689,7 +689,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (is_first_ethertype_ip(skb)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- rq->stats.csum_sw++;
+ rq->stats.csum_complete++;
return;
}
@@ -699,7 +699,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (cqe_is_tunneled(cqe)) {
skb->csum_level = 1;
skb->encapsulation = 1;
- rq->stats.csum_inner++;
+ rq->stats.csum_unnecessary_inner++;
}
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
new file mode 100644
index 000000000000..1fffe48a93cc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+/* Adaptive moderation profiles */
+#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
+#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
+#define MLX5E_PARAMS_AM_NUM_PROFILES 5
+
+/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
+#define MLX5_AM_EQE_PROFILES { \
+ {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+}
+
+#define MLX5_AM_CQE_PROFILES { \
+ {2, 256}, \
+ {8, 128}, \
+ {16, 64}, \
+ {32, 64}, \
+ {64, 64} \
+}
+
+static const struct mlx5e_cq_moder
+profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
+ MLX5_AM_EQE_PROFILES,
+ MLX5_AM_CQE_PROFILES,
+};
+
+static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
+{
+ return profile[cq_period_mode][ix];
+}
+
+struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
+{
+ int default_profile_ix;
+
+ if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
+ else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
+ default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
+
+ return profile[rx_cq_period_mode][default_profile_ix];
+}
+
+/* Adaptive moderation logic */
+enum {
+ MLX5E_AM_START_MEASURE,
+ MLX5E_AM_MEASURE_IN_PROGRESS,
+ MLX5E_AM_APPLY_NEW_PROFILE,
+};
+
+enum {
+ MLX5E_AM_PARKING_ON_TOP,
+ MLX5E_AM_PARKING_TIRED,
+ MLX5E_AM_GOING_RIGHT,
+ MLX5E_AM_GOING_LEFT,
+};
+
+enum {
+ MLX5E_AM_STATS_WORSE,
+ MLX5E_AM_STATS_SAME,
+ MLX5E_AM_STATS_BETTER,
+};
+
+enum {
+ MLX5E_AM_STEPPED,
+ MLX5E_AM_TOO_TIRED,
+ MLX5E_AM_ON_EDGE,
+};
+
+static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
+{
+ switch (am->tune_state) {
+ case MLX5E_AM_PARKING_ON_TOP:
+ case MLX5E_AM_PARKING_TIRED:
+ WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
+ return true;
+ case MLX5E_AM_GOING_RIGHT:
+ return (am->steps_left > 1) && (am->steps_right == 1);
+ default: /* MLX5E_AM_GOING_LEFT */
+ return (am->steps_right > 1) && (am->steps_left == 1);
+ }
+}
+
+static void mlx5e_am_turn(struct mlx5e_rx_am *am)
+{
+ switch (am->tune_state) {
+ case MLX5E_AM_PARKING_ON_TOP:
+ case MLX5E_AM_PARKING_TIRED:
+ WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
+ break;
+ case MLX5E_AM_GOING_RIGHT:
+ am->tune_state = MLX5E_AM_GOING_LEFT;
+ am->steps_left = 0;
+ break;
+ case MLX5E_AM_GOING_LEFT:
+ am->tune_state = MLX5E_AM_GOING_RIGHT;
+ am->steps_right = 0;
+ break;
+ }
+}
+
+static int mlx5e_am_step(struct mlx5e_rx_am *am)
+{
+ if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
+ return MLX5E_AM_TOO_TIRED;
+
+ switch (am->tune_state) {
+ case MLX5E_AM_PARKING_ON_TOP:
+ case MLX5E_AM_PARKING_TIRED:
+ WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
+ break;
+ case MLX5E_AM_GOING_RIGHT:
+ if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
+ return MLX5E_AM_ON_EDGE;
+ am->profile_ix++;
+ am->steps_right++;
+ break;
+ case MLX5E_AM_GOING_LEFT:
+ if (am->profile_ix == 0)
+ return MLX5E_AM_ON_EDGE;
+ am->profile_ix--;
+ am->steps_left++;
+ break;
+ }
+
+ am->tired++;
+ return MLX5E_AM_STEPPED;
+}
+
+static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
+{
+ am->steps_right = 0;
+ am->steps_left = 0;
+ am->tired = 0;
+ am->tune_state = MLX5E_AM_PARKING_ON_TOP;
+}
+
+static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
+{
+ am->steps_right = 0;
+ am->steps_left = 0;
+ am->tune_state = MLX5E_AM_PARKING_TIRED;
+}
+
+static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
+{
+ am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
+ MLX5E_AM_GOING_RIGHT;
+ mlx5e_am_step(am);
+}
+
+static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
+ struct mlx5e_rx_am_stats *prev)
+{
+ int diff;
+
+ if (!prev->ppms)
+ return curr->ppms ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_SAME;
+
+ diff = curr->ppms - prev->ppms;
+ if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
+ return (diff > 0) ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_WORSE;
+
+ if (!prev->epms)
+ return curr->epms ? MLX5E_AM_STATS_WORSE :
+ MLX5E_AM_STATS_SAME;
+
+ diff = curr->epms - prev->epms;
+ if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
+ return (diff < 0) ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_WORSE;
+
+ return MLX5E_AM_STATS_SAME;
+}
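/* Worked example (editor's illustration): with prev->ppms = 1000 and
 * curr->ppms = 1120, diff = 120 and (100 * 120) / 1000 = 12 > 10, so the
 * last step is judged BETTER.  With curr->ppms = 1050 the change stays
 * inside the 10% band and the decision falls through to epms, where
 * fewer events per millisecond wins.
 */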
+
+static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
+ struct mlx5e_rx_am *am)
+{
+ int prev_state = am->tune_state;
+ int prev_ix = am->profile_ix;
+ int stats_res;
+ int step_res;
+
+ switch (am->tune_state) {
+ case MLX5E_AM_PARKING_ON_TOP:
+ stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
+ if (stats_res != MLX5E_AM_STATS_SAME)
+ mlx5e_am_exit_parking(am);
+ break;
+
+ case MLX5E_AM_PARKING_TIRED:
+ am->tired--;
+ if (!am->tired)
+ mlx5e_am_exit_parking(am);
+ break;
+
+ case MLX5E_AM_GOING_RIGHT:
+ case MLX5E_AM_GOING_LEFT:
+ stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
+ if (stats_res != MLX5E_AM_STATS_BETTER)
+ mlx5e_am_turn(am);
+
+ if (mlx5e_am_on_top(am)) {
+ mlx5e_am_park_on_top(am);
+ break;
+ }
+
+ step_res = mlx5e_am_step(am);
+ switch (step_res) {
+ case MLX5E_AM_ON_EDGE:
+ mlx5e_am_park_on_top(am);
+ break;
+ case MLX5E_AM_TOO_TIRED:
+ mlx5e_am_park_tired(am);
+ break;
+ }
+
+ break;
+ }
+
+ if ((prev_state != MLX5E_AM_PARKING_ON_TOP) ||
+ (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
+ am->prev_stats = *curr_stats;
+
+ return am->profile_ix != prev_ix;
+}
+
+static void mlx5e_am_sample(struct mlx5e_rq *rq,
+ struct mlx5e_rx_am_sample *s)
+{
+ s->time = ktime_get();
+ s->pkt_ctr = rq->stats.packets;
+ s->event_ctr = rq->cq.event_ctr;
+}
+
+#define MLX5E_AM_NEVENTS 64
+
+static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
+ struct mlx5e_rx_am_sample *end,
+ struct mlx5e_rx_am_stats *curr_stats)
+{
+ /* u32 holds up to 71 minutes, should be enough */
+ u32 delta_us = ktime_us_delta(end->time, start->time);
+ unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
+
+ if (!delta_us) {
+ WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
+ return;
+ }
+
+ curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
+ curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
+}
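/* Worked example (editor's illustration): if the MLX5E_AM_NEVENTS = 64
 * event window spanned delta_us = 4000 and saw npkts = 8000, then
 * ppms = 8000 * 1000 / 4000 = 2000 packets/msec and
 * epms = 64 * 1000 / 4000 = 16 events/msec.
 */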
+
+void mlx5e_rx_am_work(struct work_struct *work)
+{
+ struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
+ work);
+ struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
+ struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
+
+ mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq,
+ cur_profile.usec, cur_profile.pkts);
+
+ am->state = MLX5E_AM_START_MEASURE;
+}
+
+void mlx5e_rx_am(struct mlx5e_rq *rq)
+{
+ struct mlx5e_rx_am *am = &rq->am;
+ struct mlx5e_rx_am_sample end_sample;
+ struct mlx5e_rx_am_stats curr_stats;
+ u16 nevents;
+
+ switch (am->state) {
+ case MLX5E_AM_MEASURE_IN_PROGRESS:
+ nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
+ if (nevents < MLX5E_AM_NEVENTS)
+ break;
+ mlx5e_am_sample(rq, &end_sample);
+ mlx5e_am_calc_stats(&am->start_sample, &end_sample,
+ &curr_stats);
+ if (mlx5e_am_decision(&curr_stats, am)) {
+ am->state = MLX5E_AM_APPLY_NEW_PROFILE;
+ schedule_work(&am->work);
+ break;
+ }
+ /* fall through */
+ case MLX5E_AM_START_MEASURE:
+ mlx5e_am_sample(rq, &am->start_sample);
+ am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
+ break;
+ case MLX5E_AM_APPLY_NEW_PROFILE:
+ break;
+ }
+}
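/* Editor's summary (not part of this patch): the sampler is a
 * three-state machine.  START_MEASURE takes a baseline sample;
 * MEASURE_IN_PROGRESS waits for MLX5E_AM_NEVENTS completion events,
 * computes the ppms/epms deltas and runs mlx5e_am_decision();
 * APPLY_NEW_PROFILE freezes sampling until mlx5e_rx_am_work() has
 * written the new moderation values to hardware and re-armed
 * START_MEASURE.
 */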
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 83bc32b25849..7b9d8a989b52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -42,9 +42,11 @@
be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
struct counter_desc {
- char name[ETH_GSTRING_LEN];
+ char format[ETH_GSTRING_LEN];
int offset; /* Byte offset */
};
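/* Editor's sketch (assumption: the expansion happens when the ethtool
 * string table is built, outside this hunk): renaming "name" to "format"
 * lets per-ring descriptors carry a printf template instantiated once
 * per channel, roughly:
 *
 *	sprintf(data + idx * ETH_GSTRING_LEN,
 *		rq_stats_desc[j].format, ring_ix);
 *
 * so "rx%d_packets" becomes "rx0_packets" for ring 0.
 */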
@@ -53,18 +55,18 @@ struct mlx5e_sw_stats {
u64 rx_bytes;
u64 tx_packets;
u64 tx_bytes;
- u64 tso_packets;
- u64 tso_bytes;
- u64 tso_inner_packets;
- u64 tso_inner_bytes;
- u64 lro_packets;
- u64 lro_bytes;
- u64 rx_csum_good;
+ u64 tx_tso_packets;
+ u64 tx_tso_bytes;
+ u64 tx_tso_inner_packets;
+ u64 tx_tso_inner_bytes;
+ u64 rx_lro_packets;
+ u64 rx_lro_bytes;
+ u64 rx_csum_unnecessary;
u64 rx_csum_none;
- u64 rx_csum_sw;
- u64 rx_csum_inner;
- u64 tx_csum_offload;
- u64 tx_csum_inner;
+ u64 rx_csum_complete;
+ u64 rx_csum_unnecessary_inner;
+ u64 tx_csum_partial;
+ u64 tx_csum_partial_inner;
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
@@ -76,7 +78,7 @@ struct mlx5e_sw_stats {
u64 rx_cqe_compress_pkts;
/* Special handling counters */
- u64 link_down_events;
+ u64 link_down_events_phy;
};
static const struct counter_desc sw_stats_desc[] = {
@@ -84,18 +86,18 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
@@ -105,7 +107,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
struct mlx5e_qcounter_stats {
@@ -125,12 +127,6 @@ struct mlx5e_vport_stats {
};
static const struct counter_desc vport_stats_desc[] = {
- { "rx_vport_error_packets",
- VPORT_COUNTER_OFF(received_errors.packets) },
- { "rx_vport_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) },
- { "tx_vport_error_packets",
- VPORT_COUNTER_OFF(transmit_errors.packets) },
- { "tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) },
{ "rx_vport_unicast_packets",
VPORT_COUNTER_OFF(received_eth_unicast.packets) },
{ "rx_vport_unicast_bytes",
@@ -155,6 +151,22 @@ static const struct counter_desc vport_stats_desc[] = {
VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
{ "tx_vport_broadcast_bytes",
VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+ { "rx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(received_ib_unicast.packets) },
+ { "rx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_unicast.octets) },
+ { "tx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
+ { "tx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
+ { "rx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(received_ib_multicast.packets) },
+ { "rx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_multicast.octets) },
+ { "tx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
+ { "tx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};
#define PPORT_802_3_OFF(c) \
@@ -192,94 +204,69 @@ struct mlx5e_pport_stats {
};
static const struct counter_desc pport_802_3_stats_desc[] = {
- { "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) },
- { "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) },
- { "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
- { "alignment_err", PPORT_802_3_OFF(a_alignment_errors) },
- { "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) },
- { "octets_received", PPORT_802_3_OFF(a_octets_received_ok) },
- { "multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
- { "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
- { "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
- { "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
- { "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) },
- { "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) },
- { "too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) },
- { "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
- { "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
- { "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) },
- { "unsupported_op_rx",
- PPORT_802_3_OFF(a_unsupported_opcodes_received) },
- { "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
- { "pause_ctrl_tx",
- PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+ { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+ { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+ { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+ { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+ { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+ { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+ { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+ { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+ { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+ { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+ { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+ { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+ { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+ { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+ { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+ { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+ { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+ { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};
static const struct counter_desc pport_2863_stats_desc[] = {
- { "in_octets", PPORT_2863_OFF(if_in_octets) },
- { "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) },
- { "in_discards", PPORT_2863_OFF(if_in_discards) },
- { "in_errors", PPORT_2863_OFF(if_in_errors) },
- { "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) },
- { "out_octets", PPORT_2863_OFF(if_out_octets) },
- { "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) },
- { "out_discards", PPORT_2863_OFF(if_out_discards) },
- { "out_errors", PPORT_2863_OFF(if_out_errors) },
- { "in_multicast_pkts", PPORT_2863_OFF(if_in_multicast_pkts) },
- { "in_broadcast_pkts", PPORT_2863_OFF(if_in_broadcast_pkts) },
- { "out_multicast_pkts", PPORT_2863_OFF(if_out_multicast_pkts) },
- { "out_broadcast_pkts", PPORT_2863_OFF(if_out_broadcast_pkts) },
+ { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+ { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+ { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};
static const struct counter_desc pport_2819_stats_desc[] = {
- { "drop_events", PPORT_2819_OFF(ether_stats_drop_events) },
- { "octets", PPORT_2819_OFF(ether_stats_octets) },
- { "pkts", PPORT_2819_OFF(ether_stats_pkts) },
- { "broadcast_pkts", PPORT_2819_OFF(ether_stats_broadcast_pkts) },
- { "multicast_pkts", PPORT_2819_OFF(ether_stats_multicast_pkts) },
- { "crc_align_errors", PPORT_2819_OFF(ether_stats_crc_align_errors) },
- { "undersize_pkts", PPORT_2819_OFF(ether_stats_undersize_pkts) },
- { "oversize_pkts", PPORT_2819_OFF(ether_stats_oversize_pkts) },
- { "fragments", PPORT_2819_OFF(ether_stats_fragments) },
- { "jabbers", PPORT_2819_OFF(ether_stats_jabbers) },
- { "collisions", PPORT_2819_OFF(ether_stats_collisions) },
- { "p64octets", PPORT_2819_OFF(ether_stats_pkts64octets) },
- { "p65to127octets", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
- { "p128to255octets", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
- { "p256to511octets", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
- { "p512to1023octets", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
- { "p1024to1518octets",
- PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
- { "p1519to2047octets",
- PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
- { "p2048to4095octets",
- PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
- { "p4096to8191octets",
- PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
- { "p8192to10239octets",
- PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+ { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+ { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+ { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+ { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+ { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+ { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+ { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+ { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+ { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+ { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+ { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+ { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+ { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
- { "rx_octets", PPORT_PER_PRIO_OFF(rx_octets) },
- { "rx_frames", PPORT_PER_PRIO_OFF(rx_frames) },
- { "tx_octets", PPORT_PER_PRIO_OFF(tx_octets) },
- { "tx_frames", PPORT_PER_PRIO_OFF(tx_frames) },
+ { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+ { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+ { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
- { "rx_pause", PPORT_PER_PRIO_OFF(rx_pause) },
- { "rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
- { "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) },
- { "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
- { "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+ /* %s is "global" or "prio{i}" */
+ { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+ { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+ { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+ { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+ { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
- u64 csum_sw;
- u64 csum_inner;
+ u64 csum_complete;
+ u64 csum_unnecessary_inner;
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
@@ -292,19 +279,19 @@ struct mlx5e_rq_stats {
};
static const struct counter_desc rq_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
};
struct mlx5e_sq_stats {
@@ -315,28 +302,28 @@ struct mlx5e_sq_stats {
u64 tso_bytes;
u64 tso_inner_packets;
u64 tso_inner_bytes;
- u64 csum_offload_inner;
+ u64 csum_partial_inner;
u64 nop;
/* less likely accessed in data path */
- u64 csum_offload_none;
+ u64 csum_none;
u64 stopped;
u64 wake;
u64 dropped;
};
static const struct counter_desc sq_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
};
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
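The per-prio descriptors above carry printf-style templates ("rx_prio%d_bytes", "rx_%s_pause") rather than final ethtool strings; the driver expands them once per priority (or once for "global") when filling the string set. A minimal standalone sketch of that expansion, with hypothetical template names rather than the driver's real descriptor arrays:

#include <stdio.h>

#define NUM_PRIOS 8

/* hypothetical stand-ins for the kernel's counter_desc name templates */
static const char *per_prio_templates[] = {
	"rx_prio%d_bytes",
	"rx_prio%d_packets",
	"tx_prio%d_bytes",
	"tx_prio%d_packets",
};

int main(void)
{
	char name[32];
	unsigned int i;
	int prio;

	/* one expanded ethtool string per (template, priority) pair */
	for (prio = 0; prio < NUM_PRIOS; prio++)
		for (i = 0; i < sizeof(per_prio_templates) / sizeof(per_prio_templates[0]); i++) {
			snprintf(name, sizeof(name), per_prio_templates[i], prio);
			printf("%s\n", name);
		}
	return 0;
}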
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 704c3d30493e..3261e8b1286e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -50,7 +50,7 @@ struct mlx5e_tc_flow {
#define MLX5E_TC_TABLE_NUM_GROUPS 4
static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
- u32 *match_c, u32 *match_v,
+ struct mlx5_flow_spec *spec,
u32 action, u32 flow_tag)
{
struct mlx5_core_dev *dev = priv->mdev;
@@ -88,8 +88,8 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
table_created = true;
}
- rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
- match_c, match_v,
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
action, flow_tag,
&dest);
@@ -126,12 +126,13 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
}
}
-static int parse_cls_flower(struct mlx5e_priv *priv,
- u32 *match_c, u32 *match_v,
+static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
- void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers);
- void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
u16 addr_type = 0;
u8 ip_proto = 0;
@@ -342,12 +343,11 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
- u32 *match_c;
- u32 *match_v;
int err = 0;
u32 flow_tag;
u32 action;
struct mlx5e_tc_flow *flow;
+ struct mlx5_flow_spec *spec;
struct mlx5_flow_rule *old = NULL;
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
@@ -357,16 +357,15 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
else
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_c || !match_v || !flow) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec || !flow) {
err = -ENOMEM;
goto err_free;
}
flow->cookie = f->cookie;
- err = parse_cls_flower(priv, match_c, match_v, f);
+ err = parse_cls_flower(priv, spec, f);
if (err < 0)
goto err_free;
@@ -379,8 +378,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
if (err)
goto err_free;
- flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action,
- flow_tag);
+ flow->rule = mlx5e_tc_add_flow(priv, spec, action, flow_tag);
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
goto err_hash_del;
@@ -398,8 +396,7 @@ err_free:
if (!old)
kfree(flow);
out:
- kfree(match_c);
- kfree(match_v);
+ kvfree(spec);
return err;
}
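The en_tc.c conversion above replaces the two separately allocated match_c/match_v buffers with one mlx5_flow_spec that bundles the match criteria, the match value, and the criteria-enable bits. A standalone sketch of the pattern under simplified types (the struct layout and flag below are illustrative, not the kernel's exact definitions):

#include <stdio.h>
#include <stdlib.h>

#define MATCH_PARAM_DWORDS 128          /* illustrative size */
#define MATCH_OUTER_HEADERS (1u << 0)   /* illustrative flag */

/* simplified stand-in for struct mlx5_flow_spec */
struct flow_spec {
	unsigned char match_criteria_enable;
	unsigned int match_criteria[MATCH_PARAM_DWORDS];
	unsigned int match_value[MATCH_PARAM_DWORDS];
};

/* stand-in for mlx5_add_flow_rule(): just reports what it was given */
static int add_rule(const struct flow_spec *spec)
{
	printf("rule: enable=0x%x mask=0x%08x value=0x%08x\n",
	       spec->match_criteria_enable,
	       spec->match_criteria[0], spec->match_value[0]);
	return 0;
}

int main(void)
{
	struct flow_spec *spec;
	int err;

	/* one zeroed allocation replaces the old match_c + match_v pair */
	spec = calloc(1, sizeof(*spec));
	if (!spec)
		return 1;

	spec->match_criteria[0] = 0xffffffffu;  /* mask: match the first dword */
	spec->match_value[0] = 0x0800;          /* value to match against */
	spec->match_criteria_enable = MATCH_OUTER_HEADERS;

	err = add_rule(spec);

	free(spec);	/* a single kvfree() in the kernel version */
	return err;
}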
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 229ab16fb8d3..5a750b9cd006 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -192,12 +192,12 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
if (skb->encapsulation) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
MLX5_ETH_WQE_L4_INNER_CSUM;
- sq->stats.csum_offload_inner++;
+ sq->stats.csum_partial_inner++;
} else {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
}
} else
- sq->stats.csum_offload_none++;
+ sq->stats.csum_none++;
if (sq->cc != sq->prev_cc) {
sq->prev_cc = sq->cc;
@@ -317,7 +317,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
- sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+ if (bf)
+ sq->bf_budget--;
sq->stats.packets++;
sq->stats.bytes += num_bytes;
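The bf_budget change above is behavioral, not cosmetic: the old expression reset the remaining budget to zero on every non-blue-flame send, while the new form only decrements it when a blue-flame doorbell was actually used. A toy comparison of the two update rules:

#include <stdio.h>
#include <stdbool.h>

/* old rule: any non-bf send wipes the remaining budget */
static int update_old(int budget, bool bf)
{
	return bf ? budget - 1 : 0;
}

/* new rule: only bf sends consume budget */
static int update_new(int budget, bool bf)
{
	if (bf)
		budget--;
	return budget;
}

int main(void)
{
	/* a non-blue-flame send: the old rule zeroes the budget... */
	printf("old: %d -> %d\n", 8, update_old(8, false));
	/* ...the new rule leaves it untouched for later bf sends */
	printf("new: %d -> %d\n", 8, update_new(8, false));
	return 0;
}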
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index c38781fa567d..64ae2e800daa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -136,6 +136,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
mlx5e_cq_arm(&c->sq[i].cq);
+
+ if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state))
+ mlx5e_rx_am(&c->rq);
+
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
@@ -146,6 +150,7 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+ cq->event_ctr++;
set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
napi_schedule(cq->napi);
}
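The napi poll hunk above gates the new adaptive-moderation step (mlx5e_rx_am) on a per-RQ state bit, so the extra work runs only when the feature has been armed. A minimal user-space sketch of the same gating idiom, with hypothetical names standing in for the kernel's test_bit() machinery:

#include <stdio.h>

#define RQ_STATE_AM_BIT (1u << 1)       /* hypothetical flag bit */

struct rq {
	unsigned long state;
	unsigned long am_runs;          /* counts moderation invocations */
};

/* stand-in for mlx5e_rx_am(): adjust moderation from recent stats */
static void rx_am(struct rq *rq)
{
	rq->am_runs++;
}

static void napi_poll_tail(struct rq *rq)
{
	/* only do the extra work when the feature bit is set */
	if (rq->state & RQ_STATE_AM_BIT)
		rx_am(rq);
}

int main(void)
{
	struct rq rq = { 0, 0 };

	napi_poll_tail(&rq);            /* bit clear: skipped */
	rq.state |= RQ_STATE_AM_BIT;    /* arm adaptive moderation */
	napi_poll_tail(&rq);            /* bit set: runs */
	printf("am ran %lu time(s)\n", rq.am_runs);
	return 0;
}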
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index b84a6918a700..f6d667797ee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -40,17 +40,6 @@
#define UPLINK_VPORT 0xFFFF
-#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
-
-#define esw_info(dev, format, ...) \
- pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
-
-#define esw_warn(dev, format, ...) \
- pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
-
-#define esw_debug(dev, format, ...) \
- mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
-
enum {
MLX5_ACTION_NONE = 0,
MLX5_ACTION_ADD = 1,
@@ -92,6 +81,9 @@ enum {
MC_ADDR_CHANGE | \
PROMISC_CHANGE)
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
+
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
{
@@ -337,25 +329,23 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_rule *flow_rule = NULL;
struct mlx5_flow_destination dest;
+ struct mlx5_flow_spec *spec;
void *mv_misc = NULL;
void *mc_misc = NULL;
u8 *dmac_v = NULL;
u8 *dmac_c = NULL;
- u32 *match_v;
- u32 *match_c;
if (rx_rule)
match_header |= MLX5_MATCH_MISC_PARAMETERS;
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_v || !match_c) {
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
pr_warn("FDB: Failed to alloc match parameters\n");
- goto out;
+ return NULL;
}
-
- dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dmac_47_16);
- dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16);
if (match_header & MLX5_MATCH_OUTER_HEADERS) {
@@ -364,8 +354,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
}
if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
- mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
- mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
+ mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+ mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
}
@@ -376,22 +368,19 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
esw_debug(esw->dev,
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
dmac_v, dmac_c, vport);
+ spec->match_criteria_enable = match_header;
flow_rule =
- mlx5_add_flow_rule(esw->fdb_table.fdb,
- match_header,
- match_c,
- match_v,
+ mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
- if (IS_ERR_OR_NULL(flow_rule)) {
+ if (IS_ERR(flow_rule)) {
pr_warn(
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
flow_rule = NULL;
}
-out:
- kfree(match_v);
- kfree(match_c);
+
+ kvfree(spec);
return flow_rule;
}
@@ -428,7 +417,7 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
-static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
@@ -457,7 +446,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
- if (IS_ERR_OR_NULL(fdb)) {
+ if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
goto out;
@@ -474,12 +463,12 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
eth_broadcast_addr(dmac);
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create flow group err(%d)\n", err);
goto out;
}
- esw->fdb_table.addr_grp = g;
+ esw->fdb_table.legacy.addr_grp = g;
/* Allmulti group : One rule that forwards any mcast traffic */
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
@@ -489,12 +478,12 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
eth_zero_addr(dmac);
dmac[0] = 0x01;
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
goto out;
}
- esw->fdb_table.allmulti_grp = g;
+ esw->fdb_table.legacy.allmulti_grp = g;
/* Promiscuous group :
* One rule that forward all unmatched traffic from previous groups
@@ -506,22 +495,22 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
goto out;
}
- esw->fdb_table.promisc_grp = g;
+ esw->fdb_table.legacy.promisc_grp = g;
out:
if (err) {
- if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) {
- mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
- esw->fdb_table.allmulti_grp = NULL;
+ if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+ esw->fdb_table.legacy.allmulti_grp = NULL;
}
- if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) {
- mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
- esw->fdb_table.addr_grp = NULL;
+ if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
+ esw->fdb_table.legacy.addr_grp = NULL;
}
if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
mlx5_destroy_flow_table(esw->fdb_table.fdb);
@@ -529,24 +518,24 @@ out:
}
}
- kfree(flow_group_in);
+ kvfree(flow_group_in);
return err;
}
-static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
+static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
if (!esw->fdb_table.fdb)
return;
esw_debug(esw->dev, "Destroy FDB Table\n");
- mlx5_destroy_flow_group(esw->fdb_table.promisc_grp);
- mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
- mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
mlx5_destroy_flow_table(esw->fdb_table.fdb);
esw->fdb_table.fdb = NULL;
- esw->fdb_table.addr_grp = NULL;
- esw->fdb_table.allmulti_grp = NULL;
- esw->fdb_table.promisc_grp = NULL;
+ esw->fdb_table.legacy.addr_grp = NULL;
+ esw->fdb_table.legacy.allmulti_grp = NULL;
+ esw->fdb_table.legacy.promisc_grp = NULL;
}
/* E-Switch vport UC/MC lists management */
@@ -578,7 +567,8 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
if (err)
goto abort;
- if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */
+ /* SRIOV is enabled: Forward UC MAC to vport */
+ if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
@@ -651,6 +641,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
esw_fdb_set_vport_rule(esw,
mac,
vport_idx);
+ iter_vaddr->mc_promisc = true;
break;
case MLX5_ACTION_DEL:
if (!iter_vaddr)
@@ -1060,7 +1051,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
return;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR_OR_NULL(acl)) {
+ if (IS_ERR(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
vport->vport, err);
@@ -1075,7 +1066,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(vlan_grp)) {
+ if (IS_ERR(vlan_grp)) {
err = PTR_ERR(vlan_grp);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
vport->vport, err);
@@ -1086,7 +1077,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
drop_grp = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(drop_grp)) {
+ if (IS_ERR(drop_grp)) {
err = PTR_ERR(drop_grp);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
vport->vport, err);
@@ -1097,7 +1088,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.drop_grp = drop_grp;
vport->egress.allowed_vlans_grp = vlan_grp;
out:
- kfree(flow_group_in);
+ kvfree(flow_group_in);
if (err && !IS_ERR_OR_NULL(vlan_grp))
mlx5_destroy_flow_group(vlan_grp);
if (err && !IS_ERR_OR_NULL(acl))
@@ -1174,7 +1165,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
return;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR_OR_NULL(acl)) {
+ if (IS_ERR(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
vport->vport, err);
@@ -1192,7 +1183,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
vport->vport, err);
@@ -1207,7 +1198,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
vport->vport, err);
@@ -1223,7 +1214,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
vport->vport, err);
@@ -1236,7 +1227,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
vport->vport, err);
@@ -1259,7 +1250,7 @@ out:
mlx5_destroy_flow_table(vport->ingress.acl);
}
- kfree(flow_group_in);
+ kvfree(flow_group_in);
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1299,9 +1290,8 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_flow_spec *spec;
u8 smac[ETH_ALEN];
- u32 *match_v;
- u32 *match_c;
int err = 0;
u8 *smac_v;
@@ -1335,9 +1325,8 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->vlan, vport->qos);
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_v || !match_c) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
err = -ENOMEM;
esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
vport->vport, err);
@@ -1345,25 +1334,23 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
}
if (vport->vlan || vport->qos)
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
if (vport->spoofchk) {
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
smac_v = MLX5_ADDR_OF(fte_match_param,
- match_v,
+ spec->match_value,
outer_headers.smac_47_16);
ether_addr_copy(smac_v, smac);
}
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->ingress.allow_rule =
- mlx5_add_flow_rule(vport->ingress.acl,
- MLX5_MATCH_OUTER_HEADERS,
- match_c,
- match_v,
+ mlx5_add_flow_rule(vport->ingress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL);
- if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+ if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
vport->vport, err);
@@ -1371,16 +1358,12 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
goto out;
}
- memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
- memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ memset(spec, 0, sizeof(*spec));
vport->ingress.drop_rule =
- mlx5_add_flow_rule(vport->ingress.acl,
- 0,
- match_c,
- match_v,
+ mlx5_add_flow_rule(vport->ingress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL);
- if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
+ if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
vport->vport, err);
@@ -1391,17 +1374,14 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
out:
if (err)
esw_vport_cleanup_ingress_rules(esw, vport);
-
- kfree(match_v);
- kfree(match_c);
+ kvfree(spec);
return err;
}
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- u32 *match_v;
- u32 *match_c;
+ struct mlx5_flow_spec *spec;
int err = 0;
esw_vport_cleanup_egress_rules(esw, vport);
@@ -1417,9 +1397,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->vlan, vport->qos);
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_v || !match_c) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
err = -ENOMEM;
esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
vport->vport, err);
@@ -1427,19 +1406,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
}
/* Allowed vlan rule */
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
- MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->egress.allowed_vlan =
- mlx5_add_flow_rule(vport->egress.acl,
- MLX5_MATCH_OUTER_HEADERS,
- match_c,
- match_v,
+ mlx5_add_flow_rule(vport->egress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL);
- if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+ if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
vport->vport, err);
@@ -1448,24 +1425,19 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
}
/* Drop others rule (star rule) */
- memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
- memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ memset(spec, 0, sizeof(*spec));
vport->egress.drop_rule =
- mlx5_add_flow_rule(vport->egress.acl,
- 0,
- match_c,
- match_v,
+ mlx5_add_flow_rule(vport->egress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL);
- if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
+ if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
vport->vport, err);
vport->egress.drop_rule = NULL;
}
out:
- kfree(match_v);
- kfree(match_c);
+ kvfree(spec);
return err;
}
@@ -1491,14 +1463,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
/* Sync with current vport context */
vport->enabled_events = enable_events;
- esw_vport_change_handle_locked(vport);
-
vport->enabled = true;
/* only PF is trusted by default */
vport->trusted = (vport_num) ? false : true;
-
- arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
+ esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
@@ -1542,10 +1511,10 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
}
/* Public E-Switch API */
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
+int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
int err;
- int i;
+ int i, enabled_events;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
@@ -1563,16 +1532,20 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
- esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
-
+ esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
+ esw->mode = mode;
esw_disable_vport(esw, 0);
- err = esw_create_fdb_table(esw, nvfs + 1);
+ if (mode == SRIOV_LEGACY)
+ err = esw_create_legacy_fdb_table(esw, nvfs + 1);
+ else
+ err = esw_offloads_init(esw, nvfs + 1);
if (err)
goto abort;
+ enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
for (i = 0; i <= nvfs; i++)
- esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS);
+ esw_enable_vport(esw, i, enabled_events);
esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
esw->enabled_vports);
@@ -1586,16 +1559,18 @@ abort:
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
struct esw_mc_addr *mc_promisc;
+ int nvports;
int i;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return;
- esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
- esw->enabled_vports);
+ esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
+ esw->enabled_vports, esw->mode);
mc_promisc = esw->mc_promisc;
+ nvports = esw->enabled_vports;
for (i = 0; i < esw->total_vports; i++)
esw_disable_vport(esw, i);
@@ -1603,8 +1578,12 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
if (mc_promisc && mc_promisc->uplink_rule)
mlx5_del_flow_rule(mc_promisc->uplink_rule);
- esw_destroy_fdb_table(esw);
+ if (esw->mode == SRIOV_LEGACY)
+ esw_destroy_legacy_fdb_table(esw);
+ else if (esw->mode == SRIOV_OFFLOADS)
+ esw_offloads_cleanup(esw, nvports);
+ esw->mode = SRIOV_NONE;
/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}
@@ -1662,6 +1641,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ esw->offloads.vport_reps =
+ kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
+ GFP_KERNEL);
+ if (!esw->offloads.vport_reps) {
+ err = -ENOMEM;
+ goto abort;
+ }
+
mutex_init(&esw->state_lock);
for (vport_num = 0; vport_num < total_vports; vport_num++) {
@@ -1675,6 +1662,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->total_vports = total_vports;
esw->enabled_vports = 0;
+ esw->mode = SRIOV_NONE;
dev->priv.eswitch = esw;
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
@@ -1685,6 +1673,7 @@ abort:
destroy_workqueue(esw->work_queue);
kfree(esw->l2_table.bitmap);
kfree(esw->vports);
+ kfree(esw->offloads.vport_reps);
kfree(esw);
return err;
}
@@ -1702,6 +1691,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
destroy_workqueue(esw->work_queue);
kfree(esw->l2_table.bitmap);
kfree(esw->mc_promisc);
+ kfree(esw->offloads.vport_reps);
kfree(esw->vports);
kfree(esw);
}
@@ -1728,11 +1718,24 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+ ((u8 *)node_guid)[7] = mac[0];
+ ((u8 *)node_guid)[6] = mac[1];
+ ((u8 *)node_guid)[5] = mac[2];
+ ((u8 *)node_guid)[4] = 0xff;
+ ((u8 *)node_guid)[3] = 0xfe;
+ ((u8 *)node_guid)[2] = mac[3];
+ ((u8 *)node_guid)[1] = mac[4];
+ ((u8 *)node_guid)[0] = mac[5];
+}
+
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
- int err = 0;
struct mlx5_vport *evport;
+ u64 node_guid;
+ int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
@@ -1756,11 +1759,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
return err;
}
+ node_guid_gen_from_mac(&node_guid, mac);
+ err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+ if (err)
+ mlx5_core_warn(esw->dev,
+ "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+ vport, err);
+
mutex_lock(&esw->state_lock);
if (evport->enabled)
err = esw_vport_ingress_config(esw, evport);
mutex_unlock(&esw->state_lock);
-
return err;
}
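node_guid_gen_from_mac() above derives the VF's RDMA node GUID from its MAC with the familiar EUI-64-style expansion: the three OUI bytes, then the 0xFF/0xFE filler, then the three device-specific bytes, written into the u64 byte array from index 7 down to 0. A standalone copy of the same byte placement that can be compiled and checked:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

/* same byte placement as node_guid_gen_from_mac() in the patch */
static void node_guid_gen_from_mac(uint64_t *node_guid, const uint8_t mac[ETH_ALEN])
{
	uint8_t *p = (uint8_t *)node_guid;

	p[7] = mac[0];
	p[6] = mac[1];
	p[5] = mac[2];
	p[4] = 0xff;    /* EUI-64-style filler */
	p[3] = 0xfe;
	p[2] = mac[3];
	p[1] = mac[4];
	p[0] = mac[5];
}

int main(void)
{
	const uint8_t mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint64_t guid = 0;
	int i;

	node_guid_gen_from_mac(&guid, mac);

	/* print most-significant byte first: 00:11:22:ff:fe:33:44:55 */
	for (i = 7; i >= 0; i--)
		printf("%02x%s", ((uint8_t *)&guid)[i], i ? ":" : "\n");
	return 0;
}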
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index fd6800256d4a..7b45e6a6efb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -35,6 +35,7 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
+#include <net/devlink.h>
#include <linux/mlx5/device.h>
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -46,6 +47,8 @@
#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE))
#define MLX5_L2_ADDR_HASH(addr) (addr[5])
+#define FDB_UPLINK_VPORT 0xffff
+
/* L2 -mac address based- hash helpers */
struct l2addr_node {
struct hlist_node hlist;
@@ -134,9 +137,48 @@ struct mlx5_l2_table {
struct mlx5_eswitch_fdb {
void *fdb;
- struct mlx5_flow_group *addr_grp;
- struct mlx5_flow_group *allmulti_grp;
- struct mlx5_flow_group *promisc_grp;
+ union {
+ struct legacy_fdb {
+ struct mlx5_flow_group *addr_grp;
+ struct mlx5_flow_group *allmulti_grp;
+ struct mlx5_flow_group *promisc_grp;
+ } legacy;
+
+ struct offloads_fdb {
+ struct mlx5_flow_group *send_to_vport_grp;
+ struct mlx5_flow_group *miss_grp;
+ struct mlx5_flow_rule *miss_rule;
+ } offloads;
+ };
+};
+
+enum {
+ SRIOV_NONE,
+ SRIOV_LEGACY,
+ SRIOV_OFFLOADS
+};
+
+struct mlx5_esw_sq {
+ struct mlx5_flow_rule *send_to_vport_rule;
+ struct list_head list;
+};
+
+struct mlx5_eswitch_rep {
+ int (*load)(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+ void (*unload)(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+ u16 vport;
+ struct mlx5_flow_rule *vport_rx_rule;
+ void *priv_data;
+ struct list_head vport_sqs_list;
+ bool valid;
+};
+
+struct mlx5_esw_offload {
+ struct mlx5_flow_table *ft_offloads;
+ struct mlx5_flow_group *vport_rx_group;
+ struct mlx5_eswitch_rep *vport_reps;
};
struct mlx5_eswitch {
@@ -153,13 +195,15 @@ struct mlx5_eswitch {
*/
struct mutex state_lock;
struct esw_mc_addr *mc_promisc;
+ struct mlx5_esw_offload offloads;
+ int mode;
};
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs);
+int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN]);
@@ -177,4 +221,30 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_rule *
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
+
+int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ u16 *sqns_array, int sqns_num);
+void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
+ int vport);
+
+#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
+
+#define esw_info(dev, format, ...) \
+ pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+
+#define esw_warn(dev, format, ...) \
+ pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+
+#define esw_debug(dev, format, ...) \
+ mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
#endif /* __MLX5_ESWITCH_H__ */
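The header change above turns the FDB bookkeeping into a mode-tagged union: esw->mode (SRIOV_NONE/SRIOV_LEGACY/SRIOV_OFFLOADS) decides whether the legacy or the offloads member of the union is live, letting the two layouts share storage safely. A compact sketch of that idiom with placeholder members (the real struct holds mlx5_flow_group pointers):

#include <stdio.h>

enum sriov_mode { SRIOV_NONE, SRIOV_LEGACY, SRIOV_OFFLOADS };

/* placeholder group handles standing in for mlx5_flow_group pointers */
struct fdb_table {
	void *fdb;
	union {
		struct { void *addr_grp, *allmulti_grp, *promisc_grp; } legacy;
		struct { void *send_to_vport_grp, *miss_grp; } offloads;
	};
};

struct eswitch {
	enum sriov_mode mode;   /* the tag: says which union member is valid */
	struct fdb_table fdb_table;
};

static void destroy_fdb(const struct eswitch *esw)
{
	/* always dispatch on the tag before touching union members */
	switch (esw->mode) {
	case SRIOV_LEGACY:
		printf("tearing down legacy groups %p %p %p\n",
		       esw->fdb_table.legacy.addr_grp,
		       esw->fdb_table.legacy.allmulti_grp,
		       esw->fdb_table.legacy.promisc_grp);
		break;
	case SRIOV_OFFLOADS:
		printf("tearing down offloads groups %p %p\n",
		       esw->fdb_table.offloads.send_to_vport_grp,
		       esw->fdb_table.offloads.miss_grp);
		break;
	default:
		break;
	}
}

int main(void)
{
	struct eswitch esw = { .mode = SRIOV_LEGACY };

	destroy_fdb(&esw);
	return 0;
}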
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
new file mode 100644
index 000000000000..1842dfb4636b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_core.h"
+#include "eswitch.h"
+
+static struct mlx5_flow_rule *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_rule *flow_rule;
+ struct mlx5_flow_spec *spec;
+ void *misc;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+ flow_rule = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
+ MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport_num = vport;
+
+ flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ 0, &dest);
+ if (IS_ERR(flow_rule))
+ esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
+out:
+ kvfree(spec);
+ return flow_rule;
+}
+
+void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_esw_sq *esw_sq, *tmp;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return;
+
+ list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
+ mlx5_del_flow_rule(esw_sq->send_to_vport_rule);
+ list_del(&esw_sq->list);
+ kfree(esw_sq);
+ }
+}
+
+int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ u16 *sqns_array, int sqns_num)
+{
+ struct mlx5_flow_rule *flow_rule;
+ struct mlx5_esw_sq *esw_sq;
+ int vport;
+ int err;
+ int i;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return 0;
+
+ vport = rep->vport == 0 ?
+ FDB_UPLINK_VPORT : rep->vport;
+
+ for (i = 0; i < sqns_num; i++) {
+ esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
+ if (!esw_sq) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Add re-inject rule to the PF/representor sqs */
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+ vport,
+ sqns_array[i]);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ kfree(esw_sq);
+ goto out_err;
+ }
+ esw_sq->send_to_vport_rule = flow_rule;
+ list_add(&esw_sq->list, &rep->vport_sqs_list);
+ }
+ return 0;
+
+out_err:
+ mlx5_eswitch_sqs2vport_stop(esw, rep);
+ return err;
+}
+
+static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_rule *flow_rule = NULL;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport_num = 0;
+
+ flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ 0, &dest);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
+ goto out;
+ }
+
+ esw->fdb_table.offloads.miss_rule = flow_rule;
+out:
+ kvfree(spec);
+ return err;
+}
+
+#define MAX_PF_SQ 256
+
+static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *fdb = NULL;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ void *match_criteria;
+ int table_size, ix, err = 0;
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get FDB flow namespace\n");
+ err = -EOPNOTSUPP;
+ goto ns_err;
+ }
+
+ esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+
+ table_size = nvports + MAX_PF_SQ + 1;
+ fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ esw_warn(dev, "Failed to create FDB Table err %d\n", err);
+ goto fdb_err;
+ }
+ esw->fdb_table.fdb = fdb;
+
+ /* create send-to-vport group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+
+ ix = nvports + MAX_PF_SQ;
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
+ goto send_vport_err;
+ }
+ esw->fdb_table.offloads.send_to_vport_grp = g;
+
+ /* create miss group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
+ goto miss_err;
+ }
+ esw->fdb_table.offloads.miss_grp = g;
+
+ err = esw_add_fdb_miss_rule(esw);
+ if (err)
+ goto miss_rule_err;
+
+ return 0;
+
+miss_rule_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+miss_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+send_vport_err:
+ mlx5_destroy_flow_table(fdb);
+fdb_err:
+ns_err:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
+{
+ if (!esw->fdb_table.fdb)
+ return;
+
+ esw_debug(esw->dev, "Destroy offloads FDB Table\n");
+ mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule);
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+
+ mlx5_destroy_flow_table(esw->fdb_table.fdb);
+}
+
+static int esw_create_offloads_table(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft_offloads;
+ struct mlx5_core_dev *dev = esw->dev;
+ int err = 0;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
+ if (!ns) {
+ esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
+ return -ENOMEM;
+ }
+
+ ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
+ if (IS_ERR(ft_offloads)) {
+ err = PTR_ERR(ft_offloads);
+ esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
+ return err;
+ }
+
+ esw->offloads.ft_offloads = ft_offloads;
+ return 0;
+}
+
+static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+
+ mlx5_destroy_flow_table(offloads->ft_offloads);
+}
+
+static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ struct mlx5_priv *priv = &esw->dev->priv;
+ u32 *flow_group_in;
+ void *match_criteria, *misc;
+ int err = 0;
+ int nvports = priv->sriov.num_vfs + 2;
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ /* create vport rx group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+ misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
+
+ g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
+
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
+ goto out;
+ }
+
+ esw->offloads.vport_rx_group = g;
+out:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
+{
+ mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
+}
+
+struct mlx5_flow_rule *
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_rule *flow_rule;
+ struct mlx5_flow_spec *spec;
+ void *misc;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ esw_warn(esw->dev, "Failed to alloc match parameters\n");
+ flow_rule = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, vport);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = tirn;
+
+ flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ 0, &dest);
+ if (IS_ERR(flow_rule)) {
+ esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
+ goto out;
+ }
+
+out:
+ kvfree(spec);
+ return flow_rule;
+}
+
+static int esw_offloads_start(struct mlx5_eswitch *esw)
+{
+ int err, num_vfs = esw->dev->priv.sriov.num_vfs;
+
+ if (esw->mode != SRIOV_LEGACY) {
+ esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
+ return -EINVAL;
+ }
+
+ mlx5_eswitch_disable_sriov(esw);
+ err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+ if (err)
+ esw_warn(esw->dev, "Failed set eswitch to offloads, err %d\n", err);
+ return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vport;
+ int err;
+
+ err = esw_create_offloads_fdb_table(esw, nvports);
+ if (err)
+ return err;
+
+ err = esw_create_offloads_table(esw);
+ if (err)
+ goto create_ft_err;
+
+ err = esw_create_vport_rx_group(esw);
+ if (err)
+ goto create_fg_err;
+
+ for (vport = 0; vport < nvports; vport++) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->valid)
+ continue;
+
+ err = rep->load(esw, rep);
+ if (err)
+ goto err_reps;
+ }
+ return 0;
+
+err_reps:
+ for (vport--; vport >= 0; vport--) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->valid)
+ continue;
+ rep->unload(esw, rep);
+ }
+ esw_destroy_vport_rx_group(esw);
+
+create_fg_err:
+ esw_destroy_offloads_table(esw);
+
+create_ft_err:
+ esw_destroy_offloads_fdb_table(esw);
+ return err;
+}
+
+static int esw_offloads_stop(struct mlx5_eswitch *esw)
+{
+ int err, num_vfs = esw->dev->priv.sriov.num_vfs;
+
+ mlx5_eswitch_disable_sriov(esw);
+ err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+ if (err)
+ esw_warn(esw->dev, "Failed set eswitch legacy mode. err %d\n", err);
+
+ return err;
+}
+
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vport;
+
+ for (vport = 0; vport < nvports; vport++) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->valid)
+ continue;
+ rep->unload(esw, rep);
+ }
+
+ esw_destroy_vport_rx_group(esw);
+ esw_destroy_offloads_table(esw);
+ esw_destroy_offloads_fdb_table(esw);
+}
+
+static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
+{
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ *mlx5_mode = SRIOV_LEGACY;
+ break;
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ *mlx5_mode = SRIOV_OFFLOADS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+ struct mlx5_core_dev *dev;
+ u16 cur_mlx5_mode, mlx5_mode = 0;
+
+ dev = devlink_priv(devlink);
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ cur_mlx5_mode = dev->priv.eswitch->mode;
+
+ if (cur_mlx5_mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ if (mlx5_esw_mode_from_devlink(mode, &mlx5_mode))
+ return -EINVAL;
+
+ if (cur_mlx5_mode == mlx5_mode)
+ return 0;
+
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return esw_offloads_start(dev->priv.eswitch);
+ else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ return esw_offloads_stop(dev->priv.eswitch);
+ else
+ return -EINVAL;
+}
+
+int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct mlx5_core_dev *dev;
+
+ dev = devlink_priv(devlink);
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (dev->priv.eswitch->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ *mode = dev->priv.eswitch->mode;
+
+ return 0;
+}
+
+void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+
+ memcpy(&offloads->vport_reps[rep->vport], rep,
+ sizeof(struct mlx5_eswitch_rep));
+
+ INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list);
+ offloads->vport_reps[rep->vport].valid = true;
+}
+
+void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
+ int vport)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+ struct mlx5_eswitch_rep *rep;
+
+ rep = &offloads->vport_reps[vport];
+
+ if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled)
+ rep->unload(esw, rep);
+
+ offloads->vport_reps[vport].valid = false;
+}
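mlx5_devlink_eswitch_mode_set() above is a thin translation layer: map the devlink constant to the driver's SRIOV mode, bail out early when nothing would change, and otherwise restart SRIOV in the other mode via esw_offloads_start()/esw_offloads_stop(). The mapping itself is small enough to test standalone (the constants below are illustrative, not the devlink ABI values):

#include <stdio.h>

/* illustrative stand-ins for the devlink and driver constants */
enum { DL_MODE_LEGACY, DL_MODE_SWITCHDEV };
enum { SRIOV_NONE, SRIOV_LEGACY, SRIOV_OFFLOADS };

static int esw_mode_from_devlink(int mode, int *mlx5_mode)
{
	switch (mode) {
	case DL_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		return 0;
	case DL_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		return 0;
	default:
		return -1;	/* -EINVAL in the kernel */
	}
}

int main(void)
{
	int cur = SRIOV_LEGACY, want;

	if (esw_mode_from_devlink(DL_MODE_SWITCHDEV, &want))
		return 1;

	if (cur == want) {
		printf("already in requested mode, nothing to do\n");
		return 0;
	}
	/* the driver would disable SRIOV and re-enable it in 'want' here */
	printf("switching eswitch mode %d -> %d\n", cur, want);
	return 0;
}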
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8b5f0b2c0d5c..b0a130479085 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -67,13 +67,21 @@
#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
.caps = (long[]) {__VA_ARGS__} }
+#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
+ FS_CAP(flow_table_properties_nic_receive.modify_root), \
+ FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
+ FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
+
#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
#define BY_PASS_PRIO_NUM_LEVELS 1
-#define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
+#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
LEFTOVERS_NUM_PRIOS)
+#define ETHTOOL_PRIO_NUM_LEVELS 1
+#define ETHTOOL_NUM_PRIOS 10
+#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 4
#define KERNEL_NIC_NUM_PRIOS 1
@@ -83,6 +91,11 @@
#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
+
+#define OFFLOADS_MAX_FT 1
+#define OFFLOADS_NUM_PRIOS 1
+#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
+
struct node_caps {
size_t arr_sz;
long *caps;
@@ -98,24 +111,24 @@ static struct init_tree_node {
int num_levels;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 4,
+ .ar_size = 6,
.children = (struct init_tree_node[]) {
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
- FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
- FS_CAP(flow_table_properties_nic_receive.modify_root),
- FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
- FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
+ FS_CHAINING_CAPS,
ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
+ ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
+ ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
+ ETHTOOL_PRIO_NUM_LEVELS))),
ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
- FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
- FS_CAP(flow_table_properties_nic_receive.modify_root),
- FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
- FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
+ FS_CHAINING_CAPS,
ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
@@ -1152,9 +1165,7 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
static struct mlx5_flow_rule *
_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
+ struct mlx5_flow_spec *spec,
u32 action,
u32 flow_tag,
struct mlx5_flow_destination *dest)
@@ -1168,22 +1179,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
fs_for_each_fg(g, ft)
if (compare_match_criteria(g->mask.match_criteria_enable,
- match_criteria_enable,
+ spec->match_criteria_enable,
g->mask.match_criteria,
- match_criteria)) {
- rule = add_rule_fg(g, match_value,
+ spec->match_criteria)) {
+ rule = add_rule_fg(g, spec->match_value,
action, flow_tag, dest);
if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
goto unlock;
}
- g = create_autogroup(ft, match_criteria_enable, match_criteria);
+ g = create_autogroup(ft, spec->match_criteria_enable,
+ spec->match_criteria);
if (IS_ERR(g)) {
rule = (void *)g;
goto unlock;
}
- rule = add_rule_fg(g, match_value,
+ rule = add_rule_fg(g, spec->match_value,
action, flow_tag, dest);
if (IS_ERR(rule)) {
/* Remove assumes refcount > 0 and autogroup creates a group
@@ -1207,9 +1219,7 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
+ struct mlx5_flow_spec *spec,
u32 action,
u32 flow_tag,
struct mlx5_flow_destination *dest)
@@ -1240,8 +1250,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
}
}
- rule = _mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
- match_value, action, flow_tag, dest);
+ rule = _mlx5_add_flow_rule(ft, spec, action, flow_tag, dest);
if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!IS_ERR_OR_NULL(rule) &&
@@ -1292,8 +1301,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
ft->id);
return err;
}
- root->root_ft = new_root_ft;
}
+ root->root_ft = new_root_ft;
return 0;
}
@@ -1359,40 +1368,47 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type)
{
- struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ struct mlx5_flow_root_namespace *root_ns;
int prio;
struct fs_prio *fs_prio;
struct mlx5_flow_namespace *ns;
- if (!root_ns)
+ if (!steering)
return NULL;
switch (type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
+ case MLX5_FLOW_NAMESPACE_OFFLOADS:
+ case MLX5_FLOW_NAMESPACE_ETHTOOL:
case MLX5_FLOW_NAMESPACE_KERNEL:
case MLX5_FLOW_NAMESPACE_LEFTOVERS:
case MLX5_FLOW_NAMESPACE_ANCHOR:
prio = type;
break;
case MLX5_FLOW_NAMESPACE_FDB:
- if (dev->priv.fdb_root_ns)
- return &dev->priv.fdb_root_ns->ns;
+ if (steering->fdb_root_ns)
+ return &steering->fdb_root_ns->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (dev->priv.esw_egress_root_ns)
- return &dev->priv.esw_egress_root_ns->ns;
+ if (steering->esw_egress_root_ns)
+ return &steering->esw_egress_root_ns->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (dev->priv.esw_ingress_root_ns)
- return &dev->priv.esw_ingress_root_ns->ns;
+ if (steering->esw_ingress_root_ns)
+ return &steering->esw_ingress_root_ns->ns;
else
return NULL;
default:
return NULL;
}
+ root_ns = steering->root_ns;
+ if (!root_ns)
+ return NULL;
+
fs_prio = find_prio(&root_ns->ns, prio);
if (!fs_prio)
return NULL;
@@ -1478,13 +1494,13 @@ static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
return true;
}
-static int init_root_tree_recursive(struct mlx5_core_dev *dev,
+static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
struct init_tree_node *init_node,
struct fs_node *fs_parent_node,
struct init_tree_node *init_parent_node,
int prio)
{
- int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
+ int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
flow_table_properties_nic_receive.
max_ft_level);
struct mlx5_flow_namespace *fs_ns;
@@ -1495,7 +1511,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
if (init_node->type == FS_TYPE_PRIO) {
if ((init_node->min_ft_level > max_ft_level) ||
- !has_required_caps(dev, &init_node->caps))
+ !has_required_caps(steering->dev, &init_node->caps))
return 0;
fs_get_obj(fs_ns, fs_parent_node);
@@ -1516,7 +1532,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
}
prio = 0;
for (i = 0; i < init_node->ar_size; i++) {
- err = init_root_tree_recursive(dev, &init_node->children[i],
+ err = init_root_tree_recursive(steering, &init_node->children[i],
base, init_node, prio);
if (err)
return err;
@@ -1529,7 +1545,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
return 0;
}
-static int init_root_tree(struct mlx5_core_dev *dev,
+static int init_root_tree(struct mlx5_flow_steering *steering,
struct init_tree_node *init_node,
struct fs_node *fs_parent_node)
{
@@ -1539,7 +1555,7 @@ static int init_root_tree(struct mlx5_core_dev *dev,
fs_get_obj(fs_ns, fs_parent_node);
for (i = 0; i < init_node->ar_size; i++) {
- err = init_root_tree_recursive(dev, &init_node->children[i],
+ err = init_root_tree_recursive(steering, &init_node->children[i],
&fs_ns->node,
init_node, i);
if (err)
@@ -1548,7 +1564,7 @@ static int init_root_tree(struct mlx5_core_dev *dev,
return 0;
}
-static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev,
+static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering,
enum fs_flow_table_type
table_type)
{
@@ -1560,7 +1576,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev
if (!root_ns)
return NULL;
- root_ns->dev = dev;
+ root_ns->dev = steering->dev;
root_ns->table_type = table_type;
ns = &root_ns->ns;
@@ -1615,209 +1631,126 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
-static int create_anchor_flow_table(struct mlx5_core_dev
- *dev)
+static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_flow_table *ft;
- ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR);
+ ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
if (!ns)
return -EINVAL;
ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
if (IS_ERR(ft)) {
- mlx5_core_err(dev, "Failed to create last anchor flow table");
+ mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
return PTR_ERR(ft);
}
return 0;
}
-static int init_root_ns(struct mlx5_core_dev *dev)
+static int init_root_ns(struct mlx5_flow_steering *steering)
{
- dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX);
- if (IS_ERR_OR_NULL(dev->priv.root_ns))
+ steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
+ if (IS_ERR_OR_NULL(steering->root_ns))
goto cleanup;
- if (init_root_tree(dev, &root_fs, &dev->priv.root_ns->ns.node))
+ if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
goto cleanup;
- set_prio_attrs(dev->priv.root_ns);
+ set_prio_attrs(steering->root_ns);
- if (create_anchor_flow_table(dev))
+ if (create_anchor_flow_table(steering))
goto cleanup;
return 0;
cleanup:
- mlx5_cleanup_fs(dev);
+ mlx5_cleanup_fs(steering->dev);
return -ENOMEM;
}
-static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
- struct mlx5_flow_root_namespace *root_ns)
+static void clean_tree(struct fs_node *node)
{
- struct fs_node *prio;
-
- if (!root_ns)
- return;
+ if (node) {
+ struct fs_node *iter;
+ struct fs_node *temp;
- if (!list_empty(&root_ns->ns.node.children)) {
- prio = list_first_entry(&root_ns->ns.node.children,
- struct fs_node,
- list);
- if (tree_remove_node(prio))
- mlx5_core_warn(dev,
- "Flow steering priority wasn't destroyed, refcount > 1\n");
+ list_for_each_entry_safe(iter, temp, &node->children, list)
+ clean_tree(iter);
+ tree_remove_node(node);
}
- if (tree_remove_node(&root_ns->ns.node))
- mlx5_core_warn(dev,
- "Flow steering namespace wasn't destroyed, refcount > 1\n");
- root_ns = NULL;
-}
-
-static void destroy_flow_tables(struct fs_prio *prio)
-{
- struct mlx5_flow_table *iter;
- struct mlx5_flow_table *tmp;
-
- fs_for_each_ft_safe(iter, tmp, prio)
- mlx5_destroy_flow_table(iter);
}
-static void cleanup_root_ns(struct mlx5_core_dev *dev)
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
- struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
- struct fs_prio *iter_prio;
-
- if (!MLX5_CAP_GEN(dev, nic_flow_table))
- return;
-
if (!root_ns)
return;
- /* stage 1 */
- fs_for_each_prio(iter_prio, &root_ns->ns) {
- struct fs_node *node;
- struct mlx5_flow_namespace *iter_ns;
-
- fs_for_each_ns_or_ft(node, iter_prio) {
- if (node->type == FS_TYPE_FLOW_TABLE)
- continue;
- fs_get_obj(iter_ns, node);
- while (!list_empty(&iter_ns->node.children)) {
- struct fs_prio *obj_iter_prio2;
- struct fs_node *iter_prio2 =
- list_first_entry(&iter_ns->node.children,
- struct fs_node,
- list);
-
- fs_get_obj(obj_iter_prio2, iter_prio2);
- destroy_flow_tables(obj_iter_prio2);
- if (tree_remove_node(iter_prio2)) {
- mlx5_core_warn(dev,
- "Priority %d wasn't destroyed, refcount > 1\n",
- obj_iter_prio2->prio);
- return;
- }
- }
- }
- }
-
- /* stage 2 */
- fs_for_each_prio(iter_prio, &root_ns->ns) {
- while (!list_empty(&iter_prio->node.children)) {
- struct fs_node *iter_ns =
- list_first_entry(&iter_prio->node.children,
- struct fs_node,
- list);
- if (tree_remove_node(iter_ns)) {
- mlx5_core_warn(dev,
- "Namespace wasn't destroyed, refcount > 1\n");
- return;
- }
- }
- }
-
- /* stage 3 */
- while (!list_empty(&root_ns->ns.node.children)) {
- struct fs_prio *obj_prio_node;
- struct fs_node *prio_node =
- list_first_entry(&root_ns->ns.node.children,
- struct fs_node,
- list);
-
- fs_get_obj(obj_prio_node, prio_node);
- if (tree_remove_node(prio_node)) {
- mlx5_core_warn(dev,
- "Priority %d wasn't destroyed, refcount > 1\n",
- obj_prio_node->prio);
- return;
- }
- }
-
- if (tree_remove_node(&root_ns->ns.node)) {
- mlx5_core_warn(dev,
- "root namespace wasn't destroyed, refcount > 1\n");
- return;
- }
-
- dev->priv.root_ns = NULL;
+ clean_tree(&root_ns->ns.node);
}
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
- cleanup_root_ns(dev);
- cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
- cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
- cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns);
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return;
+
+ cleanup_root_ns(steering->root_ns);
+ cleanup_root_ns(steering->esw_egress_root_ns);
+ cleanup_root_ns(steering->esw_ingress_root_ns);
+ cleanup_root_ns(steering->fdb_root_ns);
mlx5_cleanup_fc_stats(dev);
+ kfree(steering);
}
-static int init_fdb_root_ns(struct mlx5_core_dev *dev)
+static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
- dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB);
- if (!dev->priv.fdb_root_ns)
+ steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
+ if (!steering->fdb_root_ns)
return -ENOMEM;
/* Create single prio */
- prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1);
+ prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
if (IS_ERR(prio)) {
- cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
+ cleanup_root_ns(steering->fdb_root_ns);
+ steering->fdb_root_ns = NULL;
return PTR_ERR(prio);
} else {
return 0;
}
}
-static int init_egress_acl_root_ns(struct mlx5_core_dev *dev)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
- dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL);
- if (!dev->priv.esw_egress_root_ns)
+ steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+ if (!steering->esw_egress_root_ns)
return -ENOMEM;
	/* create 1 prio */
- prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
+ prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
+ MLX5_TOTAL_VPORTS(steering->dev));
if (IS_ERR(prio))
return PTR_ERR(prio);
else
return 0;
}
-static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
- dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL);
- if (!dev->priv.esw_ingress_root_ns)
+ steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+ if (!steering->esw_ingress_root_ns)
return -ENOMEM;
	/* create 1 prio */
- prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
+ prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
+ MLX5_TOTAL_VPORTS(steering->dev));
if (IS_ERR(prio))
return PTR_ERR(prio);
else
@@ -1826,31 +1759,45 @@ static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
+ struct mlx5_flow_steering *steering;
int err = 0;
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
err = mlx5_init_fc_stats(dev);
if (err)
return err;
- if (MLX5_CAP_GEN(dev, nic_flow_table)) {
- err = init_root_ns(dev);
+ steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+ if (!steering)
+ return -ENOMEM;
+ steering->dev = dev;
+ dev->priv.steering = steering;
+
+ if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
+ err = init_root_ns(steering);
if (err)
goto err;
}
+
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
- err = init_fdb_root_ns(dev);
- if (err)
- goto err;
- }
- if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = init_egress_acl_root_ns(dev);
- if (err)
- goto err;
- }
- if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = init_ingress_acl_root_ns(dev);
- if (err)
- goto err;
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+ err = init_fdb_root_ns(steering);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+ err = init_egress_acl_root_ns(steering);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+ err = init_ingress_acl_root_ns(steering);
+ if (err)
+ goto err;
+ }
}
return 0;
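
A minimal usage sketch (not part of the patch) for the namespace lookup refactored above; MLX5_FLOW_NAMESPACE_ETHTOOL is one of the newly handled types, while the prio, size and level arguments are placeholder values:

static int example_create_ethtool_table(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;

	/* resolve one of the namespace types handled in the switch above */
	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ETHTOOL);
	if (!ns)
		return -EOPNOTSUPP;

	/* prio 0, 128 entries, level 0 -- arbitrary for illustration */
	ft = mlx5_create_flow_table(ns, 0, 128, 0);
	return PTR_ERR_OR_ZERO(ft);
}
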
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index aa41a7314691..d7ba91a1eea2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -55,6 +55,14 @@ enum fs_fte_status {
FS_FTE_STATUS_EXISTING = 1UL << 0,
};
+struct mlx5_flow_steering {
+ struct mlx5_core_dev *dev;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_flow_root_namespace *fdb_root_ns;
+ struct mlx5_flow_root_namespace *esw_egress_root_ns;
+ struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+};
+
struct fs_node {
struct list_head list;
struct list_head children;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 75c7ae6a5cc4..77fc1aa26114 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -151,6 +151,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, qos)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a19b59348dd6..1fb3c681df97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -51,6 +51,7 @@
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
+#include <net/devlink.h>
#include "mlx5_core.h"
#include "fs_core.h"
#ifdef CONFIG_MLX5_CORE_EN
@@ -1144,6 +1145,13 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
dev_err(&pdev->dev, "Failed to init flow steering\n");
goto err_fs;
}
+
+ err = mlx5_init_rl_table(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init rate limiting\n");
+ goto err_rl;
+ }
+
#ifdef CONFIG_MLX5_CORE_EN
err = mlx5_eswitch_init(dev);
if (err) {
@@ -1183,6 +1191,8 @@ err_sriov:
mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
err_reg_dev:
+ mlx5_cleanup_rl_table(dev);
+err_rl:
mlx5_cleanup_fs(dev);
err_fs:
mlx5_cleanup_mkey_table(dev);
@@ -1253,6 +1263,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
+ mlx5_cleanup_rl_table(dev);
mlx5_cleanup_fs(dev);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
@@ -1305,19 +1316,28 @@ struct mlx5_core_event_handler {
void *data);
};
+static const struct devlink_ops mlx5_devlink_ops = {
+#ifdef CONFIG_MLX5_CORE_EN
+ .eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
+ .eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
+#endif
+};
static int init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct mlx5_core_dev *dev;
+ struct devlink *devlink;
struct mlx5_priv *priv;
int err;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
+ devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
+ if (!devlink) {
dev_err(&pdev->dev, "kzalloc failed\n");
return -ENOMEM;
}
+
+ dev = devlink_priv(devlink);
priv = &dev->priv;
priv->pci_dev_data = id->driver_data;
@@ -1354,15 +1374,21 @@ static int init_one(struct pci_dev *pdev,
goto clean_health;
}
+ err = devlink_register(devlink, &pdev->dev);
+ if (err)
+ goto clean_load;
+
return 0;
+clean_load:
+ mlx5_unload_one(dev, priv);
clean_health:
mlx5_health_cleanup(dev);
close_pci:
mlx5_pci_close(dev, priv);
clean_dev:
pci_set_drvdata(pdev, NULL);
- kfree(dev);
+ devlink_free(devlink);
return err;
}
@@ -1370,8 +1396,10 @@ clean_dev:
static void remove_one(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
struct mlx5_priv *priv = &dev->priv;
+ devlink_unregister(devlink);
if (mlx5_unload_one(dev, priv)) {
dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
mlx5_health_cleanup(dev);
@@ -1380,7 +1408,7 @@ static void remove_one(struct pci_dev *pdev)
mlx5_health_cleanup(dev);
mlx5_pci_close(dev, priv);
pci_set_drvdata(pdev, NULL);
- kfree(dev);
+ devlink_free(devlink);
}
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
@@ -1508,8 +1536,9 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
{ PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
- { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
+ { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
+ { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */
{ 0, }
};
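
For reference, a sketch of the devlink container pattern that init_one() above switches to: the driver-private structure is carved out of the devlink allocation, so devlink_priv() and priv_to_devlink() convert between the two without extra bookkeeping. The probe function and private struct here are hypothetical:

struct example_priv {
	int state;	/* stand-in for real driver state */
};

static int example_probe(struct device *dev)
{
	struct example_priv *priv;
	struct devlink *devlink;
	int err;

	devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*priv));
	if (!devlink)
		return -ENOMEM;
	priv = devlink_priv(devlink);	/* private area trails struct devlink */

	err = devlink_register(devlink, dev);
	if (err) {
		devlink_free(devlink);
		return err;
	}
	/* on teardown: priv_to_devlink(priv) recovers the handle to free */
	return 0;
}
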
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 3e35611b19c3..752c08127138 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -202,15 +202,24 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
-int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
- int proto_mask)
+int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
+ u32 proto_admin, int proto_mask)
{
- u32 in[MLX5_ST_SZ_DW(ptys_reg)];
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u8 an_disable_admin;
+ u8 an_disable_cap;
+ u8 an_status;
+
+ mlx5_query_port_autoneg(dev, proto_mask, &an_status,
+ &an_disable_cap, &an_disable_admin);
+ if (!an_disable_cap && an_disable)
+ return -EPERM;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, an_disable_admin, an_disable);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
if (proto_mask == MLX5_PTYS_EN)
MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
@@ -220,7 +229,19 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PTYS, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
+EXPORT_SYMBOL_GPL(mlx5_set_port_ptys);
+
+/* This function should be used only after setting a port register */
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
+{
+ enum mlx5_port_status ps;
+
+ mlx5_query_port_admin_status(dev, &ps);
+ mlx5_set_port_admin_status(dev, MLX5_PORT_DOWN);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
+}
+EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
@@ -518,6 +539,25 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
}
EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
+void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
+ u8 *an_status,
+ u8 *an_disable_cap, u8 *an_disable_admin)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+
+ *an_status = 0;
+ *an_disable_cap = 0;
+ *an_disable_admin = 0;
+
+ if (mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1))
+ return;
+
+ *an_status = MLX5_GET(ptys_reg, out, an_status);
+ *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
+ *an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
+
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;
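
Taken together, the new port helpers are meant to be called in sequence; a hedged sketch of an ethtool-style speed change (the caller and error handling are illustrative):

static int example_set_link_settings(struct mlx5_core_dev *dev,
				     u32 proto_admin, bool an_disable)
{
	int err;

	/* program admin speeds and autoneg; fails with -EPERM if the
	 * device cannot disable autoneg (an_disable_cap clear)
	 */
	err = mlx5_set_port_ptys(dev, an_disable, proto_admin, MLX5_PTYS_EN);
	if (err)
		return err;

	/* bounce the port so the new PTYS configuration takes effect */
	mlx5_toggle_port_link(dev);
	return 0;
}
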
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index b720a274220d..b82d65802d96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
else
- *xrcdn = be32_to_cpu(out.xrcdn);
+ *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
new file mode 100644
index 000000000000..c07c28bd3d55
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/cmd.h>
+#include "mlx5_core.h"
+
+/* Find an entry where the given rate can be registered.
+ * If the rate already exists, return the entry where it is registered;
+ * otherwise return the first available entry.
+ * If the table is full, return NULL.
+ */
+static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
+ u32 rate)
+{
+ struct mlx5_rl_entry *ret_entry = NULL;
+ bool empty_found = false;
+ int i;
+
+ for (i = 0; i < table->max_size; i++) {
+ if (table->rl_entry[i].rate == rate)
+ return &table->rl_entry[i];
+ if (!empty_found && !table->rl_entry[i].rate) {
+ empty_found = true;
+ ret_entry = &table->rl_entry[i];
+ }
+ }
+
+ return ret_entry;
+}
+
+static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+ u32 rate, u16 index)
+{
+ u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)];
+ u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(set_rate_limit_in, in, opcode,
+ MLX5_CMD_OP_SET_RATE_LIMIT);
+ MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
+ MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+
+bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+
+ return (rate <= table->max_rate && rate >= table->min_rate);
+}
+EXPORT_SYMBOL(mlx5_rl_is_in_range);
+
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ struct mlx5_rl_entry *entry;
+ int err = 0;
+
+ mutex_lock(&table->rl_lock);
+
+ if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
+ mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
+ rate, table->min_rate, table->max_rate);
+ err = -EINVAL;
+ goto out;
+ }
+
+ entry = find_rl_entry(table, rate);
+ if (!entry) {
+ mlx5_core_err(dev, "Max number of %u rates reached\n",
+ table->max_size);
+ err = -ENOSPC;
+ goto out;
+ }
+ if (entry->refcount) {
+ /* rate already configured */
+ entry->refcount++;
+ } else {
+ /* new rate limit */
+ err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+ if (err) {
+ mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
+ rate, err);
+ goto out;
+ }
+ entry->rate = rate;
+ entry->refcount = 1;
+ }
+ *index = entry->index;
+
+out:
+ mutex_unlock(&table->rl_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlx5_rl_add_rate);
+
+void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ struct mlx5_rl_entry *entry = NULL;
+
+ /* 0 is a reserved value for unlimited rate */
+ if (rate == 0)
+ return;
+
+ mutex_lock(&table->rl_lock);
+ entry = find_rl_entry(table, rate);
+ if (!entry || !entry->refcount) {
+ mlx5_core_warn(dev, "Rate %u is not configured\n", rate);
+ goto out;
+ }
+
+ entry->refcount--;
+ if (!entry->refcount) {
+ /* need to remove rate */
+ mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+ entry->rate = 0;
+ }
+
+out:
+ mutex_unlock(&table->rl_lock);
+}
+EXPORT_SYMBOL(mlx5_rl_remove_rate);
+
+int mlx5_init_rl_table(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ int i;
+
+ mutex_init(&table->rl_lock);
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
+ table->max_size = 0;
+ return 0;
+ }
+
+ /* First entry is reserved for unlimited rate */
+ table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
+ table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
+ table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
+
+ table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
+ GFP_KERNEL);
+ if (!table->rl_entry)
+ return -ENOMEM;
+
+	/* The index represents the index in the HW rate limit table.
+	 * Index 0 is reserved for the unlimited rate.
+	 */
+ for (i = 0; i < table->max_size; i++)
+ table->rl_entry[i].index = i + 1;
+
+ mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
+ table->max_size,
+ table->min_rate >> 10,
+ table->max_rate >> 10);
+
+ return 0;
+}
+
+void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ int i;
+
+ /* Clear all configured rates */
+ for (i = 0; i < table->max_size; i++)
+ if (table->rl_entry[i].rate)
+ mlx5_set_rate_limit_cmd(dev, 0,
+ table->rl_entry[i].index);
+
+ kfree(dev->priv.rl_table.rl_entry);
+}
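
A usage sketch for the new rate-limit API (the rate value is a placeholder; per the init message above, the table's rate units convert to Mbps by a >> 10 shift). Entries are reference counted, so each successful mlx5_rl_add_rate() must eventually be paired with mlx5_rl_remove_rate() for the same rate:

static int example_rate_limit_sq(struct mlx5_core_dev *dev, u32 rate)
{
	u16 index;
	int err;

	err = mlx5_rl_add_rate(dev, rate, &index);
	if (err)
		return err;	/* -EINVAL out of range, -ENOSPC table full */

	/* ... program 'index' as the queue's rate_limit_index ... */

	mlx5_rl_remove_rate(dev, rate);	/* drop our reference */
	return 0;
}
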
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index d6a3f412ba9f..b380a6bc1f85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -167,7 +167,7 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
mlx5_core_init_vfs(dev, num_vfs);
#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs);
+ mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
#endif
return num_vfs;
@@ -209,7 +209,8 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
mlx5_core_init_vfs(dev, cur_vfs);
#ifdef CONFIG_MLX5_CORE_EN
if (cur_vfs)
- mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs);
+ mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs,
+ SRIOV_LEGACY);
#endif
enable_vfs(dev, cur_vfs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index b69dadcfb897..daf44cd4c566 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -508,6 +508,44 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *nic_vport_context;
+ u8 *guid;
+ void *in;
+ int err;
+
+ if (!vport)
+ return -EINVAL;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EACCES;
+ if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+ return -ENOTSUPP;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.node_guid, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+
+ nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
+ node_guid);
+ MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index f2fd1ef16da7..05de77267d58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -105,6 +105,9 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
struct mlx5e_vxlan *vxlan;
int err;
+ if (mlx5e_vxlan_lookup_port(priv, port))
+ goto free_work;
+
if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
goto free_work;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index ce21ee5b2357..821a087c7ae2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -75,14 +75,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
@@ -111,14 +111,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
@@ -148,13 +148,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 9b5ebf84c051..d20ae1838a64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o
mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
- spectrum_switchdev.o
+ spectrum_switchdev.o spectrum_router.o \
+ spectrum_kvdl.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index cd63b8263688..f9cd6e3f7709 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -607,6 +607,24 @@ MLXSW_ITEM32(cmd_mbox, config_profile,
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+/* cmd_mbox_config_set_kvd_linear_size
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_linear_size, 0x0C, 24, 1);
+
+/* cmd_mbox_config_set_kvd_hash_single_size
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);
+
+/* cmd_mbox_config_set_kvd_hash_double_size
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
+
/* cmd_mbox_config_profile_max_vepa_channels
* Maximum number of VEPA channels per port (0 through 16)
* 0 - multi-channel VEPA is disabled
@@ -733,6 +751,31 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+/* cmd_mbox_config_kvd_linear_size
+ * KVD Linear Size
+ * Valid for Spectrum only
+ * Allowed values are 128*N where N=0 or higher
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24);
+
+/* cmd_mbox_config_kvd_hash_single_size
+ * KVD Hash single-entries size
+ * Valid for Spectrum only
+ * Allowed values are 128*N where N=0 or higher
+ * Must be greater than or equal to cap_min_kvd_hash_single_size
+ * Must be less than or equal to cap_kvd_size - kvd_linear_size
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_single_size, 0x58, 0, 24);
+
+/* cmd_mbox_config_kvd_hash_double_size
+ * KVD Hash double-entries size (units of single-size entries)
+ * Valid for Spectrum only
+ * Allowed values are 128*N where N=0 or higher
+ * Must be either 0 or greater than or equal to cap_min_kvd_hash_double_size
+ * Must be less than or equal to cap_kvd_size - kvd_linear_size
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_double_size, 0x5C, 0, 24);
+
/* cmd_mbox_config_profile_swid_config_mask
* Modify Switch Partition Configuration mask. When set, the configu-
* ration value for the Switch Partition are taken from the mailbox.
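
The three KVD sizes above share one pool; a sketch of the arithmetic only (the split ratios are invented, and kvd_size is assumed to already be a multiple of 128 so the remainder stays aligned):

static void example_kvd_split(u32 kvd_size, u32 *linear, u32 *hash_single,
			      u32 *hash_double)
{
	*linear = 4096;					/* multiple of 128 */
	*hash_double = rounddown(kvd_size / 4, 128);	/* invented 1/4 share */
	/* singles take the remainder; still <= cap_kvd_size - linear */
	*hash_single = kvd_size - *linear - *hash_double;
}
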
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index b0a0b01bb4ef..01ae54826d5c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1736,7 +1736,7 @@ static int __init mlxsw_core_module_init(void)
{
int err;
- mlxsw_wq = create_workqueue(mlxsw_core_driver_name);
+ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
if (!mlxsw_wq)
return -ENOMEM;
mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 436bc49df6ab..2fe385cce203 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -190,7 +190,8 @@ struct mlxsw_config_profile {
used_max_ib_mc:1,
used_max_pkey:1,
used_ar_sec:1,
- used_adaptive_routing_group_cap:1;
+ used_adaptive_routing_group_cap:1,
+ used_kvd_sizes:1;
u8 max_vepa_channels;
u16 max_lag;
u16 max_port_per_lag;
@@ -211,6 +212,9 @@ struct mlxsw_config_profile {
u8 ar_sec;
u16 adaptive_routing_group_cap;
u8 arn;
+ u32 kvd_linear_size;
+ u32 kvd_hash_single_size;
+ u32 kvd_hash_double_size;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 7f4173c8eda3..ddbc9f22278d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1255,6 +1255,20 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
+ if (profile->used_kvd_sizes) {
+ mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(
+ mbox, profile->kvd_linear_size);
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(
+ mbox, profile->kvd_hash_single_size);
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(
+ mbox, profile->kvd_hash_double_size);
+ }
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1977e7a5c530..0cc148566677 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1,9 +1,10 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/reg.h
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -386,7 +387,9 @@ enum mlxsw_reg_sfd_rec_action {
/* forward and trap, trap_id is FDB_TRAP */
MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1,
/* trap and do not forward, trap_id is FDB_TRAP */
- MLXSW_REG_SFD_REC_ACTION_TRAP = 3,
+ MLXSW_REG_SFD_REC_ACTION_TRAP = 2,
+ /* forward to IP router */
+ MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER = 3,
MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15,
};
@@ -3186,6 +3189,1183 @@ static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
}
+/* RGCR - Router General Configuration Register
+ * --------------------------------------------
+ * The register is used for setting up the router configuration.
+ */
+#define MLXSW_REG_RGCR_ID 0x8001
+#define MLXSW_REG_RGCR_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_rgcr = {
+ .id = MLXSW_REG_RGCR_ID,
+ .len = MLXSW_REG_RGCR_LEN,
+};
+
+/* reg_rgcr_ipv4_en
+ * IPv4 router enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rgcr, ipv4_en, 0x00, 31, 1);
+
+/* reg_rgcr_ipv6_en
+ * IPv6 router enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rgcr, ipv6_en, 0x00, 30, 1);
+
+/* reg_rgcr_max_router_interfaces
+ * Defines the maximum number of active router interfaces for all virtual
+ * routers.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rgcr, max_router_interfaces, 0x10, 0, 16);
+
+/* reg_rgcr_usp
+ * Update switch priority and packet color.
+ * 0 - Preserve the value of Switch Priority and packet color.
+ * 1 - Recalculate the value of Switch Priority and packet color.
+ * Access: RW
+ *
+ * Note: Not supported by SwitchX and SwitchX-2.
+ */
+MLXSW_ITEM32(reg, rgcr, usp, 0x18, 20, 1);
+
+/* reg_rgcr_pcp_rw
+ * Indicates how to handle the pcp_rewrite_en value:
+ * 0 - Preserve the value of pcp_rewrite_en.
+ * 2 - Disable PCP rewrite.
+ * 3 - Enable PCP rewrite.
+ * Access: RW
+ *
+ * Note: Not supported by SwitchX and SwitchX-2.
+ */
+MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2);
+
+/* reg_rgcr_activity_dis
+ * Activity disable:
+ * 0 - Activity will be set when an entry is hit (default).
+ * 1 - Activity will not be set when an entry is hit.
+ *
+ * Bit 0 - Disable activity bit in Router Algorithmic LPM Unicast Entry
+ * (RALUE).
+ * Bit 1 - Disable activity bit in Router Algorithmic LPM Unicast Host
+ * Entry (RAUHT).
+ * Bits 2:7 are reserved.
+ * Access: RW
+ *
+ * Note: Not supported by SwitchX, SwitchX-2 and Switch-IB.
+ */
+MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8);
+
+static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en)
+{
+ MLXSW_REG_ZERO(rgcr, payload);
+ mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en);
+}
+
+/* RITR - Router Interface Table Register
+ * --------------------------------------
+ * The register is used to configure the router interface table.
+ */
+#define MLXSW_REG_RITR_ID 0x8002
+#define MLXSW_REG_RITR_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ritr = {
+ .id = MLXSW_REG_RITR_ID,
+ .len = MLXSW_REG_RITR_LEN,
+};
+
+/* reg_ritr_enable
+ * Enables routing on the router interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, enable, 0x00, 31, 1);
+
+/* reg_ritr_ipv4
+ * IPv4 routing enable. Enables routing of IPv4 traffic on the router
+ * interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1);
+
+/* reg_ritr_ipv6
+ * IPv6 routing enable. Enables routing of IPv6 traffic on the router
+ * interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
+
+enum mlxsw_reg_ritr_if_type {
+ MLXSW_REG_RITR_VLAN_IF,
+ MLXSW_REG_RITR_FID_IF,
+ MLXSW_REG_RITR_SP_IF,
+};
+
+/* reg_ritr_type
+ * Router interface type.
+ * 0 - VLAN interface.
+ * 1 - FID interface.
+ * 2 - Sub-port interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3);
+
+enum {
+ MLXSW_REG_RITR_RIF_CREATE,
+ MLXSW_REG_RITR_RIF_DEL,
+};
+
+/* reg_ritr_op
+ * Opcode:
+ * 0 - Create or edit RIF.
+ * 1 - Delete RIF.
+ * Reserved for SwitchX-2. For Spectrum, editing of interface properties
+ * is not supported. An interface must be deleted and re-created in order
+ * to update properties.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ritr, op, 0x00, 20, 2);
+
+/* reg_ritr_rif
+ * Router interface index. A pointer to the Router Interface Table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ritr, rif, 0x00, 0, 16);
+
+/* reg_ritr_ipv4_fe
+ * IPv4 Forwarding Enable.
+ * Enables routing of IPv4 traffic on the router interface. When disabled,
+ * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
+ * Not supported in SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
+
+/* reg_ritr_ipv6_fe
+ * IPv6 Forwarding Enable.
+ * Enables routing of IPv6 traffic on the router interface. When disabled,
+ * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
+ * Not supported in SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
+
+/* reg_ritr_virtual_router
+ * Virtual router ID associated with the router interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, virtual_router, 0x04, 0, 16);
+
+/* reg_ritr_mtu
+ * Router interface MTU.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16);
+
+/* reg_ritr_if_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
+
+/* reg_ritr_if_mac
+ * Router interface MAC address.
+ * In Spectrum, all MAC addresses must have the same 38 MSBits.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6);
+
+/* VLAN Interface */
+
+/* reg_ritr_vlan_if_vid
+ * VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12);
+
+/* FID Interface */
+
+/* reg_ritr_fid_if_fid
+ * Filtering ID. Used to connect a bridge to the router. Only FIDs from
+ * the vFID range are supported.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16);
+
+static inline void mlxsw_reg_ritr_fid_set(char *payload,
+ enum mlxsw_reg_ritr_if_type rif_type,
+ u16 fid)
+{
+ if (rif_type == MLXSW_REG_RITR_FID_IF)
+ mlxsw_reg_ritr_fid_if_fid_set(payload, fid);
+ else
+ mlxsw_reg_ritr_vlan_if_vid_set(payload, fid);
+}
+
+/* Sub-port Interface */
+
+/* reg_ritr_sp_if_lag
+ * LAG indication. When this bit is set the system_port field holds the
+ * LAG identifier.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1);
+
+/* reg_ritr_sp_if_system_port
+ * Unique port identifier. When the lag bit is set, this field holds the
+ * lag_id in bits 0:9.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
+
+/* reg_ritr_sp_if_vid
+ * VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12);
+
+static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
+{
+ MLXSW_REG_ZERO(ritr, payload);
+ mlxsw_reg_ritr_rif_set(payload, rif);
+}
+
+static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
+ u16 system_port, u16 vid)
+{
+ mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
+ mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
+ mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
+}
+
+static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
+ enum mlxsw_reg_ritr_if_type type,
+ u16 rif, u16 mtu, const char *mac)
+{
+ bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
+
+ MLXSW_REG_ZERO(ritr, payload);
+ mlxsw_reg_ritr_enable_set(payload, enable);
+ mlxsw_reg_ritr_ipv4_set(payload, 1);
+ mlxsw_reg_ritr_type_set(payload, type);
+ mlxsw_reg_ritr_op_set(payload, op);
+ mlxsw_reg_ritr_rif_set(payload, rif);
+ mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
+ mlxsw_reg_ritr_mtu_set(payload, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
+}
+
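A sketch of building a sub-port RIF payload with the helpers above (rif index, VID, MTU and MAC are placeholders; issuing the register write is elided):

static void example_sp_rif_create(char *payload, u16 rif, u16 system_port,
				  u16 vid, const char *mac)
{
	mlxsw_reg_ritr_pack(payload, true, MLXSW_REG_RITR_SP_IF, rif,
			    1500 /* MTU */, mac);
	mlxsw_reg_ritr_sp_if_pack(payload, false /* not a LAG */,
				  system_port, vid);
	/* then e.g.: mlxsw_reg_write(core, MLXSW_REG(ritr), payload); */
}
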
+/* RATR - Router Adjacency Table Register
+ * --------------------------------------
+ * The RATR register is used to configure the Router Adjacency (next-hop)
+ * Table.
+ */
+#define MLXSW_REG_RATR_ID 0x8008
+#define MLXSW_REG_RATR_LEN 0x2C
+
+static const struct mlxsw_reg_info mlxsw_reg_ratr = {
+ .id = MLXSW_REG_RATR_ID,
+ .len = MLXSW_REG_RATR_LEN,
+};
+
+enum mlxsw_reg_ratr_op {
+ /* Read */
+ MLXSW_REG_RATR_OP_QUERY_READ = 0,
+ /* Read and clear activity */
+ MLXSW_REG_RATR_OP_QUERY_READ_CLEAR = 2,
+ /* Write Adjacency entry */
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY = 1,
+ /* Write Adjacency entry only if the activity is cleared.
+ * The write may not succeed if the activity is set. There is no
+ * direct feedback on whether the write succeeded; however, a
+ * subsequent get will reveal the actual entry (SW can compare the get
+ * response to the set command).
+ */
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY = 3,
+};
+
+/* reg_ratr_op
+ * Note that Write operation may also be used for updating
+ * counter_set_type and counter_index. In this case all other
+ * fields must not be updated.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ratr, op, 0x00, 28, 4);
+
+/* reg_ratr_v
+ * Valid bit. Indicates if the adjacency entry is valid.
+ * Note: the device may need some time before reusing an invalidated
+ * entry. During this time the entry can not be reused. It is
+ * recommended to use another entry before reusing an invalidated
+ * entry (e.g. software can put it at the end of the list for
+ * reusing). Trying to access an invalidated entry not yet cleared
+ * by the device results with failure indicating "Try Again" status.
+ * When valid is '0' then egress_router_interface,trap_action,
+ * adjacency_parameters and counters are reserved
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, v, 0x00, 24, 1);
+
+/* reg_ratr_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on
+ * the specific entry. To clear the a bit, use "clear activity".
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ratr, a, 0x00, 16, 1);
+
+/* reg_ratr_adjacency_index_low
+ * Bits 15:0 of index into the adjacency table.
+ * For SwitchX and SwitchX-2, the adjacency table is linear and
+ * used for adjacency entries only.
+ * For Spectrum, the index is to the KVD linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratr, adjacency_index_low, 0x04, 0, 16);
+
+/* reg_ratr_egress_router_interface
+ * Range is 0 .. cap_max_router_interfaces - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, egress_router_interface, 0x08, 0, 16);
+
+enum mlxsw_reg_ratr_trap_action {
+ MLXSW_REG_RATR_TRAP_ACTION_NOP,
+ MLXSW_REG_RATR_TRAP_ACTION_TRAP,
+ MLXSW_REG_RATR_TRAP_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_RATR_TRAP_ACTION_MIRROR,
+ MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS,
+};
+
+/* reg_ratr_trap_action
+ * see mlxsw_reg_ratr_trap_action
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, trap_action, 0x0C, 28, 4);
+
+enum mlxsw_reg_ratr_trap_id {
+ MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS0 = 0,
+ MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS1 = 1,
+};
+
+/* reg_ratr_adjacency_index_high
+ * Bits 23:16 of the adjacency_index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratr, adjacency_index_high, 0x0C, 16, 8);
+
+/* reg_ratr_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERROR
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, trap_id, 0x0C, 0, 8);
+
+/* reg_ratr_eth_destination_mac
+ * MAC address of the destination next-hop.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ratr, eth_destination_mac, 0x12, 6);
+
+static inline void
+mlxsw_reg_ratr_pack(char *payload,
+ enum mlxsw_reg_ratr_op op, bool valid,
+ u32 adjacency_index, u16 egress_rif)
+{
+ MLXSW_REG_ZERO(ratr, payload);
+ mlxsw_reg_ratr_op_set(payload, op);
+ mlxsw_reg_ratr_v_set(payload, valid);
+ mlxsw_reg_ratr_adjacency_index_low_set(payload, adjacency_index);
+ mlxsw_reg_ratr_adjacency_index_high_set(payload, adjacency_index >> 16);
+ mlxsw_reg_ratr_egress_router_interface_set(payload, egress_rif);
+}
+
+static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload,
+ const char *dest_mac)
+{
+ mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac);
+}
+
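A sketch of writing one Ethernet next-hop entry with the two pack helpers above (adjacency index, egress RIF and neighbour MAC are placeholders):

static void example_nexthop_write(char *payload, u32 adj_index, u16 erif,
				  const char *neigh_mac)
{
	mlxsw_reg_ratr_pack(payload, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true /* valid */, adj_index, erif);
	mlxsw_reg_ratr_eth_entry_pack(payload, neigh_mac);
	/* then e.g.: mlxsw_reg_write(core, MLXSW_REG(ratr), payload); */
}
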
+/* RALTA - Router Algorithmic LPM Tree Allocation Register
+ * -------------------------------------------------------
+ * RALTA is used to allocate the LPM trees of the SHSPM method.
+ */
+#define MLXSW_REG_RALTA_ID 0x8010
+#define MLXSW_REG_RALTA_LEN 0x04
+
+static const struct mlxsw_reg_info mlxsw_reg_ralta = {
+ .id = MLXSW_REG_RALTA_ID,
+ .len = MLXSW_REG_RALTA_LEN,
+};
+
+/* reg_ralta_op
+ * opcode (valid for Write, must be 0 on Read)
+ * 0 - allocate a tree
+ * 1 - deallocate a tree
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ralta, op, 0x00, 28, 2);
+
+enum mlxsw_reg_ralxx_protocol {
+ MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_REG_RALXX_PROTOCOL_IPV6,
+};
+
+/* reg_ralta_protocol
+ * Protocol.
+ * Deallocation opcode: Reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralta, protocol, 0x00, 24, 4);
+
+/* reg_ralta_tree_id
+ * A software-managed tree identifier, numbered from
+ * 1..cap_shspm_max_trees-1.
+ * Note that tree_id 0 is allocated for a default-route tree.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralta, tree_id, 0x00, 0, 8);
+
+static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ MLXSW_REG_ZERO(ralta, payload);
+ mlxsw_reg_ralta_op_set(payload, !alloc);
+ mlxsw_reg_ralta_protocol_set(payload, protocol);
+ mlxsw_reg_ralta_tree_id_set(payload, tree_id);
+}
+
+/* RALST - Router Algorithmic LPM Structure Tree Register
+ * ------------------------------------------------------
+ * RALST is used to set and query the structure of an LPM tree.
+ * The structure of the tree must be sorted as a sorted binary tree, while
+ * each node is a bin that is tagged as the length of the prefixes the lookup
+ * will refer to. Therefore, bin X refers to a set of entries with prefixes
+ * of X bits to match with the destination address. The bin 0 indicates
+ * the default action, when there is no match of any prefix.
+ */
+#define MLXSW_REG_RALST_ID 0x8011
+#define MLXSW_REG_RALST_LEN 0x104
+
+static const struct mlxsw_reg_info mlxsw_reg_ralst = {
+ .id = MLXSW_REG_RALST_ID,
+ .len = MLXSW_REG_RALST_LEN,
+};
+
+/* reg_ralst_root_bin
+ * The bin number of the root bin.
+ * Valid range: 0 < root_bin <= (length of the IP address).
+ * For a default-route tree, configure 0xff.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralst, root_bin, 0x00, 16, 8);
+
+/* reg_ralst_tree_id
+ * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralst, tree_id, 0x00, 0, 8);
+
+#define MLXSW_REG_RALST_BIN_NO_CHILD 0xff
+#define MLXSW_REG_RALST_BIN_OFFSET 0x04
+#define MLXSW_REG_RALST_BIN_COUNT 128
+
+/* reg_ralst_left_child_bin
+ * Holding the children of the bin according to the stored tree's structure.
+ * For trees composed of fewer than 4 blocks, the bins in excess are reserved.
+ * Note that tree_id 0 is allocated for a default-route tree; its bins are 0xff.
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, ralst, left_child_bin, 0x04, 8, 8, 0x02, 0x00, false);
+
+/* reg_ralst_right_child_bin
+ * Holding the children of the bin according to the stored tree's structure.
+ * For trees composed of fewer than 4 blocks, the bins in excess are reserved.
+ * Note that tree_id 0 is allocated for a default-route tree; its bins are 0xff.
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, ralst, right_child_bin, 0x04, 0, 8, 0x02, 0x00,
+ false);
+
+static inline void mlxsw_reg_ralst_pack(char *payload, u8 root_bin, u8 tree_id)
+{
+ MLXSW_REG_ZERO(ralst, payload);
+
+ /* Initialize all bins to have no left or right child */
+ memset(payload + MLXSW_REG_RALST_BIN_OFFSET,
+ MLXSW_REG_RALST_BIN_NO_CHILD, MLXSW_REG_RALST_BIN_COUNT * 2);
+
+ mlxsw_reg_ralst_root_bin_set(payload, root_bin);
+ mlxsw_reg_ralst_tree_id_set(payload, tree_id);
+}
+
+static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number,
+ u8 left_child_bin,
+ u8 right_child_bin)
+{
+ int bin_index = bin_number - 1;
+
+ mlxsw_reg_ralst_left_child_bin_set(payload, bin_index, left_child_bin);
+ mlxsw_reg_ralst_right_child_bin_set(payload, bin_index,
+ right_child_bin);
+}
+
+/* RALTB - Router Algorithmic LPM Tree Binding Register
+ * ----------------------------------------------------
+ * RALTB is used to bind virtual router and protocol to an allocated LPM tree.
+ */
+#define MLXSW_REG_RALTB_ID 0x8012
+#define MLXSW_REG_RALTB_LEN 0x04
+
+static const struct mlxsw_reg_info mlxsw_reg_raltb = {
+ .id = MLXSW_REG_RALTB_ID,
+ .len = MLXSW_REG_RALTB_LEN,
+};
+
+/* reg_raltb_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raltb, virtual_router, 0x00, 16, 16);
+
+/* reg_raltb_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raltb, protocol, 0x00, 12, 4);
+
+/* reg_raltb_tree_id
+ * Tree to be used for the {virtual_router, protocol}
+ * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
+ * By default, all Unicast IPv4 and IPv6 are bound to tree_id 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, raltb, tree_id, 0x00, 0, 8);
+
+static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ MLXSW_REG_ZERO(raltb, payload);
+ mlxsw_reg_raltb_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_raltb_protocol_set(payload, protocol);
+ mlxsw_reg_raltb_tree_id_set(payload, tree_id);
+}
+
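Putting RALTA, RALST and RALTB together, a hedged sketch of bringing up one IPv4 LPM tree: allocate it, describe its bin structure, then bind it to a virtual router. The tree id and bin layout are invented and not a recommendation for a real prefix profile:

static void example_lpm_tree_setup(char *ralta_pl, char *ralst_pl,
				   char *raltb_pl, u16 vr_id)
{
	const u8 tree_id = 1;	/* tree 0 is the default-route tree */

	mlxsw_reg_ralta_pack(ralta_pl, true /* alloc */,
			     MLXSW_REG_RALXX_PROTOCOL_IPV4, tree_id);
	mlxsw_reg_ralst_pack(ralst_pl, 24 /* root bin */, tree_id);
	/* invented structure: the /24 root bin with /16 and /32 children */
	mlxsw_reg_ralst_bin_pack(ralst_pl, 24, 16, 32);
	mlxsw_reg_raltb_pack(raltb_pl, vr_id,
			     MLXSW_REG_RALXX_PROTOCOL_IPV4, tree_id);
	/* the three payloads are then written in this order */
}
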
+/* RALUE - Router Algorithmic LPM Unicast Entry Register
+ * -----------------------------------------------------
+ * RALUE is used to configure and query LPM entries that serve
+ * the Unicast protocols.
+ */
+#define MLXSW_REG_RALUE_ID 0x8013
+#define MLXSW_REG_RALUE_LEN 0x38
+
+static const struct mlxsw_reg_info mlxsw_reg_ralue = {
+ .id = MLXSW_REG_RALUE_ID,
+ .len = MLXSW_REG_RALUE_LEN,
+};
+
+/* reg_ralue_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, protocol, 0x00, 24, 4);
+
+enum mlxsw_reg_ralue_op {
+ /* Read operation. If entry doesn't exist, the operation fails. */
+ MLXSW_REG_RALUE_OP_QUERY_READ = 0,
+ /* Clear on read operation. Used to read entry and
+ * clear Activity bit.
+ */
+ MLXSW_REG_RALUE_OP_QUERY_CLEAR = 1,
+ /* Write operation. Used to write a new entry to the table. All RW
+ * fields are written for new entry. Activity bit is set
+ * for new entries.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_WRITE = 0,
+ /* Update operation. Used to update an existing route entry and
+ * only update the RW fields that are detailed in the field
+ * op_u_mask. If entry doesn't exist, the operation fails.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_UPDATE = 1,
+ /* Clear activity. The Activity bit (the field a) is cleared
+ * for the entry.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_CLEAR = 2,
+ /* Delete operation. Used to delete an existing entry. If entry
+ * doesn't exist, the operation fails.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_DELETE = 3,
+};
+
+/* reg_ralue_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ralue, op, 0x00, 20, 3);
+
+/* reg_ralue_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the
+ * specific entry, only if the entry is a route. To clear the a bit, use
+ * "clear activity" op.
+ * Enabled by activity_dis in RGCR
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ralue, a, 0x00, 16, 1);
+
+/* reg_ralue_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, virtual_router, 0x04, 16, 16);
+
+#define MLXSW_REG_RALUE_OP_U_MASK_ENTRY_TYPE BIT(0)
+#define MLXSW_REG_RALUE_OP_U_MASK_BMP_LEN BIT(1)
+#define MLXSW_REG_RALUE_OP_U_MASK_ACTION BIT(2)
+
+/* reg_ralue_op_u_mask
+ * opcode update mask.
+ * On read operation, this field is reserved.
+ * This field is valid for update opcode, otherwise - reserved.
+ * This field is a bitmask of the fields that should be updated.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ralue, op_u_mask, 0x04, 8, 3);
+
+/* reg_ralue_prefix_len
+ * Number of bits in the prefix of the LPM route.
+ * Note that for IPv6 prefixes, if prefix_len>64 the entry consumes
+ * two entries in the physical HW table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8);
+
+/* reg_ralue_dip*
+ * The prefix of the route or of the marker that the object of the LPM
+ * is compared with. The most significant bits of the dip are the prefix.
+ * The least significant bits must be '0' if the prefix_len is smaller
+ * than 128 for IPv6 or smaller than 32 for IPv4.
+ * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32);
+
+enum mlxsw_reg_ralue_entry_type {
+ MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1,
+ MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY = 2,
+ MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_AND_ROUTE_ENTRY = 3,
+};
+
+/* reg_ralue_entry_type
+ * Entry type.
+ * Note - for Marker entries, the action_type and action fields are reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, entry_type, 0x1C, 30, 2);
+
+/* reg_ralue_bmp_len
+ * The best match prefix length in the case that there is no match for
+ * longer prefixes.
+ * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len
+ * Note that for any update operation that modifies entry_type, this
+ * field must be set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, bmp_len, 0x1C, 16, 8);
+
+enum mlxsw_reg_ralue_action_type {
+ MLXSW_REG_RALUE_ACTION_TYPE_REMOTE,
+ MLXSW_REG_RALUE_ACTION_TYPE_LOCAL,
+ MLXSW_REG_RALUE_ACTION_TYPE_IP2ME,
+};
+
+/* reg_ralue_action_type
+ * Action Type
+ * Indicates how the IP address is connected.
+ * It can be connected to a local subnet through local_erif or can be
+ * on a remote subnet connected through a next-hop router,
+ * or transmitted to the CPU.
+ * Reserved when entry_type = MARKER_ENTRY
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, action_type, 0x1C, 0, 2);
+
+enum mlxsw_reg_ralue_trap_action {
+ MLXSW_REG_RALUE_TRAP_ACTION_NOP,
+ MLXSW_REG_RALUE_TRAP_ACTION_TRAP,
+ MLXSW_REG_RALUE_TRAP_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_RALUE_TRAP_ACTION_MIRROR,
+ MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR,
+};
+
+/* reg_ralue_trap_action
+ * Trap action.
+ * For IP2ME action, only NOP and MIRROR are possible.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, trap_action, 0x20, 28, 4);
+
+/* reg_ralue_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap ID is RTR_INGRESS0 or RTR_INGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, trap_id, 0x20, 0, 9);
+
+/* reg_ralue_adjacency_index
+ * Points to the first entry of the group-based ECMP.
+ * Only relevant in case of REMOTE action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, adjacency_index, 0x24, 0, 24);
+
+/* reg_ralue_ecmp_size
+ * Amount of sequential entries starting
+ * from the adjacency_index (the number of ECMPs).
+ * The valid range is 1-64, 512, 1024, 2048 and 4096.
+ * Reserved when trap_action is TRAP or DISCARD_ERROR.
+ * Only relevant in case of REMOTE action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13);
+
+/* reg_ralue_local_erif
+ * Egress Router Interface.
+ * Only relevant in case of LOCAL action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16);
+
+/* reg_ralue_v
+ * Valid bit for the tunnel_ptr field.
+ * If v = 0, the packet is trapped to the CPU with the IP2ME trap ID.
+ * If v = 1 and the packet format allows NVE or IPinIP tunnel
+ * decapsulation, tunnel decapsulation is done.
+ * If v = 1 and the packet format does not allow NVE or IPinIP tunnel
+ * decapsulation, the packet is trapped with the IP2ME trap ID.
+ * Only relevant in case of IP2ME action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1);
+
+/* reg_ralue_tunnel_ptr
+ * Tunnel Pointer for NVE or IPinIP tunnel decapsulation.
+ * For Spectrum, pointer to KVD Linear.
+ * Only relevant in case of IP2ME action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24);
+
+static inline void mlxsw_reg_ralue_pack(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ enum mlxsw_reg_ralue_op op,
+ u16 virtual_router, u8 prefix_len)
+{
+	MLXSW_REG_ZERO(ralue, payload);
+	mlxsw_reg_ralue_protocol_set(payload, protocol);
+	mlxsw_reg_ralue_op_set(payload, op);
+	mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
+ mlxsw_reg_ralue_entry_type_set(payload,
+ MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
+ mlxsw_reg_ralue_bmp_len_set(payload, prefix_len);
+}
+
+static inline void mlxsw_reg_ralue_pack4(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ enum mlxsw_reg_ralue_op op,
+ u16 virtual_router, u8 prefix_len,
+ u32 dip)
+{
+ mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
+ mlxsw_reg_ralue_dip4_set(payload, dip);
+}
+
+static inline void
+mlxsw_reg_ralue_act_remote_pack(char *payload,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u32 adjacency_index, u16 ecmp_size)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_REMOTE);
+ mlxsw_reg_ralue_trap_action_set(payload, trap_action);
+ mlxsw_reg_ralue_trap_id_set(payload, trap_id);
+ mlxsw_reg_ralue_adjacency_index_set(payload, adjacency_index);
+ mlxsw_reg_ralue_ecmp_size_set(payload, ecmp_size);
+}
+
+static inline void
+mlxsw_reg_ralue_act_local_pack(char *payload,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u16 local_erif)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_LOCAL);
+ mlxsw_reg_ralue_trap_action_set(payload, trap_action);
+ mlxsw_reg_ralue_trap_id_set(payload, trap_id);
+ mlxsw_reg_ralue_local_erif_set(payload, local_erif);
+}
+
+static inline void
+mlxsw_reg_ralue_act_ip2me_pack(char *payload)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_IP2ME);
+}
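+
+/* Usage sketch: write an IPv4 LPM route that points at an ECMP group.
+ * The values (virtual router 0, 198.51.100.0/24, adjacency index 0,
+ * four next hops) are illustrative, and the protocol and op
+ * enumerators are assumed from the RALXX/RALUE enums defined earlier
+ * in this file.
+ */
+static inline void mlxsw_example_ralue_route_write(char *payload)
+{
+	mlxsw_reg_ralue_pack4(payload, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+			      MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 24,
+			      0xc6336400);
+	mlxsw_reg_ralue_act_remote_pack(payload,
+					MLXSW_REG_RALUE_TRAP_ACTION_NOP,
+					0, 0, 4);
+}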
+
+/* RAUHT - Router Algorithmic LPM Unicast Host Table Register
+ * ----------------------------------------------------------
+ * The RAUHT register is used to configure and query the Unicast Host table in
+ * devices that implement the Algorithmic LPM.
+ */
+#define MLXSW_REG_RAUHT_ID 0x8014
+#define MLXSW_REG_RAUHT_LEN 0x74
+
+static const struct mlxsw_reg_info mlxsw_reg_rauht = {
+ .id = MLXSW_REG_RAUHT_ID,
+ .len = MLXSW_REG_RAUHT_LEN,
+};
+
+enum mlxsw_reg_rauht_type {
+ MLXSW_REG_RAUHT_TYPE_IPV4,
+ MLXSW_REG_RAUHT_TYPE_IPV6,
+};
+
+/* reg_rauht_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauht, type, 0x00, 24, 2);
+
+enum mlxsw_reg_rauht_op {
+ MLXSW_REG_RAUHT_OP_QUERY_READ = 0,
+ /* Read operation */
+ MLXSW_REG_RAUHT_OP_QUERY_CLEAR_ON_READ = 1,
+ /* Clear on read operation. Used to read entry and clear
+ * activity bit.
+ */
+ MLXSW_REG_RAUHT_OP_WRITE_ADD = 0,
+ /* Add. Used to write a new entry to the table. All R/W fields are
+ * relevant for new entry. Activity bit is set for new entries.
+ */
+ MLXSW_REG_RAUHT_OP_WRITE_UPDATE = 1,
+ /* Update action. Used to update an existing route entry and
+ * only update the following fields:
+ * trap_action, trap_id, mac, counter_set_type, counter_index
+ */
+ MLXSW_REG_RAUHT_OP_WRITE_CLEAR_ACTIVITY = 2,
+ /* Clear activity. A bit is cleared for the entry. */
+ MLXSW_REG_RAUHT_OP_WRITE_DELETE = 3,
+ /* Delete entry */
+ MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL = 4,
+ /* Delete all host entries on a RIF. In this command, dip
+ * field is reserved.
+ */
+};
+
+/* reg_rauht_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, rauht, op, 0x00, 20, 3);
+
+/* reg_rauht_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on
+ * the specific entry.
+ * To clear the a bit, use the "clear activity" op.
+ * Activity recording is controlled by activity_dis in RGCR.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, rauht, a, 0x00, 16, 1);
+
+/* reg_rauht_rif
+ * Router Interface
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16);
+
+/* reg_rauht_dip*
+ * Destination address.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32);
+
+enum mlxsw_reg_rauht_trap_action {
+ MLXSW_REG_RAUHT_TRAP_ACTION_NOP,
+ MLXSW_REG_RAUHT_TRAP_ACTION_TRAP,
+ MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR,
+ MLXSW_REG_RAUHT_TRAP_ACTION_DISCARD_ERRORS,
+};
+
+/* reg_rauht_trap_action
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, trap_action, 0x60, 28, 4);
+
+enum mlxsw_reg_rauht_trap_id {
+ MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS0,
+ MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS1,
+};
+
+/* reg_rauht_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERRORS,
+ * trap_id is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9);
+
+/* reg_rauht_counter_set_type
+ * Counter set type for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, counter_set_type, 0x68, 24, 8);
+
+/* reg_rauht_counter_index
+ * Counter index for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, counter_index, 0x68, 0, 24);
+
+/* reg_rauht_mac
+ * MAC address.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rauht, mac, 0x6E, 6);
+
+static inline void mlxsw_reg_rauht_pack(char *payload,
+ enum mlxsw_reg_rauht_op op, u16 rif,
+ const char *mac)
+{
+ MLXSW_REG_ZERO(rauht, payload);
+ mlxsw_reg_rauht_op_set(payload, op);
+ mlxsw_reg_rauht_rif_set(payload, rif);
+ mlxsw_reg_rauht_mac_memcpy_to(payload, mac);
+}
+
+static inline void mlxsw_reg_rauht_pack4(char *payload,
+ enum mlxsw_reg_rauht_op op, u16 rif,
+ const char *mac, u32 dip)
+{
+ mlxsw_reg_rauht_pack(payload, op, rif, mac);
+ mlxsw_reg_rauht_dip4_set(payload, dip);
+}
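+
+/* Usage sketch: add an IPv4 host (neighbour) entry. The pack helpers
+ * zero the payload, so type stays at its IPv4 default; rif, mac and
+ * dip are whatever the caller resolved.
+ */
+static inline void mlxsw_example_rauht_neigh_add(char *payload, u16 rif,
+						 const char *mac, u32 dip)
+{
+	mlxsw_reg_rauht_pack4(payload, MLXSW_REG_RAUHT_OP_WRITE_ADD,
+			      rif, mac, dip);
+}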
+
+/* RALEU - Router Algorithmic LPM ECMP Update Register
+ * ---------------------------------------------------
+ * The register enables updating the ECMP section in the action for multiple
+ * LPM Unicast entries in a single operation. The update is applied to
+ * all entries of a {virtual router, protocol} tuple that use the same
+ * ECMP group.
+ */
+#define MLXSW_REG_RALEU_ID 0x8015
+#define MLXSW_REG_RALEU_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_raleu = {
+ .id = MLXSW_REG_RALEU_ID,
+ .len = MLXSW_REG_RALEU_LEN,
+};
+
+/* reg_raleu_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, protocol, 0x00, 24, 4);
+
+/* reg_raleu_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, virtual_router, 0x00, 0, 16);
+
+/* reg_raleu_adjacency_index
+ * Adjacency Index used for matching on the existing entries.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, adjacency_index, 0x10, 0, 24);
+
+/* reg_raleu_ecmp_size
+ * ECMP Size used for matching on the existing entries.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, ecmp_size, 0x14, 0, 13);
+
+/* reg_raleu_new_adjacency_index
+ * New Adjacency Index.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, raleu, new_adjacency_index, 0x20, 0, 24);
+
+/* reg_raleu_new_ecmp_size
+ * New ECMP Size.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, raleu, new_ecmp_size, 0x24, 0, 13);
+
+static inline void mlxsw_reg_raleu_pack(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u16 virtual_router,
+ u32 adjacency_index, u16 ecmp_size,
+ u32 new_adjacency_index,
+ u16 new_ecmp_size)
+{
+ MLXSW_REG_ZERO(raleu, payload);
+ mlxsw_reg_raleu_protocol_set(payload, protocol);
+ mlxsw_reg_raleu_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_raleu_adjacency_index_set(payload, adjacency_index);
+ mlxsw_reg_raleu_ecmp_size_set(payload, ecmp_size);
+ mlxsw_reg_raleu_new_adjacency_index_set(payload, new_adjacency_index);
+ mlxsw_reg_raleu_new_ecmp_size_set(payload, new_ecmp_size);
+}
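+
+/* Usage sketch: re-point every route of a {virtual router, protocol}
+ * tuple that currently uses the {adjacency_index = 0, ecmp_size = 4}
+ * group to a new group at index 1024 with 8 paths. All values are
+ * illustrative.
+ */
+static inline void mlxsw_example_raleu_repoint(char *payload)
+{
+	mlxsw_reg_raleu_pack(payload, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0,
+			     0, 4, 1024, 8);
+}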
+
+/* RAUHTD - Router Algorithmic LPM Unicast Host Table Dump Register
+ * ----------------------------------------------------------------
+ * The RAUHTD register allows dumping entries from the Router Unicast Host
+ * Table. For a given session an entry is dumped no more than one time. The
+ * first RAUHTD access after reset starts a new session. A session ends when
+ * the num_rec in the response is smaller than the num_rec requested, or, for
+ * IPv4, when num_entries is smaller than 4. The clear activity affects the
+ * current session, or the last session if a new session has not started.
+ */
+#define MLXSW_REG_RAUHTD_ID 0x8018
+#define MLXSW_REG_RAUHTD_BASE_LEN 0x20
+#define MLXSW_REG_RAUHTD_REC_LEN 0x20
+#define MLXSW_REG_RAUHTD_REC_MAX_NUM 32
+#define MLXSW_REG_RAUHTD_LEN (MLXSW_REG_RAUHTD_BASE_LEN + \
+ MLXSW_REG_RAUHTD_REC_MAX_NUM * MLXSW_REG_RAUHTD_REC_LEN)
+#define MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC 4
+
+static const struct mlxsw_reg_info mlxsw_reg_rauhtd = {
+ .id = MLXSW_REG_RAUHTD_ID,
+ .len = MLXSW_REG_RAUHTD_LEN,
+};
+
+#define MLXSW_REG_RAUHTD_FILTER_A BIT(0)
+#define MLXSW_REG_RAUHTD_FILTER_RIF BIT(3)
+
+/* reg_rauhtd_filter_fields
+ * If a bit is '0', the relevant field is ignored and the dump is done
+ * regardless of the field value.
+ * Bit0 - filter by activity: entry_a
+ * Bit3 - filter by entry RIF: entry_rif
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, filter_fields, 0x00, 0, 8);
+
+enum mlxsw_reg_rauhtd_op {
+ MLXSW_REG_RAUHTD_OP_DUMP,
+ MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR,
+};
+
+/* reg_rauhtd_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, rauhtd, op, 0x04, 24, 2);
+
+/* reg_rauhtd_num_rec
+ * At request: number of records requested.
+ * At response: number of records dumped.
+ * For IPv4, each record has 4 entries at request and up to 4 entries
+ * at response.
+ * Range is 0..MLXSW_REG_RAUHTD_REC_MAX_NUM.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, num_rec, 0x04, 0, 8);
+
+/* reg_rauhtd_entry_a
+ * Dump only if activity has value of entry_a
+ * Reserved if filter_fields bit0 is '0'
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, entry_a, 0x08, 16, 1);
+
+enum mlxsw_reg_rauhtd_type {
+ MLXSW_REG_RAUHTD_TYPE_IPV4,
+ MLXSW_REG_RAUHTD_TYPE_IPV6,
+};
+
+/* reg_rauhtd_type
+ * Dump only if record type is:
+ * 0 - IPv4
+ * 1 - IPv6
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, type, 0x08, 0, 4);
+
+/* reg_rauhtd_entry_rif
+ * Dump only if RIF has value of entry_rif
+ * Reserved if filter_fields bit3 is '0'
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, entry_rif, 0x0C, 0, 16);
+
+static inline void mlxsw_reg_rauhtd_pack(char *payload,
+ enum mlxsw_reg_rauhtd_type type)
+{
+ MLXSW_REG_ZERO(rauhtd, payload);
+ mlxsw_reg_rauhtd_filter_fields_set(payload, MLXSW_REG_RAUHTD_FILTER_A);
+ mlxsw_reg_rauhtd_op_set(payload, MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR);
+ mlxsw_reg_rauhtd_num_rec_set(payload, MLXSW_REG_RAUHTD_REC_MAX_NUM);
+ mlxsw_reg_rauhtd_entry_a_set(payload, 1);
+ mlxsw_reg_rauhtd_type_set(payload, type);
+}
+
+/* reg_rauhtd_ipv4_rec_num_entries
+ * Number of valid entries in this record:
+ * 0 - 1 valid entry
+ * 1 - 2 valid entries
+ * 2 - 3 valid entries
+ * 3 - 4 valid entries
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_rec_num_entries,
+ MLXSW_REG_RAUHTD_BASE_LEN, 28, 2,
+ MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
+
+/* reg_rauhtd_rec_type
+ * Record type.
+ * 0 - IPv4
+ * 1 - IPv6
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, rec_type, MLXSW_REG_RAUHTD_BASE_LEN, 24, 2,
+ MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
+
+#define MLXSW_REG_RAUHTD_IPV4_ENT_LEN 0x8
+
+/* reg_rauhtd_ipv4_ent_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the
+ * specific entry.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1,
+ MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
+
+/* reg_rauhtd_ipv4_ent_rif
+ * Router interface.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0,
+ 16, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
+
+/* reg_rauhtd_ipv4_ent_dip
+ * Destination IPv4 address.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0,
+ 32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false);
+
+static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload,
+ int ent_index, u16 *p_rif,
+ u32 *p_dip)
+{
+ *p_rif = mlxsw_reg_rauhtd_ipv4_ent_rif_get(payload, ent_index);
+ *p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index);
+}
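+
+/* Dump-walk sketch: iterate the IPv4 entries of a RAUHTD response that
+ * was packed with mlxsw_reg_rauhtd_pack() and executed by the caller.
+ * The callback is a placeholder for whatever the consumer does with
+ * each dumped host entry.
+ */
+static inline void mlxsw_example_rauhtd_walk(char *payload,
+					     void (*cb)(u16 rif, u32 dip))
+{
+	int num_entries, i, j;
+	u16 rif;
+	u32 dip;
+
+	for (i = 0; i < mlxsw_reg_rauhtd_num_rec_get(payload); i++) {
+		if (mlxsw_reg_rauhtd_rec_type_get(payload, i) !=
+		    MLXSW_REG_RAUHTD_TYPE_IPV4)
+			continue;
+		/* Hardware starts counting entries at 0, so add one. */
+		num_entries = 1 +
+			mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(payload, i);
+		for (j = 0; j < num_entries; j++) {
+			int ent = i * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + j;
+
+			mlxsw_reg_rauhtd_ent_ipv4_unpack(payload, ent,
+							 &rif, &dip);
+			cb(rif, dip);
+		}
+	}
+}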
+
/* MFCR - Management Fan Control Register
* --------------------------------------
* This register controls the settings of the Fan Speed PWM mechanism.
@@ -3924,6 +5104,26 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "HTGT";
case MLXSW_REG_HPKT_ID:
return "HPKT";
+ case MLXSW_REG_RGCR_ID:
+ return "RGCR";
+ case MLXSW_REG_RITR_ID:
+ return "RITR";
+ case MLXSW_REG_RATR_ID:
+ return "RATR";
+ case MLXSW_REG_RALTA_ID:
+ return "RALTA";
+ case MLXSW_REG_RALST_ID:
+ return "RALST";
+ case MLXSW_REG_RALTB_ID:
+ return "RALTB";
+ case MLXSW_REG_RALUE_ID:
+ return "RALUE";
+ case MLXSW_REG_RAUHT_ID:
+ return "RAUHT";
+ case MLXSW_REG_RALEU_ID:
+ return "RALEU";
+ case MLXSW_REG_RAUHTD_ID:
+ return "RAUHTD";
case MLXSW_REG_MFCR_ID:
return "MFCR";
case MLXSW_REG_MFSC_ID:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 4a7273771028..c812513e079d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -49,7 +49,9 @@
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
+#include <linux/notifier.h>
#include <linux/dcbnl.h>
+#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
@@ -209,23 +211,6 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
-static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid, enum mlxsw_reg_spms_state state)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *spms_pl;
- int err;
-
- spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
- if (!spms_pl)
- return -ENOMEM;
- mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
- mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
- kfree(spms_pl);
- return err;
-}
-
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -247,15 +232,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 swid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pspa_pl[MLXSW_REG_PSPA_LEN];
- mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
+ swid);
+}
+
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable)
{
@@ -305,9 +298,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
-static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *p_module,
- u8 *p_width, u8 *p_lane)
+static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 *p_module,
+ u8 *p_width, u8 *p_lane)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
@@ -322,16 +315,6 @@ static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *p_module,
- u8 *p_width)
-{
- u8 lane;
-
- return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
- p_width, &lane);
-}
-
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 module, u8 width, u8 lane)
{
@@ -410,7 +393,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
}
mlxsw_sp_txhdr_construct(skb, &tx_info);
- len = skb->len;
+ /* TX header is consumed by HW on the way so we shouldn't count its
+ * bytes as being sent.
+ */
+ len = skb->len - MLXSW_TXHDR_LEN;
+
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
@@ -634,94 +621,8 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-static struct mlxsw_sp_vfid *
-mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
-{
- struct mlxsw_sp_vfid *vfid;
-
- list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
- if (vfid->vid == vid)
- return vfid;
- }
-
- return NULL;
-}
-
-static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
-{
- return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
- MLXSW_SP_VFID_PORT_MAX);
-}
-
-static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
-{
- u16 fid = mlxsw_sp_vfid_to_fid(vfid);
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
-{
- u16 fid = mlxsw_sp_vfid_to_fid(vfid);
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
- u16 vid)
-{
- struct device *dev = mlxsw_sp->bus_info->dev;
- struct mlxsw_sp_vfid *vfid;
- u16 n_vfid;
- int err;
-
- n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
- if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
- dev_err(dev, "No available vFIDs\n");
- return ERR_PTR(-ERANGE);
- }
-
- err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
- if (err) {
- dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
- return ERR_PTR(err);
- }
-
- vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
- if (!vfid)
- goto err_allocate_vfid;
-
- vfid->vfid = n_vfid;
- vfid->vid = vid;
-
- list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
- set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);
-
- return vfid;
-
-err_allocate_vfid:
- __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
- return ERR_PTR(-ENOMEM);
-}
-
-static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vfid *vfid)
-{
- clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
- list_del(&vfid->list);
-
- __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
-
- kfree(vfid);
-}
-
static struct mlxsw_sp_port *
-mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_vfid *vfid)
+mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
@@ -739,8 +640,7 @@ mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
- mlxsw_sp_vport->vport.vfid = vfid;
- mlxsw_sp_vport->vport.vid = vfid->vid;
+ mlxsw_sp_vport->vport.vid = vid;
list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
@@ -757,9 +657,8 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_vfid *vfid;
+ bool untagged = vid == 1;
int err;
/* VLAN 0 is added to HW filter when device goes up, but it is
@@ -773,31 +672,10 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
return 0;
}
- vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
- if (!vfid) {
- vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
- if (IS_ERR(vfid)) {
- netdev_err(dev, "Failed to create vFID for VID=%d\n",
- vid);
- return PTR_ERR(vfid);
- }
- }
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
+ mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
if (!mlxsw_sp_vport) {
netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
- err = -ENOMEM;
- goto err_port_vport_create;
- }
-
- if (!vfid->nr_vports) {
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
- true, false);
- if (err) {
- netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
- vfid->vfid);
- goto err_vport_flood_set;
- }
+ return -ENOMEM;
}
/* When adding the first VLAN interface on a bridged port we need to
@@ -812,70 +690,37 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
}
}
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- true,
- mlxsw_sp_vfid_to_fid(vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
- vid, vfid->vfid);
- goto err_port_vid_to_fid_set;
- }
-
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
if (err) {
netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
goto err_port_vid_learning_set;
}
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
if (err) {
netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
vid);
goto err_port_add_vid;
}
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
- MLXSW_REG_SPMS_STATE_FORWARDING);
- if (err) {
- netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
- goto err_port_stp_state_set;
- }
-
- vfid->nr_vports++;
-
return 0;
-err_port_stp_state_set:
- mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
- mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
- mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
-err_port_vid_to_fid_set:
if (list_is_singular(&mlxsw_sp_port->vports_list))
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
- if (!vfid->nr_vports)
- mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
- false);
-err_vport_flood_set:
mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
-err_port_vport_create:
- if (!vfid->nr_vports)
- mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
return err;
}
-int mlxsw_sp_port_kill_vid(struct net_device *dev,
- __be16 __always_unused proto, u16 vid)
+static int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_vfid *vfid;
+ struct mlxsw_sp_fid *f;
int err;
/* VLAN 0 is removed from HW filter when device goes down, but
@@ -890,15 +735,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
}
- vfid = mlxsw_sp_vport->vport.vfid;
-
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
- MLXSW_REG_SPMS_STATE_DISCARDING);
- if (err) {
- netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
- return err;
- }
-
err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
if (err) {
netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
@@ -912,16 +748,12 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
return err;
}
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- false,
- mlxsw_sp_vfid_to_fid(vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
- vid, vfid->vfid);
- return err;
- }
+ /* Drop FID reference. If this was the last reference the
+ * resources will be freed.
+ */
+ f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ if (f && !WARN_ON(!f->leave))
+ f->leave(mlxsw_sp_vport);
/* When removing the last VLAN interface on a bridged port we need to
* transition all active 802.1Q bridge VLANs to use VID to FID
@@ -935,13 +767,8 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
}
}
- vfid->nr_vports--;
mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
- /* Destroy the vFID if no vPorts are assigned to it anymore. */
- if (!vfid->nr_vports)
- mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);
-
return 0;
}
@@ -949,17 +776,11 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
size_t len)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- u8 module, width, lane;
+ u8 module = mlxsw_sp_port->mapping.module;
+ u8 width = mlxsw_sp_port->mapping.width;
+ u8 lane = mlxsw_sp_port->mapping.lane;
int err;
- err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port,
- &module, &width, &lane);
- if (err) {
- netdev_err(dev, "Failed to retrieve module information\n");
- return err;
- }
-
if (!mlxsw_sp_port->split)
err = snprintf(name, len, "p%d", module + 1);
else
@@ -982,6 +803,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
+ .ndo_neigh_construct = mlxsw_sp_router_neigh_construct,
+ .ndo_neigh_destroy = mlxsw_sp_router_neigh_destroy,
.ndo_fdb_add = switchdev_port_fdb_add,
.ndo_fdb_del = switchdev_port_fdb_del,
.ndo_fdb_dump = switchdev_port_fdb_dump,
@@ -1681,8 +1504,8 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width)
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
{
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
@@ -1697,6 +1520,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->split = split;
+ mlxsw_sp_port->mapping.module = module;
+ mlxsw_sp_port->mapping.width = width;
+ mlxsw_sp_port->mapping.lane = lane;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
@@ -1839,45 +1665,6 @@ err_port_active_vlans_alloc:
return err;
}
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
-{
- int err;
-
- err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
- lane);
- if (err)
- return err;
-
- err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
- width);
- if (err)
- goto err_port_create;
-
- return 0;
-
-err_port_create:
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
- return err;
-}
-
-static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
-
- list_for_each_entry_safe(mlxsw_sp_vport, tmp,
- &mlxsw_sp_port->vports_list, vport.list) {
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-
- /* vPorts created for VLAN devices should already be gone
- * by now, since we unregistered the port netdev.
- */
- WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
- mlxsw_sp_port_kill_vid(dev, 0, vid);
- }
-}
-
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
@@ -1888,13 +1675,14 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
- mlxsw_sp_port_vports_fini(mlxsw_sp_port);
+ mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
free_netdev(mlxsw_sp_port->dev);
}
@@ -1909,8 +1697,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
+ u8 module, width, lane;
size_t alloc_size;
- u8 module, width;
int i;
int err;
@@ -1921,13 +1709,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
- &width);
+ &width, &lane);
if (err)
goto err_port_module_info_get;
if (!width)
continue;
mlxsw_sp->port_to_module[i] = module;
- err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
+ lane);
if (err)
goto err_port_create;
}
@@ -1948,12 +1737,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
return local_port - offset;
}
+static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+ u8 module, unsigned int count)
+{
+ u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+ int err, i;
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
+ width, i * width);
+ if (err)
+ goto err_port_module_map;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
+ if (err)
+ goto err_port_swid_set;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+ module, width, i * width);
+ if (err)
+ goto err_port_create;
+ }
+
+ return 0;
+
+err_port_create:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ i = count;
+err_port_swid_set:
+ for (i--; i >= 0; i--)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+ i = count;
+err_port_module_map:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
+ return err;
+}
+
+static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
+ u8 base_port, unsigned int count)
+{
+ u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+ int i;
+
+ /* Split by four means we need to re-create two ports, otherwise
+ * only one.
+ */
+ count = count / 2;
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+ 0);
+ }
+
+ for (i = 0; i < count; i++)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
+ width, 0);
+ }
+}
+
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
u8 module, cur_width, base_port;
int i;
int err;
@@ -1965,18 +1827,14 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return -EINVAL;
}
+ module = mlxsw_sp_port->mapping.module;
+ cur_width = mlxsw_sp_port->mapping.width;
+
if (count != 2 && count != 4) {
netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
-
if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
return -EINVAL;
@@ -2001,25 +1859,16 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
- module, width, i * width);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
- goto err_port_create;
- }
+ err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
+ goto err_port_split_create;
}
return 0;
-err_port_create:
- for (i--; i >= 0; i--)
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
- }
+err_port_split_create:
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return err;
}
@@ -2027,10 +1876,9 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 module, cur_width, base_port;
+ u8 cur_width, base_port;
unsigned int count;
int i;
- int err;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
@@ -2044,12 +1892,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
+ cur_width = mlxsw_sp_port->mapping.width;
count = cur_width == 1 ? 4 : 2;
base_port = mlxsw_sp_cluster_base_port_get(local_port);
@@ -2061,14 +1904,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH,
- 0);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
- }
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return 0;
}
@@ -2083,11 +1919,8 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
mlxsw_sp_port = mlxsw_sp->ports[local_port];
- if (!mlxsw_sp_port) {
- dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
- local_port);
+ if (!mlxsw_sp_port)
return;
- }
status = mlxsw_reg_pude_oper_status_get(pude_pl);
if (status == MLXSW_PORT_OPER_STATUS_UP) {
@@ -2242,6 +2075,31 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
.local_port = MLXSW_PORT_DONT_CARE,
.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
},
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_ARPBC,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_ARPUC,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IP2ME,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
+ },
};
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
@@ -2282,7 +2140,7 @@ err_rx_trap_set:
mlxsw_sp);
err_rx_listener_register:
for (i--; i >= 0; i--) {
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
mlxsw_sp_rx_listener[i].trap_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
@@ -2299,7 +2157,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
int i;
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
mlxsw_sp_rx_listener[i].trap_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
@@ -2378,8 +2236,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->core = mlxsw_core;
mlxsw_sp->bus_info = mlxsw_bus_info;
- INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
- INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
+ INIT_LIST_HEAD(&mlxsw_sp->fids);
+ INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
err = mlxsw_sp_base_mac_get(mlxsw_sp);
@@ -2388,16 +2246,10 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
- err = mlxsw_sp_ports_create(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
- return err;
- }
-
err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
- goto err_event_register;
+ return err;
}
err = mlxsw_sp_traps_init(mlxsw_sp);
@@ -2430,8 +2282,24 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_switchdev_init;
}
+ err = mlxsw_sp_router_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
+ goto err_router_init;
+ }
+
+ err = mlxsw_sp_ports_create(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
+ goto err_ports_create;
+ }
+
return 0;
+err_ports_create:
+ mlxsw_sp_router_fini(mlxsw_sp);
+err_router_init:
+ mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
@@ -2440,20 +2308,24 @@ err_flood_init:
mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
-err_event_register:
- mlxsw_sp_ports_remove(mlxsw_sp);
return err;
}
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ int i;
+ mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
- mlxsw_sp_ports_remove(mlxsw_sp);
+ WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
+ WARN_ON(!list_empty(&mlxsw_sp->fids));
+ for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
@@ -2484,6 +2356,10 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
+ .used_kvd_sizes = 1,
+ .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
+ .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
+ .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
.swid_config = {
{
.used_type = 1,
@@ -2515,16 +2391,590 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.profile = &mlxsw_sp_config_profile,
};
-static int
-mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
+static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+}
+
+static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ if (mlxsw_sp_port_dev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
+ if (mlxsw_sp_port_dev_check(lower_dev))
+ return netdev_priv(lower_dev);
+ }
+ return NULL;
+}
+
+static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
+ return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
+}
+
+static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ if (mlxsw_sp_port_dev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
+ if (mlxsw_sp_port_dev_check(lower_dev))
+ return netdev_priv(lower_dev);
+ }
+ return NULL;
+}
+
+struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ rcu_read_lock();
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
+ if (mlxsw_sp_port)
+ dev_hold(mlxsw_sp_port->dev);
+ rcu_read_unlock();
+ return mlxsw_sp_port;
+}
+
+void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ dev_put(mlxsw_sp_port->dev);
+}
+
+static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
+ unsigned long event)
+{
+ switch (event) {
+ case NETDEV_UP:
+ if (!r)
+ return true;
+ r->ref_count++;
+ return false;
+ case NETDEV_DOWN:
+ if (r && --r->ref_count == 0)
+ return true;
+ /* It is possible we already removed the RIF ourselves
+ * if it was assigned to a netdev that is now a bridge
+ * or LAG slave.
+ */
+ return false;
+ }
+
+ return false;
+}
+
+static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ if (!mlxsw_sp->rifs[i])
+ return i;
+
+ return MLXSW_SP_RIF_MAX;
+}
+
+static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
+ bool *p_lagged, u16 *p_system_port)
+{
+ u8 local_port = mlxsw_sp_vport->local_port;
+
+ *p_lagged = mlxsw_sp_vport->lagged;
+ *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
+}
+
+static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev, u16 rif,
+ bool create)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ bool lagged = mlxsw_sp_vport->lagged;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 system_port;
+
+ mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
+ l3_dev->mtu, l3_dev->dev_addr);
+
+ mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
+ mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
+ mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ f->leave = mlxsw_sp_vport_rif_sp_leave;
+ f->ref_count = 0;
+ f->dev = l3_dev;
+ f->fid = fid;
+
+ return f;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
+{
+ struct mlxsw_sp_rif *r;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ ether_addr_copy(r->addr, l3_dev->dev_addr);
+ r->mtu = l3_dev->mtu;
+ r->ref_count = 1;
+ r->dev = l3_dev;
+ r->rif = rif;
+ r->f = f;
+
+ return r;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_rif *r;
+ u16 fid, rif;
+ int err;
+
+ rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif == MLXSW_SP_RIF_MAX)
+ return ERR_PTR(-ERANGE);
+
+ err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
+ if (err)
+ return ERR_PTR(err);
+
+ fid = mlxsw_sp_rif_sp_to_fid(rif);
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ f = mlxsw_sp_rfid_alloc(fid, l3_dev);
+ if (!f) {
+ err = -ENOMEM;
+ goto err_rfid_alloc;
+ }
+
+ r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
+ if (!r) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+
+ f->r = r;
+ mlxsw_sp->rifs[rif] = r;
+
+ return r;
+
+err_rif_alloc:
+ kfree(f);
+err_rfid_alloc:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+err_rif_fdb_op:
+ mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct mlxsw_sp_rif *r)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct net_device *l3_dev = r->dev;
+ struct mlxsw_sp_fid *f = r->f;
+ u16 fid = f->fid;
+ u16 rif = r->rif;
+
+ mlxsw_sp->rifs[rif] = NULL;
+ f->r = NULL;
+
+ kfree(r);
+
+ kfree(f);
+
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+
+ mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
+}
+
+static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_rif *r;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (!r) {
+ r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+ }
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
+ r->f->ref_count++;
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
+
+ return 0;
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
+ if (--f->ref_count == 0)
+ mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
+}
+
+static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
+ struct net_device *port_dev,
+ unsigned long event, u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_vport))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
+ case NETDEV_DOWN:
+ mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
+ unsigned long event)
+{
+ if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
+ return 0;
+
+ return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
+}
+
+static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
+ struct net_device *lag_dev,
+ unsigned long event, u16 vid)
+{
+ struct net_device *port_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
+ if (mlxsw_sp_port_dev_check(port_dev)) {
+ err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
+ event, vid);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
+ unsigned long event)
+{
+ if (netif_is_bridge_port(lag_dev))
+ return 0;
+
+ return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev)
+{
+ u16 fid;
+
+ if (is_vlan_dev(l3_dev))
+ fid = vlan_dev_vlan_id(l3_dev);
+ else if (mlxsw_sp->master_bridge.dev == l3_dev)
+ fid = 1;
+ else
+ return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
+
+ return mlxsw_sp_fid_find(mlxsw_sp, fid);
+}
+
+static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
+{
+ if (mlxsw_sp_fid_is_vfid(fid))
+ return MLXSW_REG_RITR_FID_IF;
+ else
+ return MLXSW_REG_RITR_VLAN_IF;
+}
+
+static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ u16 fid, u16 rif,
+ bool create)
+{
+ enum mlxsw_reg_ritr_if_type rif_type;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ rif_type = mlxsw_sp_rif_type_get(fid);
+ mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
+ l3_dev->dev_addr);
+ mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ struct mlxsw_sp_fid *f)
+{
+ struct mlxsw_sp_rif *r;
+ u16 rif;
+ int err;
+
+ rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif == MLXSW_SP_RIF_MAX)
+ return -ERANGE;
+
+ err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
+ if (!r) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+
+ f->r = r;
+ mlxsw_sp->rifs[rif] = r;
+
+ netdev_dbg(l3_dev, "RIF=%d created\n", rif);
+
+ return 0;
+
+err_rif_alloc:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+err_rif_fdb_op:
+ mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+ return err;
+}
+
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r)
+{
+ struct net_device *l3_dev = r->dev;
+ struct mlxsw_sp_fid *f = r->f;
+ u16 rif = r->rif;
+
+ mlxsw_sp->rifs[rif] = NULL;
+ f->r = NULL;
+
+ kfree(r);
+
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+
+ mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+
+ netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
+}
+
+static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
+ struct net_device *br_dev,
+ unsigned long event)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+ struct mlxsw_sp_fid *f;
+
+ /* FID can either be an actual FID if the L3 device is the
+ * VLAN-aware bridge or a VLAN device on top. Otherwise, the
+ * L3 device is a VLAN-unaware bridge and we get a vFID.
+ */
+ f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
+ if (WARN_ON(!f))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+ case NETDEV_DOWN:
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
+ unsigned long event)
+{
+ struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ if (mlxsw_sp_port_dev_check(real_dev))
+ return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
+ vid);
+ else if (netif_is_lag_master(real_dev))
+ return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
+ vid);
+ else if (netif_is_bridge_master(real_dev) &&
+ mlxsw_sp->master_bridge.dev == real_dev)
+ return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
+ event);
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *r;
+ int err = 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(r, event))
+ goto out;
+
+ if (mlxsw_sp_port_dev_check(dev))
+ err = mlxsw_sp_inetaddr_port_event(dev, event);
+ else if (netif_is_lag_master(dev))
+ err = mlxsw_sp_inetaddr_lag_event(dev, event);
+ else if (netif_is_bridge_master(dev))
+ err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
+ else if (is_vlan_dev(dev))
+ err = mlxsw_sp_inetaddr_vlan_event(dev, event);
+
+out:
+ return notifier_from_errno(err);
+}
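+
+/* Registration sketch: the handler above is expected to be hooked into
+ * the standard inetaddr notifier chain via register_inetaddr_notifier()
+ * from the driver's init path; the block name here is illustrative.
+ */
+static struct notifier_block mlxsw_sp_example_inetaddr_nb __maybe_unused = {
+	.notifier_call = mlxsw_sp_inetaddr_event,
+};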
+
+static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
+ const char *mac, int mtu)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ int err;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
+ mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *r;
+ int err;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!r)
+ return 0;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
+ if (err)
+ goto err_rif_edit;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ ether_addr_copy(r->addr, dev->dev_addr);
+ r->mtu = dev->mtu;
+
+ netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
+
+ return 0;
+
+err_rif_fdb_op:
+ mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
+err_rif_edit:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
+ return err;
+}
+
+static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
+ u16 fid)
+{
+ if (mlxsw_sp_fid_is_vfid(fid))
+ return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
+ else
+ return test_bit(fid, lag_port->active_vlans);
+}
+
+static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sfdf_pl[MLXSW_REG_SFDF_LEN];
+ u8 local_port = mlxsw_sp_port->local_port;
+ u16 lag_id = mlxsw_sp_port->lag_id;
+ int i, count = 0;
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
- mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
+ if (!mlxsw_sp_port->lagged)
+ return true;
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+ for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ struct mlxsw_sp_port *lag_port;
+
+ lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
+ if (!lag_port || lag_port->local_port == local_port)
+ continue;
+ if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
+ count++;
+ }
+
+ return !count;
}
static int
@@ -2539,17 +2989,8 @@ mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
mlxsw_sp_port->local_port);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
-}
-
-static int
-mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sfdf_pl[MLXSW_REG_SFDF_LEN];
-
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
- mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+ netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
+ mlxsw_sp_port->local_port, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
@@ -2565,71 +3006,64 @@ mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+ netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
+ mlxsw_sp_port->lag_id, fid);
+
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
-static int
-__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
+int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
- int err, last_err = 0;
- u16 vid;
-
- for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
- err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
- if (err)
- last_err = err;
- }
+ if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
+ return 0;
- return last_err;
+ if (mlxsw_sp_port->lagged)
+ return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
+ fid);
+ else
+ return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}
-static int
-__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
+static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
- int err, last_err = 0;
- u16 vid;
+ struct mlxsw_sp_fid *f, *tmp;
- for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
- err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
- if (err)
- last_err = err;
- }
-
- return last_err;
+ list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
+ if (--f->ref_count == 0)
+ mlxsw_sp_fid_destroy(mlxsw_sp, f);
+ else
+ WARN_ON_ONCE(1);
}
-static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
+static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
{
- if (!list_empty(&mlxsw_sp_port->vports_list))
- if (mlxsw_sp_port->lagged)
- return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
- else
- return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
- else
- if (mlxsw_sp_port->lagged)
- return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
- else
- return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
+ return !mlxsw_sp->master_bridge.dev ||
+ mlxsw_sp->master_bridge.dev == br_dev;
}
-static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
+static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
{
- u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
- u16 fid = mlxsw_sp_vfid_to_fid(vfid);
-
- if (mlxsw_sp_vport->lagged)
- return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
- fid);
- else
- return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
+ mlxsw_sp->master_bridge.dev = br_dev;
+ mlxsw_sp->master_bridge.ref_count++;
}
-static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
- return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+ if (--mlxsw_sp->master_bridge.ref_count == 0) {
+ mlxsw_sp->master_bridge.dev = NULL;
+ /* It's possible upper VLAN devices are still holding
+ * references to underlying FIDs. Drop the reference
+ * and release the resources if it was the last one.
+ * If it wasn't, then something bad happened.
+ */
+ mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
+ }
}
-static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *br_dev)
{
struct net_device *dev = mlxsw_sp_port->dev;
int err;
@@ -2643,6 +3077,8 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
if (err)
return err;
+ mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
+
mlxsw_sp_port->learning = 1;
mlxsw_sp_port->learning_sync = 1;
mlxsw_sp_port->uc_flood = 1;
@@ -2651,16 +3087,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
- bool flush_fdb)
+static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct net_device *dev = mlxsw_sp_port->dev;
- if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
- netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
-
mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+ mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
+
mlxsw_sp_port->learning = 0;
mlxsw_sp_port->learning_sync = 0;
mlxsw_sp_port->uc_flood = 0;
@@ -2669,28 +3103,7 @@ static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
/* Add implicit VLAN interface in the device, so that untagged
* packets will be classified to the default vFID.
*/
- return mlxsw_sp_port_add_vid(dev, 0, 1);
-}
-
-static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- return !mlxsw_sp->master_bridge.dev ||
- mlxsw_sp->master_bridge.dev == br_dev;
-}
-
-static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- mlxsw_sp->master_bridge.dev = br_dev;
- mlxsw_sp->master_bridge.ref_count++;
-}
-
-static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- if (--mlxsw_sp->master_bridge.ref_count == 0)
- mlxsw_sp->master_bridge.dev = NULL;
+ mlxsw_sp_port_add_vid(dev, 0, 1);
}
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
@@ -2806,6 +3219,45 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
return -EBUSY;
}
+static void
+mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 lag_id)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_fid *f;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
+ if (WARN_ON(!mlxsw_sp_vport))
+ return;
+
+ /* If vPort is assigned a RIF, then leave it since it's no
+ * longer valid.
+ */
+ f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ if (f)
+ f->leave(mlxsw_sp_vport);
+
+ mlxsw_sp_vport->lag_id = lag_id;
+ mlxsw_sp_vport->lagged = 1;
+}
+
+static void
+mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_fid *f;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
+ if (WARN_ON(!mlxsw_sp_vport))
+ return;
+
+ f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ if (f)
+ f->leave(mlxsw_sp_vport);
+
+ mlxsw_sp_vport->lagged = 0;
+}
+
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev)
{
@@ -2841,6 +3293,9 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lag_id = lag_id;
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
+
+ mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
+
return 0;
err_col_port_enable:
@@ -2851,65 +3306,35 @@ err_col_port_add:
return err;
}
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev,
- bool flush_fdb);
-
-static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *lag_dev)
+static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_upper *lag;
u16 lag_id = mlxsw_sp_port->lag_id;
- int err;
+ struct mlxsw_sp_upper *lag;
if (!mlxsw_sp_port->lagged)
- return 0;
+ return;
lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
WARN_ON(lag->ref_count == 0);
- err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
- if (err)
- return err;
- err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
- if (err)
- return err;
-
- /* In case we leave a LAG device that has bridges built on top,
- * then their teardown sequence is never issued and we need to
- * invoke the necessary cleanup routines ourselves.
- */
- list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
- vport.list) {
- struct net_device *br_dev;
-
- if (!mlxsw_sp_vport->bridged)
- continue;
-
- br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
- }
+ mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
+ mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
if (mlxsw_sp_port->bridged) {
mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
- mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
- mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
}
- if (lag->ref_count == 1) {
- if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
- netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
- err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
- if (err)
- return err;
- }
+ if (lag->ref_count == 1)
+ mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
mlxsw_sp_port->local_port);
mlxsw_sp_port->lagged = 0;
lag->ref_count--;
- return 0;
+
+ mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -2958,42 +3383,25 @@ static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport) {
- WARN_ON(!mlxsw_sp_vport);
+ if (WARN_ON(!mlxsw_sp_vport))
return -EINVAL;
- }
mlxsw_sp_vport->dev = vlan_dev;
return 0;
}
-static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *vlan_dev)
+static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *vlan_dev)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport) {
- WARN_ON(!mlxsw_sp_vport);
- return -EINVAL;
- }
-
- /* When removing a VLAN device while still bridged we should first
- * remove it from the bridge, as we receive the bridge's notification
- * when the vPort is already gone.
- */
- if (mlxsw_sp_vport->bridged) {
- struct net_device *br_dev;
-
- br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
- }
+ if (WARN_ON(!mlxsw_sp_vport))
+ return;
mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
-
- return 0;
}
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
@@ -3003,7 +3411,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *upper_dev;
struct mlxsw_sp *mlxsw_sp;
- int err;
+ int err = 0;
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -3012,73 +3420,56 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!info->master || !info->linking)
+ if (!is_vlan_dev(upper_dev) &&
+ !netif_is_lag_master(upper_dev) &&
+ !netif_is_bridge_master(upper_dev))
+ return -EINVAL;
+ if (!info->linking)
break;
 /* HW limitation forbids putting a port into more than one bridge. */
if (netif_is_bridge_master(upper_dev) &&
!mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
- return NOTIFY_BAD;
+ return -EINVAL;
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
info->upper_info))
- return NOTIFY_BAD;
+ return -EINVAL;
+ if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
+ return -EINVAL;
+ if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
+ !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
+ return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (is_vlan_dev(upper_dev)) {
- if (info->linking) {
+ if (info->linking)
err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
upper_dev);
- if (err) {
- netdev_err(dev, "Failed to link VLAN device\n");
- return NOTIFY_BAD;
- }
- } else {
- err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
- upper_dev);
- if (err) {
- netdev_err(dev, "Failed to unlink VLAN device\n");
- return NOTIFY_BAD;
- }
- }
+ else
+ mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
+ upper_dev);
} else if (netif_is_bridge_master(upper_dev)) {
- if (info->linking) {
- err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
- if (err) {
- netdev_err(dev, "Failed to join bridge\n");
- return NOTIFY_BAD;
- }
- mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
- } else {
- err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
- true);
- mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
- if (err) {
- netdev_err(dev, "Failed to leave bridge\n");
- return NOTIFY_BAD;
- }
- }
+ if (info->linking)
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ upper_dev);
+ else
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
} else if (netif_is_lag_master(upper_dev)) {
- if (info->linking) {
+ if (info->linking)
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
upper_dev);
- if (err) {
- netdev_err(dev, "Failed to join link aggregation\n");
- return NOTIFY_BAD;
- }
- } else {
- err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
- upper_dev);
- if (err) {
- netdev_err(dev, "Failed to leave link aggregation\n");
- return NOTIFY_BAD;
- }
- }
+ else
+ mlxsw_sp_port_lag_leave(mlxsw_sp_port,
+ upper_dev);
+ } else {
+ err = -EINVAL;
+ WARN_ON(1);
}
break;
}
- return NOTIFY_DONE;
+ return err;
}
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
@@ -3102,7 +3493,7 @@ static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
break;
}
- return NOTIFY_DONE;
+ return 0;
}
static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
@@ -3116,7 +3507,7 @@ static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
}
- return NOTIFY_DONE;
+ return 0;
}
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
@@ -3129,218 +3520,230 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
netdev_for_each_lower_dev(lag_dev, dev, iter) {
if (mlxsw_sp_port_dev_check(dev)) {
ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
- if (ret == NOTIFY_BAD)
+ if (ret)
return ret;
}
}
- return NOTIFY_DONE;
+ return 0;
}
-static struct mlxsw_sp_vfid *
-mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev)
+static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev)
{
- struct mlxsw_sp_vfid *vfid;
+ u16 fid = vlan_dev_vlan_id(vlan_dev);
+ struct mlxsw_sp_fid *f;
- list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
- if (vfid->br_dev == br_dev)
- return vfid;
+ f = mlxsw_sp_fid_find(mlxsw_sp, fid);
+ if (!f) {
+ f = mlxsw_sp_fid_create(mlxsw_sp, fid);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
}
- return NULL;
+ f->ref_count++;
+
+ return 0;
+}
+
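+/* Drop the FID reference taken by mlxsw_sp_master_bridge_vlan_link().
+ * The FID of a VLAN upper of the VLAN-aware bridge equals its VLAN ID,
+ * so linking and unlinking only ref-count that FID; a RIF still bound
+ * to the FID is destroyed before the FID itself goes away.
+ */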
+static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev)
+{
+ u16 fid = vlan_dev_vlan_id(vlan_dev);
+ struct mlxsw_sp_fid *f;
+
+ f = mlxsw_sp_fid_find(mlxsw_sp, fid);
+ if (f && f->r)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ if (f && --f->ref_count == 0)
+ mlxsw_sp_fid_destroy(mlxsw_sp, f);
}
-static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
+static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
+ unsigned long event, void *ptr)
{
- return vfid - MLXSW_SP_VFID_PORT_MAX;
+ struct netdev_notifier_changeupper_info *info;
+ struct net_device *upper_dev;
+ struct mlxsw_sp *mlxsw_sp;
+ int err;
+
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ return 0;
+ if (br_dev != mlxsw_sp->master_bridge.dev)
+ return 0;
+
+ info = ptr;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!is_vlan_dev(upper_dev))
+ break;
+ if (info->linking) {
+ err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
+ upper_dev);
+ if (err)
+ return err;
+ } else {
+ mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
+ }
+ break;
+ }
+
+ return 0;
}
-static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
+static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
- return MLXSW_SP_VFID_PORT_MAX + br_vfid;
+ return find_first_zero_bit(mlxsw_sp->vfids.mapped,
+ MLXSW_SP_VFID_MAX);
}
-static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
- return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
- MLXSW_SP_VFID_BR_MAX);
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
+static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
{
struct device *dev = mlxsw_sp->bus_info->dev;
- struct mlxsw_sp_vfid *vfid;
- u16 n_vfid;
+ struct mlxsw_sp_fid *f;
+ u16 vfid, fid;
int err;
- n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
- if (n_vfid == MLXSW_SP_VFID_MAX) {
+ vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
+ if (vfid == MLXSW_SP_VFID_MAX) {
dev_err(dev, "No available vFIDs\n");
return ERR_PTR(-ERANGE);
}
- err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
+ fid = mlxsw_sp_vfid_to_fid(vfid);
+ err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
if (err) {
- dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
+ dev_err(dev, "Failed to create FID=%d\n", fid);
return ERR_PTR(err);
}
- vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
- if (!vfid)
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
goto err_allocate_vfid;
- vfid->vfid = n_vfid;
- vfid->br_dev = br_dev;
+ f->leave = mlxsw_sp_vport_vfid_leave;
+ f->fid = fid;
+ f->dev = br_dev;
- list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
- set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
+ list_add(&f->list, &mlxsw_sp->vfids.list);
+ set_bit(vfid, mlxsw_sp->vfids.mapped);
- return vfid;
+ return f;
err_allocate_vfid:
- __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
+ mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
return ERR_PTR(-ENOMEM);
}
-static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vfid *vfid)
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *f)
{
- u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
+ u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
+ u16 fid = f->fid;
+
+ clear_bit(vfid, mlxsw_sp->vfids.mapped);
+ list_del(&f->list);
- clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
- list_del(&vfid->list);
+ if (f->r)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
- __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
+ kfree(f);
- kfree(vfid);
+ mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev,
- bool flush_fdb)
+static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
+ bool valid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
- struct net_device *dev = mlxsw_sp_vport->dev;
- struct mlxsw_sp_vfid *vfid, *new_vfid;
- int err;
-
- vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
- if (!vfid) {
- WARN_ON(!vfid);
- return -EINVAL;
- }
-
- /* We need a vFID to go back to after leaving the bridge's vFID. */
- new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
- if (!new_vfid) {
- new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
- if (IS_ERR(new_vfid)) {
- netdev_err(dev, "Failed to create vFID for VID=%d\n",
- vid);
- return PTR_ERR(new_vfid);
- }
- }
- /* Invalidate existing {Port, VID} to vFID mapping and create a new
- * one for the new vFID.
- */
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- false,
- mlxsw_sp_vfid_to_fid(vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
- vfid->vfid);
- goto err_port_vid_to_fid_invalidate;
- }
+ return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
+ vid);
+}
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- true,
- mlxsw_sp_vfid_to_fid(new_vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
- new_vfid->vfid);
- goto err_port_vid_to_fid_validate;
- }
+static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp_fid *f;
+ int err;
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
- if (err) {
- netdev_err(dev, "Failed to disable learning\n");
- goto err_port_vid_learning_set;
+ f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
+ if (!f) {
+ f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
}
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
- false);
- if (err) {
- netdev_err(dev, "Failed clear to clear flooding\n");
+ err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
+ if (err)
goto err_vport_flood_set;
- }
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
- MLXSW_REG_SPMS_STATE_FORWARDING);
- if (err) {
- netdev_err(dev, "Failed to set STP state\n");
- goto err_port_stp_state_set;
- }
-
- if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
- netdev_err(dev, "Failed to flush FDB\n");
+ err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
+ if (err)
+ goto err_vport_fid_map;
- /* Switch between the vFIDs and destroy the old one if needed. */
- new_vfid->nr_vports++;
- mlxsw_sp_vport->vport.vfid = new_vfid;
- vfid->nr_vports--;
- if (!vfid->nr_vports)
- mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
+ f->ref_count++;
- mlxsw_sp_vport->learning = 0;
- mlxsw_sp_vport->learning_sync = 0;
- mlxsw_sp_vport->uc_flood = 0;
- mlxsw_sp_vport->bridged = 0;
+ netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
return 0;
-err_port_stp_state_set:
+err_vport_fid_map:
+ mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
-err_port_vid_learning_set:
-err_port_vid_to_fid_validate:
-err_port_vid_to_fid_invalidate:
- /* Rollback vFID only if new. */
- if (!new_vfid->nr_vports)
- mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
+ if (!f->ref_count)
+ mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
return err;
}
+static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+
+ mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
+
+ mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
+
+ mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
+ if (--f->ref_count == 0)
+ mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
+}
+
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *br_dev)
{
- struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
struct net_device *dev = mlxsw_sp_vport->dev;
- struct mlxsw_sp_vfid *vfid;
int err;
- vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
- if (!vfid) {
- vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
- if (IS_ERR(vfid)) {
- netdev_err(dev, "Failed to create bridge vFID\n");
- return PTR_ERR(vfid);
- }
- }
+ if (f && !WARN_ON(!f->leave))
+ f->leave(mlxsw_sp_vport);
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
+ err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
if (err) {
- netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
- vfid->vfid);
- goto err_port_flood_set;
+ netdev_err(dev, "Failed to join vFID\n");
+ return err;
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
@@ -3349,38 +3752,6 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
goto err_port_vid_learning_set;
}
- /* We need to invalidate existing {Port, VID} to vFID mapping and
- * create a new one for the bridge's vFID.
- */
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- false,
- mlxsw_sp_vfid_to_fid(old_vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
- old_vfid->vfid);
- goto err_port_vid_to_fid_invalidate;
- }
-
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- true,
- mlxsw_sp_vfid_to_fid(vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
- vfid->vfid);
- goto err_port_vid_to_fid_validate;
- }
-
- /* Switch between the vFIDs and destroy the old one if needed. */
- vfid->nr_vports++;
- mlxsw_sp_vport->vport.vfid = vfid;
- old_vfid->nr_vports--;
- if (!old_vfid->nr_vports)
- mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
-
mlxsw_sp_vport->learning = 1;
mlxsw_sp_vport->learning_sync = 1;
mlxsw_sp_vport->uc_flood = 1;
@@ -3388,20 +3759,25 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
return 0;
-err_port_vid_to_fid_validate:
- mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
- mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
-err_port_vid_to_fid_invalidate:
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
- mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
-err_port_flood_set:
- if (!vfid->nr_vports)
- mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
+ mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
return err;
}
+static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
+
+ mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
+
+ mlxsw_sp_vport->learning = 0;
+ mlxsw_sp_vport->learning_sync = 0;
+ mlxsw_sp_vport->uc_flood = 0;
+ mlxsw_sp_vport->bridged = 0;
+}
+
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
const struct net_device *br_dev)
@@ -3410,7 +3786,9 @@ mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
vport.list) {
- if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
+ struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
+
+ if (dev && dev == br_dev)
return false;
}
@@ -3425,56 +3803,39 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
struct netdev_notifier_changeupper_info *info = ptr;
struct mlxsw_sp_port *mlxsw_sp_vport;
struct net_device *upper_dev;
- int err;
+ int err = 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!info->master || !info->linking)
- break;
if (!netif_is_bridge_master(upper_dev))
- return NOTIFY_BAD;
+ return -EINVAL;
+ if (!info->linking)
+ break;
/* We can't have multiple VLAN interfaces configured on
* the same port and being members in the same bridge.
*/
if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
upper_dev))
- return NOTIFY_BAD;
+ return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
- if (!info->master)
- break;
if (info->linking) {
- if (!mlxsw_sp_vport) {
- WARN_ON(!mlxsw_sp_vport);
- return NOTIFY_BAD;
- }
+ if (WARN_ON(!mlxsw_sp_vport))
+ return -EINVAL;
err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
upper_dev);
- if (err) {
- netdev_err(dev, "Failed to join bridge\n");
- return NOTIFY_BAD;
- }
} else {
- /* We ignore bridge's unlinking notifications if vPort
- * is gone, since we already left the bridge when the
- * VLAN device was unlinked from the real device.
- */
if (!mlxsw_sp_vport)
- return NOTIFY_DONE;
- err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
- upper_dev, true);
- if (err) {
- netdev_err(dev, "Failed to leave bridge\n");
- return NOTIFY_BAD;
- }
+ return 0;
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
}
}
- return NOTIFY_DONE;
+ return err;
}
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
@@ -3489,12 +3850,12 @@ static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
if (mlxsw_sp_port_dev_check(dev)) {
ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
vid);
- if (ret == NOTIFY_BAD)
+ if (ret)
return ret;
}
}
- return NOTIFY_DONE;
+ return 0;
}
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
@@ -3510,35 +3871,44 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
vid);
- return NOTIFY_DONE;
+ return 0;
}
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int err = 0;
- if (mlxsw_sp_port_dev_check(dev))
- return mlxsw_sp_netdevice_port_event(dev, event, ptr);
-
- if (netif_is_lag_master(dev))
- return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
+ if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
+ err = mlxsw_sp_netdevice_router_port_event(dev);
+ else if (mlxsw_sp_port_dev_check(dev))
+ err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
+ else if (netif_is_lag_master(dev))
+ err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
+ else if (netif_is_bridge_master(dev))
+ err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
+ else if (is_vlan_dev(dev))
+ err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
- if (is_vlan_dev(dev))
- return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
-
- return NOTIFY_DONE;
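+	/* notifier_from_errno() turns 0 into NOTIFY_OK and a negative
+	 * errno into NOTIFY_BAD with the error encoded, allowing the
+	 * notifier core to hand it back via notifier_to_errno().
+	 */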
+ return notifier_from_errno(err);
}
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
.notifier_call = mlxsw_sp_netdevice_event,
};
+static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_inetaddr_event,
+ .priority = 10, /* Must be called before FIB notifier block */
+};
+
static int __init mlxsw_sp_module_init(void)
{
int err;
register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
err = mlxsw_core_driver_register(&mlxsw_sp_driver);
if (err)
goto err_core_driver_register;
@@ -3552,6 +3922,7 @@ err_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+ unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index e2c022d3e2f3..ef4ac8987a2a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -39,19 +39,22 @@
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
+#include <linux/in6.h>
#include <net/switchdev.h>
#include "port.h"
#include "core.h"
#define MLXSW_SP_VFID_BASE VLAN_N_VID
-#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */
-#define MLXSW_SP_VFID_BR_MAX 6144 /* Bridged VLAN interfaces */
-#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX)
+#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
+
+#define MLXSW_SP_RFID_BASE 15360
+#define MLXSW_SP_RIF_MAX 800
#define MLXSW_SP_LAG_MAX 64
#define MLXSW_SP_PORT_PER_LAG_MAX 16
@@ -60,6 +63,12 @@
#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
+#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
+#define MLXSW_SP_LPM_TREE_MAX 22
+#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
+
+#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256
+
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
#define MLXSW_SP_BYTES_PER_CELL 96
@@ -67,6 +76,10 @@
#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
+#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
+#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */
+#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */
+
/* Maximum delay buffer needed in case of PAUSE frames, in cells.
* Assumes 100m cable and maximum MTU.
*/
@@ -87,12 +100,22 @@ struct mlxsw_sp_upper {
unsigned int ref_count;
};
-struct mlxsw_sp_vfid {
+struct mlxsw_sp_fid {
+ void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
struct list_head list;
- u16 nr_vports;
- u16 vfid; /* Starting at 0 */
- struct net_device *br_dev;
- u16 vid;
+ unsigned int ref_count;
+ struct net_device *dev;
+ struct mlxsw_sp_rif *r;
+ u16 fid;
+};
+
+struct mlxsw_sp_rif {
+ struct net_device *dev;
+ unsigned int ref_count;
+ struct mlxsw_sp_fid *f;
+ unsigned char addr[ETH_ALEN];
+ int mtu;
+ u16 rif;
};
struct mlxsw_sp_mid {
@@ -115,7 +138,17 @@ static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
- return fid >= MLXSW_SP_VFID_BASE;
+ return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
+}
+
+static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
+{
+ return fid >= MLXSW_SP_RFID_BASE;
+}
+
+static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
+{
+ return MLXSW_SP_RFID_BASE + rif;
}
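+/* A sketch of the FID space as carved up by the constants above (not an
+ * authoritative map): [1, 4095] are VLAN FIDs of the VLAN-aware bridge,
+ * [4096, 10751] are vFIDs backing VLAN-unaware bridges and [15360,
+ * 16159] are rFIDs used by router interfaces.
+ */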
struct mlxsw_sp_sb_pr {
@@ -152,20 +185,60 @@ struct mlxsw_sp_sb {
} ports[MLXSW_PORT_MAX_PORTS];
};
-struct mlxsw_sp {
+#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
+
+struct mlxsw_sp_prefix_usage {
+ DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
+};
+
+enum mlxsw_sp_l3proto {
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_L3_PROTO_IPV6,
+};
+
+struct mlxsw_sp_lpm_tree {
+ u8 id; /* tree ID */
+ unsigned int ref_count;
+ enum mlxsw_sp_l3proto proto;
+ struct mlxsw_sp_prefix_usage prefix_usage;
+};
+
+struct mlxsw_sp_fib;
+
+struct mlxsw_sp_vr {
+ u16 id; /* virtual router ID */
+ bool used;
+ enum mlxsw_sp_l3proto proto;
+ u32 tb_id; /* kernel fib table id */
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ struct mlxsw_sp_fib *fib;
+};
+
+struct mlxsw_sp_router {
+ struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
+ struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
+ struct rhashtable neigh_ht;
struct {
- struct list_head list;
- unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_PORT_MAX)];
- } port_vfids;
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+ } neighs_update;
+ struct delayed_work nexthop_probe_dw;
+#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
+ struct list_head nexthop_group_list;
+ struct list_head nexthop_neighs_list;
+};
+
+struct mlxsw_sp {
struct {
struct list_head list;
- unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_BR_MAX)];
- } br_vfids;
+ DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
+ } vfids;
struct {
struct list_head list;
- unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_MID_MAX)];
+ DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
} br_mids;
- unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
+ struct list_head fids; /* VLAN-aware bridge FIDs */
+ struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX];
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
@@ -183,6 +256,10 @@ struct mlxsw_sp {
struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
struct mlxsw_sp_sb sb;
+ struct mlxsw_sp_router router;
+ struct {
+ DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
+ } kvdl;
};
static inline struct mlxsw_sp_upper *
@@ -217,7 +294,7 @@ struct mlxsw_sp_port {
u16 lag_id;
struct {
struct list_head list;
- struct mlxsw_sp_vfid *vfid;
+ struct mlxsw_sp_fid *f;
u16 vid;
} vport;
struct {
@@ -229,6 +306,11 @@ struct mlxsw_sp_port {
struct ieee_maxrate *maxrate;
struct ieee_pfc *pfc;
} dcb;
+ struct {
+ u8 module;
+ u8 width;
+ u8 lane;
+ } mapping;
/* 802.1Q bridge VLANs */
unsigned long *active_vlans;
unsigned long *untagged_vlans;
@@ -236,6 +318,9 @@ struct mlxsw_sp_port {
struct list_head vports_list;
};
+struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
+void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
+
static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -254,28 +339,38 @@ mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}
+static inline u16
+mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ return mlxsw_sp_vport->vport.vid;
+}
+
static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
- return mlxsw_sp_port->vport.vfid;
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+
+ return vid != 0;
}
-static inline struct net_device *
-mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct mlxsw_sp_fid *f)
{
- return mlxsw_sp_vport->vport.vfid->br_dev;
+ mlxsw_sp_vport->vport.f = f;
}
-static inline u16
-mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline struct mlxsw_sp_fid *
+mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
- return mlxsw_sp_vport->vport.vid;
+ return mlxsw_sp_vport->vport.f;
}
-static inline u16
-mlxsw_sp_vport_vfid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline struct net_device *
+mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
- return mlxsw_sp_vport->vport.vfid->vfid;
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ return f ? f->dev : NULL;
}
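+/* A vPort is a {port, VID} pair, represented by a struct mlxsw_sp_port
+ * whose VID is non-zero. Its FID pointer is set only while the vPort
+ * is a member of a FID (e.g. while bridged), so callers of
+ * mlxsw_sp_vport_dev_get() must handle a NULL return.
+ */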
static inline struct mlxsw_sp_port *
@@ -293,20 +388,60 @@ mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
}
static inline struct mlxsw_sp_port *
-mlxsw_sp_port_vport_find_by_vfid(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vfid)
+mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
vport.list) {
- if (mlxsw_sp_vport_vfid_get(mlxsw_sp_vport) == vfid)
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ if (f && f->fid == fid)
return mlxsw_sp_vport;
}
return NULL;
}
+static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
+ u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+
+ list_for_each_entry(f, &mlxsw_sp->fids, list)
+ if (f->fid == fid)
+ return f;
+
+ return NULL;
+}
+
+static inline struct mlxsw_sp_fid *
+mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev)
+{
+ struct mlxsw_sp_fid *f;
+
+ list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
+ if (f->dev == br_dev)
+ return f;
+
+ return NULL;
+}
+
+static inline struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
+ return mlxsw_sp->rifs[i];
+
+ return NULL;
+}
+
enum mlxsw_sp_flood_table {
MLXSW_SP_FLOOD_TABLE_UC,
MLXSW_SP_FLOOD_TABLE_BM,
@@ -359,12 +494,17 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
u16 vid);
-int mlxsw_sp_port_kill_vid(struct net_device *dev,
- __be16 __always_unused proto, u16 vid);
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
- bool set, bool only_uc);
+int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
+ bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
+int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
+ bool adding);
+struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
+void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -394,4 +534,19 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
#endif
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans);
+int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4);
+int mlxsw_sp_router_neigh_construct(struct net_device *dev,
+ struct neighbour *n);
+void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
+ struct neighbour *n);
+
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
new file mode 100644
index 000000000000..ac321e8e5c1a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -0,0 +1,91 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+
+#define MLXSW_SP_KVDL_SINGLE_BASE 0
+#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
+#define MLXSW_SP_KVDL_CHUNKS_BASE \
+ (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
+#define MLXSW_SP_KVDL_CHUNKS_SIZE \
+ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE)
+#define MLXSW_SP_CHUNK_MAX 32
+
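+/* The KVD linear space is statically partitioned: entries [0, 16383]
+ * serve single-entry allocations, while entries [16384, 65535] serve
+ * allocations of up to MLXSW_SP_CHUNK_MAX entries, always consuming a
+ * full chunk of 32 entries.
+ */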
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count)
+{
+ int entry_index;
+ int size;
+ int type_base;
+ int type_size;
+ int type_entries;
+
+ if (entry_count == 0 || entry_count > MLXSW_SP_CHUNK_MAX) {
+ return -EINVAL;
+ } else if (entry_count == 1) {
+ type_base = MLXSW_SP_KVDL_SINGLE_BASE;
+ type_size = MLXSW_SP_KVDL_SINGLE_SIZE;
+ type_entries = 1;
+ } else {
+ type_base = MLXSW_SP_KVDL_CHUNKS_BASE;
+ type_size = MLXSW_SP_KVDL_CHUNKS_SIZE;
+ type_entries = MLXSW_SP_CHUNK_MAX;
+ }
+
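+	/* First-fit search. All allocations within a region are of the
+	 * same size (one entry for singles, MLXSW_SP_CHUNK_MAX entries
+	 * for chunks), so the first clear bit always starts a hole big
+	 * enough for type_entries entries.
+	 */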
+ entry_index = type_base;
+ size = type_base + type_size;
+ for_each_clear_bit_from(entry_index, mlxsw_sp->kvdl.usage, size) {
+ int i;
+
+ for (i = 0; i < type_entries; i++)
+ set_bit(entry_index + i, mlxsw_sp->kvdl.usage);
+ return entry_index;
+ }
+ return -ENOBUFS;
+}
+
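+/* mlxsw_sp_kvdl_free() is the counterpart of mlxsw_sp_kvdl_alloc().
+ * Callers only need to keep the returned index; the region it falls in,
+ * and thus the number of entries to release, is recovered from the
+ * index itself.
+ */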
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
+{
+ int type_entries;
+ int i;
+
+ if (entry_index < MLXSW_SP_KVDL_CHUNKS_BASE)
+ type_entries = 1;
+ else
+ type_entries = MLXSW_SP_CHUNK_MAX;
+ for (i = 0; i < type_entries; i++)
+ clear_bit(entry_index + i, mlxsw_sp->kvdl.usage);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
new file mode 100644
index 000000000000..e084ea5448ac
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -0,0 +1,1814 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/rhashtable.h>
+#include <linux/bitops.h>
+#include <linux/in6.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include <net/neighbour.h>
+#include <net/arp.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+
+#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
+ for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
+
+static bool
+mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
+ struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+ unsigned char prefix;
+
+ mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
+ if (!test_bit(prefix, prefix_usage2->b))
+ return false;
+ }
+ return true;
+}
+
+static bool
+mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
+ struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+ return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
+}
+
+static bool
+mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
+{
+	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 }};
+
+ return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
+}
+
+static void
+mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
+ struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+ memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
+}
+
+static void
+mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
+{
+ memset(prefix_usage, 0, sizeof(*prefix_usage));
+}
+
+static void
+mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
+ unsigned char prefix_len)
+{
+ set_bit(prefix_len, prefix_usage->b);
+}
+
+static void
+mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
+ unsigned char prefix_len)
+{
+ clear_bit(prefix_len, prefix_usage->b);
+}
+
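+/* Example: a FIB holding 10.0.0.0/8 and 10.1.0.0/16 routes produces a
+ * prefix usage with bits 8 and 16 set. An existing LPM tree can serve a
+ * request whenever the requested usage is a subset of the tree's usage.
+ */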
+struct mlxsw_sp_fib_key {
+ unsigned char addr[sizeof(struct in6_addr)];
+ unsigned char prefix_len;
+};
+
+enum mlxsw_sp_fib_entry_type {
+ MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
+ MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
+ MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
+};
+
+struct mlxsw_sp_nexthop_group;
+
+struct mlxsw_sp_fib_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_fib_key key;
+ enum mlxsw_sp_fib_entry_type type;
+ u8 added:1;
+ u16 rif; /* used for action local */
+ struct mlxsw_sp_vr *vr;
+ struct list_head nexthop_group_node;
+ struct mlxsw_sp_nexthop_group *nh_group;
+};
+
+struct mlxsw_sp_fib {
+ struct rhashtable ht;
+ unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
+ struct mlxsw_sp_prefix_usage prefix_usage;
+};
+
+static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_fib_key),
+ .automatic_shrinking = true,
+};
+
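+/* FIB entries are keyed by {address, prefix length}. The address is
+ * zero-padded to the size of an IPv6 address, so one key layout and one
+ * set of hash table parameters can serve both L3 protocols.
+ */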
+static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ unsigned char prefix_len = fib_entry->key.prefix_len;
+ int err;
+
+ err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
+ mlxsw_sp_fib_ht_params);
+ if (err)
+ return err;
+ if (fib->prefix_ref_count[prefix_len]++ == 0)
+ mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
+ return 0;
+}
+
+static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ unsigned char prefix_len = fib_entry->key.prefix_len;
+
+ if (--fib->prefix_ref_count[prefix_len] == 0)
+ mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+ rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
+ mlxsw_sp_fib_ht_params);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
+ size_t addr_len, unsigned char prefix_len)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+
+ fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
+ if (!fib_entry)
+ return NULL;
+ memcpy(fib_entry->key.addr, addr, addr_len);
+ fib_entry->key.prefix_len = prefix_len;
+ return fib_entry;
+}
+
+static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
+{
+ kfree(fib_entry);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
+ size_t addr_len, unsigned char prefix_len)
+{
+	struct mlxsw_sp_fib_key key = {{ 0 }};
+
+ memcpy(key.addr, addr, addr_len);
+ key.prefix_len = prefix_len;
+ return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
+}
+
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
+{
+ struct mlxsw_sp_fib *fib;
+ int err;
+
+ fib = kzalloc(sizeof(*fib), GFP_KERNEL);
+ if (!fib)
+ return ERR_PTR(-ENOMEM);
+ err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+ return fib;
+
+err_rhashtable_init:
+ kfree(fib);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
+{
+ rhashtable_destroy(&fib->ht);
+ kfree(fib);
+}
+
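+/* When one_reserved is set, the search below skips one otherwise unused
+ * tree, keeping it in reserve. Virtual router creation passes true so
+ * that a later LPM tree replacement can always allocate a new tree
+ * before the old one is released.
+ */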
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
+{
+	struct mlxsw_sp_lpm_tree *lpm_tree;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ if (lpm_tree->ref_count == 0) {
+ if (one_reserved)
+ one_reserved = false;
+ else
+ return lpm_tree;
+ }
+ }
+ return NULL;
+}
+
+static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
+
+ mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+}
+
+static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
+
+ mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+}
+
+static int
+mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_prefix_usage *prefix_usage,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ char ralst_pl[MLXSW_REG_RALST_LEN];
+ u8 root_bin = 0;
+ u8 prefix;
+ u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
+
+ mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
+ root_bin = prefix;
+
+ mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
+ mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
+ if (prefix == 0)
+ continue;
+ mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
+ last_prefix = prefix;
+ }
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+}
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_prefix_usage *prefix_usage,
+ enum mlxsw_sp_l3proto proto, bool one_reserved)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int err;
+
+ lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
+ if (!lpm_tree)
+ return ERR_PTR(-EBUSY);
+ lpm_tree->proto = proto;
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
+ if (err)
+ return ERR_PTR(err);
+
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
+ lpm_tree);
+ if (err)
+ goto err_left_struct_set;
+ return lpm_tree;
+
+err_left_struct_set:
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+ return ERR_PTR(err);
+}
+
+static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+}
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_prefix_usage *prefix_usage,
+ enum mlxsw_sp_l3proto proto, bool one_reserved)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ if (lpm_tree->proto == proto &&
+ mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
+ prefix_usage))
+ goto inc_ref_count;
+ }
+ lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
+ proto, one_reserved);
+ if (IS_ERR(lpm_tree))
+ return lpm_tree;
+
+inc_ref_count:
+ lpm_tree->ref_count++;
+ return lpm_tree;
+}
+
+static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ if (--lpm_tree->ref_count == 0)
+ return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ return 0;
+}
+
+static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
+ }
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ if (!vr->used)
+ return vr;
+ }
+ return NULL;
+}
+
+static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
+{
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+}
+
+static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
+{
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+
+	/* Bind to tree 0, which is the default */
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+}
+
+static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
+{
+	/* For our purposes, squash the local and main tables into one */
+ if (tb_id == RT_TABLE_LOCAL)
+ tb_id = RT_TABLE_MAIN;
+ return tb_id;
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
+ u32 tb_id,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ tb_id = mlxsw_sp_fix_tb_id(tb_id);
+ for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
+ return vr;
+ }
+ return NULL;
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
+ unsigned char prefix_len,
+ u32 tb_id,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
+ if (!vr)
+ return ERR_PTR(-EBUSY);
+ vr->fib = mlxsw_sp_fib_create();
+ if (IS_ERR(vr->fib))
+ return ERR_CAST(vr->fib);
+
+ vr->proto = proto;
+ vr->tb_id = tb_id;
+ mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ proto, true);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_tree_get;
+ }
+ vr->lpm_tree = lpm_tree;
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+ if (err)
+ goto err_tree_bind;
+
+ vr->used = true;
+ return vr;
+
+err_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+err_tree_get:
+ mlxsw_sp_fib_destroy(vr->fib);
+
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
+{
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+ mlxsw_sp_fib_destroy(vr->fib);
+ vr->used = false;
+}
+
+static int
+mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+ struct mlxsw_sp_prefix_usage *req_prefix_usage)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+
+ if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
+ &vr->lpm_tree->prefix_usage))
+ return 0;
+
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
+ vr->proto, false);
+ if (IS_ERR(lpm_tree)) {
+		/* We failed to get a tree according to the required
+		 * prefix usage. However, the current tree might still be
+		 * good for us if our requirement is a subset of the
+		 * prefixes used in the tree.
+		 */
+ if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
+ &vr->lpm_tree->prefix_usage))
+ return 0;
+ return PTR_ERR(lpm_tree);
+ }
+
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+ vr->lpm_tree = lpm_tree;
+ return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+}
+
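+/* Get-or-create semantics: reuse the virtual router already bound to
+ * this table, making sure its LPM tree covers the new prefix length,
+ * or grab an unused virtual router otherwise.
+ */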
+static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned char prefix_len,
+ u32 tb_id,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ tb_id = mlxsw_sp_fix_tb_id(tb_id);
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
+ if (!vr) {
+ vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
+ if (IS_ERR(vr))
+ return vr;
+ } else {
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
+ &vr->fib->prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
+ /* Need to replace LPM tree in case new prefix is required. */
+ err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
+ &req_prefix_usage);
+ if (err)
+ return ERR_PTR(err);
+ }
+ return vr;
+}
+
+static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
+{
+ /* Destroy the virtual router entity in case the associated FIB is
+ * empty and allow it to be used for other tables in the future.
+ * Otherwise, check whether some prefix usage disappeared and change
+ * the tree if that is the case. Note that in case a new, smaller tree
+ * cannot be allocated, the original one will be kept in use.
+ */
+ if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
+ mlxsw_sp_vr_destroy(mlxsw_sp, vr);
+ else
+ mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
+ &vr->fib->prefix_usage);
+}
+
+static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ vr->id = i;
+ }
+}
+
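+/* Neighbour entries are hashed by (IP address, netdev). The address is
+ * sized for IPv6 so the same key layout can accommodate both protocols.
+ */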
+struct mlxsw_sp_neigh_key {
+ unsigned char addr[sizeof(struct in6_addr)];
+ struct net_device *dev;
+};
+
+struct mlxsw_sp_neigh_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_neigh_key key;
+ u16 rif;
+ struct neighbour *n;
+ bool offloaded;
+ struct delayed_work dw;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ unsigned char ha[ETH_ALEN];
+ struct list_head nexthop_list; /* list of nexthops using
+ * this neigh entry
+ */
+ struct list_head nexthop_neighs_list_node;
+};
+
+static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_neigh_key),
+};
+
+static int
+mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
+ &neigh_entry->ht_node,
+ mlxsw_sp_neigh_ht_params);
+}
+
+static void
+mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
+ &neigh_entry->ht_node,
+ mlxsw_sp_neigh_ht_params);
+}
+
+static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
+
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len,
+ struct net_device *dev, u16 rif,
+ struct neighbour *n)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+
+ neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
+ if (!neigh_entry)
+ return NULL;
+ memcpy(neigh_entry->key.addr, addr, addr_len);
+ neigh_entry->key.dev = dev;
+ neigh_entry->rif = rif;
+ neigh_entry->n = n;
+ INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
+ INIT_LIST_HEAD(&neigh_entry->nexthop_list);
+ return neigh_entry;
+}
+
+static void
+mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ kfree(neigh_entry);
+}
+
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr,
+ size_t addr_len, struct net_device *dev)
+{
+ struct mlxsw_sp_neigh_key key = {{ 0 }};
+
+ memcpy(key.addr, addr, addr_len);
+ key.dev = dev;
+ return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
+ &key, mlxsw_sp_neigh_ht_params);
+}
+
+int mlxsw_sp_router_neigh_construct(struct net_device *dev,
+ struct neighbour *n)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp_rif *r;
+ u32 dip;
+ int err;
+
+ if (n->tbl != &arp_tbl)
+ return 0;
+
+ dip = ntohl(*((__be32 *) n->primary_key));
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
+ n->dev);
+ if (neigh_entry) {
+ WARN_ON(neigh_entry->n != n);
+ return 0;
+ }
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (WARN_ON(!r))
+ return -EINVAL;
+
+ neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev,
+ r->rif, n);
+ if (!neigh_entry)
+ return -ENOMEM;
+ err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
+ if (err)
+ goto err_neigh_entry_insert;
+ return 0;
+
+err_neigh_entry_insert:
+ mlxsw_sp_neigh_entry_destroy(neigh_entry);
+ return err;
+}
+
+void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
+ struct neighbour *n)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ u32 dip;
+
+ if (n->tbl != &arp_tbl)
+ return;
+
+ dip = ntohl(*((__be32 *) n->primary_key));
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
+ n->dev);
+ if (!neigh_entry)
+ return;
+ mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
+ mlxsw_sp_neigh_entry_destroy(neigh_entry);
+}
+
+static void
+mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
+
+ mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
+}
+
+static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl,
+ int ent_index)
+{
+ struct net_device *dev;
+ struct neighbour *n;
+ __be32 dipn;
+ u32 dip;
+ u16 rif;
+
+ mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
+
+ if (!mlxsw_sp->rifs[rif]) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
+ return;
+ }
+
+ dipn = htonl(dip);
+ dev = mlxsw_sp->rifs[rif]->dev;
+ n = neigh_lookup(&arp_tbl, &dipn, dev);
+ if (!n) {
+ netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
+ &dip);
+ return;
+ }
+
+ netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+}
+
+static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl,
+ int rec_index)
+{
+ u8 num_entries;
+ int i;
+
+ num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
+ rec_index);
+ /* Hardware starts counting at 0, so add 1. */
+ num_entries++;
+
+ /* Each record consists of several neighbour entries. */
+ for (i = 0; i < num_entries; i++) {
+ int ent_index;
+
+ ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
+ mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
+ ent_index);
+ }
+}
+
+static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl, int rec_index)
+{
+ switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
+ case MLXSW_REG_RAUHTD_TYPE_IPV4:
+ mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
+ rec_index);
+ break;
+ case MLXSW_REG_RAUHTD_TYPE_IPV6:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
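+/* Dump the activity records of the HW neighbour table (RAUHTD) and poke
+ * the kernel about every active entry, so that neighbours used for HW
+ * forwarding are not aged out by the kernel.
+ */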
+static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
+{
+ char *rauhtd_pl;
+ u8 num_rec;
+ int i, err;
+
+ rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
+ if (!rauhtd_pl)
+ return -ENOMEM;
+
+ /* Make sure the neighbour's netdev isn't removed in the
+ * process.
+ */
+ rtnl_lock();
+ do {
+ mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
+ rauhtd_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
+ break;
+ }
+ num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
+ i);
+ } while (num_rec);
+ rtnl_unlock();
+
+ kfree(rauhtd_pl);
+ return err;
+}
+
+static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+
+ /* Take RTNL mutex here to prevent lists from changes */
+ rtnl_lock();
+ list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+ nexthop_neighs_list_node) {
+ /* If this neigh has nexthops, make the kernel think it is
+ * active regardless of the traffic.
+ */
+ if (!list_empty(&neigh_entry->nexthop_list))
+ neigh_event_send(neigh_entry->n, NULL);
+ }
+ rtnl_unlock();
+}
+
+static void
+mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned long interval = mlxsw_sp->router.neighs_update.interval;
+
+ mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
+ msecs_to_jiffies(interval));
+}
+
+static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
+{
+ struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
+ router.neighs_update.dw.work);
+ int err;
+
+ err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
+ if (err)
+ dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
+
+ mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
+
+ mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
+}
+
+static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
+ router.nexthop_probe_dw.work);
+
+ /* Iterate over nexthop neighbours, find those that are unresolved and
+ * send ARP requests to them. This solves the chicken-and-egg problem:
+ * a nexthop is not offloaded until its neighbour is resolved, but the
+ * neighbour might never get resolved if traffic is forwarded in HW
+ * using a different nexthop.
+ *
+ * Take RTNL mutex here to prevent lists from changes.
+ */
+ rtnl_lock();
+ list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+ nexthop_neighs_list_node) {
+ if (!(neigh_entry->n->nud_state & NUD_VALID) &&
+ !list_empty(&neigh_entry->nexthop_list))
+ neigh_event_send(neigh_entry->n, NULL);
+ }
+ rtnl_unlock();
+
+ mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
+ MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
+}
+
+static void
+mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ bool removing);
+
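+/* Reflect the neighbour's state in the HW host table (RAUHT): add or
+ * update the entry while the neighbour is valid, delete it otherwise,
+ * and notify the nexthop code about the change.
+ */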
+static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry =
+ container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
+ struct neighbour *n = neigh_entry->n;
+ struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char rauht_pl[MLXSW_REG_RAUHT_LEN];
+ struct net_device *dev;
+ bool entry_connected;
+ u8 nud_state;
+ bool updating;
+ bool removing;
+ bool adding;
+ u32 dip;
+ int err;
+
+ read_lock_bh(&n->lock);
+ dip = ntohl(*((__be32 *) n->primary_key));
+ memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
+ nud_state = n->nud_state;
+ dev = n->dev;
+ read_unlock_bh(&n->lock);
+
+ entry_connected = nud_state & NUD_VALID;
+ adding = (!neigh_entry->offloaded) && entry_connected;
+ updating = neigh_entry->offloaded && entry_connected;
+ removing = neigh_entry->offloaded && !entry_connected;
+
+ if (adding || updating) {
+ mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
+ neigh_entry->rif,
+ neigh_entry->ha, dip);
+ err = mlxsw_reg_write(mlxsw_sp->core,
+ MLXSW_REG(rauht), rauht_pl);
+ if (err) {
+ netdev_err(dev, "Could not add neigh %pI4h\n", &dip);
+ neigh_entry->offloaded = false;
+ } else {
+ neigh_entry->offloaded = true;
+ }
+ mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false);
+ } else if (removing) {
+ mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE,
+ neigh_entry->rif,
+ neigh_entry->ha, dip);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht),
+ rauht_pl);
+ if (err) {
+ netdev_err(dev, "Could not delete neigh %pI4h\n", &dip);
+ neigh_entry->offloaded = true;
+ } else {
+ neigh_entry->offloaded = false;
+ }
+ mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true);
+ }
+
+ neigh_release(n);
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+}
+
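+/* DELAY_PROBE_TIME updates re-tune our polling interval, while
+ * NEIGH_UPDATE events schedule the delayed work that reflects the
+ * neighbour in HW.
+ */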
+static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned long interval;
+ struct net_device *dev;
+ struct neigh_parms *p;
+ struct neighbour *n;
+ u32 dip;
+
+ switch (event) {
+ case NETEVENT_DELAY_PROBE_TIME_UPDATE:
+ p = ptr;
+
+ /* We don't care about changes in the default table. */
+ if (!p->dev || p->tbl != &arp_tbl)
+ return NOTIFY_DONE;
+
+ /* We are in atomic context and can't take RTNL mutex,
+ * so use RCU variant to walk the device chain.
+ */
+ mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
+ if (!mlxsw_sp_port)
+ return NOTIFY_DONE;
+
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
+ mlxsw_sp->router.neighs_update.interval = interval;
+
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ break;
+ case NETEVENT_NEIGH_UPDATE:
+ n = ptr;
+ dev = n->dev;
+
+ if (n->tbl != &arp_tbl)
+ return NOTIFY_DONE;
+
+ mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
+ if (!mlxsw_sp_port)
+ return NOTIFY_DONE;
+
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ dip = ntohl(*((__be32 *) n->primary_key));
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp,
+ &dip,
+ sizeof(__be32),
+ dev);
+ if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) {
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ return NOTIFY_DONE;
+ }
+ neigh_entry->mlxsw_sp_port = mlxsw_sp_port;
+
+ /* Take a reference to ensure the neighbour won't be
+ * destructed until we drop the reference in delayed
+ * work.
+ */
+ neigh_clone(n);
+ if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) {
+ neigh_release(n);
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ }
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_router_netevent_event,
+};
+
+static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
+ &mlxsw_sp_neigh_ht_params);
+ if (err)
+ return err;
+
+ /* Initialize the polling interval according to the default
+ * table.
+ */
+ mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
+
+ err = register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
+ /* Create the delayed works for neighbour activity update and unresolved nexthop probing */
+ INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
+ mlxsw_sp_router_neighs_update_work);
+ INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
+ mlxsw_sp_router_probe_unresolved_nexthops);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
+ return 0;
+
+err_register_netevent_notifier:
+ rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
+ return err;
+}
+
+static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
+ cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
+ unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+ rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
+}
+
+struct mlxsw_sp_nexthop {
+ struct list_head neigh_list_node; /* member of neigh entry list */
+ struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
+ * this belongs to
+ */
+ u8 should_offload:1, /* set indicates this neigh is connected and
+ * should be put into the KVD linear area of
+ * this group.
+ */
+ offloaded:1, /* set in case the neigh is actually put into
+ * the KVD linear area of this group.
+ */
+ update:1; /* set indicates that the MAC of this neigh
+ * should be updated in HW
+ */
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+};
+
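+/* A nexthop group occupies ecmp_size consecutive entries in the KVD
+ * linear area, starting at adj_index; FIB entries point at this block.
+ */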
+struct mlxsw_sp_nexthop_group {
+ struct list_head list; /* node in mlxsw->router.nexthop_group_list */
+ struct list_head fib_list; /* list of fib entries that use this group */
+ u8 adj_index_valid:1;
+ u32 adj_index;
+ u16 ecmp_size;
+ u16 count;
+ struct mlxsw_sp_nexthop nexthops[0];
+};
+
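+/* RALEU (Router Algorithmic LPM ECMP Update) re-points every route in
+ * the virtual router from the old adjacency block to the new one.
+ */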
+static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr,
+ u32 adj_index, u16 ecmp_size,
+ u32 new_adj_index,
+ u16 new_ecmp_size)
+{
+ char raleu_pl[MLXSW_REG_RALEU_LEN];
+
+ mlxsw_reg_raleu_pack(raleu_pl, vr->proto, vr->id,
+ adj_index, ecmp_size,
+ new_adj_index, new_ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
+}
+
+static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ u32 old_adj_index, u16 old_ecmp_size)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_vr *vr = NULL;
+ int err;
+
+ list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
+ if (vr == fib_entry->vr)
+ continue;
+ vr = fib_entry->vr;
+ err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
+ old_adj_index,
+ old_ecmp_size,
+ nh_grp->adj_index,
+ nh_grp->ecmp_size);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+
+ mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
+ true, adj_index, neigh_entry->rif);
+ mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+static int
+mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ u32 adj_index = nh_grp->adj_index; /* base */
+ struct mlxsw_sp_nexthop *nh;
+ int i;
+ int err;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+
+ if (!nh->should_offload) {
+ nh->offloaded = 0;
+ continue;
+ }
+
+ if (nh->update) {
+ err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
+ adj_index, nh);
+ if (err)
+ return err;
+ nh->update = 0;
+ nh->offloaded = 1;
+ }
+ adj_index++;
+ }
+ return 0;
+}
+
+static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry);
+
+static int
+mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err;
+
+ list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
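+/* Refresh the group's adjacency entries following a neighbour change:
+ * count the offloadable nexthops, allocate a new KVD linear block for
+ * them, write their MACs, re-point the FIB entries or mass-update the
+ * routes, and free the old block. On any failure fall back to trapping
+ * the traffic to the kernel.
+ */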
+static void
+mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop *nh;
+ bool offload_change = false;
+ u32 adj_index;
+ u16 ecmp_size = 0;
+ bool old_adj_index_valid;
+ u32 old_adj_index;
+ u16 old_ecmp_size;
+ int ret;
+ int i;
+ int err;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+
+ if (nh->should_offload ^ nh->offloaded) {
+ offload_change = true;
+ if (nh->should_offload)
+ nh->update = 1;
+ }
+ if (nh->should_offload)
+ ecmp_size++;
+ }
+ if (!offload_change) {
+ /* Nothing was added or removed, so no need to reallocate. Just
+ * update MAC on existing adjacency indexes.
+ */
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
+ goto set_trap;
+ }
+ return;
+ }
+ if (!ecmp_size)
+ /* No neigh of this group is connected so we just set
+ * the trap and let everything flow through the kernel.
+ */
+ goto set_trap;
+
+ ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
+ if (ret < 0) {
+ /* We ran out of KVD linear space, just set the
+ * trap and let everything flow through kernel.
+ */
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
+ goto set_trap;
+ }
+ adj_index = ret;
+ old_adj_index_valid = nh_grp->adj_index_valid;
+ old_adj_index = nh_grp->adj_index;
+ old_ecmp_size = nh_grp->ecmp_size;
+ nh_grp->adj_index_valid = 1;
+ nh_grp->adj_index = adj_index;
+ nh_grp->ecmp_size = ecmp_size;
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
+ goto set_trap;
+ }
+
+ if (!old_adj_index_valid) {
+ /* The trap was set for fib entries, so we have to call
+ * fib entry update to unset it and use adjacency index.
+ */
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
+ goto set_trap;
+ }
+ return;
+ }
+
+ err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
+ old_adj_index, old_ecmp_size);
+ mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
+ goto set_trap;
+ }
+ return;
+
+set_trap:
+ old_adj_index_valid = nh_grp->adj_index_valid;
+ nh_grp->adj_index_valid = 0;
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+ nh->offloaded = 0;
+ }
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+ if (err)
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
+ if (old_adj_index_valid)
+ mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
+}
+
+static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
+ bool removing)
+{
+ if (!removing && !nh->should_offload)
+ nh->should_offload = 1;
+ else if (removing && nh->offloaded)
+ nh->should_offload = 0;
+ nh->update = 1;
+}
+
+static void
+mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ bool removing)
+{
+ struct mlxsw_sp_nexthop *nh;
+
+ /* Take RTNL mutex here to prevent lists from changes */
+ rtnl_lock();
+ list_for_each_entry(nh, &neigh_entry->nexthop_list,
+ neigh_list_node) {
+ __mlxsw_sp_nexthop_neigh_update(nh, removing);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ }
+ rtnl_unlock();
+}
+
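+/* Initialize a nexthop: look up the gateway's neighbour entry, creating
+ * it and kicking off resolution if needed, link the nexthop to it and
+ * derive the initial offload state from the neighbour's NUD state.
+ */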
+static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop *nh,
+ struct fib_nh *fib_nh)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ u32 gwip = ntohl(fib_nh->nh_gw);
+ struct net_device *dev = fib_nh->nh_dev;
+ struct neighbour *n;
+ u8 nud_state;
+
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
+ sizeof(gwip), dev);
+ if (!neigh_entry) {
+ __be32 gwipn = htonl(gwip);
+
+ n = neigh_create(&arp_tbl, &gwipn, dev);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ neigh_event_send(n, NULL);
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
+ sizeof(gwip), dev);
+ if (!neigh_entry) {
+ neigh_release(n);
+ return -EINVAL;
+ }
+ } else {
+ /* Take a reference of the neigh here, ensuring that it would
+ * not be destructed before the nexthop entry is finished.
+ * The other branch takes the reference in neigh_create().
+ */
+ n = neigh_entry->n;
+ neigh_clone(n);
+ }
+
+ /* If that is the first nexthop connected to that neigh, add to
+ * nexthop_neighs_list
+ */
+ if (list_empty(&neigh_entry->nexthop_list))
+ list_add_tail(&neigh_entry->nexthop_neighs_list_node,
+ &mlxsw_sp->router.nexthop_neighs_list);
+
+ nh->nh_grp = nh_grp;
+ nh->neigh_entry = neigh_entry;
+ list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ read_unlock_bh(&n->lock);
+ __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID));
+
+ return 0;
+}
+
+static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+
+ list_del(&nh->neigh_list_node);
+
+ /* If that is the last nexthop connected to that neigh, remove from
+ * nexthop_neighs_list
+ */
+ if (list_empty(&nh->neigh_entry->nexthop_list))
+ list_del(&nh->neigh_entry->nexthop_neighs_list_node);
+
+ neigh_release(neigh_entry->n);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ struct mlxsw_sp_nexthop *nh;
+ struct fib_nh *fib_nh;
+ size_t alloc_size;
+ int i;
+ int err;
+
+ alloc_size = sizeof(*nh_grp) +
+ fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
+ nh_grp = kzalloc(alloc_size, GFP_KERNEL);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&nh_grp->fib_list);
+ nh_grp->count = fi->fib_nhs;
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+ fib_nh = &fi->fib_nh[i];
+ err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
+ if (err)
+ goto err_nexthop_init;
+ }
+ list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ return nh_grp;
+
+err_nexthop_init:
+ for (i--; i >= 0; i--) {
+ nh = &nh_grp->nexthops[i];
+ mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
+ }
+ kfree(nh_grp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop *nh;
+ int i;
+
+ list_del(&nh_grp->list);
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+ mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
+ }
+ kfree(nh_grp);
+}
+
+static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
+ struct fib_info *fi)
+{
+ int i;
+
+ for (i = 0; i < fi->fib_nhs; i++) {
+ struct fib_nh *fib_nh = &fi->fib_nh[i];
+ u32 gwip = ntohl(fib_nh->nh_gw);
+
+ if (memcmp(nh->neigh_entry->key.addr,
+ &gwip, sizeof(u32)) == 0 &&
+ nh->neigh_entry->key.dev == fib_nh->nh_dev)
+ return true;
+ }
+ return false;
+}
+
+static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp,
+ struct fib_info *fi)
+{
+ int i;
+
+ if (nh_grp->count != fi->fib_nhs)
+ return false;
+ for (i = 0; i < nh_grp->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+
+ if (!mlxsw_sp_nexthop_match(nh, fi))
+ return false;
+ }
+ return true;
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list,
+ list) {
+ if (mlxsw_sp_nexthop_group_match(nh_grp, fi))
+ return nh_grp;
+ }
+ return NULL;
+}
+
+static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ struct fib_info *fi)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi);
+ if (!nh_grp) {
+ nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
+ if (IS_ERR(nh_grp))
+ return PTR_ERR(nh_grp);
+ }
+ list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
+ fib_entry->nh_group = nh_grp;
+ return 0;
+}
+
+static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
+
+ list_del(&fib_entry->nexthop_group_node);
+ if (!list_empty(&nh_grp->fib_list))
+ return;
+ mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
+}
+
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, true);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+}
+
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
+ err = __mlxsw_sp_router_init(mlxsw_sp);
+ if (err)
+ return err;
+ mlxsw_sp_lpm_init(mlxsw_sp);
+ mlxsw_sp_vrs_init(mlxsw_sp);
+ return mlxsw_sp_neigh_init(mlxsw_sp);
+}
+
+void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_neigh_fini(mlxsw_sp);
+ __mlxsw_sp_router_fini(mlxsw_sp);
+}
+
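+/* RALUE (Router Algorithmic LPM Unicast Entry) programs a single route.
+ * A remote entry forwards to the nexthop group's adjacency block when
+ * the latter is valid and traps the traffic to the kernel otherwise.
+ */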
+static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u32 *p_dip = (u32 *) fib_entry->key.addr;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+ enum mlxsw_reg_ralue_trap_action trap_action;
+ u16 trap_id = 0;
+ u32 adjacency_index = 0;
+ u16 ecmp_size = 0;
+
+ /* In case the nexthop group adjacency index is valid, use it
+ * with the provided ECMP size. Otherwise, set up a trap and pass
+ * the traffic to the kernel.
+ */
+ if (fib_entry->nh_group->adj_index_valid) {
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
+ adjacency_index = fib_entry->nh_group->adj_index;
+ ecmp_size = fib_entry->nh_group->ecmp_size;
+ } else {
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
+ trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
+ }
+
+ mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
+ fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
+ adjacency_index, ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u32 *p_dip = (u32 *) fib_entry->key.addr;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+ mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
+ fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl,
+ MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
+ fib_entry->rif);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u32 *p_dip = (u32 *) fib_entry->key.addr;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+ mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
+ fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ switch (fib_entry->type) {
+ case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
+ return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
+ return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
+ return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
+ }
+ return -EINVAL;
+}
+
+static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ switch (fib_entry->vr->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ enum mlxsw_reg_ralue_op op;
+
+ op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE :
+ MLXSW_REG_RALUE_OP_WRITE_UPDATE;
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
+}
+
+static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_DELETE);
+}
+
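+/* State created in the prepare phase and carried over to the commit
+ * phase via the switchdev transaction item queue.
+ */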
+struct mlxsw_sp_router_fib4_add_info {
+ struct switchdev_trans_item tritem;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_fib_entry *fib_entry;
+};
+
+static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
+{
+ const struct mlxsw_sp_router_fib4_add_info *info = data;
+ struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
+ struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
+
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+ mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr);
+ kfree(info);
+}
+
+static int
+mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct fib_info *fi = fib4->fi;
+
+ if (fib4->type == RTN_LOCAL || fib4->type == RTN_BROADCAST) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ return 0;
+ }
+ if (fib4->type != RTN_UNICAST)
+ return -EINVAL;
+
+ if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
+ struct mlxsw_sp_rif *r;
+
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fi->fib_dev);
+ if (!r)
+ return -EINVAL;
+ fib_entry->rif = r->rif;
+ return 0;
+ }
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ return mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
+}
+
+static void
+mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
+ return;
+ mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
+}
+
+static int
+mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_router_fib4_add_info *info;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(vr))
+ return PTR_ERR(vr);
+
+ fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
+ sizeof(fib4->dst), fib4->dst_len);
+ if (!fib_entry) {
+ err = -ENOMEM;
+ goto err_fib_entry_create;
+ }
+ fib_entry->vr = vr;
+
+ err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
+ if (err)
+ goto err_fib4_entry_init;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto err_alloc_info;
+ }
+ info->mlxsw_sp = mlxsw_sp;
+ info->fib_entry = fib_entry;
+ switchdev_trans_item_enqueue(trans, info,
+ mlxsw_sp_router_fib4_add_info_destroy,
+ &info->tritem);
+ return 0;
+
+err_alloc_info:
+ mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
+err_fib4_entry_init:
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+err_fib_entry_create:
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+ return err;
+}
+
+static int
+mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_router_fib4_add_info *info;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ info = switchdev_trans_item_dequeue(trans);
+ fib_entry = info->fib_entry;
+ kfree(info);
+
+ vr = fib_entry->vr;
+ err = mlxsw_sp_fib_entry_insert(fib_entry->vr->fib, fib_entry);
+ if (err)
+ goto err_fib_entry_insert;
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ if (err)
+ goto err_fib_entry_add;
+ return 0;
+
+err_fib_entry_add:
+ mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
+err_fib_entry_insert:
+ mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+ return err;
+}
+
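+/* Called twice by switchdev: once in the prepare phase, where the entry
+ * and its resources are allocated, and once in the commit phase, where
+ * the entry is inserted into the FIB and programmed to HW.
+ */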
+int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return mlxsw_sp_router_fib4_add_prepare(mlxsw_sp_port,
+ fib4, trans);
+ return mlxsw_sp_router_fib4_add_commit(mlxsw_sp_port,
+ fib4, trans);
+}
+
+int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+ if (!vr) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n");
+ return -ENOENT;
+ }
+ fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+ sizeof(fib4->dst), fib4->dst_len);
+ if (!fib_entry) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
+ return -ENOENT;
+ }
+ mlxsw_sp_fib_entry_del(mlxsw_sp_port->mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
+ mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 3710f19ed6bb..a1ad5e6bdfa8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -55,13 +55,10 @@
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
{
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
u16 fid = vid;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
-
- fid = mlxsw_sp_vfid_to_fid(vfid);
- }
+ fid = f ? f->fid : fid;
if (!fid)
fid = mlxsw_sp_port->pvid;
@@ -169,11 +166,6 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}
-static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
-{
- return vfid >= MLXSW_SP_VFID_PORT_MAX;
-}
-
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 idx_begin, u16 idx_end, bool set,
bool only_uc)
@@ -185,15 +177,10 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
char *sftr_pl;
int err;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
- if (mlxsw_sp_vfid_is_vport_br(idx_begin))
- local_port = mlxsw_sp_port->local_port;
- else
- local_port = MLXSW_PORT_CPU_PORT;
- } else {
+ else
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
- }
sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
if (!sftr_pl)
@@ -236,7 +223,8 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
int err;
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
+ u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
+ u16 vfid = mlxsw_sp_fid_to_vfid(fid);
return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
set, true);
@@ -260,14 +248,17 @@ err_port_flood_set:
return err;
}
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
- bool set, bool only_uc)
+int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
+ bool set)
{
+ u16 vfid;
+
/* In case of vFIDs, index into the flooding table is relative to
* the start of the vFIDs range.
*/
+ vfid = mlxsw_sp_fid_to_vfid(fid);
return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
- only_uc);
+ false);
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -383,6 +374,187 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
return err;
}
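+/* SFMR (Switch FID Management Register) creates or destroys a filtering
+ * identifier (FID) in the device.
+ */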
+static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ f->fid = fid;
+
+ return f;
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+ int err;
+
+ err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
+ if (err)
+ return ERR_PTR(err);
+
+ /* Although all the ports that are members in the FID might be
+ * using a {Port, VID} to FID mapping, we create a global VID-to-FID
+ * mapping. This allows a port to transition to VLAN mode,
+ * knowing the global mapping exists.
+ */
+ err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
+ if (err)
+ goto err_fid_map;
+
+ f = mlxsw_sp_fid_alloc(fid);
+ if (!f) {
+ err = -ENOMEM;
+ goto err_allocate_fid;
+ }
+
+ list_add(&f->list, &mlxsw_sp->fids);
+
+ return f;
+
+err_allocate_fid:
+ mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+err_fid_map:
+ mlxsw_sp_fid_op(mlxsw_sp, fid, false);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
+{
+ u16 fid = f->fid;
+
+ list_del(&f->list);
+
+ if (f->r)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+
+ kfree(f);
+
+ mlxsw_sp_fid_op(mlxsw_sp, fid, false);
+}
+
+static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
+ if (!f) {
+ f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ }
+
+ f->ref_count++;
+
+ netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);
+
+ return 0;
+}
+
+static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
+ if (WARN_ON(!f))
+ return;
+
+ netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);
+
+ mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);
+
+ if (--f->ref_count == 0)
+ mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
+}
+
+static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
+ bool valid)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+
+ /* If the port doesn't have vPorts, then it can use the global
+ * VID-to-FID mapping.
+ */
+ if (list_empty(&mlxsw_sp_port->vports_list))
+ return 0;
+
+ return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
+}
+
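+/* Join a range of FIDs in three steps: take a reference on (or create)
+ * each FID, enable flooding for the range and map the {Port, VID} pairs
+ * to the FIDs. Unwind in reverse order on failure.
+ */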
+static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_begin, u16 fid_end)
+{
+ int fid, err;
+
+ for (fid = fid_begin; fid <= fid_end; fid++) {
+ err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
+ if (err)
+ goto err_port_fid_join;
+ }
+
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
+ true, false);
+ if (err)
+ goto err_port_flood_set;
+
+ for (fid = fid_begin; fid <= fid_end; fid++) {
+ err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
+ if (err)
+ goto err_port_fid_map;
+ }
+
+ return 0;
+
+err_port_fid_map:
+ for (fid--; fid >= fid_begin; fid--)
+ mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
+ __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
+ false);
+err_port_flood_set:
+ fid = fid_end;
+err_port_fid_join:
+ for (fid--; fid >= fid_begin; fid--)
+ __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
+ return err;
+}
+
+static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_begin, u16 fid_end)
+{
+ int fid;
+
+ for (fid = fid_begin; fid <= fid_end; fid++)
+ mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
+
+ __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
+ false);
+
+ for (fid = fid_begin; fid <= fid_end; fid++)
+ __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
+}
+
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
{
@@ -440,74 +612,6 @@ err_port_allow_untagged_set:
return err;
}
-static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
-{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
- int err;
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-
- if (err)
- return err;
-
- set_bit(fid, mlxsw_sp->active_fids);
- return 0;
-}
-
-static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
-{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- clear_bit(fid, mlxsw_sp->active_fids);
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
- fid, fid);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
-{
- enum mlxsw_reg_svfa_mt mt;
-
- if (!list_empty(&mlxsw_sp_port->vports_list))
- mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- else
- mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
-
- return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
-}
-
-static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
-{
- enum mlxsw_reg_svfa_mt mt;
-
- if (list_empty(&mlxsw_sp_port->vports_list))
- return 0;
-
- mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
-}
-
-static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
- u16 vid_end)
-{
- u16 vid;
- int err;
-
- for (vid = vid_begin; vid <= vid_end; vid++) {
- err = mlxsw_sp_port_add_vid(dev, 0, vid);
- if (err)
- goto err_port_add_vid;
- }
- return 0;
-
-err_port_add_vid:
- for (vid--; vid >= vid_begin; vid--)
- mlxsw_sp_port_kill_vid(dev, 0, vid);
- return err;
-}
-
static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end, bool is_member,
bool untagged)
@@ -533,57 +637,17 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool flag_untagged, bool flag_pvid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *dev = mlxsw_sp_port->dev;
- u16 vid, last_visited_vid, old_pvid;
- enum mlxsw_reg_svfa_mt mt;
+ u16 vid, old_pvid;
int err;
- /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
- * not bridged, then packets ingressing through the port with
- * the specified VIDs will be directed to CPU.
- */
if (!mlxsw_sp_port->bridged)
- return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);
-
- for (vid = vid_begin; vid <= vid_end; vid++) {
- if (!test_bit(vid, mlxsw_sp->active_fids)) {
- err = mlxsw_sp_fid_create(mlxsw_sp, vid);
- if (err) {
- netdev_err(dev, "Failed to create FID=%d\n",
- vid);
- return err;
- }
-
- /* When creating a FID, we set a VID to FID mapping
- * regardless of the port's mode.
- */
- mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
- true, vid, vid);
- if (err) {
- netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
- vid);
- goto err_port_vid_to_fid_set;
- }
- }
- }
-
- /* Set FID mapping according to port's mode */
- for (vid = vid_begin; vid <= vid_end; vid++) {
- err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
- if (err) {
- netdev_err(dev, "Failed to map FID=%d", vid);
- last_visited_vid = --vid;
- goto err_port_fid_map;
- }
- }
+ return -EINVAL;
- err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
- true, false);
+ err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
if (err) {
- netdev_err(dev, "Failed to configure flooding\n");
- goto err_port_flood_set;
+ netdev_err(dev, "Failed to join FIDs\n");
+ return err;
}
err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
@@ -628,10 +692,6 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
-err_port_vid_to_fid_set:
- mlxsw_sp_fid_destroy(mlxsw_sp, vid);
- return err;
-
err_port_stp_state_set:
for (vid = vid_begin; vid <= vid_end; vid++)
clear_bit(vid, mlxsw_sp_port->active_vlans);
@@ -641,13 +701,7 @@ err_port_pvid_set:
__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
false);
err_port_vlans_set:
- __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
- false);
-err_port_flood_set:
- last_visited_vid = vid_end;
-err_port_fid_map:
- for (vid = last_visited_vid; vid >= vid_begin; vid--)
- mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
+ mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
return err;
}
@@ -678,9 +732,10 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
MLXSW_REG_SFD_OP_WRITE_REMOVE;
}
-static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- const char *mac, u16 fid, bool adding,
- bool dynamic)
+static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const char *mac, u16 fid, bool adding,
+ enum mlxsw_reg_sfd_rec_action action,
+ bool dynamic)
{
char *sfd_pl;
int err;
@@ -691,14 +746,29 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
- mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
- local_port);
+ mac, fid, action, local_port);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
kfree(sfd_pl);
return err;
}
+static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const char *mac, u16 fid, bool adding,
+ bool dynamic)
+{
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
+ MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
+}
+
+int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
+ bool adding)
+{
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
+ MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
+ false);
+}
+
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
const char *mac, u16 fid, u16 lag_vid,
bool adding, bool dynamic)
@@ -903,6 +973,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
+ case SWITCHDEV_OBJ_ID_IPV4_FIB:
+ err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
+ SWITCHDEV_OBJ_IPV4_FIB(obj),
+ trans);
+ break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj),
@@ -921,21 +996,6 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
return err;
}
-static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
- u16 vid_end)
-{
- u16 vid;
- int err;
-
- for (vid = vid_begin; vid <= vid_end; vid++) {
- err = mlxsw_sp_port_kill_vid(dev, 0, vid);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end, bool init)
{
@@ -943,12 +1003,8 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, pvid;
int err;
- /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
- * not bridged, then prevent packets ingressing through the
- * port with the specified VIDs from being trapped to CPU.
- */
if (!init && !mlxsw_sp_port->bridged)
- return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);
+ return -EINVAL;
err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
false, false);
@@ -970,21 +1026,7 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
- err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
- false, false);
- if (err) {
- netdev_err(dev, "Failed to clear flooding\n");
- return err;
- }
-
- for (vid = vid_begin; vid <= vid_end; vid++) {
- /* Remove FID mapping in case of Virtual mode */
- err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
- if (err) {
- netdev_err(dev, "Failed to unmap FID=%d", vid);
- return err;
- }
- }
+ mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
out:
/* Changing activity bits only if HW operation succeeded */
@@ -1081,6 +1123,10 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
+ case SWITCHDEV_OBJ_ID_IPV4_FIB:
+ err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
+ SWITCHDEV_OBJ_IPV4_FIB(obj));
+ break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj));
@@ -1118,7 +1164,8 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port *tmp;
- u16 vport_fid = 0;
+ struct mlxsw_sp_fid *f;
+ u16 vport_fid;
char *sfd_pl;
char mac[ETH_ALEN];
u16 fid;
@@ -1133,12 +1180,8 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
if (!sfd_pl)
return -ENOMEM;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- u16 tmp;
-
- tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
- vport_fid = mlxsw_sp_vfid_to_fid(tmp);
- }
+ f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
+ vport_fid = f ? f->fid : 0;
mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
do {
@@ -1310,11 +1353,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
}
if (mlxsw_sp_fid_is_vfid(fid)) {
- u16 vfid = mlxsw_sp_fid_to_vfid(fid);
struct mlxsw_sp_port *mlxsw_sp_vport;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
- vfid);
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
+ fid);
if (!mlxsw_sp_vport) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
goto just_remove;
@@ -1370,11 +1412,10 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
}
if (mlxsw_sp_fid_is_vfid(fid)) {
- u16 vfid = mlxsw_sp_fid_to_vfid(fid);
struct mlxsw_sp_port *mlxsw_sp_vport;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
- vfid);
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
+ fid);
if (!mlxsw_sp_vport) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
goto just_remove;
@@ -1495,14 +1536,6 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}
-static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
-{
- u16 fid;
-
- for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
- mlxsw_sp_fid_destroy(mlxsw_sp, fid);
-}
-
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
return mlxsw_sp_fdb_init(mlxsw_sp);
@@ -1511,7 +1544,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_fdb_fini(mlxsw_sp);
- mlxsw_sp_fids_fini(mlxsw_sp);
}
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 3842eab9449a..25f658b3849a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -316,7 +316,10 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
}
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
- len = skb->len;
+ /* TX header is consumed by HW on the way so we shouldn't count its
+ * bytes as being sent.
+ */
+ len = skb->len - MLXSW_TXHDR_LEN;
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 53a9550be75e..470d7696e9fe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -54,6 +54,11 @@ enum {
MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+ MLXSW_TRAP_ID_ARPBC = 0x50,
+ MLXSW_TRAP_ID_ARPUC = 0x51,
+ MLXSW_TRAP_ID_IP2ME = 0x5F,
+ MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
+ MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
MLXSW_TRAP_ID_MAX = 0x1FF
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index e744acc18ef4..690635660195 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -63,7 +63,7 @@
#define NFP_NET_POLL_TIMEOUT 5
/* Bar allocation */
-#define NFP_NET_CRTL_BAR 0
+#define NFP_NET_CTRL_BAR 0
#define NFP_NET_Q0_BAR 2
#define NFP_NET_Q1_BAR 4 /* OBSOLETE */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index fa47c14c743a..1e74b911accb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1845,13 +1845,14 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
}
/**
- * nfp_net_write_mac_addr() - Write mac address to device registers
+ * nfp_net_write_mac_addr() - Write mac address to the device control BAR
* @nn: NFP Net device to reconfigure
- * @mac: Six-byte MAC address to be written
*
- * We do a bit of byte swapping dance because firmware is LE.
+ * Writes the MAC address from the netdev to the device control BAR. Does not
+ * perform the required reconfig. We do a bit of byte swapping dance because
+ * firmware is LE.
*/
-static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
+static void nfp_net_write_mac_addr(struct nfp_net *nn)
{
nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
get_unaligned_be32(nn->netdev->dev_addr));
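With the @mac parameter gone, the helper reads the address straight from the netdev. A sketch of what the full function plausibly boils down to; only the first nn_writel() is visible in this hunk, so the 16-bit write and its "+ 6" offset are assumptions:

static void nfp_net_write_mac_addr(struct nfp_net *nn)
{
	/* First four MAC bytes, loaded big-endian for the LE firmware. */
	nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
		  get_unaligned_be32(nn->netdev->dev_addr));
	/* Last two bytes; the register offset here is an assumption. */
	nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
		  get_unaligned_be16(nn->netdev->dev_addr + 4));
}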
@@ -1952,7 +1953,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
- nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
+ nfp_net_write_mac_addr(nn);
nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
@@ -1979,7 +1980,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
- vxlan_get_rx_port(nn->netdev);
+ udp_tunnel_get_rx_info(nn->netdev);
}
return err;
@@ -2015,7 +2016,7 @@ static void nfp_net_open_stack(struct nfp_net *nn)
netif_tx_wake_all_queues(nn->netdev);
- enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+ enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nfp_net_read_link_status(nn);
}
@@ -2044,7 +2045,7 @@ static int nfp_net_netdev_open(struct net_device *netdev)
NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
if (err)
goto err_free_exn;
- disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
GFP_KERNEL);
@@ -2133,7 +2134,7 @@ static void nfp_net_close_stack(struct nfp_net *nn)
{
unsigned int r;
- disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
netif_carrier_off(nn->netdev);
nn->link_up = false;
@@ -2551,26 +2552,32 @@ static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
}
static void nfp_net_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct nfp_net *nn = netdev_priv(netdev);
int idx;
- idx = nfp_net_find_vxlan_idx(nn, port);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ idx = nfp_net_find_vxlan_idx(nn, ti->port);
if (idx == -ENOSPC)
return;
if (!nn->vxlan_usecnt[idx]++)
- nfp_net_set_vxlan_port(nn, idx, port);
+ nfp_net_set_vxlan_port(nn, idx, ti->port);
}
static void nfp_net_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct nfp_net *nn = netdev_priv(netdev);
int idx;
- idx = nfp_net_find_vxlan_idx(nn, port);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ idx = nfp_net_find_vxlan_idx(nn, ti->port);
if (!nn->vxlan_usecnt[idx] || idx == -ENOSPC)
return;
@@ -2589,8 +2596,8 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
- .ndo_add_vxlan_port = nfp_net_add_vxlan_port,
- .ndo_del_vxlan_port = nfp_net_del_vxlan_port,
+ .ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
+ .ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
};
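The switch from .ndo_add_vxlan_port to .ndo_udp_tunnel_add means a single callback pair now carries every UDP tunnel type, which is why both handlers gained the ti->type check; udp_tunnel_get_rx_info() in the reconfig path above asks the stack to replay the add callback for every live tunnel socket after the port table is wiped. A skeleton of the dispatch pattern for hardware that could parse more than one encapsulation (names are placeholders):

static void example_udp_tunnel_add(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		/* program ti->port (network byte order) into the VXLAN table */
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		/* only if the hardware parses Geneve as well */
		break;
	default:
		return;		/* unknown encapsulation: ignore */
	}
}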
/**
@@ -2733,7 +2740,7 @@ int nfp_net_netdev_init(struct net_device *netdev)
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
- nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
+ nfp_net_write_mac_addr(nn);
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index ccfef1f17627..7d7933d00b8f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -605,6 +605,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_drvinfo = nfp_net_get_drvinfo,
+ .get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
.set_ringparam = nfp_net_set_ringparam,
.get_strings = nfp_net_get_strings,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index e2b22b8a20f1..37abef016a0a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -124,11 +124,11 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
* first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code
* identical for PF and VF drivers.
*/
- ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CRTL_BAR),
+ ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
NFP_NET_CFG_BAR_SZ);
if (!ctrl_bar) {
dev_err(&pdev->dev,
- "Failed to map resource %d\n", NFP_NET_CRTL_BAR);
+ "Failed to map resource %d\n", NFP_NET_CTRL_BAR);
err = -EIO;
goto err_pci_regions;
}
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index b1ce7aaa8f8b..4d4ecba0aad9 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -425,7 +425,6 @@ struct netdata_local {
unsigned int last_tx_idx;
unsigned int num_used_tx_buffs;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
struct clk *clk;
dma_addr_t dma_buff_base_p;
void *dma_buff_base_v;
@@ -750,7 +749,7 @@ static int lpc_mdio_reset(struct mii_bus *bus)
static void lpc_handle_link_change(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = pldat->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
unsigned long flags;
bool status_change = false;
@@ -814,7 +813,6 @@ static int lpc_mii_probe(struct net_device *ndev)
pldat->link = 0;
pldat->speed = 0;
pldat->duplex = -1;
- pldat->phy_dev = phydev;
phy_attached_info(phydev);
@@ -1048,8 +1046,8 @@ static int lpc_eth_close(struct net_device *ndev)
napi_disable(&pldat->napi);
netif_stop_queue(ndev);
- if (pldat->phy_dev)
- phy_stop(pldat->phy_dev);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
spin_lock_irqsave(&pldat->lock, flags);
__lpc_eth_reset(pldat);
@@ -1185,8 +1183,7 @@ static void lpc_eth_set_multicast_list(struct net_device *ndev)
static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
- struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = pldat->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
@@ -1207,14 +1204,14 @@ static int lpc_eth_open(struct net_device *ndev)
__lpc_eth_clock_enable(pldat, true);
/* Suspended PHY makes LPC ethernet core block, so resume now */
- phy_resume(pldat->phy_dev);
+ phy_resume(ndev->phydev);
/* Reset and initialize */
__lpc_eth_reset(pldat);
__lpc_eth_init(pldat);
/* schedule a link state check */
- phy_start(pldat->phy_dev);
+ phy_start(ndev->phydev);
netif_start_queue(ndev);
napi_enable(&pldat->napi);
@@ -1247,37 +1244,13 @@ static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
pldat->msg_enable = level;
}
-static int lpc_eth_ethtool_getsettings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = pldat->phy_dev;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int lpc_eth_ethtool_setsettings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = pldat->phy_dev;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static const struct ethtool_ops lpc_eth_ethtool_ops = {
.get_drvinfo = lpc_eth_ethtool_getdrvinfo,
- .get_settings = lpc_eth_ethtool_getsettings,
- .set_settings = lpc_eth_ethtool_setsettings,
.get_msglevel = lpc_eth_ethtool_getmsglevel,
.set_msglevel = lpc_eth_ethtool_setmsglevel,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
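The hand-rolled get/set_settings callbacks (and the private phy_dev pointer) can go because the generic phylib helpers operate directly on ndev->phydev. Roughly what the generic getter does internally, as a sketch rather than the verbatim phylib source:

int phy_ethtool_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_get(phydev, cmd);
}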
static const struct net_device_ops lpc_netdev_ops = {
@@ -1460,7 +1433,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
res->start, ndev->irq);
- phydev = pldat->phy_dev;
+ phydev = ndev->phydev;
device_init_wakeup(&pdev->dev, 1);
device_set_wakeup_enable(&pdev->dev, 0);
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 680d8c736d2b..6ba48406899e 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -54,16 +54,6 @@ config QLCNIC_DCB
mode of DCB is supported. PG and PFC values are related only
to Tx.
-config QLCNIC_VXLAN
- bool "Virtual eXtensible Local Area Network (VXLAN) offload support"
- default n
- depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m)
- ---help---
- This enables hardware offload support for VXLAN protocol over QLogic's
- 84XX series adapters.
- Say Y here if you want to enable hardware offload support for
- Virtual eXtensible Local Area Network (VXLAN) in the driver.
-
config QLCNIC_HWMON
bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
@@ -114,24 +104,4 @@ config QEDE
---help---
This enables the support for ...
-config QEDE_VXLAN
- bool "Virtual eXtensible Local Area Network support"
- default n
- depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m)
- ---help---
- This enables hardware offload support for VXLAN protocol over
- qede module. Say Y here if you want to enable hardware offload
- support for Virtual eXtensible Local Area Network (VXLAN)
- in the driver.
-
-config QEDE_GENEVE
- bool "Generic Network Virtualization Encapsulation (GENEVE) support"
- depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m)
- ---help---
- This allows one to create GENEVE virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. GENEVE is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to enable hardware offload support for
- Generic Network Virtualization Encapsulation (GENEVE) in the driver.
-
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 9a63df1184f1..35e53771533f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -489,8 +489,8 @@ struct qed_dev {
u32 int_mode;
enum qed_coalescing_mode int_coalescing_mode;
- u8 rx_coalesce_usecs;
- u8 tx_coalesce_usecs;
+ u16 rx_coalesce_usecs;
+ u16 tx_coalesce_usecs;
/* Start Bar offset of first hwfn */
void __iomem *regview;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index d85b7ba8cb9a..1c35f376143e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -850,7 +850,7 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
val = 0;
entries[j].next = cpu_to_be64(val);
- conn_num -= ent_per_page;
+ conn_num -= ent_num;
}
return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 21ec1c2df2c7..d0dc28f93c0e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
+#include <linux/dcbnl.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -18,6 +19,9 @@
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_sp.h"
+#ifdef CONFIG_DCB
+#include <linux/qed/qed_eth_if.h>
+#endif
#define QED_DCBX_MAX_MIB_READ_TRY (100)
#define QED_ETH_TYPE_DEFAULT (0)
@@ -252,7 +256,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
if (p_data->arr[type].update)
continue;
- enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled;
+ enable = !(type == DCBX_PROTOCOL_ETH);
qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
priority, tc, type);
}
@@ -351,6 +355,293 @@ qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
return rc;
}
+#ifdef CONFIG_DCB
+static void
+qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_app_prio *p_prio,
+ struct qed_dcbx_results *p_results)
+{
+ u8 val;
+
+ p_prio->roce = QED_DCBX_INVALID_PRIORITY;
+ p_prio->roce_v2 = QED_DCBX_INVALID_PRIORITY;
+ p_prio->iscsi = QED_DCBX_INVALID_PRIORITY;
+ p_prio->fcoe = QED_DCBX_INVALID_PRIORITY;
+
+ if (p_results->arr[DCBX_PROTOCOL_ROCE].update &&
+ p_results->arr[DCBX_PROTOCOL_ROCE].enable)
+ p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+ p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) {
+ val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority;
+ p_prio->roce_v2 = val;
+ }
+
+ if (p_results->arr[DCBX_PROTOCOL_ISCSI].update &&
+ p_results->arr[DCBX_PROTOCOL_ISCSI].enable)
+ p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_FCOE].update &&
+ p_results->arr[DCBX_PROTOCOL_FCOE].enable)
+ p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
+ p_results->arr[DCBX_PROTOCOL_ETH].enable)
+ p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Priorities: iscsi %d, roce %d, roce v2 %d, fcoe %d, eth %d\n",
+ p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe,
+ p_prio->eth);
+}
+
+static void
+qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct qed_dcbx_params *p_params)
+{
+ struct qed_app_entry *entry;
+ u8 pri_map;
+ int i;
+
+ p_params->app_willing = QED_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_WILLING);
+ p_params->app_valid = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ENABLED);
+ p_params->app_error = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
+ p_params->num_app_entries = QED_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_NUM_ENTRIES);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_params->app_entry[i];
+ entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF));
+ pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
+ entry->prio = ffs(pri_map) - 1;
+ entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ entry->proto_id,
+ &entry->proto_type);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "APP params: willing %d, valid %d error = %d\n",
+ p_params->app_willing, p_params->app_valid,
+ p_params->app_error);
+}
+
+static void
+qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn,
+ u32 pfc, struct qed_dcbx_params *p_params)
+{
+ u8 pfc_map;
+
+ p_params->pfc.willing = QED_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
+ p_params->pfc.max_tc = QED_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
+ p_params->pfc.enabled = QED_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
+ pfc_map = QED_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
+ p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
+ p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
+ p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
+ p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3);
+ p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4);
+ p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5);
+ p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6);
+ p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "PFC params: willing %d, pfc_bitmap %d\n",
+ p_params->pfc.willing, pfc_map);
+}
+
+static void
+qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct qed_dcbx_params *p_params)
+{
+ u32 bw_map[2], tsa_map[2], pri_map;
+ int i;
+
+ p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_WILLING);
+ p_params->ets_enabled = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_ENABLED);
+ p_params->ets_cbs = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS);
+ p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing,
+ p_params->ets_cbs,
+ p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
+
+	/* The 8-bit TSA and BW values for each of the 8 TCs are
+	 * encoded in an array of two u32s.
+ */
+ bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]);
+ bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
+ tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
+ tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
+ pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]);
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
+ p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
+ p_params->ets_pri_tc_tbl[i] = QED_DCBX_PRIO2TC(pri_map, i);
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "elem %d bw_tbl %x tsa_tbl %x\n",
+ i, p_params->ets_tc_bw_tbl[i],
+ p_params->ets_tc_tsa_tbl[i]);
+ }
+}
+
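The endian handling above is easy to misread, so spelled out: each __be32 in the MFW tables packs four per-TC bytes, and after be32_to_cpu() flat byte index i addresses TC i, which is what the (u8 *) casts in the loop rely on. A standalone fragment, with tc_bw_tbl standing in for the firmware table:

/* Illustration: unpack 8 per-TC bytes from two 32-bit words. */
u32 bw_map[2];
u8 tc_bw[8];
int i;

bw_map[0] = be32_to_cpu(tc_bw_tbl[0]);		/* TCs 0-3 */
bw_map[1] = be32_to_cpu(tc_bw_tbl[1]);		/* TCs 4-7 */
for (i = 0; i < 8; i++)
	tc_bw[i] = ((u8 *)bw_map)[i];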
+static void
+qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct dcbx_ets_feature *p_ets,
+ u32 pfc, struct qed_dcbx_params *p_params)
+{
+ qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params);
+ qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
+ qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
+}
+
+static void
+qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->local.params);
+ params->local.valid = true;
+}
+
+static void
+qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->remote.features;
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->remote.params);
+ params->remote.valid = true;
+}
+
+static void
+qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *params)
+{
+ struct qed_dcbx_operational_params *p_operational;
+ struct qed_dcbx_results *p_results;
+ struct dcbx_features *p_feat;
+ bool enabled, err;
+ u32 flags;
+ bool val;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+
+	/* If the DCBX version is non-zero, then negotiation
+	 * was successfully performed
+ */
+ p_operational = &params->operational;
+ enabled = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ DCBX_CONFIG_VERSION_DISABLED);
+ if (!enabled) {
+ p_operational->enabled = enabled;
+ p_operational->valid = false;
+ return;
+ }
+
+ p_feat = &p_hwfn->p_dcbx_info->operational.features;
+ p_results = &p_hwfn->p_dcbx_info->results;
+
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_IEEE);
+ p_operational->ieee = val;
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_CEE);
+ p_operational->cee = val;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d\n",
+ p_operational->ieee, p_operational->cee);
+
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->operational.params);
+ qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
+ err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
+ p_operational->err = err;
+ p_operational->enabled = enabled;
+ p_operational->valid = true;
+}
+
+static void
+qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *params)
+{
+ struct lldp_config_params_s *p_local;
+
+ p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
+
+ memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
+ ARRAY_SIZE(p_local->local_chassis_id));
+ memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
+ ARRAY_SIZE(p_local->local_port_id));
+}
+
+static void
+qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *params)
+{
+ struct lldp_status_params_s *p_remote;
+
+ p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
+
+ memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
+ ARRAY_SIZE(p_remote->peer_chassis_id));
+ memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+ ARRAY_SIZE(p_remote->peer_port_id));
+}
+
+static int
+qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *p_params,
+ enum qed_mib_read_type type)
+{
+ switch (type) {
+ case QED_DCBX_REMOTE_MIB:
+ qed_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_LOCAL_MIB:
+ qed_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_OPERATIONAL_MIB:
+ qed_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_REMOTE_LLDP_MIB:
+ qed_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_LOCAL_LLDP_MIB:
+ qed_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
static int
qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
@@ -561,3 +852,1333 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
p_dcb_data = &p_dest->eth_dcb_data;
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
}
+
+#ifdef CONFIG_DCB
+static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_get *p_get,
+ enum qed_mib_read_type type)
+{
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ goto out;
+
+ rc = qed_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+static void
+qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
+ u32 *pfc, struct qed_dcbx_params *p_params)
+{
+ u8 pfc_map = 0;
+ int i;
+
+ if (p_params->pfc.willing)
+ *pfc |= DCBX_PFC_WILLING_MASK;
+ else
+ *pfc &= ~DCBX_PFC_WILLING_MASK;
+
+ if (p_params->pfc.enabled)
+ *pfc |= DCBX_PFC_ENABLED_MASK;
+ else
+ *pfc &= ~DCBX_PFC_ENABLED_MASK;
+
+ *pfc &= ~DCBX_PFC_CAPS_MASK;
+ *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (p_params->pfc.prio[i])
+ pfc_map |= BIT(i);
+
+ *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
+}
+
+static void
+qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct qed_dcbx_params *p_params)
+{
+ u8 *bw_map, *tsa_map;
+ u32 val;
+ int i;
+
+ if (p_params->ets_willing)
+ p_ets->flags |= DCBX_ETS_WILLING_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_WILLING_MASK;
+
+ if (p_params->ets_cbs)
+ p_ets->flags |= DCBX_ETS_CBS_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_CBS_MASK;
+
+ if (p_params->ets_enabled)
+ p_ets->flags |= DCBX_ETS_ENABLED_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
+
+ p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
+ p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
+
+ bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
+ tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
+ p_ets->pri_tc_tbl[0] = 0;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ bw_map[i] = p_params->ets_tc_bw_tbl[i];
+ tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
+ /* Copy the priority value to the corresponding 4 bits in the
+ * traffic class table.
+ */
+ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
+ p_ets->pri_tc_tbl[0] |= val;
+ }
+ p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]);
+ for (i = 0; i < 2; i++) {
+ p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
+ p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
+ }
+}
+
+static void
+qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct qed_dcbx_params *p_params)
+{
+ u32 *entry;
+ int i;
+
+ if (p_params->app_willing)
+ p_app->flags |= DCBX_APP_WILLING_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_WILLING_MASK;
+
+ if (p_params->app_valid)
+ p_app->flags |= DCBX_APP_ENABLED_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_ENABLED_MASK;
+
+ p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
+ p_app->flags |= (u32)p_params->num_app_entries <<
+ DCBX_APP_NUM_ENTRIES_SHIFT;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_app->app_pri_tbl[i].entry;
+ *entry &= ~DCBX_APP_SF_MASK;
+ if (p_params->app_entry[i].ethtype)
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ else
+ *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT);
+ *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
+ *entry |= ((u32)p_params->app_entry[i].proto_id <<
+ DCBX_APP_PROTOCOL_ID_SHIFT);
+ *entry &= ~DCBX_APP_PRI_MAP_MASK;
+ *entry |= ((u32)(p_params->app_entry[i].prio) <<
+ DCBX_APP_PRI_MAP_SHIFT);
+ }
+}
+
+static void
+qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
+ struct dcbx_local_params *local_admin,
+ struct qed_dcbx_set *params)
+{
+ local_admin->flags = 0;
+ memcpy(&local_admin->features,
+ &p_hwfn->p_dcbx_info->operational.features,
+ sizeof(local_admin->features));
+
+ if (params->enabled)
+ local_admin->config = params->ver_num;
+ else
+ local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
+ qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
+ &params->config.params);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_ETS_CFG)
+ qed_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
+ &params->config.params);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
+ qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
+ &params->config.params);
+}
+
+int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_set *params, bool hw_commit)
+{
+ struct dcbx_local_params local_admin;
+ struct qed_dcbx_mib_meta_data data;
+ u32 resp = 0, param = 0;
+ int rc = 0;
+
+ if (!hw_commit) {
+ memcpy(&p_hwfn->p_dcbx_info->set, params,
+ sizeof(struct qed_dcbx_set));
+ return 0;
+ }
+
+	/* Clear the set-params cache */
+ memset(&p_hwfn->p_dcbx_info->set, 0, sizeof(p_hwfn->p_dcbx_info->set));
+
+ memset(&local_admin, 0, sizeof(local_admin));
+ qed_dcbx_set_local_params(p_hwfn, &local_admin, params);
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ qed_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to send DCBX update request\n");
+
+ return rc;
+}
+
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_set *params)
+{
+ struct qed_dcbx_get *dcbx_info;
+ int rc;
+
+ if (p_hwfn->p_dcbx_info->set.config.valid) {
+ memcpy(params, &p_hwfn->p_dcbx_info->set,
+ sizeof(struct qed_dcbx_set));
+ return 0;
+ }
+
+ dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ if (!dcbx_info) {
+		DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_get\n");
+ return -ENOMEM;
+ }
+
+ rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return rc;
+ }
+
+ p_hwfn->p_dcbx_info->set.override_flags = 0;
+ p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED;
+ if (dcbx_info->operational.cee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ if (dcbx_info->operational.ieee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+
+ p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ memcpy(&p_hwfn->p_dcbx_info->set.config.params,
+ &dcbx_info->operational.params,
+ sizeof(struct qed_dcbx_admin_params));
+ p_hwfn->p_dcbx_info->set.config.valid = true;
+
+ memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_get *dcbx_info;
+
+ dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ if (!dcbx_info) {
+ DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
+ return NULL;
+ }
+
+ if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
+ kfree(dcbx_info);
+ return NULL;
+ }
+
+ if ((type == QED_DCBX_OPERATIONAL_MIB) &&
+ !dcbx_info->operational.enabled) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational\n");
+ kfree(dcbx_info);
+ return NULL;
+ }
+
+ return dcbx_info;
+}
+
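Note the ownership convention established here: qed_dcbnl_get_dcbx() returns a kmalloc'ed snapshot (or NULL) and every caller below is responsible for freeing it, e.g.:

dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
if (!dcbx_info)
	return 0;
/* ... read dcbx_info->operational ... */
kfree(dcbx_info);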
+static u8 qed_dcbnl_getstate(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ bool enabled;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ enabled = dcbx_info->operational.enabled;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", enabled);
+ kfree(dcbx_info);
+
+ return enabled;
+}
+
+static u8 qed_dcbnl_setstate(struct qed_dev *cdev, u8 state)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", state);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ dcbx_set.enabled = !!state;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc ? 1 : 0;
+}
+
+static void qed_dcbnl_getpgtccfgtx(struct qed_dev *cdev, int tc, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d\n", tc);
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+ if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid tc %d\n", tc);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *pgid = dcbx_info->operational.params.ets_pri_tc_tbl[tc];
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_getpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 *bw_pct)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ *bw_pct = 0;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d\n", pgid);
+ if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *bw_pct = dcbx_info->operational.params.ets_tc_bw_tbl[pgid];
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "bw_pct = %d\n", *bw_pct);
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_getpgtccfgrx(struct qed_dev *cdev, int tc, u8 *prio,
+ u8 *bwg_id, u8 *bw_pct, u8 *up_map)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+ *prio = *bwg_id = *bw_pct = *up_map = 0;
+}
+
+static void qed_dcbnl_getpgbwgcfgrx(struct qed_dev *cdev,
+ int bwg_id, u8 *bw_pct)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+ *bw_pct = 0;
+}
+
+static void qed_dcbnl_getpfccfg(struct qed_dev *cdev,
+ int priority, u8 *setting)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d\n", priority);
+ if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", priority);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *setting = dcbx_info->operational.params.pfc.prio[priority];
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "setting = %d\n", *setting);
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_setpfccfg(struct qed_dev *cdev, int priority, u8 setting)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d setting = %d\n",
+ priority, setting);
+ if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", priority);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.prio[priority] = !!setting;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int rc = 0;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "capid = %d\n", capid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 1;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_PFC:
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
+ DCB_CAP_DCBX_VER_IEEE);
+ break;
+ default:
+ *cap = false;
+ rc = 1;
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "id = %d caps = %d\n", capid, *cap);
+ kfree(dcbx_info);
+
+ return rc;
+}
+
+static int qed_dcbnl_getnumtcs(struct qed_dev *cdev, int tcid, u8 *num)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int rc = 0;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d\n", tcid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = dcbx_info->operational.params.max_ets_tc;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = dcbx_info->operational.params.pfc.max_tc;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ kfree(dcbx_info);
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "numtcs = %d\n", *num);
+
+ return rc;
+}
+
+static u8 qed_dcbnl_getpfcstate(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ bool enabled;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ enabled = dcbx_info->operational.params.pfc.enabled;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d\n", enabled);
+ kfree(dcbx_info);
+
+ return enabled;
+}
+
+static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ u8 mode = 0;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ if (dcbx_info->operational.enabled)
+ mode |= DCB_CAP_DCBX_LLD_MANAGED;
+ if (dcbx_info->operational.ieee)
+ mode |= DCB_CAP_DCBX_VER_IEEE;
+ if (dcbx_info->operational.cee)
+ mode |= DCB_CAP_DCBX_VER_CEE;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode);
+ kfree(dcbx_info);
+
+ return mode;
+}
+
+static void qed_dcbnl_setpgtccfgtx(struct qed_dev *cdev,
+ int tc,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB,
+ "tc = %d pri_type = %d pgid = %d bw_pct = %d up_map = %d\n",
+ tc, pri_type, pgid, bw_pct, up_map);
+
+ if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid tc %d\n", tc);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_pri_tc_tbl[tc] = pgid;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_dcbnl_setpgtccfgrx(struct qed_dev *cdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+}
+
+static void qed_dcbnl_setpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 bw_pct)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d bw_pct = %d\n", pgid, bw_pct);
+ if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_tc_bw_tbl[pgid] = bw_pct;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_dcbnl_setpgbwgcfgrx(struct qed_dev *cdev, int pgid, u8 bw_pct)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+}
+
+static u8 qed_dcbnl_setall(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 1);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_dcbnl_setnumtcs(struct qed_dev *cdev, int tcid, u8 num)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d num = %d\n", tcid, num);
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.max_ets_tc = num;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.max_tc = num;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid tcid %d\n", tcid);
+ return -EINVAL;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return 0;
+}
+
+static void qed_dcbnl_setpfcstate(struct qed_dev *cdev, u8 state)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "new state = %d\n", state);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.enabled = !!state;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static int qed_dcbnl_getapp(struct qed_dev *cdev, u8 idtype, u16 idval)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_app_entry *entry;
+ bool ethtype;
+ u8 prio = 0;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_info->operational.params.app_entry[i];
+ if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) {
+ prio = entry->prio;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App entry (%d, %d) not found\n", idtype, idval);
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ return prio;
+}
+
+static int qed_dcbnl_setapp(struct qed_dev *cdev,
+ u8 idtype, u16 idval, u8 pri_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_app_entry *entry;
+ struct qed_ptt *ptt;
+ bool ethtype;
+ int rc, i;
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_set.config.params.app_entry[i];
+ if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
+ break;
+ /* First empty slot */
+ if (!entry->proto_id)
+ break;
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App table is full\n");
+ return -EBUSY;
+ }
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+ dcbx_set.config.params.app_entry[i].proto_id = idval;
+ dcbx_set.config.params.app_entry[i].prio = pri_map;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode);
+
+ if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE)) {
+ DP_INFO(hwfn, "Allowed mode is cee, ieee or both\n");
+ return 1;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ dcbx_set.ver_num = 0;
+ if (mode & DCB_CAP_DCBX_VER_CEE) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ dcbx_set.enabled = true;
+ }
+
+ if (mode & DCB_CAP_DCBX_VER_IEEE) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ dcbx_set.enabled = true;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return 0;
+}
+
+static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "Feature id = %d\n", featid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 1;
+
+ *flags = 0;
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (dcbx_info->operational.params.ets_enabled)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (dcbx_info->operational.params.pfc.enabled)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ if (dcbx_info->operational.params.app_valid)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+ kfree(dcbx_info);
+ return 1;
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "flags = %d\n", *flags);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static u8 qed_dcbnl_setfeatcfg(struct qed_dev *cdev, int featid, u8 flags)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ bool enabled, willing;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "featid = %d flags = %d\n",
+ featid, flags);
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ enabled = !!(flags & DCB_FEATCFG_ENABLE);
+ willing = !!(flags & DCB_FEATCFG_WILLING);
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_enabled = enabled;
+ dcbx_set.config.params.ets_willing = willing;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.enabled = enabled;
+ dcbx_set.config.params.pfc.willing = willing;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_willing = willing;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+ return 1;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return 0;
+}
+
+static int qed_dcbnl_peer_getappinfo(struct qed_dev *cdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ info->willing = dcbx_info->remote.params.app_willing;
+ info->error = dcbx_info->remote.params.app_error;
+ *app_count = dcbx_info->remote.params.num_app_entries;
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_peer_getapptable(struct qed_dev *cdev,
+ struct dcb_app *table)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ for (i = 0; i < dcbx_info->remote.params.num_app_entries; i++) {
+ if (dcbx_info->remote.params.app_entry[i].ethtype)
+ table[i].selector = DCB_APP_IDTYPE_ETHTYPE;
+ else
+ table[i].selector = DCB_APP_IDTYPE_PORTNUM;
+ table[i].priority = dcbx_info->remote.params.app_entry[i].prio;
+ table[i].protocol =
+ dcbx_info->remote.params.app_entry[i].proto_id;
+ }
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpfc(struct qed_dev *cdev, struct cee_pfc *pfc)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (dcbx_info->remote.params.pfc.prio[i])
+ pfc->pfc_en |= BIT(i);
+
+ pfc->tcs_supported = dcbx_info->remote.params.pfc.max_tc;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d tcs_supported = %d\n",
+ pfc->pfc_en, pfc->tcs_supported);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpg(struct qed_dev *cdev, struct cee_pg *pg)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ pg->willing = dcbx_info->remote.params.ets_willing;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ pg->pg_bw[i] = dcbx_info->remote.params.ets_tc_bw_tbl[i];
+ pg->prio_pg[i] = dcbx_info->remote.params.ets_pri_tc_tbl[i];
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "willing = %d", pg->willing);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
+ struct ieee_pfc *pfc, bool remote)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_params *params;
+ struct qed_dcbx_get *dcbx_info;
+ int rc, i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ return -EINVAL;
+ }
+
+ if (remote) {
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
+ rc = qed_dcbx_query_params(hwfn, dcbx_info,
+ QED_DCBX_REMOTE_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ params = &dcbx_info->remote.params;
+ } else {
+ params = &dcbx_info->operational.params;
+ }
+
+ pfc->pfc_cap = params->pfc.max_tc;
+ pfc->pfc_en = 0;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (params->pfc.prio[i])
+ pfc->pfc_en |= BIT(i);
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ return qed_dcbnl_get_ieee_pfc(cdev, pfc, false);
+}
+
+static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc, i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_dcbnl_get_ieee_ets(struct qed_dev *cdev,
+ struct ieee_ets *ets, bool remote)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_params *params;
+ int rc;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ if (remote) {
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
+ rc = qed_dcbx_query_params(hwfn, dcbx_info,
+ QED_DCBX_REMOTE_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ params = &dcbx_info->remote.params;
+ } else {
+ params = &dcbx_info->operational.params;
+ }
+
+ ets->ets_cap = params->max_ets_tc;
+ ets->willing = params->ets_willing;
+ ets->cbs = params->ets_cbs;
+ memcpy(ets->tc_tx_bw, params->ets_tc_bw_tbl, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_tsa, params->ets_tc_tsa_tbl, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, params->ets_pri_tc_tbl, sizeof(ets->prio_tc));
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ return qed_dcbnl_get_ieee_ets(cdev, ets, false);
+}
+
+static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.max_ets_tc = ets->ets_cap;
+ dcbx_set.config.params.ets_willing = ets->willing;
+ dcbx_set.config.params.ets_cbs = ets->cbs;
+ memcpy(dcbx_set.config.params.ets_tc_bw_tbl, ets->tc_tx_bw,
+ sizeof(ets->tc_tx_bw));
+ memcpy(dcbx_set.config.params.ets_tc_tsa_tbl, ets->tc_tsa,
+ sizeof(ets->tc_tsa));
+ memcpy(dcbx_set.config.params.ets_pri_tc_tbl, ets->prio_tc,
+ sizeof(ets->prio_tc));
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ return qed_dcbnl_get_ieee_ets(cdev, ets, true);
+}
+
+int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
+}
+
+int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_app_entry *entry;
+ bool ethtype;
+ u8 prio = 0;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ /* ieee defines the selector field value for ethertype to be 1 */
+ ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_info->operational.params.app_entry[i];
+ if ((entry->ethtype == ethtype) &&
+ (entry->proto_id == app->protocol)) {
+ prio = entry->prio;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App entry (%d, %d) not found\n", app->selector,
+ app->protocol);
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ app->priority = ffs(prio) - 1;
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_app_entry *entry;
+ struct qed_ptt *ptt;
+ bool ethtype;
+ int rc, i;
+
+ if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", app->priority);
+ return -EINVAL;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ /* ieee defines the selector field value for ethertype to be 1 */
+ ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_set.config.params.app_entry[i];
+ if ((entry->ethtype == ethtype) &&
+ (entry->proto_id == app->protocol))
+ break;
+ /* First empty slot */
+ if (!entry->proto_id)
+ break;
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App table is full\n");
+ return -EBUSY;
+ }
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+ dcbx_set.config.params.app_entry[i].proto_id = app->protocol;
+ dcbx_set.config.params.app_entry[i].prio = BIT(app->priority);
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass = {
+ .getstate = qed_dcbnl_getstate,
+ .setstate = qed_dcbnl_setstate,
+ .getpgtccfgtx = qed_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = qed_dcbnl_getpgbwgcfgtx,
+ .getpgtccfgrx = qed_dcbnl_getpgtccfgrx,
+ .getpgbwgcfgrx = qed_dcbnl_getpgbwgcfgrx,
+ .getpfccfg = qed_dcbnl_getpfccfg,
+ .setpfccfg = qed_dcbnl_setpfccfg,
+ .getcap = qed_dcbnl_getcap,
+ .getnumtcs = qed_dcbnl_getnumtcs,
+ .getpfcstate = qed_dcbnl_getpfcstate,
+ .getdcbx = qed_dcbnl_getdcbx,
+ .setpgtccfgtx = qed_dcbnl_setpgtccfgtx,
+ .setpgtccfgrx = qed_dcbnl_setpgtccfgrx,
+ .setpgbwgcfgtx = qed_dcbnl_setpgbwgcfgtx,
+ .setpgbwgcfgrx = qed_dcbnl_setpgbwgcfgrx,
+ .setall = qed_dcbnl_setall,
+ .setnumtcs = qed_dcbnl_setnumtcs,
+ .setpfcstate = qed_dcbnl_setpfcstate,
+ .setapp = qed_dcbnl_setapp,
+ .setdcbx = qed_dcbnl_setdcbx,
+ .setfeatcfg = qed_dcbnl_setfeatcfg,
+ .getfeatcfg = qed_dcbnl_getfeatcfg,
+ .getapp = qed_dcbnl_getapp,
+ .peer_getappinfo = qed_dcbnl_peer_getappinfo,
+ .peer_getapptable = qed_dcbnl_peer_getapptable,
+ .cee_peer_getpfc = qed_dcbnl_cee_peer_getpfc,
+ .cee_peer_getpg = qed_dcbnl_cee_peer_getpg,
+ .ieee_getpfc = qed_dcbnl_ieee_getpfc,
+ .ieee_setpfc = qed_dcbnl_ieee_setpfc,
+ .ieee_getets = qed_dcbnl_ieee_getets,
+ .ieee_setets = qed_dcbnl_ieee_setets,
+ .ieee_peer_getpfc = qed_dcbnl_ieee_peer_getpfc,
+ .ieee_peer_getets = qed_dcbnl_ieee_peer_getets,
+ .ieee_getapp = qed_dcbnl_ieee_getapp,
+ .ieee_setapp = qed_dcbnl_ieee_setapp,
+};
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index e7f834dbda2d..9ba681643d05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -33,6 +33,24 @@ struct qed_dcbx_app_data {
u8 tc; /* Traffic Class */
};
+#ifdef CONFIG_DCB
+#define QED_DCBX_VERSION_DISABLED 0
+#define QED_DCBX_VERSION_IEEE 1
+#define QED_DCBX_VERSION_CEE 2
+
+struct qed_dcbx_set {
+#define QED_DCBX_OVERRIDE_STATE BIT(0)
+#define QED_DCBX_OVERRIDE_PFC_CFG BIT(1)
+#define QED_DCBX_OVERRIDE_ETS_CFG BIT(2)
+#define QED_DCBX_OVERRIDE_APP_CFG BIT(3)
+#define QED_DCBX_OVERRIDE_DSCP_CFG BIT(4)
+ u32 override_flags;
+ bool enabled;
+ struct qed_dcbx_admin_params config;
+ u32 ver_num;
+};
+#endif
+
struct qed_dcbx_results {
bool dcbx_enabled;
u8 pf_id;
@@ -55,6 +73,9 @@ struct qed_dcbx_info {
struct qed_dcbx_results results;
struct dcbx_mib operational;
struct dcbx_mib remote;
+#ifdef CONFIG_DCB
+ struct qed_dcbx_set set;
+#endif
u8 dcbx_cap;
};
@@ -67,6 +88,13 @@ struct qed_dcbx_mib_meta_data {
u32 addr;
};
+#ifdef CONFIG_DCB
+int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *);
+
+int qed_dcbx_config_params(struct qed_hwfn *,
+ struct qed_ptt *, struct qed_dcbx_set *, bool);
+#endif
+
/* QED local interface routines */
int
qed_dcbx_mib_update_event(struct qed_hwfn *,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e45cff4df280..b26fe267a150 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -2222,6 +2222,110 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
return 0;
}
+static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 hw_addr, void *p_eth_qzone,
+ size_t eth_qzone_size, u8 timeset)
+{
+ struct coalescing_timeset *p_coal_timeset;
+
+ if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
+ DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
+ return -EINVAL;
+ }
+
+ p_coal_timeset = p_eth_qzone;
+ memset(p_coal_timeset, 0, eth_qzone_size);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
+ qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
+
+ return 0;
+}
+
+int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 coalesce, u8 qid, u16 sb_id)
+{
+ struct ustorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u16 fw_qid = 0;
+ u32 address;
+ int rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7 bits wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return -EINVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+ if (rc)
+ return rc;
+
+ rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
+ if (rc)
+ goto out;
+
+ address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+ rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct ustorm_eth_queue_zone), timeset);
+ if (rc)
+ goto out;
+
+ p_hwfn->cdev->rx_coalesce_usecs = coalesce;
+out:
+ return rc;
+}
+
+int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 coalesce, u8 qid, u16 sb_id)
+{
+ struct xstorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u16 fw_qid = 0;
+ u32 address;
+ int rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7 bits wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return -EINVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+ if (rc)
+ return rc;
+
+ rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
+ if (rc)
+ goto out;
+
+ address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+ rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct xstorm_eth_queue_zone), timeset);
+ if (rc)
+ goto out;
+
+ p_hwfn->cdev->tx_coalesce_usecs = coalesce;
+out:
+ return rc;
+}
+
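Both helpers reduce a microsecond interval to a 7-bit timeset plus a 2-bit timer resolution; the hardware then counts timeset << timer_res, so granularity (and rounding error) grows with the requested value. For example, 300 usec yields timer_res = 2 and timeset = 75 (75 << 2 = 300 exactly), while 301 usec truncates to the same timeset, a 1 usec error that can reach 3 usec near the 511 usec ceiling. The shared ladder as a standalone sketch:

    /* Split a coalescing interval into (timer_res, timeset); mirrors the
     * if/else ladder used by both the Rx and Tx paths above.
     */
    static int coalesce_to_timeset(u16 coalesce, u8 *timer_res, u8 *timeset)
    {
    	if (coalesce <= 0x7F)
    		*timer_res = 0;
    	else if (coalesce <= 0xFF)
    		*timer_res = 1;
    	else if (coalesce <= 0x1FF)
    		*timer_res = 2;
    	else
    		return -EINVAL;		/* more than 511 usec is rejected */

    	*timeset = (u8)(coalesce >> *timer_res);
    	return 0;
    }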
/* Calculate final WFQ values for all vports and configure them.
* After this configuration each vport will have
* approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index f810ce45d463..343bb0344f62 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -212,6 +212,20 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
u32 size_in_dwords,
u32 flags);
+ /**
+ * @brief qed_dmae_grc2host - Read data from a GRC source address
+ * into a host destination buffer using the given ptt
+ *
+ * @param p_ptt
+ * @param grc_addr (dmae_data_offset)
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param flags - one of the flags defined above
+ */
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
+ u32 flags);
+
/**
 * @brief qed_dmae_host2host - copy data from a source address
 * to a destination address (for SRIOV) using the given ptt
@@ -308,4 +322,37 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 id, bool is_vf);
+/**
+ * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
+ * Coalescing can be configured up to 511 usec, but with decreasing accuracy
+ * [the bigger the value the less accurate], up to an error of 3 usec for
+ * the highest values.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in micro seconds.
+ * @param qid - Queue index.
+ * @param sb_id - SB Id
+ *
+ * @return int
+ */
+int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 coalesce, u8 qid, u16 sb_id);
+
+/**
+ * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue
+ * While the API allows setting coalescing per-qid, all Tx queues sharing an
+ * SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
+ * 0x100-0x1ff]; otherwise the configuration would break.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in micro seconds.
+ * @param qid - Queue index.
+ * @param sb_id - SB Id
+ *
+ * @return int
+ */
+int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 coalesce, u8 qid, u16 sb_id);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 72e0ad6d16c6..592784019994 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -7244,6 +7244,9 @@ struct public_drv_mb {
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 2693c30981eb..e17885321faf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -769,6 +769,29 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
}
int
+qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
+ dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct qed_dmae_params params;
+ int rc;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = flags;
+
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
+ dest_addr, QED_DMAE_ADDRESS_GRC,
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ size_in_dwords, &params);
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
+
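Note the addressing convention: callers pass a byte offset into the GRC and the helper converts it to the dword units the DMAE engine expects (grc_addr / sizeof(u32)), while size_in_dwords is already dword-granular. A usage sketch, borrowed from the CAU status-block read that qed_int_set_timer_res() performs later in this patch:

    struct cau_sb_entry sb_entry;	/* one u64, i.e. two dwords, per SB */
    int rc;

    /* byte offset into GRC; divided by sizeof(u32) inside the helper */
    rc = qed_dmae_grc2host(p_hwfn, p_ptt,
    		       CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64),
    		       (u64)(uintptr_t)&sb_entry,	/* host buffer */
    		       2 /* dwords */, 0 /* flags */);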
+int
qed_dmae_host2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t source_addr,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 09a6ad3d22dd..8fa50fa23c8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2418,6 +2418,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cau_state;
+ u8 timer_res;
memset(p_sb_entry, 0, sizeof(*p_sb_entry));
@@ -2443,6 +2444,23 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
}
+ /* Coalesce = (timeset << timer-res), timeset is 7 bits wide */
+ if (cdev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (cdev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ if (cdev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (cdev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
@@ -2484,17 +2502,28 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
/* Configure pi coalescing if set */
if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
- u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
- (QED_CAU_DEF_RX_TIMER_RES + 1);
+ u8 timeset, timer_res;
u8 num_tc = 1, i;
+ /* timeset = (coalesce >> timer-res), timeset is 7 bits wide */
+ if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
QED_COAL_RX_STATE_MACHINE,
timeset);
- timeset = p_hwfn->cdev->tx_coalesce_usecs >>
- (QED_CAU_DEF_TX_TIMER_RES + 1);
-
+ if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
for (i = 0; i < num_tc; i++) {
qed_int_cau_conf_pi(p_hwfn, p_ptt,
igu_sb_id, TX_PI(i),
@@ -3199,3 +3228,39 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev)
for_each_hwfn(cdev, i)
cdev->hwfns[i].b_int_requested = false;
}
+
+int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx)
+{
+ struct cau_sb_entry sb_entry;
+ int rc;
+
+ if (!p_hwfn->hw_init_done) {
+ DP_ERR(p_hwfn, "hardware not initialized yet\n");
+ return -EINVAL;
+ }
+
+ rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64),
+ (u64)(uintptr_t)&sb_entry, 2, 0);
+ if (rc) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ if (tx)
+ SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+ else
+ SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64), 2, 0);
+ if (rc) {
+ DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 20b468637504..0948be64dc78 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -389,6 +389,9 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
u16 vf_number,
u8 vf_valid);
+int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx);
+
#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 2ee496e26df6..a12c6caa6c66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -72,6 +72,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->mtu = cpu_to_le16(p_params->mtu);
p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+ p_ramrod->untagged = p_params->only_untagged;
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@@ -247,10 +248,6 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
- SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
- (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
- !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
-
SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
@@ -1756,7 +1753,8 @@ static int qed_start_vport(struct qed_dev *cdev,
start.vport_id, start.mtu);
}
- qed_reset_vport_stats(cdev);
+ if (params->clear_stats)
+ qed_reset_vport_stats(cdev);
return 0;
}
@@ -2166,11 +2164,18 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif
+#ifdef CONFIG_DCB
+extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
+#endif
+
static const struct qed_eth_ops qed_eth_ops_pass = {
.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
.iov = &qed_iov_ops_pass,
#endif
+#ifdef CONFIG_DCB
+ .dcb = &qed_dcbnl_ops_pass,
+#endif
.fill_dev_info = &qed_fill_eth_dev_info,
.register_ops = &qed_register_eth_ops,
.check_mac = &qed_check_mac,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f41f78d6a3f9..1f13abb5c316 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1088,6 +1088,7 @@ static int qed_get_port_type(u32 media_type)
case MEDIA_SFPP_10G_FIBER:
case MEDIA_SFP_1G_FIBER:
case MEDIA_XFP_FIBER:
+ case MEDIA_MODULE_FIBER:
case MEDIA_KR:
port_type = PORT_FIBRE;
break;
@@ -1108,6 +1109,39 @@ static int qed_get_port_type(u32 media_type)
return port_type;
}
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *link_caps)
+{
+ void *p;
+
+ if (!IS_PF(hwfn->cdev)) {
+ qed_vf_get_link_params(hwfn, params);
+ qed_vf_get_link_state(hwfn, link);
+ qed_vf_get_link_caps(hwfn, link_caps);
+
+ return 0;
+ }
+
+ p = qed_mcp_get_link_params(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(params, p, sizeof(*params));
+
+ p = qed_mcp_get_link_state(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link, p, sizeof(*link));
+
+ p = qed_mcp_get_link_capabilities(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link_caps, p, sizeof(*link_caps));
+
+ return 0;
+}
+
static void qed_fill_link(struct qed_hwfn *hwfn,
struct qed_link_output *if_link)
{
@@ -1119,15 +1153,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */
- if (IS_PF(hwfn->cdev)) {
- memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
- memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
- sizeof(link_caps));
- } else {
- qed_vf_get_link_params(hwfn, &params);
- qed_vf_get_link_state(hwfn, &link);
- qed_vf_get_link_caps(hwfn, &link_caps);
+ if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
+ dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+ return;
}
/* Set the link parameters to pass to protocol driver */
@@ -1276,6 +1304,38 @@ static int qed_drain(struct qed_dev *cdev)
return 0;
}
+static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
+{
+ *rx_coal = cdev->rx_coalesce_usecs;
+ *tx_coal = cdev->tx_coalesce_usecs;
+}
+
+static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
+ u8 qid, u16 sb_id)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
+ int hwfn_index;
+ int status = 0;
+
+ hwfn_index = qid % cdev->num_hwfns;
+ hwfn = &cdev->hwfns[hwfn_index];
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
+ qid / cdev->num_hwfns, sb_id);
+ if (status)
+ goto out;
+ status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
+ qid / cdev->num_hwfns, sb_id);
+out:
+ qed_ptt_release(hwfn, ptt);
+
+ return status;
+}
+
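On a 100G (two-engine) adapter the queues are striped across hwfns, so a global queue id splits into an engine index and an engine-local id; e.g. with num_hwfns = 2, global qid 5 lands on hwfn 1 as local queue 2. The split used above, condensed into a hypothetical helper for illustration:

    /* Global qid -> (owning engine, engine-local qid). */
    static inline void qed_qid_split(struct qed_dev *cdev, u8 qid,
    				 int *hwfn_index, u8 *local_qid)
    {
    	*hwfn_index = qid % cdev->num_hwfns;	/* which engine */
    	*local_qid = qid / cdev->num_hwfns;	/* index within it */
    }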
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
@@ -1322,5 +1382,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.update_msglvl = &qed_init_dp,
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
+ .get_coalesce = &qed_get_coalesce,
+ .set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index aa08ddbd95df..f6b86ca1ff79 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -80,6 +80,8 @@
0x1f00000UL
#define BAR0_MAP_REG_TSDM_RAM \
0x1c80000UL
+#define BAR0_MAP_REG_XSDM_RAM \
+ 0x1e00000UL
#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
0x5011f4UL
#define PRS_REG_SEARCH_TCP \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index ad9bf5c85c3f..97ffeae262bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -213,19 +213,15 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
DQ_XCM_CORE_SPQ_PROD_CMD);
db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
- /* validate producer is up to-date */
- rmb();
-
db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
- /* do not reorder */
- barrier();
+ /* make sure the SPQE is updated before the doorbell */
+ wmb();
DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
 /* make sure the doorbell is rung */
- mmiowb();
+ wmb();
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
@@ -620,7 +616,9 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
*p_en2 = *p_ent;
- kfree(p_ent);
+ /* EBLOCK is responsible for freeing the allocated p_ent */
+ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+ kfree(p_ent);
p_ent = p_en2;
}
@@ -755,6 +753,15 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
* Thus, after gaining the answer perform the cleanup here.
*/
rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ /* This is an allocated p_ent which should not be
+ * returned to the pool.
+ */
+ kfree(p_ent);
+ return rc;
+ }
+
if (rc)
goto spq_post_fail2;
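Together with the qed_spq_add_entry() change above, this settles the entry's ownership: an EBLOCK entry staged through unlimited_pending was kmalloc'ed on the fly and is kfree'd right here after blocking, while pool-backed entries are returned by the completion path below. The rule condensed into a hedged predicate (a hypothetical helper, not part of the patch):

    /* True when qed_spq_post() itself must kfree() the entry. */
    static bool qed_spq_caller_frees(const struct qed_spq_entry *p_ent,
    				 const struct qed_spq *p_spq)
    {
    	return p_ent->comp_mode == QED_SPQ_MODE_EBLOCK &&
    	       p_ent->queue == &p_spq->unlimited_pending;
    }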
@@ -850,8 +857,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
- if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
- /* EBLOCK is responsible for freeing its own entry */
+ if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+ (found->queue == &p_spq->unlimited_pending))
+ /* EBLOCK is responsible for returning its own entry into the
+ * free list, unless it originally added the entry into the
+ * unlimited pending list.
+ */
qed_spq_return_entry(p_hwfn, found);
/* Attempt to post pending requests */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index eb75b8234a7c..4d161c751c12 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -21,18 +21,18 @@
#include "qed_vf.h"
/* IOV ramrods */
-static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
- u32 concrete_vfid, u16 opaque_vfid)
+static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
struct vf_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ u8 fp_minor;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
- init_data.opaque_fid = opaque_vfid;
+ init_data.opaque_fid = p_vf->opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -43,12 +43,39 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.vf_start;
- p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
- p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);
+ p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
+ p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ case QED_PCI_ETH_ROCE:
+ p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
+ p_hwfn->hw_info.personality);
+ return -EINVAL;
+ }
+
+ fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
+ if (fp_minor > ETH_HSI_VER_MINOR) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
+ p_vf->abs_vf_id,
+ ETH_HSI_VER_MAJOR,
+ fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+ fp_minor = ETH_HSI_VER_MINOR;
+ }
- p_ramrod->personality = PERSONALITY_ETH;
p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
- p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - Starting using HSI %02x.%02x\n",
+ p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -119,6 +146,45 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
return vf;
}
+static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 rx_qid)
+{
+ if (rx_qid >= p_vf->num_rxqs)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
+ return rx_qid < p_vf->num_rxqs;
+}
+
+static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 tx_qid)
+{
+ if (tx_qid >= p_vf->num_txqs)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
+ return tx_qid < p_vf->num_txqs;
+}
+
+static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 sb_idx)
+{
+ int i;
+
+ for (i = 0; i < p_vf->num_sbs; i++)
+ if (p_vf->igu_sbs[i] == sb_idx)
+ return true;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
+ p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
+
+ return false;
+}
+
int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
int vfid, struct qed_ptt *p_ptt)
{
@@ -295,6 +361,9 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
(vf->abs_vf_id << 8);
vf->vport_id = idx + 1;
+
+ vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
+ vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
}
}
@@ -600,17 +669,6 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
/* unpretend */
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
- if (vf->state != VF_STOPPED) {
- DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
- vf->abs_vf_id);
- return -EINVAL;
- }
-
- /* Start VF */
- rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
- if (rc)
- DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
-
vf->state = VF_FREE;
return rc;
@@ -854,7 +912,6 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params params;
struct qed_mcp_link_state link;
struct qed_vf_info *vf = NULL;
- int rc = 0;
vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
if (!vf) {
@@ -876,18 +933,8 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
- if (vf->state != VF_STOPPED) {
- /* Stopping the VF */
- rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
-
- if (rc != 0) {
- DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
- rc);
- return rc;
- }
-
- vf->state = VF_STOPPED;
- }
+ /* Forget the VF's acquisition message */
+ memset(&vf->acquire, 0, sizeof(vf->acquire));
 /* disabling interrupts and resetting permission table was done during
* vf-close, however, we could get here without going through vf_close
@@ -1118,8 +1165,6 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
p_vf->vf_bulletin = 0;
p_vf->vport_instance = 0;
- p_vf->num_mac_filters = 0;
- p_vf->num_vlan_filters = 0;
p_vf->configured_features = 0;
/* If VF previously requested less resources, go back to default */
@@ -1132,9 +1177,95 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
p_vf->vf_queues[i].rxq_active = 0;
memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+ memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
+static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ int i;
+
+ /* Queue related information */
+ p_resp->num_rxqs = p_vf->num_rxqs;
+ p_resp->num_txqs = p_vf->num_txqs;
+ p_resp->num_sbs = p_vf->num_sbs;
+
+ for (i = 0; i < p_resp->num_sbs; i++) {
+ p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
+ p_resp->hw_sbs[i].sb_qid = 0;
+ }
+
+ /* These fields are filled for backward compatibility.
+ * Unused by modern VFs.
+ */
+ for (i = 0; i < p_resp->num_rxqs; i++) {
+ qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
+ (u16 *)&p_resp->hw_qid[i]);
+ p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+ }
+
+ /* Filter related information */
+ p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
+ p_req->num_mac_filters);
+ p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
+ p_req->num_vlan_filters);
+
+ /* This isn't really needed/enforced, but some legacy VFs might depend
+ * on the correct filling of this field.
+ */
+ p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
+
+ /* Validate sufficient resources for VF */
+ if (p_resp->num_rxqs < p_req->num_rxqs ||
+ p_resp->num_txqs < p_req->num_txqs ||
+ p_resp->num_sbs < p_req->num_sbs ||
+ p_resp->num_mac_filters < p_req->num_mac_filters ||
+ p_resp->num_vlan_filters < p_req->num_vlan_filters ||
+ p_resp->num_mc_filters < p_req->num_mc_filters) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
+ p_vf->abs_vf_id,
+ p_req->num_rxqs,
+ p_resp->num_rxqs,
+ p_req->num_txqs,
+ p_resp->num_txqs,
+ p_req->num_sbs,
+ p_resp->num_sbs,
+ p_req->num_mac_filters,
+ p_resp->num_mac_filters,
+ p_req->num_vlan_filters,
+ p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters);
+ return PFVF_STATUS_NO_RESOURCE;
+ }
+
+ return PFVF_STATUS_SUCCESS;
+}
+
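The min_t() capping makes the response self-describing: the PF never grants more than its per-VF budget, and the insufficiency check only fires when the grant falls short of the request, in which case the VF is expected to retry with the PF-recommended amounts (see qed_vf_pf_acquire_reduce_resc() further down in this patch). One worked round, with illustrative numbers:

    /* VF requests 4 MAC filters; the PF budget (p_vf->num_mac_filters) is 1 */
    p_resp->num_mac_filters = min_t(u8, 1, 4);	/* grant = 1 */

    /* 1 < 4 -> PFVF_STATUS_NO_RESOURCE; the VF humbles its request to 1
     * and the same min_t() satisfies the check on the next attempt.
     */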
+static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
+ struct pfvf_stats_info *p_stats)
+{
+ p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ offsetof(struct mstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+ p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
+ offsetof(struct ustorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+ p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
+ offsetof(struct pstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+ p_stats->tstats.address = 0;
+ p_stats->tstats.len = 0;
+}
+
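Each (address, len) pair describes a read-only window into the matching storm's RAM, computed entirely at compile time from the zone layouts; tstats is deliberately zeroed because no tstorm queue-stat zone exists for VFs. A hedged sketch of how a VF-side reader could consume one pair (bar0 and the memcpy_fromio() path are assumptions for illustration, not part of this patch):

    struct eth_ustorm_per_queue_stat ustats;

    /* bar0: a hypothetical void __iomem * mapping of the VF's BAR0 */
    memcpy_fromio(&ustats,
    	      bar0 + stats_info->ustats.address,
    	      stats_info->ustats.len);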
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
@@ -1143,25 +1274,27 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
- u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
+ u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
struct pf_vf_resc *resc = &resp->resc;
+ int rc;
+
+ memset(resp, 0, sizeof(*resp));
/* Validate FW compatibility */
- if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
- req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
- req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
- req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
+ if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
DP_INFO(p_hwfn,
- "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
+ "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
vf->abs_vf_id,
- req->vfdev_info.fw_major,
- req->vfdev_info.fw_minor,
- req->vfdev_info.fw_revision,
- req->vfdev_info.fw_engineering,
- FW_MAJOR_VERSION,
- FW_MINOR_VERSION,
- FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
- vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ req->vfdev_info.eth_fp_hsi_major,
+ req->vfdev_info.eth_fp_hsi_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+ /* Write the PF version so that VF would know which version
+ * is supported.
+ */
+ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+ pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
goto out;
}
@@ -1171,16 +1304,13 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
DP_INFO(p_hwfn,
"VF[%d] is running an old driver that doesn't support 100g\n",
vf->abs_vf_id);
- vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
- memset(resp, 0, sizeof(*resp));
+ /* Store the acquire message */
+ memcpy(&vf->acquire, req, sizeof(vf->acquire));
- /* Fill in vf info stuff */
vf->opaque_fid = req->vfdev_info.opaque_fid;
- vf->num_mac_filters = 1;
- vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
vf->vf_bulletin = req->bulletin_addr;
vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
@@ -1196,26 +1326,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->num_hwfns > 1)
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
- pfdev_info->stats_info.mstats.address =
- PXP_VF_BAR0_START_MSDM_ZONE_B +
- offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
- pfdev_info->stats_info.mstats.len =
- sizeof(struct eth_mstorm_per_queue_stat);
-
- pfdev_info->stats_info.ustats.address =
- PXP_VF_BAR0_START_USDM_ZONE_B +
- offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
- pfdev_info->stats_info.ustats.len =
- sizeof(struct eth_ustorm_per_queue_stat);
-
- pfdev_info->stats_info.pstats.address =
- PXP_VF_BAR0_START_PSDM_ZONE_B +
- offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
- pfdev_info->stats_info.pstats.len =
- sizeof(struct eth_pstorm_per_queue_stat);
-
- pfdev_info->stats_info.tstats.address = 0;
- pfdev_info->stats_info.tstats.len = 0;
+ qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
@@ -1223,36 +1334,31 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
pfdev_info->fw_minor = FW_MINOR_VERSION;
pfdev_info->fw_rev = FW_REVISION_VERSION;
pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+ pfdev_info->minor_fp_hsi = min_t(u8,
+ ETH_HSI_VER_MINOR,
+ req->vfdev_info.eth_fp_hsi_minor);
pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
pfdev_info->dev_type = p_hwfn->cdev->type;
pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
- resc->num_rxqs = vf->num_rxqs;
- resc->num_txqs = vf->num_txqs;
- resc->num_sbs = vf->num_sbs;
- for (i = 0; i < resc->num_sbs; i++) {
- resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
- resc->hw_sbs[i].sb_qid = 0;
- }
+ /* Fill resources available to VF; Make sure there are enough to
+ * satisfy the VF's request.
+ */
+ vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
+ &req->resc_request, resc);
+ if (vfpf_status != PFVF_STATUS_SUCCESS)
+ goto out;
- for (i = 0; i < resc->num_rxqs; i++) {
- qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
- (u16 *)&resc->hw_qid[i]);
- resc->cid[i] = vf->vf_queues[i].fw_cid;
+ /* Start the VF in FW */
+ rc = qed_sp_vf_start(p_hwfn, vf);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
+ vfpf_status = PFVF_STATUS_FAILURE;
+ goto out;
}
- resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
- req->resc_request.num_mac_filters);
- resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
- req->resc_request.num_vlan_filters);
-
- /* This isn't really required as VF isn't limited, but some VFs might
- * actually test this value, so need to provide it.
- */
- resc->num_mc_filters = req->resc_request.num_mc_filters;
-
/* Fill agreed size of bulletin board in response */
resp->bulletin_size = vf->bulletin.size;
qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
@@ -1620,12 +1726,17 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
{
struct qed_queue_start_common_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
struct vfpf_start_rxq_tlv *req;
int rc;
memset(&params, 0, sizeof(params));
req = &mbx->req_virt->start_rxq;
+
+ if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+ !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
params.vf_qid = req->rx_qid;
params.vport_id = vf->vport_id;
@@ -1643,22 +1754,48 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
if (rc) {
status = PFVF_STATUS_FAILURE;
} else {
+ status = PFVF_STATUS_SUCCESS;
vf->vf_queues[req->rx_qid].rxq_active = true;
vf->num_active_rxqs++;
}
+out:
qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
}
+static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
+ sizeof(*p_tlv));
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if (status == PFVF_STATUS_SUCCESS) {
+ u16 qid = mbx->req_virt->start_txq.tx_qid;
+
+ p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
+ DQ_DEMS_LEGACY);
+ }
+
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
+}
+
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
- u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_queue_start_common_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
union qed_qm_pq_params pq_params;
- u8 status = PFVF_STATUS_SUCCESS;
struct vfpf_start_txq_tlv *req;
int rc;
@@ -1669,6 +1806,11 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
memset(&params, 0, sizeof(params));
req = &mbx->req_virt->start_txq;
+
+ if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+ !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
params.vport_id = vf->vport_id;
params.sb = req->hw_sb;
@@ -1682,13 +1824,15 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
req->pbl_addr,
req->pbl_size, &pq_params);
- if (rc)
+ if (rc) {
status = PFVF_STATUS_FAILURE;
- else
+ } else {
+ status = PFVF_STATUS_SUCCESS;
vf->vf_queues[req->tx_qid].txq_active = true;
+ }
- qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
- length, status);
+out:
+ qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
}
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
@@ -2113,6 +2257,16 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
u16 length;
int rc;
+ /* Validate the VF can send such a request */
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing vport update\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
memset(&params, 0, sizeof(params));
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
@@ -2155,15 +2309,12 @@ out:
qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
-static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *p_vf,
- struct qed_filter_ucast *p_params)
+static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
{
int i;
- if (p_params->type == QED_FILTER_MAC)
- return 0;
-
/* First remove entries and then add new ones */
if (p_params->opcode == QED_FILTER_REMOVE) {
for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
@@ -2216,6 +2367,80 @@ static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
return 0;
}
+static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int i;
+
+ /* If we're in forced-mode, we don't allow any change */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ return 0;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == QED_FILTER_REMOVE) {
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (ether_addr_equal(p_vf->shadow_config.macs[i],
+ p_params->mac)) {
+ memset(p_vf->shadow_config.macs[i], 0,
+ ETH_ALEN);
+ break;
+ }
+ }
+
+ if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "MAC isn't configured\n");
+ return -EINVAL;
+ }
+ } else if (p_params->opcode == QED_FILTER_REPLACE ||
+ p_params->opcode == QED_FILTER_FLUSH) {
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
+ memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
+ }
+
+ /* List the new MAC address */
+ if (p_params->opcode != QED_FILTER_ADD &&
+ p_params->opcode != QED_FILTER_REPLACE)
+ return 0;
+
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
+ ether_addr_copy(p_vf->shadow_config.macs[i],
+ p_params->mac);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Added MAC at %d entry in shadow\n", i);
+ break;
+ }
+ }
+
+ if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int rc = 0;
+
+ if (p_params->type == QED_FILTER_MAC) {
+ rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
+ if (rc)
+ return rc;
+ }
+
+ if (p_params->type == QED_FILTER_VLAN)
+ rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
+
+ return rc;
+}
+
int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
int vfid, struct qed_filter_ucast *params)
{
@@ -2360,11 +2585,27 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf)
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc = 0;
qed_iov_vf_cleanup(p_hwfn, p_vf);
+ if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
+ /* Stopping the VF */
+ rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
+ p_vf->opaque_fid);
+
+ if (rc) {
+ DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ p_vf->state = VF_STOPPED;
+ }
+
qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
- length, PFVF_STATUS_SUCCESS);
+ length, status);
}
static int
@@ -2616,7 +2857,6 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
{
struct qed_iov_vf_mbx *mbx;
struct qed_vf_info *p_vf;
- int i;
p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
if (!p_vf)
@@ -2625,9 +2865,8 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
mbx = &p_vf->vf_mbx;
/* qed_iov_process_mbx_request */
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
- "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
mbx->first_tlv = mbx->req_virt->first_tlv;
@@ -2681,15 +2920,28 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
* support them. Or this may be because someone wrote a crappy
* VF driver and is sending garbage over the channel.
*/
- DP_ERR(p_hwfn,
- "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
- mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
-
- for (i = 0; i < 20; i++) {
+ DP_NOTICE(p_hwfn,
+ "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
+ p_vf->abs_vf_id,
+ mbx->first_tlv.tl.type,
+ mbx->first_tlv.tl.length,
+ mbx->first_tlv.padding, mbx->first_tlv.reply_address);
+
+ /* Try replying in case reply address matches the acquisition's
+ * posted address.
+ */
+ if (p_vf->acquire.first_tlv.reply_address &&
+ (mbx->first_tlv.reply_address ==
+ p_vf->acquire.first_tlv.reply_address)) {
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_NOT_SUPPORTED);
+ } else {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
- "%x ",
- mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
+ "VF[%02x]: Can't respond to TLV - no valid reply address\n",
+ p_vf->abs_vf_id);
}
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index c8667c65e685..0dd23e409b3f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -10,19 +10,23 @@
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"
+
+#define QED_ETH_VF_NUM_MAC_FILTERS 1
+#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_VF_ARRAY_LENGTH (3)
+#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev) ((cdev)->b_is_vf)
#define IS_PF(cdev) (!((cdev)->b_is_vf))
-#ifdef CONFIG_QED_SRIOV
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
+#define IS_VF(cdev) (0)
+#define IS_PF(cdev) (1)
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
#define QED_MAX_VF_CHAINS_PER_PF 16
-#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
@@ -118,6 +122,8 @@ struct qed_vf_shadow_config {
/* Shadow copy of all guest vlans */
struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
+ /* Shadow copy of all configured MACs; Empty if forcing MACs */
+ u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
u8 inner_vlan_removal;
};
@@ -131,6 +137,9 @@ struct qed_vf_info {
struct qed_bulletin bulletin;
dma_addr_t vf_bulletin;
+ /* PF saves a copy of the last VF acquire message */
+ struct vfpf_acquire_tlv acquire;
+
u32 concrete_fid;
u16 opaque_fid;
u16 mtu;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 72e69c0ec10d..9819230947bf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -117,36 +117,64 @@ exit:
}
#define VF_ACQUIRE_THRESH 3
-#define VF_ACQUIRE_MAC_FILTERS 1
+static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
+ p_req->num_rxqs,
+ p_resp->num_rxqs,
+ p_req->num_txqs,
+ p_resp->num_txqs,
+ p_req->num_sbs,
+ p_resp->num_sbs,
+ p_req->num_mac_filters,
+ p_resp->num_mac_filters,
+ p_req->num_vlan_filters,
+ p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters);
+
+ /* humble our request */
+ p_req->num_txqs = p_resp->num_txqs;
+ p_req->num_rxqs = p_resp->num_rxqs;
+ p_req->num_sbs = p_resp->num_sbs;
+ p_req->num_mac_filters = p_resp->num_mac_filters;
+ p_req->num_vlan_filters = p_resp->num_vlan_filters;
+ p_req->num_mc_filters = p_resp->num_mc_filters;
+}
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
- u8 rx_count = 1, tx_count = 1, num_sbs = 1;
- u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
+ struct vf_pf_resc_request *p_resc;
bool resources_acquired = false;
struct vfpf_acquire_tlv *req;
int rc = 0, attempts = 0;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+ p_resc = &req->resc_request;
 /* start filling the request */
req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
- req->resc_request.num_rxqs = rx_count;
- req->resc_request.num_txqs = tx_count;
- req->resc_request.num_sbs = num_sbs;
- req->resc_request.num_mac_filters = num_mac;
- req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+ p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
+ p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
req->vfdev_info.fw_major = FW_MAJOR_VERSION;
req->vfdev_info.fw_minor = FW_MINOR_VERSION;
req->vfdev_info.fw_revision = FW_REVISION_VERSION;
req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+ req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
/* Fill capability field with any non-deprecated config we support */
req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
@@ -185,21 +213,21 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
resources_acquired = true;
} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
attempts < VF_ACQUIRE_THRESH) {
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
- "PF unwilling to fullfill resource request. Try PF recommended amount\n");
-
- /* humble our request */
- req->resc_request.num_txqs = resp->resc.num_txqs;
- req->resc_request.num_rxqs = resp->resc.num_rxqs;
- req->resc_request.num_sbs = resp->resc.num_sbs;
- req->resc_request.num_mac_filters =
- resp->resc.num_mac_filters;
- req->resc_request.num_vlan_filters =
- resp->resc.num_vlan_filters;
+ qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
+ &resp->resc);
/* Clear response buffer */
memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+ } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
+ pfdev_info->major_fp_hsi &&
+ (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+ DP_NOTICE(p_hwfn,
+ "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
+ pfdev_info->major_fp_hsi,
+ pfdev_info->minor_fp_hsi,
+ ETH_HSI_VER_MAJOR,
+ ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
+ return -EINVAL;
} else {
DP_ERR(p_hwfn,
"PF returned error %d to VF acquisition request\n",
@@ -225,6 +253,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
}
}
+ if (ETH_HSI_VER_MINOR &&
+ (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
+ DP_INFO(p_hwfn,
+ "PF is using older fastpath HSI; %02x.%02x is configured\n",
+ ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
+ }
+
return 0;
}
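Minor-version negotiation is symmetric: the PF clamps to min(its own minor, the VF's requested minor) when it fills minor_fp_hsi in qed_iov_vf_mbx_acquire() earlier in this patch, and the VF only logs when it ends up below the minor it asked for. With hypothetical numbers:

    u8 pf_minor = 1, vf_minor = 3;			/* illustrative versions */
    u8 cfg_minor = min_t(u8, pf_minor, vf_minor);	/* PF configures minor 1 */

    /* On the VF: resp minor (1) < its own (3), so the DP_INFO above
     * reports that the older fastpath HSI was configured.
     */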
@@ -405,8 +440,8 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 pbl_size, void __iomem **pp_doorbell)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_txq_tlv *req;
- struct pfvf_def_resp_tlv *resp;
int rc;
/* clear mailbox and prep first tlv */
@@ -424,20 +459,24 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
- resp = &p_iov->pf2vf_reply->default_resp;
+ resp = &p_iov->pf2vf_reply->queue_start;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
if (pp_doorbell) {
- u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
- *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
- qed_db_addr(cid, DQ_DEMS_LEGACY);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+ tx_queue_id, *pp_doorbell, resp->offset);
}
+exit:
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index b82fda964bbd..b23ce58e932f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -96,7 +96,9 @@ struct vfpf_acquire_tlv {
u32 driver_version;
u16 opaque_fid; /* ME register value */
u8 os_type; /* VFPF_ACQUIRE_OS_* value */
- u8 padding[5];
+ u8 eth_fp_hsi_major;
+ u8 eth_fp_hsi_minor;
+ u8 padding[3];
} vfdev_info;
struct vf_pf_resc_request resc_request;
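The two new HSI fields are carved out of the existing padding, so the TLV keeps its size and every later offset is unchanged; an old PF parsing a new VF's acquire message simply sees zeroes there (i.e. no HSI version advertised). The invariant as a standalone sketch (struct names are illustrative):

    struct vfdev_tail_old {
    	u8 os_type;
    	u8 padding[5];
    };

    struct vfdev_tail_new {
    	u8 os_type;
    	u8 eth_fp_hsi_major;
    	u8 eth_fp_hsi_minor;
    	u8 padding[3];
    };

    /* Both tails occupy six bytes; mixed old/new peers stay wire-compatible. */
    _Static_assert(sizeof(struct vfdev_tail_old) ==
    	       sizeof(struct vfdev_tail_new), "acquire TLV size changed");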
@@ -171,7 +173,14 @@ struct pfvf_acquire_resp_tlv {
struct pfvf_stats_info stats_info;
u8 port_mac[ETH_ALEN];
- u8 padding2[2];
+
+ /* It's possible PF had to configure an older fastpath HSI
+ * [in case VF is newer than PF]. This is communicated back
+ * to the VF. It can also be used in case of error due to
+ * non-matching versions to shed light in VF about failure.
+ */
+ u8 major_fp_hsi;
+ u8 minor_fp_hsi;
} pfdev_info;
struct pf_vf_resc {
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 06ff90d87572..74a49850d74d 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
+qede-$(CONFIG_DCB) += qede_dcbnl.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 47d6b22252f6..02b06d4e40ae 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -24,7 +24,7 @@
#include <linux/qed/qed_eth_if.h>
#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 7
+#define QEDE_MINOR_VERSION 10
#define QEDE_REVISION_VERSION 1
#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
@@ -143,6 +143,8 @@ struct qede_dev {
struct mutex qede_lock;
u32 state; /* Protected by qede_lock */
u16 rx_buf_size;
+ u32 rx_copybreak;
+
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
/* Max supported alignment is 256 (8 shift)
@@ -235,6 +237,7 @@ struct qede_rx_queue {
u64 rx_hw_errors;
u64 rx_alloc_errors;
+ u64 rx_ip_frags;
};
union db_prod {
@@ -304,6 +307,9 @@ union qede_reload_args {
u16 mtu;
};
+#ifdef CONFIG_DCB
+void qede_set_dcbnl_ops(struct net_device *ndev);
+#endif
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev,
@@ -329,6 +335,7 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
#define NUM_TX_BDS_MIN 128
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
+#define QEDE_MIN_PKT_LEN 64
#define QEDE_RX_HDR_SIZE 256
#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
new file mode 100644
index 000000000000..03e8c0212433
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -0,0 +1,348 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+#include "qede.h"
+
+static u8 qede_dcbnl_getstate(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getstate(edev->cdev);
+}
+
+static u8 qede_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setstate(edev->cdev, state);
+}
+
+static void qede_dcbnl_getpermhwaddr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+}
+
+static void qede_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgtccfgtx(edev->cdev, prio, prio_type,
+ pgid, bw_pct, up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgtccfgrx(edev->cdev, prio, prio_type, pgid, bw_pct,
+ up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgrx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpfccfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpfccfg(edev->cdev, prio, setting);
+}
+
+static void qede_dcbnl_setpfccfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->setpfccfg(edev->cdev, prio, setting);
+}
+
+static u8 qede_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getcap(edev->cdev, capid, cap);
+}
+
+static int qede_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getnumtcs(edev->cdev, tcid, num);
+}
+
+static u8 qede_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getpfcstate(edev->cdev);
+}
+
+static int qede_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getapp(edev->cdev, idtype, id);
+}
+
+static u8 qede_dcbnl_getdcbx(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getdcbx(edev->cdev);
+}
+
+static void qede_dcbnl_setpgtccfgtx(struct net_device *netdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgtccfgtx(edev->cdev, prio, pri_type, pgid,
+ bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgtccfgrx(edev->cdev, prio, pri_type, pgid,
+ bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static u8 qede_dcbnl_setall(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setall(edev->cdev);
+}
+
+static int qede_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setnumtcs(edev->cdev, tcid, num);
+}
+
+static void qede_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpfcstate(edev->cdev, state);
+}
+
+static int qede_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 idval,
+ u8 up)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setapp(edev->cdev, idtype, idval, up);
+}
+
+static u8 qede_dcbnl_setdcbx(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setdcbx(edev->cdev, state);
+}
+
+static u8 qede_dcbnl_getfeatcfg(struct net_device *netdev, int featid,
+ u8 *flags)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getfeatcfg(edev->cdev, featid, flags);
+}
+
+static u8 qede_dcbnl_setfeatcfg(struct net_device *netdev, int featid, u8 flags)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setfeatcfg(edev->cdev, featid, flags);
+}
+
+static int qede_dcbnl_peer_getappinfo(struct net_device *netdev,
+ struct dcb_peer_app_info *info,
+ u16 *count)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->peer_getappinfo(edev->cdev, info, count);
+}
+
+static int qede_dcbnl_peer_getapptable(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->peer_getapptable(edev->cdev, app);
+}
+
+static int qede_dcbnl_cee_peer_getpfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->cee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_cee_peer_getpg(struct net_device *netdev,
+ struct cee_pg *pg)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg);
+}
+
+static int qede_dcbnl_ieee_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_getapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_setapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_peer_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_peer_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_peer_getets(edev->cdev, ets);
+}
+
+static const struct dcbnl_rtnl_ops qede_dcbnl_ops = {
+ .ieee_getpfc = qede_dcbnl_ieee_getpfc,
+ .ieee_setpfc = qede_dcbnl_ieee_setpfc,
+ .ieee_getets = qede_dcbnl_ieee_getets,
+ .ieee_setets = qede_dcbnl_ieee_setets,
+ .ieee_getapp = qede_dcbnl_ieee_getapp,
+ .ieee_setapp = qede_dcbnl_ieee_setapp,
+ .getdcbx = qede_dcbnl_getdcbx,
+ .ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
+ .ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
+ .getstate = qede_dcbnl_getstate,
+ .setstate = qede_dcbnl_setstate,
+ .getpermhwaddr = qede_dcbnl_getpermhwaddr,
+ .getpgtccfgtx = qede_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = qede_dcbnl_getpgbwgcfgtx,
+ .getpgtccfgrx = qede_dcbnl_getpgtccfgrx,
+ .getpgbwgcfgrx = qede_dcbnl_getpgbwgcfgrx,
+ .getpfccfg = qede_dcbnl_getpfccfg,
+ .setpfccfg = qede_dcbnl_setpfccfg,
+ .getcap = qede_dcbnl_getcap,
+ .getnumtcs = qede_dcbnl_getnumtcs,
+ .getpfcstate = qede_dcbnl_getpfcstate,
+ .getapp = qede_dcbnl_getapp,
+ .setpgtccfgtx = qede_dcbnl_setpgtccfgtx,
+ .setpgtccfgrx = qede_dcbnl_setpgtccfgrx,
+ .setpgbwgcfgtx = qede_dcbnl_setpgbwgcfgtx,
+ .setpgbwgcfgrx = qede_dcbnl_setpgbwgcfgrx,
+ .setall = qede_dcbnl_setall,
+ .setnumtcs = qede_dcbnl_setnumtcs,
+ .setpfcstate = qede_dcbnl_setpfcstate,
+ .setapp = qede_dcbnl_setapp,
+ .setdcbx = qede_dcbnl_setdcbx,
+ .setfeatcfg = qede_dcbnl_setfeatcfg,
+ .getfeatcfg = qede_dcbnl_getfeatcfg,
+ .peer_getappinfo = qede_dcbnl_peer_getappinfo,
+ .peer_getapptable = qede_dcbnl_peer_getapptable,
+ .cee_peer_getpfc = qede_dcbnl_cee_peer_getpfc,
+ .cee_peer_getpg = qede_dcbnl_cee_peer_getpg,
+};
+
+void qede_set_dcbnl_ops(struct net_device *dev)
+{
+ dev->dcbnl_ops = &qede_dcbnl_ops;
+}
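All of the callbacks above are deliberately thin shims: the dcbnl core hands the driver a net_device, while the qed core API operates on the underlying qed_dev, so each wrapper only recovers the driver-private data and forwards the call. A minimal sketch of the shared shape, using a hypothetical op name:

/* "example_op" is hypothetical; it only illustrates the shim pattern
 * common to every qede_dcbnl_* wrapper above.
 */
static u8 qede_dcbnl_example_op(struct net_device *netdev)
{
        struct qede_dev *edev = netdev_priv(netdev);

        return edev->ops->dcb->example_op(edev->cdev);
}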
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 6836d44b6b89..f8492cac9290 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -37,6 +37,7 @@ static const struct {
} qede_rqstats_arr[] = {
QEDE_RQSTAT(rx_hw_errors),
QEDE_RQSTAT(rx_alloc_errors),
+ QEDE_RQSTAT(rx_ip_frags),
};
#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
@@ -426,6 +427,59 @@ static u32 qede_get_link(struct net_device *dev)
return current_link.link_up;
}
+static int qede_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 rxc, txc;
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+ edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);
+
+ coal->rx_coalesce_usecs = rxc;
+ coal->tx_coalesce_usecs = txc;
+
+ return 0;
+}
+
+static int qede_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int i, rc = 0;
+ u16 rxc, txc;
+ u8 sb_id;
+
+ if (!netif_running(dev)) {
+ DP_INFO(edev, "Interface is down\n");
+ return -EINVAL;
+ }
+
+ if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
+ coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
+ DP_INFO(edev,
+ "Can't support requested %s coalesce value [max supported value %d]\n",
+ coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" : "tx",
+ QED_COALESCE_MAX);
+ return -EINVAL;
+ }
+
+ rxc = (u16)coal->rx_coalesce_usecs;
+ txc = (u16)coal->tx_coalesce_usecs;
+ for_each_rss(i) {
+ sb_id = edev->fp_array[i].sb_info->igu_sb_id;
+ rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
+ (u8)i, sb_id);
+ if (rc) {
+ DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
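The get/set pair above is reached through the standard ethtool coalescing interface (ethtool -c / ethtool -C). For reference, a minimal userspace sketch of the same path via the SIOCETHTOOL ioctl; "eth0" is an assumed interface name, and note that qede only consumes the two usecs fields (a get-modify-set cycle is cleaner for drivers that honour more of the structure):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

static int set_coalesce(const char *ifname, __u32 rx_usecs, __u32 tx_usecs)
{
        struct ethtool_coalesce ecoal = { .cmd = ETHTOOL_SCOALESCE };
        struct ifreq ifr;
        int fd, rc;

        /* Values above QED_COALESCE_MAX are rejected with -EINVAL. */
        ecoal.rx_coalesce_usecs = rx_usecs;
        ecoal.tx_coalesce_usecs = tx_usecs;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&ecoal;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0)
                return -1;
        rc = ioctl(fd, SIOCETHTOOL, &ifr);
        close(fd);
        return rc;
}

Usage would be set_coalesce("eth0", 64, 128) with the interface up, since qede_set_coalesce() bails out on a downed interface.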
static void qede_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
@@ -1131,6 +1185,48 @@ static void qede_self_test(struct net_device *dev,
}
}
+static int qede_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 val;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ val = *(u32 *)data;
+ if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Invalid rx copy break value, range is [%u, %u]",
+ QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
+ return -EINVAL;
+ }
+
+ edev->rx_copybreak = val;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qede_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = edev->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
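rx_copybreak is exposed as an ethtool tunable: frames whose length plus pad fit under it are memcpy'd into a fresh skb so the DMA page can be recycled immediately, while larger frames attach the page as a fragment (see the len + pad <= edev->rx_copybreak test in qede_rx_int() further down). Reusing the includes and socket from the coalescing sketch above, programming it looks roughly like this; the same path is taken by ethtool --set-tunable <dev> rx-copybreak <n>:

static int set_rx_copybreak(int fd, const char *ifname, __u32 val)
{
        union {
                struct ethtool_tunable hdr;
                char bytes[sizeof(struct ethtool_tunable) + sizeof(__u32)];
        } req = { .hdr = {
                .cmd     = ETHTOOL_STUNABLE,
                .id      = ETHTOOL_RX_COPYBREAK,
                .type_id = ETHTOOL_TUNABLE_U32,
                .len     = sizeof(__u32),
        } };
        struct ifreq ifr;

        /* qede accepts values in [QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE] */
        memcpy(req.hdr.data, &val, sizeof(val));

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&req;
        return ioctl(fd, SIOCETHTOOL, &ifr);
}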
static const struct ethtool_ops qede_ethtool_ops = {
.get_settings = qede_get_settings,
.set_settings = qede_set_settings,
@@ -1139,6 +1235,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.set_msglevel = qede_set_msglevel,
.nway_reset = qede_nway_reset,
.get_link = qede_get_link,
+ .get_coalesce = qede_get_coalesce,
+ .set_coalesce = qede_set_coalesce,
.get_ringparam = qede_get_ringparam,
.set_ringparam = qede_set_ringparam,
.get_pauseparam = qede_get_pauseparam,
@@ -1157,6 +1255,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.self_test = qede_self_test,
+ .get_tunable = qede_get_tunable,
+ .set_tunable = qede_set_tunable,
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
@@ -1179,6 +1279,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
.set_rxfh = qede_set_rxfh,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
+ .get_tunable = qede_get_tunable,
+ .set_tunable = qede_set_tunable,
};
void qede_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index edd2d908c52e..91e7bb0b85c8 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -24,12 +24,7 @@
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
-#ifdef CONFIG_QEDE_VXLAN
-#include <net/vxlan.h>
-#endif
-#ifdef CONFIG_QEDE_GENEVE
-#include <net/geneve.h>
-#endif
+#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -87,7 +82,9 @@ static const struct pci_device_id qede_pci_tbl[] = {
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
{ 0 }
};
@@ -488,6 +485,24 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
}
#endif
+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+ /* wmb makes sure that the BDs data is updated before updating the
+ * producer, otherwise FW may read old data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the queue lock is released and another start_xmit is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+}
+
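This helper exists because, with the xmit_more change below, the doorbell is no longer rung per frame: it is deferred while the stack promises more frames, so every early-exit path in qede_start_xmit() must now flush a possibly deferred doorbell before returning. A hedged sketch of the contract (sketch_map_and_queue() is a hypothetical stand-in for the mapping and BD-filling steps):

static int sketch_map_and_queue(struct sk_buff *skb,
                                struct qede_tx_queue *txq); /* hypothetical */

static netdev_tx_t sketch_xmit(struct sk_buff *skb,
                               struct qede_tx_queue *txq,
                               struct netdev_queue *netdev_txq)
{
        if (sketch_map_and_queue(skb, txq)) {
                /* A previous xmit_more frame may have queued BDs without
                 * ringing the doorbell, so flush before bailing out.
                 */
                qede_update_tx_producer(txq);
                return NETDEV_TX_OK;
        }

        /* Defer the MMIO doorbell while more frames are coming; always
         * ring it if the queue is about to be stopped.
         */
        if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq))
                qede_update_tx_producer(txq);

        return NETDEV_TX_OK;
}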
/* Main transmit function */
static
netdev_tx_t qede_start_xmit(struct sk_buff *skb,
@@ -546,6 +561,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
DP_NOTICE(edev, "SKB mapping failed\n");
qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+ qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
nbd++;
@@ -660,6 +676,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (rc) {
qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
data_split);
+ qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
@@ -684,6 +701,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (rc) {
qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
data_split);
+ qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
}
@@ -704,20 +722,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
txq->tx_db.data.bd_prod =
cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- /* wmb makes sure that the BDs data is updated before updating the
- * producer, otherwise FW may read old data from the BDs.
- */
- wmb();
- barrier();
- writel(txq->tx_db.raw, txq->doorbell_addr);
-
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the queue lock is released and another start_xmit is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
- */
- mmiowb();
+ if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq))
+ qede_update_tx_producer(txq);
if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
< (MAX_SKB_FRAGS + 1))) {
@@ -1351,6 +1357,20 @@ static u8 qede_check_csum(u16 flag)
return qede_check_tunn_csum(flag);
}
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 flag)
+{
+ u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+ if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+ (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+ PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+ return true;
+
+ return false;
+}
+
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
struct qede_dev *edev = fp->edev;
@@ -1429,6 +1449,12 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
csum_flag = qede_check_csum(parse_flag);
if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+ if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
+ parse_flag)) {
+ rxq->rx_ip_frags++;
+ goto alloc_skb;
+ }
+
DP_NOTICE(edev,
"CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
sw_comp_cons, parse_flag);
@@ -1437,6 +1463,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
goto next_cqe;
}
+alloc_skb:
skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
if (unlikely(!skb)) {
DP_NOTICE(edev,
@@ -1447,7 +1474,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
}
/* Copy data into SKB */
- if (len + pad <= QEDE_RX_HDR_SIZE) {
+ if (len + pad <= edev->rx_copybreak) {
memcpy(skb_put(skb, len),
page_address(data) + pad +
sw_rx_data->page_offset, len);
@@ -1579,56 +1606,49 @@ next_cqe: /* don't consume bd rx buffer */
static int qede_poll(struct napi_struct *napi, int budget)
{
- int work_done = 0;
struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
- napi);
+ napi);
struct qede_dev *edev = fp->edev;
+ int rx_work_done = 0;
+ u8 tc;
- while (1) {
- u8 tc;
-
- for (tc = 0; tc < edev->num_tc; tc++)
- if (qede_txq_has_work(&fp->txqs[tc]))
- qede_tx_int(edev, &fp->txqs[tc]);
-
- if (qede_has_rx_work(fp->rxq)) {
- work_done += qede_rx_int(fp, budget - work_done);
-
- /* must not complete if we consumed full budget */
- if (work_done >= budget)
- break;
- }
+ for (tc = 0; tc < edev->num_tc; tc++)
+ if (qede_txq_has_work(&fp->txqs[tc]))
+ qede_tx_int(edev, &fp->txqs[tc]);
+
+ rx_work_done = qede_has_rx_work(fp->rxq) ?
+ qede_rx_int(fp, budget) : 0;
+ if (rx_work_done < budget) {
+ qed_sb_update_sb_idx(fp->sb_info);
+ /* *_has_*_work() reads the status block,
+ * thus we need to ensure that status block indices
+ * have been actually read (qed_sb_update_sb_idx)
+ * prior to this check (*_has_*_work) so that
+ * we won't write the "newer" value of the status block
+ * to HW (if there was a DMA right after
+ * qede_has_rx_work and if there is no rmb, the memory
+ * reading (qed_sb_update_sb_idx) may be postponed
+ * to right before *_ack_sb). In this case there
+ * will never be another interrupt until there is
+ * another update of the status block, while there
+ * is still unhandled work.
+ */
+ rmb();
/* Fall out from the NAPI loop if needed */
- if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
- qed_sb_update_sb_idx(fp->sb_info);
- /* *_has_*_work() reads the status block,
- * thus we need to ensure that status block indices
- * have been actually read (qed_sb_update_sb_idx)
- * prior to this check (*_has_*_work) so that
- * we won't write the "newer" value of the status block
- * to HW (if there was a DMA right after
- * qede_has_rx_work and if there is no rmb, the memory
- * reading (qed_sb_update_sb_idx) may be postponed
- * to right before *_ack_sb). In this case there
- * will never be another interrupt until there is
- * another update of the status block, while there
- * is still unhandled work.
- */
- rmb();
-
- if (!(qede_has_rx_work(fp->rxq) ||
- qede_has_tx_work(fp))) {
- napi_complete(napi);
- /* Update and reenable interrupts */
- qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
- 1 /*update*/);
- break;
- }
+ if (!(qede_has_rx_work(fp->rxq) ||
+ qede_has_tx_work(fp))) {
+ napi_complete(napi);
+
+ /* Update and reenable interrupts */
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+ 1 /*update*/);
+ } else {
+ rx_work_done = budget;
}
}
- return work_done;
+ return rx_work_done;
}
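The rework above flattens the old open-coded loop into the canonical NAPI shape: do at most budget worth of RX work, and only complete NAPI and re-enable interrupts once the budget was not exhausted and, after refreshing the status block (the rmb() reasoning in the comment), no work remains. Stripped of the hardware specifics, the pattern is:

/* Generic NAPI poll skeleton; do_rx_work() and has_work() are
 * hypothetical stand-ins for qede_rx_int() and the status-block checks.
 */
static int do_rx_work(int budget);
static bool has_work(void);

static int sketch_poll(struct napi_struct *napi, int budget)
{
        int done = do_rx_work(budget);

        if (done < budget) {
                if (!has_work()) {
                        napi_complete(napi);
                        /* re-enable device interrupts here */
                } else {
                        done = budget;  /* stay on the poll list */
                }
        }
        return done;
}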
static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
@@ -2110,75 +2130,75 @@ int qede_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
-#ifdef CONFIG_QEDE_VXLAN
-static void qede_add_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
+static void qede_udp_tunnel_add(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct qede_dev *edev = netdev_priv(dev);
- u16 t_port = ntohs(port);
+ u16 t_port = ntohs(ti->port);
- if (edev->vxlan_dst_port)
- return;
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (edev->vxlan_dst_port)
+ return;
- edev->vxlan_dst_port = t_port;
+ edev->vxlan_dst_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
+ t_port);
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
- schedule_delayed_work(&edev->sp_task, 0);
-}
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (edev->geneve_dst_port)
+ return;
-static void qede_del_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
-{
- struct qede_dev *edev = netdev_priv(dev);
- u16 t_port = ntohs(port);
+ edev->geneve_dst_port = t_port;
- if (t_port != edev->vxlan_dst_port)
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
+ t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ break;
+ default:
return;
+ }
- edev->vxlan_dst_port = 0;
-
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
-
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
schedule_delayed_work(&edev->sp_task, 0);
}
-#endif
-#ifdef CONFIG_QEDE_GENEVE
-static void qede_add_geneve_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
+static void qede_udp_tunnel_del(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct qede_dev *edev = netdev_priv(dev);
- u16 t_port = ntohs(port);
+ u16 t_port = ntohs(ti->port);
- if (edev->geneve_dst_port)
- return;
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (t_port != edev->vxlan_dst_port)
+ return;
- edev->geneve_dst_port = t_port;
+ edev->vxlan_dst_port = 0;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
- set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
- schedule_delayed_work(&edev->sp_task, 0);
-}
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
+ t_port);
-static void qede_del_geneve_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
-{
- struct qede_dev *edev = netdev_priv(dev);
- u16 t_port = ntohs(port);
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (t_port != edev->geneve_dst_port)
+ return;
- if (t_port != edev->geneve_dst_port)
- return;
+ edev->geneve_dst_port = 0;
- edev->geneve_dst_port = 0;
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
+ t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ break;
+ default:
+ return;
+ }
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
- set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
schedule_delayed_work(&edev->sp_task, 0);
}
-#endif
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
@@ -2202,14 +2222,8 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_get_vf_config = qede_get_vf_config,
.ndo_set_vf_rate = qede_set_vf_rate,
#endif
-#ifdef CONFIG_QEDE_VXLAN
- .ndo_add_vxlan_port = qede_add_vxlan_port,
- .ndo_del_vxlan_port = qede_del_vxlan_port,
-#endif
-#ifdef CONFIG_QEDE_GENEVE
- .ndo_add_geneve_port = qede_add_geneve_port,
- .ndo_del_geneve_port = qede_del_geneve_port,
-#endif
+ .ndo_udp_tunnel_add = qede_udp_tunnel_add,
+ .ndo_udp_tunnel_del = qede_udp_tunnel_del,
};
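The conversion above folds the per-protocol VXLAN/GENEVE ndos into the generic ndo_udp_tunnel_add/del pair, with struct udp_tunnel_info carrying both the tunnel type and the UDP port. One consequence, visible in the qede_open() hunk further down, is that a driver must ask the stack to replay existing offloaded ports whenever the device (re)opens, since the callbacks only fire when tunnel sockets are created or destroyed. A minimal sketch, with sketch_bring_up() as a hypothetical stand-in for the driver's load path:

static int sketch_bring_up(struct net_device *ndev); /* hypothetical */

static int sketch_open(struct net_device *ndev)
{
        int rc = sketch_bring_up(ndev);

        if (rc)
                return rc;

        /* Replays ndo_udp_tunnel_add for every live VXLAN/GENEVE port */
        udp_tunnel_get_rx_info(ndev);

        return 0;
}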
/* -------------------------------------------------------------------------
@@ -2499,8 +2513,13 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+#ifdef CONFIG_DCB
+ qede_set_dcbnl_ops(edev->ndev);
+#endif
+
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
mutex_init(&edev->qede_lock);
+ edev->rx_copybreak = QEDE_RX_HDR_SIZE;
DP_INFO(edev, "Ending successfully qede probe\n");
@@ -3227,7 +3246,7 @@ static int qede_stop_queues(struct qede_dev *edev)
return rc;
}
-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
int rc, tc, i;
int vlan_removal_en = 1;
@@ -3458,6 +3477,7 @@ out:
enum qede_load_mode {
QEDE_LOAD_NORMAL,
+ QEDE_LOAD_RELOAD,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
@@ -3496,7 +3516,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
goto err3;
DP_INFO(edev, "Setup IRQs succeeded\n");
- rc = qede_start_queues(edev);
+ rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
if (rc)
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
@@ -3551,7 +3571,7 @@ void qede_reload(struct qede_dev *edev,
if (func)
func(edev, args);
- qede_load(edev, QEDE_LOAD_NORMAL);
+ qede_load(edev, QEDE_LOAD_RELOAD);
mutex_lock(&edev->qede_lock);
qede_config_rx_mode(edev->ndev);
@@ -3573,12 +3593,8 @@ static int qede_open(struct net_device *ndev)
if (rc)
return rc;
-#ifdef CONFIG_QEDE_VXLAN
- vxlan_get_rx_port(ndev);
-#endif
-#ifdef CONFIG_QEDE_GENEVE
- geneve_get_rx_port(ndev);
-#endif
+ udp_tunnel_get_rx_info(ndev);
+
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index caf6ddb7ea76..fd973f4f16c7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1026,10 +1026,8 @@ struct qlcnic_ipaddr {
#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
#define QLCNIC_TSS_RSS 0x80000
-#ifdef CONFIG_QLCNIC_VXLAN
#define QLCNIC_ADD_VXLAN_PORT 0x100000
#define QLCNIC_DEL_VXLAN_PORT 0x200000
-#endif
#define QLCNIC_VLAN_FILTERING 0x800000
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f9640d5ce6ba..bdbcd2b088a0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2159,7 +2159,6 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
struct qlcnic_cmd_args cmd;
u32 mac_low, mac_high;
- function = 0;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
if (err)
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index bf892160dd5f..a496390b8632 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1020,7 +1020,6 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
return 0;
}
-#ifdef CONFIG_QLCNIC_VXLAN
#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1
#define QLC_83XX_MATCH_ENCAP_ID BIT_2
#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3
@@ -1089,14 +1088,12 @@ static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
return ret;
}
-#endif
static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
{
if (adapter->fhash.fnum)
qlcnic_prune_lb_filters(adapter);
-#ifdef CONFIG_QLCNIC_VXLAN
if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
if (qlcnic_set_vxlan_port(adapter))
return;
@@ -1112,7 +1109,6 @@ static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
adapter->ahw->vxlan_port = 0;
adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
}
-#endif
}
/**
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 7bd6f25b4625..607bb7d4514d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -2220,7 +2220,7 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
if (!opcode)
return;
- ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
desc = &sds_ring->desc_head[consumer];
desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1c29105b6c36..3ebef27e0964 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -16,9 +16,7 @@
#include <linux/aer.h>
#include <linux/log2.h>
#include <linux/pci.h>
-#ifdef CONFIG_QLCNIC_VXLAN
#include <net/vxlan.h>
-#endif
#include "qlcnic.h"
#include "qlcnic_sriov.h"
@@ -474,13 +472,15 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev,
return 0;
}
-#ifdef CONFIG_QLCNIC_VXLAN
static void qlcnic_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
/* Adapter supports only one VXLAN port. Use very first port
* for enabling offload
*/
@@ -488,23 +488,26 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev,
return;
if (!ahw->vxlan_port_count) {
ahw->vxlan_port_count = 1;
- ahw->vxlan_port = ntohs(port);
+ ahw->vxlan_port = ntohs(ti->port);
adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
return;
}
- if (ahw->vxlan_port == ntohs(port))
+ if (ahw->vxlan_port == ntohs(ti->port))
ahw->vxlan_port_count++;
}
static void qlcnic_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
- (ahw->vxlan_port != ntohs(port)))
+ (ahw->vxlan_port != ntohs(ti->port)))
return;
ahw->vxlan_port_count--;
@@ -519,7 +522,6 @@ static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
features = vlan_features_check(skb, features);
return vxlan_features_check(skb, features);
}
-#endif
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
@@ -539,11 +541,9 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
.ndo_get_phys_port_id = qlcnic_get_phys_port_id,
-#ifdef CONFIG_QLCNIC_VXLAN
- .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
- .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
+ .ndo_udp_tunnel_add = qlcnic_add_vxlan_port,
+ .ndo_udp_tunnel_del = qlcnic_del_vxlan_port,
.ndo_features_check = qlcnic_features_check,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
@@ -2015,10 +2015,8 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
qlcnic_create_sysfs_entries(adapter);
-#ifdef CONFIG_QLCNIC_VXLAN
if (qlcnic_encap_rx_offload(adapter))
- vxlan_get_rx_port(netdev);
-#endif
+ udp_tunnel_get_rx_info(netdev);
adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
return 0;
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 6b541e57c96a..cb29ee24cf1b 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -4,7 +4,7 @@
* Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
* Copyright (C) 2007
* Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
- * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2007-2012 Florian Fainelli <f.fainelli@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -48,8 +48,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
-#define DRV_VERSION "0.28"
-#define DRV_RELDATE "07Oct2011"
+#define DRV_VERSION "0.29"
+#define DRV_RELDATE "04Jul2016"
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (6000 * HZ / 1000)
@@ -162,7 +162,7 @@
MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
"Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
- "Florian Fainelli <florian@openwrt.org>");
+ "Florian Fainelli <f.fainelli@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);
@@ -200,7 +200,6 @@ struct r6040_private {
struct mii_bus *mii_bus;
struct napi_struct napi;
void __iomem *base;
- struct phy_device *phydev;
int old_link;
int old_duplex;
};
@@ -474,7 +473,7 @@ static void r6040_down(struct net_device *dev)
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
- phy_stop(lp->phydev);
+ phy_stop(dev->phydev);
}
static int r6040_close(struct net_device *dev)
@@ -515,12 +514,10 @@ static int r6040_close(struct net_device *dev)
static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct r6040_private *lp = netdev_priv(dev);
-
- if (!lp->phydev)
+ if (!dev->phydev)
return -EINVAL;
- return phy_mii_ioctl(lp->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static int r6040_rx(struct net_device *dev, int limit)
@@ -617,10 +614,15 @@ static void r6040_tx(struct net_device *dev)
if (descptr->status & DSC_OWNER_MAC)
break; /* Not complete */
skb_ptr = descptr->skb_ptr;
+
+ /* Statistic Counter */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb_ptr->len;
+
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
skb_ptr->len, PCI_DMA_TODEVICE);
/* Free buffer */
- dev_kfree_skb_irq(skb_ptr);
+ dev_kfree_skb(skb_ptr);
descptr->skb_ptr = NULL;
/* To next descriptor */
descptr = descptr->vndescp;
@@ -641,12 +643,15 @@ static int r6040_poll(struct napi_struct *napi, int budget)
void __iomem *ioaddr = priv->base;
int work_done;
+ r6040_tx(dev);
+
work_done = r6040_rx(dev, budget);
if (work_done < budget) {
- napi_complete(napi);
- /* Enable RX interrupt */
- iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
+ napi_complete_done(napi, work_done);
+ /* Enable RX/TX interrupt */
+ iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS,
+ ioaddr + MIER);
}
return work_done;
}
@@ -673,7 +678,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
}
/* RX interrupt request */
- if (status & RX_INTS) {
+ if (status & (RX_INTS | TX_INTS)) {
if (status & RX_NO_DESC) {
/* RX descriptor unavailable */
dev->stats.rx_dropped++;
@@ -684,15 +689,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
if (likely(napi_schedule_prep(&lp->napi))) {
/* Mask off RX interrupt */
- misr &= ~RX_INTS;
- __napi_schedule(&lp->napi);
+ misr &= ~(RX_INTS | TX_INTS);
+ __napi_schedule_irqoff(&lp->napi);
}
}
- /* TX interrupt request */
- if (status & TX_INTS)
- r6040_tx(dev);
-
/* Restore RDC MAC interrupt */
iowrite16(misr, ioaddr + MIER);
@@ -732,7 +733,7 @@ static int r6040_up(struct net_device *dev)
/* Initialize all MAC registers */
r6040_init_mac_regs(dev);
- phy_start(lp->phydev);
+ phy_start(dev->phydev);
return 0;
}
@@ -813,6 +814,9 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
void __iomem *ioaddr = lp->base;
unsigned long flags;
+ if (skb_put_padto(skb, ETH_ZLEN) < 0)
+ return NETDEV_TX_OK;
+
/* Critical Section */
spin_lock_irqsave(&lp->lock, flags);
@@ -824,17 +828,10 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- /* Statistic Counter */
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
/* Set TX descriptor & Transmit it */
lp->tx_free_desc--;
descptr = lp->tx_insert_ptr;
- if (skb->len < ETH_ZLEN)
- descptr->len = ETH_ZLEN;
- else
- descptr->len = skb->len;
-
+ descptr->len = skb->len;
descptr->skb_ptr = skb;
descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
@@ -843,7 +840,8 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb);
/* Trigger the MAC to check the TX descriptor */
- iowrite16(TM2TX, ioaddr + MTPR);
+ if (!skb->xmit_more || netif_queue_stopped(dev))
+ iowrite16(TM2TX, ioaddr + MTPR);
lp->tx_insert_ptr = descptr->vndescp;
/* If no tx resource, stop */
@@ -957,26 +955,12 @@ static void netdev_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct r6040_private *rp = netdev_priv(dev);
-
- return phy_ethtool_gset(rp->phydev, cmd);
-}
-
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct r6040_private *rp = netdev_priv(dev);
-
- return phy_ethtool_sset(rp->phydev, cmd);
-}
-
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops r6040_netdev_ops = {
@@ -998,7 +982,7 @@ static const struct net_device_ops r6040_netdev_ops = {
static void r6040_adjust_link(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
- struct phy_device *phydev = lp->phydev;
+ struct phy_device *phydev = dev->phydev;
int status_changed = 0;
void __iomem *ioaddr = lp->base;
@@ -1018,14 +1002,8 @@ static void r6040_adjust_link(struct net_device *dev)
lp->old_duplex = phydev->duplex;
}
- if (status_changed) {
- pr_info("%s: link %s", dev->name, phydev->link ?
- "UP" : "DOWN");
- if (phydev->link)
- pr_cont(" - %d/%s", phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "full" : "half");
- pr_cont("\n");
- }
+ if (status_changed)
+ phy_print_status(phydev);
}
static int r6040_mii_probe(struct net_device *dev)
@@ -1057,7 +1035,6 @@ static int r6040_mii_probe(struct net_device *dev)
| SUPPORTED_TP);
phydev->advertising = phydev->supported;
- lp->phydev = phydev;
lp->old_link = 0;
lp->old_duplex = -1;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 28b775e5a9ad..f0b09b05ed3f 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1996,7 +1996,8 @@ static int rocker_port_change_proto_down(struct net_device *dev,
return 0;
}
-static void rocker_port_neigh_destroy(struct neighbour *n)
+static void rocker_port_neigh_destroy(struct net_device *dev,
+ struct neighbour *n)
{
struct rocker_port *rocker_port = netdev_priv(n->dev);
int err;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 45019649bbbd..5cb51b609f02 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -475,7 +475,6 @@ struct sxgbe_priv_data {
int rxcsum_insertion;
spinlock_t stats_lock; /* lock for tx/rx statistics */
- struct phy_device *phydev;
int oldlink;
int speed;
int oldduplex;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index c0981ae45874..542b67d436df 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -147,7 +147,7 @@ static int sxgbe_get_eee(struct net_device *dev,
edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
- return phy_ethtool_get_eee(priv->phydev, edata);
+ return phy_ethtool_get_eee(dev->phydev, edata);
}
static int sxgbe_set_eee(struct net_device *dev,
@@ -172,7 +172,7 @@ static int sxgbe_set_eee(struct net_device *dev,
priv->tx_lpi_timer = edata->tx_lpi_timer;
}
- return phy_ethtool_set_eee(priv->phydev, edata);
+ return phy_ethtool_set_eee(dev->phydev, edata);
}
static void sxgbe_getdrvinfo(struct net_device *dev,
@@ -182,27 +182,6 @@ static void sxgbe_getdrvinfo(struct net_device *dev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int sxgbe_getsettings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
-
- if (priv->phydev)
- return phy_ethtool_gset(priv->phydev, cmd);
-
- return -EOPNOTSUPP;
-}
-
-static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
-
- if (priv->phydev)
- return phy_ethtool_sset(priv->phydev, cmd);
-
- return -EOPNOTSUPP;
-}
-
static u32 sxgbe_getmsglevel(struct net_device *dev)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
@@ -255,7 +234,7 @@ static void sxgbe_get_ethtool_stats(struct net_device *dev,
char *p;
if (priv->eee_enabled) {
- int val = phy_get_eee_err(priv->phydev);
+ int val = phy_get_eee_err(dev->phydev);
if (val)
priv->xstats.eee_wakeup_error_n = val;
@@ -499,8 +478,6 @@ static int sxgbe_get_regs_len(struct net_device *dev)
static const struct ethtool_ops sxgbe_ethtool_ops = {
.get_drvinfo = sxgbe_getdrvinfo,
- .get_settings = sxgbe_getsettings,
- .set_settings = sxgbe_setsettings,
.get_msglevel = sxgbe_getmsglevel,
.set_msglevel = sxgbe_setmsglevel,
.get_link = ethtool_op_get_link,
@@ -516,6 +493,8 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
.get_regs_len = sxgbe_get_regs_len,
.get_eee = sxgbe_get_eee,
.set_eee = sxgbe_set_eee,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
void sxgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 413ea14ab91f..ea44a2456ce1 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -124,12 +124,13 @@ static void sxgbe_eee_ctrl_timer(unsigned long arg)
*/
bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
{
+ struct net_device *ndev = priv->dev;
bool ret = false;
/* MAC core supports the EEE feature. */
if (priv->hw_cap.eee) {
/* Check if the PHY supports EEE */
- if (phy_init_eee(priv->phydev, 1))
+ if (phy_init_eee(ndev->phydev, 1))
return false;
priv->eee_active = 1;
@@ -152,12 +153,14 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
{
+ struct net_device *ndev = priv->dev;
+
/* When the EEE has been already initialised we have to
* modify the PLS bit in the LPI ctrl & status reg according
* to the PHY link status. For this reason.
*/
if (priv->eee_enabled)
- priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+ priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link);
}
/**
@@ -203,7 +206,7 @@ static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
static void sxgbe_adjust_link(struct net_device *dev)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
u8 new_state = 0;
u8 speed = 0xff;
@@ -306,9 +309,6 @@ static int sxgbe_init_phy(struct net_device *ndev)
netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
__func__, phydev->phy_id, phydev->link);
- /* save phy device in private structure */
- priv->phydev = phydev;
-
return 0;
}
@@ -1173,8 +1173,8 @@ static int sxgbe_open(struct net_device *dev)
priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (dev->phydev)
+ phy_start(dev->phydev);
/* initialise TX coalesce parameters */
sxgbe_tx_init_coalesce(priv);
@@ -1194,8 +1194,8 @@ static int sxgbe_open(struct net_device *dev)
init_error:
free_dma_desc_resources(priv);
- if (priv->phydev)
- phy_disconnect(priv->phydev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
phy_error:
clk_disable_unprepare(priv->sxgbe_clk);
@@ -1216,10 +1216,9 @@ static int sxgbe_release(struct net_device *dev)
del_timer_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */
- if (priv->phydev) {
- phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
}
netif_tx_stop_all_queues(dev);
@@ -1969,7 +1968,6 @@ static void sxgbe_poll_controller(struct net_device *dev)
*/
static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
int ret = -EOPNOTSUPP;
if (!netif_running(dev))
@@ -1979,9 +1977,9 @@ static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- if (!priv->phydev)
+ if (!dev->phydev)
return -EINVAL;
- ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+ ret = phy_mii_ioctl(dev->phydev, rq, cmd);
break;
default:
break;
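A recurring theme in the r6040 and sxgbe changes is dropping the driver-private phydev pointer in favour of net_device->phydev, which phylib populates when the PHY is attached; that in turn lets both drivers delete their get_settings/set_settings wrappers and plug the generic ksettings helpers straight into their ethtool ops. A minimal sketch of the resulting ops table, assuming the driver attaches its PHY with phy_connect():

/* With dev->phydev set by phylib, the generic helpers need no
 * driver-specific glue at all.
 */
static const struct ethtool_ops sketch_ethtool_ops = {
        .get_link           = ethtool_op_get_link,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};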
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 1f309127457d..f658fee74f18 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -50,14 +50,34 @@ enum {
#define HUNT_FILTER_TBL_ROWS 8192
#define EFX_EF10_FILTER_ID_INVALID 0xffff
+
+#define EFX_EF10_FILTER_DEV_UC_MAX 32
+#define EFX_EF10_FILTER_DEV_MC_MAX 256
+
+/* VLAN list entry */
+struct efx_ef10_vlan {
+ struct list_head list;
+ u16 vid;
+};
+
+/* Per-VLAN filters information */
+struct efx_ef10_filter_vlan {
+ struct list_head list;
+ u16 vid;
+ u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
+ u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
+ u16 ucdef;
+ u16 bcast;
+ u16 mcdef;
+};
+
struct efx_ef10_dev_addr {
u8 addr[ETH_ALEN];
- u16 id;
};
struct efx_ef10_filter_table {
-/* The RX match field masks supported by this fw & hw, in order of priority */
- enum efx_filter_match_flags rx_match_flags[
+/* The MCDI match masks supported by this fw & hw, in order of priority */
+ u32 rx_match_mcdi_flags[
MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
unsigned int rx_match_count;
@@ -73,16 +93,16 @@ struct efx_ef10_filter_table {
} *entry;
wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
-#define EFX_EF10_FILTER_DEV_UC_MAX 32
-#define EFX_EF10_FILTER_DEV_MC_MAX 256
struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
int dev_uc_count;
int dev_mc_count;
-/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
- u16 ucdef_id;
- u16 bcast_id;
- u16 mcdef_id;
+ bool uc_promisc;
+ bool mc_promisc;
+/* Whether in multicast promiscuous mode when last changed */
+ bool mc_promisc_last;
+ bool vlan_filter;
+ struct list_head vlan_list;
};
/* An arbitrary search limit for the software hash table */
@@ -90,6 +110,10 @@ struct efx_ef10_filter_table {
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
+static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
+static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan);
+static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
@@ -275,6 +299,131 @@ static ssize_t efx_ef10_show_primary_flag(struct device *dev,
? 1 : 0);
}
+static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_vlan *vlan;
+
+ WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ if (vlan->vid == vid)
+ return vlan;
+ }
+
+ return NULL;
+}
+
+static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_vlan *vlan;
+ int rc;
+
+ mutex_lock(&nic_data->vlan_lock);
+
+ vlan = efx_ef10_find_vlan(efx, vid);
+ if (vlan) {
+ /* We add VID 0 on init. 8021q adds it on module init
+ * for all interfaces with the VLAN filtering feature.
+ */
+ if (vid == 0)
+ goto done_unlock;
+ netif_warn(efx, drv, efx->net_dev,
+ "VLAN %u already added\n", vid);
+ rc = -EALREADY;
+ goto fail_exist;
+ }
+
+ rc = -ENOMEM;
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ goto fail_alloc;
+
+ vlan->vid = vid;
+
+ list_add_tail(&vlan->list, &nic_data->vlan_list);
+
+ if (efx->filter_state) {
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+ if (rc)
+ goto fail_filter_add_vlan;
+ }
+
+done_unlock:
+ mutex_unlock(&nic_data->vlan_lock);
+ return 0;
+
+fail_filter_add_vlan:
+ list_del(&vlan->list);
+ kfree(vlan);
+fail_alloc:
+fail_exist:
+ mutex_unlock(&nic_data->vlan_lock);
+ return rc;
+}
+
+static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
+ struct efx_ef10_vlan *vlan)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
+
+ if (efx->filter_state) {
+ down_write(&efx->filter_sem);
+ efx_ef10_filter_del_vlan(efx, vlan->vid);
+ up_write(&efx->filter_sem);
+ }
+
+ list_del(&vlan->list);
+ kfree(vlan);
+}
+
+static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_vlan *vlan;
+ int rc = 0;
+
+ /* 8021q removes VID 0 on module unload for all interfaces
+ * with VLAN filtering feature. We need to keep it to receive
+ * untagged traffic.
+ */
+ if (vid == 0)
+ return 0;
+
+ mutex_lock(&nic_data->vlan_lock);
+
+ vlan = efx_ef10_find_vlan(efx, vid);
+ if (!vlan) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u to be deleted not found\n", vid);
+ rc = -ENOENT;
+ } else {
+ efx_ef10_del_vlan_internal(efx, vlan);
+ }
+
+ mutex_unlock(&nic_data->vlan_lock);
+
+ return rc;
+}
+
+static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_vlan *vlan, *next_vlan;
+
+ mutex_lock(&nic_data->vlan_lock);
+ list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
+ efx_ef10_del_vlan_internal(efx, vlan);
+ mutex_unlock(&nic_data->vlan_lock);
+}
+
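efx_ef10_add_vlan() and efx_ef10_del_vlan() keep the per-NIC VLAN list consistent under vlan_lock and, whenever a filter table is live, mirror each entry into per-VLAN filters; efx_ef10_filter_table_probe() further down re-adds every listed VID, so the list survives filter-table teardown across resets. These helpers would typically be reached from the netdev VLAN ndos; a hedged sketch of that wiring (not part of this hunk):

static int sketch_vlan_rx_add_vid(struct net_device *net_dev,
                                  __be16 proto, u16 vid)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (proto != htons(ETH_P_8021Q))
                return -EINVAL;

        return efx_ef10_add_vlan(efx, vid);
}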
static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
@@ -421,8 +570,30 @@ static int efx_ef10_probe(struct efx_nic *efx)
#endif
ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
+ INIT_LIST_HEAD(&nic_data->vlan_list);
+ mutex_init(&nic_data->vlan_lock);
+
+ /* Add unspecified VID to support VLAN filtering being disabled */
+ rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
+ if (rc)
+ goto fail_add_vid_unspec;
+
+ /* If VLAN filtering is enabled, we need VID 0 to get untagged
+ * traffic. It is added automatically if the 8021q module is loaded,
+ * but we can't rely on that since the module may not be loaded.
+ */
+ rc = efx_ef10_add_vlan(efx, 0);
+ if (rc)
+ goto fail_add_vid_0;
+
return 0;
+fail_add_vid_0:
+ efx_ef10_cleanup_vlans(efx);
+fail_add_vid_unspec:
+ mutex_destroy(&nic_data->vlan_lock);
+ efx_ptp_remove(efx);
+ efx_mcdi_mon_remove(efx);
fail5:
device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
@@ -676,6 +847,9 @@ static void efx_ef10_remove(struct efx_nic *efx)
}
#endif
+ efx_ef10_cleanup_vlans(efx);
+ mutex_destroy(&nic_data->vlan_lock);
+
efx_ptp_remove(efx);
efx_mcdi_mon_remove(efx);
@@ -704,6 +878,45 @@ static int efx_ef10_probe_pf(struct efx_nic *efx)
return efx_ef10_probe(efx);
}
+int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
+ u32 *port_flags, u32 *vadaptor_flags,
+ unsigned int *vlan_tags)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
+ MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
+ port_id);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ }
+
+ if (port_flags)
+ *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
+ if (vadaptor_flags)
+ *vadaptor_flags =
+ MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
+ if (vlan_tags)
+ *vlan_tags =
+ MCDI_DWORD(outbuf,
+ VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);
+
+ return 0;
+}
+
int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
@@ -3040,15 +3253,55 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
return rc;
}
-static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
- enum efx_filter_match_flags match_flags)
+static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
{
+ unsigned int match_flags = spec->match_flags;
+ u32 mcdi_flags = 0;
+
+ if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
+ match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
+ mcdi_flags |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) :
+ (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN);
+ }
+
+#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \
+ unsigned int old_match_flags = match_flags; \
+ match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
+ if (match_flags != old_match_flags) \
+ mcdi_flags |= \
+ (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
+ }
+ MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP);
+ MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC);
+ MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT);
+ MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO);
+#undef MAP_FILTER_TO_MCDI_FLAG
+
+ /* Did we map them all? */
+ WARN_ON_ONCE(match_flags);
+
+ return mcdi_flags;
+}
+
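MAP_FILTER_TO_MCDI_FLAG() works by clearing the generic match bit and comparing against a saved copy, so a set bit is both translated to its MCDI equivalent and removed from match_flags, which is what lets the final WARN_ON_ONCE(match_flags) catch any flag that has no mapping. Expanded by hand, the REM_HOST invocation is equivalent to:

/* Hand expansion of MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP) */
{
        unsigned int old_match_flags = match_flags;

        match_flags &= ~EFX_FILTER_MATCH_REM_HOST;
        if (match_flags != old_match_flags)
                mcdi_flags |= (1 << MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN);
}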
+static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
+ const struct efx_filter_spec *spec)
+{
+ u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
unsigned int match_pri;
for (match_pri = 0;
match_pri < table->rx_match_count;
match_pri++)
- if (table->rx_match_flags[match_pri] == match_flags)
+ if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
return match_pri;
return -EPROTONOSUPPORT;
@@ -3074,7 +3327,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
EFX_FILTER_FLAG_RX)
return -EINVAL;
- rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
+ rc = efx_ef10_filter_pri(table, spec);
if (rc < 0)
return rc;
match_pri = rc;
@@ -3313,7 +3566,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (!spec ||
(!by_index &&
- efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
+ efx_ef10_filter_pri(table, spec) !=
filter_id / HUNT_FILTER_TBL_ROWS)) {
rc = -ENOENT;
goto out_unlock;
@@ -3394,12 +3647,13 @@ static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
return filter_id % HUNT_FILTER_TBL_ROWS;
}
-static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
+static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
{
- return efx_ef10_filter_remove_internal(efx, 1U << priority,
- filter_id, true);
+ if (filter_id == EFX_EF10_FILTER_ID_INVALID)
+ return;
+ efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true);
}
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -3414,7 +3668,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
spin_lock_bh(&efx->filter_lock);
saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (saved_spec && saved_spec->priority == priority &&
- efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
+ efx_ef10_filter_pri(table, saved_spec) ==
filter_id / HUNT_FILTER_TBL_ROWS) {
*spec = *saved_spec;
rc = 0;
@@ -3487,8 +3741,7 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
count = -EMSGSIZE;
break;
}
- buf[count++] = (efx_ef10_filter_rx_match_pri(
- table, spec->match_flags) *
+ buf[count++] = (efx_ef10_filter_pri(table, spec) *
HUNT_FILTER_TBL_ROWS +
filter_idx);
}
@@ -3724,15 +3977,58 @@ static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
return match_flags;
}
+static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_vlan *vlan, *next_vlan;
+
+ /* See comment in efx_ef10_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ if (!table)
+ return;
+
+ list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
+ efx_ef10_filter_del_vlan_internal(efx, vlan);
+}
+
+static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
+ enum efx_filter_match_flags match_flags)
+{
+ unsigned int match_pri;
+ int mf;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++) {
+ mf = efx_ef10_filter_match_flags_from_mcdi(
+ table->rx_match_mcdi_flags[match_pri]);
+ if (mf == match_flags)
+ return true;
+ }
+
+ return false;
+}
+
static int efx_ef10_filter_table_probe(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
unsigned int pd_match_pri, pd_match_count;
struct efx_ef10_filter_table *table;
+ struct efx_ef10_vlan *vlan;
size_t outlen;
int rc;
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ if (efx->filter_state) /* already probed */
+ return 0;
+
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
@@ -3765,24 +4061,48 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
"%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
__func__, mcdi_flags, pd_match_pri,
rc, table->rx_match_count);
- table->rx_match_flags[table->rx_match_count++] = rc;
+ table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
+ table->rx_match_count++;
}
}
+ if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(efx_ef10_filter_match_supported(table,
+ (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
+ efx_ef10_filter_match_supported(table,
+ (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
+ netif_info(efx, probe, net_dev,
+ "VLAN filters are not supported in this firmware variant\n");
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
if (!table->entry) {
rc = -ENOMEM;
goto fail;
}
- table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
- table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
- table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+ table->mc_promisc_last = false;
+ table->vlan_filter =
+ !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ INIT_LIST_HEAD(&table->vlan_list);
efx->filter_state = table;
init_waitqueue_head(&table->waitq);
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
+ if (rc)
+ goto fail_add_vlan;
+ }
+
return 0;
+fail_add_vlan:
+ efx_ef10_filter_cleanup_vlans(efx);
+ efx->filter_state = NULL;
fail:
kfree(table);
return rc;
@@ -3843,7 +4163,6 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
nic_data->must_restore_filters = false;
}
-/* Caller must hold efx->filter_sem for write */
static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3852,7 +4171,17 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
unsigned int filter_idx;
int rc;
+ efx_ef10_filter_cleanup_vlans(efx);
efx->filter_state = NULL;
+ /* If we were called without locking, then it's not safe to free
+ * the table as others might be using it. So we just WARN, leak
+ * the memory, and potentially get an inconsistent filter table
+ * state.
+ * This should never actually happen.
+ */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
if (!table)
return;
@@ -3880,37 +4209,54 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
kfree(table);
}
-#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
- if (id != EFX_EF10_FILTER_ID_INVALID) { \
- filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
- if (!table->entry[filter_idx].spec) \
- netif_dbg(efx, drv, efx->net_dev, \
- "%s: marked null spec old %04x:%04x\n", \
- __func__, id, filter_idx); \
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;\
- }
-static void efx_ef10_filter_mark_old(struct efx_nic *efx)
+static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, u16 *id)
{
struct efx_ef10_filter_table *table = efx->filter_state;
- unsigned int filter_idx, i;
+ unsigned int filter_idx;
- if (!table)
- return;
+ if (*id != EFX_EF10_FILTER_ID_INVALID) {
+ filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id);
+ if (!table->entry[filter_idx].spec)
+ netif_dbg(efx, drv, efx->net_dev,
+ "marked null spec old %04x:%04x\n", *id,
+ filter_idx);
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ *id = EFX_EF10_FILTER_ID_INVALID;
+ }
+}
+
+/* Mark old per-VLAN filters that may need to be removed */
+static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int i;
- /* Mark old filters that may need to be removed */
- spin_lock_bh(&efx->filter_lock);
for (i = 0; i < table->dev_uc_count; i++)
- EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
+ efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
for (i = 0; i < table->dev_mc_count; i++)
- EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
- EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
- EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
- EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
+ efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
+ efx_ef10_filter_mark_one_old(efx, &vlan->ucdef);
+ efx_ef10_filter_mark_one_old(efx, &vlan->bcast);
+ efx_ef10_filter_mark_one_old(efx, &vlan->mcdef);
+}
+
+/* Mark old filters that may need to be removed.
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_mark_old(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_vlan *vlan;
+
+ spin_lock_bh(&efx->filter_lock);
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ _efx_ef10_filter_vlan_mark_old(efx, vlan);
spin_unlock_bh(&efx->filter_lock);
}
-#undef EFX_EF10_FILTER_DO_MARK_OLD
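(The macro-to-function change above also makes marking idempotent: the helper takes a pointer so the caller's stored ID is reset to the invalid sentinel in one place. A reduced sketch of the pattern, with names simplified:)

    #define FILTER_ID_INVALID 0xffff
    #define FLAG_AUTO_OLD     0x1UL

    /* Mark one stored filter ID old and invalidate the slot, so a
     * second call on the same slot is a no-op.
     */
    static void mark_one_old(unsigned long *spec, unsigned short *id)
    {
        if (*id != FILTER_ID_INVALID) {
            spec[*id] |= FLAG_AUTO_OLD;
            *id = FILTER_ID_INVALID;
        }
    }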
-static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
+static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev;
@@ -3918,45 +4264,38 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
int addr_count;
unsigned int i;
- table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
addr_count = netdev_uc_count(net_dev);
- if (net_dev->flags & IFF_PROMISC)
- *promisc = true;
+ table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
table->dev_uc_count = 1 + addr_count;
ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
i = 1;
netdev_for_each_uc_addr(uc, net_dev) {
if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
- *promisc = true;
+ table->uc_promisc = true;
break;
}
ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
- table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
i++;
}
}
-static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
+static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev;
struct netdev_hw_addr *mc;
unsigned int i, addr_count;
- table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
- table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
- if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
- *promisc = true;
+ table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
addr_count = netdev_mc_count(net_dev);
i = 0;
netdev_for_each_mc_addr(mc, net_dev) {
if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
- *promisc = true;
+ table->mc_promisc = true;
break;
}
ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
- table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
i++;
}
@@ -3964,7 +4303,8 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
}
static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
- bool multicast, bool rollback)
+ struct efx_ef10_filter_vlan *vlan,
+ bool multicast, bool rollback)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_dev_addr *addr_list;
@@ -3973,14 +4313,17 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
u8 baddr[ETH_ALEN];
unsigned int i, j;
int addr_count;
+ u16 *ids;
int rc;
if (multicast) {
addr_list = table->dev_mc_list;
addr_count = table->dev_mc_count;
+ ids = vlan->mc;
} else {
addr_list = table->dev_uc_list;
addr_count = table->dev_uc_count;
+ ids = vlan->uc;
}
filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
@@ -3988,8 +4331,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
/* Insert/renew filters */
for (i = 0; i < addr_count; i++) {
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- addr_list[i].addr);
+ efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
if (rollback) {
@@ -3998,12 +4340,10 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
rc);
/* Fall back to promiscuous */
for (j = 0; j < i; j++) {
- if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
- continue;
efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- addr_list[j].id);
- addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+ ids[j]);
+ ids[j] = EFX_EF10_FILTER_ID_INVALID;
}
return rc;
} else {
@@ -4011,40 +4351,40 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
rc = EFX_EF10_FILTER_ID_INVALID;
}
}
- addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc);
}
if (multicast && rollback) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
eth_broadcast_addr(baddr);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
+ efx_filter_set_eth_local(&spec, vlan->vid, baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
netif_warn(efx, drv, efx->net_dev,
"Broadcast filter insert failed rc=%d\n", rc);
/* Fall back to promiscuous */
for (j = 0; j < i; j++) {
- if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
- continue;
efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- addr_list[j].id);
- addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+ ids[j]);
+ ids[j] = EFX_EF10_FILTER_ID_INVALID;
}
return rc;
} else {
- table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ EFX_WARN_ON_PARANOID(vlan->bcast !=
+ EFX_EF10_FILTER_ID_INVALID);
+ vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
}
}
return 0;
}
-static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
- bool rollback)
+static int efx_ef10_filter_insert_def(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan,
+ bool multicast, bool rollback)
{
- struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
enum efx_filter_flags filter_flags;
struct efx_filter_spec spec;
@@ -4060,6 +4400,9 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
else
efx_filter_set_uc_def(&spec);
+ if (vlan->vid != EFX_FILTER_VID_UNSPEC)
+ efx_filter_set_eth_local(&spec, vlan->vid, NULL);
+
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING,
@@ -4067,14 +4410,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
"%scast mismatch filter insert failed rc=%d\n",
multicast ? "Multi" : "Uni", rc);
} else if (multicast) {
- table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID);
+ vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc);
if (!nic_data->workaround_26807) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags, 0);
eth_broadcast_addr(baddr);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- baddr);
+ efx_filter_set_eth_local(&spec, vlan->vid, baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
netif_warn(efx, drv, efx->net_dev,
@@ -4084,17 +4427,20 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
/* Roll back the mc_def filter */
efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- table->mcdef_id);
- table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+ vlan->mcdef);
+ vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
return rc;
}
} else {
- table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ EFX_WARN_ON_PARANOID(vlan->bcast !=
+ EFX_EF10_FILTER_ID_INVALID);
+ vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
}
}
rc = 0;
} else {
- table->ucdef_id = rc;
+ EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID);
+ vlan->ucdef = rc;
rc = 0;
}
return rc;
@@ -4203,64 +4549,55 @@ reset_nic:
/* Caller must hold efx->filter_sem for read if race against
* efx_ef10_filter_table_remove() is possible
*/
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct net_device *net_dev = efx->net_dev;
- bool uc_promisc = false, mc_promisc = false;
- if (!efx_dev_registered(efx))
- return;
-
- if (!table)
- return;
-
- efx_ef10_filter_mark_old(efx);
-
- /* Copy/convert the address lists; add the primary station
- * address and broadcast address
+ /* Do not install unspecified VID if VLAN filtering is enabled.
+ * Do not install all specified VIDs if VLAN filtering is disabled.
*/
- netif_addr_lock_bh(net_dev);
- efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
- efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
- netif_addr_unlock_bh(net_dev);
+ if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
+ return;
/* Insert/renew unicast filters */
- if (uc_promisc) {
- efx_ef10_filter_insert_def(efx, false, false);
- efx_ef10_filter_insert_addr_list(efx, false, false);
+ if (table->uc_promisc) {
+ efx_ef10_filter_insert_def(efx, vlan, false, false);
+ efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
} else {
/* If any of the filters failed to insert, fall back to
* promiscuous mode - add in the uc_def filter. But keep
* our individual unicast filters.
*/
- if (efx_ef10_filter_insert_addr_list(efx, false, false))
- efx_ef10_filter_insert_def(efx, false, false);
+ if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
+ efx_ef10_filter_insert_def(efx, vlan, false, false);
}
/* Insert/renew multicast filters */
/* If changing promiscuous state with cascaded multicast filters, remove
* old filters first, so that packets are dropped rather than duplicated
*/
- if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
+ if (nic_data->workaround_26807 &&
+ table->mc_promisc_last != table->mc_promisc)
efx_ef10_filter_remove_old(efx);
- if (mc_promisc) {
+ if (table->mc_promisc) {
if (nic_data->workaround_26807) {
/* If we failed to insert promiscuous filters, rollback
* and fall back to individual multicast filters
*/
- if (efx_ef10_filter_insert_def(efx, true, true)) {
+ if (efx_ef10_filter_insert_def(efx, vlan, true, true)) {
/* Changing promisc state, so remove old filters */
efx_ef10_filter_remove_old(efx);
- efx_ef10_filter_insert_addr_list(efx, true, false);
+ efx_ef10_filter_insert_addr_list(efx, vlan,
+ true, false);
}
} else {
/* If we failed to insert promiscuous filters, don't
* rollback. Regardless, also insert the mc_list
*/
- efx_ef10_filter_insert_def(efx, true, false);
- efx_ef10_filter_insert_addr_list(efx, true, false);
+ efx_ef10_filter_insert_def(efx, vlan, true, false);
+ efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
}
} else {
/* If any filters failed to insert, rollback and fall back to
@@ -4268,17 +4605,153 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
* that fails, roll back again and insert as many of our
* individual multicast filters as we can.
*/
- if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
+ if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
/* Changing promisc state, so remove old filters */
if (nic_data->workaround_26807)
efx_ef10_filter_remove_old(efx);
- if (efx_ef10_filter_insert_def(efx, true, true))
- efx_ef10_filter_insert_addr_list(efx, true, false);
+ if (efx_ef10_filter_insert_def(efx, vlan, true, true))
+ efx_ef10_filter_insert_addr_list(efx, vlan,
+ true, false);
}
}
+}
+
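(The VID/feature skip test at the top of the function above is an exclusive-or: at any time exactly one of the catch-all entry and the real VIDs is installed. Equivalent sketch:)

    /* Install the catch-all (VID-unspecified) entry only while VLAN
     * filtering is off, and real VIDs only while it is on.
     */
    static bool should_install(bool vid_is_unspec, bool vlan_filter_on)
    {
        return vid_is_unspec != vlan_filter_on;
    }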
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_ef10_filter_vlan *vlan;
+ bool vlan_filter;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ if (!table)
+ return;
+
+ efx_ef10_filter_mark_old(efx);
+
+ /* Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ efx_ef10_filter_uc_addr_list(efx);
+ efx_ef10_filter_mc_addr_list(efx);
+ netif_addr_unlock_bh(net_dev);
+
+ /* If VLAN filtering changes, all old filters are finally removed.
+ * Do it in advance to avoid conflicts for unicast untagged and
+ * VLAN 0 tagged filters.
+ */
+ vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ if (table->vlan_filter != vlan_filter) {
+ table->vlan_filter = vlan_filter;
+ efx_ef10_filter_remove_old(efx);
+ }
+
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
efx_ef10_filter_remove_old(efx);
- efx->mc_promisc = mc_promisc;
+ table->mc_promisc_last = table->mc_promisc;
+}
+
+static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_vlan *vlan;
+
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
+ list_for_each_entry(vlan, &table->vlan_list, list) {
+ if (vlan->vid == vid)
+ return vlan;
+ }
+
+ return NULL;
+}
+
+static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_vlan *vlan;
+ unsigned int i;
+
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ vlan = efx_ef10_filter_find_vlan(efx, vid);
+ if (WARN_ON(vlan)) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u already added\n", vid);
+ return -EALREADY;
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ vlan->vid = vid;
+
+ for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
+ vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
+ for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
+ vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
+ vlan->ucdef = EFX_EF10_FILTER_ID_INVALID;
+ vlan->bcast = EFX_EF10_FILTER_ID_INVALID;
+ vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
+
+ list_add_tail(&vlan->list, &table->vlan_list);
+
+ if (efx_dev_registered(efx))
+ efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
+
+ return 0;
+}
+
+static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan)
+{
+ unsigned int i;
+
+ /* See comment in efx_ef10_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ list_del(&vlan->list);
+
+ for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->uc[i]);
+ for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->mc[i]);
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef);
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast);
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef);
+
+ kfree(vlan);
+}
+
+static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_filter_vlan *vlan;
+
+ /* See comment in efx_ef10_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ vlan = efx_ef10_filter_find_vlan(efx, vid);
+ if (!vlan) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u not found in filter state\n", vid);
+ return;
+ }
+
+ efx_ef10_filter_del_vlan_internal(efx, vlan);
}
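(From the accessors above one can reconstruct the shape of the per-VLAN bookkeeping. The sketch below is inferred from usage, not copied from the header; kernel types are assumed and the array bounds stand in for the driver's EFX_EF10_FILTER_DEV_UC_MAX / EFX_EF10_FILTER_DEV_MC_MAX:)

    /* One entry per configured VID, linked on table->vlan_list; every
     * field holds a filter ID or EFX_EF10_FILTER_ID_INVALID.
     */
    struct efx_ef10_filter_vlan {
        struct list_head list;  /* node on the table's vlan_list */
        u16 vid;                /* VID or EFX_FILTER_VID_UNSPEC */
        u16 uc[32];             /* unicast address filters (bound assumed) */
        u16 mc[256];            /* multicast address filters (bound assumed) */
        u16 ucdef;              /* unicast mismatch (default) filter */
        u16 bcast;              /* broadcast filter */
        u16 mcdef;              /* multicast mismatch (default) filter */
    };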
static int efx_ef10_set_mac_address(struct efx_nic *efx)
@@ -4290,6 +4763,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
+
+ mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
efx_ef10_filter_table_remove(efx);
@@ -4302,6 +4777,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+
if (was_enabled)
efx_net_open(efx->net_dev);
netif_device_attach(efx->net_dev);
@@ -4703,6 +5180,29 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
}
}
+static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
+{
+ if (proto != htons(ETH_P_8021Q))
+ return -EINVAL;
+
+ return efx_ef10_add_vlan(efx, vid);
+}
+
+static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
+{
+ if (proto != htons(ETH_P_8021Q))
+ return -EINVAL;
+
+ return efx_ef10_del_vlan(efx, vid);
+}
+
+#define EF10_OFFLOAD_FEATURES \
+ (NETIF_F_IP_CSUM | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_IPV6_CSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_NTUPLE)
+
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.is_vf = true,
.mem_bar = EFX_MEM_VF_BAR,
@@ -4780,6 +5280,8 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
#endif
.ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
+ .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
+ .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
.vswitching_probe = efx_ef10_vswitching_probe_vf,
.vswitching_restore = efx_ef10_vswitching_restore_vf,
@@ -4798,8 +5300,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.always_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
- .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .offload_features = EF10_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
@@ -4891,6 +5392,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+ .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
+ .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_ef10_sriov_configure,
.sriov_init = efx_ef10_sriov_init,
@@ -4919,8 +5422,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.always_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
- .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .offload_features = EF10_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 3c17f274e802..a949b9d27329 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -232,6 +232,35 @@ fail:
return rc;
}
+static int efx_ef10_vadaptor_alloc_set_features(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u32 port_flags;
+ int rc;
+
+ rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ if (rc)
+ goto fail_vadaptor_alloc;
+
+ rc = efx_ef10_vadaptor_query(efx, nic_data->vport_id,
+ &port_flags, NULL, NULL);
+ if (rc)
+ goto fail_vadaptor_query;
+
+ if (port_flags &
+ (1 << MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN))
+ efx->fixed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ else
+ efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+
+fail_vadaptor_query:
+ efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+fail_vadaptor_alloc:
+ return rc;
+}
+
/* On top of the default firmware vswitch setup, create a VEB vswitch and
* expansion vport for use by this function.
*/
@@ -243,7 +272,7 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) {
/* vswitch not needed as we have no VFs */
- efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ efx_ef10_vadaptor_alloc_set_features(efx);
return 0;
}
@@ -263,7 +292,7 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
goto fail3;
ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
- rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ rc = efx_ef10_vadaptor_alloc_set_features(efx);
if (rc)
goto fail4;
@@ -282,9 +311,7 @@ fail1:
int efx_ef10_vswitching_probe_vf(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ return efx_ef10_vadaptor_alloc_set_features(efx);
}
int efx_ef10_vswitching_restore_pf(struct efx_nic *efx)
@@ -554,6 +581,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
efx_device_detach_sync(vf->efx);
efx_net_stop(vf->efx->net_dev);
+ mutex_lock(&vf->efx->mac_lock);
down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);
@@ -630,6 +658,7 @@ restore_filters:
goto reset_nic_up_write;
up_write(&vf->efx->filter_sem);
+ mutex_unlock(&vf->efx->mac_lock);
@@ -642,9 +671,10 @@ restore_filters:
return rc;
reset_nic_up_write:
- if (vf->efx)
+ if (vf->efx) {
up_write(&vf->efx->filter_sem);
-
+ mutex_unlock(&vf->efx->mac_lock);
+ }
reset_nic:
if (vf->efx) {
netif_err(efx, drv, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index 6d25b92cb45e..9ceb7ef0a210 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -70,6 +70,9 @@ int efx_ef10_vport_add_mac(struct efx_nic *efx,
int efx_ef10_vport_del_mac(struct efx_nic *efx,
unsigned int port_id, u8 *mac);
int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
+int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
+ u32 *port_flags, u32 *vadaptor_flags,
+ unsigned int *vlan_tags);
int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id);
#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 097f363f1630..14b821b1c880 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -600,6 +600,7 @@ fail:
*/
static void efx_start_datapath(struct efx_nic *efx)
{
+ netdev_features_t old_features = efx->net_dev->features;
bool old_rx_scatter = efx->rx_scatter;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
@@ -644,6 +645,15 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_dma_len, efx->rx_page_buf_step,
efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+ /* Restore previously fixed features in hw_features and remove
+ * features which are fixed now
+ */
+ efx->net_dev->hw_features |= efx->net_dev->features;
+ efx->net_dev->hw_features &= ~efx->fixed_features;
+ efx->net_dev->features |= efx->fixed_features;
+ if (efx->net_dev->features != old_features)
+ netdev_features_change(efx->net_dev);
+
/* RX filters may also have scatter-enabled flags */
if (efx->rx_scatter != old_rx_scatter)
efx->type->filter_update_rx_scatter(efx);
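(The three feature masks above interact in a fixed order; a minimal sketch of the same bookkeeping with plain bitmasks, detached from struct efx_nic:)

    typedef unsigned long long features_t;

    /* Fixed features are always enabled and never user-toggleable. */
    static void apply_fixed_features(features_t *features,
                                     features_t *hw_features,
                                     features_t fixed)
    {
        *hw_features |= *features;  /* restore bits that used to be fixed */
        *hw_features &= ~fixed;     /* currently fixed bits can't toggle */
        *features |= fixed;         /* currently fixed bits are forced on */
    }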
@@ -1719,6 +1729,7 @@ static int efx_probe_filters(struct efx_nic *efx)
spin_lock_init(&efx->filter_lock);
init_rwsem(&efx->filter_sem);
+ mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
@@ -1757,6 +1768,7 @@ static int efx_probe_filters(struct efx_nic *efx)
#endif
out_unlock:
up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -2312,14 +2324,46 @@ static void efx_set_rx_mode(struct net_device *net_dev)
static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
/* If disabling RX n-tuple filtering, clear existing filters */
- if (net_dev->features & ~data & NETIF_F_NTUPLE)
- return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ if (net_dev->features & ~data & NETIF_F_NTUPLE) {
+ rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ if (rc)
+ return rc;
+ }
+
+ /* If Rx VLAN filter is changed, update filters via mac_reconfigure */
+ if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ /* efx_set_rx_mode() will schedule MAC work to update filters
+		 * when the new features are finally set in net_dev.
+ */
+ efx_set_rx_mode(net_dev);
+ }
return 0;
}
+static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->vlan_rx_add_vid)
+ return efx->type->vlan_rx_add_vid(efx, proto, vid);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->vlan_rx_kill_vid)
+ return efx->type->vlan_rx_kill_vid(efx, proto, vid);
+ else
+ return -EOPNOTSUPP;
+}
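(Both wrappers above follow the driver's optional-op convention: a NULL method slot on NIC generations without the feature degrades cleanly to -EOPNOTSUPP. Generic sketch of the pattern:)

    #include <errno.h>

    struct nic_ops {
        /* may be NULL on older NIC types */
        int (*vlan_rx_add_vid)(void *nic, unsigned short vid);
    };

    static int add_vid(const struct nic_ops *ops, void *nic,
                       unsigned short vid)
    {
        if (!ops->vlan_rx_add_vid)
            return -EOPNOTSUPP;  /* feature absent on this NIC type */
        return ops->vlan_rx_add_vid(nic, vid);
    }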
+
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
@@ -2332,6 +2376,8 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
+ .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
.ndo_set_vf_mac = efx_sriov_set_vf_mac,
.ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
@@ -3147,17 +3193,25 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return -ENOMEM;
efx = netdev_priv(net_dev);
efx->type = (const struct efx_nic_type *) entry->driver_data;
+ efx->fixed_features |= NETIF_F_HIGHDMA;
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_TSO |
- NETIF_F_RXCSUM);
+ NETIF_F_TSO | NETIF_F_RXCSUM);
if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
net_dev->features |= NETIF_F_TSO6;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
- /* All offloads can be toggled */
- net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
+
+ net_dev->hw_features = net_dev->features & ~efx->fixed_features;
+
+ /* Disable VLAN filtering by default. It may be enforced if
+ * the feature is fixed (i.e. VLAN filters are required to
+ * receive VLAN tagged packets due to vPort restrictions).
+ */
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->features |= efx->fixed_features;
+
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
rc = efx_init_struct(efx, pci_dev, net_dev);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 5e3f93f04e62..c3ae739e9c7a 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -274,4 +274,13 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
netif_tx_unlock_bh(dev);
}
+static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
+{
+ if (WARN_ON(down_read_trylock(sem))) {
+ up_read(sem);
+ return false;
+ }
+ return true;
+}
+
#endif /* EFX_EFX_H */
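(The trylock-based assertion above is deliberately one-sided: down_read_trylock() succeeding proves no writer holds the semaphore, but its failing does not prove the caller is the writer. A usage sketch in the style of the callers added in this patch, with the teardown body elided:)

    /* Callers bail out, preferring a warned leak to a use-after-free. */
    static void example_teardown(struct efx_nic *efx)
    {
        if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
            return;   /* WARNed already; leak rather than corrupt */
        /* ... safe to free filter state here ... */
    }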
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 133e9e35be9e..4c83739d158f 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -104,7 +104,8 @@ int efx_farch_test_registers(struct efx_nic *efx,
const struct efx_farch_register_test *regs,
size_t n_regs)
{
- unsigned address = 0, i, j;
+ unsigned address = 0;
+ int i, j;
efx_oword_t mask, imask, original, reg, buf;
for (i = 0; i < n_regs; ++i) {
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 4cc772164a79..c9a5b003caaf 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -273,6 +273,9 @@
* have already installed filters. See the comment at
* MC_CMD_WORKAROUND_BUG26807. */
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set
+ * doesn't exist on this NIC */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
#define MC_CMD_ERR_CODE_OFST 0
@@ -292,9 +295,11 @@
/* Point to the copycode entry point. */
#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
/* Points to the recovery mode entry point. */
#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
/* The command set exported by the boot ROM (MCDI v0) */
#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -686,6 +691,12 @@
#define FCDI_EVENT_CODE_PTP_STATUS 0x9
/* enum: Port id config to map MC-FC port idx */
#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -717,6 +728,11 @@
#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
 * to the MC. Note that this structure is overlaid over a normal FCDI event
@@ -1649,15 +1665,30 @@
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
-/* Uncorrected error on transmit timestamps in NIC clock format */
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
-/* Uncorrected error on receive timestamps in NIC clock format */
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+
/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
/* Results of testing */
@@ -2158,8 +2189,12 @@
/* MC_CMD_DRV_ATTACH_IN msgrequest */
#define MC_CMD_DRV_ATTACH_IN_LEN 12
-/* new state (0=detached, 1=attached) to set if UPDATE=1 */
+/* new state to set if UPDATE=1 */
#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_WIDTH 1
+#define MC_CMD_DRV_PREBOOT_LBN 1
+#define MC_CMD_DRV_PREBOOT_WIDTH 1
/* 1 to set new state, or 0 to just report the existing state */
#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
/* preferred datapath firmware (for Huntington; ignored for Siena) */
@@ -2181,12 +2216,12 @@
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
-/* previous or existing state (0=detached, 1=attached) */
+/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
-/* previous or existing state (0=detached, 1=attached) */
+/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
@@ -2198,6 +2233,10 @@
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
/* enum: The function can perform privileged operations */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
+/* enum: The function does not have an active port associated with it. The port
+ * refers to the Sorrento external FPGA port.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
/***********************************/
@@ -2892,7 +2931,7 @@
*/
#define MC_CMD_SET_MAC 0x2c
-#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_SET_MAC_IN msgrequest */
#define MC_CMD_SET_MAC_IN_LEN 28
@@ -2927,9 +2966,66 @@
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* MC_CMD_SET_MAC_EXT_IN msgrequest */
+#define MC_CMD_SET_MAC_EXT_IN_LEN 32
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+
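(The CONTROL word turns SET_MAC_EXT into a partial update: a parameter is applied only when its CFG_* bit is set, and CONTROL == 0 queries without changing anything. A sketch of the consumer-side logic, with invented cfg/req names:)

    struct mac_cfg { unsigned int mtu; unsigned int fcntl; };

    static void apply_set_mac_ext(struct mac_cfg *cfg, unsigned int control,
                                  unsigned int req_mtu, unsigned int req_fcntl)
    {
        if (control & (1u << 0))     /* CFG_MTU */
            cfg->mtu = req_mtu;
        if (control & (1u << 3))     /* CFG_FCNTL */
            cfg->fcntl = req_fcntl;
        /* control == 0: nothing modified; SET_MAC_V2_OUT still reports MTU */
    }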
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
+/* MC_CMD_SET_MAC_V2_OUT msgresponse */
+#define MC_CMD_SET_MAC_V2_OUT_LEN 4
+/* MTU as configured after processing the request. See comment at
+ * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
+ * to 0.
+ */
+#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+
/***********************************/
/* MC_CMD_PHY_STATS
@@ -3521,6 +3617,26 @@
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
+ */
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+
/***********************************/
/* MC_CMD_NVRAM_UPDATE_START
@@ -3561,6 +3677,37 @@
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
+#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+/* Optional control info. If a partition is stored with an A/B versioning
+ * scheme (i.e. in more than one physical partition in NVRAM) the host can set
+ * this to control which underlying physical partition is used to read data
+ * from. This allows it to perform a read-modify-write-verify with the write
+ * lock continuously held by calling NVRAM_UPDATE_START, reading the old
+ * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
+ * verifying by reading with MODE=TARGET_BACKUP.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+/* enum: Same as omitting MODE: caller sees data in current partition unless it
+ * holds the write lock in which case it sees data in the partition it is
+ * updating.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
+/* enum: Read from the current partition of an A/B pair, even if holding the
+ * write lock.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
+/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
+ * pair
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
+
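(The MODE field enables the read-modify-write-verify flow described in the comment above. Schematic sequence with placeholder wrappers, not the driver's real MCDI helpers:)

    #include <stddef.h>

    int nvram_update_start(int type);           /* takes the write lock */
    int nvram_read(int type, int mode, void *buf, size_t len);
    int nvram_write(int type, const void *buf, size_t len);
    int nvram_update_finish(int type);

    static int ab_update(int type, void *old, const void *new_data,
                         size_t len)
    {
        int rc = nvram_update_start(type);
        if (rc)
            return rc;
        rc = nvram_read(type, 0x1 /* TARGET_CURRENT */, old, len);
        if (!rc)
            rc = nvram_write(type, new_data, len);  /* hits the backup */
        if (!rc)        /* verify the partition we just wrote */
            rc = nvram_read(type, 0x2 /* TARGET_BACKUP */, old, len);
        nvram_update_finish(type);
        return rc;
    }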
/* MC_CMD_NVRAM_READ_OUT msgresponse */
#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
@@ -3895,6 +4042,8 @@
#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
/* enum: Not a sensor: reserved for the next page flag */
#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
/* enum: controller internal temperature sensor voltage on master core
@@ -3931,6 +4080,12 @@
#define MC_CMD_SENSOR_PHY0_VCC 0x4c
/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */
#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+/* enum: Controller die temperature (TDIODE): degC */
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -4007,7 +4162,7 @@
/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
-/* DMA address of host buffer for sensor readings */
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
@@ -4608,6 +4763,10 @@
* operations
*/
#define MC_CMD_MUM_OP_QSFP 0xc
+/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
+ * level) from MUM
+ */
+#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
/* MC_CMD_MUM_IN_NULL msgrequest */
#define MC_CMD_MUM_IN_NULL_LEN 4
@@ -4793,6 +4952,10 @@
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
@@ -4862,6 +5025,11 @@
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
+#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
/* MC_CMD_MUM_OUT msgresponse */
#define MC_CMD_MUM_OUT_LEN 0
@@ -5004,6 +5172,69 @@
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+/* Discrete (soldered) DDR resistor strap info */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
+/* Number of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+/* Array of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
+/* enum: Values 5-15 are reserved for future usage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
+/* enum: No module present */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
+/* enum: Module present supported and powered on */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
+/* enum: Module present but bad type */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
+/* enum: Module present but incompatible voltage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
+/* enum: Module present but unknown SPD */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
+/* enum: Module present but slot cannot support it */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
+/* enum: Module may or may not be present, but it cannot be reached over I2C
+ */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
+
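(Every field above is an LBN/WIDTH pair over a little-endian record, so a 64-bit SODIMM info record decodes with one generic extractor. Illustrative helper, assuming width < 64:)

    #include <stdint.h>

    static uint64_t mcdi_field(uint64_t rec, unsigned int lbn,
                               unsigned int width)
    {
        return (rec >> lbn) & ((UINT64_C(1) << width) - 1);
    }

    /* e.g. bank  = mcdi_field(rec,  0, 8);   (BANK_ID)
     *      state = mcdi_field(rec, 48, 4);   (STATE)
     */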
/* MC_CMD_RESOURCE_SPECIFIER enum */
/* enum: Any */
#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
@@ -5076,6 +5307,8 @@
#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
/* enum: Expansion ROM configuration data for port 0 */
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
/* enum: Expansion ROM configuration data for port 1 */
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
/* enum: Expansion ROM configuration data for port 2 */
@@ -5084,6 +5317,8 @@
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
/* enum: Non-volatile log output partition */
#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output of second core on dual-core device */
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
/* enum: Device state dump output partition */
#define NVRAM_PARTITION_TYPE_DUMP 0x800
/* enum: Application license key storage partition */
@@ -5116,6 +5351,20 @@
#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
/* enum: MUM fuses and lockbits partition. */
#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+/* enum: UEFI expansion ROM if separate from PXE */
+#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Spare partition 0 */
+#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
+/* enum: Spare partition 1 */
+#define NVRAM_PARTITION_TYPE_SPARE_1 0x1100
+/* enum: Spare partition 2 */
+#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+/* enum: Spare partition 3 */
+#define NVRAM_PARTITION_TYPE_SPARE_3 0x1300
+/* enum: Spare partition 4 */
+#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+/* enum: Spare partition 5 */
+#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -5149,6 +5398,90 @@
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
+/* LICENSED_FEATURES structuredef */
+#define LICENSED_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_FEATURES_MASK_OFST 0
+#define LICENSED_FEATURES_MASK_LEN 8
+#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_PIO_LBN 1
+#define LICENSED_FEATURES_PIO_WIDTH 1
+#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_FEATURES_CLOCK_LBN 3
+#define LICENSED_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_MASK_LBN 0
+#define LICENSED_FEATURES_MASK_WIDTH 64
+
+/* LICENSED_V3_APPS structuredef */
+#define LICENSED_V3_APPS_LEN 8
+/* Bitmask of licensed applications */
+#define LICENSED_V3_APPS_MASK_OFST 0
+#define LICENSED_V3_APPS_MASK_LEN 8
+#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_ONLOAD_LBN 0
+#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_PTP_LBN 1
+#define LICENSED_V3_APPS_PTP_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
+#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
+#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
+#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
+#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define LICENSED_V3_APPS_MASK_LBN 0
+#define LICENSED_V3_APPS_MASK_WIDTH 64
+
+/* LICENSED_V3_FEATURES structuredef */
+#define LICENSED_V3_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_V3_FEATURES_MASK_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LEN 8
+#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_PIO_LBN 1
+#define LICENSED_V3_FEATURES_PIO_WIDTH 1
+#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_V3_FEATURES_CLOCK_LBN 3
+#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_V3_FEATURES_MASK_LBN 0
+#define LICENSED_V3_FEATURES_MASK_WIDTH 64
+
/* TX_TIMESTAMP_EVENT structuredef */
#define TX_TIMESTAMP_EVENT_LEN 6
/* lower 16 bits of timestamp data */
@@ -5258,6 +5591,8 @@
#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
@@ -5362,6 +5697,8 @@
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_LBN 10
+#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -5422,6 +5759,8 @@
#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -5535,6 +5874,8 @@
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -5747,6 +6088,46 @@
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+
/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
@@ -6323,6 +6704,15 @@
* client
*/
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -6356,7 +6746,10 @@
/***********************************/
/* MC_CMD_PARSER_DISP_RW
- * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
+ * Please note that this interface is only of use to debug tools which have
+ * knowledge of firmware and hardware data structures; nothing here is intended
+ * for use by normal driver code.
*/
#define MC_CMD_PARSER_DISP_RW 0xe5
@@ -6374,6 +6767,12 @@
#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
/* enum: Lookup engine (with requested metadata format) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
+/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
+/* enum: RX1 dispatcher CPU (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
+/* enum: Miscellaneous other state (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
/* identifies the type of operation requested */
#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
/* enum: read a word of DICPU DMEM or a LUE entry */
@@ -6382,8 +6781,12 @@
#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
-/* data memory address or LUE index */
+/* data memory address (DICPU targets) or LUE index (LUE targets) */
#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* selector (for MISC_STATE target) */
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+/* enum: Port to datapath mapping */
+#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
/* value to write (for DMEM writes) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
@@ -6408,6 +6811,12 @@
*/
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
/***********************************/
@@ -7071,6 +7480,24 @@
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
@@ -7138,6 +7565,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
/* enum: RXDP Test firmware image 8 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -7153,6 +7582,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
@@ -7227,6 +7658,258 @@
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but that the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex scheme of PF-to-port mapping is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+
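Several of the V2 fields above only exist on newer firmware, so a driver is expected to issue plain MC_CMD_GET_CAPABILITIES and check the response length before touching them; the per-PF port map additionally needs its special values handled. A minimal sketch of that pattern, assuming the sfc driver's MCDI helpers (MCDI_DECLARE_BUF(), efx_mcdi_rpc(), MCDI_PTR()); efx, pf_index and rx_dc_entries are hypothetical:

MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
size_t outlen;
int rc;

rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
		  outbuf, sizeof(outbuf), &outlen);
if (rc)
	return rc;

/* FLAGS2 and everything after it is absent on older firmware */
if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
	const u8 *pf_map = MCDI_PTR(outbuf,
			GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT);
	u8 port = pf_map[pf_index];

	/* Per the comment above, a current driver must treat the
	 * "cannot be expressed" value like PF_NOT_ASSIGNED.
	 */
	if (port == MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT)
		port = MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED;

	/* RX_DESC_CACHE_SIZE is a binary logarithm: 2^n entries */
	rx_dc_entries = 1U <<
		*MCDI_PTR(outbuf, GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE);
}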
/***********************************/
/* MC_CMD_V2_EXTN
@@ -7475,6 +8158,25 @@
/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of v-switch. For now this command is an empty placeholder.
+ * It may be used to check if a v-switch is connected to a given EVB port (if
+ * not, then the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
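Because the response is deliberately empty and a missing v-switch is reported as ENOENT, this command works as a cheap existence probe. A minimal sketch, assuming the sfc MCDI helpers; efx_mcdi_rpc_quiet() is used so that the expected ENOENT is not logged as an error, and the function name is hypothetical:

static bool efx_vswitch_present(struct efx_nic *efx, u32 port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_QUERY_IN_LEN);

	MCDI_SET_DWORD(inbuf, VSWITCH_QUERY_IN_UPSTREAM_PORT_ID, port_id);
	/* Empty response: -ENOENT means no v-switch on this EVB port */
	return efx_mcdi_rpc_quiet(efx, MC_CMD_VSWITCH_QUERY,
				  inbuf, sizeof(inbuf), NULL, 0, NULL) == 0;
}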
+/***********************************/
/* MC_CMD_VPORT_ALLOC
* allocate a v-port.
*/
@@ -7510,6 +8212,8 @@
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
/* The number of VLAN tags to insert/remove. An error will be returned if
* incompatible with the number of VLAN tags specified for the upstream
* v-switch.
@@ -7561,6 +8265,8 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
/* The number of VLAN tags to transparently insert/remove. */
@@ -7639,6 +8345,29 @@
/***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some config of v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+/* The number of VLAN tags that may still be added */
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+
+
+/***********************************/
/* MC_CMD_EVB_PORT_ASSIGN
* assign a port to a PCI function.
*/
@@ -7875,10 +8604,17 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
-/* Hash control flags. The _EN bits are always supported. The _MODE bits only
- * work when the firmware reports ADDITIONAL_RSS_MODES in
- * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0.
- * See the RSS_MODE structure for the meaning of the mode bits.
+/* Hash control flags. The _EN bits are always supported, but new modes are
+ * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
+ * in this case, the MODE fields may be set to non-zero values, and will take
+ * effect regardless of the settings of the _EN flags. See the RSS_MODE
+ * structure for the meaning of the mode bits. Drivers must check the
+ * capability before trying to set any _MODE fields, as older firmware will
+ * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
+ * the case where all the _MODE flags are zero, the _EN flags take effect,
+ * providing backward compatibility for existing drivers. (Setting all _MODE
+ * *and* all _EN flags to zero is valid, to disable RSS spreading for that
+ * particular packet type.)
*/
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
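In line with the comment above, a driver must not write non-zero _MODE fields until it has seen ADDITIONAL_RSS_MODES in MC_CMD_GET_CAPABILITIES, since older firmware rejects any FLAGS value above 0xff with EINVAL. A minimal sketch of that guard, assuming the sfc MCDI helpers; additional_rss_modes, ctx_id, mode_flags and en_flags are hypothetical:

MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);

MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, ctx_id);
if (additional_rss_modes)
	/* Newer firmware: non-zero _MODE fields override the _EN bits */
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, mode_flags);
else
	/* Older firmware: only the _EN bits (<= 0xff) are accepted */
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
		       en_flags & 0xff);
return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS,
		    inbuf, sizeof(inbuf), NULL, 0, NULL);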
@@ -7923,11 +8659,18 @@
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
-/* Hash control flags. If any _MODE bits are non-zero (which will only be true
- * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be
- * disregarded (but are guaranteed to be consistent with the _MODE bits if
- * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was
- * allocated).
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
*/
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
@@ -8155,6 +8898,74 @@
/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
+
+
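A minimal sketch of replacing a v-port's MAC address with this command, assuming the sfc MCDI helpers; efx, vport_id and mac are hypothetical. The RESET_DONE flag in the response tells the caller whether the v-port's user function was reset to apply the change:

MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_RECONFIGURE_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_RECONFIGURE_OUT_LEN);
bool reset_done;
size_t outlen;
int rc;

memset(inbuf, 0, sizeof(inbuf));
MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_VPORT_ID, vport_id);
MCDI_POPULATE_DWORD_1(inbuf, VPORT_RECONFIGURE_IN_FLAGS,
		      VPORT_RECONFIGURE_IN_REPLACE_MACADDRS, 1);
MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_NUM_MACADDRS, 1);
ether_addr_copy(MCDI_PTR(inbuf, VPORT_RECONFIGURE_IN_MACADDRS), mac);

rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_RECONFIGURE, inbuf, sizeof(inbuf),
		  outbuf, sizeof(outbuf), &outlen);
if (rc)
	return rc;
reset_done = MCDI_DWORD(outbuf, VPORT_RECONFIGURE_OUT_FLAGS) &
	     (1 << MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN);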
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * read some config of v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+
+
+/***********************************/
/* MC_CMD_DUMP_BUFTBL_ENTRIES
* Dump buffer table entries, mainly for command client debug use. Dumps
* absolute entries, and does not use chunk handles. All entries must be in
@@ -8196,6 +9007,14 @@
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
+/* enum: pad to 64 bytes */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+/* enum: pad to 128 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+/* enum: pad to 256 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
@@ -8217,6 +9036,10 @@
#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
/***********************************/
@@ -8788,32 +9611,38 @@
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15, TBD for Medford) */
+/* enum: Attenuation (0-15, Huntington) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
-/* enum: CTLE Boost (0-15, TBD for Medford) */
+/* enum: CTLE Boost (0-15, Huntington) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
-/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD
- * for Medford)
+/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max
+ * positive, Medford - 0-31)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
-/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for
- * Medford)
+/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-31)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
-/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for
- * Medford)
+/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
-/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for
- * Medford)
+/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
-/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for
- * Medford)
+/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
-/* enum: Edge DFE DLEV (TBD for Medford) */
+/* enum: Edge DFE DLEV (0-128 for Medford) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
+/* enum: Variable Gain Amplifier (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
+/* enum: CTLE EQ Capacitor (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (0-7, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -8885,26 +9714,32 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TX Amplitude */
+/* enum: TX Amplitude (Huntington, Medford) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
-/* enum: De-Emphasis Tap1 Magnitude (0-7) */
+/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
/* enum: De-Emphasis Tap1 Fine */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
-/* enum: De-Emphasis Tap2 Magnitude (0-6) */
+/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
-/* enum: De-Emphasis Tap2 Fine */
+/* enum: De-Emphasis Tap2 Fine (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
-/* enum: Pre-Emphasis Magnitude */
+/* enum: Pre-Emphasis Magnitude (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
-/* enum: Pre-Emphasis Fine */
+/* enum: Pre-Emphasis Fine (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
-/* enum: TX Slew Rate Coarse control */
+/* enum: TX Slew Rate Coarse control (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
-/* enum: TX Slew Rate Fine control */
+/* enum: TX Slew Rate Fine control (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
-/* enum: TX Termination Impedance control */
+/* enum: TX Termination Impedance control (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
+/* enum: TX Amplitude Fine control (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
+/* enum: Pre-shoot Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
+/* enum: De-emphasis Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -9086,8 +9921,16 @@
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+/* enum: DFE DLev */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
+/* enum: Figure of Merit */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
+/* enum: CTLE EQ Capacitor (HF Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (DC Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
@@ -9096,12 +9939,57 @@
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
+
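Each PARAM word of the SET request packs the parameter ID (shared with the GET variant above), the lane, the autocal flag and the initial value. A minimal sketch of packing one word with plain shifts; the chosen parameter and values are arbitrary examples:

/* CTLE EQ capacitor on lane 2, autocal off, initial value 9 */
u32 param =
	(MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC <<
	 MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN) |
	(MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 <<
	 MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN) |
	(0 << MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN) |
	(9 << MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN);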
/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
/* Requested operation */
@@ -9176,6 +10064,7 @@
/***********************************/
/* MC_CMD_LICENSING
* Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
*/
#define MC_CMD_LICENSING 0xf3
@@ -9220,6 +10109,93 @@
/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
+
+
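A minimal sketch of the REPORT_LICENSE operation, assuming the sfc MCDI helpers; note that UPDATE_LICENSE instead returns a zero-length response, per the comment above. efx, valid and invalid are hypothetical:

MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
size_t outlen;
int rc;

MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
	       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
		  outbuf, sizeof(outbuf), &outlen);
if (rc || outlen < MC_CMD_LICENSING_V3_OUT_LEN)
	return rc ? rc : -EIO;

valid = MCDI_DWORD(outbuf, LICENSING_V3_OUT_VALID_KEYS);
/* INVALID_KEYS = UNVERIFIABLE_KEYS + WRONG_NODE_KEYS */
invalid = MCDI_DWORD(outbuf, LICENSING_V3_OUT_INVALID_KEYS);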
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+/* type of license (e.g. 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+
+
+/***********************************/
/* MC_CMD_MC2MC_PROXY
* Execute an arbitrary MCDI command on the slave MC of a dual-core device.
* This will fail on a single-core system.
@@ -9239,7 +10215,7 @@
/* MC_CMD_GET_LICENSED_APP_STATE
* Query the state of an individual licensed application. (Note that the actual
* state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
- * or a reboot of the MC.)
+ * or a reboot of the MC.) Not used for V3 licensing.
*/
#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
@@ -9261,8 +10237,68 @@
/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
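The application ID is a 64-bit single-bit mask split across LO/HI dwords, so a 64-bit setter is the natural fit. A minimal sketch, assuming the sfc MCDI helpers including MCDI_SET_QWORD(); efx, app_id and licensed are hypothetical:

MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN);
size_t outlen;
int rc;

/* app_id must have exactly one bit set (a LICENSED_V3_APPS_xxx value) */
MCDI_SET_QWORD(inbuf, GET_LICENSED_V3_APP_STATE_IN_APP_ID, app_id);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_V3_APP_STATE,
		  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
if (rc)
	return rc;
licensed = MCDI_DWORD(outbuf, GET_LICENSED_V3_APP_STATE_OUT_STATE) ==
	   MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED;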
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+
+
+/***********************************/
/* MC_CMD_LICENSED_APP_OP
- * Perform an action for an individual licensed application.
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
*/
#define MC_CMD_LICENSED_APP_OP 0xf6
@@ -9328,6 +10364,67 @@
/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 72
+/* application ID expressed as a single bit mask */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 4
+/* challenge for validation */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 72
+/* application expiry time */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 0
+/* application expiry units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 4
+/* enum: expiry units are accounting units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* validation response to challenge */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 64
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_MASK_FEATURES
+ * Mask features - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
+/* mask to be applied to features to be changed */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+/* whether to turn on or turn off the masked features */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+/* enum: turn the features off */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+/* enum: turn the features back on */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
+
+
+/***********************************/
/* MC_CMD_SET_PORT_SNIFF_CONFIG
* Configure RX port sniffing for the physical port associated with the calling
* function. Only a privileged function may change the port sniffing
@@ -9696,12 +10793,27 @@
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows the source MAC address of TX packets to be set to any
+ * arbitrary MAC address.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
/* enum: Set this bit to indicate that a new privilege mask is to be set,
* otherwise the command will only read the existing mask.
*/
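Since GRP_MAC_SPOOFING above is deprecated, new code granting the equivalent permission would combine the two finer-grained bits. A one-line sketch:

/* Equivalent of the deprecated GRP_MAC_SPOOFING privilege */
u32 mask = MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX |
	   MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC;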
@@ -9951,7 +11063,7 @@
/* Sector type */
#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
/* Enum values, see field(s): */
-/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
/* Sector size */
#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
/* Sector data */
@@ -10067,4 +11179,123 @@
#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+/***********************************/
+/* MC_CMD_EXEC_SIGNED
+ * Check the CMAC of the contents of IMEM and DMEM against the value supplied
+ * and if correct begin execution from the start of IMEM. The caller supplies a
+ * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
+ * computation runs from the start of IMEM, and from the start of DMEM + 16k,
+ * to match flash booting. The command will respond with EINVAL if the CMAC
+ * does not match, otherwise it will respond with success before it jumps to
+ * IMEM.
+ */
+#define MC_CMD_EXEC_SIGNED 0x10c
+
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_EXEC_SIGNED_IN msgrequest */
+#define MC_CMD_EXEC_SIGNED_IN_LEN 28
+/* the length of code to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+/* the length of data to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+/* the XPM sector containing the key to use */
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+/* the expected CMAC value */
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
+
+/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
+#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PREPARE_SIGNED
+ * Prepare to upload a signed image. This will scrub the specified length of
+ * the data region, which must be at least as large as the DATALEN supplied to
+ * MC_CMD_EXEC_SIGNED.
+ */
+#define MC_CMD_PREPARE_SIGNED 0x10d
+
+#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
+#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
+/* the length of data area to clear */
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+
+/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
+#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
+
+
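The expected flow is PREPARE_SIGNED to scrub the data region, then the image upload (not shown here), then EXEC_SIGNED with the key sector and the expected CMAC. A minimal sketch of the two MCDI calls, assuming the sfc helpers; efx, data_len, code_len, key_sector and cmac are hypothetical:

MCDI_DECLARE_BUF(prep, MC_CMD_PREPARE_SIGNED_IN_LEN);
MCDI_DECLARE_BUF(exec, MC_CMD_EXEC_SIGNED_IN_LEN);
int rc;

/* Scrub at least as much data area as EXEC_SIGNED will validate */
MCDI_SET_DWORD(prep, PREPARE_SIGNED_IN_DATALEN, data_len);
rc = efx_mcdi_rpc(efx, MC_CMD_PREPARE_SIGNED,
		  prep, sizeof(prep), NULL, 0, NULL);
if (rc)
	return rc;

/* ... upload the IMEM/DMEM contents here ... */

MCDI_SET_DWORD(exec, EXEC_SIGNED_IN_CODELEN, code_len);
MCDI_SET_DWORD(exec, EXEC_SIGNED_IN_DATALEN, data_len);
MCDI_SET_DWORD(exec, EXEC_SIGNED_IN_KEYSECTOR, key_sector);
memcpy(MCDI_PTR(exec, EXEC_SIGNED_IN_CMAC), cmac,
       MC_CMD_EXEC_SIGNED_IN_CMAC_LEN);
/* EINVAL here means the CMAC did not match */
return efx_mcdi_rpc(efx, MC_CMD_EXEC_SIGNED,
		    exec, sizeof(exec), NULL, 0, NULL);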
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+
+
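A minimal sketch of installing one tunnel port mapping. The per-entry layout (TUNNEL_ENCAP_UDP_PORT_ENTRY) is defined elsewhere in this header; its encoding here (UDP port in the low 16 bits, protocol selector in the high 16 bits, 0 assumed to mean VXLAN) is an assumption for illustration, as are efx and the use of the IANA VXLAN port 4789:

MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(1));
MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
u8 *p = (u8 *)inbuf;
size_t outlen;
int rc;

memset(inbuf, 0, sizeof(inbuf));	/* FLAGS = 0: not unloading */
p[MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST] = 1;
/* One entry: VXLAN (assumed protocol code 0) on UDP port 4789 */
MCDI_SET_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES,
	       4789 | (0 << 16));

rc = efx_mcdi_rpc(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
		  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
/* If RESETTING is set in the response, all functions will see a reset */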
+/***********************************/
+/* MC_CMD_RX_BALANCING
+ * Configure a port upconverter to distribute the packets on both RX engines.
+ * Packets are distributed based on a table with the destination vFIFO. The
+ * index of the table is a hash of the IPv4 source and destination addresses
+ * and the VLAN priority.
+ */
+#define MC_CMD_RX_BALANCING 0x118
+
+#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RX_BALANCING_IN msgrequest */
+#define MC_CMD_RX_BALANCING_IN_LEN 4
+/* The RX port whose upconverter table will be modified */
+#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define MC_CMD_RX_BALANCING_IN_PORT_LEN 1
+/* The VLAN priority associated to the table index and vFIFO */
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 1
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 1
+/* The resulting bit of SRC^DST for indexing the table */
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 2
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 1
+/* The RX engine to which the vFIFO in the table entry will point */
+#define MC_CMD_RX_BALANCING_IN_ENG_OFST 3
+#define MC_CMD_RX_BALANCING_IN_ENG_LEN 1
+
+/* MC_CMD_RX_BALANCING_OUT msgresponse */
+#define MC_CMD_RX_BALANCING_OUT_LEN 0
+
+
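All four request fields are single bytes at consecutive offsets, so the request can be assembled with plain byte writes. A minimal sketch, assuming the sfc MCDI helpers; efx, port, vlan_prio, src_xor_dst and rx_engine are hypothetical:

MCDI_DECLARE_BUF(inbuf, MC_CMD_RX_BALANCING_IN_LEN);
u8 *p = (u8 *)inbuf;

p[MC_CMD_RX_BALANCING_IN_PORT_OFST] = port;
p[MC_CMD_RX_BALANCING_IN_PRIORITY_OFST] = vlan_prio;
p[MC_CMD_RX_BALANCING_IN_SRC_DST_OFST] = src_xor_dst;
p[MC_CMD_RX_BALANCING_IN_ENG_OFST] = rx_engine;

return efx_mcdi_rpc(efx, MC_CMD_RX_BALANCING,
		    inbuf, sizeof(inbuf), NULL, 0, NULL);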
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7f295c4d7b80..2a9228a6e4a0 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
- result |= SUPPORTED_FIBRE;
- break;
-
case MC_CMD_MEDIA_QSFP_PLUS:
result |= SUPPORTED_FIBRE;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ result |= SUPPORTED_1000baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ result |= SUPPORTED_10000baseT_Full;
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
result |= SUPPORTED_40000baseCR4_Full;
break;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index d13ddf9703ff..9ff062a36ea8 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -868,6 +868,7 @@ struct vfdi_status;
* be held to modify it.
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
+ * @fixed_features: Features which cannot be turned off
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
* @phy_op: PHY interface
@@ -916,7 +917,6 @@ struct vfdi_status;
* @stats_lock: Statistics update lock. Must be held when calling
* efx_nic_type::{update,start,stop}_stats.
* @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
- * @mc_promisc: Whether in multicast promiscuous mode when last changed
*
* This is stored in the private area of the &struct net_device.
*/
@@ -1008,6 +1008,8 @@ struct efx_nic {
bool port_initialized;
struct net_device *net_dev;
+ netdev_features_t fixed_features;
+
struct efx_buffer stats_buffer;
u64 rx_nodesc_drops_total;
u64 rx_nodesc_drops_while_down;
@@ -1065,7 +1067,6 @@ struct efx_nic {
int last_irq_cpu;
spinlock_t stats_lock;
atomic_t n_rx_noskb_drops;
- bool mc_promisc;
};
static inline int efx_dev_registered(struct efx_nic *efx)
@@ -1333,6 +1334,8 @@ struct efx_nic_type {
int (*ptp_set_ts_config)(struct efx_nic *efx,
struct hwtstamp_config *init);
int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
+ int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
+ int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
int (*sriov_init)(struct efx_nic *efx);
void (*sriov_fini)(struct efx_nic *efx);
bool (*sriov_wanted)(struct efx_nic *efx);
@@ -1521,4 +1524,16 @@ static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
+/* Get all supported features.
+ * If a feature is not fixed, it is present in hw_features.
+ * If a feature is fixed, it does not present in hw_features, but
+ * always in features.
+ */
+static inline netdev_features_t efx_supported_features(const struct efx_nic *efx)
+{
+ const struct net_device *net_dev = efx->net_dev;
+
+ return net_dev->features | net_dev->hw_features;
+}
+
#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 0b536e27d3b2..96944c3c9d14 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -519,6 +519,9 @@ enum {
#ifdef CONFIG_SFC_SRIOV
* @vf: Pointer to VF data structure
#endif
+ * @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero
+ * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock.
+ * @vlan_lock: Lock to serialize access to vlan_list.
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
@@ -550,6 +553,8 @@ struct efx_ef10_nic_data {
struct ef10_vf *vf;
#endif
u8 vport_mac[ETH_ALEN];
+ struct list_head vlan_list;
+ struct mutex vlan_lock;
};
int efx_init_sriov(void);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8af25563f627..ca3134540d2d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -114,9 +114,7 @@ struct smsc911x_data {
/* spinlock to ensure register accesses are serialised */
spinlock_t dev_lock;
- struct phy_device *phy_dev;
struct mii_bus *mii_bus;
- int phy_irq[PHY_MAX_ADDR];
unsigned int using_extphy;
int last_duplex;
int last_carrier;
@@ -834,7 +832,7 @@ static int smsc911x_phy_reset(struct smsc911x_data *pdata)
static int smsc911x_phy_loopbacktest(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- struct phy_device *phy_dev = pdata->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
int result = -EIO;
unsigned int i, val;
unsigned long flags;
@@ -904,7 +902,8 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev)
static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
{
- struct phy_device *phy_dev = pdata->phy_dev;
+ struct net_device *ndev = pdata->dev;
+ struct phy_device *phy_dev = ndev->phydev;
u32 afc = smsc911x_reg_read(pdata, AFC_CFG);
u32 flow;
unsigned long flags;
@@ -945,7 +944,7 @@ static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
static void smsc911x_phy_adjust_link(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- struct phy_device *phy_dev = pdata->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
unsigned long flags;
int carrier;
@@ -1038,7 +1037,6 @@ static int smsc911x_mii_probe(struct net_device *dev)
SUPPORTED_Asym_Pause);
phydev->advertising = phydev->supported;
- pdata->phy_dev = phydev;
pdata->last_duplex = -1;
pdata->last_carrier = -1;
@@ -1073,7 +1071,6 @@ static int smsc911x_mii_init(struct platform_device *pdev,
pdata->mii_bus->priv = pdata;
pdata->mii_bus->read = smsc911x_mii_read;
pdata->mii_bus->write = smsc911x_mii_write;
- memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus));
pdata->mii_bus->parent = &pdev->dev;
@@ -1340,9 +1337,11 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
{
+ struct net_device *ndev = pdata->dev;
+ struct phy_device *phy_dev = ndev->phydev;
int rc = 0;
- if (!pdata->phy_dev)
+ if (!phy_dev)
return rc;
/* If the internal PHY is in General Power-Down mode, all, except the
@@ -1352,7 +1351,7 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
* In that case, clear the bit 0.11, so the PHY powers up and we can
* access to the phy registers.
*/
- rc = phy_read(pdata->phy_dev, MII_BMCR);
+ rc = phy_read(phy_dev, MII_BMCR);
if (rc < 0) {
SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
return rc;
@@ -1362,7 +1361,7 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
* disable the general power down-mode.
*/
if (rc & BMCR_PDOWN) {
- rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
+ rc = phy_write(phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
if (rc < 0) {
SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
return rc;
@@ -1376,12 +1375,14 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
{
+ struct net_device *ndev = pdata->dev;
+ struct phy_device *phy_dev = ndev->phydev;
int rc = 0;
- if (!pdata->phy_dev)
+ if (!phy_dev)
return rc;
- rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+ rc = phy_read(phy_dev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0) {
SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
@@ -1391,7 +1392,7 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
/* Only disable if energy detect mode is already enabled */
if (rc & MII_LAN83C185_EDPWRDOWN) {
/* Disable energy detect mode for this SMSC Transceivers */
- rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+ rc = phy_write(phy_dev, MII_LAN83C185_CTRL_STATUS,
rc & (~MII_LAN83C185_EDPWRDOWN));
if (rc < 0) {
@@ -1407,12 +1408,14 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
{
+ struct net_device *ndev = pdata->dev;
+ struct phy_device *phy_dev = ndev->phydev;
int rc = 0;
- if (!pdata->phy_dev)
+ if (!phy_dev)
return rc;
- rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+ rc = phy_read(phy_dev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0) {
SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
@@ -1422,7 +1425,7 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
/* Only enable if energy detect mode is already disabled */
if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
/* Enable energy detect mode for this SMSC Transceivers */
- rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+ rc = phy_write(phy_dev, MII_LAN83C185_CTRL_STATUS,
rc | MII_LAN83C185_EDPWRDOWN);
if (rc < 0) {
@@ -1519,7 +1522,7 @@ static int smsc911x_open(struct net_device *dev)
unsigned int intcfg;
/* if the phy is not yet registered, retry later*/
- if (!pdata->phy_dev) {
+ if (!dev->phydev) {
SMSC_WARN(pdata, hw, "phy_dev is NULL");
return -EAGAIN;
}
@@ -1610,7 +1613,7 @@ static int smsc911x_open(struct net_device *dev)
pdata->last_carrier = -1;
/* Bring the PHY up */
- phy_start(pdata->phy_dev);
+ phy_start(dev->phydev);
temp = smsc911x_reg_read(pdata, HW_CFG);
/* Preserve TX FIFO size and external PHY configuration */
@@ -1665,8 +1668,8 @@ static int smsc911x_stop(struct net_device *dev)
smsc911x_tx_update_txcounters(dev);
/* Bring the PHY down */
- if (pdata->phy_dev)
- phy_stop(pdata->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
SMSC_TRACE(pdata, ifdown, "Interface stopped");
return 0;
@@ -1906,30 +1909,10 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
/* Standard ioctls for mii-tool */
static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct smsc911x_data *pdata = netdev_priv(dev);
-
- if (!netif_running(dev) || !pdata->phy_dev)
+ if (!netif_running(dev) || !dev->phydev)
return -EINVAL;
- return phy_mii_ioctl(pdata->phy_dev, ifr, cmd);
-}
-
-static int
-smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct smsc911x_data *pdata = netdev_priv(dev);
-
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
- return phy_ethtool_gset(pdata->phy_dev, cmd);
-}
-
-static int
-smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct smsc911x_data *pdata = netdev_priv(dev);
-
- return phy_ethtool_sset(pdata->phy_dev, cmd);
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
@@ -1943,9 +1926,7 @@ static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
static int smsc911x_ethtool_nwayreset(struct net_device *dev)
{
- struct smsc911x_data *pdata = netdev_priv(dev);
-
- return phy_start_aneg(pdata->phy_dev);
+ return phy_start_aneg(dev->phydev);
}
static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev)
@@ -1971,7 +1952,7 @@ smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- struct phy_device *phy_dev = pdata->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
unsigned long flags;
unsigned int i;
unsigned int j = 0;
@@ -2117,8 +2098,6 @@ static int smsc911x_ethtool_set_eeprom(struct net_device *dev,
}
static const struct ethtool_ops smsc911x_ethtool_ops = {
- .get_settings = smsc911x_ethtool_getsettings,
- .set_settings = smsc911x_ethtool_setsettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = smsc911x_ethtool_getdrvinfo,
.nway_reset = smsc911x_ethtool_nwayreset,
@@ -2130,6 +2109,8 @@ static const struct ethtool_ops smsc911x_ethtool_ops = {
.get_eeprom = smsc911x_ethtool_get_eeprom,
.set_eeprom = smsc911x_ethtool_set_eeprom,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops smsc911x_netdev_ops = {
@@ -2310,12 +2291,11 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
pdata = netdev_priv(dev);
BUG_ON(!pdata);
BUG_ON(!pdata->ioaddr);
- BUG_ON(!pdata->phy_dev);
+ BUG_ON(!dev->phydev);
SMSC_TRACE(pdata, ifdown, "Stopping driver");
- phy_disconnect(pdata->phy_dev);
- pdata->phy_dev = NULL;
+ phy_disconnect(dev->phydev);
mdiobus_unregister(pdata->mii_bus);
mdiobus_free(pdata->mii_bus);
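
The smsc911x changes follow the tree-wide conversion to the phydev pointer that phylib already keeps in struct net_device, which is also what allows get_settings/set_settings to be replaced by the generic phy_ethtool_{get,set}_link_ksettings helpers. A hedged sketch of the pattern, with driver, callback, and bus-id names hypothetical:

    #include <linux/err.h>
    #include <linux/netdevice.h>
    #include <linux/phy.h>

    static void example_adjust_link(struct net_device *ndev)
    {
        /* React to ndev->phydev->speed/duplex/link changes here. */
    }

    static int example_open(struct net_device *ndev)
    {
        struct phy_device *phydev;

        /* phy_connect() stores the PHY in ndev->phydev, so the driver
         * no longer needs a private phy_dev field of its own.
         * "mdio-bus:00" is a placeholder bus id.
         */
        phydev = phy_connect(ndev, "mdio-bus:00", example_adjust_link,
                             PHY_INTERFACE_MODE_MII);
        if (IS_ERR(phydev))
            return PTR_ERR(phydev);

        phy_start(ndev->phydev);    /* same object phy_connect returned */
        return 0;
    }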
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index fc60368df2e7..2533b91f1421 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -232,6 +232,11 @@ struct stmmac_extra_stats {
#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
#define DEFAULT_DMA_PBL 8
+/* PCS status and mask defines */
+#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */
+#define PCS_LINK_IRQ BIT(1) /* PCS Link */
+#define PCS_RGSMIIIS_IRQ BIT(0) /* RGMII or SMII Interrupt */
+
/* Max/Min RI Watchdog Timer count value */
#define MAX_DMA_RIWT 0xff
#define MIN_DMA_RIWT 0x20
@@ -272,9 +277,6 @@ enum dma_irq_status {
#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
-#define CORE_PCS_ANE_COMPLETE (1 << 5)
-#define CORE_PCS_LINK_STATUS (1 << 6)
-#define CORE_RGMII_IRQ (1 << 7)
#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
/* Physical Coding Sublayer */
@@ -469,9 +471,12 @@ struct stmmac_ops {
void (*reset_eee_mode)(struct mac_device_info *hw);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
- void (*ctrl_ane)(struct mac_device_info *hw, bool restart);
- void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv);
void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+ /* PCS calls */
+ void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback);
+ void (*pcs_rane)(void __iomem *ioaddr, bool restart);
+ void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
};
/* PTP and HW Timer helpers */
@@ -524,6 +529,9 @@ struct mac_device_info {
int unicast_filter_entries;
int mcast_bits_log2;
unsigned int rx_csum;
+ unsigned int pcs;
+ unsigned int pmt;
+ unsigned int ps;
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
@@ -546,6 +554,7 @@ void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+
extern const struct stmmac_mode_ops ring_mode_ops;
extern const struct stmmac_mode_ops chain_mode_ops;
extern const struct stmmac_desc_ops dwmac4_desc_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 0cd3ecff768b..92105916ef40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -46,6 +46,7 @@ struct rk_priv_data {
struct platform_device *pdev;
int phy_iface;
struct regulator *regulator;
+ bool suspended;
const struct rk_gmac_ops *ops;
bool clk_enabled;
@@ -72,6 +73,122 @@ struct rk_priv_data {
#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
#define GRF_CLR_BIT(nr) (BIT(nr+16))
+#define RK3228_GRF_MAC_CON0 0x0900
+#define RK3228_GRF_MAC_CON1 0x0904
+
+/* RK3228_GRF_MAC_CON0 */
+#define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* RK3228_GRF_MAC_CON1 */
+#define RK3228_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
+#define RK3228_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3)
+#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
+#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2)
+#define RK3228_GMAC_SPEED_100M GRF_BIT(2)
+#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7)
+#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
+#define RK3228_GMAC_CLK_125M (GRF_CLR_BIT(8) | GRF_CLR_BIT(9))
+#define RK3228_GMAC_CLK_25M (GRF_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_CLK_2_5M (GRF_CLR_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_RMII_MODE GRF_BIT(10)
+#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10)
+#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
+#define RK3228_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
+#define RK3228_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
+#define RK3228_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
+
+static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_PHY_INTF_SEL_RGMII |
+ RK3228_GMAC_RMII_MODE_CLR |
+ RK3228_GMAC_RXCLK_DLY_ENABLE |
+ RK3228_GMAC_TXCLK_DLY_ENABLE);
+
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
+ RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3228_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_PHY_INTF_SEL_RMII |
+ RK3228_GMAC_RMII_MODE);
+
+ /* set MAC to RMII mode */
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11));
+}
+
+static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_RMII_CLK_2_5M |
+ RK3228_GMAC_SPEED_10M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_RMII_CLK_25M |
+ RK3228_GMAC_SPEED_100M);
+ else
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+}
+
+static const struct rk_gmac_ops rk3228_ops = {
+ .set_to_rgmii = rk3228_set_to_rgmii,
+ .set_to_rmii = rk3228_set_to_rmii,
+ .set_rgmii_speed = rk3228_set_rgmii_speed,
+ .set_rmii_speed = rk3228_set_rmii_speed,
+};
+
#define RK3288_GRF_SOC_CON1 0x0248
#define RK3288_GRF_SOC_CON3 0x0250
@@ -529,9 +646,8 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
return bsp_priv;
}
-static int rk_gmac_init(struct platform_device *pdev, void *priv)
+static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
{
- struct rk_priv_data *bsp_priv = priv;
int ret;
ret = phy_power_on(bsp_priv, true);
@@ -545,14 +661,50 @@ static int rk_gmac_init(struct platform_device *pdev, void *priv)
return 0;
}
-static void rk_gmac_exit(struct platform_device *pdev, void *priv)
+static void rk_gmac_powerdown(struct rk_priv_data *gmac)
{
- struct rk_priv_data *gmac = priv;
-
phy_power_on(gmac, false);
gmac_clk_enable(gmac, false);
}
+static int rk_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+
+ return rk_gmac_powerup(bsp_priv);
+}
+
+static void rk_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+
+ rk_gmac_powerdown(bsp_priv);
+}
+
+static void rk_gmac_suspend(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+
+ /* Keep the PHY up if we use Wake-on-LAN. */
+ if (device_may_wakeup(&pdev->dev))
+ return;
+
+ rk_gmac_powerdown(bsp_priv);
+ bsp_priv->suspended = true;
+}
+
+static void rk_gmac_resume(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+
+ /* The PHY was kept up for Wake-on-LAN. */
+ if (!bsp_priv->suspended)
+ return;
+
+ rk_gmac_powerup(bsp_priv);
+ bsp_priv->suspended = false;
+}
+
static void rk_fix_speed(void *priv, unsigned int speed)
{
struct rk_priv_data *bsp_priv = priv;
@@ -591,6 +743,8 @@ static int rk_gmac_probe(struct platform_device *pdev)
plat_dat->init = rk_gmac_init;
plat_dat->exit = rk_gmac_exit;
plat_dat->fix_mac_speed = rk_fix_speed;
+ plat_dat->suspend = rk_gmac_suspend;
+ plat_dat->resume = rk_gmac_resume;
plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
if (IS_ERR(plat_dat->bsp_priv))
@@ -604,6 +758,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
}
static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ }
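
All of the RK3228 GRF writes above rely on Rockchip's "hiword mask" register layout: bits 31:16 are per-bit write enables for bits 15:0, which is why GRF_BIT()/GRF_CLR_BIT() pair each payload bit with its enable bit and no read-modify-write is needed. A small stand-alone sketch of that convention; grf_apply() is a hypothetical model of the hardware behaviour, not a kernel API:

    #include <assert.h>
    #include <stdint.h>

    #define HIWORD_UPDATE(val, mask, shift) \
        (((val) << (shift)) | ((mask) << ((shift) + 16)))

    /* Model of a hiword-mask register write: only bits whose enable bit
     * (same position + 16) is set in the written word are updated.
     */
    static uint32_t grf_apply(uint32_t reg, uint32_t word)
    {
        uint32_t mask = word >> 16;

        return (reg & ~mask) | (word & mask);
    }

    int main(void)
    {
        uint32_t reg = 0x00f0;

        /* Set only bit 2 (cf. RK3228_GMAC_SPEED_100M = GRF_BIT(2)). */
        reg = grf_apply(reg, HIWORD_UPDATE(1, 0x1, 2));
        assert(reg == 0x00f4);

        /* Clear only bit 4, leaving the rest untouched. */
        reg = grf_apply(reg, HIWORD_UPDATE(0, 0x1, 4));
        assert(reg == 0x00e4);
        return 0;
    }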
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index b0593a4268ee..ff3e5ab39bd0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -38,19 +38,26 @@
#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
-enum dwmac1000_irq_status {
- lpiis_irq = 0x400,
- time_stamp_irq = 0x0200,
- mmc_rx_csum_offload_irq = 0x0080,
- mmc_tx_irq = 0x0040,
- mmc_rx_irq = 0x0020,
- mmc_irq = 0x0010,
- pmt_irq = 0x0008,
- pcs_ane_irq = 0x0004,
- pcs_link_irq = 0x0002,
- rgmii_irq = 0x0001,
-};
-#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */
+#define GMAC_INT_STATUS_PMT BIT(3)
+#define GMAC_INT_STATUS_MMCIS BIT(4)
+#define GMAC_INT_STATUS_MMCRIS BIT(5)
+#define GMAC_INT_STATUS_MMCTIS BIT(6)
+#define GMAC_INT_STATUS_MMCCSUM BIT(7)
+#define GMAC_INT_STATUS_TSTAMP BIT(9)
+#define GMAC_INT_STATUS_LPIIS BIT(10)
+
+/* interrupt mask register */
+#define GMAC_INT_MASK 0x0000003c
+#define GMAC_INT_DISABLE_RGMII BIT(0)
+#define GMAC_INT_DISABLE_PCSLINK BIT(1)
+#define GMAC_INT_DISABLE_PCSAN BIT(2)
+#define GMAC_INT_DISABLE_PMT BIT(3)
+#define GMAC_INT_DISABLE_TIMESTAMP BIT(9)
+#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \
+ GMAC_INT_DISABLE_PCSLINK | \
+ GMAC_INT_DISABLE_PCSAN)
+#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \
+ GMAC_INT_DISABLE_PCS)
/* PMT Control and Status */
#define GMAC_PMT 0x0000002c
@@ -90,42 +97,23 @@ enum power_event {
(reg * 8))
#define GMAC_MAX_PERFECT_ADDRESSES 1
-/* PCS registers (AN/TBI/SGMII/RGMII) offset */
-#define GMAC_AN_CTRL 0x000000c0 /* AN control */
-#define GMAC_AN_STATUS 0x000000c4 /* AN status */
-#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
-#define GMAC_ANE_LPA 0x000000cc /* Auto-Neg. link partener ability */
-#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
-#define GMAC_TBI 0x000000d4 /* TBI extend status */
-#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */
-
-/* AN Configuration defines */
-#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */
-#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */
-#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */
-#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */
-#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */
-#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */
-
-/* AN Status defines */
-#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */
-#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */
-#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */
-#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */
-
-/* Register 54 (SGMII/RGMII status register) */
-#define GMAC_S_R_GMII_LINK 0x8
-#define GMAC_S_R_GMII_SPEED 0x5
-#define GMAC_S_R_GMII_SPEED_SHIFT 0x1
-#define GMAC_S_R_GMII_MODE 0x1
-#define GMAC_S_R_GMII_SPEED_125 2
-#define GMAC_S_R_GMII_SPEED_25 1
-
-/* Common ADV and LPA defines */
-#define GMAC_ANE_FD (1 << 5)
-#define GMAC_ANE_HD (1 << 6)
-#define GMAC_ANE_PSE (3 << 7)
-#define GMAC_ANE_PSE_SHIFT 7
+#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
+#define GMAC_RGSMIIIS 0x000000d8 /* RGMII/SMII status */
+
+/* SGMII/RGMII status register */
+#define GMAC_RGSMIIIS_LNKMODE BIT(0)
+#define GMAC_RGSMIIIS_SPEED GENMASK(2, 1)
+#define GMAC_RGSMIIIS_SPEED_SHIFT 1
+#define GMAC_RGSMIIIS_LNKSTS BIT(3)
+#define GMAC_RGSMIIIS_JABTO BIT(4)
+#define GMAC_RGSMIIIS_FALSECARDET BIT(5)
+#define GMAC_RGSMIIIS_SMIDRXS BIT(16)
+/* LNKMOD */
+#define GMAC_RGSMIIIS_LNKMOD_MASK 0x1
+/* LNKSPEED */
+#define GMAC_RGSMIIIS_SPEED_125 0x2
+#define GMAC_RGSMIIIS_SPEED_25 0x1
+#define GMAC_RGSMIIIS_SPEED_2_5 0x0
/* GMAC Configuration defines */
#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index fb1eb578e34e..cbefe9e2207c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -30,22 +30,48 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <asm/io.h>
+#include "stmmac_pcs.h"
#include "dwmac1000.h"
static void dwmac1000_core_init(struct mac_device_info *hw, int mtu)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
+
+ /* Configure GMAC core */
value |= GMAC_CORE_INIT;
+
if (mtu > 1500)
value |= GMAC_CONTROL_2K;
if (mtu > 2000)
value |= GMAC_CONTROL_JE;
+ if (hw->ps) {
+ value |= GMAC_CONTROL_TE;
+
+ if (hw->ps == SPEED_1000) {
+ value &= ~GMAC_CONTROL_PS;
+ } else {
+ value |= GMAC_CONTROL_PS;
+
+ if (hw->ps == SPEED_10)
+ value &= ~GMAC_CONTROL_FES;
+ else
+ value |= GMAC_CONTROL_FES;
+ }
+ }
+
writel(value, ioaddr + GMAC_CONTROL);
/* Mask GMAC interrupts */
- writel(0x207, ioaddr + GMAC_INT_MASK);
+ value = GMAC_INT_DEFAULT_MASK;
+
+ if (hw->pmt)
+ value &= ~GMAC_INT_DISABLE_PMT;
+ if (hw->pcs)
+ value &= ~GMAC_INT_DISABLE_PCS;
+
+ writel(value, ioaddr + GMAC_INT_MASK);
#ifdef STMMAC_VLAN_TAG_USED
/* Tag detection without filtering */
@@ -241,6 +267,39 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
writel(pmt, ioaddr + GMAC_PMT);
}
+/* RGMII or SMII interface */
+static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+{
+ u32 status;
+
+ status = readl(ioaddr + GMAC_RGSMIIIS);
+ x->irq_rgmii_n++;
+
+ /* Check the link status */
+ if (status & GMAC_RGSMIIIS_LNKSTS) {
+ int speed_value;
+
+ x->pcs_link = 1;
+
+ speed_value = ((status & GMAC_RGSMIIIS_SPEED) >>
+ GMAC_RGSMIIIS_SPEED_SHIFT);
+ if (speed_value == GMAC_RGSMIIIS_SPEED_125)
+ x->pcs_speed = SPEED_1000;
+ else if (speed_value == GMAC_RGSMIIIS_SPEED_25)
+ x->pcs_speed = SPEED_100;
+ else
+ x->pcs_speed = SPEED_10;
+
+ x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK);
+
+ pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
+ x->pcs_duplex ? "Full" : "Half");
+ } else {
+ x->pcs_link = 0;
+ pr_info("Link is Down\n");
+ }
+}
+
static int dwmac1000_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
@@ -249,19 +308,20 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
int ret = 0;
/* Not used events (e.g. MMC interrupts) are not handled. */
- if ((intr_status & mmc_tx_irq))
+ if ((intr_status & GMAC_INT_STATUS_MMCTIS))
x->mmc_tx_irq_n++;
- if (unlikely(intr_status & mmc_rx_irq))
+ if (unlikely(intr_status & GMAC_INT_STATUS_MMCRIS))
x->mmc_rx_irq_n++;
- if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ if (unlikely(intr_status & GMAC_INT_STATUS_MMCCSUM))
x->mmc_rx_csum_offload_irq_n++;
- if (unlikely(intr_status & pmt_irq)) {
+ if (unlikely(intr_status & GMAC_INT_DISABLE_PMT)) {
/* clear the PMT bits 5 and 6 by reading the PMT status reg */
readl(ioaddr + GMAC_PMT);
x->irq_receive_pmt_irq_n++;
}
- /* MAC trx/rx EEE LPI entry/exit interrupts */
- if (intr_status & lpiis_irq) {
+
+ /* MAC tx/rx EEE LPI entry/exit interrupts */
+ if (intr_status & GMAC_INT_STATUS_LPIIS) {
/* Clean LPI interrupt by reading the Reg 12 */
ret = readl(ioaddr + LPI_CTRL_STATUS);
@@ -275,36 +335,10 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
x->irq_rx_path_exit_lpi_mode_n++;
}
- if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
- readl(ioaddr + GMAC_AN_STATUS);
- x->irq_pcs_ane_n++;
- }
- if (intr_status & rgmii_irq) {
- u32 status = readl(ioaddr + GMAC_S_R_GMII);
- x->irq_rgmii_n++;
-
- /* Save and dump the link status. */
- if (status & GMAC_S_R_GMII_LINK) {
- int speed_value = (status & GMAC_S_R_GMII_SPEED) >>
- GMAC_S_R_GMII_SPEED_SHIFT;
- x->pcs_duplex = (status & GMAC_S_R_GMII_MODE);
-
- if (speed_value == GMAC_S_R_GMII_SPEED_125)
- x->pcs_speed = SPEED_1000;
- else if (speed_value == GMAC_S_R_GMII_SPEED_25)
- x->pcs_speed = SPEED_100;
- else
- x->pcs_speed = SPEED_10;
+ dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
- x->pcs_link = 1;
- pr_debug("%s: Link is Up - %d/%s\n", __func__,
- (int)x->pcs_speed,
- x->pcs_duplex ? "Full" : "Half");
- } else {
- x->pcs_link = 0;
- pr_debug("%s: Link is Down\n", __func__);
- }
- }
+ if (intr_status & PCS_RGSMIIIS_IRQ)
+ dwmac1000_rgsmii(ioaddr, x);
return ret;
}
@@ -363,38 +397,20 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + LPI_TIMER_CTRL);
}
-static void dwmac1000_ctrl_ane(struct mac_device_info *hw, bool restart)
+static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback)
{
- void __iomem *ioaddr = hw->pcsr;
- /* auto negotiation enable and External Loopback enable */
- u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
-
- if (restart)
- value |= GMAC_AN_CTRL_RAN;
-
- writel(value, ioaddr + GMAC_AN_CTRL);
+ dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
-static void dwmac1000_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
+static void dwmac1000_rane(void __iomem *ioaddr, bool restart)
{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_ANE_ADV);
-
- if (value & GMAC_ANE_FD)
- adv->duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv->duplex |= DUPLEX_HALF;
-
- adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-
- value = readl(ioaddr + GMAC_ANE_LPA);
-
- if (value & GMAC_ANE_FD)
- adv->lp_duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv->lp_duplex = DUPLEX_HALF;
+ dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
+}
- adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
+{
+ dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}
static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
@@ -485,9 +501,10 @@ static const struct stmmac_ops dwmac1000_ops = {
.reset_eee_mode = dwmac1000_reset_eee_mode,
.set_eee_timer = dwmac1000_set_eee_timer,
.set_eee_pls = dwmac1000_set_eee_pls,
- .ctrl_ane = dwmac1000_ctrl_ane,
- .get_adv = dwmac1000_get_adv,
.debug = dwmac1000_debug,
+ .pcs_ctrl_ane = dwmac1000_ctrl_ane,
+ .pcs_rane = dwmac1000_rane,
+ .pcs_get_adv_lp = dwmac1000_get_adv_lp,
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index bc50952a18e7..6f4f5ce25114 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -24,10 +24,8 @@
#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
#define GMAC_INT_STATUS 0x000000b0
#define GMAC_INT_EN 0x000000b4
-#define GMAC_AN_CTRL 0x000000e0
-#define GMAC_AN_STATUS 0x000000e4
-#define GMAC_AN_ADV 0x000000e8
-#define GMAC_AN_LPA 0x000000ec
+#define GMAC_PCS_BASE 0x000000e0
+#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
#define GMAC_PMT 0x000000c0
#define GMAC_VERSION 0x00000110
#define GMAC_DEBUG 0x00000114
@@ -54,9 +52,18 @@
#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
/* MAC Interrupt bitmap*/
+#define GMAC_INT_RGSMIIS BIT(0)
+#define GMAC_INT_PCS_LINK BIT(1)
+#define GMAC_INT_PCS_ANE BIT(2)
+#define GMAC_INT_PCS_PHYIS BIT(3)
#define GMAC_INT_PMT_EN BIT(4)
#define GMAC_INT_LPI_EN BIT(5)
+#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
+ GMAC_INT_PCS_ANE)
+
+#define GMAC_INT_DEFAULT_MASK GMAC_INT_PMT_EN
+
enum dwmac4_irq_status {
time_stamp_irq = 0x00001000,
mmc_rx_csum_offload_irq = 0x00000800,
@@ -64,19 +71,8 @@ enum dwmac4_irq_status {
mmc_rx_irq = 0x00000200,
mmc_irq = 0x00000100,
pmt_irq = 0x00000010,
- pcs_ane_irq = 0x00000004,
- pcs_link_irq = 0x00000002,
};
-/* MAC Auto-Neg bitmap*/
-#define GMAC_AN_CTRL_RAN BIT(9)
-#define GMAC_AN_CTRL_ANE BIT(12)
-#define GMAC_AN_CTRL_ELE BIT(14)
-#define GMAC_AN_FD BIT(5)
-#define GMAC_AN_HD BIT(6)
-#define GMAC_AN_PSE_MASK GENMASK(8, 7)
-#define GMAC_AN_PSE_SHIFT 7
-
/* MAC PMT bitmap */
enum power_event {
pointer_reset = 0x80000000,
@@ -250,6 +246,23 @@ enum power_event {
#define MTL_DEBUG_RRCSTS_FLUSH 3
#define MTL_DEBUG_RWCSTS BIT(0)
+/* SGMII/RGMII status register */
+#define GMAC_PHYIF_CTRLSTATUS_TC BIT(0)
+#define GMAC_PHYIF_CTRLSTATUS_LUD BIT(1)
+#define GMAC_PHYIF_CTRLSTATUS_SMIDRXS BIT(4)
+#define GMAC_PHYIF_CTRLSTATUS_LNKMOD BIT(16)
+#define GMAC_PHYIF_CTRLSTATUS_SPEED GENMASK(18, 17)
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT 17
+#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19)
+#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20)
+#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21)
+/* LNKMOD */
+#define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1
+/* LNKSPEED */
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_2_5 0x0
+
extern const struct stmmac_dma_ops dwmac4_dma_ops;
extern const struct stmmac_dma_ops dwmac410_dma_ops;
#endif /* __DWMAC4_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 4f7283d05588..df5580dcdfed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include "stmmac_pcs.h"
#include "dwmac4.h"
static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
@@ -31,10 +32,31 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
if (mtu > 2000)
value |= GMAC_CONFIG_JE;
+ if (hw->ps) {
+ value |= GMAC_CONFIG_TE;
+
+ if (hw->ps == SPEED_1000) {
+ value &= ~GMAC_CONFIG_PS;
+ } else {
+ value |= GMAC_CONFIG_PS;
+
+ if (hw->ps == SPEED_10)
+ value &= ~GMAC_CONFIG_FES;
+ else
+ value |= GMAC_CONFIG_FES;
+ }
+ }
+
writel(value, ioaddr + GMAC_CONFIG);
/* Mask GMAC interrupts */
- writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN);
+ value = GMAC_INT_DEFAULT_MASK;
+ if (hw->pmt)
+ value |= GMAC_INT_PMT_EN;
+ if (hw->pcs)
+ value |= GMAC_PCS_IRQ_DEFAULT;
+
+ writel(value, ioaddr + GMAC_INT_EN);
}
static void dwmac4_dump_regs(struct mac_device_info *hw)
@@ -156,7 +178,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
struct netdev_hw_addr *ha;
netdev_for_each_uc_addr(ha, dev) {
- dwmac4_set_umac_addr(ioaddr, ha->addr, reg);
+ dwmac4_set_umac_addr(hw, ha->addr, reg);
reg++;
}
}
@@ -190,39 +212,53 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
}
}
-static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart)
+static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback)
{
- void __iomem *ioaddr = hw->pcsr;
-
- /* auto negotiation enable and External Loopback enable */
- u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+ dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
+}
- if (restart)
- value |= GMAC_AN_CTRL_RAN;
+static void dwmac4_rane(void __iomem *ioaddr, bool restart)
+{
+ dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
+}
- writel(value, ioaddr + GMAC_AN_CTRL);
+static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
+{
+ dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}
-static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
+/* RGMII or SMII interface */
+static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_AN_ADV);
+ u32 status;
- if (value & GMAC_AN_FD)
- adv->duplex = DUPLEX_FULL;
- if (value & GMAC_AN_HD)
- adv->duplex |= DUPLEX_HALF;
+ status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
+ x->irq_rgmii_n++;
- adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
+ /* Check the link status */
+ if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
+ int speed_value;
- value = readl(ioaddr + GMAC_AN_LPA);
+ x->pcs_link = 1;
+
+ speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
+ GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
+ if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
+ x->pcs_speed = SPEED_1000;
+ else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
+ x->pcs_speed = SPEED_100;
+ else
+ x->pcs_speed = SPEED_10;
- if (value & GMAC_AN_FD)
- adv->lp_duplex = DUPLEX_FULL;
- if (value & GMAC_AN_HD)
- adv->lp_duplex = DUPLEX_HALF;
+ x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
- adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
+ pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
+ x->pcs_duplex ? "Full" : "Half");
+ } else {
+ x->pcs_link = 0;
+ pr_info("Link is Down\n");
+ }
}
static int dwmac4_irq_status(struct mac_device_info *hw,
@@ -248,11 +284,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
x->irq_receive_pmt_irq_n++;
}
- if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
- readl(ioaddr + GMAC_AN_STATUS);
- x->irq_pcs_ane_n++;
- }
-
mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
/* Check MTL Interrupt: Currently only one queue is used: Q0. */
if (mtl_int_qx_status & MTL_INT_Q0) {
@@ -267,6 +298,10 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
}
}
+ dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
+ if (intr_status & PCS_RGSMIIIS_IRQ)
+ dwmac4_phystatus(ioaddr, x);
+
return ret;
}
@@ -363,8 +398,9 @@ static const struct stmmac_ops dwmac4_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .ctrl_ane = dwmac4_ctrl_ane,
- .get_adv = dwmac4_get_adv,
+ .pcs_ctrl_ane = dwmac4_ctrl_ane,
+ .pcs_rane = dwmac4_rane,
+ .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
};
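
Both core_init implementations now program the MAC port-select bits from hw->ps by the same rule: PS cleared selects the gigabit (GMII) interface, PS set selects MII with FES choosing between 100 and 10 Mb/s. A hedged sketch of that mapping as a pure function; the bit values are placeholders standing in for GMAC_CONFIG_PS/GMAC_CONFIG_FES:

    #include <assert.h>
    #include <stdint.h>

    #define CFG_PS  (1u << 15)   /* placeholder for GMAC_CONFIG_PS  */
    #define CFG_FES (1u << 14)   /* placeholder for GMAC_CONFIG_FES */

    /* Apply the PS/FES rule shared by dwmac1000_core_init() and
     * dwmac4_core_init(): 1000 -> PS=0; 100 -> PS=1, FES=1; 10 -> PS=1, FES=0.
     */
    static uint32_t apply_port_speed(uint32_t value, int speed)
    {
        if (speed == 1000) {
            value &= ~CFG_PS;
        } else {
            value |= CFG_PS;
            if (speed == 10)
                value &= ~CFG_FES;
            else
                value |= CFG_FES;
        }
        return value;
    }

    int main(void)
    {
        assert(!(apply_port_speed(CFG_PS | CFG_FES, 1000) & CFG_PS));
        assert(apply_port_speed(0, 100) == (CFG_PS | CFG_FES));
        assert(apply_port_speed(CFG_FES, 10) == CFG_PS);
        return 0;
    }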
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 59ae6088cd22..8dc9056c1001 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -117,7 +117,6 @@ struct stmmac_priv {
int eee_enabled;
int eee_active;
int tx_lpi_timer;
- int pcs;
unsigned int mode;
int extend_desc;
struct ptp_clock *ptp_clock;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e2b98b01647e..1e06173fc9d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -276,7 +276,8 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
struct phy_device *phy = priv->phydev;
int rc;
- if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+ if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII) {
struct rgmii_adv adv;
if (!priv->xstats.pcs_link) {
@@ -289,10 +290,10 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
/* Get and convert ADV/LP_ADV from the HW AN registers */
- if (!priv->hw->mac->get_adv)
+ if (!priv->hw->mac->pcs_get_adv_lp)
return -EOPNOTSUPP; /* should never happen indeed */
- priv->hw->mac->get_adv(priv->hw, &adv);
+ priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv);
/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
@@ -361,7 +362,8 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
struct phy_device *phy = priv->phydev;
int rc;
- if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+ if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII) {
u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
/* Only support ANE */
@@ -376,8 +378,11 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
ADVERTISED_10baseT_Full);
spin_lock(&priv->lock);
- if (priv->hw->mac->ctrl_ane)
- priv->hw->mac->ctrl_ane(priv->hw, 1);
+
+ if (priv->hw->mac->pcs_ctrl_ane)
+ priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1,
+ priv->hw->ps, 0);
+
spin_unlock(&priv->lock);
return 0;
@@ -452,11 +457,22 @@ stmmac_get_pauseparam(struct net_device *netdev,
{
struct stmmac_priv *priv = netdev_priv(netdev);
- if (priv->pcs) /* FIXME */
- return;
-
pause->rx_pause = 0;
pause->tx_pause = 0;
+
+ if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
+ struct rgmii_adv adv_lp;
+
+ pause->autoneg = 1;
+ priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
+ if (!adv_lp.pause)
+ return;
+ } else {
+ if (!(priv->phydev->supported & SUPPORTED_Pause) ||
+ !(priv->phydev->supported & SUPPORTED_Asym_Pause))
+ return;
+ }
+
pause->autoneg = priv->phydev->autoneg;
if (priv->flow_ctrl & FLOW_RX)
@@ -473,10 +489,19 @@ stmmac_set_pauseparam(struct net_device *netdev,
struct stmmac_priv *priv = netdev_priv(netdev);
struct phy_device *phy = priv->phydev;
int new_pause = FLOW_OFF;
- int ret = 0;
- if (priv->pcs) /* FIXME */
- return -EOPNOTSUPP;
+ if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
+ struct rgmii_adv adv_lp;
+
+ pause->autoneg = 1;
+ priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
+ if (!adv_lp.pause)
+ return -EOPNOTSUPP;
+ } else {
+ if (!(phy->supported & SUPPORTED_Pause) ||
+ !(phy->supported & SUPPORTED_Asym_Pause))
+ return -EOPNOTSUPP;
+ }
if (pause->rx_pause)
new_pause |= FLOW_RX;
@@ -488,11 +513,12 @@ stmmac_set_pauseparam(struct net_device *netdev,
if (phy->autoneg) {
if (netif_running(netdev))
- ret = phy_start_aneg(phy);
- } else
- priv->hw->mac->flow_ctrl(priv->hw, phy->duplex,
- priv->flow_ctrl, priv->pause);
- return ret;
+ return phy_start_aneg(phy);
+ }
+
+ priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
+ priv->pause);
+ return 0;
}
static void stmmac_get_ethtool_stats(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index eac45d0c75e2..aab777c1ba33 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -285,8 +285,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
/* Using PCS we cannot dial with the phy registers at this stage
* so we do not support extra feature like EEE.
*/
- if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
- (priv->pcs == STMMAC_PCS_RTBI))
+ if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
+ (priv->hw->pcs == STMMAC_PCS_TBI) ||
+ (priv->hw->pcs == STMMAC_PCS_RTBI))
goto out;
/* MAC core supports the EEE feature. */
@@ -799,10 +800,10 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
(interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
pr_debug("STMMAC: PCS RGMII support enable\n");
- priv->pcs = STMMAC_PCS_RGMII;
+ priv->hw->pcs = STMMAC_PCS_RGMII;
} else if (interface == PHY_INTERFACE_MODE_SGMII) {
pr_debug("STMMAC: PCS SGMII support enable\n");
- priv->pcs = STMMAC_PCS_SGMII;
+ priv->hw->pcs = STMMAC_PCS_SGMII;
}
}
}
@@ -1665,6 +1666,19 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->plat->bus_setup)
priv->plat->bus_setup(priv->ioaddr);
+ /* PS and related bits will be programmed according to the speed */
+ if (priv->hw->pcs) {
+ int speed = priv->plat->mac_port_sel_speed;
+
+ if ((speed == SPEED_10) || (speed == SPEED_100) ||
+ (speed == SPEED_1000)) {
+ priv->hw->ps = speed;
+ } else {
+ dev_warn(priv->device, "invalid port speed\n");
+ priv->hw->ps = 0;
+ }
+ }
+
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
@@ -1714,8 +1728,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
}
- if (priv->pcs && priv->hw->mac->ctrl_ane)
- priv->hw->mac->ctrl_ane(priv->hw, 0);
+ if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
+ priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1, priv->hw->ps, 0);
/* set TX ring length */
if (priv->hw->dma->set_tx_ring_len)
@@ -1748,8 +1762,9 @@ static int stmmac_open(struct net_device *dev)
stmmac_check_ether_addr(priv);
- if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
- priv->pcs != STMMAC_PCS_RTBI) {
+ if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+ priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI) {
ret = stmmac_init_phy(dev);
if (ret) {
pr_err("%s: Cannot attach to PHY (error: %d)\n",
@@ -2809,6 +2824,14 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->rx_tail_addr,
STMMAC_CHAN0);
}
+
+ /* PCS link status */
+ if (priv->hw->pcs) {
+ if (priv->xstats.pcs_link)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ }
}
/* To handle DMA interrupts */
@@ -3130,6 +3153,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
*/
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
+ priv->hw->pmt = priv->plat->pmt;
/* TXCOE doesn't work in thresh DMA mode */
if (priv->plat->force_thresh_dma_mode)
@@ -3325,8 +3349,9 @@ int stmmac_dvr_probe(struct device *device,
stmmac_check_pcs_mode(priv);
- if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
- priv->pcs != STMMAC_PCS_RTBI) {
+ if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+ priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI) {
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
@@ -3376,8 +3401,9 @@ int stmmac_dvr_remove(struct device *dev)
reset_control_assert(priv->stmmac_rst);
clk_disable_unprepare(priv->pclk);
clk_disable_unprepare(priv->stmmac_clk);
- if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
- priv->pcs != STMMAC_PCS_RTBI)
+ if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+ priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
free_netdev(ndev);
@@ -3450,8 +3476,6 @@ int stmmac_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
- spin_lock_irqsave(&priv->lock, flags);
-
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
@@ -3459,7 +3483,9 @@ int stmmac_resume(struct device *dev)
* from another devices (e.g. serial console).
*/
if (device_may_wakeup(priv->device)) {
+ spin_lock_irqsave(&priv->lock, flags);
priv->hw->mac->pmt(priv->hw, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
@@ -3473,6 +3499,8 @@ int stmmac_resume(struct device *dev)
netif_device_attach(ndev);
+ spin_lock_irqsave(&priv->lock, flags);
+
priv->cur_rx = 0;
priv->dirty_rx = 0;
priv->dirty_tx = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
new file mode 100644
index 000000000000..eba41c24b7a7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -0,0 +1,159 @@
+/*
+ * stmmac_pcs.h: Physical Coding Sublayer Header File
+ *
+ * Copyright (C) 2016 STMicroelectronics (R&D) Limited
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __STMMAC_PCS_H__
+#define __STMMAC_PCS_H__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include "common.h"
+
+/* PCS registers (AN/TBI/SGMII/RGMII) offsets */
+#define GMAC_AN_CTRL(x) (x) /* AN control */
+#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */
+#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */
+#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partner ability */
+#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */
+#define GMAC_TBI(x) (x + 0x14) /* TBI extend status */
+
+/* AN Configuration defines */
+#define GMAC_AN_CTRL_RAN BIT(9) /* Restart Auto-Negotiation */
+#define GMAC_AN_CTRL_ANE BIT(12) /* Auto-Negotiation Enable */
+#define GMAC_AN_CTRL_ELE BIT(14) /* External Loopback Enable */
+#define GMAC_AN_CTRL_ECD BIT(16) /* Enable Comma Detect */
+#define GMAC_AN_CTRL_LR BIT(17) /* Lock to Reference */
+#define GMAC_AN_CTRL_SGMRAL BIT(18) /* SGMII RAL Control */
+
+/* AN Status defines */
+#define GMAC_AN_STATUS_LS BIT(2) /* Link Status 0:down 1:up */
+#define GMAC_AN_STATUS_ANA BIT(3) /* Auto-Negotiation Ability */
+#define GMAC_AN_STATUS_ANC BIT(5) /* Auto-Negotiation Complete */
+#define GMAC_AN_STATUS_ES BIT(8) /* Extended Status */
+
+/* ADV and LPA defines */
+#define GMAC_ANE_FD BIT(5)
+#define GMAC_ANE_HD BIT(6)
+#define GMAC_ANE_PSE GENMASK(8, 7)
+#define GMAC_ANE_PSE_SHIFT 7
+#define GMAC_ANE_RFE GENMASK(13, 12)
+#define GMAC_ANE_RFE_SHIFT 12
+#define GMAC_ANE_ACK BIT(14)
+
+/**
+ * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @intr_status: GMAC core interrupt status
+ * @x: pointer to log these events as stats
+ * Description: this is the ISR for the PCS events: Auto-Negotiation Complete
+ * and Link status.
+ */
+static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg,
+ unsigned int intr_status,
+ struct stmmac_extra_stats *x)
+{
+ u32 val = readl(ioaddr + GMAC_AN_STATUS(reg));
+
+ if (intr_status & PCS_ANE_IRQ) {
+ x->irq_pcs_ane_n++;
+ if (val & GMAC_AN_STATUS_ANC)
+ pr_info("stmmac_pcs: ANE process completed\n");
+ }
+
+ if (intr_status & PCS_LINK_IRQ) {
+ x->irq_pcs_link_n++;
+ if (val & GMAC_AN_STATUS_LS)
+ pr_info("stmmac_pcs: Link Up\n");
+ else
+ pr_info("stmmac_pcs: Link Down\n");
+ }
+}
+
+/**
+ * dwmac_rane - To restart ANE
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @restart: to restart ANE
+ * Description: this simply restarts the Auto-Negotiation.
+ */
+static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart)
+{
+ u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
+
+ if (restart)
+ value |= GMAC_AN_CTRL_RAN;
+
+ writel(value, ioaddr + GMAC_AN_CTRL(reg));
+}
+
+/**
+ * dwmac_ctrl_ane - To program the AN Control Register.
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @ane: to enable the auto-negotiation
+ * @srgmi_ral: to manage MAC-2-MAC SGMII connections.
+ * @loopback: to cause the PHY to loop back tx data into the rx path.
+ * Description: this is the main function to configure the AN control register
+ * and init the ANE, select loopback (usually for debugging purposes) and
+ * configure SGMII RAL.
+ */
+static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
+ bool srgmi_ral, bool loopback)
+{
+ u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
+
+ /* Enable and restart the Auto-Negotiation */
+ if (ane)
+ value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN;
+
+ /* In case of MAC-2-MAC connection, block is configured to operate
+ * according to MAC conf register.
+ */
+ if (srgmi_ral)
+ value |= GMAC_AN_CTRL_SGMRAL;
+
+ if (loopback)
+ value |= GMAC_AN_CTRL_ELE;
+
+ writel(value, ioaddr + GMAC_AN_CTRL(reg));
+}
+
+/**
+ * dwmac_get_adv_lp - Get ADV and LP cap
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @adv_lp: structure to store the adv,lp status
+ * Description: this is to expose the ANE advertisement and Link partner ability
+ * status for ethtool support.
+ */
+static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg,
+ struct rgmii_adv *adv_lp)
+{
+ u32 value = readl(ioaddr + GMAC_ANE_ADV(reg));
+
+ if (value & GMAC_ANE_FD)
+ adv_lp->duplex = DUPLEX_FULL;
+ if (value & GMAC_ANE_HD)
+ adv_lp->duplex |= DUPLEX_HALF;
+
+ adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+
+ value = readl(ioaddr + GMAC_ANE_LPA(reg));
+
+ if (value & GMAC_ANE_FD)
+ adv_lp->lp_duplex = DUPLEX_FULL;
+ if (value & GMAC_ANE_HD)
+ adv_lp->lp_duplex = DUPLEX_HALF;
+
+ adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+}
+#endif /* __STMMAC_PCS_H__ */
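
The pause field dwmac_get_adv_lp() extracts is the two PSE bits defined in 802.3z clause 37.2.1.4, also referenced from the stmmac_ethtool.c hunks above. A small sketch of how those two bits decode, under the usual reading that bit 0 of the shifted field is symmetric PAUSE and bit 1 is ASM_DIR:

    #include <stdio.h>

    enum {
        PSE_SYM  = 0x1,   /* PAUSE bit (802.3z ADV bit 7, after shift)   */
        PSE_ASYM = 0x2,   /* ASM_DIR bit (802.3z ADV bit 8, after shift) */
    };

    static const char *pse_to_str(unsigned int pse)
    {
        switch (pse) {
        case 0:                  return "no pause";
        case PSE_SYM:            return "symmetric pause";
        case PSE_ASYM:           return "asymmetric pause toward link partner";
        case PSE_SYM | PSE_ASYM: return "symmetric and asymmetric pause";
        }
        return "invalid";
    }

    int main(void)
    {
        unsigned int pse;

        for (pse = 0; pse < 4; pse++)
            printf("PSE=%u: %s\n", pse, pse_to_str(pse));
        return 0;
    }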
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 409db913b117..f7dfc0ae8e9c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -319,6 +319,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
}
+ of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);
+
plat->axi = stmmac_axi_setup(pdev);
return plat;
@@ -411,7 +413,9 @@ static int stmmac_pltfr_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
ret = stmmac_suspend(dev);
- if (priv->plat->exit)
+ if (priv->plat->suspend)
+ priv->plat->suspend(pdev, priv->plat->bsp_priv);
+ else if (priv->plat->exit)
priv->plat->exit(pdev, priv->plat->bsp_priv);
return ret;
@@ -430,7 +434,9 @@ static int stmmac_pltfr_resume(struct device *dev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- if (priv->plat->init)
+ if (priv->plat->resume)
+ priv->plat->resume(pdev, priv->plat->bsp_priv);
+ else if (priv->plat->init)
priv->plat->init(pdev, priv->plat->bsp_priv);
return stmmac_resume(dev);
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 158213cd6cdd..c34111b390c7 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -46,7 +46,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
-#include <linux/version.h>
#include <linux/device.h>
#include <linux/bitrev.h>
@@ -598,7 +597,6 @@ struct net_local {
struct work_struct txtimeout_reinit;
phy_interface_t phy_interface;
- struct phy_device *phy_dev;
struct mii_bus *mii_bus;
unsigned int link;
@@ -816,7 +814,7 @@ static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
@@ -850,6 +848,7 @@ static void dwceqos_link_down(struct net_local *lp)
static void dwceqos_link_up(struct net_local *lp)
{
+ struct net_device *ndev = lp->ndev;
u32 regval;
unsigned long flags;
@@ -860,7 +859,7 @@ static void dwceqos_link_up(struct net_local *lp)
dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
spin_unlock_irqrestore(&lp->hw_lock, flags);
- lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
+ lp->eee_active = !phy_init_eee(ndev->phydev, 0);
/* Check for changed EEE capability */
if (!lp->eee_active && lp->eee_enabled) {
@@ -876,7 +875,8 @@ static void dwceqos_link_up(struct net_local *lp)
static void dwceqos_set_speed(struct net_local *lp)
{
- struct phy_device *phydev = lp->phy_dev;
+ struct net_device *ndev = lp->ndev;
+ struct phy_device *phydev = ndev->phydev;
u32 regval;
regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
@@ -903,7 +903,7 @@ static void dwceqos_set_speed(struct net_local *lp)
static void dwceqos_adjust_link(struct net_device *ndev)
{
struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
int status_change = 0;
if (lp->phy_defer)
@@ -987,7 +987,6 @@ static int dwceqos_mii_probe(struct net_device *ndev)
lp->link = 0;
lp->speed = 0;
lp->duplex = DUPLEX_UNKNOWN;
- lp->phy_dev = phydev;
return 0;
}
@@ -1531,6 +1530,7 @@ static void dwceqos_configure_bus(struct net_local *lp)
static void dwceqos_init_hw(struct net_local *lp)
{
+ struct net_device *ndev = lp->ndev;
u32 regval;
u32 buswidth;
u32 dma_skip;
@@ -1645,10 +1645,10 @@ static void dwceqos_init_hw(struct net_local *lp)
regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
lp->phy_defer = false;
- mutex_lock(&lp->phy_dev->lock);
- phy_read_status(lp->phy_dev);
+ mutex_lock(&ndev->phydev->lock);
+ phy_read_status(ndev->phydev);
dwceqos_adjust_link(lp->ndev);
- mutex_unlock(&lp->phy_dev->lock);
+ mutex_unlock(&ndev->phydev->lock);
}
static void dwceqos_tx_reclaim(unsigned long data)
@@ -1898,7 +1898,7 @@ static int dwceqos_open(struct net_device *ndev)
* hence the unusual init order with phy_start first.
*/
lp->phy_defer = true;
- phy_start(lp->phy_dev);
+ phy_start(ndev->phydev);
dwceqos_init_hw(lp);
napi_enable(&lp->napi);
@@ -1943,7 +1943,7 @@ static int dwceqos_stop(struct net_device *ndev)
dwceqos_drain_dma(lp);
dwceqos_reset_hw(lp);
- phy_stop(lp->phy_dev);
+ phy_stop(ndev->phydev);
dwceqos_descriptor_free(lp);
@@ -2523,30 +2523,6 @@ dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
return s;
}
-static int
-dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, ecmd);
-}
-
-static int
-dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, ecmd);
-}
-
static void
dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
{
@@ -2574,17 +2550,17 @@ static int dwceqos_set_pauseparam(struct net_device *ndev,
lp->flowcontrol.autoneg = pp->autoneg;
if (pp->autoneg) {
- lp->phy_dev->advertising |= ADVERTISED_Pause;
- lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
+ ndev->phydev->advertising |= ADVERTISED_Pause;
+ ndev->phydev->advertising |= ADVERTISED_Asym_Pause;
} else {
- lp->phy_dev->advertising &= ~ADVERTISED_Pause;
- lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
+ ndev->phydev->advertising &= ~ADVERTISED_Pause;
+ ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause;
lp->flowcontrol.rx = pp->rx_pause;
lp->flowcontrol.tx = pp->tx_pause;
}
if (netif_running(ndev))
- ret = phy_start_aneg(lp->phy_dev);
+ ret = phy_start_aneg(ndev->phydev);
return ret;
}
@@ -2705,7 +2681,7 @@ static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
dwceqos_get_tx_lpi_state(regval));
}
- return phy_ethtool_get_eee(lp->phy_dev, edata);
+ return phy_ethtool_get_eee(ndev->phydev, edata);
}
static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
@@ -2747,7 +2723,7 @@ static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
spin_unlock_irqrestore(&lp->hw_lock, flags);
}
- return phy_ethtool_set_eee(lp->phy_dev, edata);
+ return phy_ethtool_set_eee(ndev->phydev, edata);
}
static u32 dwceqos_get_msglevel(struct net_device *ndev)
@@ -2765,8 +2741,6 @@ static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
}
static struct ethtool_ops dwceqos_ethtool_ops = {
- .get_settings = dwceqos_get_settings,
- .set_settings = dwceqos_set_settings,
.get_drvinfo = dwceqos_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_pauseparam = dwceqos_get_pauseparam,
@@ -2780,6 +2754,8 @@ static struct ethtool_ops dwceqos_ethtool_ops = {
.set_eee = dwceqos_set_eee,
.get_msglevel = dwceqos_get_msglevel,
.set_msglevel = dwceqos_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static struct net_device_ops netdev_ops = {
@@ -2981,8 +2957,8 @@ static int dwceqos_remove(struct platform_device *pdev)
if (ndev) {
lp = netdev_priv(ndev);
- if (lp->phy_dev)
- phy_disconnect(lp->phy_dev);
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 635979bddd34..1a93a1f28433 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1243,6 +1243,7 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = NULL;
cpsw_ale_control_set(priv->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+ soft_reset_slave(slave);
}
static int cpsw_ndo_open(struct net_device *ndev)
@@ -1251,7 +1252,11 @@ static int cpsw_ndo_open(struct net_device *ndev)
int i, ret;
u32 reg;
- pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ return ret;
+ }
if (!cpsw_common_res_usage_state(priv))
cpsw_intr_disable(priv);
@@ -1277,6 +1282,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (!cpsw_common_res_usage_state(priv)) {
struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+ int buf_num;
/* setup tx dma to fixed prio and zero offset */
cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
@@ -1304,10 +1310,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
enable_irq(priv->irqs_table[0]);
}
- if (WARN_ON(!priv->data.rx_descs))
- priv->data.rx_descs = 128;
-
- for (i = 0; i < priv->data.rx_descs; i++) {
+ buf_num = cpdma_chan_get_rx_buf_num(priv->dma);
+ for (i = 0; i < buf_num; i++) {
struct sk_buff *skb;
ret = -ENOMEM;
@@ -1338,7 +1342,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (priv->coal_intvl != 0) {
struct ethtool_coalesce coal;
- coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ coal.rx_coalesce_usecs = priv->coal_intvl;
cpsw_set_coalesce(ndev, &coal);
}
@@ -1610,10 +1614,17 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
struct sockaddr *addr = (struct sockaddr *)p;
int flags = 0;
u16 vid = 0;
+ int ret;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ return ret;
+ }
+
if (priv->data.dual_emac) {
vid = priv->slaves[priv->emac_port].port_vlan;
flags = ALE_VLAN;
@@ -1628,6 +1639,8 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
for_each_slave(priv, cpsw_set_slave_mac, priv);
+ pm_runtime_put(&priv->pdev->dev);
+
return 0;
}
@@ -1692,10 +1705,17 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
if (vid == priv->data.default_vlan)
return 0;
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ return ret;
+ }
+
if (priv->data.dual_emac) {
/* In dual EMAC, reserved VLAN id should not be used for
* creating VLAN interfaces as this can break the dual
@@ -1710,7 +1730,10 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
}
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
- return cpsw_add_vlan_ale_entry(priv, vid);
+ ret = cpsw_add_vlan_ale_entry(priv, vid);
+
+ pm_runtime_put(&priv->pdev->dev);
+ return ret;
}
static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
@@ -1722,6 +1745,12 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == priv->data.default_vlan)
return 0;
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ return ret;
+ }
+
if (priv->data.dual_emac) {
int i;
@@ -1741,8 +1770,10 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (ret != 0)
return ret;
- return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
- 0, ALE_VLAN, vid);
+ ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+ pm_runtime_put(&priv->pdev->dev);
+ return ret;
}
static const struct net_device_ops cpsw_netdev_ops = {
@@ -1901,10 +1932,33 @@ static int cpsw_set_pauseparam(struct net_device *ndev,
priv->tx_pause = pause->tx_pause ? true : false;
for_each_slave(priv, _cpsw_adjust_link, priv, &link);
-
return 0;
}
+static int cpsw_ethtool_op_begin(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ }
+
+ return ret;
+}
+
+static void cpsw_ethtool_op_complete(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_put(&priv->pdev->dev);
+ if (ret < 0)
+ cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
@@ -1924,6 +1978,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.set_wol = cpsw_set_wol,
.get_regs_len = cpsw_get_regs_len,
.get_regs = cpsw_get_regs,
+ .begin = cpsw_ethtool_op_begin,
+ .complete = cpsw_ethtool_op_complete,
};
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1998,12 +2054,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->bd_ram_size = prop;
- if (of_property_read_u32(node, "rx_descs", &prop)) {
- dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
- return -EINVAL;
- }
- data->rx_descs = prop;
-
if (of_property_read_u32(node, "mac_control", &prop)) {
dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
return -EINVAL;
@@ -2021,7 +2071,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (ret)
dev_warn(&pdev->dev, "Doesn't have any child node\n");
- for_each_child_of_node(node, slave_node) {
+ for_each_available_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
const void *mac_addr = NULL;
int lenp;
@@ -2318,7 +2368,11 @@ static int cpsw_probe(struct platform_device *pdev)
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ goto clean_runtime_disable_ret;
+ }
priv->version = readl(&priv->regs->id_ver);
pm_runtime_put_sync(&pdev->dev);
@@ -2502,8 +2556,6 @@ static int cpsw_probe(struct platform_device *pdev)
clean_ale_ret:
cpsw_ale_destroy(priv->ale);
clean_dma_ret:
- cpdma_chan_destroy(priv->txch);
- cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
@@ -2531,8 +2583,6 @@ static int cpsw_remove(struct platform_device *pdev)
unregister_netdev(ndev);
cpsw_ale_destroy(priv->ale);
- cpdma_chan_destroy(priv->txch);
- cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
pm_runtime_disable(&pdev->dev);
device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
@@ -2555,16 +2605,12 @@ static int cpsw_suspend(struct device *dev)
for (i = 0; i < priv->data.slaves; i++) {
if (netif_running(priv->slaves[i].ndev))
cpsw_ndo_stop(priv->slaves[i].ndev);
- soft_reset_slave(priv->slaves + i);
}
} else {
if (netif_running(ndev))
cpsw_ndo_stop(ndev);
- for_each_slave(priv, soft_reset_slave);
}
- pm_runtime_put_sync(&pdev->dev);
-
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(&pdev->dev);
@@ -2577,8 +2623,6 @@ static int cpsw_resume(struct device *dev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
- pm_runtime_get_sync(&pdev->dev);
-
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
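
A note on the pm_runtime error handling added throughout cpsw above: pm_runtime_get_sync() increments the device's usage counter even when the resume callback fails, so every error path must rebalance it with pm_runtime_put_noidle(). A minimal sketch of the idiom, assuming only a bare struct device (the helper name is illustrative, not part of this patch):

    #include <linux/pm_runtime.h>

    /* Resume the device for register access; on failure, drop the
     * usage count that pm_runtime_get_sync() took anyway.
     */
    static int runtime_get_checked(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            return 0;
    }

Each ndo/ethtool entry point then pairs this with pm_runtime_put() once the hardware access is done, which is exactly what the new ethtool begin/complete hooks above centralize for the ethtool paths.
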
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index e50afd1b2eda..16b54c6f32c2 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -35,7 +35,6 @@ struct cpsw_platform_data {
u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
u32 ale_entries; /* ale table size */
u32 bd_ram_size; /*buffer descriptor ram size */
- u32 rx_descs; /* Number of Rx Descriptios */
u32 mac_control; /* Mac control register */
u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
bool dual_emac; /* Enable Dual EMAC mode */
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 18bf3a8fdc50..1c653ca7c316 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -21,7 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
-
+#include <linux/genalloc.h>
#include "davinci_cpdma.h"
/* DMA Registers */
@@ -87,9 +87,8 @@ struct cpdma_desc_pool {
void *cpumap; /* dma_alloc map */
int desc_size, mem_size;
int num_desc, used_desc;
- unsigned long *bitmap;
struct device *dev;
- spinlock_t lock;
+ struct gen_pool *gen_pool;
};
enum cpdma_state {
@@ -117,6 +116,7 @@ struct cpdma_chan {
int chan_num;
spinlock_t lock;
int count;
+ u32 desc_num;
u32 mask;
cpdma_handler_fn handler;
enum dma_data_direction dir;
@@ -145,6 +145,19 @@ struct cpdma_chan {
(directed << CPDMA_TO_PORT_SHIFT)); \
} while (0)
+static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+{
+ if (!pool)
+ return;
+
+ WARN_ON(pool->used_desc);
+ if (pool->cpumap)
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+ pool->phys);
+ else
+ iounmap(pool->iomap);
+}
+
/*
* Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
* emac) have dedicated on-chip memory for these descriptors. Some other
@@ -155,24 +168,25 @@ static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
int size, int align)
{
- int bitmap_size;
struct cpdma_desc_pool *pool;
+ int ret;
pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
if (!pool)
- goto fail;
-
- spin_lock_init(&pool->lock);
+ goto gen_pool_create_fail;
pool->dev = dev;
pool->mem_size = size;
pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
pool->num_desc = size / pool->desc_size;
- bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
- pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
- if (!pool->bitmap)
- goto fail;
+ pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
+ "cpdma");
+ if (IS_ERR(pool->gen_pool)) {
+ dev_err(dev, "pool create failed %ld\n",
+ PTR_ERR(pool->gen_pool));
+ goto gen_pool_create_fail;
+ }
if (phys) {
pool->phys = phys;
@@ -185,24 +199,22 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
}
- if (pool->iomap)
- return pool;
-fail:
- return NULL;
-}
-
-static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
-{
- if (!pool)
- return;
+ if (!pool->iomap)
+ goto gen_pool_create_fail;
- WARN_ON(pool->used_desc);
- if (pool->cpumap) {
- dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
- pool->phys);
- } else {
- iounmap(pool->iomap);
+ ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
+ pool->phys, pool->mem_size, -1);
+ if (ret < 0) {
+ dev_err(dev, "pool add failed %d\n", ret);
+ goto gen_pool_add_virt_fail;
}
+
+ return pool;
+
+gen_pool_add_virt_fail:
+ cpdma_desc_pool_destroy(pool);
+gen_pool_create_fail:
+ return NULL;
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -220,47 +232,23 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
}
static struct cpdma_desc __iomem *
-cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
+cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
- unsigned long flags;
- int index;
- int desc_start;
- int desc_end;
struct cpdma_desc __iomem *desc = NULL;
- spin_lock_irqsave(&pool->lock, flags);
-
- if (is_rx) {
- desc_start = 0;
- desc_end = pool->num_desc/2;
- } else {
- desc_start = pool->num_desc/2;
- desc_end = pool->num_desc;
- }
-
- index = bitmap_find_next_zero_area(pool->bitmap,
- desc_end, desc_start, num_desc, 0);
- if (index < desc_end) {
- bitmap_set(pool->bitmap, index, num_desc);
- desc = pool->iomap + pool->desc_size * index;
+ desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
+ pool->desc_size);
+ if (desc)
pool->used_desc++;
- }
- spin_unlock_irqrestore(&pool->lock, flags);
return desc;
}
static void cpdma_desc_free(struct cpdma_desc_pool *pool,
struct cpdma_desc __iomem *desc, int num_desc)
{
- unsigned long flags, index;
-
- index = ((unsigned long)desc - (unsigned long)pool->iomap) /
- pool->desc_size;
- spin_lock_irqsave(&pool->lock, flags);
- bitmap_clear(pool->bitmap, index, num_desc);
+ gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
pool->used_desc--;
- spin_unlock_irqrestore(&pool->lock, flags);
}
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
@@ -516,6 +504,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
chan->state = CPDMA_STATE_IDLE;
chan->chan_num = chan_num;
chan->handler = handler;
+ chan->desc_num = ctlr->pool->num_desc / 2;
if (is_rx_chan(chan)) {
chan->hdp = ctlr->params.rxhdp + offset;
@@ -543,6 +532,12 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
+int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
+{
+ return ctlr->pool->num_desc / 2;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
+
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
struct cpdma_ctlr *ctlr;
@@ -675,7 +670,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
goto unlock_ret;
}
- desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
+ if (chan->count >= chan->desc_num) {
+ chan->stats.desc_alloc_fail++;
+ ret = -ENOMEM;
+ goto unlock_ret;
+ }
+
+ desc = cpdma_desc_alloc(ctlr->pool);
if (!desc) {
chan->stats.desc_alloc_fail++;
ret = -ENOMEM;
@@ -721,24 +722,16 @@ EXPORT_SYMBOL_GPL(cpdma_chan_submit);
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
- unsigned long flags;
- int index;
- bool ret;
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc_pool *pool = ctlr->pool;
+ bool free_tx_desc;
+ unsigned long flags;
- spin_lock_irqsave(&pool->lock, flags);
-
- index = bitmap_find_next_zero_area(pool->bitmap,
- pool->num_desc, pool->num_desc/2, 1, 0);
-
- if (index < pool->num_desc)
- ret = true;
- else
- ret = false;
-
- spin_unlock_irqrestore(&pool->lock, flags);
- return ret;
+ spin_lock_irqsave(&chan->lock, flags);
+ free_tx_desc = (chan->count < chan->desc_num) &&
+ gen_pool_avail(pool->gen_pool);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
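
The cpdma rework above swaps a hand-rolled bitmap allocator (plus its own spinlock) for the kernel's genalloc API, which handles locking internally. A self-contained sketch of that pattern, assuming a pre-mapped descriptor region; the function and pool names here are illustrative:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/genalloc.h>
    #include <linux/log2.h>

    /* Carve a pre-mapped region into fixed-size descriptor slots.
     * min_alloc_order = ilog2(desc_size) makes every allocation
     * return exactly one descriptor.
     */
    static struct gen_pool *desc_pool_init(struct device *dev,
                                           void __iomem *virt,
                                           phys_addr_t phys,
                                           size_t mem_size,
                                           size_t desc_size)
    {
            struct gen_pool *pool;
            int ret;

            pool = devm_gen_pool_create(dev, ilog2(desc_size), -1, "descs");
            if (IS_ERR(pool))
                    return pool;

            ret = gen_pool_add_virt(pool, (unsigned long)virt, phys,
                                    mem_size, -1);
            if (ret < 0)
                    return ERR_PTR(ret);

            return pool;
    }

Allocation and free then reduce to gen_pool_alloc(pool, desc_size) and gen_pool_free(pool, addr, desc_size), which is all the new cpdma_desc_alloc()/cpdma_desc_free() do; the fixed rx/tx split of the old bitmap is replaced by the per-channel desc_num bound checked in cpdma_chan_submit().
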
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 86dee487f2f0..80c015cbbce5 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -81,6 +81,7 @@ int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler);
+int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr);
int cpdma_chan_destroy(struct cpdma_chan *chan);
int cpdma_chan_start(struct cpdma_chan *chan);
int cpdma_chan_stop(struct cpdma_chan *chan);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index f56d66e6ec15..c6c54659f8d4 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -348,7 +348,6 @@ struct emac_priv {
u32 rx_addr_type;
const char *phy_id;
struct device_node *phy_node;
- struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
void (*int_enable) (void);
@@ -486,43 +485,6 @@ static void emac_get_drvinfo(struct net_device *ndev,
}
/**
- * emac_get_settings - Get EMAC settings
- * @ndev: The DaVinci EMAC network adapter
- * @ecmd: ethtool command
- *
- * Executes ethool get command
- *
- */
-static int emac_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
-{
- struct emac_priv *priv = netdev_priv(ndev);
- if (priv->phydev)
- return phy_ethtool_gset(priv->phydev, ecmd);
- else
- return -EOPNOTSUPP;
-
-}
-
-/**
- * emac_set_settings - Set EMAC settings
- * @ndev: The DaVinci EMAC network adapter
- * @ecmd: ethtool command
- *
- * Executes ethool set command
- *
- */
-static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
- struct emac_priv *priv = netdev_priv(ndev);
- if (priv->phydev)
- return phy_ethtool_sset(priv->phydev, ecmd);
- else
- return -EOPNOTSUPP;
-
-}
-
-/**
* emac_get_coalesce - Get interrupt coalesce settings for this device
* @ndev : The DaVinci EMAC network adapter
* @coal : ethtool coalesce settings structure
@@ -625,12 +587,12 @@ static int emac_set_coalesce(struct net_device *ndev,
*/
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
- .get_settings = emac_get_settings,
- .set_settings = emac_set_settings,
.get_link = ethtool_op_get_link,
.get_coalesce = emac_get_coalesce,
.set_coalesce = emac_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**
@@ -651,8 +613,8 @@ static void emac_update_phystatus(struct emac_priv *priv)
mac_control = emac_read(EMAC_MACCONTROL);
cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
DUPLEX_FULL : DUPLEX_HALF;
- if (priv->phydev)
- new_duplex = priv->phydev->duplex;
+ if (ndev->phydev)
+ new_duplex = ndev->phydev->duplex;
else
new_duplex = DUPLEX_FULL;
@@ -1454,7 +1416,7 @@ static void emac_poll_controller(struct net_device *ndev)
static void emac_adjust_link(struct net_device *ndev)
{
struct emac_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = ndev->phydev;
unsigned long flags;
int new_state = 0;
@@ -1483,7 +1445,7 @@ static void emac_adjust_link(struct net_device *ndev)
}
if (new_state) {
emac_update_phystatus(priv);
- phy_print_status(priv->phydev);
+ phy_print_status(ndev->phydev);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1505,15 +1467,13 @@ static void emac_adjust_link(struct net_device *ndev)
*/
static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
{
- struct emac_priv *priv = netdev_priv(ndev);
-
if (!(netif_running(ndev)))
return -EINVAL;
/* TODO: Add phy read and write and private statistics get feature */
- if (priv->phydev)
- return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+ if (ndev->phydev)
+ return phy_mii_ioctl(ndev->phydev, ifrq, cmd);
else
return -EOPNOTSUPP;
}
@@ -1542,6 +1502,7 @@ static int emac_dev_open(struct net_device *ndev)
int res_num = 0, irq_num = 0;
int i = 0;
struct emac_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
ret = pm_runtime_get_sync(&priv->pdev->dev);
if (ret < 0) {
@@ -1607,12 +1568,10 @@ static int emac_dev_open(struct net_device *ndev)
cpdma_ctlr_start(priv->dma);
- priv->phydev = NULL;
-
if (priv->phy_node) {
- priv->phydev = of_phy_connect(ndev, priv->phy_node,
- &emac_adjust_link, 0, 0);
- if (!priv->phydev) {
+ phydev = of_phy_connect(ndev, priv->phy_node,
+ &emac_adjust_link, 0, 0);
+ if (!phydev) {
dev_err(emac_dev, "could not connect to phy %s\n",
priv->phy_node->full_name);
ret = -ENODEV;
@@ -1621,7 +1580,7 @@ static int emac_dev_open(struct net_device *ndev)
}
/* use the first phy on the bus if pdata did not give us a phy id */
- if (!priv->phydev && !priv->phy_id) {
+ if (!phydev && !priv->phy_id) {
struct device *phy;
phy = bus_find_device(&mdio_bus_type, NULL, NULL,
@@ -1630,16 +1589,15 @@ static int emac_dev_open(struct net_device *ndev)
priv->phy_id = dev_name(phy);
}
- if (!priv->phydev && priv->phy_id && *priv->phy_id) {
- priv->phydev = phy_connect(ndev, priv->phy_id,
- &emac_adjust_link,
- PHY_INTERFACE_MODE_MII);
+ if (!phydev && priv->phy_id && *priv->phy_id) {
+ phydev = phy_connect(ndev, priv->phy_id,
+ &emac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
- if (IS_ERR(priv->phydev)) {
+ if (IS_ERR(phydev)) {
dev_err(emac_dev, "could not connect to phy %s\n",
priv->phy_id);
- ret = PTR_ERR(priv->phydev);
- priv->phydev = NULL;
+ ret = PTR_ERR(phydev);
goto err;
}
@@ -1647,10 +1605,10 @@ static int emac_dev_open(struct net_device *ndev)
priv->speed = 0;
priv->duplex = ~0;
- phy_attached_info(priv->phydev);
+ phy_attached_info(phydev);
}
- if (!priv->phydev) {
+ if (!phydev) {
/* No PHY , fix the link, speed and duplex settings */
dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
priv->link = 1;
@@ -1665,8 +1623,8 @@ static int emac_dev_open(struct net_device *ndev)
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (phydev)
+ phy_start(phydev);
return 0;
@@ -1717,8 +1675,8 @@ static int emac_dev_stop(struct net_device *ndev)
cpdma_ctlr_stop(priv->dma);
emac_write(EMAC_SOFTRESET, 1);
- if (priv->phydev)
- phy_disconnect(priv->phydev);
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
/* Free IRQ */
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
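
davinci_emac drops both its cached priv->phydev pointer and its get/set_settings callbacks: phy_connect()/of_phy_connect() already publish the attached PHY in ndev->phydev, and phylib provides generic ksettings handlers on top of it. The resulting ethtool wiring is pure boilerplate; a hedged sketch (the ops name is illustrative):

    #include <linux/ethtool.h>
    #include <linux/phy.h>

    static const struct ethtool_ops example_ethtool_ops = {
            .get_link               = ethtool_op_get_link,
            /* Both helpers operate on ndev->phydev, which phy_connect()
             * fills in, so the driver needs no glue of its own.
             */
            .get_link_ksettings     = phy_ethtool_get_link_ksettings,
            .set_link_ksettings     = phy_ethtool_set_link_ksettings,
    };

The same conversion shows up in dwceqos and ixp4xx_eth in this series; it also retires the legacy struct ethtool_cmd, whose 32-bit link-mode masks the ksettings bitmaps supersede.
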
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 4e7c9b9b042a..33df340db1f1 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -53,6 +53,10 @@
#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
+struct davinci_mdio_of_param {
+ int autosuspend_delay_ms;
+};
+
struct davinci_mdio_regs {
u32 version;
u32 control;
@@ -90,19 +94,19 @@ static const struct mdio_platform_data default_pdata = {
struct davinci_mdio_data {
struct mdio_platform_data pdata;
struct davinci_mdio_regs __iomem *regs;
- spinlock_t lock;
struct clk *clk;
struct device *dev;
struct mii_bus *bus;
- bool suspended;
+ bool active_in_suspend;
unsigned long access_time; /* jiffies */
/* Indicates that driver shouldn't modify phy_mask in case
* if MDIO bus is registered from DT.
*/
bool skip_scan;
+ u32 clk_div;
};
-static void __davinci_mdio_reset(struct davinci_mdio_data *data)
+static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
{
u32 mdio_in, div, mdio_out_khz, access_time;
@@ -111,9 +115,7 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data)
if (div > CONTROL_MAX_DIV)
div = CONTROL_MAX_DIV;
- /* set enable and clock divider */
- __raw_writel(div | CONTROL_ENABLE, &data->regs->control);
-
+ data->clk_div = div;
/*
* One mdio transaction consists of:
* 32 bits of preamble
@@ -134,12 +136,23 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data)
data->access_time = 1;
}
+static void davinci_mdio_enable(struct davinci_mdio_data *data)
+{
+ /* set enable and clock divider */
+ __raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
+}
+
static int davinci_mdio_reset(struct mii_bus *bus)
{
struct davinci_mdio_data *data = bus->priv;
u32 phy_mask, ver;
+ int ret;
- __davinci_mdio_reset(data);
+ ret = pm_runtime_get_sync(data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(data->dev);
+ return ret;
+ }
/* wait for scan logic to settle */
msleep(PHY_MAX_ADDR * data->access_time);
@@ -150,7 +163,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
(ver >> 8) & 0xff, ver & 0xff);
if (data->skip_scan)
- return 0;
+ goto done;
/* get phy mask from the alive register */
phy_mask = __raw_readl(&data->regs->alive);
@@ -165,6 +178,10 @@ static int davinci_mdio_reset(struct mii_bus *bus)
}
data->bus->phy_mask = phy_mask;
+done:
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+
return 0;
}
@@ -190,7 +207,7 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
* operation
*/
dev_warn(data->dev, "resetting idled controller\n");
- __davinci_mdio_reset(data);
+ davinci_mdio_enable(data);
return -EAGAIN;
}
@@ -225,11 +242,10 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- spin_lock(&data->lock);
-
- if (data->suspended) {
- spin_unlock(&data->lock);
- return -ENODEV;
+ ret = pm_runtime_get_sync(data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(data->dev);
+ return ret;
}
reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
@@ -255,8 +271,8 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
break;
}
- spin_unlock(&data->lock);
-
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
return ret;
}
@@ -270,11 +286,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- spin_lock(&data->lock);
-
- if (data->suspended) {
- spin_unlock(&data->lock);
- return -ENODEV;
+ ret = pm_runtime_get_sync(data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(data->dev);
+ return ret;
}
reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
@@ -295,9 +310,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
break;
}
- spin_unlock(&data->lock);
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
- return 0;
+ return ret;
}
#if IS_ENABLED(CONFIG_OF)
@@ -320,6 +336,19 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
}
#endif
+#if IS_ENABLED(CONFIG_OF)
+static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
+ .autosuspend_delay_ms = 100,
+};
+
+static const struct of_device_id davinci_mdio_of_mtable[] = {
+ { .compatible = "ti,davinci_mdio", },
+ { .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
+#endif
+
static int davinci_mdio_probe(struct platform_device *pdev)
{
struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -328,6 +357,7 @@ static int davinci_mdio_probe(struct platform_device *pdev)
struct resource *res;
struct phy_device *phy;
int ret, addr;
+ int autosuspend_delay_ms = -1;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -340,9 +370,22 @@ static int davinci_mdio_probe(struct platform_device *pdev)
}
if (dev->of_node) {
- if (davinci_mdio_probe_dt(&data->pdata, pdev))
- data->pdata = default_pdata;
+ const struct of_device_id *of_id;
+
+ ret = davinci_mdio_probe_dt(&data->pdata, pdev);
+ if (ret)
+ return ret;
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+
+ of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev);
+ if (of_id) {
+ const struct davinci_mdio_of_param *of_mdio_data;
+
+ of_mdio_data = of_id->data;
+ if (of_mdio_data)
+ autosuspend_delay_ms =
+ of_mdio_data->autosuspend_delay_ms;
+ }
} else {
data->pdata = pdata ? (*pdata) : default_pdata;
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -356,26 +399,25 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->bus->parent = dev;
data->bus->priv = data;
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
data->clk = devm_clk_get(dev, "fck");
if (IS_ERR(data->clk)) {
dev_err(dev, "failed to get device clock\n");
- ret = PTR_ERR(data->clk);
- data->clk = NULL;
- goto bail_out;
+ return PTR_ERR(data->clk);
}
dev_set_drvdata(dev, data);
data->dev = dev;
- spin_lock_init(&data->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs)) {
- ret = PTR_ERR(data->regs);
- goto bail_out;
- }
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+
+ davinci_mdio_init_clk(data);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
/* register the mii bus
* Create PHYs from DT only in case if PHY child nodes are explicitly
@@ -404,9 +446,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
return 0;
bail_out:
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
return ret;
}
@@ -417,29 +458,47 @@ static int davinci_mdio_remove(struct platform_device *pdev)
if (data->bus)
mdiobus_unregister(data->bus);
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int davinci_mdio_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int davinci_mdio_runtime_suspend(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
u32 ctrl;
- spin_lock(&data->lock);
-
/* shutdown the scan state machine */
ctrl = __raw_readl(&data->regs->control);
ctrl &= ~CONTROL_ENABLE;
__raw_writel(ctrl, &data->regs->control);
wait_for_idle(data);
- data->suspended = true;
- spin_unlock(&data->lock);
- pm_runtime_put_sync(data->dev);
+ return 0;
+}
+
+static int davinci_mdio_runtime_resume(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+
+ davinci_mdio_enable(data);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int davinci_mdio_suspend(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ data->active_in_suspend = !pm_runtime_status_suspended(dev);
+ if (data->active_in_suspend)
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0)
+ return ret;
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
@@ -454,31 +513,19 @@ static int davinci_mdio_resume(struct device *dev)
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
- pm_runtime_get_sync(data->dev);
-
- spin_lock(&data->lock);
- /* restart the scan state machine */
- __davinci_mdio_reset(data);
-
- data->suspended = false;
- spin_unlock(&data->lock);
+ if (data->active_in_suspend)
+ pm_runtime_force_resume(dev);
return 0;
}
#endif
static const struct dev_pm_ops davinci_mdio_pm_ops = {
+ SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend,
+ davinci_mdio_runtime_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
};
-#if IS_ENABLED(CONFIG_OF)
-static const struct of_device_id davinci_mdio_of_mtable[] = {
- { .compatible = "ti,davinci_mdio", },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
-#endif
-
static struct platform_driver davinci_mdio_driver = {
.driver = {
.name = "davinci_mdio",
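
The davinci_mdio conversion replaces the suspended flag and bus spinlock with runtime PM plus autosuspend: each MDIO transfer wakes the controller, and the put side only arms a timer, so bursts of accesses do not bounce the clock. The access pattern, as a sketch over a bare struct device (the xfer callback stands in for the real read/write body):

    #include <linux/pm_runtime.h>

    static int mdio_xfer_wrapped(struct device *dev,
                                 int (*xfer)(struct device *dev))
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            ret = xfer(dev);

            /* Defer the actual suspend by the autosuspend delay. */
            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
            return ret;
    }

The delay itself comes from the new "ti,cpsw-mdio" compatible's match data (100 ms above) via pm_runtime_set_autosuspend_delay(); a negative delay prevents runtime suspend altogether, which appears to be why autosuspend_delay_ms defaults to -1, preserving always-on behaviour for plain "ti,davinci_mdio".
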
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 561703317312..ece0ea0f6b38 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -1651,7 +1651,6 @@ static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
dma_addr_t head_list_phys;
u32 ack = 1;
- host_int = 0;
if (priv->tlan_rev < 0x30) {
TLAN_DBG(TLAN_DEBUG_TX,
"TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 0a15acc075b3..11213a38c795 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -462,7 +462,7 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
struct mpipe_data *md = &mpipe_data[instance];
struct skb_shared_hwtstamps shhwtstamps;
- struct timespec ts;
+ struct timespec64 ts;
shtx->tx_flags |= SKBTX_IN_PROGRESS;
gxio_mpipe_get_timestamp(&md->context, &ts);
@@ -886,9 +886,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
{
- struct timespec ts;
+ struct timespec64 ts;
- getnstimeofday(&ts);
+ ktime_get_ts64(&ts);
gxio_mpipe_set_timestamp(&md->context, &ts);
mutex_init(&md->ptp_lock);
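
The tilegx hunk is y2038 work: struct timespec carries a 32-bit seconds field on 32-bit systems and overflows in 2038, while timespec64 does not. Note the replacement also appears to move the PTP seed from wall-clock time (getnstimeofday) to the monotonic clock, which is acceptable for initializing a free-running counter. Minimal sketch:

    #include <linux/time64.h>
    #include <linux/timekeeping.h>

    static void sample_time(struct timespec64 *ts)
    {
            /* Monotonic time, 64-bit seconds on every architecture. */
            ktime_get_ts64(ts);
    }
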
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 54874783476a..74e671906ddb 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -280,7 +280,7 @@ struct tc35815_regs {
* Descriptors
*/
-/* Frame descripter */
+/* Frame descriptor */
struct FDesc {
volatile __u32 FDNext;
volatile __u32 FDSystem;
@@ -288,7 +288,7 @@ struct FDesc {
volatile __u32 FDCtl;
};
-/* Buffer descripter */
+/* Buffer descriptor */
struct BDesc {
volatile __u32 BuffData;
volatile __u32 BDCtl;
@@ -296,7 +296,7 @@ struct BDesc {
#define FD_ALIGN 16
-/* Frame Descripter bit assign ---------------------------------------------- */
+/* Frame Descriptor bit assign ---------------------------------------------- */
#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */
#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */
#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */
@@ -309,7 +309,7 @@ struct BDesc {
#define FD_Next_EOL 0x00000001 /* FD EOL indicator */
#define FD_BDCnt_SHIFT 16
-/* Buffer Descripter bit assign --------------------------------------------- */
+/* Buffer Descriptor bit assign --------------------------------------------- */
#define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */
#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */
#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */
@@ -1387,7 +1387,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
if (status & Int_IntExBD) {
if (netif_msg_rx_err(lp))
dev_warn(&dev->dev,
- "Excessive Buffer Descriptiors (%#x).\n",
+ "Excessive Buffer Descriptors (%#x).\n",
status);
dev->stats.rx_length_errors++;
ret = 0;
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 7b44968e02e6..ddced28e8247 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1144,8 +1144,8 @@ xirc2ps_interrupt(int irq, void *dev_id)
dev->stats.tx_packets += lp->last_ptr_value - n;
netif_wake_queue(dev);
}
- if (tx_status & 0x0002) { /* Execessive collissions */
- pr_debug("tx restarted due to execssive collissions\n");
+ if (tx_status & 0x0002) { /* Excessive collisions */
+ pr_debug("tx restarted due to excessive collisions\n");
PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */
}
if (tx_status & 0x0040)
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 5138407941cf..7f127dc1b7ba 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -171,7 +171,6 @@ struct port {
struct npe *npe;
struct net_device *netdev;
struct napi_struct napi;
- struct phy_device *phydev;
struct eth_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
@@ -562,7 +561,7 @@ static void ixp4xx_mdio_remove(void)
static void ixp4xx_adjust_link(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
- struct phy_device *phydev = port->phydev;
+ struct phy_device *phydev = dev->phydev;
if (!phydev->link) {
if (port->speed) {
@@ -976,8 +975,6 @@ static void eth_set_mcast_list(struct net_device *dev)
static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
- struct port *port = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
@@ -988,7 +985,7 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
return hwtstamp_get(dev, req);
}
- return phy_mii_ioctl(port->phydev, req, cmd);
+ return phy_mii_ioctl(dev->phydev, req, cmd);
}
/* ethtool support */
@@ -1005,22 +1002,9 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}
-static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct port *port = netdev_priv(dev);
- return phy_ethtool_gset(port->phydev, cmd);
-}
-
-static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct port *port = netdev_priv(dev);
- return phy_ethtool_sset(port->phydev, cmd);
-}
-
static int ixp4xx_nway_reset(struct net_device *dev)
{
- struct port *port = netdev_priv(dev);
- return phy_start_aneg(port->phydev);
+ return phy_start_aneg(dev->phydev);
}
int ixp46x_phc_index = -1;
@@ -1054,11 +1038,11 @@ static int ixp4xx_get_ts_info(struct net_device *dev,
static const struct ethtool_ops ixp4xx_ethtool_ops = {
.get_drvinfo = ixp4xx_get_drvinfo,
- .get_settings = ixp4xx_get_settings,
- .set_settings = ixp4xx_set_settings,
.nway_reset = ixp4xx_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ixp4xx_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
@@ -1259,7 +1243,7 @@ static int eth_open(struct net_device *dev)
}
port->speed = 0; /* force "link up" message */
- phy_start(port->phydev);
+ phy_start(dev->phydev);
for (i = 0; i < ETH_ALEN; i++)
__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
@@ -1380,7 +1364,7 @@ static int eth_close(struct net_device *dev)
printk(KERN_CRIT "%s: unable to disable loopback\n",
dev->name);
- phy_stop(port->phydev);
+ phy_stop(dev->phydev);
if (!ports_open)
qmgr_disable_irq(TXDONE_QUEUE);
@@ -1405,6 +1389,7 @@ static int eth_init_one(struct platform_device *pdev)
struct port *port;
struct net_device *dev;
struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
+ struct phy_device *phydev = NULL;
u32 regs_phys;
char phy_id[MII_BUS_ID_SIZE + 3];
int err;
@@ -1466,14 +1451,14 @@ static int eth_init_one(struct platform_device *pdev)
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
mdio_bus->id, plat->phy);
- port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (IS_ERR(port->phydev)) {
- err = PTR_ERR(port->phydev);
+ phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phydev)) {
+ err = PTR_ERR(phydev);
goto err_free_mem;
}
- port->phydev->irq = PHY_POLL;
+ phydev->irq = PHY_POLL;
if ((err = register_netdev(dev)))
goto err_phy_dis;
@@ -1484,7 +1469,7 @@ static int eth_init_one(struct platform_device *pdev)
return 0;
err_phy_dis:
- phy_disconnect(port->phydev);
+ phy_disconnect(phydev);
err_free_mem:
npe_port_tab[NPE_ID(port->id)] = NULL;
release_resource(port->mem_res);
@@ -1498,10 +1483,11 @@ err_free:
static int eth_remove_one(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
+ struct phy_device *phydev = dev->phydev;
struct port *port = netdev_priv(dev);
unregister_netdev(dev);
- phy_disconnect(port->phydev);
+ phy_disconnect(phydev);
npe_port_tab[NPE_ID(port->id)] = NULL;
npe_release(port->npe);
release_resource(port->mem_res);
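
As in the davinci_emac conversion above, ixp4xx_eth stops caching the phy_device in its private struct: phy_connect() already stores the attachment in dev->phydev, so every later user can reach it from the net_device alone. The attach/detach lifecycle, sketched under that assumption (callback and helper names are illustrative):

    #include <linux/phy.h>

    static void adjust_link(struct net_device *dev)
    {
            /* Called by phylib on link changes; dev->phydev is valid. */
    }

    static int attach_phy(struct net_device *dev, const char *bus_id)
    {
            struct phy_device *phydev;

            phydev = phy_connect(dev, bus_id, &adjust_link,
                                 PHY_INTERFACE_MODE_MII);
            if (IS_ERR(phydev))
                    return PTR_ERR(phydev);

            /* phy_connect() has set dev->phydev; no private copy needed. */
            phy_start(phydev);
            return 0;
    }

    static void detach_phy(struct net_device *dev)
    {
            phy_stop(dev->phydev);
            phy_disconnect(dev->phydev);
    }
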
diff --git a/drivers/net/fddi/skfp/Makefile b/drivers/net/fddi/skfp/Makefile
index b0be0234abf6..a957a1c7e5ba 100644
--- a/drivers/net/fddi/skfp/Makefile
+++ b/drivers/net/fddi/skfp/Makefile
@@ -17,4 +17,4 @@ skfp-objs := skfddi.o hwmtm.o fplustm.o smt.o cfm.o \
# projects. To keep the source common for all those drivers (and
# thus simplify fixes to it), please do not clean it up!
-ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
+ccflags-y := -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index cadefe4fdaa2..310e0b9c2657 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
@@ -397,23 +396,6 @@ static struct socket *geneve_create_sock(struct net *net, bool ipv6,
return sock;
}
-static void geneve_notify_add_rx_port(struct geneve_sock *gs)
-{
- struct net_device *dev;
- struct sock *sk = gs->sock->sk;
- struct net *net = sock_net(sk);
- sa_family_t sa_family = geneve_get_sk_family(gs);
- __be16 port = inet_sk(sk)->inet_sport;
-
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
- if (dev->netdev_ops->ndo_add_geneve_port)
- dev->netdev_ops->ndo_add_geneve_port(dev, sa_family,
- port);
- }
- rcu_read_unlock();
-}
-
static int geneve_hlen(struct genevehdr *gh)
{
return sizeof(*gh) + gh->opt_len * 4;
@@ -533,7 +515,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
INIT_HLIST_HEAD(&gs->vni_list[h]);
/* Initialize the geneve udp offloads structure */
- geneve_notify_add_rx_port(gs);
+ udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
/* Mark socket as an encapsulation socket */
memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
@@ -548,31 +530,13 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
return gs;
}
-static void geneve_notify_del_rx_port(struct geneve_sock *gs)
-{
- struct net_device *dev;
- struct sock *sk = gs->sock->sk;
- struct net *net = sock_net(sk);
- sa_family_t sa_family = geneve_get_sk_family(gs);
- __be16 port = inet_sk(sk)->inet_sport;
-
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
- if (dev->netdev_ops->ndo_del_geneve_port)
- dev->netdev_ops->ndo_del_geneve_port(dev, sa_family,
- port);
- }
-
- rcu_read_unlock();
-}
-
static void __geneve_sock_release(struct geneve_sock *gs)
{
if (!gs || --gs->refcnt)
return;
list_del(&gs->list);
- geneve_notify_del_rx_port(gs);
+ udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
udp_tunnel_sock_release(gs->sock);
kfree_rcu(gs, rcu);
}
@@ -958,8 +922,8 @@ tx_error:
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
- else
- dev->stats.tx_errors++;
+
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
@@ -1048,8 +1012,8 @@ tx_error:
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
- else
- dev->stats.tx_errors++;
+
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
#endif
@@ -1165,29 +1129,20 @@ static struct device_type geneve_type = {
.name = "geneve",
};
-/* Calls the ndo_add_geneve_port of the caller in order to
+/* Calls the ndo_add_udp_enc_port of the caller in order to
* supply the listening GENEVE udp ports. Callers are expected
- * to implement the ndo_add_geneve_port.
+ * to implement the ndo_add_udp_enc_port.
*/
static void geneve_push_rx_ports(struct net_device *dev)
{
struct net *net = dev_net(dev);
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
- sa_family_t sa_family;
- struct sock *sk;
- __be16 port;
-
- if (!dev->netdev_ops->ndo_add_geneve_port)
- return;
rcu_read_lock();
- list_for_each_entry_rcu(gs, &gn->sock_list, list) {
- sk = gs->sock->sk;
- sa_family = sk->sk_family;
- port = inet_sk(sk)->inet_sport;
- dev->netdev_ops->ndo_add_geneve_port(dev, sa_family, port);
- }
+ list_for_each_entry_rcu(gs, &gn->sock_list, list)
+ udp_tunnel_push_rx_port(dev, gs->sock,
+ UDP_TUNNEL_TYPE_GENEVE);
rcu_read_unlock();
}
@@ -1508,6 +1463,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
+ LIST_HEAD(list_kill);
int err;
memset(tb, 0, sizeof(tb));
@@ -1519,8 +1475,10 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
err = geneve_configure(net, dev, &geneve_remote_unspec,
0, 0, 0, 0, htons(dst_port), true,
GENEVE_F_UDP_ZERO_CSUM6_RX);
- if (err)
- goto err;
+ if (err) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
@@ -1529,10 +1487,15 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
if (err)
goto err;
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0)
+ goto err;
+
return dev;
err:
- free_netdev(dev);
+ geneve_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
@@ -1542,7 +1505,7 @@ static int geneve_netdevice_event(struct notifier_block *unused,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- if (event == NETDEV_OFFLOAD_PUSH_GENEVE)
+ if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
geneve_push_rx_ports(dev);
return NOTIFY_DONE;
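
geneve no longer walks every netdev to call ndo_add_geneve_port(); the udp_tunnel core now fans the notification out to offload-capable NICs through the generic ndo_udp_tunnel_add/del hooks. From the tunnel driver's side the whole interface is three helpers, sketched here with illustrative wrapper names:

    #include <net/udp_tunnel.h>

    static void tunnel_sock_opened(struct socket *sock)
    {
            /* Tell every offload-capable NIC about the new rx port. */
            udp_tunnel_notify_add_rx_port(sock, UDP_TUNNEL_TYPE_GENEVE);
    }

    static void tunnel_sock_closed(struct socket *sock)
    {
            udp_tunnel_notify_del_rx_port(sock, UDP_TUNNEL_TYPE_GENEVE);
    }

    static void tunnel_replay_port(struct net_device *dev,
                                   struct socket *sock)
    {
            /* Replay one socket's port to one NIC, e.g. on the
             * NETDEV_UDP_TUNNEL_PUSH_INFO notifier seen above.
             */
            udp_tunnel_push_rx_port(dev, sock, UDP_TUNNEL_TYPE_GENEVE);
    }
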
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 4e976a0d5a76..97e0cbca0a08 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -16,7 +16,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index acb636963e90..072cddce9264 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -156,7 +156,7 @@ struct baycom_state {
/* --------------------------------------------------------------------- */
-static void __inline__ baycom_int_freq(struct baycom_state *bc)
+static inline void baycom_int_freq(struct baycom_state *bc)
{
#ifdef BAYCOM_DEBUG
unsigned long cur_jiffies = jiffies;
@@ -192,7 +192,7 @@ static void __inline__ baycom_int_freq(struct baycom_state *bc)
/* --------------------------------------------------------------------- */
-static __inline__ void par96_tx(struct net_device *dev, struct baycom_state *bc)
+static inline void par96_tx(struct net_device *dev, struct baycom_state *bc)
{
int i;
unsigned int data = hdlcdrv_getbits(&bc->hdrv);
@@ -216,7 +216,7 @@ static __inline__ void par96_tx(struct net_device *dev, struct baycom_state *bc)
/* --------------------------------------------------------------------- */
-static __inline__ void par96_rx(struct net_device *dev, struct baycom_state *bc)
+static inline void par96_rx(struct net_device *dev, struct baycom_state *bc)
{
int i;
unsigned int data, mask, mask2, descx;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 96f00c0261fa..6909c322de4e 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -244,7 +244,6 @@ static int netvsc_destroy_buf(struct hv_device *device)
static int netvsc_init_buf(struct hv_device *device)
{
int ret = 0;
- unsigned long t;
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
struct net_device *ndev;
@@ -305,9 +304,7 @@ static int netvsc_init_buf(struct hv_device *device)
goto cleanup;
}
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
- BUG_ON(t == 0);
-
+ wait_for_completion(&net_device->channel_init_wait);
/* Check the response */
if (init_packet->msg.v1_msg.
@@ -390,8 +387,7 @@ static int netvsc_init_buf(struct hv_device *device)
goto cleanup;
}
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
- BUG_ON(t == 0);
+ wait_for_completion(&net_device->channel_init_wait);
/* Check the response */
if (init_packet->msg.v1_msg.
@@ -445,7 +441,6 @@ static int negotiate_nvsp_ver(struct hv_device *device,
{
struct net_device *ndev = hv_get_drvdata(device);
int ret;
- unsigned long t;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
@@ -462,10 +457,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
if (ret != 0)
return ret;
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
-
- if (t == 0)
- return -ETIMEDOUT;
+ wait_for_completion(&net_device->channel_init_wait);
if (init_packet->msg.init_msg.init_complete.status !=
NVSP_STAT_SUCCESS)
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 979fa4455f72..8e830f741d47 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -466,7 +466,6 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
struct rndis_query_request *query;
struct rndis_query_complete *query_complete;
int ret = 0;
- unsigned long t;
if (!result)
return -EINVAL;
@@ -503,11 +502,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ wait_for_completion(&request->wait_event);
/* Copy the response back */
query_complete = &request->response_msg.msg.query_complete;
@@ -556,7 +551,6 @@ int rndis_filter_set_device_mac(struct net_device *ndev, char *mac)
u32 extlen = sizeof(struct rndis_config_parameter_info) +
2*NWADR_STRLEN + 4*ETH_ALEN;
int ret;
- unsigned long t;
request = get_rndis_request(rdev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
@@ -597,21 +591,13 @@ int rndis_filter_set_device_mac(struct net_device *ndev, char *mac)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- netdev_err(ndev, "timeout before we got a set response...\n");
- /*
- * can't put_rndis_request, since we may still receive a
- * send-completion.
- */
- return -EBUSY;
- } else {
- set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status != RNDIS_STATUS_SUCCESS) {
- netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
- set_complete->status);
- ret = -EINVAL;
- }
+ wait_for_completion(&request->wait_event);
+
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
}
cleanup:
@@ -631,7 +617,6 @@ rndis_filter_set_offload_params(struct net_device *ndev,
struct rndis_set_complete *set_complete;
u32 extlen = sizeof(struct ndis_offload_params);
int ret;
- unsigned long t;
u32 vsp_version = nvdev->nvsp_version;
if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
@@ -665,20 +650,12 @@ rndis_filter_set_offload_params(struct net_device *ndev,
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- netdev_err(ndev, "timeout before we got aOFFLOAD set response...\n");
- /* can't put_rndis_request, since we may still receive a
- * send-completion.
- */
- return -EBUSY;
- } else {
- set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status != RNDIS_STATUS_SUCCESS) {
- netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
- set_complete->status);
- ret = -EINVAL;
- }
+ wait_for_completion(&request->wait_event);
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
}
cleanup:
@@ -706,7 +683,6 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
u32 *itab;
u8 *keyp;
int i, ret;
- unsigned long t;
request = get_rndis_request(
rdev, RNDIS_MSG_SET,
@@ -749,20 +725,12 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- netdev_err(ndev, "timeout before we got a set response...\n");
- /* can't put_rndis_request, since we may still receive a
- * send-completion.
- */
- return -ETIMEDOUT;
- } else {
- set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status != RNDIS_STATUS_SUCCESS) {
- netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
- set_complete->status);
- ret = -EINVAL;
- }
+ wait_for_completion(&request->wait_event);
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
}
cleanup:
@@ -791,8 +759,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
struct rndis_set_complete *set_complete;
u32 status;
int ret;
- unsigned long t;
- struct net_device *ndev = dev->ndev;
request = get_rndis_request(dev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
@@ -815,26 +781,14 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ wait_for_completion(&request->wait_event);
- if (t == 0) {
- netdev_err(ndev,
- "timeout before we got a set response...\n");
- ret = -ETIMEDOUT;
- /*
- * We can't deallocate the request since we may still receive a
- * send completion for it.
- */
- goto exit;
- } else {
- set_complete = &request->response_msg.msg.set_complete;
- status = set_complete->status;
- }
+ set_complete = &request->response_msg.msg.set_complete;
+ status = set_complete->status;
cleanup:
if (request)
put_rndis_request(dev, request);
-exit:
return ret;
}
@@ -846,7 +800,6 @@ static int rndis_filter_init_device(struct rndis_device *dev)
struct rndis_initialize_complete *init_complete;
u32 status;
int ret;
- unsigned long t;
struct netvsc_device *nvdev = net_device_to_netvsc_device(dev->ndev);
request = get_rndis_request(dev, RNDIS_MSG_INIT,
@@ -870,12 +823,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
goto cleanup;
}
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
-
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ wait_for_completion(&request->wait_event);
init_complete = &request->response_msg.msg.init_complete;
status = init_complete->status;
@@ -1008,7 +956,6 @@ int rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device_info *device_info = additional_info;
struct ndis_offload_params offloads;
struct nvsp_message *init_packet;
- unsigned long t;
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
@@ -1151,11 +1098,8 @@ int rndis_filter_device_add(struct hv_device *dev,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret)
goto out;
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto out;
- }
+ wait_for_completion(&net_device->channel_init_wait);
+
if (init_packet->msg.v5_msg.subchn_comp.status !=
NVSP_STAT_SUCCESS) {
ret = -ENODEV;
@@ -1192,17 +1136,12 @@ void rndis_filter_device_remove(struct hv_device *dev)
{
struct netvsc_device *net_dev = hv_device_to_netvsc_device(dev);
struct rndis_device *rndis_dev = net_dev->extension;
- unsigned long t;
/* If not all subchannel offers are complete, wait for them until
* completion to avoid race.
*/
- while (net_dev->num_sc_offered > 0) {
- t = wait_for_completion_timeout(&net_dev->channel_init_wait,
- 10 * HZ);
- if (t == 0)
- WARN(1, "Netvsc: Waiting for sub-channel processing");
- }
+ if (net_dev->num_sc_offered > 0)
+ wait_for_completion(&net_dev->channel_init_wait);
/* Halt and release the rndis device */
rndis_filter_halt_device(rndis_dev);
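
The netvsc/rndis hunks above drop every 5-second wait_for_completion_timeout() in favor of an unbounded wait: if the host never responds the channel is wedged regardless, and the timeout path could not safely free the request because a send-completion might still arrive. The two primitives side by side, as a sketch:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static DECLARE_COMPLETION(reply_done);

    static int wait_bounded(void)
    {
            unsigned long t;

            /* Returns 0 on timeout, otherwise the jiffies left over. */
            t = wait_for_completion_timeout(&reply_done, 5 * HZ);
            return t ? 0 : -ETIMEDOUT;
    }

    static void wait_unbounded(void)
    {
            /* Only returns after complete()/complete_all() has run. */
            wait_for_completion(&reply_done);
    }
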
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1c4d395fbd49..18b4e8c7f68a 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -80,13 +80,6 @@ static void ipvlan_port_destroy(struct net_device *dev)
kfree_rcu(port, rcu);
}
-/* ipvlan network devices have devices nesting below it and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key ipvlan_netdev_xmit_lock_key;
-static struct lock_class_key ipvlan_netdev_addr_lock_key;
-
#define IPVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
@@ -96,19 +89,6 @@ static struct lock_class_key ipvlan_netdev_addr_lock_key;
#define IPVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
-static void ipvlan_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key);
-}
-
-static void ipvlan_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &ipvlan_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, ipvlan_set_lockdep_class_one, NULL);
-}
-
static int ipvlan_init(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -123,7 +103,7 @@ static int ipvlan_init(struct net_device *dev)
dev->gso_max_segs = phy_dev->gso_max_segs;
dev->hard_header_len = phy_dev->hard_header_len;
- ipvlan_set_lockdep_class(dev);
+ netdev_lockdep_set_classes(dev);
ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
if (!ipvlan->pcpu_stats)
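
ipvlan's per-driver lockdep boilerplate collapses into netdev_lockdep_set_classes(), a helper that gives the device its own lock classes for addr_list_lock and every TX queue's _xmit_lock in one call (macvlan below gets the same treatment, keeping only its nest-level subclass). Typical call site, sketched from an ndo_init-style hook:

    #include <linux/netdevice.h>

    static int stacked_dev_init(struct net_device *dev)
    {
            /* Stacked devices (ipvlan, macvlan, bonding, ...) take
             * their locks while the lower device's are already held;
             * distinct classes keep lockdep from flagging that
             * nesting as recursion.
             */
            netdev_lockdep_set_classes(dev);

            return 0;
    }
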
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 47ee2c840b55..0e7eff7f1cd2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -605,12 +605,41 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
dev_put(dev);
}
+static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
+ unsigned char **iv,
+ struct scatterlist **sg)
+{
+ size_t size, iv_offset, sg_offset;
+ struct aead_request *req;
+ void *tmp;
+
+ size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
+ iv_offset = size;
+ size += GCM_AES_IV_LEN;
+
+ size = ALIGN(size, __alignof__(struct scatterlist));
+ sg_offset = size;
+ size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+
+ tmp = kmalloc(size, GFP_ATOMIC);
+ if (!tmp)
+ return NULL;
+
+ *iv = (unsigned char *)(tmp + iv_offset);
+ *sg = (struct scatterlist *)(tmp + sg_offset);
+ req = tmp;
+
+ aead_request_set_tfm(req, tfm);
+
+ return req;
+}
+
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct net_device *dev)
{
int ret;
- struct scatterlist sg[MAX_SKB_FRAGS + 1];
- unsigned char iv[GCM_AES_IV_LEN];
+ struct scatterlist *sg;
+ unsigned char *iv;
struct ethhdr *eth;
struct macsec_eth_header *hh;
size_t unprotected_len;
@@ -668,8 +697,6 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_fill_sectag(hh, secy, pn);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
- macsec_fill_iv(iv, secy->sci, pn);
-
skb_put(skb, secy->icv_len);
if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
@@ -684,13 +711,15 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
return ERR_PTR(-EINVAL);
}
- req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC);
+ req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
if (!req) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
+ macsec_fill_iv(iv, secy->sci, pn);
+
sg_init_table(sg, MAX_SKB_FRAGS + 1);
skb_to_sgvec(skb, sg, 0, skb->len);
@@ -861,7 +890,6 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
out:
macsec_rxsa_put(rx_sa);
dev_put(dev);
- return;
}
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
@@ -871,8 +899,8 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
struct macsec_secy *secy)
{
int ret;
- struct scatterlist sg[MAX_SKB_FRAGS + 1];
- unsigned char iv[GCM_AES_IV_LEN];
+ struct scatterlist *sg;
+ unsigned char *iv;
struct aead_request *req;
struct macsec_eth_header *hdr;
u16 icv_len = secy->icv_len;
@@ -882,7 +910,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
if (!skb)
return ERR_PTR(-ENOMEM);
- req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
+ req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
if (!req) {
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
@@ -1234,7 +1262,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
struct crypto_aead *tfm;
int ret;
- tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
if (!tfm || IS_ERR(tfm))
return NULL;
@@ -3361,6 +3389,7 @@ static void __exit macsec_exit(void)
genl_unregister_family(&macsec_fam);
rtnl_link_unregister(&macsec_link_ops);
unregister_netdevice_notifier(&macsec_notifier);
+ rcu_barrier();
}
module_init(macsec_init);
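
The macsec_alloc_req() helper introduced above moves the IV and scatterlist off the stack into the same heap allocation as the aead_request, so the buffers handed to the asynchronous AEAD API stay valid after macsec_encrypt()/macsec_decrypt() return. The packing arithmetic can be sketched in isolation; struct pkt_ctx and pack_alloc() below are illustrative names, not kernel API:

/* Illustrative only: pack a header object, an IV and a scatterlist
 * array into one allocation, mirroring macsec_alloc_req() above.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

struct pkt_ctx {			/* hypothetical caller state */
	struct scatterlist *sg;
	unsigned char *iv;
};

static void *pack_alloc(size_t hdr_size, size_t iv_len, int nfrags,
			struct pkt_ctx *ctx, gfp_t gfp)
{
	size_t size = hdr_size, iv_off, sg_off;
	void *buf;

	iv_off = size;
	size += iv_len;
	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_off = size;
	size += sizeof(struct scatterlist) * nfrags;

	buf = kmalloc(size, gfp);
	if (!buf)
		return NULL;

	ctx->iv = buf + iv_off;
	ctx->sg = buf + sg_off;
	return buf;			/* header lives at the start */
}

One kfree() of the returned pointer then releases everything, which is exactly what lets the crypto completion path free the request without tracking three separate buffers.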
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0c65bd914aed..cd9b53834bf6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -788,7 +788,6 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
* "super class" of normal network devices; split their locks off into a
* separate class since they always nest.
*/
-static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;
#define ALWAYS_ON_FEATURES \
@@ -809,20 +808,12 @@ static int macvlan_get_nest_level(struct net_device *dev)
return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
}
-static void macvlan_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &macvlan_netdev_xmit_lock_key);
-}
-
static void macvlan_set_lockdep_class(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
lockdep_set_class_and_subclass(&dev->addr_list_lock,
&macvlan_netdev_addr_lock_key,
macvlan_get_nest_level(dev));
- netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
}
static int macvlan_init(struct net_device *dev)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index bd6720962b1f..95a13321f688 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -627,93 +627,6 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
return skb;
}
-/*
- * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
- * be shared with the tun/tap driver.
- */
-static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
- struct sk_buff *skb,
- struct virtio_net_hdr *vnet_hdr)
-{
- unsigned short gso_type = 0;
- if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
- case VIRTIO_NET_HDR_GSO_TCPV4:
- gso_type = SKB_GSO_TCPV4;
- break;
- case VIRTIO_NET_HDR_GSO_TCPV6:
- gso_type = SKB_GSO_TCPV6;
- break;
- case VIRTIO_NET_HDR_GSO_UDP:
- gso_type = SKB_GSO_UDP;
- break;
- default:
- return -EINVAL;
- }
-
- if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
- gso_type |= SKB_GSO_TCP_ECN;
-
- if (vnet_hdr->gso_size == 0)
- return -EINVAL;
- }
-
- if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start),
- macvtap16_to_cpu(q, vnet_hdr->csum_offset)))
- return -EINVAL;
- }
-
- if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size);
- skb_shinfo(skb)->gso_type = gso_type;
-
- /* Header must be checked, and gso_segs computed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
- }
- return 0;
-}
-
-static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
- const struct sk_buff *skb,
- struct virtio_net_hdr *vnet_hdr)
-{
- memset(vnet_hdr, 0, sizeof(*vnet_hdr));
-
- if (skb_is_gso(skb)) {
- struct skb_shared_info *sinfo = skb_shinfo(skb);
-
- /* This is a hint as to how much should be linear. */
- vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb));
- vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size);
- if (sinfo->gso_type & SKB_GSO_TCPV4)
- vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
- else if (sinfo->gso_type & SKB_GSO_TCPV6)
- vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
- else if (sinfo->gso_type & SKB_GSO_UDP)
- vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
- else
- BUG();
- if (sinfo->gso_type & SKB_GSO_TCP_ECN)
- vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
- } else
- vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- if (skb_vlan_tag_present(skb))
- vnet_hdr->csum_start = cpu_to_macvtap16(q,
- skb_checksum_start_offset(skb) + VLAN_HLEN);
- else
- vnet_hdr->csum_start = cpu_to_macvtap16(q,
- skb_checksum_start_offset(skb));
- vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset);
- } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
- } /* else everything is zero */
-}
-
/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
@@ -812,7 +725,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
skb->protocol = eth_hdr(skb)->h_proto;
if (vnet_hdr_len) {
- err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr);
+ err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
+ macvtap_is_little_endian(q));
if (err)
goto err_kfree;
}
@@ -880,7 +794,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (iov_iter_count(iter) < vnet_hdr_len)
return -EINVAL;
- macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr);
+ ret = virtio_net_hdr_from_skb(skb, &vnet_hdr,
+ macvtap_is_little_endian(q));
+ if (ret)
+ BUG();
if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
sizeof(vnet_hdr))
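
These macvtap hunks, together with the matching tun hunks later in the patch, replace open-coded GSO/checksum translation with the shared virtio_net_hdr_to_skb()/virtio_net_hdr_from_skb() helpers. A hedged usage sketch of both directions, with little_endian standing in for a per-device query such as macvtap_is_little_endian(q):

#include <linux/virtio_net.h>

/* receive path: apply a header taken from userspace to the skb */
static int example_rx(struct sk_buff *skb, struct virtio_net_hdr *vnet_hdr,
		      bool little_endian)
{
	/* non-zero on a bad GSO type/size or checksum offsets */
	return virtio_net_hdr_to_skb(skb, vnet_hdr, little_endian);
}

/* transmit path: derive a header from the skb's GSO/csum state */
static int example_tx(const struct sk_buff *skb, bool little_endian)
{
	struct virtio_net_hdr vnet_hdr;
	int err;

	err = virtio_net_hdr_from_skb(skb, &vnet_hdr, little_endian);
	if (err)
		return err;	/* skb has GSO state the header can't express */
	/* copy &vnet_hdr out to userspace here */
	return 0;
}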
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6dad9a9c356c..f96829415ce6 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -12,6 +12,9 @@ menuconfig PHYLIB
if PHYLIB
+config SWPHY
+ bool
+
comment "MII PHY device drivers"
config AQUANTIA_PHY
@@ -159,6 +162,7 @@ config MICROCHIP_PHY
config FIXED_PHY
tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB
+ select SWPHY
---help---
Adds the platform "fixed" MDIO Bus to cover the boards that use
PHYs that are not connected to the real MDIO bus.
@@ -254,6 +258,17 @@ config MDIO_BUS_MUX_MMIOREG
Currently, only 8-bit registers are supported.
+config MDIO_BUS_MUX_BCM_IPROC
+ tristate "Support for iProc based MDIO bus multiplexers"
+ depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
+ select MDIO_BUS_MUX
+ default ARCH_BCM_IPROC
+ help
+ This module provides a driver for MDIO bus multiplexers found in
+	  iProc-based Broadcom SoCs. This multiplexer connects one of several
+	  child MDIO buses to a parent bus. Buses can be internal as well as
+	  external, and the selection logic lives inside the same multiplexer.
+
config MDIO_BCM_UNIMAC
tristate "Broadcom UniMAC MDIO bus controller"
depends on HAS_IOMEM
@@ -271,6 +286,14 @@ config MDIO_BCM_IPROC
This module provides a driver for the MDIO busses found in the
Broadcom iProc SoC's.
+config INTEL_XWAY_PHY
+ tristate "Driver for Intel XWAY PHYs"
+ ---help---
+	  Supports the Intel XWAY (formerly Lantiq) 11G and 22F PHYs.
+ These PHYs are marked as standalone chips under the names
+ PEF 7061, PEF 7071 and PEF 7072 or integrated into the Intel
+ SoCs xRX200, xRX300, xRX330, xRX350 and xRX550.
+
endif # PHYLIB
config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index fcdbb9299fab..7158274327d0 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,6 +1,7 @@
# Makefile for Linux PHY drivers
-libphy-objs := phy.o phy_device.o mdio_bus.o mdio_device.o
+libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o
+libphy-$(CONFIG_SWPHY) += swphy.o
obj-$(CONFIG_PHYLIB) += libphy.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
@@ -39,8 +40,10 @@ obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
+obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
+obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 2d2e4339f0df..c649c101bbab 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -23,8 +23,10 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/gpio.h>
+#include <linux/seqlock.h>
+#include <linux/idr.h>
-#define MII_REGS_NUM 29
+#include "swphy.h"
struct fixed_mdio_bus {
struct mii_bus *mii_bus;
@@ -33,8 +35,8 @@ struct fixed_mdio_bus {
struct fixed_phy {
int addr;
- u16 regs[MII_REGS_NUM];
struct phy_device *phydev;
+ seqcount_t seqcount;
struct fixed_phy_status status;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
struct list_head node;
@@ -46,103 +48,10 @@ static struct fixed_mdio_bus platform_fmb = {
.phys = LIST_HEAD_INIT(platform_fmb.phys),
};
-static int fixed_phy_update_regs(struct fixed_phy *fp)
+static void fixed_phy_update(struct fixed_phy *fp)
{
- u16 bmsr = BMSR_ANEGCAPABLE;
- u16 bmcr = 0;
- u16 lpagb = 0;
- u16 lpa = 0;
-
if (gpio_is_valid(fp->link_gpio))
fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio);
-
- if (fp->status.duplex) {
- switch (fp->status.speed) {
- case 1000:
- bmsr |= BMSR_ESTATEN;
- break;
- case 100:
- bmsr |= BMSR_100FULL;
- break;
- case 10:
- bmsr |= BMSR_10FULL;
- break;
- default:
- break;
- }
- } else {
- switch (fp->status.speed) {
- case 1000:
- bmsr |= BMSR_ESTATEN;
- break;
- case 100:
- bmsr |= BMSR_100HALF;
- break;
- case 10:
- bmsr |= BMSR_10HALF;
- break;
- default:
- break;
- }
- }
-
- if (fp->status.link) {
- bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
-
- if (fp->status.duplex) {
- bmcr |= BMCR_FULLDPLX;
-
- switch (fp->status.speed) {
- case 1000:
- bmcr |= BMCR_SPEED1000;
- lpagb |= LPA_1000FULL;
- break;
- case 100:
- bmcr |= BMCR_SPEED100;
- lpa |= LPA_100FULL;
- break;
- case 10:
- lpa |= LPA_10FULL;
- break;
- default:
- pr_warn("fixed phy: unknown speed\n");
- return -EINVAL;
- }
- } else {
- switch (fp->status.speed) {
- case 1000:
- bmcr |= BMCR_SPEED1000;
- lpagb |= LPA_1000HALF;
- break;
- case 100:
- bmcr |= BMCR_SPEED100;
- lpa |= LPA_100HALF;
- break;
- case 10:
- lpa |= LPA_10HALF;
- break;
- default:
- pr_warn("fixed phy: unknown speed\n");
- return -EINVAL;
- }
- }
-
- if (fp->status.pause)
- lpa |= LPA_PAUSE_CAP;
-
- if (fp->status.asym_pause)
- lpa |= LPA_PAUSE_ASYM;
- }
-
- fp->regs[MII_PHYSID1] = 0;
- fp->regs[MII_PHYSID2] = 0;
-
- fp->regs[MII_BMSR] = bmsr;
- fp->regs[MII_BMCR] = bmcr;
- fp->regs[MII_LPA] = lpa;
- fp->regs[MII_STAT1000] = lpagb;
-
- return 0;
}
static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
@@ -150,29 +59,23 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
struct fixed_mdio_bus *fmb = bus->priv;
struct fixed_phy *fp;
- if (reg_num >= MII_REGS_NUM)
- return -1;
-
- /* We do not support emulating Clause 45 over Clause 22 register reads
- * return an error instead of bogus data.
- */
- switch (reg_num) {
- case MII_MMD_CTRL:
- case MII_MMD_DATA:
- return -1;
- default:
- break;
- }
-
list_for_each_entry(fp, &fmb->phys, node) {
if (fp->addr == phy_addr) {
- /* Issue callback if user registered it. */
- if (fp->link_update) {
- fp->link_update(fp->phydev->attached_dev,
- &fp->status);
- fixed_phy_update_regs(fp);
- }
- return fp->regs[reg_num];
+ struct fixed_phy_status state;
+ int s;
+
+ do {
+ s = read_seqcount_begin(&fp->seqcount);
+ /* Issue callback if user registered it. */
+ if (fp->link_update) {
+ fp->link_update(fp->phydev->attached_dev,
+ &fp->status);
+ fixed_phy_update(fp);
+ }
+ state = fp->status;
+ } while (read_seqcount_retry(&fp->seqcount, s));
+
+ return swphy_read_reg(reg_num, &state);
}
}
@@ -224,6 +127,7 @@ int fixed_phy_update_state(struct phy_device *phydev,
list_for_each_entry(fp, &fmb->phys, node) {
if (fp->addr == phydev->mdio.addr) {
+ write_seqcount_begin(&fp->seqcount);
#define _UPD(x) if (changed->x) \
fp->status.x = status->x
_UPD(link);
@@ -232,7 +136,8 @@ int fixed_phy_update_state(struct phy_device *phydev,
_UPD(pause);
_UPD(asym_pause);
#undef _UPD
- fixed_phy_update_regs(fp);
+ fixed_phy_update(fp);
+ write_seqcount_end(&fp->seqcount);
return 0;
}
}
@@ -249,11 +154,15 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp;
+ ret = swphy_validate_state(status);
+ if (ret < 0)
+ return ret;
+
fp = kzalloc(sizeof(*fp), GFP_KERNEL);
if (!fp)
return -ENOMEM;
- memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM);
+ seqcount_init(&fp->seqcount);
if (irq != PHY_POLL)
fmb->mii_bus->irq[phy_addr] = irq;
@@ -269,23 +178,20 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
goto err_regs;
}
- ret = fixed_phy_update_regs(fp);
- if (ret)
- goto err_gpio;
+ fixed_phy_update(fp);
list_add_tail(&fp->node, &fmb->phys);
return 0;
-err_gpio:
- if (gpio_is_valid(fp->link_gpio))
- gpio_free(fp->link_gpio);
err_regs:
kfree(fp);
return ret;
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
+static DEFINE_IDA(phy_fixed_ida);
+
static void fixed_phy_del(int phy_addr)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -297,14 +203,12 @@ static void fixed_phy_del(int phy_addr)
if (gpio_is_valid(fp->link_gpio))
gpio_free(fp->link_gpio);
kfree(fp);
+ ida_simple_remove(&phy_fixed_ida, phy_addr);
return;
}
}
}
-static int phy_fixed_addr;
-static DEFINE_SPINLOCK(phy_fixed_addr_lock);
-
struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
int link_gpio,
@@ -319,17 +223,15 @@ struct phy_device *fixed_phy_register(unsigned int irq,
return ERR_PTR(-EPROBE_DEFER);
/* Get the next available PHY address, up to PHY_MAX_ADDR */
- spin_lock(&phy_fixed_addr_lock);
- if (phy_fixed_addr == PHY_MAX_ADDR) {
- spin_unlock(&phy_fixed_addr_lock);
- return ERR_PTR(-ENOSPC);
- }
- phy_addr = phy_fixed_addr++;
- spin_unlock(&phy_fixed_addr_lock);
+ phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
+ if (phy_addr < 0)
+ return ERR_PTR(phy_addr);
ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
- if (ret < 0)
+ if (ret < 0) {
+ ida_simple_remove(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
+ }
phy = get_phy_device(fmb->mii_bus, phy_addr, false);
if (IS_ERR(phy)) {
@@ -434,6 +336,7 @@ static void __exit fixed_mdio_bus_exit(void)
list_del(&fp->node);
kfree(fp);
}
+ ida_destroy(&phy_fixed_ida);
}
module_exit(fixed_mdio_bus_exit);
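
The fixed_phy rewrite above replaces the emulated register array with a seqcount-protected fixed_phy_status snapshot: fixed_phy_update_state() brackets its writes with write_seqcount_begin()/write_seqcount_end(), and fixed_mdio_read() retries its copy until no writer raced with it. The core pattern, reduced to a sketch:

#include <linux/seqlock.h>
#include <linux/phy_fixed.h>

static seqcount_t seq;			/* seqcount_init(&seq) at setup */
static struct fixed_phy_status state;	/* shared, written under seq */

static struct fixed_phy_status read_snapshot(void)
{
	struct fixed_phy_status snap;
	unsigned int start;

	do {
		start = read_seqcount_begin(&seq);
		snap = state;		/* may race; the retry detects it */
	} while (read_seqcount_retry(&seq, start));

	return snap;
}

static void write_link(int link)
{
	write_seqcount_begin(&seq);	/* writers must be serialized externally */
	state.link = link;
	write_seqcount_end(&seq);
}

Readers never block, which matters here because fixed_mdio_read() runs the user's link_update() callback inside the retry loop.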
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
new file mode 100644
index 000000000000..c300ab5587b8
--- /dev/null
+++ b/drivers/net/phy/intel-xway.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2012 Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>
+ * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#define XWAY_MDIO_IMASK 0x19 /* interrupt mask */
+#define XWAY_MDIO_ISTAT 0x1A /* interrupt status */
+
+#define XWAY_MDIO_INIT_WOL BIT(15) /* Wake-On-LAN */
+#define XWAY_MDIO_INIT_MSRE BIT(14)
+#define XWAY_MDIO_INIT_NPRX BIT(13)
+#define XWAY_MDIO_INIT_NPTX BIT(12)
+#define XWAY_MDIO_INIT_ANE BIT(11) /* Auto-Neg error */
+#define XWAY_MDIO_INIT_ANC BIT(10) /* Auto-Neg complete */
+#define XWAY_MDIO_INIT_ADSC BIT(5) /* Link auto-downspeed detect */
+#define XWAY_MDIO_INIT_MPIPC BIT(4)
+#define XWAY_MDIO_INIT_MDIXC BIT(3)
+#define XWAY_MDIO_INIT_DXMC BIT(2) /* Duplex mode change */
+#define XWAY_MDIO_INIT_LSPC BIT(1) /* Link speed change */
+#define XWAY_MDIO_INIT_LSTC BIT(0) /* Link state change */
+#define XWAY_MDIO_INIT_MASK (XWAY_MDIO_INIT_LSTC | \
+ XWAY_MDIO_INIT_ADSC)
+
+#define ADVERTISED_MPD BIT(10) /* Multi-port device */
+
+/* LED Configuration */
+#define XWAY_MMD_LEDCH 0x01E0
+/* Inverse of SCAN Function */
+#define XWAY_MMD_LEDCH_NACS_NONE 0x0000
+#define XWAY_MMD_LEDCH_NACS_LINK 0x0001
+#define XWAY_MMD_LEDCH_NACS_PDOWN 0x0002
+#define XWAY_MMD_LEDCH_NACS_EEE 0x0003
+#define XWAY_MMD_LEDCH_NACS_ANEG 0x0004
+#define XWAY_MMD_LEDCH_NACS_ABIST 0x0005
+#define XWAY_MMD_LEDCH_NACS_CDIAG 0x0006
+#define XWAY_MMD_LEDCH_NACS_TEST 0x0007
+/* Slow Blink Frequency */
+#define XWAY_MMD_LEDCH_SBF_F02HZ 0x0000
+#define XWAY_MMD_LEDCH_SBF_F04HZ 0x0010
+#define XWAY_MMD_LEDCH_SBF_F08HZ 0x0020
+#define XWAY_MMD_LEDCH_SBF_F16HZ 0x0030
+/* Fast Blink Frequency */
+#define XWAY_MMD_LEDCH_FBF_F02HZ 0x0000
+#define XWAY_MMD_LEDCH_FBF_F04HZ 0x0040
+#define XWAY_MMD_LEDCH_FBF_F08HZ 0x0080
+#define XWAY_MMD_LEDCH_FBF_F16HZ 0x00C0
+/* LED Configuration */
+#define XWAY_MMD_LEDCL 0x01E1
+/* Complex Blinking Configuration */
+#define XWAY_MMD_LEDCH_CBLINK_NONE 0x0000
+#define XWAY_MMD_LEDCH_CBLINK_LINK 0x0001
+#define XWAY_MMD_LEDCH_CBLINK_PDOWN 0x0002
+#define XWAY_MMD_LEDCH_CBLINK_EEE 0x0003
+#define XWAY_MMD_LEDCH_CBLINK_ANEG 0x0004
+#define XWAY_MMD_LEDCH_CBLINK_ABIST 0x0005
+#define XWAY_MMD_LEDCH_CBLINK_CDIAG 0x0006
+#define XWAY_MMD_LEDCH_CBLINK_TEST 0x0007
+/* Complex SCAN Configuration */
+#define XWAY_MMD_LEDCH_SCAN_NONE 0x0000
+#define XWAY_MMD_LEDCH_SCAN_LINK 0x0010
+#define XWAY_MMD_LEDCH_SCAN_PDOWN 0x0020
+#define XWAY_MMD_LEDCH_SCAN_EEE 0x0030
+#define XWAY_MMD_LEDCH_SCAN_ANEG 0x0040
+#define XWAY_MMD_LEDCH_SCAN_ABIST 0x0050
+#define XWAY_MMD_LEDCH_SCAN_CDIAG 0x0060
+#define XWAY_MMD_LEDCH_SCAN_TEST 0x0070
+/* Configuration for LED Pin x */
+#define XWAY_MMD_LED0H 0x01E2
+/* Fast Blinking Configuration */
+#define XWAY_MMD_LEDxH_BLINKF_MASK 0x000F
+#define XWAY_MMD_LEDxH_BLINKF_NONE 0x0000
+#define XWAY_MMD_LEDxH_BLINKF_LINK10 0x0001
+#define XWAY_MMD_LEDxH_BLINKF_LINK100 0x0002
+#define XWAY_MMD_LEDxH_BLINKF_LINK10X 0x0003
+#define XWAY_MMD_LEDxH_BLINKF_LINK1000 0x0004
+#define XWAY_MMD_LEDxH_BLINKF_LINK10_0 0x0005
+#define XWAY_MMD_LEDxH_BLINKF_LINK100X 0x0006
+#define XWAY_MMD_LEDxH_BLINKF_LINK10XX 0x0007
+#define XWAY_MMD_LEDxH_BLINKF_PDOWN 0x0008
+#define XWAY_MMD_LEDxH_BLINKF_EEE 0x0009
+#define XWAY_MMD_LEDxH_BLINKF_ANEG 0x000A
+#define XWAY_MMD_LEDxH_BLINKF_ABIST 0x000B
+#define XWAY_MMD_LEDxH_BLINKF_CDIAG 0x000C
+/* Constant On Configuration */
+#define XWAY_MMD_LEDxH_CON_MASK 0x00F0
+#define XWAY_MMD_LEDxH_CON_NONE 0x0000
+#define XWAY_MMD_LEDxH_CON_LINK10 0x0010
+#define XWAY_MMD_LEDxH_CON_LINK100 0x0020
+#define XWAY_MMD_LEDxH_CON_LINK10X 0x0030
+#define XWAY_MMD_LEDxH_CON_LINK1000 0x0040
+#define XWAY_MMD_LEDxH_CON_LINK10_0 0x0050
+#define XWAY_MMD_LEDxH_CON_LINK100X 0x0060
+#define XWAY_MMD_LEDxH_CON_LINK10XX 0x0070
+#define XWAY_MMD_LEDxH_CON_PDOWN 0x0080
+#define XWAY_MMD_LEDxH_CON_EEE 0x0090
+#define XWAY_MMD_LEDxH_CON_ANEG 0x00A0
+#define XWAY_MMD_LEDxH_CON_ABIST 0x00B0
+#define XWAY_MMD_LEDxH_CON_CDIAG 0x00C0
+#define XWAY_MMD_LEDxH_CON_COPPER 0x00D0
+#define XWAY_MMD_LEDxH_CON_FIBER 0x00E0
+/* Configuration for LED Pin x */
+#define XWAY_MMD_LED0L 0x01E3
+/* Pulsing Configuration */
+#define XWAY_MMD_LEDxL_PULSE_MASK 0x000F
+#define XWAY_MMD_LEDxL_PULSE_NONE 0x0000
+#define XWAY_MMD_LEDxL_PULSE_TXACT 0x0001
+#define XWAY_MMD_LEDxL_PULSE_RXACT 0x0002
+#define XWAY_MMD_LEDxL_PULSE_COL 0x0004
+/* Slow Blinking Configuration */
+#define XWAY_MMD_LEDxL_BLINKS_MASK 0x00F0
+#define XWAY_MMD_LEDxL_BLINKS_NONE 0x0000
+#define XWAY_MMD_LEDxL_BLINKS_LINK10 0x0010
+#define XWAY_MMD_LEDxL_BLINKS_LINK100 0x0020
+#define XWAY_MMD_LEDxL_BLINKS_LINK10X 0x0030
+#define XWAY_MMD_LEDxL_BLINKS_LINK1000 0x0040
+#define XWAY_MMD_LEDxL_BLINKS_LINK10_0 0x0050
+#define XWAY_MMD_LEDxL_BLINKS_LINK100X 0x0060
+#define XWAY_MMD_LEDxL_BLINKS_LINK10XX 0x0070
+#define XWAY_MMD_LEDxL_BLINKS_PDOWN 0x0080
+#define XWAY_MMD_LEDxL_BLINKS_EEE 0x0090
+#define XWAY_MMD_LEDxL_BLINKS_ANEG 0x00A0
+#define XWAY_MMD_LEDxL_BLINKS_ABIST 0x00B0
+#define XWAY_MMD_LEDxL_BLINKS_CDIAG 0x00C0
+#define XWAY_MMD_LED1H 0x01E4
+#define XWAY_MMD_LED1L 0x01E5
+#define XWAY_MMD_LED2H 0x01E6
+#define XWAY_MMD_LED2L 0x01E7
+#define XWAY_MMD_LED3H 0x01E8
+#define XWAY_MMD_LED3L 0x01E9
+
+#define PHY_ID_PHY11G_1_3 0x030260D1
+#define PHY_ID_PHY22F_1_3 0x030260E1
+#define PHY_ID_PHY11G_1_4 0xD565A400
+#define PHY_ID_PHY22F_1_4 0xD565A410
+#define PHY_ID_PHY11G_1_5 0xD565A401
+#define PHY_ID_PHY22F_1_5 0xD565A411
+#define PHY_ID_PHY11G_VR9 0xD565A409
+#define PHY_ID_PHY22F_VR9 0xD565A419
+
+static int xway_gphy_config_init(struct phy_device *phydev)
+{
+ int err;
+ u32 ledxh;
+ u32 ledxl;
+
+ /* Mask all interrupts */
+ err = phy_write(phydev, XWAY_MDIO_IMASK, 0);
+ if (err)
+ return err;
+
+ /* Clear all pending interrupts */
+ phy_read(phydev, XWAY_MDIO_ISTAT);
+
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCH, MDIO_MMD_VEND2,
+ XWAY_MMD_LEDCH_NACS_NONE |
+ XWAY_MMD_LEDCH_SBF_F02HZ |
+ XWAY_MMD_LEDCH_FBF_F16HZ);
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCL, MDIO_MMD_VEND2,
+ XWAY_MMD_LEDCH_CBLINK_NONE |
+ XWAY_MMD_LEDCH_SCAN_NONE);
+
+	/* In most cases only one LED is connected to this PHY, so
+	 * configure all LED pins the same way: constant on plus
+	 * activity pulsing. LED3 is only available in some packages,
+	 * so leave it in its reset configuration.
+	 */
+ ledxh = XWAY_MMD_LEDxH_BLINKF_NONE | XWAY_MMD_LEDxH_CON_LINK10XX;
+ ledxl = XWAY_MMD_LEDxL_PULSE_TXACT | XWAY_MMD_LEDxL_PULSE_RXACT |
+ XWAY_MMD_LEDxL_BLINKS_NONE;
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LED0H, MDIO_MMD_VEND2, ledxh);
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LED0L, MDIO_MMD_VEND2, ledxl);
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LED1H, MDIO_MMD_VEND2, ledxh);
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LED1L, MDIO_MMD_VEND2, ledxl);
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LED2H, MDIO_MMD_VEND2, ledxh);
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LED2L, MDIO_MMD_VEND2, ledxl);
+
+ return 0;
+}
+
+static int xway_gphy14_config_aneg(struct phy_device *phydev)
+{
+ int reg, err;
+
+ /* Advertise as multi-port device, see IEEE802.3-2002 40.5.1.1 */
+ /* This is a workaround for an errata in rev < 1.5 devices */
+ reg = phy_read(phydev, MII_CTRL1000);
+ reg |= ADVERTISED_MPD;
+ err = phy_write(phydev, MII_CTRL1000, reg);
+ if (err)
+ return err;
+
+ return genphy_config_aneg(phydev);
+}
+
+static int xway_gphy_ack_interrupt(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read(phydev, XWAY_MDIO_ISTAT);
+ return (reg < 0) ? reg : 0;
+}
+
+static int xway_gphy_did_interrupt(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read(phydev, XWAY_MDIO_ISTAT);
+ return reg & XWAY_MDIO_INIT_MASK;
+}
+
+static int xway_gphy_config_intr(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ mask = XWAY_MDIO_INIT_MASK;
+
+ return phy_write(phydev, XWAY_MDIO_IMASK, mask);
+}
+
+static struct phy_driver xway_gphy[] = {
+ {
+ .phy_id = PHY_ID_PHY11G_1_3,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3",
+ .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = xway_gphy14_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY22F_1_3,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY22F (PEF 7061) v1.3",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = xway_gphy14_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY11G_1_4,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4",
+ .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = xway_gphy14_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY22F_1_4,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY22F (PEF 7061) v1.4",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = xway_gphy14_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY11G_1_5,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6",
+ .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY22F_1_5,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY11G_VR9,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY11G (xRX integrated)",
+ .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY22F_VR9,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY22F (xRX integrated)",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ },
+};
+module_phy_driver(xway_gphy);
+
+static struct mdio_device_id __maybe_unused xway_gphy_tbl[] = {
+ { PHY_ID_PHY11G_1_3, 0xffffffff },
+ { PHY_ID_PHY22F_1_3, 0xffffffff },
+ { PHY_ID_PHY11G_1_4, 0xffffffff },
+ { PHY_ID_PHY22F_1_4, 0xffffffff },
+ { PHY_ID_PHY11G_1_5, 0xffffffff },
+ { PHY_ID_PHY22F_1_5, 0xffffffff },
+ { PHY_ID_PHY11G_VR9, 0xffffffff },
+ { PHY_ID_PHY22F_VR9, 0xffffffff },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, xway_gphy_tbl);
+
+MODULE_DESCRIPTION("Intel XWAY PHY driver");
+MODULE_LICENSE("GPL");
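
xway_gphy_config_init() above programs the LED registers through phy_write_mmd_indirect(), which tunnels vendor MMD accesses over the Clause 22 MII_MMD_CTRL/MII_MMD_DATA register pair. A sketch of the indirect write sequence such a helper performs, using the MII_MMD_* constants from linux/mii.h; this mirrors the standard Clause 22 indirect procedure rather than the helper's exact source:

#include <linux/mii.h>
#include <linux/phy.h>

static void mmd_indirect_write(struct phy_device *phydev, int devad,
			       u16 regnum, u16 data)
{
	/* 1. select the MMD device, function = address */
	phy_write(phydev, MII_MMD_CTRL, devad);
	/* 2. latch the target register number */
	phy_write(phydev, MII_MMD_DATA, regnum);
	/* 3. switch the function to data, no post-increment */
	phy_write(phydev, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);
	/* 4. write the payload */
	phy_write(phydev, MII_MMD_DATA, data);
}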
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 280e8795b463..ec2c1eee6405 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -285,6 +285,48 @@ static int marvell_config_aneg(struct phy_device *phydev)
return 0;
}
+static int m88e1111_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ /* The Marvell PHY has an errata which requires
+ * that certain registers get written in order
+ * to restart autonegotiation
+ */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+ err = marvell_set_polarity(phydev, phydev->mdix);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL,
+ MII_M1111_PHY_LED_DIRECT);
+ if (err < 0)
+ return err;
+
+ err = genphy_config_aneg(phydev);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg != AUTONEG_ENABLE) {
+ int bmcr;
+
+ /* A write to speed/duplex bits (that is performed by
+ * genphy_config_aneg() call above) must be followed by
+ * a software reset. Otherwise, the write has no effect.
+ */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_OF_MDIO
/*
* Set and/or override some configuration registers based on the
@@ -407,15 +449,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
- phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
- phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
-
- err = genphy_config_aneg(phydev);
-
- return err;
+ return genphy_config_aneg(phydev);
}
static int m88e1318_config_aneg(struct phy_device *phydev)
@@ -636,6 +670,28 @@ static int m88e1111_config_init(struct phy_device *phydev)
return phy_write(phydev, MII_BMCR, BMCR_RESET);
}
+static int m88e1121_config_init(struct phy_device *phydev)
+{
+ int err, oldpage;
+
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+ if (err < 0)
+ return err;
+
+ /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+ err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
+ MII_88E1121_PHY_LED_DEF);
+ if (err < 0)
+ return err;
+
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+
+ /* Set marvell,reg-init configuration from device tree */
+ return marvell_config_init(phydev);
+}
+
static int m88e1510_config_init(struct phy_device *phydev)
{
int err;
@@ -668,7 +724,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
return err;
}
- return marvell_config_init(phydev);
+ return m88e1121_config_init(phydev);
}
static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -1161,7 +1217,7 @@ static struct phy_driver marvell_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
- .config_aneg = &marvell_config_aneg,
+ .config_aneg = &m88e1111_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
@@ -1196,7 +1252,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &marvell_config_init,
+ .config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
@@ -1215,7 +1271,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &marvell_config_init,
+ .config_init = &m88e1121_config_init,
.config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
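
The new m88e1121_config_init() keeps the usual Marvell paged-register pattern: save the current page, switch to the LED page, program the register, then restore the page. The pattern in isolation, with an error check on the initial page read that the hunk above omits:

static int marvell_led_setup(struct phy_device *phydev)
{
	int oldpage, err;

	oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
	if (oldpage < 0)
		return oldpage;

	err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
			MII_88E1121_PHY_LED_PAGE);
	if (err < 0)
		return err;

	err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
			MII_88E1121_PHY_LED_DEF);

	/* restore the original page even if the LED write failed */
	phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
	return err;
}

Moving this from config_aneg to config_init also means the LED defaults are programmed once at probe rather than on every renegotiation.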
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
new file mode 100644
index 000000000000..0a0412524cec
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/mdio-mux.h>
+#include <linux/delay.h>
+
+#define MDIO_PARAM_OFFSET 0x00
+#define MDIO_PARAM_MIIM_CYCLE 29
+#define MDIO_PARAM_INTERNAL_SEL 25
+#define MDIO_PARAM_BUS_ID 22
+#define MDIO_PARAM_C45_SEL 21
+#define MDIO_PARAM_PHY_ID 16
+#define MDIO_PARAM_PHY_DATA 0
+
+#define MDIO_READ_OFFSET 0x04
+#define MDIO_READ_DATA_MASK 0xffff
+#define MDIO_ADDR_OFFSET 0x08
+
+#define MDIO_CTRL_OFFSET 0x0C
+#define MDIO_CTRL_WRITE_OP 0x1
+#define MDIO_CTRL_READ_OP 0x2
+
+#define MDIO_STAT_OFFSET 0x10
+#define MDIO_STAT_DONE 1
+
+#define BUS_MAX_ADDR 32
+#define EXT_BUS_START_ADDR 16
+
+struct iproc_mdiomux_desc {
+ void *mux_handle;
+ void __iomem *base;
+ struct device *dev;
+ struct mii_bus *mii_bus;
+};
+
+static int iproc_mdio_wait_for_idle(void __iomem *base, bool result)
+{
+ unsigned int timeout = 1000; /* loop for 1s */
+ u32 val;
+
+ do {
+ val = readl(base + MDIO_STAT_OFFSET);
+ if ((val & MDIO_STAT_DONE) == result)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+/* start_miim_ops - program and start an MDIO transaction over the MDIO bus.
+ * @base: base address
+ * @phyid: PHY ID on the selected bus.
+ * @reg: register offset to be read/written.
+ * @val: 0 for a read, otherwise the value to be written to @reg.
+ * @op: operation to be carried out.
+ *    MDIO_CTRL_READ_OP: read transaction.
+ *    MDIO_CTRL_WRITE_OP: write transaction.
+ *
+ * Return value: a successful read returns the register value and a successful
+ *      write returns 0. On failure, a negative error code is returned.
+ */
+static int start_miim_ops(void __iomem *base,
+ u16 phyid, u32 reg, u16 val, u32 op)
+{
+ u32 param;
+ int ret;
+
+ writel(0, base + MDIO_CTRL_OFFSET);
+ ret = iproc_mdio_wait_for_idle(base, 0);
+ if (ret)
+ goto err;
+
+ param = readl(base + MDIO_PARAM_OFFSET);
+ param |= phyid << MDIO_PARAM_PHY_ID;
+ param |= val << MDIO_PARAM_PHY_DATA;
+ if (reg & MII_ADDR_C45)
+ param |= BIT(MDIO_PARAM_C45_SEL);
+
+ writel(param, base + MDIO_PARAM_OFFSET);
+
+ writel(reg, base + MDIO_ADDR_OFFSET);
+
+ writel(op, base + MDIO_CTRL_OFFSET);
+
+ ret = iproc_mdio_wait_for_idle(base, 1);
+ if (ret)
+ goto err;
+
+ if (op == MDIO_CTRL_READ_OP)
+ ret = readl(base + MDIO_READ_OFFSET) & MDIO_READ_DATA_MASK;
+err:
+ return ret;
+}
+
+static int iproc_mdiomux_read(struct mii_bus *bus, int phyid, int reg)
+{
+ struct iproc_mdiomux_desc *md = bus->priv;
+ int ret;
+
+ ret = start_miim_ops(md->base, phyid, reg, 0, MDIO_CTRL_READ_OP);
+ if (ret < 0)
+ dev_err(&bus->dev, "mdiomux read operation failed!!!");
+
+ return ret;
+}
+
+static int iproc_mdiomux_write(struct mii_bus *bus,
+ int phyid, int reg, u16 val)
+{
+ struct iproc_mdiomux_desc *md = bus->priv;
+ int ret;
+
+ /* Write val at reg offset */
+ ret = start_miim_ops(md->base, phyid, reg, val, MDIO_CTRL_WRITE_OP);
+ if (ret < 0)
+ dev_err(&bus->dev, "mdiomux write operation failed!!!");
+
+ return ret;
+}
+
+static int mdio_mux_iproc_switch_fn(int current_child, int desired_child,
+ void *data)
+{
+ struct iproc_mdiomux_desc *md = data;
+ u32 param, bus_id;
+ bool bus_dir;
+
+ /* select bus and its properties */
+ bus_dir = (desired_child < EXT_BUS_START_ADDR);
+ bus_id = bus_dir ? desired_child : (desired_child - EXT_BUS_START_ADDR);
+
+ param = (bus_dir ? 1 : 0) << MDIO_PARAM_INTERNAL_SEL;
+ param |= (bus_id << MDIO_PARAM_BUS_ID);
+
+ writel(param, md->base + MDIO_PARAM_OFFSET);
+ return 0;
+}
+
+static int mdio_mux_iproc_probe(struct platform_device *pdev)
+{
+ struct iproc_mdiomux_desc *md;
+ struct mii_bus *bus;
+ struct resource *res;
+ int rc;
+
+ md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);
+ if (!md)
+ return -ENOMEM;
+ md->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ md->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(md->base)) {
+ dev_err(&pdev->dev, "failed to ioremap register\n");
+ return PTR_ERR(md->base);
+ }
+
+ md->mii_bus = mdiobus_alloc();
+ if (!md->mii_bus) {
+ dev_err(&pdev->dev, "mdiomux bus alloc failed\n");
+ return -ENOMEM;
+ }
+
+ bus = md->mii_bus;
+ bus->priv = md;
+ bus->name = "iProc MDIO mux bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
+ bus->parent = &pdev->dev;
+ bus->read = iproc_mdiomux_read;
+ bus->write = iproc_mdiomux_write;
+
+ bus->phy_mask = ~0;
+ bus->dev.of_node = pdev->dev.of_node;
+ rc = mdiobus_register(bus);
+ if (rc) {
+ dev_err(&pdev->dev, "mdiomux registration failed\n");
+ goto out;
+ }
+
+ platform_set_drvdata(pdev, md);
+
+ rc = mdio_mux_init(md->dev, mdio_mux_iproc_switch_fn,
+ &md->mux_handle, md, md->mii_bus);
+ if (rc) {
+ dev_info(md->dev, "mdiomux initialization failed\n");
+ goto out;
+ }
+
+ dev_info(md->dev, "iProc mdiomux registered\n");
+ return 0;
+out:
+ mdiobus_free(bus);
+ return rc;
+}
+
+static int mdio_mux_iproc_remove(struct platform_device *pdev)
+{
+ struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev);
+
+ mdio_mux_uninit(md->mux_handle);
+ mdiobus_unregister(md->mii_bus);
+ mdiobus_free(md->mii_bus);
+
+ return 0;
+}
+
+static const struct of_device_id mdio_mux_iproc_match[] = {
+ {
+ .compatible = "brcm,mdio-mux-iproc",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mdio_mux_iproc_match);
+
+static struct platform_driver mdiomux_iproc_driver = {
+ .driver = {
+ .name = "mdio-mux-iproc",
+ .of_match_table = mdio_mux_iproc_match,
+ },
+ .probe = mdio_mux_iproc_probe,
+ .remove = mdio_mux_iproc_remove,
+};
+
+module_platform_driver(mdiomux_iproc_driver);
+
+MODULE_DESCRIPTION("iProc MDIO Mux Bus Driver");
+MODULE_AUTHOR("Pramod Kumar <pramod.kumar@broadcom.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 7ddb1ab70891..919949960a10 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -55,7 +55,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
return PTR_ERR(s->gpios);
r = mdio_mux_init(&pdev->dev,
- mdio_mux_gpio_switch_fn, &s->mux_handle, s);
+ mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL);
if (r != 0) {
gpiod_put_array(s->gpios);
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 7fde454fbc4f..d0bed52c8d16 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -126,7 +126,7 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
}
ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn,
- &s->mux_handle, s);
+ &s->mux_handle, s, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n",
np->full_name);
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 5c81d6faf304..963838d4fac1 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -89,7 +89,8 @@ static int parent_count;
int mdio_mux_init(struct device *dev,
int (*switch_fn)(int cur, int desired, void *data),
void **mux_handle,
- void *data)
+ void *data,
+ struct mii_bus *mux_bus)
{
struct device_node *parent_bus_node;
struct device_node *child_bus_node;
@@ -101,10 +102,22 @@ int mdio_mux_init(struct device *dev,
if (!dev->of_node)
return -ENODEV;
- parent_bus_node = of_parse_phandle(dev->of_node, "mdio-parent-bus", 0);
+ if (!mux_bus) {
+ parent_bus_node = of_parse_phandle(dev->of_node,
+ "mdio-parent-bus", 0);
- if (!parent_bus_node)
- return -ENODEV;
+ if (!parent_bus_node)
+ return -ENODEV;
+
+ parent_bus = of_mdio_find_bus(parent_bus_node);
+ if (!parent_bus) {
+ ret_val = -EPROBE_DEFER;
+ goto err_parent_bus;
+ }
+ } else {
+ parent_bus_node = NULL;
+ parent_bus = mux_bus;
+ }
pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
if (pb == NULL) {
@@ -112,11 +125,6 @@ int mdio_mux_init(struct device *dev,
goto err_parent_bus;
}
- parent_bus = of_mdio_find_bus(parent_bus_node);
- if (parent_bus == NULL) {
- ret_val = -EPROBE_DEFER;
- goto err_parent_bus;
- }
pb->switch_data = data;
pb->switch_fn = switch_fn;
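
With the extra mux_bus argument, mdio_mux_init() now resolves the parent bus in one of two ways: directly from the caller, or, when mux_bus is NULL, from the "mdio-parent-bus" phandle as before. The two call styles, with my_switch_fn standing in for a driver's selector callback:

/* legacy style: parent bus found via the "mdio-parent-bus" phandle,
 * as in the mdio-mux-gpio and mdio-mux-mmioreg hunks above
 */
ret = mdio_mux_init(&pdev->dev, my_switch_fn, &mux_handle, priv, NULL);

/* new style: the caller supplies the parent bus it allocated itself,
 * as mdio_mux_iproc_probe() does with md->mii_bus
 */
ret = mdio_mux_init(&pdev->dev, my_switch_fn, &mux_handle, priv,
		    md->mii_bus);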
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 2e21e9366f76..b62c4aaee40b 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -75,22 +75,13 @@ static int smsc_phy_reset(struct phy_device *phydev)
* in all capable mode before using it.
*/
if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
- int timeout = 50000;
-
- /* set "all capable" mode and reset the phy */
+ /* set "all capable" mode */
rc |= MII_LAN83C185_MODE_ALL;
phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
- phy_write(phydev, MII_BMCR, BMCR_RESET);
-
- /* wait end of reset (max 500 ms) */
- do {
- udelay(10);
- if (timeout-- == 0)
- return -1;
- rc = phy_read(phydev, MII_BMCR);
- } while (rc & BMCR_RESET);
}
- return 0;
+
+ /* reset the phy */
+ return genphy_soft_reset(phydev);
}
static int lan911x_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c
new file mode 100644
index 000000000000..34f58f2349e9
--- /dev/null
+++ b/drivers/net/phy/swphy.c
@@ -0,0 +1,179 @@
+/*
+ * Software PHY emulation
+ *
+ * Code taken from fixed_phy.c by Russell King <rmk+kernel@arm.linux.org.uk>
+ *
+ * Author: Vitaly Bordug <vbordug@ru.mvista.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * Copyright (c) 2006-2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/export.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+
+#include "swphy.h"
+
+#define MII_REGS_NUM 29
+
+struct swmii_regs {
+ u16 bmcr;
+ u16 bmsr;
+ u16 lpa;
+ u16 lpagb;
+};
+
+enum {
+ SWMII_SPEED_10 = 0,
+ SWMII_SPEED_100,
+ SWMII_SPEED_1000,
+ SWMII_DUPLEX_HALF = 0,
+ SWMII_DUPLEX_FULL,
+};
+
+/*
+ * These two tables get bitwise-anded together to produce the final result.
+ * This means the speed table must contain both duplex settings, and the
+ * duplex table must contain all speed settings.
+ */
+static const struct swmii_regs speed[] = {
+ [SWMII_SPEED_10] = {
+ .bmcr = BMCR_FULLDPLX,
+ .lpa = LPA_10FULL | LPA_10HALF,
+ },
+ [SWMII_SPEED_100] = {
+ .bmcr = BMCR_FULLDPLX | BMCR_SPEED100,
+ .bmsr = BMSR_100FULL | BMSR_100HALF,
+ .lpa = LPA_100FULL | LPA_100HALF,
+ },
+ [SWMII_SPEED_1000] = {
+ .bmcr = BMCR_FULLDPLX | BMCR_SPEED1000,
+ .bmsr = BMSR_ESTATEN,
+ .lpagb = LPA_1000FULL | LPA_1000HALF,
+ },
+};
+
+static const struct swmii_regs duplex[] = {
+ [SWMII_DUPLEX_HALF] = {
+ .bmcr = ~BMCR_FULLDPLX,
+ .bmsr = BMSR_ESTATEN | BMSR_100HALF,
+ .lpa = LPA_10HALF | LPA_100HALF,
+ .lpagb = LPA_1000HALF,
+ },
+ [SWMII_DUPLEX_FULL] = {
+ .bmcr = ~0,
+ .bmsr = BMSR_ESTATEN | BMSR_100FULL,
+ .lpa = LPA_10FULL | LPA_100FULL,
+ .lpagb = LPA_1000FULL,
+ },
+};
+
+static int swphy_decode_speed(int speed)
+{
+ switch (speed) {
+ case 1000:
+ return SWMII_SPEED_1000;
+ case 100:
+ return SWMII_SPEED_100;
+ case 10:
+ return SWMII_SPEED_10;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * swphy_validate_state - validate the software phy status
+ * @state: software phy status
+ *
+ * This checks that the state stored in @state can be represented in the
+ * emulated MII registers. Returns 0 if it can, otherwise returns -EINVAL.
+ */
+int swphy_validate_state(const struct fixed_phy_status *state)
+{
+ int err;
+
+ if (state->link) {
+ err = swphy_decode_speed(state->speed);
+ if (err < 0) {
+ pr_warn("swphy: unknown speed\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(swphy_validate_state);
+
+/**
+ * swphy_read_reg - return a MII register from the fixed phy state
+ * @reg: MII register
+ * @state: fixed phy status
+ *
+ * Return the MII @reg register generated from the fixed phy state @state.
+ */
+int swphy_read_reg(int reg, const struct fixed_phy_status *state)
+{
+ int speed_index, duplex_index;
+ u16 bmsr = BMSR_ANEGCAPABLE;
+ u16 bmcr = 0;
+ u16 lpagb = 0;
+ u16 lpa = 0;
+
+ if (reg > MII_REGS_NUM)
+ return -1;
+
+ speed_index = swphy_decode_speed(state->speed);
+ if (WARN_ON(speed_index < 0))
+ return 0;
+
+ duplex_index = state->duplex ? SWMII_DUPLEX_FULL : SWMII_DUPLEX_HALF;
+
+ bmsr |= speed[speed_index].bmsr & duplex[duplex_index].bmsr;
+
+ if (state->link) {
+ bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
+
+ bmcr |= speed[speed_index].bmcr & duplex[duplex_index].bmcr;
+ lpa |= speed[speed_index].lpa & duplex[duplex_index].lpa;
+ lpagb |= speed[speed_index].lpagb & duplex[duplex_index].lpagb;
+
+ if (state->pause)
+ lpa |= LPA_PAUSE_CAP;
+
+ if (state->asym_pause)
+ lpa |= LPA_PAUSE_ASYM;
+ }
+
+ switch (reg) {
+ case MII_BMCR:
+ return bmcr;
+ case MII_BMSR:
+ return bmsr;
+ case MII_PHYSID1:
+ case MII_PHYSID2:
+ return 0;
+ case MII_LPA:
+ return lpa;
+ case MII_STAT1000:
+ return lpagb;
+
+ /*
+ * We do not support emulating Clause 45 over Clause 22 register
+ * reads. Return an error instead of bogus data.
+ */
+ case MII_MMD_CTRL:
+ case MII_MMD_DATA:
+ return -1;
+
+ default:
+ return 0xffff;
+ }
+}
+EXPORT_SYMBOL_GPL(swphy_read_reg);
diff --git a/drivers/net/phy/swphy.h b/drivers/net/phy/swphy.h
new file mode 100644
index 000000000000..2f09ac324e18
--- /dev/null
+++ b/drivers/net/phy/swphy.h
@@ -0,0 +1,9 @@
+#ifndef SWPHY_H
+#define SWPHY_H
+
+struct fixed_phy_status;
+
+int swphy_validate_state(const struct fixed_phy_status *state);
+int swphy_read_reg(int reg, const struct fixed_phy_status *state);
+
+#endif
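
Together these two entry points let fixed_phy validate a status once at registration time and then synthesize MII register reads on demand. A minimal usage sketch with an illustrative link state:

#include <linux/mii.h>
#include <linux/phy_fixed.h>
#include "swphy.h"

static int swphy_example(void)
{
	struct fixed_phy_status status = {
		.link	= 1,
		.speed	= 100,
		.duplex	= 1,
	};

	if (swphy_validate_state(&status) < 0)
		return -EINVAL;		/* e.g. an unsupported speed */

	/* emulated BMSR: link up, aneg complete, 100/full capable */
	return swphy_read_reg(MII_BMSR, &status);
}

Splitting the table-driven register emulation out of fixed_phy.c is also what lets other software-PHY users select SWPHY without pulling in the fixed MDIO bus.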
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 8dedafa1a95d..17953ab15000 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1312,10 +1312,9 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
return stats64;
}
-static struct lock_class_key ppp_tx_busylock;
static int ppp_dev_init(struct net_device *dev)
{
- dev->qdisc_tx_busylock = &ppp_tx_busylock;
+ netdev_lockdep_set_classes(dev);
return 0;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 2ace126533cd..a380649bf6b5 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1203,8 +1203,10 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_dev_open;
}
+ netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
err = vlan_vids_add_by_dev(port_dev, dev);
if (err) {
@@ -1574,23 +1576,6 @@ static const struct team_option team_options[] = {
},
};
-static struct lock_class_key team_netdev_xmit_lock_key;
-static struct lock_class_key team_netdev_addr_lock_key;
-static struct lock_class_key team_tx_busylock_key;
-
-static void team_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
-}
-
-static void team_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
- dev->qdisc_tx_busylock = &team_tx_busylock_key;
-}
static int team_init(struct net_device *dev)
{
@@ -1626,7 +1611,7 @@ static int team_init(struct net_device *dev)
goto err_options_register;
netif_carrier_off(dev);
- team_set_lockdep_class(dev);
+ netdev_lockdep_set_classes(dev);
return 0;
@@ -2017,6 +2002,8 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_add_slave = team_add_slave,
.ndo_del_slave = team_del_slave,
.ndo_fix_features = team_fix_features,
+ .ndo_neigh_construct = netdev_default_l2upper_neigh_construct,
+ .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy,
.ndo_change_carrier = team_change_carrier,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
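
The team_port_add() hunk takes the team device's address-list lock around the address sync; with the per-driver lockdep keys gone, dev_uc_sync_multiple()/dev_mc_sync_multiple() run with the upper device's netif_addr_lock held in BH context, which is the locking this patch appears to assume. The pattern as that hunk reads, with team_dev/port_dev as the upper and lower devices:

netif_addr_lock_bh(team_dev);			/* upper device's lock */
dev_uc_sync_multiple(port_dev, team_dev);	/* copy unicast list down */
dev_mc_sync_multiple(port_dev, team_dev);	/* copy multicast list down */
netif_addr_unlock_bh(team_dev);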
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e16487cc6a9a..5eadb7a1ad7b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -71,6 +71,7 @@
#include <net/sock.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
+#include <linux/skb_array.h>
#include <asm/uaccess.h>
@@ -167,6 +168,7 @@ struct tun_file {
};
struct list_head next;
struct tun_struct *detached;
+ struct skb_array tx_array;
};
struct tun_flow_entry {
@@ -515,7 +517,11 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
static void tun_queue_purge(struct tun_file *tfile)
{
- skb_queue_purge(&tfile->sk.sk_receive_queue);
+ struct sk_buff *skb;
+
+ while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
+ kfree_skb(skb);
+
skb_queue_purge(&tfile->sk.sk_error_queue);
}
@@ -560,6 +566,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
+ if (tun)
+ skb_array_cleanup(&tfile->tx_array);
sock_put(&tfile->sk);
}
}
@@ -613,6 +621,7 @@ static void tun_detach_all(struct net_device *dev)
static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
{
struct tun_file *tfile = file->private_data;
+ struct net_device *dev = tun->dev;
int err;
err = security_tun_dev_attach(tfile->socket.sk, tun->security);
@@ -642,6 +651,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
if (!err)
goto out;
}
+
+ if (!tfile->detached &&
+ skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
tfile->queue_index = tun->numqueues;
tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
rcu_assign_pointer(tfile->tun, tun);
@@ -891,8 +907,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset(skb);
- /* Enqueue packet */
- skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
+ if (skb_array_produce(&tfile->tx_array, skb))
+ goto drop;
/* Notify and wake up reader process */
if (tfile->flags & TUN_FASYNC)
@@ -1107,7 +1123,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
poll_wait(file, sk_sleep(sk), wait);
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_array_empty(&tfile->tx_array))
mask |= POLLIN | POLLRDNORM;
if (sock_writeable(sk) ||
@@ -1254,13 +1270,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return -EFAULT;
}
- if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
- tun16_to_cpu(tun, gso.csum_offset))) {
- this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
- kfree_skb(skb);
- return -EINVAL;
- }
+ err = virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun));
+ if (err) {
+ this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
+ kfree_skb(skb);
+ return -EINVAL;
}
switch (tun->flags & TUN_TYPE_MASK) {
@@ -1289,39 +1303,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
break;
}
- if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- pr_debug("GSO!\n");
- switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
- case VIRTIO_NET_HDR_GSO_TCPV4:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
- break;
- case VIRTIO_NET_HDR_GSO_TCPV6:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
- break;
- case VIRTIO_NET_HDR_GSO_UDP:
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
- break;
- default:
- this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
- kfree_skb(skb);
- return -EINVAL;
- }
-
- if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
-
- skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
- if (skb_shinfo(skb)->gso_size == 0) {
- this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
- kfree_skb(skb);
- return -EINVAL;
- }
-
- /* Header must be checked, and gso_segs computed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
- }
-
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
skb_shinfo(skb)->destructor_arg = msg_control;
@@ -1399,46 +1380,26 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (vnet_hdr_sz) {
struct virtio_net_hdr gso = { 0 }; /* no info leak */
+ int ret;
+
if (iov_iter_count(iter) < vnet_hdr_sz)
return -EINVAL;
- if (skb_is_gso(skb)) {
+ ret = virtio_net_hdr_from_skb(skb, &gso,
+ tun_is_little_endian(tun));
+ if (ret) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
-
- /* This is a hint as to how much should be linear. */
- gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb));
- gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size);
- if (sinfo->gso_type & SKB_GSO_TCPV4)
- gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
- else if (sinfo->gso_type & SKB_GSO_TCPV6)
- gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
- else if (sinfo->gso_type & SKB_GSO_UDP)
- gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
- else {
- pr_err("unexpected GSO type: "
- "0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
- tun16_to_cpu(tun, gso.hdr_len));
- print_hex_dump(KERN_ERR, "tun: ",
- DUMP_PREFIX_NONE,
- 16, 1, skb->head,
- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
- if (sinfo->gso_type & SKB_GSO_TCP_ECN)
- gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
- } else
- gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
- vlan_hlen);
- gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
- } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
- } /* else everything is zero */
+ pr_err("unexpected GSO type: "
+ "0x%x, gso_size %d, hdr_len %d\n",
+ sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+ tun16_to_cpu(tun, gso.hdr_len));
+ print_hex_dump(KERN_ERR, "tun: ",
+ DUMP_PREFIX_NONE,
+ 16, 1, skb->head,
+ min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
return -EFAULT;
@@ -1481,22 +1442,63 @@ done:
return total;
}
+static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
+ int *err)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct sk_buff *skb = NULL;
+ int error = 0;
+
+ skb = skb_array_consume(&tfile->tx_array);
+ if (skb)
+ goto out;
+ if (noblock) {
+ error = -EAGAIN;
+ goto out;
+ }
+
+ add_wait_queue(&tfile->wq.wait, &wait);
+ current->state = TASK_INTERRUPTIBLE;
+
+ while (1) {
+ skb = skb_array_consume(&tfile->tx_array);
+ if (skb)
+ break;
+ if (signal_pending(current)) {
+ error = -ERESTARTSYS;
+ break;
+ }
+ if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
+ error = -EFAULT;
+ break;
+ }
+
+ schedule();
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&tfile->wq.wait, &wait);
+
+out:
+ *err = error;
+ return skb;
+}
+
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
struct iov_iter *to,
int noblock)
{
struct sk_buff *skb;
ssize_t ret;
- int peeked, err, off = 0;
+ int err;
tun_debug(KERN_INFO, tun, "tun_do_read\n");
if (!iov_iter_count(to))
return 0;
- /* Read frames from queue */
- skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
- &peeked, &off, &err);
+ /* Read frames from ring */
+ skb = tun_ring_recv(tfile, noblock, &err);
if (!skb)
return err;
@@ -1629,8 +1631,25 @@ out:
return ret;
}
+static int tun_peek_len(struct socket *sock)
+{
+ struct tun_file *tfile = container_of(sock, struct tun_file, socket);
+ struct tun_struct *tun;
+ int ret = 0;
+
+ tun = __tun_get(tfile);
+ if (!tun)
+ return 0;
+
+ ret = skb_array_peek_len(&tfile->tx_array);
+ tun_put(tun);
+
+ return ret;
+}
+
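The new callback lets a consumer such as vhost-net learn the length of the next queued frame without dequeuing it. A hedged sketch of a caller; everything but the peek_len member added above is illustrative:

/* Sketch only: length of the next queued frame, or 0 when the
 * transport cannot peek or nothing is queued.
 */
static int next_frame_len(struct socket *sock)
{
	if (!sock->ops->peek_len)
		return 0;
	return sock->ops->peek_len(sock);
}
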
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
+ .peek_len = tun_peek_len,
.sendmsg = tun_sendmsg,
.recvmsg = tun_recvmsg,
};
@@ -2452,6 +2471,55 @@ static const struct ethtool_ops tun_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
+static int tun_queue_resize(struct tun_struct *tun)
+{
+ struct net_device *dev = tun->dev;
+ struct tun_file *tfile;
+ struct skb_array **arrays;
+ int n = tun->numqueues + tun->numdisabled;
+ int ret, i;
+
+ arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
+ if (!arrays)
+ return -ENOMEM;
+
+ for (i = 0; i < tun->numqueues; i++) {
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ arrays[i] = &tfile->tx_array;
+ }
+ list_for_each_entry(tfile, &tun->disabled, next)
+ arrays[i++] = &tfile->tx_array;
+
+ ret = skb_array_resize_multiple(arrays, n,
+ dev->tx_queue_len, GFP_KERNEL);
+
+ kfree(arrays);
+ return ret;
+}
+
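tun_queue_resize() gathers the rings of both the enabled and the disabled queues and resizes them in a single call, which carries already-queued skbs over (up to the new capacity). A hedged sketch of the underlying API as used above:

/* Sketch only: rings a and b are hypothetical; on success both are
 * resized to new_len and previously queued entries are retained.
 */
struct skb_array *rings[] = { &a, &b };
int err = skb_array_resize_multiple(rings, ARRAY_SIZE(rings),
				    new_len, GFP_KERNEL);
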
+static int tun_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct tun_struct *tun = netdev_priv(dev);
+
+	if (dev->rtnl_link_ops != &tun_link_ops)
+		return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGE_TX_QUEUE_LEN:
+ if (tun_queue_resize(tun))
+ return NOTIFY_BAD;
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tun_notifier_block __read_mostly = {
+ .notifier_call = tun_device_event,
+};
static int __init tun_init(void)
{
@@ -2471,6 +2537,8 @@ static int __init tun_init(void)
pr_err("Can't register misc device %d\n", TUN_MINOR);
goto err_misc;
}
+
+ register_netdevice_notifier(&tun_notifier_block);
return 0;
err_misc:
rtnl_link_unregister(&tun_link_ops);
@@ -2482,6 +2550,7 @@ static void tun_cleanup(void)
{
misc_deregister(&tun_miscdev);
rtnl_link_unregister(&tun_link_ops);
+ unregister_netdevice_notifier(&tun_notifier_block);
}
/* Get an underlying socket object from tun file. Returns error unless file is
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 3f9f6ed3eec4..24d367280ecf 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -31,7 +31,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "3"
+#define NET_VERSION "4"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -116,6 +116,7 @@
#define USB_TX_DMA 0xd434
#define USB_TOLERANCE 0xd490
#define USB_LPM_CTRL 0xd41a
+#define USB_BMU_RESET 0xd4b0
#define USB_UPS_CTRL 0xd800
#define USB_MISC_0 0xd81a
#define USB_POWER_CUT 0xd80a
@@ -338,6 +339,10 @@
#define TEST_MODE_DISABLE 0x00000001
#define TX_SIZE_ADJUST1 0x00000100
+/* USB_BMU_RESET */
+#define BMU_RESET_EP_IN 0x01
+#define BMU_RESET_EP_OUT 0x02
+
/* USB_UPS_CTRL */
#define POWER_CUT 0x0100
@@ -602,7 +607,7 @@ struct r8152 {
struct list_head rx_done, tx_free;
struct sk_buff_head tx_queue, rx_queue;
spinlock_t rx_lock, tx_lock;
- struct delayed_work schedule;
+ struct delayed_work schedule, hw_phy_work;
struct mii_if_info mii;
struct mutex control; /* use for hw setting */
#ifdef CONFIG_PM_SLEEP
@@ -619,6 +624,7 @@ struct r8152 {
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
bool (*in_nway)(struct r8152 *);
+ void (*hw_phy_cfg)(struct r8152 *);
} rtl_ops;
int intr_interval;
@@ -627,8 +633,11 @@ struct r8152 {
u32 tx_qlen;
u32 coalesce;
u16 ocp_base;
+ u16 speed;
u8 *intr_buff;
u8 version;
+ u8 duplex;
+ u8 autoneg;
};
enum rtl_version {
@@ -1742,7 +1751,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
pkt_len -= CRC_SIZE;
rx_data += sizeof(struct rx_desc);
- skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
+ skb = napi_alloc_skb(&tp->napi, pkt_len);
if (!skb) {
stats->rx_dropped++;
goto find_next_rx;
@@ -2169,7 +2178,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
static void r8153_set_rx_early_size(struct r8152 *tp)
{
u32 mtu = tp->netdev->mtu;
- u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4;
+ u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
}
@@ -2456,6 +2465,17 @@ static void r8153_teredo_off(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
}
+static void rtl_reset_bmu(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET);
+ ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+ ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+}
+
static void r8152_aldps_en(struct r8152 *tp, bool enable)
{
if (enable) {
@@ -2499,8 +2519,6 @@ static void r8152b_exit_oob(struct r8152 *tp)
rxdy_gated_en(tp, true);
r8153_teredo_off(tp);
- r8152b_hw_phy_cfg(tp);
-
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
@@ -2678,9 +2696,8 @@ static void r8153_first_init(struct r8152 *tp)
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- r8153_hw_phy_cfg(tp);
-
rtl8152_nic_reset(tp);
+ rtl_reset_bmu(tp);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
@@ -2742,6 +2759,7 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
rtl_disable(tp);
+ rtl_reset_bmu(tp);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2803,6 +2821,7 @@ static void rtl8153_disable(struct r8152 *tp)
{
r8153_aldps_en(tp, false);
rtl_disable(tp);
+ rtl_reset_bmu(tp);
r8153_aldps_en(tp, true);
usb_enable_lpm(tp->udev);
}
@@ -2872,7 +2891,7 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
}
- if (test_bit(PHY_RESET, &tp->flags))
+ if (test_and_clear_bit(PHY_RESET, &tp->flags))
bmcr |= BMCR_RESET;
if (tp->mii.supports_gmii)
@@ -2881,7 +2900,7 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
r8152_mdio_write(tp, MII_ADVERTISE, anar);
r8152_mdio_write(tp, MII_BMCR, bmcr);
- if (test_and_clear_bit(PHY_RESET, &tp->flags)) {
+ if (bmcr & BMCR_RESET) {
int i;
for (i = 0; i < 50; i++) {
@@ -3040,6 +3059,27 @@ out1:
usb_autopm_put_interface(tp->intf);
}
+static void rtl_hw_phy_work_func_t(struct work_struct *work)
+{
+ struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
+ mutex_lock(&tp->control);
+
+ tp->rtl_ops.hw_phy_cfg(tp);
+
+ rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex);
+
+ mutex_unlock(&tp->control);
+
+ usb_autopm_put_interface(tp->intf);
+}
+
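The work item brackets hardware access with a runtime-PM get/put and serializes it against other paths through tp->control. Its lifecycle, pieced together from the probe, resume and disconnect hunks below, reduces to this sketch:

/* Sketch only: one-shot deferred PHY setup on the long workqueue. */
INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t); /* probe */
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); /* probe/resume */
cancel_delayed_work_sync(&tp->hw_phy_work); /* disconnect */
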
#ifdef CONFIG_PM_SLEEP
static int rtl_notifier(struct notifier_block *nb, unsigned long action,
void *data)
@@ -3088,9 +3128,6 @@ static int rtl8152_open(struct net_device *netdev)
tp->rtl_ops.up(tp);
- rtl8152_set_speed(tp, AUTONEG_ENABLE,
- tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
- DUPLEX_FULL);
netif_carrier_off(netdev);
netif_start_queue(netdev);
set_bit(WORK_ENABLE, &tp->flags);
@@ -3382,15 +3419,11 @@ static void r8153_init(struct r8152 *tp)
r8153_power_cut_en(tp, false);
r8153_u1u2en(tp, true);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
- PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
- U1U2_SPDWN_EN | L1_SPDWN_EN);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
- PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
- TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
- EEE_SPDWN_EN);
+ /* MAC clock speed down */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
r8153_enable_eee(tp);
r8153_aldps_en(tp, true);
@@ -3518,6 +3551,7 @@ static int rtl8152_resume(struct usb_interface *intf)
if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
tp->rtl_ops.init(tp);
+ queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
netif_device_attach(tp->netdev);
}
@@ -3532,10 +3566,6 @@ static int rtl8152_resume(struct usb_interface *intf)
napi_enable(&tp->napi);
} else {
tp->rtl_ops.up(tp);
- rtl8152_set_speed(tp, AUTONEG_ENABLE,
- tp->mii.supports_gmii ?
- SPEED_1000 : SPEED_100,
- DUPLEX_FULL);
netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
}
@@ -3665,6 +3695,11 @@ static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
mutex_lock(&tp->control);
ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+ if (!ret) {
+ tp->autoneg = cmd->autoneg;
+ tp->speed = cmd->speed;
+ tp->duplex = cmd->duplex;
+ }
mutex_unlock(&tp->control);
@@ -4122,6 +4157,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8152_in_nway;
+ ops->hw_phy_cfg = r8152b_hw_phy_cfg;
break;
case RTL_VER_03:
@@ -4137,6 +4173,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
ops->in_nway = rtl8153_in_nway;
+ ops->hw_phy_cfg = r8153_hw_phy_cfg;
break;
default:
@@ -4183,6 +4220,7 @@ static int rtl8152_probe(struct usb_interface *intf,
mutex_init(&tp->control);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
+ INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
@@ -4222,9 +4260,14 @@ static int rtl8152_probe(struct usb_interface *intf,
break;
}
+ tp->autoneg = AUTONEG_ENABLE;
+ tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100;
+ tp->duplex = DUPLEX_FULL;
+
intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp);
+ queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
set_ethernet_addr(tp);
usb_set_intfdata(intf, tp);
@@ -4270,6 +4313,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
netif_napi_del(&tp->napi);
unregister_netdev(tp->netdev);
+ cancel_delayed_work_sync(&tp->hw_phy_work);
tp->rtl_ops.unload(tp);
free_netdev(tp->netdev);
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e0638e556fe7..1dd08d4b9c31 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -479,53 +479,21 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
stats->rx_packets++;
u64_stats_update_end(&stats->rx_syncp);
- if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- pr_debug("Needs csum!\n");
- if (!skb_partial_csum_set(skb,
- virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start),
- virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset)))
- goto frame_err;
- } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
+ if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
+ virtio_is_little_endian(vi->vdev))) {
+ net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
+ dev->name, hdr->hdr.gso_type,
+ hdr->hdr.gso_size);
+ goto frame_err;
}
skb->protocol = eth_type_trans(skb, dev);
pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
ntohs(skb->protocol), skb->len, skb->pkt_type);
- if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- pr_debug("GSO!\n");
- switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
- case VIRTIO_NET_HDR_GSO_TCPV4:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
- break;
- case VIRTIO_NET_HDR_GSO_UDP:
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
- break;
- case VIRTIO_NET_HDR_GSO_TCPV6:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
- break;
- default:
- net_warn_ratelimited("%s: bad gso type %u.\n",
- dev->name, hdr->hdr.gso_type);
- goto frame_err;
- }
-
- if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
-
- skb_shinfo(skb)->gso_size = virtio16_to_cpu(vi->vdev,
- hdr->hdr.gso_size);
- if (skb_shinfo(skb)->gso_size == 0) {
- net_warn_ratelimited("%s: zero gso size.\n", dev->name);
- goto frame_err;
- }
-
- /* Header must be checked, and gso_segs computed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
- }
-
napi_gro_receive(&rq->napi, skb);
return;
@@ -868,35 +836,9 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
else
hdr = skb_vnet_hdr(skb);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- hdr->hdr.csum_start = cpu_to_virtio16(vi->vdev,
- skb_checksum_start_offset(skb));
- hdr->hdr.csum_offset = cpu_to_virtio16(vi->vdev,
- skb->csum_offset);
- } else {
- hdr->hdr.flags = 0;
- hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
- }
-
- if (skb_is_gso(skb)) {
- hdr->hdr.hdr_len = cpu_to_virtio16(vi->vdev, skb_headlen(skb));
- hdr->hdr.gso_size = cpu_to_virtio16(vi->vdev,
- skb_shinfo(skb)->gso_size);
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
- hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
- else
- BUG();
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
- hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
- } else {
- hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
- hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
- }
+ if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
+ virtio_is_little_endian(vi->vdev)))
+ BUG();
if (vi->mergeable_rx_bufs)
hdr->num_buffers = 0;
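Both directions now go through the common converters this series adds in <linux/virtio_net.h>, replacing the per-driver GSO/checksum field juggling removed above. A hedged sketch of the pair, with the three-argument signatures assumed at this point in the series:

/* Sketch only: round-trip csum/GSO metadata through the wire header.
 * Both helpers return 0 on success and a negative errno otherwise.
 */
#include <linux/virtio_net.h>

static int roundtrip_gso_metadata(struct sk_buff *skb, bool little_endian)
{
	struct virtio_net_hdr hdr;

	if (virtio_net_hdr_from_skb(skb, &hdr, little_endian))
		return -EINVAL;	/* unexpected GSO type */
	return virtio_net_hdr_to_skb(skb, &hdr, little_endian);
}
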
@@ -1780,6 +1722,7 @@ static int virtnet_probe(struct virtio_device *vdev)
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
+ int mtu;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -1896,6 +1839,14 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
+ mtu = virtio_cread16(vdev,
+ offsetof(struct virtio_net_config,
+ mtu));
+ if (virtnet_change_mtu(dev, mtu))
+ __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
+ }
+
if (vi->any_header_sg)
dev->needed_headroom = vi->hdr_len;
@@ -2067,6 +2018,7 @@ static unsigned int features[] = {
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
VIRTIO_NET_F_CTRL_MAC_ADDR,
VIRTIO_F_ANY_LAYOUT,
+ VIRTIO_NET_F_MTU,
};
static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 880f5098eac9..8cdbb63d1bb0 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -2,7 +2,7 @@
#
# Linux driver for VMware's vmxnet3 ethernet NIC.
#
-# Copyright (C) 2007-2009, VMware, Inc. All Rights Reserved.
+# Copyright (C) 2007-2016, VMware, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
@@ -21,7 +21,7 @@
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
-# Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+# Maintained by: pv-drivers@vmware.com
#
#
################################################################################
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index 969c751ee404..db9f1fde3aac 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ * Maintained by: pv-drivers@vmware.com
*
*/
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 72ba8ae7f09a..c3a31646189f 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2015, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ * Maintained by: pv-drivers@vmware.com
*
*/
@@ -76,7 +76,12 @@ enum {
VMXNET3_CMD_UPDATE_IML,
VMXNET3_CMD_UPDATE_PMCFG,
VMXNET3_CMD_UPDATE_FEATURE,
+ VMXNET3_CMD_RESERVED1,
VMXNET3_CMD_LOAD_PLUGIN,
+ VMXNET3_CMD_RESERVED2,
+ VMXNET3_CMD_RESERVED3,
+ VMXNET3_CMD_SET_COALESCE,
+ VMXNET3_CMD_REGISTER_MEMREGS,
VMXNET3_CMD_FIRST_GET = 0xF00D0000,
VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
@@ -87,7 +92,10 @@ enum {
VMXNET3_CMD_GET_DID_LO,
VMXNET3_CMD_GET_DID_HI,
VMXNET3_CMD_GET_DEV_EXTRA_INFO,
- VMXNET3_CMD_GET_CONF_INTR
+ VMXNET3_CMD_GET_CONF_INTR,
+ VMXNET3_CMD_GET_RESERVED1,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
+ VMXNET3_CMD_GET_COALESCE,
};
/*
@@ -169,6 +177,8 @@ struct Vmxnet3_TxDataDesc {
u8 data[VMXNET3_HDR_COPY_SIZE];
};
+typedef u8 Vmxnet3_RxDataDesc;
+
#define VMXNET3_TCD_GEN_SHIFT 31
#define VMXNET3_TCD_GEN_SIZE 1
#define VMXNET3_TCD_TXIDX_SHIFT 0
@@ -373,6 +383,14 @@ union Vmxnet3_GenericDesc {
#define VMXNET3_RING_SIZE_ALIGN 32
#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
+/* Tx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_TXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_TXDATA_DESC_SIZE_MASK (VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1)
+
+/* Rx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)
+
/* Max ring size */
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
@@ -380,6 +398,11 @@ union Vmxnet3_GenericDesc {
#define VMXNET3_RX_RING2_MAX_SIZE 4096
#define VMXNET3_RC_RING_MAX_SIZE 8192
+#define VMXNET3_TXDATA_DESC_MIN_SIZE 128
+#define VMXNET3_TXDATA_DESC_MAX_SIZE 2048
+
+#define VMXNET3_RXDATA_DESC_MAX_SIZE 2048
+
/* a list of reasons for queue stop */
enum {
@@ -466,7 +489,9 @@ struct Vmxnet3_TxQueueConf {
__le32 compRingSize; /* # of comp desc */
__le32 ddLen; /* size of driver data */
u8 intrIdx;
- u8 _pad[7];
+ u8 _pad1[1];
+ __le16 txDataRingDescSize;
+ u8 _pad2[4];
};
@@ -474,12 +499,14 @@ struct Vmxnet3_RxQueueConf {
__le64 rxRingBasePA[2];
__le64 compRingBasePA;
__le64 ddPA; /* driver data */
- __le64 reserved;
+ __le64 rxDataRingBasePA;
__le32 rxRingSize[2]; /* # of rx desc */
__le32 compRingSize; /* # of rx comp desc */
__le32 ddLen; /* size of driver data */
u8 intrIdx;
- u8 _pad[7];
+ u8 _pad1[1];
+ __le16 rxDataRingDescSize; /* size of rx data ring buffer */
+ u8 _pad2[4];
};
@@ -609,6 +636,63 @@ struct Vmxnet3_RxQueueDesc {
u8 __pad[88]; /* 128 aligned */
};
+struct Vmxnet3_SetPolling {
+ u8 enablePolling;
+};
+
+#define VMXNET3_COAL_STATIC_MAX_DEPTH 128
+#define VMXNET3_COAL_RBC_MIN_RATE 100
+#define VMXNET3_COAL_RBC_MAX_RATE 100000
+
+enum Vmxnet3_CoalesceMode {
+ VMXNET3_COALESCE_DISABLED = 0,
+ VMXNET3_COALESCE_ADAPT = 1,
+ VMXNET3_COALESCE_STATIC = 2,
+ VMXNET3_COALESCE_RBC = 3
+};
+
+struct Vmxnet3_CoalesceRbc {
+ u32 rbc_rate;
+};
+
+struct Vmxnet3_CoalesceStatic {
+ u32 tx_depth;
+ u32 tx_comp_depth;
+ u32 rx_depth;
+};
+
+struct Vmxnet3_CoalesceScheme {
+ enum Vmxnet3_CoalesceMode coalMode;
+ union {
+ struct Vmxnet3_CoalesceRbc coalRbc;
+ struct Vmxnet3_CoalesceStatic coalStatic;
+ } coalPara;
+};
+
+struct Vmxnet3_MemoryRegion {
+ __le64 startPA;
+ __le32 length;
+ __le16 txQueueBits;
+ __le16 rxQueueBits;
+};
+
+#define MAX_MEMORY_REGION_PER_QUEUE 16
+#define MAX_MEMORY_REGION_PER_DEVICE 256
+
+struct Vmxnet3_MemRegs {
+ __le16 numRegs;
+ __le16 pad[3];
+ struct Vmxnet3_MemoryRegion memRegs[1];
+};
+
+/* If the command data is <= 16 bytes, use the shared memory directly;
+ * otherwise, use the variable length configuration descriptor.
+ */
+union Vmxnet3_CmdInfo {
+ struct Vmxnet3_VariableLenConfDesc varConf;
+ struct Vmxnet3_SetPolling setPolling;
+ __le64 data[2];
+};
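A command whose argument fits in the 16-byte data[] is passed inline through shared memory; anything larger is described by guest-physical address and length via varConf. A hedged sketch of the descriptor path, mirroring how the coalescing commands later in this patch use it:

/* Sketch only: 'shared', 'adapter' and 'pa' follow the driver-shared
 * layout of this series; pa is the DMA address of a coherent buffer.
 */
union Vmxnet3_CmdInfo *ci = &shared->cu.cmdInfo;

ci->varConf.confVer = 1;
ci->varConf.confLen = cpu_to_le32(sizeof(struct Vmxnet3_CoalesceScheme));
ci->varConf.confPA = cpu_to_le64(pa);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_SET_COALESCE);
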
struct Vmxnet3_DSDevRead {
/* read-only region for device, read by dev in response to a SET cmd */
@@ -627,7 +711,14 @@ struct Vmxnet3_DriverShared {
__le32 pad;
struct Vmxnet3_DSDevRead devRead;
__le32 ecr;
- __le32 reserved[5];
+ __le32 reserved;
+ union {
+ __le32 reserved1[4];
+ union Vmxnet3_CmdInfo cmdInfo; /* only valid in the context of
+ * executing the relevant
+ * command
+ */
+ } cu;
};
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index db8022ae415b..c68fe495d3f9 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ * Maintained by: pv-drivers@vmware.com
*
*/
@@ -435,8 +435,8 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
tq->tx_ring.base = NULL;
}
if (tq->data_ring.base) {
- dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
- sizeof(struct Vmxnet3_TxDataDesc),
+ dma_free_coherent(&adapter->pdev->dev,
+ tq->data_ring.size * tq->txdata_desc_size,
tq->data_ring.base, tq->data_ring.basePA);
tq->data_ring.base = NULL;
}
@@ -478,8 +478,8 @@ vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
tq->tx_ring.gen = VMXNET3_INIT_GEN;
- memset(tq->data_ring.base, 0, tq->data_ring.size *
- sizeof(struct Vmxnet3_TxDataDesc));
+ memset(tq->data_ring.base, 0,
+ tq->data_ring.size * tq->txdata_desc_size);
/* reset the tx comp ring contents to 0 and reset comp ring states */
memset(tq->comp_ring.base, 0, tq->comp_ring.size *
@@ -514,10 +514,10 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
}
tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
- tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
+ tq->data_ring.size * tq->txdata_desc_size,
&tq->data_ring.basePA, GFP_KERNEL);
if (!tq->data_ring.base) {
- netdev_err(adapter->netdev, "failed to allocate data ring\n");
+ netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
goto err;
}
@@ -689,7 +689,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
if (ctx->copy_size) {
ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
tq->tx_ring.next2fill *
- sizeof(struct Vmxnet3_TxDataDesc));
+ tq->txdata_desc_size);
ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
ctx->sop_txd->dword[3] = 0;
@@ -873,8 +873,9 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
ctx->eth_ip_hdr_size = 0;
ctx->l4_hdr_size = 0;
/* copy as much as allowed */
- ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
- , skb_headlen(skb));
+ ctx->copy_size = min_t(unsigned int,
+ tq->txdata_desc_size,
+ skb_headlen(skb));
}
if (skb->len <= VMXNET3_HDR_COPY_SIZE)
@@ -885,7 +886,7 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
goto err;
}
- if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
+ if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
tq->stats.oversized_hdr++;
ctx->copy_size = 0;
return 0;
@@ -1283,9 +1284,10 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
*/
break;
}
- BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
+ BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
+ rcd->rqID != rq->dataRingQid);
idx = rcd->rxdIdx;
- ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
+ ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
ring = rq->rx_ring + ring_idx;
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
&rxCmdDesc);
@@ -1300,8 +1302,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
}
if (rcd->sop) { /* first buf of the pkt */
+ bool rxDataRingUsed;
+ u16 len;
+
BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
- rcd->rqID != rq->qid);
+ (rcd->rqID != rq->qid &&
+ rcd->rqID != rq->dataRingQid));
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
@@ -1317,8 +1323,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skip_page_frags = false;
ctx->skb = rbi->skb;
+
+ rxDataRingUsed =
+ VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
+ len = rxDataRingUsed ? rcd->len : rbi->len;
new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
- rbi->len);
+ len);
if (new_skb == NULL) {
/* Skb allocation failed, do not handover this
* skb to stack. Reuse it. Drop the existing pkt
@@ -1329,25 +1339,48 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skip_page_frags = true;
goto rcd_done;
}
- new_dma_addr = dma_map_single(&adapter->pdev->dev,
- new_skb->data, rbi->len,
- PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&adapter->pdev->dev,
- new_dma_addr)) {
- dev_kfree_skb(new_skb);
- /* Skb allocation failed, do not handover this
- * skb to stack. Reuse it. Drop the existing pkt
- */
- rq->stats.rx_buf_alloc_failure++;
- ctx->skb = NULL;
- rq->stats.drop_total++;
- skip_page_frags = true;
- goto rcd_done;
- }
- dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
- rbi->len,
- PCI_DMA_FROMDEVICE);
+ if (rxDataRingUsed) {
+ size_t sz;
+
+ BUG_ON(rcd->len > rq->data_ring.desc_size);
+
+ ctx->skb = new_skb;
+ sz = rcd->rxdIdx * rq->data_ring.desc_size;
+ memcpy(new_skb->data,
+ &rq->data_ring.base[sz], rcd->len);
+ } else {
+ ctx->skb = rbi->skb;
+
+ new_dma_addr =
+ dma_map_single(&adapter->pdev->dev,
+ new_skb->data, rbi->len,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ new_dma_addr)) {
+ dev_kfree_skb(new_skb);
+				/* Skb allocation failed; do not
+				 * hand over this skb to the stack.
+				 * Reuse it and drop the existing pkt.
+ */
+ rq->stats.rx_buf_alloc_failure++;
+ ctx->skb = NULL;
+ rq->stats.drop_total++;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
+
+ dma_unmap_single(&adapter->pdev->dev,
+ rbi->dma_addr,
+ rbi->len,
+ PCI_DMA_FROMDEVICE);
+
+ /* Immediate refill */
+ rbi->skb = new_skb;
+ rbi->dma_addr = new_dma_addr;
+ rxd->addr = cpu_to_le64(rbi->dma_addr);
+ rxd->len = rbi->len;
+ }
#ifdef VMXNET3_RSS
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
@@ -1358,18 +1391,13 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
#endif
skb_put(ctx->skb, rcd->len);
- /* Immediate refill */
- rbi->skb = new_skb;
- rbi->dma_addr = new_dma_addr;
- rxd->addr = cpu_to_le64(rbi->dma_addr);
- rxd->len = rbi->len;
- if (adapter->version == 2 &&
+ if (VMXNET3_VERSION_GE_2(adapter) &&
rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
struct Vmxnet3_RxCompDescExt *rcdlro;
rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
segCnt = rcdlro->segCnt;
- BUG_ON(segCnt <= 1);
+ WARN_ON_ONCE(segCnt == 0);
mss = rcdlro->mss;
if (unlikely(segCnt <= 1))
segCnt = 0;
@@ -1589,6 +1617,13 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
rq->buf_info[i] = NULL;
}
+ if (rq->data_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ rq->rx_ring[0].size * rq->data_ring.desc_size,
+ rq->data_ring.base, rq->data_ring.basePA);
+ rq->data_ring.base = NULL;
+ }
+
if (rq->comp_ring.base) {
dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
* sizeof(struct Vmxnet3_RxCompDesc),
@@ -1604,6 +1639,25 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
}
}
+void
+vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+
+ if (rq->data_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ (rq->rx_ring[0].size *
+ rq->data_ring.desc_size),
+ rq->data_ring.base,
+ rq->data_ring.basePA);
+ rq->data_ring.base = NULL;
+ rq->data_ring.desc_size = 0;
+ }
+ }
+}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
@@ -1697,6 +1751,22 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
}
}
+ if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
+ sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
+ rq->data_ring.base =
+ dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &rq->data_ring.basePA,
+ GFP_KERNEL);
+ if (!rq->data_ring.base) {
+ netdev_err(adapter->netdev,
+ "rx data ring will be disabled\n");
+ adapter->rxdataring_enabled = false;
+ }
+ } else {
+ rq->data_ring.base = NULL;
+ rq->data_ring.desc_size = 0;
+ }
+
sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
&rq->comp_ring.basePA,
@@ -1729,6 +1799,8 @@ vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
int i, err = 0;
+ adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
+
for (i = 0; i < adapter->num_rx_queues; i++) {
err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
if (unlikely(err)) {
@@ -1738,6 +1810,10 @@ vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
goto err_out;
}
}
+
+ if (!adapter->rxdataring_enabled)
+ vmxnet3_rq_destroy_all_rxdataring(adapter);
+
return err;
err_out:
vmxnet3_rq_destroy_all(adapter);
@@ -2045,10 +2121,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
rq->qid = i;
rq->qid2 = i + adapter->num_rx_queues;
+ rq->dataRingQid = i + 2 * adapter->num_rx_queues;
}
-
-
/* init our intr settings */
for (i = 0; i < intr->num_intrs; i++)
intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
@@ -2336,6 +2411,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
+ tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
tqc->ddLen = cpu_to_le32(
sizeof(struct vmxnet3_tx_buf_info) *
@@ -2360,6 +2436,12 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
(rqc->rxRingSize[0] +
rqc->rxRingSize[1]));
rqc->intrIdx = rq->comp_ring.intr_idx;
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ rqc->rxDataRingBasePA =
+ cpu_to_le64(rq->data_ring.basePA);
+ rqc->rxDataRingDescSize =
+ cpu_to_le16(rq->data_ring.desc_size);
+ }
}
#ifdef VMXNET3_RSS
@@ -2409,6 +2491,32 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
/* the rest are already zeroed */
}
+static void
+vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ if (!VMXNET3_VERSION_GE_3(adapter))
+ return;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ cmdInfo->varConf.confVer = 1;
+ cmdInfo->varConf.confLen =
+ cpu_to_le32(sizeof(*adapter->coal_conf));
+ cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
+
+ if (adapter->default_coal_mode) {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_COALESCE);
+ } else {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_COALESCE);
+ }
+
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+}
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
@@ -2458,6 +2566,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
goto activate_err;
}
+ vmxnet3_init_coalesce(adapter);
+
for (i = 0; i < adapter->num_rx_queues; i++) {
VMXNET3_WRITE_BAR0_REG(adapter,
VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
@@ -2689,7 +2799,8 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
- u32 rx_ring_size, u32 rx_ring2_size)
+ u32 rx_ring_size, u32 rx_ring2_size,
+ u16 txdata_desc_size, u16 rxdata_desc_size)
{
int err = 0, i;
@@ -2698,6 +2809,7 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
tq->tx_ring.size = tx_ring_size;
tq->data_ring.size = tx_ring_size;
tq->comp_ring.size = tx_ring_size;
+ tq->txdata_desc_size = txdata_desc_size;
tq->shared = &adapter->tqd_start[i].ctrl;
tq->stopped = true;
tq->adapter = adapter;
@@ -2714,12 +2826,15 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
vmxnet3_adjust_rx_ring_size(adapter);
+
+ adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
/* qid and qid2 for rx queues will be assigned later when num
* of rx queues is finalized after allocating intrs */
rq->shared = &adapter->rqd_start[i].ctrl;
rq->adapter = adapter;
+ rq->data_ring.desc_size = rxdata_desc_size;
err = vmxnet3_rq_create(rq, adapter);
if (err) {
if (i == 0) {
@@ -2737,6 +2852,10 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
}
}
}
+
+ if (!adapter->rxdataring_enabled)
+ vmxnet3_rq_destroy_all_rxdataring(adapter);
+
return err;
queue_err:
vmxnet3_tq_destroy_all(adapter);
@@ -2754,9 +2873,35 @@ vmxnet3_open(struct net_device *netdev)
for (i = 0; i < adapter->num_tx_queues; i++)
spin_lock_init(&adapter->tx_queue[i].tx_lock);
- err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ unsigned long flags;
+ u16 txdata_desc_size;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
+ txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
+ VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
+ (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
+ (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
+ adapter->txdata_desc_size =
+ sizeof(struct Vmxnet3_TxDataDesc);
+ } else {
+ adapter->txdata_desc_size = txdata_desc_size;
+ }
+ } else {
+ adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
+ }
+
+ err = vmxnet3_create_queues(adapter,
+ adapter->tx_ring_size,
adapter->rx_ring_size,
- adapter->rx_ring2_size);
+ adapter->rx_ring2_size,
+ adapter->txdata_desc_size,
+ adapter->rxdata_desc_size);
if (err)
goto queue_err;
@@ -3200,12 +3345,21 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
- if (ver & 2) {
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 2);
- adapter->version = 2;
- } else if (ver & 1) {
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
- adapter->version = 1;
+ if (ver & (1 << VMXNET3_REV_3)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_3);
+ adapter->version = VMXNET3_REV_3 + 1;
+ } else if (ver & (1 << VMXNET3_REV_2)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_2);
+ adapter->version = VMXNET3_REV_2 + 1;
+ } else if (ver & (1 << VMXNET3_REV_1)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_1);
+ adapter->version = VMXNET3_REV_1 + 1;
} else {
dev_err(&pdev->dev,
"Incompatible h/w version (0x%x) for adapter\n", ver);
@@ -3224,9 +3378,27 @@
goto err_ver;
}
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ adapter->coal_conf =
+ dma_alloc_coherent(&adapter->pdev->dev,
+					   sizeof(struct Vmxnet3_CoalesceScheme),
+ &adapter->coal_conf_pa,
+ GFP_KERNEL);
+ if (!adapter->coal_conf) {
+ err = -ENOMEM;
+ goto err_ver;
+ }
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
+ adapter->default_coal_mode = true;
+ }
+
SET_NETDEV_DEV(netdev, &pdev->dev);
vmxnet3_declare_features(adapter, dma64);
+ adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
+ VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+
if (adapter->num_tx_queues == adapter->num_rx_queues)
adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
else
@@ -3283,6 +3456,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
return 0;
err_register:
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_CoalesceScheme),
+ adapter->coal_conf, adapter->coal_conf_pa);
+ }
vmxnet3_free_intr_resources(adapter);
err_ver:
vmxnet3_free_pci_resources(adapter);
@@ -3333,6 +3511,11 @@ vmxnet3_remove_device(struct pci_dev *pdev)
vmxnet3_free_intr_resources(adapter);
vmxnet3_free_pci_resources(adapter);
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_CoalesceScheme),
+ adapter->coal_conf, adapter->coal_conf_pa);
+ }
#ifdef VMXNET3_RSS
dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
adapter->rss_conf, adapter->rss_conf_pa);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 9ba11d737753..aabc6ef366b4 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ * Maintained by: pv-drivers@vmware.com
*
*/
@@ -396,8 +396,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
buf[j++] = tq->data_ring.size;
- /* transmit data ring buffer size */
- buf[j++] = VMXNET3_HDR_COPY_SIZE;
+ buf[j++] = tq->txdata_desc_size;
buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
@@ -431,11 +430,10 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
buf[j++] = rq->rx_ring[1].next2comp;
buf[j++] = rq->rx_ring[1].gen;
- /* receive data ring */
- buf[j++] = 0;
- buf[j++] = 0;
- buf[j++] = 0;
- buf[j++] = 0;
+ buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
+ buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
+ buf[j++] = rq->rx_ring[0].size;
+ buf[j++] = rq->data_ring.desc_size;
buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
@@ -504,12 +502,14 @@ vmxnet3_get_ringparam(struct net_device *netdev,
param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
- param->rx_mini_max_pending = 0;
+ param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ?
+ VMXNET3_RXDATA_DESC_MAX_SIZE : 0;
param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;
param->rx_pending = adapter->rx_ring_size;
param->tx_pending = adapter->tx_ring_size;
- param->rx_mini_pending = 0;
+ param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ?
+ adapter->rxdata_desc_size : 0;
param->rx_jumbo_pending = adapter->rx_ring2_size;
}
@@ -520,6 +520,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
+ u16 new_rxdata_desc_size;
u32 sz;
int err = 0;
@@ -542,6 +543,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
return -EOPNOTSUPP;
}
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+		if (param->rx_mini_pending >
+		    VMXNET3_RXDATA_DESC_MAX_SIZE) {
+ return -EINVAL;
+ }
+ } else if (param->rx_mini_pending != 0) {
+ return -EINVAL;
+ }
+
/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
~VMXNET3_RING_SIZE_MASK;
@@ -568,9 +578,19 @@ vmxnet3_set_ringparam(struct net_device *netdev,
new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
VMXNET3_RX_RING2_MAX_SIZE);
+ /* rx data ring buffer size has to be a multiple of
+ * VMXNET3_RXDATA_DESC_SIZE_ALIGN
+ */
+ new_rxdata_desc_size =
+ (param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) &
+ ~VMXNET3_RXDATA_DESC_SIZE_MASK;
+ new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size,
+ VMXNET3_RXDATA_DESC_MAX_SIZE);
+
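The (x + MASK) & ~MASK idiom rounds x up to the next multiple of a power-of-two alignment: with VMXNET3_RXDATA_DESC_SIZE_ALIGN = 64, a request of 100 becomes (100 + 63) & ~63 = 128, and 128 stays 128. As a hedged helper form:

/* Sketch only: round x up to a multiple of a power-of-two alignment. */
static inline u16 round_up_pow2(u16 x, u16 align)
{
	return (x + align - 1) & ~(align - 1);
}
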
if (new_tx_ring_size == adapter->tx_ring_size &&
new_rx_ring_size == adapter->rx_ring_size &&
- new_rx_ring2_size == adapter->rx_ring2_size) {
+ new_rx_ring2_size == adapter->rx_ring2_size &&
+ new_rxdata_desc_size == adapter->rxdata_desc_size) {
return 0;
}
@@ -591,8 +611,9 @@ vmxnet3_set_ringparam(struct net_device *netdev,
vmxnet3_rq_destroy_all(adapter);
err = vmxnet3_create_queues(adapter, new_tx_ring_size,
- new_rx_ring_size, new_rx_ring2_size);
-
+ new_rx_ring_size, new_rx_ring2_size,
+ adapter->txdata_desc_size,
+ new_rxdata_desc_size);
if (err) {
/* failed, most likely because of OOM, try default
* size */
@@ -601,10 +622,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
+ new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
+ VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+
err = vmxnet3_create_queues(adapter,
new_tx_ring_size,
new_rx_ring_size,
- new_rx_ring2_size);
+ new_rx_ring2_size,
+ adapter->txdata_desc_size,
+ new_rxdata_desc_size);
if (err) {
netdev_err(netdev, "failed to create queues "
"with default sizes. Closing it\n");
@@ -620,6 +646,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
adapter->tx_ring_size = new_tx_ring_size;
adapter->rx_ring_size = new_rx_ring_size;
adapter->rx_ring2_size = new_rx_ring2_size;
+ adapter->rxdata_desc_size = new_rxdata_desc_size;
out:
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
@@ -698,6 +725,162 @@ vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
}
#endif
+static int
+vmxnet3_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (!VMXNET3_VERSION_GE_3(adapter))
+ return -EOPNOTSUPP;
+
+ switch (adapter->coal_conf->coalMode) {
+ case VMXNET3_COALESCE_DISABLED:
+ /* struct ethtool_coalesce is already initialized to 0 */
+ break;
+ case VMXNET3_COALESCE_ADAPT:
+ ec->use_adaptive_rx_coalesce = true;
+ break;
+ case VMXNET3_COALESCE_STATIC:
+ ec->tx_max_coalesced_frames =
+ adapter->coal_conf->coalPara.coalStatic.tx_comp_depth;
+ ec->rx_max_coalesced_frames =
+ adapter->coal_conf->coalPara.coalStatic.rx_depth;
+ break;
+ case VMXNET3_COALESCE_RBC: {
+ u32 rbc_rate;
+
+ rbc_rate = adapter->coal_conf->coalPara.coalRbc.rbc_rate;
+ ec->rx_coalesce_usecs = VMXNET3_COAL_RBC_USECS(rbc_rate);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ if (!VMXNET3_VERSION_GE_3(adapter))
+ return -EOPNOTSUPP;
+
+ if (ec->rx_coalesce_usecs_irq ||
+ ec->rx_max_coalesced_frames_irq ||
+ ec->tx_coalesce_usecs ||
+ ec->tx_coalesce_usecs_irq ||
+ ec->tx_max_coalesced_frames_irq ||
+ ec->stats_block_coalesce_usecs ||
+ ec->use_adaptive_tx_coalesce ||
+ ec->pkt_rate_low ||
+ ec->rx_coalesce_usecs_low ||
+ ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low ||
+ ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high ||
+ ec->rx_coalesce_usecs_high ||
+ ec->rx_max_coalesced_frames_high ||
+ ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high ||
+ ec->rate_sample_interval) {
+ return -EINVAL;
+ }
+
+ if ((ec->rx_coalesce_usecs == 0) &&
+ (ec->use_adaptive_rx_coalesce == 0) &&
+ (ec->tx_max_coalesced_frames == 0) &&
+ (ec->rx_max_coalesced_frames == 0)) {
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
+ goto done;
+ }
+
+ if (ec->rx_coalesce_usecs != 0) {
+ u32 rbc_rate;
+
+ if ((ec->use_adaptive_rx_coalesce != 0) ||
+ (ec->tx_max_coalesced_frames != 0) ||
+ (ec->rx_max_coalesced_frames != 0)) {
+ return -EINVAL;
+ }
+
+ rbc_rate = VMXNET3_COAL_RBC_RATE(ec->rx_coalesce_usecs);
+ if (rbc_rate < VMXNET3_COAL_RBC_MIN_RATE ||
+ rbc_rate > VMXNET3_COAL_RBC_MAX_RATE) {
+ return -EINVAL;
+ }
+
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_RBC;
+ adapter->coal_conf->coalPara.coalRbc.rbc_rate = rbc_rate;
+ goto done;
+ }
+
+ if (ec->use_adaptive_rx_coalesce != 0) {
+ if ((ec->rx_coalesce_usecs != 0) ||
+ (ec->tx_max_coalesced_frames != 0) ||
+ (ec->rx_max_coalesced_frames != 0)) {
+ return -EINVAL;
+ }
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_ADAPT;
+ goto done;
+ }
+
+ if ((ec->tx_max_coalesced_frames != 0) ||
+ (ec->rx_max_coalesced_frames != 0)) {
+ if ((ec->rx_coalesce_usecs != 0) ||
+ (ec->use_adaptive_rx_coalesce != 0)) {
+ return -EINVAL;
+ }
+
+ if ((ec->tx_max_coalesced_frames >
+ VMXNET3_COAL_STATIC_MAX_DEPTH) ||
+ (ec->rx_max_coalesced_frames >
+ VMXNET3_COAL_STATIC_MAX_DEPTH)) {
+ return -EINVAL;
+ }
+
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_STATIC;
+
+ adapter->coal_conf->coalPara.coalStatic.tx_comp_depth =
+ (ec->tx_max_coalesced_frames ?
+ ec->tx_max_coalesced_frames :
+ VMXNET3_COAL_STATIC_DEFAULT_DEPTH);
+
+ adapter->coal_conf->coalPara.coalStatic.rx_depth =
+ (ec->rx_max_coalesced_frames ?
+ ec->rx_max_coalesced_frames :
+ VMXNET3_COAL_STATIC_DEFAULT_DEPTH);
+
+ adapter->coal_conf->coalPara.coalStatic.tx_depth =
+ VMXNET3_COAL_STATIC_DEFAULT_DEPTH;
+ goto done;
+ }
+
+done:
+ adapter->default_coal_mode = false;
+ if (netif_running(netdev)) {
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ cmdInfo->varConf.confVer = 1;
+ cmdInfo->varConf.confLen =
+ cpu_to_le32(sizeof(*adapter->coal_conf));
+ cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_COALESCE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_settings = vmxnet3_get_settings,
.get_drvinfo = vmxnet3_get_drvinfo,
@@ -706,6 +889,8 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_wol = vmxnet3_get_wol,
.set_wol = vmxnet3_set_wol,
.get_link = ethtool_op_get_link,
+ .get_coalesce = vmxnet3_get_coalesce,
+ .set_coalesce = vmxnet3_set_coalesce,
.get_strings = vmxnet3_get_strings,
.get_sset_count = vmxnet3_get_sset_count,
.get_ethtool_stats = vmxnet3_get_ethtool_stats,
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index c4825392d64b..74fc03072b87 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ * Maintained by: pv-drivers@vmware.com
*
*/
@@ -69,16 +69,20 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.9.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040900
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
#define VMXNET3_RSS
#endif
+#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
+#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */
+#define VMXNET3_REV_1 0 /* Vmxnet3 Rev. 1 */
+
/*
* Capabilities
*/
@@ -237,6 +241,7 @@ struct vmxnet3_tx_queue {
int num_stop; /* # of times the queue is
* stopped */
int qid;
+ u16 txdata_desc_size;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
enum vmxnet3_rx_buf_type {
@@ -267,15 +272,23 @@ struct vmxnet3_rq_driver_stats {
u64 rx_buf_alloc_failure;
};
+struct vmxnet3_rx_data_ring {
+ Vmxnet3_RxDataDesc *base;
+ dma_addr_t basePA;
+ u16 desc_size;
+};
+
struct vmxnet3_rx_queue {
char name[IFNAMSIZ + 8]; /* To identify interrupt */
struct vmxnet3_adapter *adapter;
struct napi_struct napi;
struct vmxnet3_cmd_ring rx_ring[2];
+ struct vmxnet3_rx_data_ring data_ring;
struct vmxnet3_comp_ring comp_ring;
struct vmxnet3_rx_ctx rx_ctx;
u32 qid; /* rqID in RCD for buffer from 1st ring */
u32 qid2; /* rqID in RCD for buffer from 2nd ring */
+ u32 dataRingQid; /* rqID in RCD for buffer from data ring */
struct vmxnet3_rx_buf_info *buf_info[2];
dma_addr_t buf_info_pa;
struct Vmxnet3_RxQueueCtrl *shared;
@@ -345,6 +358,7 @@ struct vmxnet3_adapter {
int rx_buf_per_pkt; /* only apply to the 1st ring */
dma_addr_t shared_pa;
dma_addr_t queue_desc_pa;
+ dma_addr_t coal_conf_pa;
/* Wake-on-LAN */
u32 wol;
@@ -359,12 +373,21 @@ struct vmxnet3_adapter {
u32 rx_ring_size;
u32 rx_ring2_size;
+ /* Size of buffer in the data ring */
+ u16 txdata_desc_size;
+ u16 rxdata_desc_size;
+
+ bool rxdataring_enabled;
+
struct work_struct work;
unsigned long state; /* VMXNET3_STATE_BIT_xxx */
int share_intr;
+ struct Vmxnet3_CoalesceScheme *coal_conf;
+ bool default_coal_mode;
+
dma_addr_t adapter_pa;
dma_addr_t pm_conf_pa;
dma_addr_t rss_conf_pa;
@@ -387,14 +410,34 @@ struct vmxnet3_adapter {
#define VMXNET3_GET_ADDR_LO(dma) ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma) ((u32)(((u64)(dma)) >> 32))
+#define VMXNET3_VERSION_GE_2(adapter) \
+ (adapter->version >= VMXNET3_REV_2 + 1)
+#define VMXNET3_VERSION_GE_3(adapter) \
+ (adapter->version >= VMXNET3_REV_3 + 1)
+
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
#define VMXNET3_DEF_RX_RING_SIZE 256
#define VMXNET3_DEF_RX_RING2_SIZE 128
+#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
+
#define VMXNET3_MAX_ETH_HDR_SIZE 22
#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
+#define VMXNET3_GET_RING_IDX(adapter, rqID) \
+ ((rqID >= adapter->num_rx_queues && \
+	  rqID < 2 * adapter->num_rx_queues) ? 1 : 0)
+
+#define VMXNET3_RX_DATA_RING(adapter, rqID) \
+ (rqID >= 2 * adapter->num_rx_queues && \
+	 rqID < 3 * adapter->num_rx_queues)
+
+#define VMXNET3_COAL_STATIC_DEFAULT_DEPTH 64
+
+#define VMXNET3_COAL_RBC_RATE(usecs) (1000000 / usecs)
+#define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / rbc_rate)
+
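The two macros are integer inverses: rx-usecs = 50 maps to rbc_rate = 1000000 / 50 = 20000 interrupts per second, and the valid rate window [100, 100000] corresponds to rx-usecs in [10, 10000]. Integer division makes the round-trip lossy near the edges, as this hedged check shows:

/* Sketch only: exercise the usecs <-> rate mapping defined above. */
u32 usecs = 9999;
u32 rate = VMXNET3_COAL_RBC_RATE(usecs);  /* 1000000 / 9999 = 100 */
u32 back = VMXNET3_COAL_RBC_USECS(rate);  /* 1000000 / 100 = 10000 */
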
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
@@ -418,7 +461,8 @@ vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
- u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
+ u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
+ u16 txdata_desc_size, u16 rxdata_desc_size);
void vmxnet3_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 1b214ea4619a..1ce7420322ee 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -35,6 +35,7 @@
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
+#include <net/fib_rules.h>
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -42,6 +43,9 @@
#define DRV_NAME "vrf"
#define DRV_VERSION "1.0"
+#define FIB_RULE_PREF 1000 /* default preference for FIB rules */
+static bool add_fib_rules = true;
+
struct net_vrf {
struct rtable __rcu *rth;
struct rtable __rcu *rth_local;
@@ -374,23 +378,37 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
}
/* holding rtnl */
-static void vrf_rt6_release(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local);
+ struct net *net = dev_net(dev);
+ struct dst_entry *dst;
RCU_INIT_POINTER(vrf->rt6, NULL);
RCU_INIT_POINTER(vrf->rt6_local, NULL);
synchronize_rcu();
- if (rt6)
- dst_release(&rt6->dst);
+	/* move the dev in the dsts to loopback so this VRF device can be
+	 * deleted - based on dst_ifdown
+ */
+ if (rt6) {
+ dst = &rt6->dst;
+ dev_put(dst->dev);
+ dst->dev = net->loopback_dev;
+ dev_hold(dst->dev);
+ dst_release(dst);
+ }
if (rt6_local) {
if (rt6_local->rt6i_idev)
in6_dev_put(rt6_local->rt6i_idev);
- dst_release(&rt6_local->dst);
+ dst = &rt6_local->dst;
+ dev_put(dst->dev);
+ dst->dev = net->loopback_dev;
+ dev_hold(dst->dev);
+ dst_release(dst);
}
}
@@ -403,6 +421,10 @@ static int vrf_rt6_create(struct net_device *dev)
struct rt6_info *rt6, *rt6_local;
int rc = -ENOMEM;
+	/* IPv6 can be enabled in CONFIG and then disabled at runtime */
+ if (!ipv6_mod_enabled())
+ return 0;
+
rt6i_table = fib6_new_table(net, vrf->tb_id);
if (!rt6i_table)
goto out;
@@ -441,7 +463,7 @@ out:
return rc;
}
#else
-static void vrf_rt6_release(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}
@@ -510,20 +532,35 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
}
/* holding rtnl */
-static void vrf_rtable_release(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
struct rtable *rth = rtnl_dereference(vrf->rth);
struct rtable *rth_local = rtnl_dereference(vrf->rth_local);
+ struct net *net = dev_net(dev);
+ struct dst_entry *dst;
RCU_INIT_POINTER(vrf->rth, NULL);
RCU_INIT_POINTER(vrf->rth_local, NULL);
synchronize_rcu();
- if (rth)
- dst_release(&rth->dst);
+	/* move the dev in the dsts to loopback so this VRF device can be
+	 * deleted - based on dst_ifdown
+ */
+ if (rth) {
+ dst = &rth->dst;
+ dev_put(dst->dev);
+ dst->dev = net->loopback_dev;
+ dev_hold(dst->dev);
+ dst_release(dst);
+ }
- if (rth_local)
- dst_release(&rth_local->dst);
+ if (rth_local) {
+ dst = &rth_local->dst;
+ dev_put(dst->dev);
+ dst->dev = net->loopback_dev;
+ dev_hold(dst->dev);
+ dst_release(dst);
+ }
}
static int vrf_rtable_create(struct net_device *dev)
@@ -625,8 +662,8 @@ static void vrf_dev_uninit(struct net_device *dev)
struct net_device *port_dev;
struct list_head *iter;
- vrf_rtable_release(vrf);
- vrf_rt6_release(vrf);
+ vrf_rtable_release(dev, vrf);
+ vrf_rt6_release(dev, vrf);
netdev_for_each_lower_dev(dev, port_dev, iter)
vrf_del_slave(dev, port_dev);
@@ -657,11 +694,11 @@ static int vrf_dev_init(struct net_device *dev)
/* similarly, oper state is irrelevant; set to up to avoid confusion */
dev->operstate = IF_OPER_UP;
-
+ netdev_lockdep_set_classes(dev);
return 0;
out_rth:
- vrf_rtable_release(vrf);
+ vrf_rtable_release(dev, vrf);
out_stats:
free_percpu(dev->dstats);
dev->dstats = NULL;
@@ -742,6 +779,25 @@ static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
return rc;
}
+static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+
+static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
+ struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct net *net = dev_net(dev);
+
+ nf_reset(skb);
+
+ if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
+ skb = NULL; /* kfree_skb(skb) handled by nf code */
+
+ return skb;
+}
+
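NF_HOOK() calls the okfn (here the no-op vrf_rcv_finish()) only when every NF_INET_PRE_ROUTING hook accepts the packet; a negative return means netfilter already freed or queued the skb, hence the NULL. Callers must therefore stop on NULL, as a hedged sketch:

/* Sketch only: a NULL result means the skb was consumed by netfilter. */
skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
if (!skb)
	return NULL;
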
#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
* to flip skb->dev for those ndisc packets. This really fails
@@ -777,9 +833,63 @@ out:
return rc;
}
+static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
+ const struct net_device *dev,
+ struct flowi6 *fl6,
+ int ifindex,
+ int flags)
+{
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct fib6_table *table = NULL;
+ struct rt6_info *rt6;
+
+ rcu_read_lock();
+
+ /* fib6_table does not have a refcnt and can not be freed */
+ rt6 = rcu_dereference(vrf->rt6);
+ if (likely(rt6))
+ table = rt6->rt6i_table;
+
+ rcu_read_unlock();
+
+ if (!table)
+ return NULL;
+
+ return ip6_pol_route(net, table, ifindex, fl6, flags);
+}
+
+static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
+ int ifindex)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct flowi6 fl6 = {
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .flowlabel = ip6_flowinfo(iph),
+ .flowi6_mark = skb->mark,
+ .flowi6_proto = iph->nexthdr,
+ .flowi6_iif = ifindex,
+ };
+ struct net *net = dev_net(vrf_dev);
+ struct rt6_info *rt6;
+
+ rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
+ RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
+ if (unlikely(!rt6))
+ return;
+
+ if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
+ return;
+
+ skb_dst_set(skb, &rt6->dst);
+}
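rt6_need_strict(), used below, keys off the destination's scope; a sketch of its logic for context (the canonical helper lives in include/net/ip6_route.h):

/* true when the ingress interface must be preserved for the lookup */
static bool example_rt6_need_strict(const struct in6_addr *daddr)
{
	return ipv6_addr_type(daddr) &
	       (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}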
+
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
struct sk_buff *skb)
{
+ int orig_iif = skb->skb_iif;
+ bool need_strict;
+
/* loopback traffic; do not push through packet taps again.
* Reset pkt_type for upper layers to process skb
*/
@@ -790,8 +900,11 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
goto out;
}
- /* if packet is NDISC keep the ingress interface */
- if (!ipv6_ndisc_frame(skb)) {
+ /* if packet is NDISC or addressed to multicast or link-local
+ * then keep the ingress interface
+ */
+ need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
+ if (!ipv6_ndisc_frame(skb) && !need_strict) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
@@ -802,6 +915,10 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
}
+ if (need_strict)
+ vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
+
+ skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
return skb;
}
@@ -832,6 +949,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
dev_queue_xmit_nit(skb, vrf_dev);
skb_pull(skb, skb->mac_len);
+ skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
return skb;
}
@@ -853,13 +971,37 @@ static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
- const struct flowi6 *fl6)
+ struct flowi6 *fl6)
{
+ bool need_strict = rt6_need_strict(&fl6->daddr);
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct net *net = dev_net(dev);
struct dst_entry *dst = NULL;
+ struct rt6_info *rt;
- if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
- struct net_vrf *vrf = netdev_priv(dev);
- struct rt6_info *rt;
+ /* send to link-local or multicast address */
+ if (need_strict) {
+ int flags = RT6_LOOKUP_F_IFACE;
+
+ /* VRF device does not have a link-local address and
+ * sending packets to link-local or mcast addresses over
+ * a VRF device does not make sense
+ */
+ if (fl6->flowi6_oif == dev->ifindex) {
+			dst = &net->ipv6.ip6_null_entry->dst;
+			dst_hold(dst);
+ return dst;
+ }
+
+ if (!ipv6_addr_any(&fl6->saddr))
+ flags |= RT6_LOOKUP_F_HAS_SADDR;
+
+ rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
+ if (rt)
+ dst = &rt->dst;
+
+ } else if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
rcu_read_lock();
@@ -872,8 +1014,52 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
rcu_read_unlock();
}
+ /* make sure oif is set to VRF device for lookup */
+ if (!need_strict)
+ fl6->flowi6_oif = dev->ifindex;
+
return dst;
}
+
+/* called under rcu_read_lock */
+static int vrf_get_saddr6(struct net_device *dev, const struct sock *sk,
+ struct flowi6 *fl6)
+{
+ struct net *net = dev_net(dev);
+ struct dst_entry *dst;
+ struct rt6_info *rt;
+ int err;
+
+ if (rt6_need_strict(&fl6->daddr)) {
+ rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif,
+ RT6_LOOKUP_F_IFACE);
+ if (unlikely(!rt))
+ return 0;
+
+ dst = &rt->dst;
+ } else {
+ __u8 flags = fl6->flowi6_flags;
+
+ fl6->flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
+ fl6->flowi6_flags |= FLOWI_FLAG_SKIP_NH_OIF;
+
+ dst = ip6_route_output(net, sk, fl6);
+ rt = (struct rt6_info *)dst;
+
+ fl6->flowi6_flags = flags;
+ }
+
+ err = dst->error;
+ if (!err) {
+ err = ip6_route_get_saddr(net, rt, &fl6->daddr,
+ sk ? inet6_sk(sk)->srcprefs : 0,
+ &fl6->saddr);
+ }
+
+ dst_release(dst);
+
+ return err;
+}
#endif
static const struct l3mdev_ops vrf_l3mdev_ops = {
@@ -883,6 +1069,7 @@ static const struct l3mdev_ops vrf_l3mdev_ops = {
.l3mdev_l3_rcv = vrf_l3_rcv,
#if IS_ENABLED(CONFIG_IPV6)
.l3mdev_get_rt6_dst = vrf_get_rt6_dst,
+ .l3mdev_get_saddr6 = vrf_get_saddr6,
#endif
};
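For orientation, roughly how the stack reaches the new l3mdev_get_saddr6 handler (paraphrased from the l3mdev core, not a verbatim copy):

static int example_l3mdev_get_saddr6(struct net *net, const struct sock *sk,
				     struct flowi6 *fl6)
{
	struct net_device *dev;
	int err = 0;

	if (fl6->flowi6_oif) {
		rcu_read_lock();
		dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
		if (dev && netif_is_l3_slave(dev))
			dev = netdev_master_upper_dev_get_rcu(dev);
		if (dev && netif_is_l3_master(dev) &&
		    dev->l3mdev_ops->l3mdev_get_saddr6)
			err = dev->l3mdev_ops->l3mdev_get_saddr6(dev, sk, fl6);
		rcu_read_unlock();
	}
	return err;
}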
@@ -897,6 +1084,94 @@ static const struct ethtool_ops vrf_ethtool_ops = {
.get_drvinfo = vrf_get_drvinfo,
};
+static inline size_t vrf_fib_rule_nl_size(void)
+{
+ size_t sz;
+
+ sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
+ sz += nla_total_size(sizeof(u8)); /* FRA_L3MDEV */
+ sz += nla_total_size(sizeof(u32)); /* FRA_PRIORITY */
+
+ return sz;
+}
+
+static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
+{
+ struct fib_rule_hdr *frh;
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ int err;
+
+ if (family == AF_INET6 && !ipv6_mod_enabled())
+ return 0;
+
+ skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
+ if (!nlh)
+ goto nla_put_failure;
+
+ /* rule only needs to appear once */
+	nlh->nlmsg_flags |= NLM_F_EXCL;
+
+ frh = nlmsg_data(nlh);
+ memset(frh, 0, sizeof(*frh));
+ frh->family = family;
+ frh->action = FR_ACT_TO_TBL;
+
+	if (nla_put_u8(skb, FRA_L3MDEV, 1))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+
+ /* fib_nl_{new,del}rule handling looks for net from skb->sk */
+ skb->sk = dev_net(dev)->rtnl;
+ if (add_it) {
+ err = fib_nl_newrule(skb, nlh);
+ if (err == -EEXIST)
+ err = 0;
+ } else {
+ err = fib_nl_delrule(skb, nlh);
+ if (err == -ENOENT)
+ err = 0;
+ }
+ nlmsg_free(skb);
+
+ return err;
+
+nla_put_failure:
+ nlmsg_free(skb);
+
+ return -EMSGSIZE;
+}
+
+static int vrf_add_fib_rules(const struct net_device *dev)
+{
+ int err;
+
+ err = vrf_fib_rule(dev, AF_INET, true);
+ if (err < 0)
+ goto out_err;
+
+ err = vrf_fib_rule(dev, AF_INET6, true);
+ if (err < 0)
+ goto ipv6_err;
+
+ return 0;
+
+ipv6_err:
+ vrf_fib_rule(dev, AF_INET, false);
+
+out_err:
+ netdev_err(dev, "Failed to add FIB rules.\n");
+ return err;
+}
+
static void vrf_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -915,6 +1190,20 @@ static void vrf_setup(struct net_device *dev)
/* don't allow vrf devices to change network namespaces. */
dev->features |= NETIF_F_NETNS_LOCAL;
+
+ /* does not make sense for a VLAN to be added to a vrf device */
+ dev->features |= NETIF_F_VLAN_CHALLENGED;
+
+ /* enable offload features */
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+
+ dev->hw_features = dev->features;
+ dev->hw_enc_features = dev->features;
+
+ /* default to no qdisc; user can add if desired */
+ dev->priv_flags |= IFF_NO_QUEUE;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -937,6 +1226,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct net_vrf *vrf = netdev_priv(dev);
+ int err;
if (!data || !data[IFLA_VRF_TABLE])
return -EINVAL;
@@ -945,7 +1235,21 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
dev->priv_flags |= IFF_L3MDEV_MASTER;
- return register_netdevice(dev);
+ err = register_netdevice(dev);
+ if (err)
+ goto out;
+
+ if (add_fib_rules) {
+ err = vrf_add_fib_rules(dev);
+ if (err) {
+ unregister_netdevice(dev);
+ goto out;
+ }
+ add_fib_rules = false;
+ }
+
+out:
+ return err;
}
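add_fib_rules is a file-scope flag whose declaration falls outside this hunk; the gating amounts to the sketch below (declaration assumed; vrf_fib_rule() maps -EEXIST to success, so the install is idempotent):

static bool add_fib_rules = true;	/* assumed file-scope declaration */

static int example_install_rules_once(struct net_device *dev)
{
	int err = 0;

	if (add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (!err)
			add_fib_rules = false;	/* first VRF installs for all */
	}
	return err;
}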
static size_t vrf_nl_getsize(const struct net_device *dev)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f999db2f97b4..ae7455da1687 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -11,32 +11,18 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/rculist.h>
-#include <linux/netdevice.h>
-#include <linux/in.h>
-#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
-#include <linux/etherdevice.h>
#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
-#include <net/ip_tunnels.h>
#include <net/icmp.h>
-#include <net/udp.h>
-#include <net/udp_tunnel.h>
#include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
@@ -44,12 +30,9 @@
#include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
-#include <net/dst_metadata.h>
#define VXLAN_VERSION "0.1"
@@ -619,42 +602,6 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
-/* Notify netdevs that UDP port started listening */
-static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
-{
- struct net_device *dev;
- struct sock *sk = vs->sock->sk;
- struct net *net = sock_net(sk);
- sa_family_t sa_family = vxlan_get_sk_family(vs);
- __be16 port = inet_sk(sk)->inet_sport;
-
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
- if (dev->netdev_ops->ndo_add_vxlan_port)
- dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
- port);
- }
- rcu_read_unlock();
-}
-
-/* Notify netdevs that UDP port is no more listening */
-static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
-{
- struct net_device *dev;
- struct sock *sk = vs->sock->sk;
- struct net *net = sock_net(sk);
- sa_family_t sa_family = vxlan_get_sk_family(vs);
- __be16 port = inet_sk(sk)->inet_sport;
-
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
- if (dev->netdev_ops->ndo_del_vxlan_port)
- dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
- port);
- }
- rcu_read_unlock();
-}
-
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
@@ -1050,7 +997,10 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
spin_lock(&vn->sock_lock);
hlist_del_rcu(&vs->hlist);
- vxlan_notify_del_rx_port(vs);
+ udp_tunnel_notify_del_rx_port(vs->sock,
+ (vs->flags & VXLAN_F_GPE) ?
+ UDP_TUNNEL_TYPE_VXLAN_GPE :
+ UDP_TUNNEL_TYPE_VXLAN);
spin_unlock(&vn->sock_lock);
return true;
@@ -2525,30 +2475,24 @@ static struct device_type vxlan_type = {
.name = "vxlan",
};
-/* Calls the ndo_add_vxlan_port of the caller in order to
+/* Calls the ndo_add_udp_enc_port of the caller in order to
* supply the listening VXLAN udp ports. Callers are expected
- * to implement the ndo_add_vxlan_port.
+ * to implement the ndo_add_udp_enc_port.
*/
static void vxlan_push_rx_ports(struct net_device *dev)
{
struct vxlan_sock *vs;
struct net *net = dev_net(dev);
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- sa_family_t sa_family;
- __be16 port;
unsigned int i;
- if (!dev->netdev_ops->ndo_add_vxlan_port)
- return;
-
spin_lock(&vn->sock_lock);
for (i = 0; i < PORT_HASH_SIZE; ++i) {
- hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
- port = inet_sk(vs->sock->sk)->inet_sport;
- sa_family = vxlan_get_sk_family(vs);
- dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
- port);
- }
+ hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist)
+ udp_tunnel_push_rx_port(dev, vs->sock,
+ (vs->flags & VXLAN_F_GPE) ?
+ UDP_TUNNEL_TYPE_VXLAN_GPE :
+ UDP_TUNNEL_TYPE_VXLAN);
}
spin_unlock(&vn->sock_lock);
}
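The replay is driven from the NIC side: a driver that loses its offload state raises NETDEV_UDP_TUNNEL_PUSH_INFO, which lands in vxlan_push_rx_ports() via the notifier further below. A hedged sketch, assuming the companion helper introduced alongside this event:

static void example_nic_replay_tunnel_ports(struct net_device *dev)
{
	rtnl_lock();
	udp_tunnel_get_rx_info(dev);	/* raises NETDEV_UDP_TUNNEL_PUSH_INFO */
	rtnl_unlock();
}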
@@ -2750,7 +2694,10 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
- vxlan_notify_add_rx_port(vs);
+ udp_tunnel_notify_add_rx_port(sock,
+ (vs->flags & VXLAN_F_GPE) ?
+ UDP_TUNNEL_TYPE_VXLAN_GPE :
+ UDP_TUNNEL_TYPE_VXLAN);
spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
@@ -2952,30 +2899,6 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
return 0;
}
-struct net_device *vxlan_dev_create(struct net *net, const char *name,
- u8 name_assign_type, struct vxlan_config *conf)
-{
- struct nlattr *tb[IFLA_MAX+1];
- struct net_device *dev;
- int err;
-
- memset(&tb, 0, sizeof(tb));
-
- dev = rtnl_create_link(net, name, name_assign_type,
- &vxlan_link_ops, tb);
- if (IS_ERR(dev))
- return dev;
-
- err = vxlan_dev_configure(net, dev, conf);
- if (err < 0) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
-
- return dev;
-}
-EXPORT_SYMBOL_GPL(vxlan_dev_create);
-
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
@@ -3268,6 +3191,40 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
.get_link_net = vxlan_get_link_net,
};
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+ u8 name_assign_type,
+ struct vxlan_config *conf)
+{
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net_device *dev;
+ int err;
+
+ memset(&tb, 0, sizeof(tb));
+
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &vxlan_link_ops, tb);
+ if (IS_ERR(dev))
+ return dev;
+
+ err = vxlan_dev_configure(net, dev, conf);
+ if (err < 0) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0) {
+ LIST_HEAD(list_kill);
+
+ vxlan_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ return ERR_PTR(err);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
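Moving vxlan_dev_create() below vxlan_link_ops lets the new rtnl_configure_link() failure path call vxlan_dellink(). An illustrative external caller; the config is a minimal guess, not a verbatim in-tree user:

	struct vxlan_config conf = {
		.dst_port = htons(4789),	/* IANA VXLAN port; other fields defaulted */
	};
	struct net_device *vx;

	vx = vxlan_dev_create(net, "vxlan0", NET_NAME_USER, &conf);
	if (IS_ERR(vx))
		return PTR_ERR(vx);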
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
struct net_device *dev)
{
@@ -3298,7 +3255,7 @@ static int vxlan_netdevice_event(struct notifier_block *unused,
if (event == NETDEV_UNREGISTER)
vxlan_handle_lowerdev_unregister(vn, dev);
- else if (event == NETDEV_OFFLOAD_PUSH_VXLAN)
+ else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
vxlan_push_rx_ports(dev);
return NOTIFY_DONE;
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index a2fdd15f285a..33ab3345d333 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -280,6 +280,28 @@ config DSCC4
To compile this driver as a module, choose M here: the
module will be called dscc4.
+config FSL_UCC_HDLC
+ tristate "Freescale QUICC Engine HDLC support"
+ depends on HDLC
+ depends on QUICC_ENGINE
+ help
+	  Driver for the Freescale QUICC Engine HDLC controller. The
+	  driver supports HDLC in NMSI and TDM modes.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fsl_ucc_hdlc.
+
+config SLIC_DS26522
+	tristate "SLIC Maxim ds26522 card support"
+ depends on SPI
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+ help
+	  This module initializes and configures the Maxim ds26522
+	  SLIC card in T1 or E1 mode.
+
+ To compile this driver as a module, choose M here: the
+ module will be called slic_ds26522.
+
config DSCC4_PCISYNC
bool "Etinc PCISYNC features"
depends on DSCC4
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index c135ef47cbca..73c2326603fc 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -32,6 +32,8 @@ obj-$(CONFIG_WANXL) += wanxl.o
obj-$(CONFIG_PCI200SYN) += pci200syn.o
obj-$(CONFIG_PC300TOO) += pc300too.o
obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o
+obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o
+obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o
clean-files := wanxlfw.inc
$(obj)/wanxl.o: $(obj)/wanxlfw.inc
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
new file mode 100644
index 000000000000..19174ac1e338
--- /dev/null
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -0,0 +1,1192 @@
+/* Freescale QUICC Engine HDLC Device Driver
+ *
+ * Copyright 2016 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/hdlc.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <soc/fsl/qe/qe_tdm.h>
+#include <uapi/linux/if_arp.h>
+
+#include "fsl_ucc_hdlc.h"
+
+#define DRV_DESC "Freescale QE UCC HDLC Driver"
+#define DRV_NAME "ucc_hdlc"
+
+#define TDM_PPPOHT_SLIC_MAXIN
+#define BROKEN_FRAME_INFO
+
+static struct ucc_tdm_info utdm_primary_info = {
+ .uf_info = {
+ .tsa = 0,
+ .cdp = 0,
+ .cds = 1,
+ .ctsp = 1,
+ .ctss = 1,
+ .revd = 0,
+ .urfs = 256,
+ .utfs = 256,
+ .urfet = 128,
+ .urfset = 192,
+ .utfet = 128,
+ .utftt = 0x40,
+ .ufpt = 256,
+ .mode = UCC_FAST_PROTOCOL_MODE_HDLC,
+ .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
+ .tenc = UCC_FAST_TX_ENCODING_NRZ,
+ .renc = UCC_FAST_RX_ENCODING_NRZ,
+ .tcrc = UCC_FAST_16_BIT_CRC,
+ .synl = UCC_FAST_SYNC_LEN_NOT_USED,
+ },
+
+ .si_info = {
+#ifdef TDM_PPPOHT_SLIC_MAXIN
+ .simr_rfsd = 1,
+ .simr_tfsd = 2,
+#else
+ .simr_rfsd = 0,
+ .simr_tfsd = 0,
+#endif
+ .simr_crt = 0,
+ .simr_sl = 0,
+ .simr_ce = 1,
+ .simr_fe = 1,
+ .simr_gm = 0,
+ },
+};
+
+static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
+
+static int uhdlc_init(struct ucc_hdlc_private *priv)
+{
+ struct ucc_tdm_info *ut_info;
+ struct ucc_fast_info *uf_info;
+ u32 cecr_subblock;
+ u16 bd_status;
+ int ret, i;
+ void *bd_buffer;
+ dma_addr_t bd_dma_addr;
+	s32 riptr;
+	s32 tiptr;
+ u32 gumr;
+
+ ut_info = priv->ut_info;
+ uf_info = &ut_info->uf_info;
+
+ if (priv->tsa) {
+ uf_info->tsa = 1;
+ uf_info->ctsp = 1;
+ }
+ uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
+ UCC_HDLC_UCCE_TXB) << 16);
+
+ ret = ucc_fast_init(uf_info, &priv->uccf);
+ if (ret) {
+		dev_err(priv->dev, "Failed to init uccf\n");
+ return ret;
+ }
+
+ priv->uf_regs = priv->uccf->uf_regs;
+ ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ /* Loopback mode */
+ if (priv->loopback) {
+ dev_info(priv->dev, "Loopback Mode\n");
+ gumr = ioread32be(&priv->uf_regs->gumr);
+ gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
+ UCC_FAST_GUMR_TCI);
+ gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
+ iowrite32be(gumr, &priv->uf_regs->gumr);
+ }
+
+ /* Initialize SI */
+ if (priv->tsa)
+ ucc_tdm_init(priv->utdm, priv->ut_info);
+
+ /* Write to QE CECR, UCCx channel to Stop Transmission */
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+	/* Set UPSMR normal mode (needs fixing) */
+ iowrite32be(0, &priv->uf_regs->upsmr);
+
+ priv->rx_ring_size = RX_BD_RING_LEN;
+ priv->tx_ring_size = TX_BD_RING_LEN;
+ /* Alloc Rx BD */
+ priv->rx_bd_base = dma_alloc_coherent(priv->dev,
+ RX_BD_RING_LEN * sizeof(struct qe_bd *),
+ &priv->dma_rx_bd, GFP_KERNEL);
+
+ if (!priv->rx_bd_base) {
+ dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
+ ret = -ENOMEM;
+ goto rxbd_alloc_error;
+ }
+
+ /* Alloc Tx BD */
+ priv->tx_bd_base = dma_alloc_coherent(priv->dev,
+ TX_BD_RING_LEN * sizeof(struct qe_bd *),
+ &priv->dma_tx_bd, GFP_KERNEL);
+
+ if (!priv->tx_bd_base) {
+ dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
+ ret = -ENOMEM;
+ goto txbd_alloc_error;
+ }
+
+ /* Alloc parameter ram for ucc hdlc */
+ priv->ucc_pram_offset = qe_muram_alloc(sizeof(priv->ucc_pram),
+ ALIGNMENT_OF_UCC_HDLC_PRAM);
+
+ if (priv->ucc_pram_offset < 0) {
+		dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter\n");
+ ret = -ENOMEM;
+ goto pram_alloc_error;
+ }
+
+	priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
+				  GFP_KERNEL);
+	if (!priv->rx_skbuff) {
+		ret = -ENOMEM;
+		goto rx_skb_alloc_error;
+	}
+
+	priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
+				  GFP_KERNEL);
+	if (!priv->tx_skbuff) {
+		ret = -ENOMEM;
+		goto tx_skb_alloc_error;
+	}
+
+ priv->skb_curtx = 0;
+ priv->skb_dirtytx = 0;
+ priv->curtx_bd = priv->tx_bd_base;
+ priv->dirty_tx = priv->tx_bd_base;
+ priv->currx_bd = priv->rx_bd_base;
+ priv->currx_bdnum = 0;
+
+ /* init parameter base */
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
+
+ priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
+ qe_muram_addr(priv->ucc_pram_offset);
+
+ /* Zero out parameter ram */
+ memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
+
+ /* Alloc riptr, tiptr */
+ riptr = qe_muram_alloc(32, 32);
+ if (riptr < 0) {
+ dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
+ ret = -ENOMEM;
+ goto riptr_alloc_error;
+ }
+
+ tiptr = qe_muram_alloc(32, 32);
+ if (tiptr < 0) {
+ dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
+ ret = -ENOMEM;
+ goto tiptr_alloc_error;
+ }
+
+ /* Set RIPTR, TIPTR */
+ iowrite16be(riptr, &priv->ucc_pram->riptr);
+ iowrite16be(tiptr, &priv->ucc_pram->tiptr);
+
+ /* Set MRBLR */
+ iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
+
+ /* Set RBASE, TBASE */
+ iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
+ iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
+
+ /* Set RSTATE, TSTATE */
+ iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
+ iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
+
+ /* Set C_MASK, C_PRES for 16bit CRC */
+ iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
+ iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
+
+ iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
+ iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
+ iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
+ iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
+ iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
+ iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
+ iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
+ iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
+
+ /* Get BD buffer */
+ bd_buffer = dma_alloc_coherent(priv->dev,
+ (RX_BD_RING_LEN + TX_BD_RING_LEN) *
+ MAX_RX_BUF_LENGTH,
+ &bd_dma_addr, GFP_KERNEL);
+
+ if (!bd_buffer) {
+ dev_err(priv->dev, "Could not allocate buffer descriptors\n");
+ ret = -ENOMEM;
+ goto bd_alloc_error;
+ }
+
+ memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN)
+ * MAX_RX_BUF_LENGTH);
+
+ priv->rx_buffer = bd_buffer;
+ priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
+
+ priv->dma_rx_addr = bd_dma_addr;
+ priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
+
+ for (i = 0; i < RX_BD_RING_LEN; i++) {
+ if (i < (RX_BD_RING_LEN - 1))
+ bd_status = R_E_S | R_I_S;
+ else
+ bd_status = R_E_S | R_I_S | R_W_S;
+
+ iowrite16be(bd_status, &priv->rx_bd_base[i].status);
+ iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
+ &priv->rx_bd_base[i].buf);
+ }
+
+ for (i = 0; i < TX_BD_RING_LEN; i++) {
+ if (i < (TX_BD_RING_LEN - 1))
+ bd_status = T_I_S | T_TC_S;
+ else
+ bd_status = T_I_S | T_TC_S | T_W_S;
+
+ iowrite16be(bd_status, &priv->tx_bd_base[i].status);
+ iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
+ &priv->tx_bd_base[i].buf);
+ }
+
+ return 0;
+
+bd_alloc_error:
+ qe_muram_free(tiptr);
+tiptr_alloc_error:
+ qe_muram_free(riptr);
+riptr_alloc_error:
+ kfree(priv->tx_skbuff);
+tx_skb_alloc_error:
+ kfree(priv->rx_skbuff);
+rx_skb_alloc_error:
+ qe_muram_free(priv->ucc_pram_offset);
+pram_alloc_error:
+ dma_free_coherent(priv->dev,
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
+ priv->tx_bd_base, priv->dma_tx_bd);
+txbd_alloc_error:
+ dma_free_coherent(priv->dev,
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
+ priv->rx_bd_base, priv->dma_rx_bd);
+rxbd_alloc_error:
+ ucc_fast_free(priv->uccf);
+
+ return ret;
+}
+
+static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
+ struct qe_bd __iomem *bd;
+ u16 bd_status;
+ unsigned long flags;
+ u8 *send_buf;
+ int i;
+ u16 *proto_head;
+
+ switch (dev->type) {
+ case ARPHRD_RAWHDLC:
+ if (skb_headroom(skb) < HDLC_HEAD_LEN) {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+			netdev_err(dev, "Not enough space for hdlc head\n");
+			return NETDEV_TX_OK;
+ }
+
+ skb_push(skb, HDLC_HEAD_LEN);
+
+ proto_head = (u16 *)skb->data;
+ *proto_head = htons(DEFAULT_HDLC_HEAD);
+
+ dev->stats.tx_bytes += skb->len;
+ break;
+
+ case ARPHRD_PPP:
+ proto_head = (u16 *)skb->data;
+ if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ netdev_err(dev, "Wrong ppp header\n");
+			return NETDEV_TX_OK;
+ }
+
+ dev->stats.tx_bytes += skb->len;
+ break;
+
+ default:
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+ }
+
+ pr_info("Tx data skb->len:%d ", skb->len);
+ send_buf = (u8 *)skb->data;
+ pr_info("\nTransmitted data:\n");
+ for (i = 0; i < 16; i++) {
+ if (i == skb->len)
+ pr_info("++++");
+ else
+ pr_info("%02x\n", send_buf[i]);
+ }
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Start from the next BD that should be filled */
+ bd = priv->curtx_bd;
+ bd_status = ioread16be(&bd->status);
+ /* Save the skb pointer so we can free it later */
+ priv->tx_skbuff[priv->skb_curtx] = skb;
+
+ /* Update the current skb pointer (wrapping if this was the last) */
+ priv->skb_curtx =
+ (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
+
+ /* copy skb data to tx buffer for sdma processing */
+ memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
+ skb->data, skb->len);
+
+ /* set bd status and length */
+ bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
+
+ iowrite16be(bd_status, &bd->status);
+ iowrite16be(skb->len, &bd->length);
+
+ /* Move to next BD in the ring */
+ if (!(bd_status & T_W_S))
+ bd += 1;
+ else
+ bd = priv->tx_bd_base;
+
+ if (bd == priv->dirty_tx) {
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+ }
+
+ priv->curtx_bd = bd;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return NETDEV_TX_OK;
+}
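The masked increment of skb_curtx works only because the ring lengths are powers of two; wrap-around of the BD pointer itself is signalled by the T_W_S bit in the last descriptor. The index math in isolation:

/* next index in a power-of-two ring; TX_RING_MOD_MASK(n) == n - 1 */
static inline u16 example_next_tx_index(u16 cur)
{
	return (cur + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
}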
+
+static int hdlc_tx_done(struct ucc_hdlc_private *priv)
+{
+ /* Start from the next BD that should be filled */
+ struct net_device *dev = priv->ndev;
+ struct qe_bd *bd; /* BD pointer */
+ u16 bd_status;
+
+ bd = priv->dirty_tx;
+ bd_status = ioread16be(&bd->status);
+
+ /* Normal processing. */
+ while ((bd_status & T_R_S) == 0) {
+ struct sk_buff *skb;
+
+ /* BD contains already transmitted buffer. */
+ /* Handle the transmitted buffer and release */
+ /* the BD to be used with the current frame */
+
+ skb = priv->tx_skbuff[priv->skb_dirtytx];
+ if (!skb)
+ break;
+ pr_info("TxBD: %x\n", bd_status);
+ dev->stats.tx_packets++;
+ memset(priv->tx_buffer +
+ (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
+ 0, skb->len);
+ dev_kfree_skb_irq(skb);
+
+ priv->tx_skbuff[priv->skb_dirtytx] = NULL;
+ priv->skb_dirtytx =
+ (priv->skb_dirtytx +
+ 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
+
+ /* We freed a buffer, so now we can restart transmission */
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ /* Advance the confirmation BD pointer */
+ if (!(bd_status & T_W_S))
+ bd += 1;
+ else
+ bd = priv->tx_bd_base;
+ bd_status = ioread16be(&bd->status);
+ }
+ priv->dirty_tx = bd;
+
+ return 0;
+}
+
+static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
+{
+ struct net_device *dev = priv->ndev;
+ struct sk_buff *skb;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct qe_bd *bd;
+	u16 bd_status;
+ u16 length, howmany = 0;
+ u8 *bdbuffer;
+ int i;
+ static int entry;
+
+ bd = priv->currx_bd;
+ bd_status = ioread16be(&bd->status);
+
+ /* while there are received buffers and BD is full (~R_E) */
+ while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
+ if (bd_status & R_OV_S)
+ dev->stats.rx_over_errors++;
+ if (bd_status & R_CR_S) {
+#ifdef BROKEN_FRAME_INFO
+ pr_info("Broken Frame with RxBD: %x\n", bd_status);
+#endif
+ dev->stats.rx_crc_errors++;
+ dev->stats.rx_dropped++;
+ goto recycle;
+ }
+ bdbuffer = priv->rx_buffer +
+ (priv->currx_bdnum * MAX_RX_BUF_LENGTH);
+ length = ioread16be(&bd->length);
+
+ pr_info("Received data length:%d", length);
+ pr_info("while entry times:%d", entry++);
+
+ pr_info("\nReceived data:\n");
+ for (i = 0; (i < 16); i++) {
+ if (i == length)
+ pr_info("++++");
+ else
+ pr_info("%02x\n", bdbuffer[i]);
+ }
+
+ switch (dev->type) {
+ case ARPHRD_RAWHDLC:
+ bdbuffer += HDLC_HEAD_LEN;
+ length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
+
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb_put(skb, length);
+ skb->len = length;
+ skb->dev = dev;
+ memcpy(skb->data, bdbuffer, length);
+ break;
+
+ case ARPHRD_PPP:
+ length -= HDLC_CRC_SIZE;
+
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb_put(skb, length);
+ skb->len = length;
+ skb->dev = dev;
+ memcpy(skb->data, bdbuffer, length);
+			break;
+
+		default:
+			/* attach() only allows RAWHDLC and PPP, but be safe */
+			dev->stats.rx_dropped++;
+			goto recycle;
+		}
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ howmany++;
+ if (hdlc->proto)
+ skb->protocol = hdlc_type_trans(skb, dev);
+ pr_info("skb->protocol:%x\n", skb->protocol);
+ netif_receive_skb(skb);
+
+recycle:
+ iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);
+
+ /* update to point at the next bd */
+ if (bd_status & R_W_S) {
+ priv->currx_bdnum = 0;
+ bd = priv->rx_bd_base;
+ } else {
+ if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
+ priv->currx_bdnum += 1;
+ else
+ priv->currx_bdnum = RX_BD_RING_LEN - 1;
+
+ bd += 1;
+ }
+
+ bd_status = ioread16be(&bd->status);
+ }
+
+ priv->currx_bd = bd;
+ return howmany;
+}
+
+static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
+{
+ struct ucc_hdlc_private *priv = container_of(napi,
+ struct ucc_hdlc_private,
+ napi);
+ int howmany;
+
+ /* Tx event processing */
+ spin_lock(&priv->lock);
+ hdlc_tx_done(priv);
+ spin_unlock(&priv->lock);
+
+ howmany = 0;
+ howmany += hdlc_rx_done(priv, budget - howmany);
+
+ if (howmany < budget) {
+ napi_complete(napi);
+ qe_setbits32(priv->uccf->p_uccm,
+ (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
+ }
+
+ return howmany;
+}
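This follows the standard NAPI contract: consume at most budget packets, and re-arm interrupts (here the UCC event mask) only after napi_complete() when under budget. The contract in skeleton form, with a hypothetical rx routine:

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = example_rx_work(budget);	/* hypothetical; returns packets consumed */

	if (done < budget) {
		napi_complete(napi);
		/* re-enable the device's rx/tx event interrupts here */
	}
	return done;
}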
+
+static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
+{
+ struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
+ struct net_device *dev = priv->ndev;
+ struct ucc_fast_private *uccf;
+ struct ucc_tdm_info *ut_info;
+ u32 ucce;
+ u32 uccm;
+
+ ut_info = priv->ut_info;
+ uccf = priv->uccf;
+
+ ucce = ioread32be(uccf->p_ucce);
+ uccm = ioread32be(uccf->p_uccm);
+ ucce &= uccm;
+ iowrite32be(ucce, uccf->p_ucce);
+ pr_info("irq ucce:%x\n", ucce);
+ if (!ucce)
+ return IRQ_NONE;
+
+ if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
+ if (napi_schedule_prep(&priv->napi)) {
+ uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
+ << 16);
+ iowrite32be(uccm, uccf->p_uccm);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ /* Errors and other events */
+ if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
+ dev->stats.rx_errors++;
+ if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
+ dev->stats.tx_errors++;
+
+ return IRQ_HANDLED;
+}
+
+static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(te1_settings);
+ te1_settings line;
+ struct ucc_hdlc_private *priv = netdev_priv(dev);
+
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_IFACE:
+ ifr->ifr_settings.type = IF_IFACE_E1;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ line.clock_type = priv->clocking;
+ line.clock_rate = 0;
+ line.loopback = 0;
+
+ if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+static int uhdlc_open(struct net_device *dev)
+{
+ u32 cecr_subblock;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct ucc_hdlc_private *priv = hdlc->priv;
+ struct ucc_tdm *utdm = priv->utdm;
+
+ if (priv->hdlc_busy != 1) {
+ if (request_irq(priv->ut_info->uf_info.irq,
+ ucc_hdlc_irq_handler, 0, "hdlc", priv))
+ return -ENODEV;
+
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(
+ priv->ut_info->uf_info.ucc_num);
+
+ qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ /* Enable the TDM port */
+ if (priv->tsa)
+ utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
+
+ priv->hdlc_busy = 1;
+ netif_device_attach(priv->ndev);
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+ hdlc_open(dev);
+ }
+
+ return 0;
+}
+
+static void uhdlc_memclean(struct ucc_hdlc_private *priv)
+{
+	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
+	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
+
+ if (priv->rx_bd_base) {
+ dma_free_coherent(priv->dev,
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
+ priv->rx_bd_base, priv->dma_rx_bd);
+
+ priv->rx_bd_base = NULL;
+ priv->dma_rx_bd = 0;
+ }
+
+ if (priv->tx_bd_base) {
+ dma_free_coherent(priv->dev,
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
+ priv->tx_bd_base, priv->dma_tx_bd);
+
+ priv->tx_bd_base = NULL;
+ priv->dma_tx_bd = 0;
+ }
+
+ if (priv->ucc_pram) {
+ qe_muram_free(priv->ucc_pram_offset);
+ priv->ucc_pram = NULL;
+ priv->ucc_pram_offset = 0;
+ }
+
+ kfree(priv->rx_skbuff);
+ priv->rx_skbuff = NULL;
+
+ kfree(priv->tx_skbuff);
+ priv->tx_skbuff = NULL;
+
+ if (priv->uf_regs) {
+ iounmap(priv->uf_regs);
+ priv->uf_regs = NULL;
+ }
+
+ if (priv->uccf) {
+ ucc_fast_free(priv->uccf);
+ priv->uccf = NULL;
+ }
+
+ if (priv->rx_buffer) {
+ dma_free_coherent(priv->dev,
+ RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
+ priv->rx_buffer, priv->dma_rx_addr);
+ priv->rx_buffer = NULL;
+ priv->dma_rx_addr = 0;
+ }
+
+ if (priv->tx_buffer) {
+ dma_free_coherent(priv->dev,
+ TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
+ priv->tx_buffer, priv->dma_tx_addr);
+ priv->tx_buffer = NULL;
+ priv->dma_tx_addr = 0;
+ }
+}
+
+static int uhdlc_close(struct net_device *dev)
+{
+ struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
+ struct ucc_tdm *utdm = priv->utdm;
+ u32 cecr_subblock;
+
+ napi_disable(&priv->napi);
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(
+ priv->ut_info->uf_info.ucc_num);
+
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ if (priv->tsa)
+ utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);
+
+ ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ free_irq(priv->ut_info->uf_info.irq, priv);
+ netif_stop_queue(dev);
+ priv->hdlc_busy = 0;
+
+ return 0;
+}
+
+static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
+
+ if (encoding != ENCODING_NRZ &&
+ encoding != ENCODING_NRZI)
+ return -EINVAL;
+
+ if (parity != PARITY_NONE &&
+ parity != PARITY_CRC32_PR1_CCITT &&
+ parity != PARITY_CRC16_PR1_CCITT)
+ return -EINVAL;
+
+ priv->encoding = encoding;
+ priv->parity = parity;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void store_clk_config(struct ucc_hdlc_private *priv)
+{
+ struct qe_mux *qe_mux_reg = &qe_immr->qmx;
+
+ /* store si clk */
+ priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
+ priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);
+
+ /* store si sync */
+ priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);
+
+ /* store ucc clk */
+ memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
+}
+
+static void resume_clk_config(struct ucc_hdlc_private *priv)
+{
+ struct qe_mux *qe_mux_reg = &qe_immr->qmx;
+
+ memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
+
+ iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
+ iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);
+
+ iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
+}
+
+static int uhdlc_suspend(struct device *dev)
+{
+ struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
+ struct ucc_tdm_info *ut_info;
+ struct ucc_fast __iomem *uf_regs;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!netif_running(priv->ndev))
+ return 0;
+
+ netif_device_detach(priv->ndev);
+ napi_disable(&priv->napi);
+
+ ut_info = priv->ut_info;
+ uf_regs = priv->uf_regs;
+
+ /* backup gumr guemr*/
+ priv->gumr = ioread32be(&uf_regs->gumr);
+ priv->guemr = ioread8(&uf_regs->guemr);
+
+ priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
+ GFP_KERNEL);
+ if (!priv->ucc_pram_bak)
+ return -ENOMEM;
+
+ /* backup HDLC parameter */
+ memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
+ sizeof(struct ucc_hdlc_param));
+
+ /* store the clk configuration */
+ store_clk_config(priv);
+
+ /* save power */
+ ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ dev_dbg(dev, "ucc hdlc suspend\n");
+ return 0;
+}
+
+static int uhdlc_resume(struct device *dev)
+{
+ struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
+ struct ucc_tdm *utdm = priv->utdm;
+ struct ucc_tdm_info *ut_info;
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_fast_private *uccf;
+ struct ucc_fast_info *uf_info;
+ int ret, i;
+ u32 cecr_subblock;
+ u16 bd_status;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!netif_running(priv->ndev))
+ return 0;
+
+ ut_info = priv->ut_info;
+ uf_info = &ut_info->uf_info;
+ uf_regs = priv->uf_regs;
+ uccf = priv->uccf;
+
+ /* restore gumr guemr */
+ iowrite8(priv->guemr, &uf_regs->guemr);
+ iowrite32be(priv->gumr, &uf_regs->gumr);
+
+ /* Set Virtual Fifo registers */
+ iowrite16be(uf_info->urfs, &uf_regs->urfs);
+ iowrite16be(uf_info->urfet, &uf_regs->urfet);
+ iowrite16be(uf_info->urfset, &uf_regs->urfset);
+ iowrite16be(uf_info->utfs, &uf_regs->utfs);
+ iowrite16be(uf_info->utfet, &uf_regs->utfet);
+ iowrite16be(uf_info->utftt, &uf_regs->utftt);
+ /* utfb, urfb are offsets from MURAM base */
+ iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
+ iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
+
+ /* Rx Tx and sync clock routing */
+ resume_clk_config(priv);
+
+ iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
+ iowrite32be(0xffffffff, &uf_regs->ucce);
+
+ ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ /* rebuild SIRAM */
+ if (priv->tsa)
+ ucc_tdm_init(priv->utdm, priv->ut_info);
+
+ /* Write to QE CECR, UCCx channel to Stop Transmission */
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ /* Set UPSMR normal mode */
+ iowrite32be(0, &uf_regs->upsmr);
+
+ /* init parameter base */
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
+
+ priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
+ qe_muram_addr(priv->ucc_pram_offset);
+
+ /* restore ucc parameter */
+ memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
+ sizeof(struct ucc_hdlc_param));
+ kfree(priv->ucc_pram_bak);
+
+ /* rebuild BD entry */
+ for (i = 0; i < RX_BD_RING_LEN; i++) {
+ if (i < (RX_BD_RING_LEN - 1))
+ bd_status = R_E_S | R_I_S;
+ else
+ bd_status = R_E_S | R_I_S | R_W_S;
+
+ iowrite16be(bd_status, &priv->rx_bd_base[i].status);
+ iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
+ &priv->rx_bd_base[i].buf);
+ }
+
+ for (i = 0; i < TX_BD_RING_LEN; i++) {
+ if (i < (TX_BD_RING_LEN - 1))
+ bd_status = T_I_S | T_TC_S;
+ else
+ bd_status = T_I_S | T_TC_S | T_W_S;
+
+ iowrite16be(bd_status, &priv->tx_bd_base[i].status);
+ iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
+ &priv->tx_bd_base[i].buf);
+ }
+
+ /* if hdlc is busy enable TX and RX */
+ if (priv->hdlc_busy == 1) {
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(
+ priv->ut_info->uf_info.ucc_num);
+
+ qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ /* Enable the TDM port */
+ if (priv->tsa)
+ utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
+ }
+
+ napi_enable(&priv->napi);
+ netif_device_attach(priv->ndev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops uhdlc_pm_ops = {
+ .suspend = uhdlc_suspend,
+ .resume = uhdlc_resume,
+ .freeze = uhdlc_suspend,
+ .thaw = uhdlc_resume,
+};
+
+#define HDLC_PM_OPS (&uhdlc_pm_ops)
+
+#else
+
+#define HDLC_PM_OPS NULL
+
+#endif
+static const struct net_device_ops uhdlc_ops = {
+ .ndo_open = uhdlc_open,
+ .ndo_stop = uhdlc_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = uhdlc_ioctl,
+};
+
+static int ucc_hdlc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ucc_hdlc_private *uhdlc_priv = NULL;
+ struct ucc_tdm_info *ut_info;
+ struct ucc_tdm *utdm;
+ struct resource res;
+ struct net_device *dev;
+ hdlc_device *hdlc;
+ int ucc_num;
+ const char *sprop;
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32_index(np, "cell-index", 0, &val);
+ if (ret) {
+ dev_err(&pdev->dev, "Invalid ucc property\n");
+ return -ENODEV;
+ }
+
+ ucc_num = val - 1;
+ if ((ucc_num > 3) || (ucc_num < 0)) {
+		dev_err(&pdev->dev, "Invalid UCC num\n");
+ return -EINVAL;
+ }
+
+ memcpy(&utdm_info[ucc_num], &utdm_primary_info,
+ sizeof(utdm_primary_info));
+
+ ut_info = &utdm_info[ucc_num];
+ ut_info->uf_info.ucc_num = ucc_num;
+
+ sprop = of_get_property(np, "rx-clock-name", NULL);
+ if (sprop) {
+ ut_info->uf_info.rx_clock = qe_clock_source(sprop);
+ if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
+ (ut_info->uf_info.rx_clock > QE_CLK24)) {
+ dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
+ return -EINVAL;
+ }
+
+ sprop = of_get_property(np, "tx-clock-name", NULL);
+ if (sprop) {
+ ut_info->uf_info.tx_clock = qe_clock_source(sprop);
+ if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
+ (ut_info->uf_info.tx_clock > QE_CLK24)) {
+ dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
+ return -EINVAL;
+ }
+
+	/* use the same clock when working in loopback */
+ if (ut_info->uf_info.rx_clock == ut_info->uf_info.tx_clock)
+ qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return -EINVAL;
+
+ ut_info->uf_info.regs = res.start;
+ ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
+
+ uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
+ if (!uhdlc_priv) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "No mem to alloc hdlc private data\n");
+ goto err_alloc_priv;
+ }
+
+ dev_set_drvdata(&pdev->dev, uhdlc_priv);
+ uhdlc_priv->dev = &pdev->dev;
+ uhdlc_priv->ut_info = ut_info;
+
+ if (of_get_property(np, "fsl,tdm-interface", NULL))
+ uhdlc_priv->tsa = 1;
+
+ if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
+ uhdlc_priv->loopback = 1;
+
+ if (uhdlc_priv->tsa == 1) {
+ utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
+ if (!utdm) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
+ goto err_alloc_utdm;
+ }
+ uhdlc_priv->utdm = utdm;
+ ret = ucc_of_parse_tdm(np, utdm, ut_info);
+ if (ret)
+ goto err_miss_tsa_property;
+ }
+
+ ret = uhdlc_init(uhdlc_priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init uhdlc\n");
+ goto err_hdlc_init;
+ }
+
+ dev = alloc_hdlcdev(uhdlc_priv);
+ if (!dev) {
+ ret = -ENOMEM;
+ pr_err("ucc_hdlc: unable to allocate memory\n");
+ goto err_hdlc_init;
+ }
+
+ uhdlc_priv->ndev = dev;
+ hdlc = dev_to_hdlc(dev);
+ dev->tx_queue_len = 16;
+ dev->netdev_ops = &uhdlc_ops;
+ hdlc->attach = ucc_hdlc_attach;
+ hdlc->xmit = ucc_hdlc_tx;
+ netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
+ if (register_hdlc_device(dev)) {
+ ret = -ENOBUFS;
+ pr_err("ucc_hdlc: unable to register hdlc device\n");
+ free_netdev(dev);
+ goto err_hdlc_init;
+ }
+
+ return 0;
+
+err_hdlc_init:
+err_miss_tsa_property:
+	if (uhdlc_priv->tsa)
+		kfree(utdm);
+err_alloc_utdm:
+	kfree(uhdlc_priv);
+err_alloc_priv:
+ return ret;
+}
+
+static int ucc_hdlc_remove(struct platform_device *pdev)
+{
+ struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
+
+ uhdlc_memclean(priv);
+
+ if (priv->utdm->si_regs) {
+ iounmap(priv->utdm->si_regs);
+ priv->utdm->si_regs = NULL;
+ }
+
+ if (priv->utdm->siram) {
+ iounmap(priv->utdm->siram);
+ priv->utdm->siram = NULL;
+ }
+ kfree(priv);
+
+ dev_info(&pdev->dev, "UCC based hdlc module removed\n");
+
+ return 0;
+}
+
+static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
+ {
+ .compatible = "fsl,ucc-hdlc",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
+
+static struct platform_driver ucc_hdlc_driver = {
+ .probe = ucc_hdlc_probe,
+ .remove = ucc_hdlc_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .pm = HDLC_PM_OPS,
+ .of_match_table = fsl_ucc_hdlc_of_match,
+ },
+};
+
+static int __init ucc_hdlc_init(void)
+{
+ return platform_driver_register(&ucc_hdlc_driver);
+}
+
+static void __exit ucc_hdlc_exit(void)
+{
+ platform_driver_unregister(&ucc_hdlc_driver);
+}
+
+module_init(ucc_hdlc_init);
+module_exit(ucc_hdlc_exit);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h
new file mode 100644
index 000000000000..881ecdeef076
--- /dev/null
+++ b/drivers/net/wan/fsl_ucc_hdlc.h
@@ -0,0 +1,147 @@
+/* Freescale QUICC Engine HDLC Device Driver
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _UCC_HDLC_H_
+#define _UCC_HDLC_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
+
+/* UCC HDLC event register */
+#define UCCE_HDLC_RX_EVENTS \
+(UCC_HDLC_UCCE_RXF | UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_BSY)
+#define UCCE_HDLC_TX_EVENTS (UCC_HDLC_UCCE_TXB | UCC_HDLC_UCCE_TXE)
+
+struct ucc_hdlc_param {
+ __be16 riptr;
+ __be16 tiptr;
+ __be16 res0;
+ __be16 mrblr;
+ __be32 rstate;
+ __be32 rbase;
+ __be16 rbdstat;
+ __be16 rbdlen;
+ __be32 rdptr;
+ __be32 tstate;
+ __be32 tbase;
+ __be16 tbdstat;
+ __be16 tbdlen;
+ __be32 tdptr;
+ __be32 rbptr;
+ __be32 tbptr;
+ __be32 rcrc;
+ __be32 res1;
+ __be32 tcrc;
+ __be32 res2;
+ __be32 res3;
+ __be32 c_mask;
+ __be32 c_pres;
+ __be16 disfc;
+ __be16 crcec;
+ __be16 abtsc;
+ __be16 nmarc;
+ __be32 max_cnt;
+ __be16 mflr;
+ __be16 rfthr;
+ __be16 rfcnt;
+ __be16 hmask;
+ __be16 haddr1;
+ __be16 haddr2;
+ __be16 haddr3;
+ __be16 haddr4;
+ __be16 ts_tmp;
+ __be16 tmp_mb;
+};
+
+struct ucc_hdlc_private {
+ struct ucc_tdm *utdm;
+ struct ucc_tdm_info *ut_info;
+ struct ucc_fast_private *uccf;
+ struct device *dev;
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct ucc_fast __iomem *uf_regs; /* UCC Fast registers */
+ struct ucc_hdlc_param __iomem *ucc_pram;
+ u16 tsa;
+ bool hdlc_busy;
+ bool loopback;
+
+ u8 *tx_buffer;
+ u8 *rx_buffer;
+ dma_addr_t dma_tx_addr;
+ dma_addr_t dma_rx_addr;
+
+ struct qe_bd *tx_bd_base;
+ struct qe_bd *rx_bd_base;
+ dma_addr_t dma_tx_bd;
+ dma_addr_t dma_rx_bd;
+ struct qe_bd *curtx_bd;
+ struct qe_bd *currx_bd;
+ struct qe_bd *dirty_tx;
+ u16 currx_bdnum;
+
+ struct sk_buff **tx_skbuff;
+ struct sk_buff **rx_skbuff;
+ u16 skb_curtx;
+ u16 skb_currx;
+ unsigned short skb_dirtytx;
+
+ unsigned short tx_ring_size;
+ unsigned short rx_ring_size;
+	s32 ucc_pram_offset;
+
+ unsigned short encoding;
+ unsigned short parity;
+ u32 clocking;
+ spinlock_t lock; /* lock for Tx BD and Tx buffer */
+#ifdef CONFIG_PM
+ struct ucc_hdlc_param *ucc_pram_bak;
+ u32 gumr;
+ u8 guemr;
+ u32 cmxsi1cr_l, cmxsi1cr_h;
+ u32 cmxsi1syr;
+ u32 cmxucr[4];
+#endif
+};
+
+#define TX_BD_RING_LEN 0x10
+#define RX_BD_RING_LEN 0x20
+#define RX_CLEAN_MAX 0x10
+#define NUM_OF_BUF 4
+#define MAX_RX_BUF_LENGTH (48 * 0x20)
+#define MAX_FRAME_LENGTH (MAX_RX_BUF_LENGTH + 8)
+#define ALIGNMENT_OF_UCC_HDLC_PRAM 64
+#define SI_BANK_SIZE 128
+#define MAX_HDLC_NUM 4
+#define HDLC_HEAD_LEN 2
+#define HDLC_CRC_SIZE 2
+#define TX_RING_MOD_MASK(size) (size - 1)
+#define RX_RING_MOD_MASK(size) (size - 1)
+
+#define HDLC_HEAD_MASK 0x0000
+#define DEFAULT_HDLC_HEAD 0xff44
+#define DEFAULT_ADDR_MASK 0x00ff
+#define DEFAULT_HDLC_ADDR 0x00ff
+
+#define BMR_GBL 0x20000000
+#define BMR_BIG_ENDIAN 0x10000000
+#define CRC_16BIT_MASK 0x0000F0B8
+#define CRC_16BIT_PRES 0x0000FFFF
+#define DEFAULT_RFTHR 1
+
+#define DEFAULT_PPP_HEAD 0xff03
+
+#endif
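Since TX_RING_MOD_MASK()/RX_RING_MOD_MASK() assume power-of-two ring lengths, a compile-time guard could be added inside any function body of the driver; a suggestion, not part of the patch:

	BUILD_BUG_ON(TX_BD_RING_LEN & (TX_BD_RING_LEN - 1));	/* 0x10: power of two */
	BUILD_BUG_ON(RX_BD_RING_LEN & (RX_BD_RING_LEN - 1));	/* 0x20: power of two */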
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
new file mode 100644
index 000000000000..d06a887a2352
--- /dev/null
+++ b/drivers/net/wan/slic_ds26522.c
@@ -0,0 +1,255 @@
+/*
+ * drivers/net/wan/slic_ds26522.c
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ * Author: Zhao Qiang <qiang.zhao@nxp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
+#include <linux/param.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include "slic_ds26522.h"
+
+#define DRV_NAME "ds26522"
+
+#define SLIC_TRANS_LEN 1
+#define SLIC_TWO_LEN 2
+#define SLIC_THREE_LEN 3
+
+static struct spi_device *g_spi;
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhao Qiang <B45475@freescale.com>");
+
+/* the read/write format of address is
+ * w/r|A13|A12|A11|A10|A9|A8|A7|A6|A5|A4|A3|A2|A1|A0|x
+ */
+static void slic_write(struct spi_device *spi, u16 addr,
+ u8 data)
+{
+ u8 temp[3];
+
+ addr = bitrev16(addr) >> 1;
+ data = bitrev8(data);
+ temp[0] = (u8)((addr >> 8) & 0x7f);
+ temp[1] = (u8)(addr & 0xfe);
+ temp[2] = data;
+
+ /* write spi addr and value */
+ spi_write(spi, &temp[0], SLIC_THREE_LEN);
+}
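A worked check of the framing math: for addr = 0x0181 (DS26522_TCR1_ADDR), bitrev16() yields 0x8180 and the shift gives 0x40c0, so a write emits bytes 0x40 0xc0 while a read would emit 0xc0 0xc0 (MSB set). The same transform factored into a standalone helper:

static void example_slic_addr_bytes(u16 addr, bool read, u8 out[2])
{
	addr = bitrev16(addr) >> 1;
	out[0] = (u8)(((addr >> 8) & 0x7f) | (read ? 0x80 : 0x00));
	out[1] = (u8)(addr & 0xfe);
}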
+
+static u8 slic_read(struct spi_device *spi, u16 addr)
+{
+ u8 temp[2];
+ u8 data;
+
+ addr = bitrev16(addr) >> 1;
+ temp[0] = (u8)(((addr >> 8) & 0x7f) | 0x80);
+ temp[1] = (u8)(addr & 0xfe);
+
+ spi_write_then_read(spi, &temp[0], SLIC_TWO_LEN, &data,
+ SLIC_TRANS_LEN);
+
+ data = bitrev8(data);
+ return data;
+}
+
+static bool get_slic_product_code(struct spi_device *spi)
+{
+ u8 device_id;
+
+ device_id = slic_read(spi, DS26522_IDR_ADDR);
+	return (device_id & 0xf8) == 0x68;
+}
+
+static void ds26522_e1_spec_config(struct spi_device *spi)
+{
+ /* Receive E1 Mode, Framer Disabled */
+ slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_E1);
+
+ /* Transmit E1 Mode, Framer Disable */
+ slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_E1);
+
+ /* Receive E1 Mode Framer Enable */
+ slic_write(spi, DS26522_RMMR_ADDR,
+ slic_read(spi, DS26522_RMMR_ADDR) | DS26522_RMMR_FRM_EN);
+
+ /* Transmit E1 Mode Framer Enable */
+ slic_write(spi, DS26522_TMMR_ADDR,
+ slic_read(spi, DS26522_TMMR_ADDR) | DS26522_TMMR_FRM_EN);
+
+ /* RCR1, receive E1 B8zs & ESF */
+ slic_write(spi, DS26522_RCR1_ADDR,
+ DS26522_RCR1_E1_HDB3 | DS26522_RCR1_E1_CCS);
+
+ /* RSYSCLK=2.048MHz, RSYNC-Output */
+ slic_write(spi, DS26522_RIOCR_ADDR,
+ DS26522_RIOCR_2048KHZ | DS26522_RIOCR_RSIO_OUT);
+
+ /* TCR1 Transmit E1 b8zs */
+ slic_write(spi, DS26522_TCR1_ADDR, DS26522_TCR1_TB8ZS);
+
+ /* TSYSCLK=2.048MHz, TSYNC-Output */
+ slic_write(spi, DS26522_TIOCR_ADDR,
+ DS26522_TIOCR_2048KHZ | DS26522_TIOCR_TSIO_OUT);
+
+ /* Set E1TAF */
+ slic_write(spi, DS26522_E1TAF_ADDR, DS26522_E1TAF_DEFAULT);
+
+ /* Set E1TNAF register */
+ slic_write(spi, DS26522_E1TNAF_ADDR, DS26522_E1TNAF_DEFAULT);
+
+ /* Receive E1 Mode Framer Enable & init Done */
+ slic_write(spi, DS26522_RMMR_ADDR, slic_read(spi, DS26522_RMMR_ADDR) |
+ DS26522_RMMR_INIT_DONE);
+
+ /* Transmit E1 Mode Framer Enable & init Done */
+ slic_write(spi, DS26522_TMMR_ADDR, slic_read(spi, DS26522_TMMR_ADDR) |
+ DS26522_TMMR_INIT_DONE);
+
+ /* Configure LIU E1 mode */
+ slic_write(spi, DS26522_LTRCR_ADDR, DS26522_LTRCR_E1);
+
+	/* E1 Mode default 75 ohm w/Transmit Impedance Matching */
+ slic_write(spi, DS26522_LTITSR_ADDR,
+ DS26522_LTITSR_TLIS_75OHM | DS26522_LTITSR_LBOS_75OHM);
+
+	/* E1 Mode default 75 ohm Long Haul w/Receive Impedance Matching */
+ slic_write(spi, DS26522_LRISMR_ADDR,
+ DS26522_LRISMR_75OHM | DS26522_LRISMR_MAX);
+
+ /* Enable Transmit output */
+ slic_write(spi, DS26522_LMCR_ADDR, DS26522_LMCR_TE);
+}
+
+static int slic_ds26522_init_configure(struct spi_device *spi)
+{
+ u16 addr;
+
+ /* set clock */
+ slic_write(spi, DS26522_GTCCR_ADDR, DS26522_GTCCR_BPREFSEL_REFCLKIN |
+ DS26522_GTCCR_BFREQSEL_2048KHZ |
+ DS26522_GTCCR_FREQSEL_2048KHZ);
+ slic_write(spi, DS26522_GTCR2_ADDR, DS26522_GTCR2_TSSYNCOUT);
+ slic_write(spi, DS26522_GFCR_ADDR, DS26522_GFCR_BPCLK_2048KHZ);
+
+ /* set gtcr */
+ slic_write(spi, DS26522_GTCR1_ADDR, DS26522_GTCR1);
+
+ /* Global LIU Software Reset Register */
+ slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_RESET);
+
+ /* Global Framer and BERT Software Reset Register */
+ slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_RESET);
+
+ usleep_range(100, 120);
+
+ slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_NORMAL);
+ slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_NORMAL);
+
+	/* Perform RX/TX SRESET, reset the receiver */
+ slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_SFTRST);
+
+	/* Reset the transceiver */
+ slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_SFTRST);
+
+ usleep_range(100, 120);
+
+ /* Zero all Framer Registers */
+ for (addr = DS26522_RF_ADDR_START; addr <= DS26522_RF_ADDR_END;
+ addr++)
+ slic_write(spi, addr, 0);
+
+ for (addr = DS26522_TF_ADDR_START; addr <= DS26522_TF_ADDR_END;
+ addr++)
+ slic_write(spi, addr, 0);
+
+ for (addr = DS26522_LIU_ADDR_START; addr <= DS26522_LIU_ADDR_END;
+ addr++)
+ slic_write(spi, addr, 0);
+
+ for (addr = DS26522_BERT_ADDR_START; addr <= DS26522_BERT_ADDR_END;
+ addr++)
+ slic_write(spi, addr, 0);
+
+ /* setup ds26522 for E1 specification */
+ ds26522_e1_spec_config(spi);
+
+ slic_write(spi, DS26522_GTCR1_ADDR, 0x00);
+
+ return 0;
+}
+
+static int slic_ds26522_remove(struct spi_device *spi)
+{
+ pr_info("DS26522 module uninstalled\n");
+ return 0;
+}
+
+static int slic_ds26522_probe(struct spi_device *spi)
+{
+ int ret = 0;
+
+ g_spi = spi;
+ spi->bits_per_word = 8;
+
+	if (!get_slic_product_code(spi))
+		return -ENODEV;	/* no ds26522 found on this chip select */
+
+ ret = slic_ds26522_init_configure(spi);
+ if (ret == 0)
+		pr_info("DS26522 cs%d configured\n", spi->chip_select);
+
+ return ret;
+}
+
+static const struct of_device_id slic_ds26522_match[] = {
+ {
+ .compatible = "maxim,ds26522",
+ },
+ {},
+};
+
+static struct spi_driver slic_ds26522_driver = {
+ .driver = {
+ .name = "ds26522",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ .of_match_table = slic_ds26522_match,
+ },
+ .probe = slic_ds26522_probe,
+ .remove = slic_ds26522_remove,
+};
+
+static int __init slic_ds26522_init(void)
+{
+ return spi_register_driver(&slic_ds26522_driver);
+}
+
+static void __exit slic_ds26522_exit(void)
+{
+ spi_unregister_driver(&slic_ds26522_driver);
+}
+
+module_init(slic_ds26522_init);
+module_exit(slic_ds26522_exit);
diff --git a/drivers/net/wan/slic_ds26522.h b/drivers/net/wan/slic_ds26522.h
new file mode 100644
index 000000000000..22aa0ecbd9fd
--- /dev/null
+++ b/drivers/net/wan/slic_ds26522.h
@@ -0,0 +1,134 @@
+/*
+ * drivers/tdm/line_ctrl/slic_ds26522.h
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * Author: Zhao Qiang <B45475@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define DS26522_RF_ADDR_START 0x00
+#define DS26522_RF_ADDR_END 0xef
+#define DS26522_GLB_ADDR_START 0xf0
+#define DS26522_GLB_ADDR_END 0xff
+#define DS26522_TF_ADDR_START 0x100
+#define DS26522_TF_ADDR_END 0x1ef
+#define DS26522_LIU_ADDR_START 0x1000
+#define DS26522_LIU_ADDR_END 0x101f
+#define DS26522_TEST_ADDR_START 0x1008
+#define DS26522_TEST_ADDR_END 0x101f
+#define DS26522_BERT_ADDR_START 0x1100
+#define DS26522_BERT_ADDR_END 0x110f
+
+#define DS26522_RMMR_ADDR 0x80
+#define DS26522_RCR1_ADDR 0x81
+#define DS26522_RCR3_ADDR 0x83
+#define DS26522_RIOCR_ADDR 0x84
+
+#define DS26522_GTCR1_ADDR 0xf0
+#define DS26522_GFCR_ADDR 0xf1
+#define DS26522_GTCR2_ADDR 0xf2
+#define DS26522_GTCCR_ADDR 0xf3
+#define DS26522_GLSRR_ADDR 0xf5
+#define DS26522_GFSRR_ADDR 0xf6
+#define DS26522_IDR_ADDR 0xf8
+
+#define DS26522_E1TAF_ADDR 0x164
+#define DS26522_E1TNAF_ADDR 0x165
+#define DS26522_TMMR_ADDR 0x180
+#define DS26522_TCR1_ADDR 0x181
+#define DS26522_TIOCR_ADDR 0x184
+
+#define DS26522_LTRCR_ADDR 0x1000
+#define DS26522_LTITSR_ADDR 0x1001
+#define DS26522_LMCR_ADDR 0x1002
+#define DS26522_LRISMR_ADDR 0x1007
+
+#define MAX_NUM_OF_CHANNELS 8
+#define PQ_MDS_8E1T1_BRD_REV 0x00
+#define PQ_MDS_8E1T1_PLD_REV 0x00
+
+#define DS26522_GTCCR_BPREFSEL_REFCLKIN 0xa0
+#define DS26522_GTCCR_BFREQSEL_1544KHZ 0x08
+#define DS26522_GTCCR_FREQSEL_1544KHZ 0x04
+#define DS26522_GTCCR_BFREQSEL_2048KHZ 0x00
+#define DS26522_GTCCR_FREQSEL_2048KHZ 0x00
+
+#define DS26522_GFCR_BPCLK_2048KHZ 0x00
+
+#define DS26522_GTCR2_TSSYNCOUT 0x02
+#define DS26522_GTCR1 0x00
+
+#define DS26522_GFSRR_RESET 0x01
+#define DS26522_GFSRR_NORMAL 0x00
+
+#define DS26522_GLSRR_RESET 0x01
+#define DS26522_GLSRR_NORMAL 0x00
+
+#define DS26522_RMMR_SFTRST 0x02
+#define DS26522_RMMR_FRM_EN 0x80
+#define DS26522_RMMR_INIT_DONE 0x40
+#define DS26522_RMMR_T1 0x00
+#define DS26522_RMMR_E1 0x01
+
+#define DS26522_E1TAF_DEFAULT 0x1b
+#define DS26522_E1TNAF_DEFAULT 0x40
+
+#define DS26522_TMMR_SFTRST 0x02
+#define DS26522_TMMR_FRM_EN 0x80
+#define DS26522_TMMR_INIT_DONE 0x40
+#define DS26522_TMMR_T1 0x00
+#define DS26522_TMMR_E1 0x01
+
+#define DS26522_RCR1_T1_SYNCT 0x80
+#define DS26522_RCR1_T1_RB8ZS 0x40
+#define DS26522_RCR1_T1_SYNCC 0x08
+
+#define DS26522_RCR1_E1_HDB3 0x40
+#define DS26522_RCR1_E1_CCS 0x20
+
+#define DS26522_RIOCR_1544KHZ 0x00
+#define DS26522_RIOCR_2048KHZ 0x10
+#define DS26522_RIOCR_RSIO_OUT 0x00
+
+#define DS26522_RCR3_FLB 0x01
+
+#define DS26522_TIOCR_1544KHZ 0x00
+#define DS26522_TIOCR_2048KHZ 0x10
+#define DS26522_TIOCR_TSIO_OUT 0x04
+
+#define DS26522_TCR1_TB8ZS 0x04
+
+#define DS26522_LTRCR_T1 0x02
+#define DS26522_LTRCR_E1 0x00
+
+#define DS26522_LTITSR_TLIS_75OHM 0x00
+#define DS26522_LTITSR_LBOS_75OHM 0x00
+#define DS26522_LTITSR_TLIS_100OHM 0x10
+#define DS26522_LTITSR_TLIS_0DB_CSU 0x00
+
+#define DS26522_LRISMR_75OHM 0x00
+#define DS26522_LRISMR_100OHM 0x10
+#define DS26522_LRISMR_MAX 0x03
+
+#define DS26522_LMCR_TE 0x01
+
+enum line_rate {
+ LINE_RATE_T1, /* T1 line rate (1.544 Mbps) */
+ LINE_RATE_E1 /* E1 line rate (2.048 Mbps) */
+};
+
+enum tdm_trans_mode {
+ NORMAL = 0,
+ FRAMER_LB
+};
+
+enum card_support_type {
+ LM_CARD = 0,
+ DS26522_CARD,
+ NO_CARD
+};
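+
+/* Sketch (editorial, not part of the original patch): bringing the
+ * receive framer out of reset in E1 mode with the RMMR bits above,
+ * assuming the slic_write() helper from slic_ds26522.c:
+ *
+ *	slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_E1);
+ *	slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_FRM_EN |
+ *		   DS26522_RMMR_INIT_DONE | DS26522_RMMR_E1);
+ */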
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index bd62bc19e758..acec16b9cf49 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -25,10 +25,9 @@
#include "ahb.h"
static const struct of_device_id ath10k_ahb_of_match[] = {
- /* TODO: enable this entry once everything in place.
- * { .compatible = "qcom,ipq4019-wifi",
- * .data = (void *)ATH10K_HW_QCA4019 },
- */
+ { .compatible = "qcom,ipq4019-wifi",
+ .data = (void *)ATH10K_HW_QCA4019
+ },
{ }
};
@@ -476,6 +475,7 @@ static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
int ret;
@@ -487,6 +487,7 @@ static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
ar_ahb->irq, ret);
return ret;
}
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
return 0;
}
@@ -918,8 +919,6 @@ int ath10k_ahb_init(void)
{
int ret;
- printk(KERN_ERR "AHB support is still work in progress\n");
-
ret = platform_driver_register(&ath10k_ahb_driver);
if (ret)
printk(KERN_ERR "failed to register ath10k ahb driver: %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 49af62428c88..dfb3db0ee5d1 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/of.h>
+#include <asm/byteorder.h>
#include "core.h"
#include "mac.h"
@@ -55,7 +56,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
- .has_shifted_cc_wraparound = true,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
},
{
+ .id = QCA9887_HW_1_0_VERSION,
+ .dev_id = QCA9887_1_0_DEVICE_ID,
+ .name = "qca9887 hw1.0",
+ .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+ .cal_data_len = 2116,
+ .fw = {
+ .dir = QCA9887_HW_1_0_FW_DIR,
+ .board = QCA9887_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA9887_BOARD_DATA_SZ,
+ .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
.id = QCA6174_HW_2_1_VERSION,
.dev_id = QCA6164_2_1_DEVICE_ID,
.name = "qca6164 hw2.1",
@@ -148,6 +168,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.uart_pin = 7,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
@@ -163,6 +184,29 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
},
{
+ .id = QCA9984_HW_1_0_DEV_VERSION,
+ .dev_id = QCA9984_1_0_DEVICE_ID,
+ .name = "qca9984/qca9994 hw1.0",
+ .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
+ .channel_counters_freq_hz = 150000,
+ .max_probe_resp_desc_thres = 24,
+ .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
+ .tx_chain_mask = 0xf,
+ .rx_chain_mask = 0xf,
+ .max_spatial_stream = 4,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA9984_HW_1_0_FW_DIR,
+ .board = QCA9984_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
.id = QCA9377_HW_1_0_DEV_VERSION,
.dev_id = QCA9377_1_0_DEVICE_ID,
.name = "qca9377 hw1.0",
@@ -202,9 +246,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca4019 hw1.0",
.patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
- .has_shifted_cc_wraparound = true,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x0010000,
.continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 125000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
@@ -236,6 +281,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca",
[ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
[ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
+ [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -531,6 +577,35 @@ out:
return ret;
}
+static int ath10k_download_cal_eeprom(struct ath10k *ar)
+{
+ size_t data_len;
+ void *data = NULL;
+ int ret;
+
+ ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = ath10k_download_board_data(ar, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+
+ return ret;
+}
+
static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
{
u32 result, address;
@@ -1083,7 +1158,7 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
}
ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
- ar->running_fw->fw_file.fw_features,
+ fw_file->fw_features,
sizeof(fw_file->fw_features));
break;
case ATH10K_FW_IE_FW_IMAGE:
@@ -1293,7 +1368,17 @@ static int ath10k_download_cal_data(struct ath10k *ar)
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot did not find DT entry, try OTP next: %d\n",
+ "boot did not find DT entry, try target EEPROM next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_eeprom(ar);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_EEPROM;
+ goto done;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find target EEPROM entry, try OTP next: %d\n",
ret);
ret = ath10k_download_and_run_otp(ar);
@@ -1733,6 +1818,16 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
val |= WMI_10_4_BSS_CHANNEL_INFO_64;
+ /* 10.4 firmware supports BT-Coex without reloading firmware
+ * via pdev param. To support Bluetooth coexistence pdev param,
+ * WMI_COEX_GPIO_SUPPORT of the extended resource config should
+ * always be enabled.
+ */
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
+ test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features))
+ val |= WMI_10_4_COEX_GPIO_SUPPORT;
+
status = ath10k_mac_ext_resource_config(ar, val);
if (status) {
ath10k_err(ar,
@@ -2062,6 +2157,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
switch (hw_rev) {
case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
ar->regs = &qca988x_regs;
ar->hw_values = &qca988x_values;
break;
@@ -2071,6 +2167,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->hw_values = &qca6174_values;
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
ar->regs = &qca99x0_regs;
ar->hw_values = &qca99x0_values;
break;
@@ -2159,5 +2256,5 @@ void ath10k_core_destroy(struct ath10k *ar)
EXPORT_SYMBOL(ath10k_core_destroy);
MODULE_AUTHOR("Qualcomm Atheros");
-MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
+MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 1852e0ee3fa1..3da18c9dbd7a 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -535,6 +535,13 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13,
+ /* Firmware supports BT-Coex without reloading firmware via pdev param.
+ * To support Bluetooth coexistence pdev param, WMI_COEX_GPIO_SUPPORT of
+ * extended resource config should always be enabled. This firmware IE
+ * is used to configure WMI_COEX_GPIO_SUPPORT.
+ */
+ ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -571,6 +578,7 @@ enum ath10k_cal_mode {
ATH10K_CAL_MODE_DT,
ATH10K_PRE_CAL_MODE_FILE,
ATH10K_PRE_CAL_MODE_DT,
+ ATH10K_CAL_MODE_EEPROM,
};
enum ath10k_crypt_mode {
@@ -593,6 +601,8 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
return "pre-cal-file";
case ATH10K_PRE_CAL_MODE_DT:
return "pre-cal-dt";
+ case ATH10K_CAL_MODE_EEPROM:
+ return "eeprom";
}
return "unknown";
@@ -703,12 +713,10 @@ struct ath10k {
int uart_pin;
u32 otp_exe_param;
- /* This is true if given HW chip has a quirky Cycle Counter
- * wraparound which resets to 0x7fffffff instead of 0. All
- * other CC related counters (e.g. Rx Clear Count) are divided
- * by 2 so they never wraparound themselves.
+ /* Type of hw cycle counter wraparound logic, for more info
+ * refer to enum ath10k_hw_cc_wraparound_type.
*/
- bool has_shifted_cc_wraparound;
+ enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
 /* Some chips expect the fragment descriptor to be continuous
* memory for any TX operation. Set continuous_frag_desc flag
@@ -716,6 +724,12 @@ struct ath10k {
*/
bool continuous_frag_desc;
+ /* The CCK hardware rate table mapping was revised for newer
+ * chipsets like QCA99X0 and QCA4019: the CCK h/w rate values are
+ * ordered consistently with respect to rate and preamble.
+ */
+ bool cck_rate_map_rev2;
+
u32 channel_counters_freq_hz;
/* Mgmt tx descriptors threshold for limiting probe response
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index e2511550fbb8..8fbb8f2c7828 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -609,25 +609,23 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
char buf[32];
int ret;
- mutex_lock(&ar->conf_mutex);
-
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = 0;
+ /* drop the possible '\n' from the end */
+ if (buf[count - 1] == '\n')
+ buf[count - 1] = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
if (ar->state != ATH10K_STATE_ON &&
ar->state != ATH10K_STATE_RESTARTED) {
ret = -ENETDOWN;
goto exit;
}
- /* drop the possible '\n' from the end */
- if (buf[count - 1] == '\n') {
- buf[count - 1] = 0;
- count--;
- }
-
if (!strcmp(buf, "soft")) {
ath10k_info(ar, "simulating soft firmware crash\n");
ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
@@ -2127,6 +2125,7 @@ static ssize_t ath10k_write_btcoex(struct file *file,
size_t buf_size;
int ret;
bool val;
+ u32 pdev_param;
buf_size = min(count, (sizeof(buf) - 1));
if (copy_from_user(buf, ubuf, buf_size))
@@ -2150,14 +2149,25 @@ static ssize_t ath10k_write_btcoex(struct file *file,
goto exit;
}
+ pdev_param = ar->wmi.pdev_param->enable_btcoex;
+ if (test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable btcoex: %d\n", ret);
+ ret = count;
+ goto exit;
+ }
+ } else {
+ ath10k_info(ar, "restarting firmware due to btcoex change");
+ queue_work(ar->workqueue, &ar->restart_work);
+ }
+
if (val)
set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
else
clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
- ath10k_info(ar, "restarting firmware due to btcoex change");
-
- queue_work(ar->workqueue, &ar->restart_work);
ret = count;
exit:
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 89e7076c919f..b2566b06e1e1 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -87,6 +87,10 @@ struct ath10k_hif_ops {
int (*suspend)(struct ath10k *ar);
int (*resume)(struct ath10k *ar);
+
+ /* fetch calibration data from target eeprom */
+ int (*fetch_cal_eeprom)(struct ath10k *ar, void **data,
+ size_t *data_len);
};
static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -202,4 +206,14 @@ static inline void ath10k_hif_write32(struct ath10k *ar,
ar->hif.ops->write32(ar, address, data);
}
+static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar,
+ void **data,
+ size_t *data_len)
+{
+ if (!ar->hif.ops->fetch_cal_eeprom)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len);
+}
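+
+/* Sketch (editorial, not part of the original patch): callers treat
+ * -EOPNOTSUPP from this op as "no EEPROM behind this HIF" rather than
+ * a hard error:
+ *
+ *	ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
+ *	if (ret == -EOPNOTSUPP)
+ *		fall back to OTP/DT calibration;
+ *
+ * On success the caller owns *data and must kfree() it when done.
+ */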
+
#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 911c535d0863..430a83e142aa 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -485,10 +485,10 @@ struct htt_mgmt_tx_completion {
__le32 status;
} __packed;
-#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F)
+#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F)
#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0)
-#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6)
-#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7)
+#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5)
+#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index cc979a4faeb0..80e645302b54 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -748,7 +748,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
if (WARN_ON_ONCE(!arvif))
return NULL;
- if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ if (WARN_ON_ONCE(ath10k_mac_vif_chan(arvif->vif, &def)))
return NULL;
return def.chan;
@@ -939,7 +939,8 @@ static void ath10k_process_rx(struct ath10k *ar,
is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
"mcast" : "ucast",
(__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
- status->flag == 0 ? "legacy" : "",
+ (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
+ "legacy" : "",
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
@@ -1904,7 +1905,6 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
return;
}
}
- ath10k_htt_rx_msdu_buff_replenish(htt);
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
@@ -2182,34 +2182,6 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
ath10k_mac_tx_push_pending(ar);
}
-static inline enum nl80211_band phy_mode_to_band(u32 phy_mode)
-{
- enum nl80211_band band;
-
- switch (phy_mode) {
- case MODE_11A:
- case MODE_11NA_HT20:
- case MODE_11NA_HT40:
- case MODE_11AC_VHT20:
- case MODE_11AC_VHT40:
- case MODE_11AC_VHT80:
- band = NL80211_BAND_5GHZ;
- break;
- case MODE_11G:
- case MODE_11B:
- case MODE_11GONLY:
- case MODE_11NG_HT20:
- case MODE_11NG_HT40:
- case MODE_11AC_VHT20_2G:
- case MODE_11AC_VHT40_2G:
- case MODE_11AC_VHT80_2G:
- default:
- band = NL80211_BAND_2GHZ;
- }
-
- return band;
-}
-
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
bool release;
@@ -2291,7 +2263,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
ath10k_htt_tx_mgmt_dec_pending(htt);
spin_unlock_bh(&htt->tx_lock);
}
- ath10k_mac_tx_push_pending(ar);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
@@ -2442,8 +2413,6 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
dev_kfree_skb_any(skb);
}
- ath10k_mac_tx_push_pending(ar);
-
num_mpdus = atomic_read(&htt->num_mpdus_ready);
while (num_mpdus) {
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index f544d48518c3..bd86e7a38db9 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -179,17 +179,35 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
{
u32 cc_fix = 0;
+ u32 rcc_fix = 0;
+ enum ath10k_hw_cc_wraparound_type wraparound_type;
survey->filled |= SURVEY_INFO_TIME |
SURVEY_INFO_TIME_BUSY;
- if (ar->hw_params.has_shifted_cc_wraparound && cc < cc_prev) {
- cc_fix = 0x7fffffff;
- survey->filled &= ~SURVEY_INFO_TIME_BUSY;
+ wraparound_type = ar->hw_params.cc_wraparound_type;
+
+ if (cc < cc_prev || rcc < rcc_prev) {
+ switch (wraparound_type) {
+ case ATH10K_HW_CC_WRAP_SHIFTED_ALL:
+ if (cc < cc_prev) {
+ cc_fix = 0x7fffffff;
+ survey->filled &= ~SURVEY_INFO_TIME_BUSY;
+ }
+ break;
+ case ATH10K_HW_CC_WRAP_SHIFTED_EACH:
+ if (cc < cc_prev)
+ cc_fix = 0x7fffffff;
+ else
+ rcc_fix = 0x7fffffff;
+ break;
+ case ATH10K_HW_CC_WRAP_DISABLED:
+ break;
+ }
}
cc -= cc_prev - cc_fix;
- rcc -= rcc_prev;
+ rcc -= rcc_prev - rcc_fix;
survey->time = CCNT_TO_MSEC(ar, cc);
survey->time_busy = CCNT_TO_MSEC(ar, rcc);
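
/* Sketch (editorial, not part of the original patch): the fixup above as
 * a standalone helper, assuming a wrapped counter restarts at 0x7fffffff:
 *
 *	static u32 ath10k_cc_delta(u32 cc, u32 cc_prev)
 *	{
 *		u32 fix = (cc < cc_prev) ? 0x7fffffff : 0;
 *
 *		return cc - (cc_prev - fix);
 *	}
 */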
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index aedd8987040b..f31d3ce42470 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -26,7 +26,9 @@
#define QCA6164_2_1_DEVICE_ID (0x0041)
#define QCA6174_2_1_DEVICE_ID (0x003e)
#define QCA99X0_2_0_DEVICE_ID (0x0040)
+#define QCA9984_1_0_DEVICE_ID (0x0046)
#define QCA9377_1_0_DEVICE_ID (0x0042)
+#define QCA9887_1_0_DEVICE_ID (0x0050)
/* QCA988X 1.0 definitions (unsupported) */
#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
@@ -38,6 +40,13 @@
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+/* QCA9887 1.0 definitions */
+#define QCA9887_HW_1_0_VERSION 0x4100016d
+#define QCA9887_HW_1_0_CHIP_ID_REV 0
+#define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0"
+#define QCA9887_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
/* QCA6174 target BMI version signatures */
#define QCA6174_HW_1_0_VERSION 0x05000000
#define QCA6174_HW_1_1_VERSION 0x05000001
@@ -91,6 +100,14 @@ enum qca9377_chip_id_rev {
#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
+/* QCA9984 1.0 defines */
+#define QCA9984_HW_1_0_DEV_VERSION 0x1000000
+#define QCA9984_HW_DEV_TYPE 0xa
+#define QCA9984_HW_1_0_CHIP_ID_REV 0x0
+#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0"
+#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
/* QCA9377 1.0 definitions */
#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
#define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
@@ -193,8 +210,10 @@ enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
ATH10K_HW_QCA99X0,
+ ATH10K_HW_QCA9984,
ATH10K_HW_QCA9377,
ATH10K_HW_QCA4019,
+ ATH10K_HW_QCA9887,
};
struct ath10k_hw_regs {
@@ -247,8 +266,10 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
+#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
+#define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984)
#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019)
@@ -315,11 +336,41 @@ enum ath10k_hw_rate_cck {
ATH10K_HW_RATE_CCK_SP_2M,
};
+enum ath10k_hw_rate_rev2_cck {
+ ATH10K_HW_RATE_REV2_CCK_LP_1M = 1,
+ ATH10K_HW_RATE_REV2_CCK_LP_2M,
+ ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+ ATH10K_HW_RATE_REV2_CCK_LP_11M,
+ ATH10K_HW_RATE_REV2_CCK_SP_2M,
+ ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+ ATH10K_HW_RATE_REV2_CCK_SP_11M,
+};
+
enum ath10k_hw_4addr_pad {
ATH10K_HW_4ADDR_PAD_AFTER,
ATH10K_HW_4ADDR_PAD_BEFORE,
};
+enum ath10k_hw_cc_wraparound_type {
+ ATH10K_HW_CC_WRAP_DISABLED = 0,
+
+ /* This type is when the HW chip has a quirky Cycle Counter
+ * wraparound which resets to 0x7fffffff instead of 0. All
+ * other CC related counters (e.g. Rx Clear Count) are divided
+ * by 2 so they never wrap around themselves.
+ */
+ ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1,
+
+ /* Each hw counter wraps around independently. When a
+ * counter overflows, the respective counter is right shifted
+ * by 1, i.e. reset to 0x7fffffff, and the other counters keep
+ * running unaffected. In this type of wraparound, it should
+ * be possible to report accurate Rx busy time unlike the
+ * first type.
+ */
+ ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2,
+};
+
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
@@ -547,7 +598,10 @@ enum ath10k_hw_4addr_pad {
#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
+#define WLAN_GPIO_PIN0_CONFIG_LSB 11
#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
+#define WLAN_GPIO_PIN0_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN0_PAD_PULL_MASK 0x00000060
#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
@@ -560,6 +614,8 @@ enum ath10k_hw_4addr_pad {
#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
#define SI_CONFIG_OFFSET 0x00000000
+#define SI_CONFIG_ERR_INT_LSB 19
+#define SI_CONFIG_ERR_INT_MASK 0x00080000
#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
#define SI_CONFIG_I2C_LSB 16
@@ -573,7 +629,9 @@ enum ath10k_hw_4addr_pad {
#define SI_CONFIG_DIVIDER_LSB 0
#define SI_CONFIG_DIVIDER_MASK 0x0000000f
#define SI_CS_OFFSET 0x00000004
+#define SI_CS_DONE_ERR_LSB 10
#define SI_CS_DONE_ERR_MASK 0x00000400
+#define SI_CS_DONE_INT_LSB 9
#define SI_CS_DONE_INT_MASK 0x00000200
#define SI_CS_START_LSB 8
#define SI_CS_START_MASK 0x00000100
@@ -624,7 +682,10 @@ enum ath10k_hw_4addr_pad {
#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS
#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS
+#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB
#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
+#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB
+#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK
#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS
@@ -679,6 +740,18 @@ enum ath10k_hw_4addr_pad {
#define WINDOW_READ_ADDR_ADDRESS MISSING
#define WINDOW_WRITE_ADDR_ADDRESS MISSING
+#define QCA9887_1_0_I2C_SDA_GPIO_PIN 5
+#define QCA9887_1_0_I2C_SDA_PIN_CONFIG 3
+#define QCA9887_1_0_SI_CLK_GPIO_PIN 17
+#define QCA9887_1_0_SI_CLK_PIN_CONFIG 3
+#define QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS 0x00000010
+
+#define QCA9887_EEPROM_SELECT_READ 0xa10000a0
+#define QCA9887_EEPROM_ADDR_HI_MASK 0x0000ff00
+#define QCA9887_EEPROM_ADDR_HI_LSB 8
+#define QCA9887_EEPROM_ADDR_LO_MASK 0x00ff0000
+#define QCA9887_EEPROM_ADDR_LO_LSB 16
+
#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 6dd1d26b357f..d4b7a168f7c0 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -62,6 +62,32 @@ static struct ieee80211_rate ath10k_rates[] = {
{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};
+static struct ieee80211_rate ath10k_rates_rev2[] = {
+ { .bitrate = 10,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
@@ -70,6 +96,9 @@ static struct ieee80211_rate ath10k_rates[] = {
#define ath10k_g_rates (ath10k_rates + 0)
#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
+#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
+
static bool ath10k_mac_bitrate_is_cck(int bitrate)
{
switch (bitrate) {
@@ -679,10 +708,10 @@ static int ath10k_peer_create(struct ath10k *ar,
peer = ath10k_peer_find(ar, vdev_id, addr);
if (!peer) {
+ spin_unlock_bh(&ar->data_lock);
ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
addr, vdev_id);
ath10k_wmi_peer_delete(ar, vdev_id, addr);
- spin_unlock_bh(&ar->data_lock);
return -ENOENT;
}
@@ -3781,6 +3810,9 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
int ret;
int max;
+ if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
+ return;
+
spin_lock_bh(&ar->txqs_lock);
rcu_read_lock();
@@ -4051,9 +4083,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
list_add_tail(&artxq->list, &ar->txqs);
spin_unlock_bh(&ar->txqs_lock);
- if (ath10k_mac_tx_can_push(hw, txq))
- tasklet_schedule(&ar->htt.txrx_compl_task);
-
+ ath10k_mac_tx_push_pending(ar);
ath10k_htt_tx_txq_update(hw, txq);
}
@@ -4467,6 +4497,19 @@ static int ath10k_start(struct ieee80211_hw *hw)
}
}
+ param = ar->wmi.pdev_param->enable_btcoex;
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
+ test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set btcoex param: %d\n", ret);
+ goto err_core_stop;
+ }
+ clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+ }
+
ar->num_started_vdevs = 0;
ath10k_regd_update(ar);
@@ -7695,8 +7738,14 @@ int ath10k_mac_register(struct ath10k *ar)
band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
band->channels = channels;
- band->n_bitrates = ath10k_g_rates_size;
- band->bitrates = ath10k_g_rates;
+
+ if (ar->hw_params.cck_rate_map_rev2) {
+ band->n_bitrates = ath10k_g_rates_rev2_size;
+ band->bitrates = ath10k_g_rates_rev2;
+ } else {
+ band->n_bitrates = ath10k_g_rates_size;
+ band->bitrates = ath10k_g_rates;
+ }
ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 8133d7b5b956..f06dd3941bac 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -56,7 +56,9 @@ static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
+ { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
+ { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
{0}
};
@@ -81,8 +83,12 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
+ { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
+
{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
+
+ { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
};
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
@@ -837,6 +843,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
@@ -844,6 +851,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
0x7ff) << 21;
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
case ATH10K_HW_QCA4019:
val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
break;
@@ -864,7 +872,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
u32 *buf;
- unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
@@ -882,9 +890,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* 1) 4-byte alignment
* 2) Buffer in DMA-able space
*/
- orig_nbytes = nbytes;
+ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
- orig_nbytes,
+ alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
@@ -892,9 +901,9 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
ret = -ENOMEM;
goto done;
}
- memset(data_buf, 0, orig_nbytes);
+ memset(data_buf, 0, alloc_nbytes);
- remaining_bytes = orig_nbytes;
+ remaining_bytes = nbytes;
ce_data = ce_data_base;
while (remaining_bytes) {
nbytes = min_t(unsigned int, remaining_bytes,
@@ -954,19 +963,22 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
remaining_bytes -= nbytes;
+
+ if (ret) {
+ ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
+ address, ret);
+ break;
+ }
+ memcpy(data, data_buf, nbytes);
+
address += nbytes;
- ce_data += nbytes;
+ data += nbytes;
}
done:
- if (ret == 0)
- memcpy(data, data_buf, orig_nbytes);
- else
- ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
- address, ret);
if (data_buf)
- dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
spin_unlock_bh(&ar_pci->ce_lock);
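
/* Sketch (editorial, not part of the original patch): the bounce-buffer
 * pattern introduced above, reduced to its shape. The coherent DMA
 * allocation is capped at DIAG_TRANSFER_LIMIT and larger reads are
 * streamed through it:
 *
 *	while (remaining_bytes) {
 *		nbytes = min(remaining_bytes, DIAG_TRANSFER_LIMIT);
 *		(DMA nbytes into data_buf)
 *		memcpy(data, data_buf, nbytes);
 *		data += nbytes; address += nbytes; remaining_bytes -= nbytes;
 *	}
 */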
@@ -1560,6 +1572,7 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
@@ -1569,6 +1582,7 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
CORE_CTRL_ADDRESS, val);
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
case ATH10K_HW_QCA4019:
/* TODO: Find appropriate register configuration for QCA99X0
* to mask irq/MSI.
@@ -1583,6 +1597,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
@@ -1592,6 +1607,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
CORE_CTRL_ADDRESS, val);
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
case ATH10K_HW_QCA4019:
/* TODO: Find appropriate register configuration for QCA99X0
* to unmask irq/MSI.
@@ -1932,6 +1948,8 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
switch (ar_pci->pdev->device) {
case QCA988X_2_0_DEVICE_ID:
case QCA99X0_2_0_DEVICE_ID:
+ case QCA9984_1_0_DEVICE_ID:
+ case QCA9887_1_0_DEVICE_ID:
return 1;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
@@ -2293,16 +2311,20 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
return 0;
}
+static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
+{
+ ath10k_pci_irq_disable(ar);
+ return ath10k_pci_qca99x0_chip_reset(ar);
+}
+
static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
- if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
- return ath10k_pci_warm_reset(ar);
- } else if (QCA_REV_99X0(ar)) {
- ath10k_pci_irq_disable(ar);
- return ath10k_pci_qca99x0_chip_reset(ar);
- } else {
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (!ar_pci->pci_soft_reset)
return -ENOTSUPP;
- }
+
+ return ar_pci->pci_soft_reset(ar);
}
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
@@ -2437,16 +2459,12 @@ static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
static int ath10k_pci_chip_reset(struct ath10k *ar)
{
- if (QCA_REV_988X(ar))
- return ath10k_pci_qca988x_chip_reset(ar);
- else if (QCA_REV_6174(ar))
- return ath10k_pci_qca6174_chip_reset(ar);
- else if (QCA_REV_9377(ar))
- return ath10k_pci_qca6174_chip_reset(ar);
- else if (QCA_REV_99X0(ar))
- return ath10k_pci_qca99x0_chip_reset(ar);
- else
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (WARN_ON(!ar_pci->pci_hard_reset))
return -ENOTSUPP;
+
+ return ar_pci->pci_hard_reset(ar);
}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
@@ -2559,6 +2577,144 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
}
#endif
+static bool ath10k_pci_validate_cal(void *data, size_t size)
+{
+ __le16 *cal_words = data;
+ u16 checksum = 0;
+ size_t i;
+
+ if (size % 2 != 0)
+ return false;
+
+ for (i = 0; i < size / 2; i++)
+ checksum ^= le16_to_cpu(cal_words[i]);
+
+ return checksum == 0xffff;
+}
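+
+/* Sketch (editorial, not part of the original patch): the invariant the
+ * check above relies on, assuming the image reserves one 16-bit word for
+ * the complement of the XOR of all the others:
+ *
+ *	words[n - 1] = ~(words[0] ^ words[1] ^ ... ^ words[n - 2]);
+ *
+ * XOR-ing every word together then yields 0xffff, and any single-bit
+ * corruption breaks the equality.
+ */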
+
+static void ath10k_pci_enable_eeprom(struct ath10k *ar)
+{
+ /* Enable SI clock */
+ ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
+
+ /* Configure GPIOs for I2C operation */
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
+ 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
+ SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
+ GPIO_PIN0_CONFIG) |
+ SM(1, GPIO_PIN0_PAD_PULL));
+
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
+ 4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
+ SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
+ SM(1, GPIO_PIN0_PAD_PULL));
+
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS +
+ QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
+ 1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
+
+ /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
+ ath10k_pci_write32(ar,
+ SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
+ SM(1, SI_CONFIG_ERR_INT) |
+ SM(1, SI_CONFIG_BIDIR_OD_DATA) |
+ SM(1, SI_CONFIG_I2C) |
+ SM(1, SI_CONFIG_POS_SAMPLE) |
+ SM(1, SI_CONFIG_INACTIVE_DATA) |
+ SM(1, SI_CONFIG_INACTIVE_CLK) |
+ SM(8, SI_CONFIG_DIVIDER));
+}
+
+static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
+{
+ u32 reg;
+ int wait_limit;
+
+ /* set device select byte and address for the read operation */
+ reg = QCA9887_EEPROM_SELECT_READ |
+ SM(addr, QCA9887_EEPROM_ADDR_LO) |
+ SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
+
+ /* write transmit data, transfer length, and START bit */
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
+ SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
+ SM(4, SI_CS_TX_CNT));
+
+ /* wait max 1 sec */
+ wait_limit = 100000;
+
+ /* wait for SI_CS_DONE_INT */
+ do {
+ reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
+ if (MS(reg, SI_CS_DONE_INT))
+ break;
+
+ wait_limit--;
+ udelay(10);
+ } while (wait_limit > 0);
+
+ if (!MS(reg, SI_CS_DONE_INT)) {
+ ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
+ addr);
+ return -ETIMEDOUT;
+ }
+
+ /* clear SI_CS_DONE_INT */
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
+
+ if (MS(reg, SI_CS_DONE_ERR)) {
+ ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
+ return -EIO;
+ }
+
+ /* extract receive data */
+ reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
+ *out = reg;
+
+ return 0;
+}
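+
+/* Editorial note: the poll above spells out the "wait max 1 sec" budget,
+ * i.e. 100000 iterations * udelay(10) = 1,000,000 us = 1 s.
+ */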
+
+static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
+ size_t *data_len)
+{
+ u8 *caldata = NULL;
+ size_t calsize, i;
+ int ret;
+
+ if (!QCA_REV_9887(ar))
+ return -EOPNOTSUPP;
+
+ calsize = ar->hw_params.cal_data_len;
+ caldata = kmalloc(calsize, GFP_KERNEL);
+ if (!caldata)
+ return -ENOMEM;
+
+ ath10k_pci_enable_eeprom(ar);
+
+ for (i = 0; i < calsize; i++) {
+ ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
+ if (ret)
+ goto err_free;
+ }
+
+ if (!ath10k_pci_validate_cal(caldata, calsize))
+ goto err_free;
+
+ *data = caldata;
+ *data_len = calsize;
+
+ return 0;
+
+err_free:
+ kfree(caldata);
+
+ return -EINVAL;
+}
+
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.tx_sg = ath10k_pci_hif_tx_sg,
.diag_read = ath10k_pci_hif_diag_read,
@@ -2578,6 +2734,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.suspend = ath10k_pci_hif_suspend,
.resume = ath10k_pci_hif_resume,
#endif
+ .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
};
/*
@@ -2976,24 +3133,47 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
enum ath10k_hw_rev hw_rev;
u32 chip_id;
bool pci_ps;
+ int (*pci_soft_reset)(struct ath10k *ar);
+ int (*pci_hard_reset)(struct ath10k *ar);
switch (pci_dev->device) {
case QCA988X_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA988X;
pci_ps = false;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ break;
+ case QCA9887_1_0_DEVICE_ID:
+ dev_warn(&pdev->dev, "QCA9887 support is still experimental; there are likely bugs. You have been warned.\n");
+ hw_rev = ATH10K_HW_QCA9887;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset;
break;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
hw_rev = ATH10K_HW_QCA6174;
pci_ps = true;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca6174_chip_reset;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ break;
+ case QCA9984_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9984;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
break;
case QCA9377_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9377;
pci_ps = true;
+ pci_soft_reset = NULL;
+ pci_hard_reset = ath10k_pci_qca6174_chip_reset;
break;
default:
WARN_ON(1);
@@ -3018,6 +3198,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar->dev_id = pci_dev->device;
ar_pci->pci_ps = pci_ps;
ar_pci->bus_ops = &ath10k_pci_bus_ops;
+ ar_pci->pci_soft_reset = pci_soft_reset;
+ ar_pci->pci_hard_reset = pci_hard_reset;
ar->id.vendor = pdev->vendor;
ar->id.device = pdev->device;
@@ -3169,7 +3351,7 @@ static void __exit ath10k_pci_exit(void)
module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
-MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
+MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
MODULE_LICENSE("Dual BSD/GPL");
/* QCA988x 2.0 firmware files */
@@ -3180,6 +3362,11 @@ MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+/* QCA9887 1.0 firmware files */
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 959dc321b75e..6eca1df2ce60 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -234,6 +234,12 @@ struct ath10k_pci {
const struct ath10k_bus_ops *bus_ops;
+ /* Chip specific pci reset routine used to do a safe reset */
+ int (*pci_soft_reset)(struct ath10k *ar);
+
+ /* Chip specific pci full reset function */
+ int (*pci_hard_reset)(struct ath10k *ar);
+
 /* Keep this entry last; memory for struct ath10k_ahb is
* allocated (ahb support enabled case) in the continuation of
* this struct.
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index ca8d16884af1..034e7a54c5b2 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -656,26 +656,6 @@ struct rx_msdu_end {
* Reserved: HW should fill with zero. FW should ignore.
*/
-#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0
-#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1
-
-#define RX_PPDU_START_SIG_RATE_OFDM_48 0
-#define RX_PPDU_START_SIG_RATE_OFDM_24 1
-#define RX_PPDU_START_SIG_RATE_OFDM_12 2
-#define RX_PPDU_START_SIG_RATE_OFDM_6 3
-#define RX_PPDU_START_SIG_RATE_OFDM_54 4
-#define RX_PPDU_START_SIG_RATE_OFDM_36 5
-#define RX_PPDU_START_SIG_RATE_OFDM_18 6
-#define RX_PPDU_START_SIG_RATE_OFDM_9 7
-
-#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0
-#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1
-#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2
-#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3
-#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4
-#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5
-#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6
-
#define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04
#define HTT_RX_PPDU_START_PREAMBLE_HT 0x08
#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09
@@ -711,25 +691,6 @@ struct rx_msdu_end {
/* No idea what this flag means. It seems to be always set in rate. */
#define RX_PPDU_START_RATE_FLAG BIT(3)
-enum rx_ppdu_start_rate {
- RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
- RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
- RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
- RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
- RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
- RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
- RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
- RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
-
- RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
- RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
- RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
- RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
- RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
- RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
- RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
-};
-
struct rx_ppdu_start {
struct {
u8 pri20_mhz;
@@ -994,7 +955,41 @@ struct rx_pkt_end {
__le32 info0; /* %RX_PKT_END_INFO0_ */
__le32 phy_timestamp_1;
__le32 phy_timestamp_2;
- __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+} __packed;
+
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_LSB 15
+#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_MASK 0xc0000000
+#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_LSB 30
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_STATUS BIT(14)
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_STATUS BIT(29)
+
+#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_MASK 0x0000000c
+#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_LSB 2
+#define RX_LOCATION_INFO1_PKT_BW_MASK 0x00000030
+#define RX_LOCATION_INFO1_PKT_BW_LSB 4
+#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_MASK 0x0000ff00
+#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_LSB 8
+#define RX_LOCATION_INFO1_RTT_MSC_RATE_MASK 0x000f0000
+#define RX_LOCATION_INFO1_RTT_MSC_RATE_LSB 16
+#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_MASK 0x00300000
+#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_LSB 20
+#define RX_LOCATION_INFO1_TIMING_BACKOFF_MASK 0x07c00000
+#define RX_LOCATION_INFO1_TIMING_BACKOFF_LSB 22
+#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_MASK 0x18000000
+#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_LSB 27
+#define RX_LOCATION_INFO1_RTT_CFR_STATUS BIT(0)
+#define RX_LOCATION_INFO1_RTT_CIR_STATUS BIT(1)
+#define RX_LOCATION_INFO1_RTT_GI_TYPE BIT(7)
+#define RX_LOCATION_INFO1_RTT_MAC_PHY_PHASE BIT(29)
+#define RX_LOCATION_INFO1_RTT_TX_DATA_START_X_PHASE BIT(30)
+#define RX_LOCATION_INFO1_RX_LOCATION_VALID BIT(31)
+
+struct rx_location_info {
+ __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+ __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
} __packed;
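
/* Sketch (editorial, not part of the original patch): extracting a field
 * from the new words with the MS() bitfield macro used elsewhere in
 * ath10k:
 *
 *	info0 = __le32_to_cpu(loc->rx_location_info0);
 *	rtt_fac = MS(info0, RX_LOCATION_INFO0_RTT_FAC_LEGACY);
 */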
enum rx_phy_ppdu_end_info0 {
@@ -1067,6 +1062,17 @@ struct rx_phy_ppdu_end {
struct rx_ppdu_end_qca99x0 {
struct rx_pkt_end rx_pkt_end;
+ __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+ __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+struct rx_ppdu_end_qca9984 {
+ struct rx_pkt_end rx_pkt_end;
+ struct rx_location_info rx_location_info;
struct rx_phy_ppdu_end rx_phy_ppdu_end;
__le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
__le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
@@ -1080,6 +1086,7 @@ struct rx_ppdu_end {
struct rx_ppdu_end_qca988x qca988x;
struct rx_ppdu_end_qca6174 qca6174;
struct rx_ppdu_end_qca99x0 qca99x0;
+ struct rx_ppdu_end_qca9984 qca9984;
} __packed;
} __packed;
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 8e24099fa936..aaf53a81e78b 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -447,6 +447,9 @@ Fw Mode/SubMode Mask
#define QCA988X_BOARD_DATA_SZ 7168
#define QCA988X_BOARD_EXT_DATA_SZ 0
+#define QCA9887_BOARD_DATA_SZ 7168
+#define QCA9887_BOARD_EXT_DATA_SZ 0
+
#define QCA6174_BOARD_DATA_SZ 8192
#define QCA6174_BOARD_EXT_DATA_SZ 0
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 576e7c42ed65..1966c787998b 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -117,6 +117,9 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
+
+ ath10k_mac_tx_push_pending(ar);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 2c300329ebc3..6279ab4a760e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1104,6 +1104,7 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -1199,6 +1200,7 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
@@ -1294,6 +1296,7 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
};
/* firmware 10.2 specific mappings */
@@ -1550,6 +1553,7 @@ static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
.wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
.arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
.arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+ .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
};
static const struct wmi_peer_flags_map wmi_peer_flags_map = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 9fdf47ea27d0..90f594e89f94 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3447,6 +3447,7 @@ struct wmi_pdev_param_map {
u32 wapi_mbssid_offset;
u32 arp_srcaddr;
u32 arp_dstaddr;
+ u32 enable_btcoex;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -3760,6 +3761,9 @@ enum wmi_10_4_pdev_param {
WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_10_4_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_10_4_PDEV_PARAM_ATF_SSID_GROUP_POLICY,
+ WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
};
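
/* Sketch (editorial, not part of the original patch): toggling BT coex
 * at runtime on 10.4 firmware that advertises
 * ATH10K_FW_FEATURE_BTCOEX_PARAM, mirroring the debugfs path above:
 *
 *	param = ar->wmi.pdev_param->enable_btcoex;
 *	ret = ath10k_wmi_pdev_set_param(ar, param, val);
 */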
struct wmi_pdev_set_param_cmd {
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index fc47b70988b1..f23c851765df 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -219,8 +219,8 @@ ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
sifs = AR5K_INIT_SIFS_QUARTER_RATE;
break;
case AR5K_BWMODE_DEFAULT:
- sifs = AR5K_INIT_SIFS_DEFAULT_BG;
default:
+ sifs = AR5K_INIT_SIFS_DEFAULT_BG;
if (channel->band == NL80211_BAND_5GHZ)
sifs = AR5K_INIT_SIFS_DEFAULT_A;
break;
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 7a1970e484a6..ac25f1781b42 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -148,7 +148,7 @@ enum ath6kl_fw_capability {
/* ratetable is the 2 stream version (max MCS15) */
ATH6KL_FW_CAPABILITY_RATETABLE_MCS15,
- /* firmare doesn't support IP checksumming */
+ /* firmware doesn't support IP checksumming */
ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM,
/* this needs to be last */
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 631c3a0c572b..b8cf04d11975 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -2544,8 +2544,7 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
s32 nominal_phy = 0;
int ret;
- if (!((params->user_pri < 8) &&
- (params->user_pri <= 0x7) &&
+ if (!((params->user_pri <= 0x7) &&
(up_to_ac[params->user_pri & 0x7] == params->traffic_class) &&
(params->traffic_direc == UPLINK_TRAFFIC ||
params->traffic_direc == DNLINK_TRAFFIC ||
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index dec1a317a070..d0224fc58e78 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3202,8 +3202,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
it, length);
break;
case _CompressBlock:
- if (reference == 0) {
- } else {
+ if (reference != 0) {
eep = ar9003_eeprom_struct_find_by_id(reference);
if (eep == NULL) {
ath_dbg(common, EEPROM,
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 9272ca90632b..80ff69f99229 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1122,12 +1122,12 @@ enum {
#define AR9300_NUM_GPIO 16
#define AR9330_NUM_GPIO 16
#define AR9340_NUM_GPIO 23
-#define AR9462_NUM_GPIO 10
+#define AR9462_NUM_GPIO 14
#define AR9485_NUM_GPIO 12
#define AR9531_NUM_GPIO 18
#define AR9550_NUM_GPIO 24
#define AR9561_NUM_GPIO 23
-#define AR9565_NUM_GPIO 12
+#define AR9565_NUM_GPIO 14
#define AR9580_NUM_GPIO 16
#define AR7010_NUM_GPIO 16
@@ -1139,12 +1139,12 @@ enum {
#define AR9300_GPIO_MASK 0x0000F4FF
#define AR9330_GPIO_MASK 0x0000F4FF
#define AR9340_GPIO_MASK 0x0000000F
-#define AR9462_GPIO_MASK 0x000003FF
+#define AR9462_GPIO_MASK 0x00003FFF
#define AR9485_GPIO_MASK 0x00000FFF
#define AR9531_GPIO_MASK 0x0000000F
#define AR9550_GPIO_MASK 0x0000000F
#define AR9561_GPIO_MASK 0x0000000F
-#define AR9565_GPIO_MASK 0x00000FFF
+#define AR9565_GPIO_MASK 0x00003FFF
#define AR9580_GPIO_MASK 0x0000F4FF
#define AR7010_GPIO_MASK 0x0000FFFF
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index ac4781f37e78..16aca9e28b77 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -132,7 +132,6 @@ static int ath9k_tx99_init(struct ath_softc *sc)
ath9k_ps_wakeup(sc);
ath9k_hw_disable_interrupts(ah);
- atomic_set(&ah->intr_ref_cnt, -1);
ath_drain_all_txq(sc);
ath_stoprecv(sc);
@@ -266,7 +265,7 @@ static const struct file_operations fops_tx99_power = {
void ath9k_tx99_init_debug(struct ath_softc *sc)
{
- if (!AR_SREV_9300_20_OR_LATER(sc->sc_ah))
+ if (!AR_SREV_9280_20_OR_LATER(sc->sc_ah))
return;
debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
index 1a796e5f69ec..2e34baeaf764 100644
--- a/drivers/net/wireless/ath/carl9170/Kconfig
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -5,12 +5,10 @@ config CARL9170
select FW_LOADER
select CRC32
help
- This is another driver for the Atheros "otus" 802.11n USB devices.
+ This is the mainline driver for the Atheros "otus" 802.11n USB devices.
- This driver provides more features than the original,
- but it needs a special firmware (carl9170-1.fw) to do that.
-
- The firmware can be downloaded from our wiki here:
+ It needs a special firmware (carl9170-1.fw), which can be downloaded
+ from our wiki here:
<http://wireless.kernel.org/en/users/Drivers/carl9170>
If you choose to build a module, it'll be called carl9170.
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 5769811291bf..62bf9331bd7f 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -378,6 +378,10 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
/* social scan on P2P_DEVICE is handled as p2p search */
if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
wil_p2p_is_social_scan(request)) {
+ if (!wil->p2p.p2p_dev_started) {
+ wil_err(wil, "P2P search requested on stopped P2P device\n");
+ return -EIO;
+ }
wil->scan_request = request;
wil->radio_wdev = wdev;
rc = wil_p2p_search(wil, request);
@@ -1351,6 +1355,7 @@ static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s: entered\n", __func__);
+ wil->p2p.p2p_dev_started = 1;
return 0;
}
@@ -1358,8 +1363,19 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ u8 started;
wil_dbg_misc(wil, "%s: entered\n", __func__);
+ mutex_lock(&wil->mutex);
+ started = wil_p2p_stop_discovery(wil);
+ if (started && wil->scan_request) {
+ cfg80211_scan_done(wil->scan_request, 1);
+ wil->scan_request = NULL;
+ wil->radio_wdev = wil->wdev;
+ }
+ mutex_unlock(&wil->mutex);
+
+ wil->p2p.p2p_dev_started = 0;
}
static struct cfg80211_ops wil_cfg80211_ops = {
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
index c312a667c12a..217a4591bde4 100644
--- a/drivers/net/wireless/ath/wil6210/debug.c
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013,2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -19,34 +19,31 @@
void __wil_err(struct wil6210_priv *wil, const char *fmt, ...)
{
- struct net_device *ndev = wil_to_ndev(wil);
- struct va_format vaf = {
- .fmt = fmt,
- };
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
+ vaf.fmt = fmt;
vaf.va = &args;
- netdev_err(ndev, "%pV", &vaf);
+ netdev_err(wil_to_ndev(wil), "%pV", &vaf);
trace_wil6210_log_err(&vaf);
va_end(args);
}
void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
{
- if (net_ratelimit()) {
- struct net_device *ndev = wil_to_ndev(wil);
- struct va_format vaf = {
- .fmt = fmt,
- };
- va_list args;
+ struct va_format vaf;
+ va_list args;
- va_start(args, fmt);
- vaf.va = &args;
- netdev_err(ndev, "%pV", &vaf);
- trace_wil6210_log_err(&vaf);
- va_end(args);
- }
+ if (!net_ratelimit())
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ netdev_err(wil_to_ndev(wil), "%pV", &vaf);
+ trace_wil6210_log_err(&vaf);
+ va_end(args);
}
void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...)
@@ -67,27 +64,24 @@ void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...)
void __wil_info(struct wil6210_priv *wil, const char *fmt, ...)
{
- struct net_device *ndev = wil_to_ndev(wil);
- struct va_format vaf = {
- .fmt = fmt,
- };
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
+ vaf.fmt = fmt;
vaf.va = &args;
- netdev_info(ndev, "%pV", &vaf);
+ netdev_info(wil_to_ndev(wil), "%pV", &vaf);
trace_wil6210_log_info(&vaf);
va_end(args);
}
void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...)
{
- struct va_format vaf = {
- .fmt = fmt,
- };
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
+ vaf.fmt = fmt;
vaf.va = &args;
trace_wil6210_log_dbg(&vaf);
va_end(args);
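
All four helpers now follow the same shape: declare the va_format, open the va_list, fill both vaf fields inside the va_start()/va_end() window, and emit a single printk through the %pV extension, which expands a format-plus-va_list pair in one pass. A minimal sketch of that shape — example_log() is an illustrative name, not a wil6210 symbol:

	#include <linux/kernel.h>
	#include <linux/printk.h>

	/* %pV wrapper sketch: vaf points at a live va_list, so it is
	 * only valid between va_start() and va_end().
	 */
	__printf(1, 2)
	static void example_log(const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		pr_err("%pV", &vaf);
		va_end(args);
	}
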
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index 1c9153894dca..213b8259638c 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -114,8 +114,10 @@ int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
u8 channel = P2P_DMG_SOCIAL_CHANNEL;
int rc;
- if (chan)
- channel = chan->hw_value;
+ if (!chan)
+ return -EINVAL;
+
+ channel = chan->hw_value;
wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index aeb72c438e44..7b5c4222bc33 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,13 +18,20 @@
#include <linux/pci.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
-
+#include <linux/suspend.h>
#include "wil6210.h"
static bool use_msi = true;
module_param(use_msi, bool, S_IRUGO);
MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+static int wil6210_pm_notify(struct notifier_block *notify_block,
+ unsigned long mode, void *unused);
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
+
static
void wil_set_capabilities(struct wil6210_priv *wil)
{
@@ -238,6 +245,18 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto bus_disable;
}
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+ wil->pm_notify.notifier_call = wil6210_pm_notify;
+ rc = register_pm_notifier(&wil->pm_notify);
+ if (rc)
+ /* Do not fail the driver initialization, as suspend can
+ * be prevented in a later phase if needed
+ */
+ wil_err(wil, "register_pm_notifier failed: %d\n", rc);
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
+
wil6210_debugfs_init(wil);
@@ -267,6 +286,12 @@ static void wil_pcie_remove(struct pci_dev *pdev)
wil_dbg_misc(wil, "%s()\n", __func__);
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+ unregister_pm_notifier(&wil->pm_notify);
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
+
wil6210_debugfs_remove(wil);
wil_if_remove(wil);
wil_if_pcie_disable(wil);
@@ -335,6 +360,45 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
return rc;
}
+static int wil6210_pm_notify(struct notifier_block *notify_block,
+ unsigned long mode, void *unused)
+{
+ struct wil6210_priv *wil = container_of(
+ notify_block, struct wil6210_priv, pm_notify);
+ int rc = 0;
+ enum wil_platform_event evt;
+
+ wil_dbg_pm(wil, "%s: mode (%ld)\n", __func__, mode);
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
+ rc = wil_can_suspend(wil, false);
+ if (rc)
+ break;
+ evt = WIL_PLATFORM_EVT_PRE_SUSPEND;
+ if (wil->platform_ops.notify)
+ rc = wil->platform_ops.notify(wil->platform_handle,
+ evt);
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ evt = WIL_PLATFORM_EVT_POST_SUSPEND;
+ if (wil->platform_ops.notify)
+ rc = wil->platform_ops.notify(wil->platform_handle,
+ evt);
+ break;
+ default:
+ wil_dbg_pm(wil, "unhandled notify mode %ld\n", mode);
+ break;
+ }
+
+ wil_dbg_pm(wil, "notification mode %ld: rc (%d)\n", mode, rc);
+ return rc;
+}
+
static int wil6210_pm_suspend(struct device *dev)
{
return wil6210_suspend(dev, false);
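
The notifier gives the driver a hook on both sides of a system sleep transition: the PREPARE events run before tasks are frozen and may veto the transition, the POST events run after resume. A minimal sketch of the same pattern — my_priv, my_pm_notify() and the busy flag are illustrative, not wil6210 symbols; notifier_from_errno() is the canonical way to veto:

	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <linux/suspend.h>

	struct my_priv {
		struct notifier_block pm_notify;
		bool busy;	/* illustrative "can't suspend" state */
	};

	static int my_pm_notify(struct notifier_block *nb, unsigned long mode,
				void *unused)
	{
		struct my_priv *priv =
			container_of(nb, struct my_priv, pm_notify);

		switch (mode) {
		case PM_HIBERNATION_PREPARE:
		case PM_SUSPEND_PREPARE:
			if (priv->busy)
				return notifier_from_errno(-EBUSY); /* veto */
			break;
		case PM_POST_SUSPEND:
		case PM_POST_HIBERNATION:
			/* undo whatever PREPARE set up */
			break;
		}
		return NOTIFY_DONE;
	}

	static int my_probe(struct my_priv *priv)
	{
		priv->pm_notify.notifier_call = my_pm_notify;
		return register_pm_notifier(&priv->pm_notify);
	}
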
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 0b7ecbcac19c..11ee24d509e5 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -24,10 +24,32 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
wil_dbg_pm(wil, "%s(%s)\n", __func__,
is_runtime ? "runtime" : "system");
+ if (!netif_running(wil_to_ndev(wil))) {
+ /* can always sleep when down */
+ wil_dbg_pm(wil, "Interface is down\n");
+ goto out;
+ }
+ if (test_bit(wil_status_resetting, wil->status)) {
+ wil_dbg_pm(wil, "Delay suspend when resetting\n");
+ rc = -EBUSY;
+ goto out;
+ }
+ if (wil->recovery_state != fw_recovery_idle) {
+ wil_dbg_pm(wil, "Delay suspend during recovery\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* interface is running */
switch (wdev->iftype) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
+ if (test_bit(wil_status_fwconnecting, wil->status)) {
+ wil_dbg_pm(wil, "Delay suspend when connecting\n");
+ rc = -EBUSY;
+ goto out;
+ }
break;
/* AP-like interface - can't suspend */
default:
@@ -36,6 +58,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
break;
}
+out:
wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index a4e43796addb..f2f6a404d3d1 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -184,6 +184,13 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
&vring->va[vring->swtail].tx;
ctx = &vring->ctx[vring->swtail];
+ if (!ctx) {
+ wil_dbg_txrx(wil,
+ "ctx(%d) was already completed\n",
+ vring->swtail);
+ vring->swtail = wil_vring_next_tail(vring);
+ continue;
+ }
*d = *_d;
wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
@@ -544,6 +551,12 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
break;
}
}
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
wil_w(wil, v->hwtail, v->swtail);
return rc;
@@ -969,6 +982,13 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
txdata->dot1x_open = false;
txdata->enabled = 0; /* no Tx can be in progress or start anew */
spin_unlock_bh(&txdata->lock);
+ /* napi_synchronize waits for completion of the current NAPI but will
+ * not prevent the next NAPI run.
+ * Add a memory barrier to guarantee that txdata->enabled is zeroed
+ * before napi_synchronize so that the next scheduled NAPI will not
+ * handle this vring
+ */
+ wmb();
/* make sure NAPI won't touch this vring */
if (test_bit(wil_status_napi_en, wil->status))
napi_synchronize(&wil->napi_tx);
@@ -1551,6 +1571,13 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
vring_index, used, used + descs_used);
}
+ /* Make sure to advance the head only after descriptor update is done.
+ * This will prevent a race condition where the completion thread
+ * will see the DU bit set from a previous run and handle the
+ * skb before it is completed.
+ */
+ wmb();
+
/* advance swhead */
wil_vring_advance_head(vring, descs_used);
wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
@@ -1567,7 +1594,7 @@ mem_error:
while (descs_used > 0) {
struct wil_ctx *ctx;
- i = (swhead + descs_used) % vring->size;
+ i = (swhead + descs_used - 1) % vring->size;
d = (struct vring_tx_desc *)&vring->va[i].tx;
_desc = &vring->va[i].tx;
*d = *_desc;
@@ -1691,6 +1718,13 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
vring_index, used, used + nr_frags + 1);
}
+ /* Make sure to advance the head only after descriptor update is done.
+ * This will prevent a race condition where the completion thread
+ * will see the DU bit set from a previous run and handle the
+ * skb before it is completed.
+ */
+ wmb();
+
/* advance swhead */
wil_vring_advance_head(vring, nr_frags + 1);
wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
@@ -1914,6 +1948,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
wil_consume_skb(skb, d->dma.error == 0);
}
memset(ctx, 0, sizeof(*ctx));
+ /* Make sure the ctx is zeroed before updating the tail
+ * to prevent a case where wil_tx_vring will see
+ * this descriptor as used and handle it before ctx zero
+ * is completed.
+ */
+ wmb();
/* There is no need to touch HW descriptor:
* - status bit TX_DMA_STATUS_DU is set by design,
* so hardware will not try to process this desc.,
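
The barriers added in this file all enforce one producer-side rule: finish writing the payload (descriptors, ctx, the enabled flag) before writing the index or flag that makes the other side — hardware DMA or the completion path — look at it. Reduced to its shape, with an illustrative ring rather than the wil6210 structures:

	#include <linux/types.h>
	#include <linux/compiler.h>
	#include <asm/barrier.h>

	struct ring {
		u32 head;
		u64 slot[256];
	};

	static void ring_produce(struct ring *r, u64 val)
	{
		r->slot[r->head & 255] = val;	  /* 1: write the payload */
		wmb();				  /* 2: payload before index */
		WRITE_ONCE(r->head, r->head + 1); /* 3: publish the slot */
	}

Without the wmb(), the consumer may observe the new head while the slot still holds stale data; the same reasoning puts a barrier between zeroing txdata->enabled and napi_synchronize() above.
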
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index aa09cbcce47c..ecab4af90602 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -458,6 +458,7 @@ struct wil_tid_crypto_rx {
struct wil_p2p_info {
struct ieee80211_channel listen_chan;
u8 discovery_started;
+ u8 p2p_dev_started;
u64 cookie;
struct timer_list discovery_timer; /* listen/search duration */
struct work_struct discovery_expired_work; /* listen/search expire */
@@ -662,6 +663,11 @@ struct wil6210_priv {
/* High Access Latency Policy voting */
struct wil_halp halp;
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+ struct notifier_block pm_notify;
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 33d4a34b3b1c..f8c41172a3f4 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -23,6 +23,8 @@ enum wil_platform_event {
WIL_PLATFORM_EVT_FW_CRASH = 0,
WIL_PLATFORM_EVT_PRE_RESET = 1,
WIL_PLATFORM_EVT_FW_RDY = 2,
+ WIL_PLATFORM_EVT_PRE_SUSPEND = 3,
+ WIL_PLATFORM_EVT_POST_SUSPEND = 4,
};
/**
diff --git a/drivers/net/wireless/broadcom/b43/Makefile b/drivers/net/wireless/broadcom/b43/Makefile
index ddc4df46656f..27fab958e3d5 100644
--- a/drivers/net/wireless/broadcom/b43/Makefile
+++ b/drivers/net/wireless/broadcom/b43/Makefile
@@ -1,6 +1,6 @@
b43-y += main.o
b43-y += bus.o
-b43-$(CONFIG_B43_PHY_G) += phy_a.o phy_g.o tables.o lo.o wa.o
+b43-$(CONFIG_B43_PHY_G) += phy_g.o tables.o lo.o wa.o
b43-$(CONFIG_B43_PHY_N) += tables_nphy.o
b43-$(CONFIG_B43_PHY_N) += radio_2055.o
b43-$(CONFIG_B43_PHY_N) += radio_2056.o
diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
index d79ab2a227e1..cb987c2ecc6b 100644
--- a/drivers/net/wireless/broadcom/b43/leds.c
+++ b/drivers/net/wireless/broadcom/b43/leds.c
@@ -222,7 +222,7 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
sprom[2] = dev->dev->bus_sprom->gpio2;
sprom[3] = dev->dev->bus_sprom->gpio3;
- if (sprom[led_index] == 0xFF) {
+ if ((sprom[0] & sprom[1] & sprom[2] & sprom[3]) == 0xff) {
/* There is no LED information in the SPROM
* for this LED. Hardcode it here. */
*activelow = false;
@@ -250,7 +250,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
return;
}
} else {
- *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR;
+ /* keep LED disabled if no mapping is defined */
+ if (sprom[led_index] == 0xff)
+ *behaviour = B43_LED_OFF;
+ else
+ *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR;
*activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW);
}
}
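
The rewritten test relies on a simple property: the AND of the four SPROM bytes equals 0xff only when every byte is 0xff, since any programmed entry clears at least one bit. That distinguishes a completely unprogrammed LED block (fall back to the hardcoded defaults) from a block where only this one LED is unmapped (keep it off). A standalone illustration:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t unset[4] = { 0xff, 0xff, 0xff, 0xff };
		uint8_t mixed[4] = { 0xff, 0x82, 0xff, 0xff };

		/* all bytes 0xff  <=>  AND of the bytes is 0xff */
		assert((unset[0] & unset[1] & unset[2] & unset[3]) == 0xff);
		assert((mixed[0] & mixed[1] & mixed[2] & mixed[3]) != 0xff);
		return 0;
	}
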
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 4ee5c5853f9f..6e5d9095b195 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -3180,7 +3180,6 @@ static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm)
static void b43_rate_memory_init(struct b43_wldev *dev)
{
switch (dev->phy.type) {
- case B43_PHYTYPE_A:
case B43_PHYTYPE_G:
case B43_PHYTYPE_N:
case B43_PHYTYPE_LP:
@@ -3194,8 +3193,6 @@ static void b43_rate_memory_init(struct b43_wldev *dev)
b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1);
b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1);
b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1);
- if (dev->phy.type == B43_PHYTYPE_A)
- break;
/* fallthrough */
case B43_PHYTYPE_B:
b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0);
@@ -4604,14 +4601,6 @@ static int b43_phy_versioning(struct b43_wldev *dev)
if (radio_manuf != 0x17F /* Broadcom */)
unsupported = 1;
switch (phy_type) {
- case B43_PHYTYPE_A:
- if (radio_id != 0x2060)
- unsupported = 1;
- if (radio_rev != 1)
- unsupported = 1;
- if (radio_manuf != 0x17F)
- unsupported = 1;
- break;
case B43_PHYTYPE_B:
if ((radio_id & 0xFFF0) != 0x2050)
unsupported = 1;
@@ -4766,10 +4755,7 @@ static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle)
u16 pu_delay;
/* The time value is in microseconds. */
- if (dev->phy.type == B43_PHYTYPE_A)
- pu_delay = 3700;
- else
- pu_delay = 1050;
+ pu_delay = 1050;
if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle)
pu_delay = 500;
if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8))
@@ -4784,14 +4770,10 @@ static void b43_set_pretbtt(struct b43_wldev *dev)
u16 pretbtt;
/* The time value is in microseconds. */
- if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) {
+ if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
pretbtt = 2;
- } else {
- if (dev->phy.type == B43_PHYTYPE_A)
- pretbtt = 120;
- else
- pretbtt = 250;
- }
+ else
+ pretbtt = 250;
b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRETBTT, pretbtt);
b43_write16(dev, B43_MMIO_TSF_CFP_PRETBTT, pretbtt);
}
@@ -5380,10 +5362,6 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
/* As a fallback, try to guess using PHY type */
switch (dev->phy.type) {
- case B43_PHYTYPE_A:
- *have_2ghz_phy = false;
- *have_5ghz_phy = true;
- return;
case B43_PHYTYPE_G:
case B43_PHYTYPE_N:
case B43_PHYTYPE_LP:
@@ -5455,7 +5433,6 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
/* We don't support 5 GHz on some PHYs yet */
if (have_5ghz_phy) {
switch (dev->phy.type) {
- case B43_PHYTYPE_A:
case B43_PHYTYPE_G:
case B43_PHYTYPE_LP:
case B43_PHYTYPE_HT:
diff --git a/drivers/net/wireless/broadcom/b43/phy_a.c b/drivers/net/wireless/broadcom/b43/phy_a.c
deleted file mode 100644
index 99c036f5ecb7..000000000000
--- a/drivers/net/wireless/broadcom/b43/phy_a.c
+++ /dev/null
@@ -1,595 +0,0 @@
-/*
-
- Broadcom B43 wireless driver
- IEEE 802.11a PHY driver
-
- Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
- Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (c) 2005-2008 Michael Buesch <m@bues.ch>
- Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
- Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING. If not, write to
- the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- Boston, MA 02110-1301, USA.
-
-*/
-
-#include <linux/slab.h>
-
-#include "b43.h"
-#include "phy_a.h"
-#include "phy_common.h"
-#include "wa.h"
-#include "tables.h"
-#include "main.h"
-
-
-/* Get the freq, as it has to be written to the device. */
-static inline u16 channel2freq_a(u8 channel)
-{
- B43_WARN_ON(channel > 200);
-
- return (5000 + 5 * channel);
-}
-
-static inline u16 freq_r3A_value(u16 frequency)
-{
- u16 value;
-
- if (frequency < 5091)
- value = 0x0040;
- else if (frequency < 5321)
- value = 0x0000;
- else if (frequency < 5806)
- value = 0x0080;
- else
- value = 0x0040;
-
- return value;
-}
-
-#if 0
-/* This function converts a TSSI value to dBm in Q5.2 */
-static s8 b43_aphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
-{
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_a *aphy = phy->a;
- s8 dbm = 0;
- s32 tmp;
-
- tmp = (aphy->tgt_idle_tssi - aphy->cur_idle_tssi + tssi);
- tmp += 0x80;
- tmp = clamp_val(tmp, 0x00, 0xFF);
- dbm = aphy->tssi2dbm[tmp];
- //TODO: There's a FIXME on the specs
-
- return dbm;
-}
-#endif
-
-static void b43_radio_set_tx_iq(struct b43_wldev *dev)
-{
- static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 };
- static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A };
- u16 tmp = b43_radio_read16(dev, 0x001E);
- int i, j;
-
- for (i = 0; i < 5; i++) {
- for (j = 0; j < 5; j++) {
- if (tmp == (data_high[i] << 4 | data_low[j])) {
- b43_phy_write(dev, 0x0069,
- (i - j) << 8 | 0x00C0);
- return;
- }
- }
- }
-}
-
-static void aphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
-{
- u16 freq, r8, tmp;
-
- freq = channel2freq_a(channel);
-
- r8 = b43_radio_read16(dev, 0x0008);
- b43_write16(dev, 0x03F0, freq);
- b43_radio_write16(dev, 0x0008, r8);
-
- //TODO: write max channel TX power? to Radio 0x2D
- tmp = b43_radio_read16(dev, 0x002E);
- tmp &= 0x0080;
- //TODO: OR tmp with the Power out estimation for this channel?
- b43_radio_write16(dev, 0x002E, tmp);
-
- if (freq >= 4920 && freq <= 5500) {
- /*
- * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F;
- * = (freq * 0.025862069)
- */
- r8 = 3 * freq / 116; /* is equal to r8 = freq * 0.025862 */
- }
- b43_radio_write16(dev, 0x0007, (r8 << 4) | r8);
- b43_radio_write16(dev, 0x0020, (r8 << 4) | r8);
- b43_radio_write16(dev, 0x0021, (r8 << 4) | r8);
- b43_radio_maskset(dev, 0x0022, 0x000F, (r8 << 4));
- b43_radio_write16(dev, 0x002A, (r8 << 4));
- b43_radio_write16(dev, 0x002B, (r8 << 4));
- b43_radio_maskset(dev, 0x0008, 0x00F0, (r8 << 4));
- b43_radio_maskset(dev, 0x0029, 0xFF0F, 0x00B0);
- b43_radio_write16(dev, 0x0035, 0x00AA);
- b43_radio_write16(dev, 0x0036, 0x0085);
- b43_radio_maskset(dev, 0x003A, 0xFF20, freq_r3A_value(freq));
- b43_radio_mask(dev, 0x003D, 0x00FF);
- b43_radio_maskset(dev, 0x0081, 0xFF7F, 0x0080);
- b43_radio_mask(dev, 0x0035, 0xFFEF);
- b43_radio_maskset(dev, 0x0035, 0xFFEF, 0x0010);
- b43_radio_set_tx_iq(dev);
- //TODO: TSSI2dbm workaround
-//FIXME b43_phy_xmitpower(dev);
-}
-
-static void b43_radio_init2060(struct b43_wldev *dev)
-{
- b43_radio_write16(dev, 0x0004, 0x00C0);
- b43_radio_write16(dev, 0x0005, 0x0008);
- b43_radio_write16(dev, 0x0009, 0x0040);
- b43_radio_write16(dev, 0x0005, 0x00AA);
- b43_radio_write16(dev, 0x0032, 0x008F);
- b43_radio_write16(dev, 0x0006, 0x008F);
- b43_radio_write16(dev, 0x0034, 0x008F);
- b43_radio_write16(dev, 0x002C, 0x0007);
- b43_radio_write16(dev, 0x0082, 0x0080);
- b43_radio_write16(dev, 0x0080, 0x0000);
- b43_radio_write16(dev, 0x003F, 0x00DA);
- b43_radio_mask(dev, 0x0005, ~0x0008);
- b43_radio_mask(dev, 0x0081, ~0x0010);
- b43_radio_mask(dev, 0x0081, ~0x0020);
- b43_radio_mask(dev, 0x0081, ~0x0020);
- msleep(1); /* delay 400usec */
-
- b43_radio_maskset(dev, 0x0081, ~0x0020, 0x0010);
- msleep(1); /* delay 400usec */
-
- b43_radio_maskset(dev, 0x0005, ~0x0008, 0x0008);
- b43_radio_mask(dev, 0x0085, ~0x0010);
- b43_radio_mask(dev, 0x0005, ~0x0008);
- b43_radio_mask(dev, 0x0081, ~0x0040);
- b43_radio_maskset(dev, 0x0081, ~0x0040, 0x0040);
- b43_radio_write16(dev, 0x0005,
- (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008);
- b43_phy_write(dev, 0x0063, 0xDDC6);
- b43_phy_write(dev, 0x0069, 0x07BE);
- b43_phy_write(dev, 0x006A, 0x0000);
-
- aphy_channel_switch(dev, dev->phy.ops->get_default_chan(dev));
-
- msleep(1);
-}
-
-static void b43_phy_rssiagc(struct b43_wldev *dev, u8 enable)
-{
- int i;
-
- if (dev->phy.rev < 3) {
- if (enable)
- for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
- b43_ofdmtab_write16(dev,
- B43_OFDMTAB_LNAHPFGAIN1, i, 0xFFF8);
- b43_ofdmtab_write16(dev,
- B43_OFDMTAB_WRSSI, i, 0xFFF8);
- }
- else
- for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
- b43_ofdmtab_write16(dev,
- B43_OFDMTAB_LNAHPFGAIN1, i, b43_tab_rssiagc1[i]);
- b43_ofdmtab_write16(dev,
- B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc1[i]);
- }
- } else {
- if (enable)
- for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++)
- b43_ofdmtab_write16(dev,
- B43_OFDMTAB_WRSSI, i, 0x0820);
- else
- for (i = 0; i < B43_TAB_RSSIAGC2_SIZE; i++)
- b43_ofdmtab_write16(dev,
- B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc2[i]);
- }
-}
-
-static void b43_phy_ww(struct b43_wldev *dev)
-{
- u16 b, curr_s, best_s = 0xFFFF;
- int i;
-
- b43_phy_mask(dev, B43_PHY_CRS0, ~B43_PHY_CRS0_EN);
- b43_phy_set(dev, B43_PHY_OFDM(0x1B), 0x1000);
- b43_phy_maskset(dev, B43_PHY_OFDM(0x82), 0xF0FF, 0x0300);
- b43_radio_set(dev, 0x0009, 0x0080);
- b43_radio_maskset(dev, 0x0012, 0xFFFC, 0x0002);
- b43_wa_initgains(dev);
- b43_phy_write(dev, B43_PHY_OFDM(0xBA), 0x3ED5);
- b = b43_phy_read(dev, B43_PHY_PWRDOWN);
- b43_phy_write(dev, B43_PHY_PWRDOWN, (b & 0xFFF8) | 0x0005);
- b43_radio_set(dev, 0x0004, 0x0004);
- for (i = 0x10; i <= 0x20; i++) {
- b43_radio_write16(dev, 0x0013, i);
- curr_s = b43_phy_read(dev, B43_PHY_OTABLEQ) & 0x00FF;
- if (!curr_s) {
- best_s = 0x0000;
- break;
- } else if (curr_s >= 0x0080)
- curr_s = 0x0100 - curr_s;
- if (curr_s < best_s)
- best_s = curr_s;
- }
- b43_phy_write(dev, B43_PHY_PWRDOWN, b);
- b43_radio_mask(dev, 0x0004, 0xFFFB);
- b43_radio_write16(dev, 0x0013, best_s);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 0xFFEC);
- b43_phy_write(dev, B43_PHY_OFDM(0xB7), 0x1E80);
- b43_phy_write(dev, B43_PHY_OFDM(0xB6), 0x1C00);
- b43_phy_write(dev, B43_PHY_OFDM(0xB5), 0x0EC0);
- b43_phy_write(dev, B43_PHY_OFDM(0xB2), 0x00C0);
- b43_phy_write(dev, B43_PHY_OFDM(0xB9), 0x1FFF);
- b43_phy_maskset(dev, B43_PHY_OFDM(0xBB), 0xF000, 0x0053);
- b43_phy_maskset(dev, B43_PHY_OFDM61, 0xFE1F, 0x0120);
- b43_phy_maskset(dev, B43_PHY_OFDM(0x13), 0x0FFF, 0x3000);
- b43_phy_maskset(dev, B43_PHY_OFDM(0x14), 0x0FFF, 0x3000);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 6, 0x0017);
- for (i = 0; i < 6; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, i, 0x000F);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0D, 0x000E);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0E, 0x0011);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0F, 0x0013);
- b43_phy_write(dev, B43_PHY_OFDM(0x33), 0x5030);
- b43_phy_set(dev, B43_PHY_CRS0, B43_PHY_CRS0_EN);
-}
-
-static void hardware_pctl_init_aphy(struct b43_wldev *dev)
-{
- //TODO
-}
-
-void b43_phy_inita(struct b43_wldev *dev)
-{
- struct b43_phy *phy = &dev->phy;
-
- /* This lowlevel A-PHY init is also called from G-PHY init.
- * So we must not access phy->a, if called from G-PHY code.
- */
- B43_WARN_ON((phy->type != B43_PHYTYPE_A) &&
- (phy->type != B43_PHYTYPE_G));
-
- might_sleep();
-
- if (phy->rev >= 6) {
- if (phy->type == B43_PHYTYPE_A)
- b43_phy_mask(dev, B43_PHY_OFDM(0x1B), ~0x1000);
- if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
- b43_phy_set(dev, B43_PHY_ENCORE, 0x0010);
- else
- b43_phy_mask(dev, B43_PHY_ENCORE, ~0x1010);
- }
-
- b43_wa_all(dev);
-
- if (phy->type == B43_PHYTYPE_A) {
- if (phy->gmode && (phy->rev < 3))
- b43_phy_set(dev, 0x0034, 0x0001);
- b43_phy_rssiagc(dev, 0);
-
- b43_phy_set(dev, B43_PHY_CRS0, B43_PHY_CRS0_EN);
-
- b43_radio_init2060(dev);
-
- if ((dev->dev->board_vendor == SSB_BOARDVENDOR_BCM) &&
- ((dev->dev->board_type == SSB_BOARD_BU4306) ||
- (dev->dev->board_type == SSB_BOARD_BU4309))) {
- ; //TODO: A PHY LO
- }
-
- if (phy->rev >= 3)
- b43_phy_ww(dev);
-
- hardware_pctl_init_aphy(dev);
-
- //TODO: radar detection
- }
-
- if ((phy->type == B43_PHYTYPE_G) &&
- (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)) {
- b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF);
- }
-}
-
-/* Initialise the TSSI->dBm lookup table */
-static int b43_aphy_init_tssi2dbm_table(struct b43_wldev *dev)
-{
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_a *aphy = phy->a;
- s16 pab0, pab1, pab2;
-
- pab0 = (s16) (dev->dev->bus_sprom->pa1b0);
- pab1 = (s16) (dev->dev->bus_sprom->pa1b1);
- pab2 = (s16) (dev->dev->bus_sprom->pa1b2);
-
- if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
- pab0 != -1 && pab1 != -1 && pab2 != -1) {
- /* The pabX values are set in SPROM. Use them. */
- if ((s8) dev->dev->bus_sprom->itssi_a != 0 &&
- (s8) dev->dev->bus_sprom->itssi_a != -1)
- aphy->tgt_idle_tssi =
- (s8) (dev->dev->bus_sprom->itssi_a);
- else
- aphy->tgt_idle_tssi = 62;
- aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
- pab1, pab2);
- if (!aphy->tssi2dbm)
- return -ENOMEM;
- } else {
- /* pabX values not set in SPROM,
- * but APHY needs a generated table. */
- aphy->tssi2dbm = NULL;
- b43err(dev->wl, "Could not generate tssi2dBm "
- "table (wrong SPROM info)!\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int b43_aphy_op_allocate(struct b43_wldev *dev)
-{
- struct b43_phy_a *aphy;
- int err;
-
- aphy = kzalloc(sizeof(*aphy), GFP_KERNEL);
- if (!aphy)
- return -ENOMEM;
- dev->phy.a = aphy;
-
- err = b43_aphy_init_tssi2dbm_table(dev);
- if (err)
- goto err_free_aphy;
-
- return 0;
-
-err_free_aphy:
- kfree(aphy);
- dev->phy.a = NULL;
-
- return err;
-}
-
-static void b43_aphy_op_prepare_structs(struct b43_wldev *dev)
-{
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_a *aphy = phy->a;
- const void *tssi2dbm;
- int tgt_idle_tssi;
-
- /* tssi2dbm table is constant, so it is initialized at alloc time.
- * Save a copy of the pointer. */
- tssi2dbm = aphy->tssi2dbm;
- tgt_idle_tssi = aphy->tgt_idle_tssi;
-
- /* Zero out the whole PHY structure. */
- memset(aphy, 0, sizeof(*aphy));
-
- aphy->tssi2dbm = tssi2dbm;
- aphy->tgt_idle_tssi = tgt_idle_tssi;
-
- //TODO init struct b43_phy_a
-
-}
-
-static void b43_aphy_op_free(struct b43_wldev *dev)
-{
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_a *aphy = phy->a;
-
- kfree(aphy->tssi2dbm);
- aphy->tssi2dbm = NULL;
-
- kfree(aphy);
- dev->phy.a = NULL;
-}
-
-static int b43_aphy_op_init(struct b43_wldev *dev)
-{
- b43_phy_inita(dev);
-
- return 0;
-}
-
-static inline u16 adjust_phyreg(struct b43_wldev *dev, u16 offset)
-{
- /* OFDM registers are base-registers for the A-PHY. */
- if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) {
- offset &= ~B43_PHYROUTE;
- offset |= B43_PHYROUTE_BASE;
- }
-
-#if B43_DEBUG
- if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) {
- /* Ext-G registers are only available on G-PHYs */
- b43err(dev->wl, "Invalid EXT-G PHY access at "
- "0x%04X on A-PHY\n", offset);
- dump_stack();
- }
- if ((offset & B43_PHYROUTE) == B43_PHYROUTE_N_BMODE) {
- /* N-BMODE registers are only available on N-PHYs */
- b43err(dev->wl, "Invalid N-BMODE PHY access at "
- "0x%04X on A-PHY\n", offset);
- dump_stack();
- }
-#endif /* B43_DEBUG */
-
- return offset;
-}
-
-static u16 b43_aphy_op_read(struct b43_wldev *dev, u16 reg)
-{
- reg = adjust_phyreg(dev, reg);
- b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg);
- return b43_read16(dev, B43_MMIO_PHY_DATA);
-}
-
-static void b43_aphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
-{
- reg = adjust_phyreg(dev, reg);
- b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg);
- b43_write16(dev, B43_MMIO_PHY_DATA, value);
-}
-
-static u16 b43_aphy_op_radio_read(struct b43_wldev *dev, u16 reg)
-{
- /* Register 1 is a 32-bit register. */
- B43_WARN_ON(reg == 1);
- /* A-PHY needs 0x40 for read access */
- reg |= 0x40;
-
- b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
- return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
-}
-
-static void b43_aphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
-{
- /* Register 1 is a 32-bit register. */
- B43_WARN_ON(reg == 1);
-
- b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
- b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
-}
-
-static bool b43_aphy_op_supports_hwpctl(struct b43_wldev *dev)
-{
- return (dev->phy.rev >= 5);
-}
-
-static void b43_aphy_op_software_rfkill(struct b43_wldev *dev,
- bool blocked)
-{
- struct b43_phy *phy = &dev->phy;
-
- if (!blocked) {
- if (phy->radio_on)
- return;
- b43_radio_write16(dev, 0x0004, 0x00C0);
- b43_radio_write16(dev, 0x0005, 0x0008);
- b43_phy_mask(dev, 0x0010, 0xFFF7);
- b43_phy_mask(dev, 0x0011, 0xFFF7);
- b43_radio_init2060(dev);
- } else {
- b43_radio_write16(dev, 0x0004, 0x00FF);
- b43_radio_write16(dev, 0x0005, 0x00FB);
- b43_phy_set(dev, 0x0010, 0x0008);
- b43_phy_set(dev, 0x0011, 0x0008);
- }
-}
-
-static int b43_aphy_op_switch_channel(struct b43_wldev *dev,
- unsigned int new_channel)
-{
- if (new_channel > 200)
- return -EINVAL;
- aphy_channel_switch(dev, new_channel);
-
- return 0;
-}
-
-static unsigned int b43_aphy_op_get_default_chan(struct b43_wldev *dev)
-{
- return 36; /* Default to channel 36 */
-}
-
-static void b43_aphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
-{//TODO
- struct b43_phy *phy = &dev->phy;
- u16 tmp;
- int autodiv = 0;
-
- if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1)
- autodiv = 1;
-
- b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP);
-
- b43_phy_maskset(dev, B43_PHY_BBANDCFG, ~B43_PHY_BBANDCFG_RXANT,
- (autodiv ? B43_ANTENNA_AUTO1 : antenna) <<
- B43_PHY_BBANDCFG_RXANT_SHIFT);
-
- if (autodiv) {
- tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
- if (antenna == B43_ANTENNA_AUTO1)
- tmp &= ~B43_PHY_ANTDWELL_AUTODIV1;
- else
- tmp |= B43_PHY_ANTDWELL_AUTODIV1;
- b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
- }
- if (phy->rev < 3)
- b43_phy_maskset(dev, B43_PHY_ANTDWELL, 0xFF00, 0x24);
- else {
- b43_phy_set(dev, B43_PHY_OFDM61, 0x10);
- if (phy->rev == 3) {
- b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, 0x1D);
- b43_phy_write(dev, B43_PHY_ADIVRELATED, 8);
- } else {
- b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, 0x3A);
- b43_phy_maskset(dev, B43_PHY_ADIVRELATED, 0xFF00, 8);
- }
- }
-
- b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP);
-}
-
-static void b43_aphy_op_adjust_txpower(struct b43_wldev *dev)
-{//TODO
-}
-
-static enum b43_txpwr_result b43_aphy_op_recalc_txpower(struct b43_wldev *dev,
- bool ignore_tssi)
-{//TODO
- return B43_TXPWR_RES_DONE;
-}
-
-static void b43_aphy_op_pwork_15sec(struct b43_wldev *dev)
-{//TODO
-}
-
-static void b43_aphy_op_pwork_60sec(struct b43_wldev *dev)
-{//TODO
-}
-
-static const struct b43_phy_operations b43_phyops_a = {
- .allocate = b43_aphy_op_allocate,
- .free = b43_aphy_op_free,
- .prepare_structs = b43_aphy_op_prepare_structs,
- .init = b43_aphy_op_init,
- .phy_read = b43_aphy_op_read,
- .phy_write = b43_aphy_op_write,
- .radio_read = b43_aphy_op_radio_read,
- .radio_write = b43_aphy_op_radio_write,
- .supports_hwpctl = b43_aphy_op_supports_hwpctl,
- .software_rfkill = b43_aphy_op_software_rfkill,
- .switch_analog = b43_phyop_switch_analog_generic,
- .switch_channel = b43_aphy_op_switch_channel,
- .get_default_chan = b43_aphy_op_get_default_chan,
- .set_rx_antenna = b43_aphy_op_set_rx_antenna,
- .recalc_txpower = b43_aphy_op_recalc_txpower,
- .adjust_txpower = b43_aphy_op_adjust_txpower,
- .pwork_15sec = b43_aphy_op_pwork_15sec,
- .pwork_60sec = b43_aphy_op_pwork_60sec,
-};
diff --git a/drivers/net/wireless/broadcom/b43/phy_a.h b/drivers/net/wireless/broadcom/b43/phy_a.h
index f7d0d929a374..0a92d01c21f9 100644
--- a/drivers/net/wireless/broadcom/b43/phy_a.h
+++ b/drivers/net/wireless/broadcom/b43/phy_a.h
@@ -101,26 +101,4 @@ u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset);
void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
u16 offset, u32 value);
-
-struct b43_phy_a {
- /* Pointer to the table used to convert a
- * TSSI value to dBm-Q5.2 */
- const s8 *tssi2dbm;
- /* Target idle TSSI */
- int tgt_idle_tssi;
- /* Current idle TSSI */
- int cur_idle_tssi;//FIXME value currently not set
-
- /* A-PHY TX Power control value. */
- u16 txpwr_offset;
-
- //TODO lots of missing stuff
-};
-
-/**
- * b43_phy_inita - Lowlevel A-PHY init routine.
- * This is _only_ used by the G-PHY code.
- */
-void b43_phy_inita(struct b43_wldev *dev);
-
#endif /* LINUX_B43_PHY_A_H_ */
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.h b/drivers/net/wireless/broadcom/b43/phy_common.h
index 78d86526799e..ced054a9850c 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.h
+++ b/drivers/net/wireless/broadcom/b43/phy_common.h
@@ -190,7 +190,6 @@ struct b43_phy_operations {
void (*pwork_60sec)(struct b43_wldev *dev);
};
-struct b43_phy_a;
struct b43_phy_g;
struct b43_phy_n;
struct b43_phy_lp;
@@ -210,8 +209,6 @@ struct b43_phy {
#else
union {
#endif
- /* A-PHY specific information */
- struct b43_phy_a *a;
/* G-PHY specific information */
struct b43_phy_g *g;
/* N-PHY specific information */
diff --git a/drivers/net/wireless/broadcom/b43/phy_g.c b/drivers/net/wireless/broadcom/b43/phy_g.c
index 462310e6e88f..822dcaa8ace6 100644
--- a/drivers/net/wireless/broadcom/b43/phy_g.c
+++ b/drivers/net/wireless/broadcom/b43/phy_g.c
@@ -31,6 +31,7 @@
#include "phy_common.h"
#include "lo.h"
#include "main.h"
+#include "wa.h"
#include <linux/bitrev.h>
#include <linux/slab.h>
@@ -1987,6 +1988,25 @@ static void b43_phy_init_pctl(struct b43_wldev *dev)
b43_shm_clear_tssi(dev);
}
+static void b43_phy_inita(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ might_sleep();
+
+ if (phy->rev >= 6) {
+ if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
+ b43_phy_set(dev, B43_PHY_ENCORE, 0x0010);
+ else
+ b43_phy_mask(dev, B43_PHY_ENCORE, ~0x1010);
+ }
+
+ b43_wa_all(dev);
+
+ if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)
+ b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF);
+}
+
static void b43_phy_initg(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@@ -2150,11 +2170,6 @@ static void default_radio_attenuation(struct b43_wldev *dev,
}
}
- if (phy->type == B43_PHYTYPE_A) {
- rf->att = 0x60;
- return;
- }
-
switch (phy->radio_ver) {
case 0x2053:
switch (phy->radio_rev) {
diff --git a/drivers/net/wireless/broadcom/b43/wa.c b/drivers/net/wireless/broadcom/b43/wa.c
index c218c08fb2f5..0e96c08d1e17 100644
--- a/drivers/net/wireless/broadcom/b43/wa.c
+++ b/drivers/net/wireless/broadcom/b43/wa.c
@@ -30,33 +30,6 @@
#include "phy_common.h"
#include "wa.h"
-static void b43_wa_papd(struct b43_wldev *dev)
-{
- u16 backup;
-
- backup = b43_ofdmtab_read16(dev, B43_OFDMTAB_PWRDYN2, 0);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, 7);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 0, 0);
- b43_dummy_transmission(dev, true, true);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, backup);
-}
-
-static void b43_wa_auxclipthr(struct b43_wldev *dev)
-{
- b43_phy_write(dev, B43_PHY_OFDM(0x8E), 0x3800);
-}
-
-static void b43_wa_afcdac(struct b43_wldev *dev)
-{
- b43_phy_write(dev, 0x0035, 0x03FF);
- b43_phy_write(dev, 0x0036, 0x0400);
-}
-
-static void b43_wa_txdc_offset(struct b43_wldev *dev)
-{
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 0, 0x0051);
-}
-
void b43_wa_initgains(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@@ -81,41 +54,6 @@ void b43_wa_initgains(struct b43_wldev *dev)
b43_phy_write(dev, 0x00BA, 0x3ED5);
}
-static void b43_wa_divider(struct b43_wldev *dev)
-{
- b43_phy_mask(dev, 0x002B, ~0x0100);
- b43_phy_write(dev, 0x008E, 0x58C1);
-}
-
-static void b43_wa_gt(struct b43_wldev *dev) /* Gain table. */
-{
- if (dev->phy.rev <= 2) {
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 0, 15);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 1, 31);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 2, 42);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 3, 48);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 4, 58);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 0, 3);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 1, 3);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 2, 7);
- } else {
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25);
- }
-}
-
static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */
{
int i;
@@ -133,15 +71,11 @@ static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */
static void b43_wa_analog(struct b43_wldev *dev)
{
- struct b43_phy *phy = &dev->phy;
u16 ofdmrev;
ofdmrev = b43_phy_read(dev, B43_PHY_VERSION_OFDM) & B43_PHYVER_VERSION;
if (ofdmrev > 2) {
- if (phy->type == B43_PHYTYPE_A)
- b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1808);
- else
- b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000);
+ b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000);
} else {
b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 3, 0x1044);
b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 4, 0x7201);
@@ -149,26 +83,13 @@ static void b43_wa_analog(struct b43_wldev *dev)
}
}
-static void b43_wa_dac(struct b43_wldev *dev)
-{
- if (dev->phy.analog == 1)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1,
- (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0034) | 0x0008);
- else
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1,
- (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0078) | 0x0010);
-}
-
static void b43_wa_fft(struct b43_wldev *dev) /* Fine frequency table */
{
int i;
- if (dev->phy.type == B43_PHYTYPE_A)
- for (i = 0; i < B43_TAB_FINEFREQA_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqa[i]);
- else
- for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqg[i]);
+ for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++)
+ b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i,
+ b43_tab_finefreqg[i]);
}
static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */
@@ -176,21 +97,14 @@ static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */
struct b43_phy *phy = &dev->phy;
int i;
- if (phy->type == B43_PHYTYPE_A) {
- if (phy->rev == 2)
- for (i = 0; i < B43_TAB_NOISEA2_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea2[i]);
- else
- for (i = 0; i < B43_TAB_NOISEA3_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea3[i]);
- } else {
- if (phy->rev == 1)
- for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg1[i]);
- else
- for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg2[i]);
- }
+ if (phy->rev == 1)
+ for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++)
+ b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i,
+ b43_tab_noiseg1[i]);
+ else
+ for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++)
+ b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i,
+ b43_tab_noiseg2[i]);
}
static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */
@@ -201,14 +115,6 @@ static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */
b43_ofdmtab_write32(dev, B43_OFDMTAB_ROTOR, i, b43_tab_rotor[i]);
}
-static void b43_write_null_nst(struct b43_wldev *dev)
-{
- int i;
-
- for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, 0);
-}
-
static void b43_write_nst(struct b43_wldev *dev, const u16 *nst)
{
int i;
@@ -221,24 +127,13 @@ static void b43_wa_nst(struct b43_wldev *dev) /* Noise scale table */
{
struct b43_phy *phy = &dev->phy;
- if (phy->type == B43_PHYTYPE_A) {
- if (phy->rev <= 1)
- b43_write_null_nst(dev);
- else if (phy->rev == 2)
- b43_write_nst(dev, b43_tab_noisescalea2);
- else if (phy->rev == 3)
- b43_write_nst(dev, b43_tab_noisescalea3);
- else
+ if (phy->rev >= 6) {
+ if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
b43_write_nst(dev, b43_tab_noisescaleg3);
+ else
+ b43_write_nst(dev, b43_tab_noisescaleg2);
} else {
- if (phy->rev >= 6) {
- if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
- b43_write_nst(dev, b43_tab_noisescaleg3);
- else
- b43_write_nst(dev, b43_tab_noisescaleg2);
- } else {
- b43_write_nst(dev, b43_tab_noisescaleg1);
- }
+ b43_write_nst(dev, b43_tab_noisescaleg1);
}
}
@@ -251,41 +146,13 @@ static void b43_wa_art(struct b43_wldev *dev) /* ADV retard table */
i, b43_tab_retard[i]);
}
-static void b43_wa_txlna_gain(struct b43_wldev *dev)
-{
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 13, 0x0000);
-}
-
-static void b43_wa_crs_reset(struct b43_wldev *dev)
-{
- b43_phy_write(dev, 0x002C, 0x0064);
-}
-
-static void b43_wa_2060txlna_gain(struct b43_wldev *dev)
-{
- b43_hf_write(dev, b43_hf_read(dev) |
- B43_HF_2060W);
-}
-
-static void b43_wa_lms(struct b43_wldev *dev)
-{
- b43_phy_maskset(dev, 0x0055, 0xFFC0, 0x0004);
-}
-
-static void b43_wa_mixedsignal(struct b43_wldev *dev)
-{
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, 3);
-}
-
static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */
{
struct b43_phy *phy = &dev->phy;
int i;
const u16 *tab;
- if (phy->type == B43_PHYTYPE_A) {
- tab = b43_tab_sigmasqr1;
- } else if (phy->type == B43_PHYTYPE_G) {
+ if (phy->type == B43_PHYTYPE_G) {
tab = b43_tab_sigmasqr2;
} else {
B43_WARN_ON(1);
@@ -298,13 +165,6 @@ static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */
}
}
-static void b43_wa_iqadc(struct b43_wldev *dev)
-{
- if (dev->phy.analog == 4)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 0,
- b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 0) & ~0xF000);
-}
-
static void b43_wa_crs_ed(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@@ -450,38 +310,6 @@ static void b43_wa_cpll_nonpilot(struct b43_wldev *dev)
b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_11, 1, 0);
}
-static void b43_wa_rssi_adc(struct b43_wldev *dev)
-{
- if (dev->phy.analog == 4)
- b43_phy_write(dev, 0x00DC, 0x7454);
-}
-
-static void b43_wa_boards_a(struct b43_wldev *dev)
-{
- if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
- dev->dev->board_type == SSB_BOARD_BU4306 &&
- dev->dev->board_rev < 0x30) {
- b43_phy_write(dev, 0x0010, 0xE000);
- b43_phy_write(dev, 0x0013, 0x0140);
- b43_phy_write(dev, 0x0014, 0x0280);
- } else {
- if (dev->dev->board_type == SSB_BOARD_MP4318 &&
- dev->dev->board_rev < 0x20) {
- b43_phy_write(dev, 0x0013, 0x0210);
- b43_phy_write(dev, 0x0014, 0x0840);
- } else {
- b43_phy_write(dev, 0x0013, 0x0140);
- b43_phy_write(dev, 0x0014, 0x0280);
- }
- if (dev->phy.rev <= 4)
- b43_phy_write(dev, 0x0010, 0xE000);
- else
- b43_phy_write(dev, 0x0010, 0x2000);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 1, 0x0039);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 7, 0x0040);
- }
-}
-
static void b43_wa_boards_g(struct b43_wldev *dev)
{
struct ssb_sprom *sprom = dev->dev->bus_sprom;
@@ -518,80 +346,7 @@ void b43_wa_all(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
- if (phy->type == B43_PHYTYPE_A) {
- switch (phy->rev) {
- case 2:
- b43_wa_papd(dev);
- b43_wa_auxclipthr(dev);
- b43_wa_afcdac(dev);
- b43_wa_txdc_offset(dev);
- b43_wa_initgains(dev);
- b43_wa_divider(dev);
- b43_wa_gt(dev);
- b43_wa_rssi_lt(dev);
- b43_wa_analog(dev);
- b43_wa_dac(dev);
- b43_wa_fft(dev);
- b43_wa_nft(dev);
- b43_wa_rt(dev);
- b43_wa_nst(dev);
- b43_wa_art(dev);
- b43_wa_txlna_gain(dev);
- b43_wa_crs_reset(dev);
- b43_wa_2060txlna_gain(dev);
- b43_wa_lms(dev);
- break;
- case 3:
- b43_wa_papd(dev);
- b43_wa_mixedsignal(dev);
- b43_wa_rssi_lt(dev);
- b43_wa_txdc_offset(dev);
- b43_wa_initgains(dev);
- b43_wa_dac(dev);
- b43_wa_nft(dev);
- b43_wa_nst(dev);
- b43_wa_msst(dev);
- b43_wa_analog(dev);
- b43_wa_gt(dev);
- b43_wa_txpuoff_rxpuon(dev);
- b43_wa_txlna_gain(dev);
- break;
- case 5:
- b43_wa_iqadc(dev);
- case 6:
- b43_wa_papd(dev);
- b43_wa_rssi_lt(dev);
- b43_wa_txdc_offset(dev);
- b43_wa_initgains(dev);
- b43_wa_dac(dev);
- b43_wa_nft(dev);
- b43_wa_nst(dev);
- b43_wa_msst(dev);
- b43_wa_analog(dev);
- b43_wa_gt(dev);
- b43_wa_txpuoff_rxpuon(dev);
- b43_wa_txlna_gain(dev);
- break;
- case 7:
- b43_wa_iqadc(dev);
- b43_wa_papd(dev);
- b43_wa_rssi_lt(dev);
- b43_wa_txdc_offset(dev);
- b43_wa_initgains(dev);
- b43_wa_dac(dev);
- b43_wa_nft(dev);
- b43_wa_nst(dev);
- b43_wa_msst(dev);
- b43_wa_analog(dev);
- b43_wa_gt(dev);
- b43_wa_txpuoff_rxpuon(dev);
- b43_wa_txlna_gain(dev);
- b43_wa_rssi_adc(dev);
- default:
- B43_WARN_ON(1);
- }
- b43_wa_boards_a(dev);
- } else if (phy->type == B43_PHYTYPE_G) {
+ if (phy->type == B43_PHYTYPE_G) {
switch (phy->rev) {
case 1://XXX review rev1
b43_wa_crs_ed(dev);
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index f6201264de49..b068d5aeee24 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -205,7 +205,7 @@ static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate)
return control;
}
-static u8 b43_calc_fallback_rate(u8 bitrate)
+static u8 b43_calc_fallback_rate(u8 bitrate, int gmode)
{
switch (bitrate) {
case B43_CCK_RATE_1MB:
@@ -216,8 +216,15 @@ static u8 b43_calc_fallback_rate(u8 bitrate)
return B43_CCK_RATE_2MB;
case B43_CCK_RATE_11MB:
return B43_CCK_RATE_5MB;
+ /*
+ * Don't just fall back to CCK; the core may be operating at
+ * 5 GHz, where the CCK rates don't exist.
+ */
case B43_OFDM_RATE_6MB:
- return B43_CCK_RATE_5MB;
+ if (gmode)
+ return B43_CCK_RATE_5MB;
+ else
+ return B43_OFDM_RATE_6MB;
case B43_OFDM_RATE_9MB:
return B43_OFDM_RATE_6MB;
case B43_OFDM_RATE_12MB:
@@ -438,7 +445,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
rts_rate = rts_cts_rate ? rts_cts_rate->hw_value : B43_CCK_RATE_1MB;
rts_rate_ofdm = b43_is_ofdm_rate(rts_rate);
- rts_rate_fb = b43_calc_fallback_rate(rts_rate);
+ rts_rate_fb = b43_calc_fallback_rate(rts_rate, phy->gmode);
rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb);
if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
@@ -642,11 +649,7 @@ static s8 b43_rssinoise_postprocess(struct b43_wldev *dev, u8 in_rssi)
struct b43_phy *phy = &dev->phy;
s8 ret;
- if (phy->type == B43_PHYTYPE_A) {
- //TODO: Incomplete specs.
- ret = 0;
- } else
- ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1);
+ ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1);
return ret;
}
@@ -663,7 +666,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
u16 uninitialized_var(chanstat), uninitialized_var(mactime);
u32 uninitialized_var(macstat);
u16 chanid;
- u16 phytype;
int padding, rate_idx;
memset(&status, 0, sizeof(status));
@@ -684,7 +686,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
chanstat = le16_to_cpu(rxhdr->format_351.channel);
break;
}
- phytype = chanstat & B43_RX_CHAN_PHYTYPE;
if (unlikely(macstat & B43_RX_MAC_FCSERR)) {
dev->wl->ieee_stats.dot11FCSErrorCount++;
@@ -755,7 +756,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
else
status.signal = max(rxhdr->power0, rxhdr->power1);
break;
- case B43_PHYTYPE_A:
case B43_PHYTYPE_B:
case B43_PHYTYPE_G:
case B43_PHYTYPE_LP:
@@ -802,14 +802,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
switch (chanstat & B43_RX_CHAN_PHYTYPE) {
- case B43_PHYTYPE_A:
- status.band = NL80211_BAND_5GHZ;
- B43_WARN_ON(1);
- /* FIXME: We don't really know which value the "chanid" contains.
- * So the following assignment might be wrong. */
- status.freq =
- ieee80211_channel_to_frequency(chanid, status.band);
- break;
case B43_PHYTYPE_G:
status.band = NL80211_BAND_2GHZ;
/* Somewhere between 478.104 and 508.1084 firmware for G-PHY
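
With the new gmode argument, only the 6 Mb/s OFDM entry changes behavior: a 2.4 GHz (gmode) core still steps down to CCK 5.5 Mb/s, while a 5 GHz core keeps 6 Mb/s as its own floor because no CCK rates exist in that band. A standalone mirror of the rule — the rate codes are placeholders, not the driver's hardware encodings:

	#include <assert.h>

	enum { CCK_5MB = 1, OFDM_6MB = 2, OFDM_9MB = 3 };	/* placeholders */

	static unsigned int fallback_rate(unsigned int rate, int gmode)
	{
		switch (rate) {
		case OFDM_6MB:
			/* lowest OFDM rate: only fall back to CCK on 2.4 GHz */
			return gmode ? CCK_5MB : OFDM_6MB;
		case OFDM_9MB:
			return OFDM_6MB;
		default:
			return rate;
		}
	}

	int main(void)
	{
		assert(fallback_rate(OFDM_6MB, 1) == CCK_5MB);	/* 2.4 GHz */
		assert(fallback_rate(OFDM_6MB, 0) == OFDM_6MB);	/* 5 GHz */
		return 0;
	}
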
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index c7550dab6a23..c4b89d27e2e8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -166,41 +166,45 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
sdio_release_host(sdiodev->func[1]);
+ sdiodev->sd_irq_requested = true;
}
return 0;
}
-int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
+void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
- struct brcmfmac_sdio_pd *pdata;
- brcmf_dbg(SDIO, "Entering\n");
+ brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n",
+ sdiodev->oob_irq_requested,
+ sdiodev->sd_irq_requested);
- pdata = &sdiodev->settings->bus.sdio;
- if (pdata->oob_irq_supported) {
+ if (sdiodev->oob_irq_requested) {
+ struct brcmfmac_sdio_pd *pdata;
+
+ pdata = &sdiodev->settings->bus.sdio;
sdio_claim_host(sdiodev->func[1]);
brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
sdio_release_host(sdiodev->func[1]);
- if (sdiodev->oob_irq_requested) {
- sdiodev->oob_irq_requested = false;
- if (sdiodev->irq_wake) {
- disable_irq_wake(pdata->oob_irq_nr);
- sdiodev->irq_wake = false;
- }
- free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
- sdiodev->irq_en = false;
+ sdiodev->oob_irq_requested = false;
+ if (sdiodev->irq_wake) {
+ disable_irq_wake(pdata->oob_irq_nr);
+ sdiodev->irq_wake = false;
}
- } else {
+ free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
+ sdiodev->irq_en = false;
+ sdiodev->oob_irq_requested = false;
+ }
+
+ if (sdiodev->sd_irq_requested) {
sdio_claim_host(sdiodev->func[1]);
sdio_release_irq(sdiodev->func[2]);
sdio_release_irq(sdiodev->func[1]);
sdio_release_host(sdiodev->func[1]);
+ sdiodev->sd_irq_requested = false;
}
-
- return 0;
}
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
@@ -1197,12 +1201,17 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
brcmf_dbg(SDIO, "Function: %d\n", func->num);
- if (func->num != 1)
- return;
-
bus_if = dev_get_drvdata(&func->dev);
if (bus_if) {
sdiodev = bus_if->bus_priv.sdio;
+
+ /* start by unregistering irqs */
+ brcmf_sdiod_intr_unregister(sdiodev);
+
+ if (func->num != 1)
+ return;
+
+ /* only proceed with the rest of the cleanup for func 1 */
brcmf_sdiod_remove(sdiodev);
dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
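
The unregister path now keys off what registration actually did — sd_irq_requested for in-band SDIO interrupts, oob_irq_requested for the out-of-band GPIO line — instead of re-deriving the mode from platform data. Clearing each flag after release also makes the function idempotent, which is what lets remove() call it unconditionally for every SDIO function. The idiom, reduced to its shape:

	#include <stdbool.h>

	struct irq_state {
		bool sd_irq_requested;
		bool oob_irq_requested;
	};

	/* Release exactly what setup recorded; clearing the flag makes
	 * a second call a harmless no-op.
	 */
	static void irq_teardown(struct irq_state *s)
	{
		if (s->oob_irq_requested) {
			/* free_irq(...) goes here in the driver */
			s->oob_irq_requested = false;
		}
		if (s->sd_irq_requested) {
			/* sdio_release_irq(...) goes here */
			s->sd_irq_requested = false;
		}
	}

	int main(void)
	{
		struct irq_state s = { .sd_irq_requested = true };

		irq_teardown(&s);
		irq_teardown(&s);	/* no-op */
		return 0;
	}
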
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index d0631b6cfd53..264bd638a3d9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -541,6 +541,21 @@ brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
ADDR_INDIRECT);
}
+static int brcmf_get_first_free_bsscfgidx(struct brcmf_pub *drvr)
+{
+ int bsscfgidx;
+
+ for (bsscfgidx = 0; bsscfgidx < BRCMF_MAX_IFS; bsscfgidx++) {
+ /* bsscfgidx 1 is reserved for legacy P2P */
+ if (bsscfgidx == 1)
+ continue;
+ if (!drvr->iflist[bsscfgidx])
+ return bsscfgidx;
+ }
+
+ return -ENOMEM;
+}
+
static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
{
struct brcmf_mbss_ssid_le mbss_ssid_le;
@@ -548,7 +563,7 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
int err;
memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le));
- bsscfgidx = brcmf_get_next_free_bsscfgidx(ifp->drvr);
+ bsscfgidx = brcmf_get_first_free_bsscfgidx(ifp->drvr);
if (bsscfgidx < 0)
return bsscfgidx;
@@ -586,7 +601,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
brcmf_dbg(INFO, "Adding vif \"%s\"\n", name);
- vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP, false);
+ vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP);
if (IS_ERR(vif))
return (struct wireless_dev *)vif;
@@ -669,20 +684,24 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
return ERR_PTR(-EOPNOTSUPP);
case NL80211_IFTYPE_AP:
wdev = brcmf_ap_add_vif(wiphy, name, flags, params);
- if (!IS_ERR(wdev))
- brcmf_cfg80211_update_proto_addr_mode(wdev);
- return wdev;
+ break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, flags, params);
- if (!IS_ERR(wdev))
- brcmf_cfg80211_update_proto_addr_mode(wdev);
- return wdev;
+ break;
case NL80211_IFTYPE_UNSPECIFIED:
default:
return ERR_PTR(-EINVAL);
}
+
+ if (IS_ERR(wdev))
+ brcmf_err("add iface %s type %d failed: err=%d\n",
+ name, type, (int)PTR_ERR(wdev));
+ else
+ brcmf_cfg80211_update_proto_addr_mode(wdev);
+
+ return wdev;
}
static void brcmf_scan_config_mpc(struct brcmf_if *ifp, int mpc)
@@ -2540,12 +2559,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
const u8 *mac, struct station_info *sinfo)
{
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_scb_val_le scb_val;
s32 err = 0;
struct brcmf_sta_info_le sta_info_le;
u32 sta_flags;
u32 is_tdls_peer;
s32 total_rssi;
s32 count_rssi;
+ int rssi;
u32 i;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
@@ -2629,6 +2650,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
total_rssi /= count_rssi;
sinfo->signal = total_rssi;
+ } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state)) {
+ memset(&scb_val, 0, sizeof(scb_val));
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
+ &scb_val, sizeof(scb_val));
+ if (err) {
+ brcmf_err("Could not get rssi (%d)\n", err);
+ goto done;
+ } else {
+ rssi = le32_to_cpu(scb_val.val);
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->signal = rssi;
+ brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
+ }
}
}
done:
@@ -2734,7 +2769,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
if (!bi->ctl_ch) {
ch.chspec = le16_to_cpu(bi->chanspec);
cfg->d11inf.decchspec(&ch);
- bi->ctl_ch = ch.chnum;
+ bi->ctl_ch = ch.control_ch_num;
}
channel = bi->ctl_ch;
@@ -2852,7 +2887,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
else
band = wiphy->bands[NL80211_BAND_5GHZ];
- freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
+ freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band);
cfg->channel = freq;
notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -2862,7 +2897,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
notify_ielen = le32_to_cpu(bi->ie_length);
notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
- brcmf_dbg(CONN, "channel: %d(%d)\n", ch.chnum, freq);
+ brcmf_dbg(CONN, "channel: %d(%d)\n", ch.control_ch_num, freq);
brcmf_dbg(CONN, "capability: %X\n", notify_capability);
brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval);
brcmf_dbg(CONN, "signal: %d\n", notify_signal);
@@ -4423,7 +4458,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_join_params join_params;
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
- u16 chanspec;
+ u16 chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
bool mbss;
int is_11d;
@@ -4499,16 +4534,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
+ /* Parameters shared by all radio interfaces */
if (!mbss) {
- chanspec = chandef_to_chanspec(&cfg->d11inf,
- &settings->chandef);
- err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
- if (err < 0) {
- brcmf_err("Set Channel failed: chspec=%d, %d\n",
- chanspec, err);
- goto exit;
- }
-
if (is_11d != ifp->vif->is_11d) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
is_11d);
@@ -4556,6 +4583,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
err = -EINVAL;
goto exit;
}
+
+ /* Interface specific setup */
if (dev_role == NL80211_IFTYPE_AP) {
if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss))
brcmf_fil_iovar_int_set(ifp, "mbss", 1);
@@ -4565,6 +4594,17 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_err("setting AP mode failed %d\n", err);
goto exit;
}
+ if (!mbss) {
+ /* Firmware 10.x requires setting channel after enabling
+ * AP and before bringing interface up.
+ */
+ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
+ if (err < 0) {
+ brcmf_err("Set Channel failed: chspec=%d, %d\n",
+ chanspec, err);
+ goto exit;
+ }
+ }
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
if (err < 0) {
brcmf_err("BRCMF_C_UP error (%d)\n", err);
@@ -4586,7 +4626,13 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
goto exit;
}
brcmf_dbg(TRACE, "AP mode configuration complete\n");
- } else {
+ } else if (dev_role == NL80211_IFTYPE_P2P_GO) {
+ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
+ if (err < 0) {
+ brcmf_err("Set Channel failed: chspec=%d, %d\n",
+ chanspec, err);
+ goto exit;
+ }
err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
sizeof(ssid_le));
if (err < 0) {
@@ -4603,7 +4649,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
}
brcmf_dbg(TRACE, "GO mode configuration complete\n");
+ } else {
+ WARN_ON(1);
}
+
set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
brcmf_net_setcarrier(ifp, true);
@@ -4892,6 +4941,68 @@ exit:
return err;
}
+static int brcmf_cfg80211_get_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_chan_def *chandef)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = wdev->netdev;
+ struct brcmf_if *ifp;
+ struct brcmu_chan ch;
+ enum nl80211_band band = 0;
+ enum nl80211_chan_width width = 0;
+ u32 chanspec;
+ int freq, err;
+
+ if (!ndev)
+ return -ENODEV;
+ ifp = netdev_priv(ndev);
+
+ err = brcmf_fil_iovar_int_get(ifp, "chanspec", &chanspec);
+ if (err) {
+ brcmf_err("chanspec failed (%d)\n", err);
+ return err;
+ }
+
+ ch.chspec = chanspec;
+ cfg->d11inf.decchspec(&ch);
+
+ switch (ch.band) {
+ case BRCMU_CHAN_BAND_2G:
+ band = NL80211_BAND_2GHZ;
+ break;
+ case BRCMU_CHAN_BAND_5G:
+ band = NL80211_BAND_5GHZ;
+ break;
+ }
+
+ switch (ch.bw) {
+ case BRCMU_CHAN_BW_80:
+ width = NL80211_CHAN_WIDTH_80;
+ break;
+ case BRCMU_CHAN_BW_40:
+ width = NL80211_CHAN_WIDTH_40;
+ break;
+ case BRCMU_CHAN_BW_20:
+ width = NL80211_CHAN_WIDTH_20;
+ break;
+ case BRCMU_CHAN_BW_80P80:
+ width = NL80211_CHAN_WIDTH_80P80;
+ break;
+ case BRCMU_CHAN_BW_160:
+ width = NL80211_CHAN_WIDTH_160;
+ break;
+ }
+
+ freq = ieee80211_channel_to_frequency(ch.control_ch_num, band);
+ chandef->chan = ieee80211_get_channel(wiphy, freq);
+ chandef->width = width;
+ chandef->center_freq1 = ieee80211_channel_to_frequency(ch.chnum, band);
+ chandef->center_freq2 = 0;
+
+ return 0;
+}
+
static int brcmf_cfg80211_crit_proto_start(struct wiphy *wiphy,
struct wireless_dev *wdev,
enum nl80211_crit_proto_id proto,
@@ -5054,6 +5165,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
.mgmt_tx = brcmf_cfg80211_mgmt_tx,
.remain_on_channel = brcmf_p2p_remain_on_channel,
.cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
+ .get_channel = brcmf_cfg80211_get_channel,
.start_p2p_device = brcmf_p2p_start_device,
.stop_p2p_device = brcmf_p2p_stop_device,
.crit_proto_start = brcmf_cfg80211_crit_proto_start,
@@ -5062,8 +5174,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
};
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
- enum nl80211_iftype type,
- bool pm_block)
+ enum nl80211_iftype type)
{
struct brcmf_cfg80211_vif *vif_walk;
struct brcmf_cfg80211_vif *vif;
@@ -5078,8 +5189,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
vif->wdev.wiphy = cfg->wiphy;
vif->wdev.iftype = type;
- vif->pm_block = pm_block;
-
brcmf_init_prof(&vif->profile);
if (type == NL80211_IFTYPE_AP) {
@@ -5280,7 +5389,7 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
else
band = wiphy->bands[NL80211_BAND_5GHZ];
- freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
+ freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
done:
@@ -5336,7 +5445,6 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
static int generation;
u32 event = e->event_code;
u32 reason = e->reason;
@@ -5347,8 +5455,6 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
ndev != cfg_to_ndev(cfg)) {
brcmf_dbg(CONN, "AP mode link down\n");
complete(&cfg->vif_disabled);
- if (ifp->vif->mbss)
- brcmf_remove_interface(ifp);
return 0;
}
@@ -5802,14 +5908,15 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
channel = band->channels;
index = band->n_channels;
for (j = 0; j < band->n_channels; j++) {
- if (channel[j].hw_value == ch.chnum) {
+ if (channel[j].hw_value == ch.control_ch_num) {
index = j;
break;
}
}
channel[index].center_freq =
- ieee80211_channel_to_frequency(ch.chnum, band->band);
- channel[index].hw_value = ch.chnum;
+ ieee80211_channel_to_frequency(ch.control_ch_num,
+ band->band);
+ channel[index].hw_value = ch.control_ch_num;
/* assuming the chanspecs order is HT20,
* HT40 upper, HT40 lower, and VHT80.
@@ -5911,7 +6018,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
if (WARN_ON(ch.bw != BRCMU_CHAN_BW_40))
continue;
for (j = 0; j < band->n_channels; j++) {
- if (band->channels[j].hw_value == ch.chnum)
+ if (band->channels[j].hw_value == ch.control_ch_num)
break;
}
if (WARN_ON(j == band->n_channels))
@@ -6699,11 +6806,10 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
return NULL;
}
- ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ ops = kmemdup(&brcmf_cfg80211_ops, sizeof(*ops), GFP_KERNEL);
if (!ops)
return NULL;
- memcpy(ops, &brcmf_cfg80211_ops, sizeof(*ops));
ifp = netdev_priv(ndev);
#ifdef CONFIG_PM
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK))
@@ -6724,7 +6830,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
init_vif_event(&cfg->vif_event);
INIT_LIST_HEAD(&cfg->vif_list);
- vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false);
+ vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION);
if (IS_ERR(vif))
goto wiphy_out;
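
For reference, the replacement allocator above simply scans for the first unused slot while skipping the reserved legacy-P2P index, instead of the old highest-index bookkeeping. A minimal user-space sketch of the same policy (the array and reserved index are stand-ins, not the driver's types):

#include <stdio.h>

#define MAX_IFS      16   /* stand-in for BRCMF_MAX_IFS */
#define RESERVED_IDX 1    /* bsscfgidx 1 is reserved for legacy P2P */

static int first_free_idx(void *slots[], int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (i == RESERVED_IDX)
			continue;
		if (!slots[i])
			return i;
	}
	return -1; /* the driver returns -ENOMEM here */
}

int main(void)
{
	void *slots[MAX_IFS] = { 0 };
	int dummy;

	slots[0] = &dummy;	/* primary interface in use */
	printf("%d\n", first_free_idx(slots, MAX_IFS)); /* prints 2: index 1 is skipped */
	return 0;
}
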
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 95e35bcc16ce..04bfc7e3ecde 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -20,6 +20,9 @@
/* for brcmu_d11inf */
#include <brcmu_d11.h>
+#include "fwil_types.h"
+#include "p2p.h"
+
#define WL_NUM_SCAN_MAX 10
#define WL_TLV_INFO_MAX 1024
#define WL_BSS_INFO_MAX 2048
@@ -167,7 +170,6 @@ struct vif_saved_ie {
* @wdev: wireless device.
* @profile: profile information.
* @sme_state: SME state using enum brcmf_vif_status bits.
- * @pm_block: power-management blocked.
* @list: linked list.
* @mgmt_rx_reg: registered rx mgmt frame types.
* @mbss: Multiple BSS type, set if not first AP (not relevant for P2P).
@@ -177,7 +179,6 @@ struct brcmf_cfg80211_vif {
struct wireless_dev wdev;
struct brcmf_cfg80211_profile profile;
unsigned long sme_state;
- bool pm_block;
struct vif_saved_ie saved_ie;
struct list_head list;
u16 mgmt_rx_reg;
@@ -388,8 +389,7 @@ s32 brcmf_cfg80211_down(struct net_device *ndev);
enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
- enum nl80211_iftype type,
- bool pm_block);
+ enum nl80211_iftype type);
void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index d3fd6b1db1d9..05f22ff81d60 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -685,6 +685,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_43602_CHIP_ID:
case BRCM_CC_4371_CHIP_ID:
return 0x180000;
+ case BRCM_CC_43465_CHIP_ID:
+ case BRCM_CC_43525_CHIP_ID:
case BRCM_CC_4365_CHIP_ID:
case BRCM_CC_4366_CHIP_ID:
return 0x200000;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index b590499f6883..faf4e46bd65b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -516,7 +516,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
/* set appropriate operations */
ndev->netdev_ops = &brcmf_netdev_ops_pri;
- ndev->hard_header_len += drvr->hdrlen;
+ ndev->needed_headroom += drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
drvr->rxsz = ndev->mtu + ndev->hard_header_len +
@@ -753,30 +753,6 @@ void brcmf_remove_interface(struct brcmf_if *ifp)
brcmf_del_if(ifp->drvr, ifp->bsscfgidx);
}
-int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
-{
- int ifidx;
- int bsscfgidx;
- bool available;
- int highest;
-
- available = false;
- bsscfgidx = 2;
- highest = 2;
- for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
- if (drvr->iflist[ifidx]) {
- if (drvr->iflist[ifidx]->bsscfgidx == bsscfgidx)
- bsscfgidx = highest + 1;
- else if (drvr->iflist[ifidx]->bsscfgidx > highest)
- highest = drvr->iflist[ifidx]->bsscfgidx;
- } else {
- available = true;
- }
- }
-
- return available ? bsscfgidx : -ENOMEM;
-}
-
#ifdef CONFIG_INET
#define ARPOL_MAX_ENTRIES 8
static int brcmf_inetaddr_changed(struct notifier_block *nb,
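
The headroom change above matters because hard_header_len is the link-layer header size the stack assumes is always present, while needed_headroom only asks skb allocators to leave extra space in front for headers the driver adds privately. A kernel-style sketch of the corrected setup (the helper name is hypothetical):

/* Hypothetical attach helper: reserve protocol headroom correctly. */
static int example_net_attach(struct net_device *ndev, unsigned int proto_hdrlen)
{
	/* hard_header_len stays the Ethernet header size set up by
	 * ether_setup(); bus/protocol headers that the driver prepends
	 * itself only need extra allocation headroom.
	 */
	ndev->needed_headroom += proto_hdrlen;
	return register_netdev(ndev);
}
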
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 647d3cc2a4dc..2a075c5f6f8b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -217,7 +217,6 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
bool is_p2pdev, char *name, u8 *mac_addr);
void brcmf_remove_interface(struct brcmf_if *ifp);
-int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 5b30922b67ec..cd221ab55062 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2101,7 +2101,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
/* determine the priority */
- if (!skb->priority)
+ if ((skb->priority == 0) || (skb->priority > 7))
skb->priority = cfg80211_classify8021d(skb, NULL);
drvr->tx_multicast += !!multicast;
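
802.1d user priorities occupy 0..7, so a value above 7 cannot safely index the driver's precedence tables, and 0 just means the priority was never set; the widened check reclassifies both cases instead of trusting the skb. A user-space sketch of the guard (the classifier is a stand-in for cfg80211_classify8021d()):

#include <stdio.h>

static unsigned int classify(void)
{
	return 0; /* stand-in: treat as best effort, like an unclassifiable frame */
}

static unsigned int sanitize_priority(unsigned int prio)
{
	/* 0 == unset, >7 == not a valid 802.1d priority: reclassify both */
	if (prio == 0 || prio > 7)
		prio = classify();
	return prio;
}

int main(void)
{
	printf("%u\n", sanitize_priority(42)); /* out of range -> reclassified */
	return 0;
}
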
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 68f1ce02f4bf..2b9a2bc429d6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -1157,6 +1157,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
brcmu_pkt_buf_free_skb(skb);
return;
}
+
+ skb->protocol = eth_type_trans(skb, ifp->ndev);
brcmf_netif_rx(ifp, skb);
}
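
eth_type_trans() both fills in skb->protocol from the Ethernet header and advances skb->data past it, which the netif receive path expects; the fix above restores that step on the msgbuf rx path. A kernel-style sketch of the usual sequence (names hypothetical):

/* Hypothetical rx completion: prepare the skb before handing it up. */
static void example_rx(struct net_device *ndev, struct sk_buff *skb)
{
	/* Sets skb->protocol and skb->dev, and pulls the Ethernet header. */
	skb->protocol = eth_type_trans(skb, ndev);
	netif_rx(skb);
}
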
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index a70cda6c0592..f38a82133540 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1246,7 +1246,7 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
if (!bi->ctl_ch) {
ch.chspec = le16_to_cpu(bi->chanspec);
cfg->d11inf.decchspec(&ch);
- bi->ctl_ch = ch.chnum;
+ bi->ctl_ch = ch.control_ch_num;
}
afx_hdl->peer_chan = bi->ctl_ch;
brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n",
@@ -1385,7 +1385,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
&p2p->status) &&
(ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
- afx_hdl->peer_chan = ch.chnum;
+ afx_hdl->peer_chan = ch.control_ch_num;
brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
afx_hdl->peer_chan);
complete(&afx_hdl->act_frm_scan);
@@ -1428,7 +1428,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
memcpy(&mgmt_frame->u, frame, mgmt_frame_len);
mgmt_frame_len += offsetof(struct ieee80211_mgmt, u);
- freq = ieee80211_channel_to_frequency(ch.chnum,
+ freq = ieee80211_channel_to_frequency(ch.control_ch_num,
ch.band == BRCMU_CHAN_BAND_2G ?
NL80211_BAND_2GHZ :
NL80211_BAND_5GHZ);
@@ -1873,7 +1873,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
(ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
- afx_hdl->peer_chan = ch.chnum;
+ afx_hdl->peer_chan = ch.control_ch_num;
brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
afx_hdl->peer_chan);
complete(&afx_hdl->act_frm_scan);
@@ -1898,7 +1898,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
mgmt_frame = (u8 *)(rxframe + 1);
mgmt_frame_len = e->datalen - sizeof(*rxframe);
- freq = ieee80211_channel_to_frequency(ch.chnum,
+ freq = ieee80211_channel_to_frequency(ch.control_ch_num,
ch.band == BRCMU_CHAN_BAND_2G ?
NL80211_BAND_2GHZ :
NL80211_BAND_5GHZ);
@@ -2030,8 +2030,6 @@ static int brcmf_p2p_request_p2p_if(struct brcmf_p2p_info *p2p,
err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request,
sizeof(if_request));
- if (err)
- return err;
return err;
}
@@ -2076,8 +2074,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
return ERR_PTR(-ENOSPC);
- p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE,
- false);
+ p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE);
if (IS_ERR(p2p_vif)) {
brcmf_err("could not create discovery vif\n");
return (struct wireless_dev *)p2p_vif;
@@ -2177,7 +2174,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
return ERR_PTR(-EOPNOTSUPP);
}
- vif = brcmf_alloc_vif(cfg, type, false);
+ vif = brcmf_alloc_vif(cfg, type);
if (IS_ERR(vif))
return (struct wireless_dev *)vif;
brcmf_cfg80211_arm_vif_event(cfg, vif);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 0af8db82da0c..3deba90c7eb5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -54,21 +54,25 @@ BRCMF_FW_NVRAM_DEF(43570, "brcmfmac43570-pcie.bin", "brcmfmac43570-pcie.txt");
BRCMF_FW_NVRAM_DEF(4358, "brcmfmac4358-pcie.bin", "brcmfmac4358-pcie.txt");
BRCMF_FW_NVRAM_DEF(4359, "brcmfmac4359-pcie.bin", "brcmfmac4359-pcie.txt");
BRCMF_FW_NVRAM_DEF(4365B, "brcmfmac4365b-pcie.bin", "brcmfmac4365b-pcie.txt");
+BRCMF_FW_NVRAM_DEF(4365C, "brcmfmac4365c-pcie.bin", "brcmfmac4365c-pcie.txt");
BRCMF_FW_NVRAM_DEF(4366B, "brcmfmac4366b-pcie.bin", "brcmfmac4366b-pcie.txt");
BRCMF_FW_NVRAM_DEF(4366C, "brcmfmac4366c-pcie.bin", "brcmfmac4366c-pcie.txt");
BRCMF_FW_NVRAM_DEF(4371, "brcmfmac4371-pcie.bin", "brcmfmac4371-pcie.txt");
static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
- BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFFF, 4365B),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
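
In this table the third BRCMF_FW_NVRAM_ENTRY() argument is, as far as the surrounding entries suggest, a bitmask of supported chip revisions: bit n set means revision n matches, so 0x0000000F covers revs 0..3 and 0xFFFFFFF0 covers revs 4..31. A user-space sketch of that selection (the table contents here are illustrative):

#include <stdio.h>

/* Pick a firmware image: an entry matches when BIT(rev) is in its mask. */
struct fw_entry {
	unsigned int rev_mask;
	const char *name;
};

static const struct fw_entry table[] = {
	{ 0x0000000F, "brcmfmac4365b-pcie.bin" }, /* revs 0..3  -> B silicon */
	{ 0xFFFFFFF0, "brcmfmac4365c-pcie.bin" }, /* revs 4..31 -> C silicon */
};

static const char *pick_fw(unsigned int chiprev)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if ((1u << chiprev) & table[i].rev_mask)
			return table[i].name;
	return NULL;
}

int main(void)
{
	printf("%s\n", pick_fw(4)); /* rev 4 selects the C firmware */
	return 0;
}
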
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 67e69bff2545..5fb8b91b9326 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1384,8 +1384,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
return -ENXIO;
}
if (rd->seq_num != rx_seq) {
- brcmf_err("seq %d: sequence number error, expect %d\n",
- rx_seq, rd->seq_num);
+ brcmf_dbg(SDIO, "seq %d, expected %d\n", rx_seq, rd->seq_num);
bus->sdcnt.rx_badseq++;
rd->seq_num = rx_seq;
}
@@ -3666,7 +3665,7 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
str_shift = 11;
break;
default:
- brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ brcmf_dbg(INFO, "No SDIO driver strength init needed for chip %s rev %d pmurev %d\n",
ci->name, ci->chiprev, ci->pmurev);
break;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index dcf0ce8cd2c1..f3da32fc6360 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -186,6 +186,7 @@ struct brcmf_sdio_dev {
struct brcmf_bus *bus_if;
struct brcmf_mp_device *settings;
bool oob_irq_requested;
+ bool sd_irq_requested;
bool irq_en; /* irq enable flags */
spinlock_t irq_en_lock;
bool irq_wake; /* irq wake enable flags */
@@ -293,7 +294,7 @@ struct sdpcmd_regs {
/* Register/deregister interrupt handler. */
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
-int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
/* sdio device register access interface */
u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 99dac9b8a082..b3aab2fe96eb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -27017,7 +27017,7 @@ wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rx_core,
tx_core = 1 - rx_core;
num_samps = 1024;
- desired_log2_pwr = (cal_type == 0) ? 13 : 13;
+ desired_log2_pwr = 13;
wlc_phy_rx_iq_coeffs_nphy(pi, 0, &save_comp);
zero_comp.a0 = zero_comp.b0 = zero_comp.a1 = zero_comp.b1 = 0x0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
index 2b2522bdd8eb..d8b79cb72b58 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
@@ -107,6 +107,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
u16 val;
ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK);
+ ch->control_ch_num = ch->chnum;
switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) {
case BRCMU_CHSPEC_D11N_BW_20:
@@ -118,10 +119,10 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
val = ch->chspec & BRCMU_CHSPEC_D11N_SB_MASK;
if (val == BRCMU_CHSPEC_D11N_SB_L) {
ch->sb = BRCMU_CHAN_SB_L;
- ch->chnum -= CH_10MHZ_APART;
+ ch->control_ch_num -= CH_10MHZ_APART;
} else {
ch->sb = BRCMU_CHAN_SB_U;
- ch->chnum += CH_10MHZ_APART;
+ ch->control_ch_num += CH_10MHZ_APART;
}
break;
default:
@@ -147,6 +148,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
u16 val;
ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK);
+ ch->control_ch_num = ch->chnum;
switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) {
case BRCMU_CHSPEC_D11AC_BW_20:
@@ -158,10 +160,10 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
val = ch->chspec & BRCMU_CHSPEC_D11AC_SB_MASK;
if (val == BRCMU_CHSPEC_D11AC_SB_L) {
ch->sb = BRCMU_CHAN_SB_L;
- ch->chnum -= CH_10MHZ_APART;
+ ch->control_ch_num -= CH_10MHZ_APART;
} else if (val == BRCMU_CHSPEC_D11AC_SB_U) {
ch->sb = BRCMU_CHAN_SB_U;
- ch->chnum += CH_10MHZ_APART;
+ ch->control_ch_num += CH_10MHZ_APART;
} else {
WARN_ON_ONCE(1);
}
@@ -172,16 +174,16 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
BRCMU_CHSPEC_D11AC_SB_SHIFT);
switch (ch->sb) {
case BRCMU_CHAN_SB_LL:
- ch->chnum -= CH_30MHZ_APART;
+ ch->control_ch_num -= CH_30MHZ_APART;
break;
case BRCMU_CHAN_SB_LU:
- ch->chnum -= CH_10MHZ_APART;
+ ch->control_ch_num -= CH_10MHZ_APART;
break;
case BRCMU_CHAN_SB_UL:
- ch->chnum += CH_10MHZ_APART;
+ ch->control_ch_num += CH_10MHZ_APART;
break;
case BRCMU_CHAN_SB_UU:
- ch->chnum += CH_30MHZ_APART;
+ ch->control_ch_num += CH_30MHZ_APART;
break;
default:
WARN_ON_ONCE(1);
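
Keeping chnum as the center channel and deriving control_ch_num separately makes the offsets explicit: channel numbers step in 5 MHz units, so 10 MHz is 2 numbers and 30 MHz is 6. A user-space sketch of the 80 MHz case (constants mirror CH_10MHZ_APART/CH_30MHZ_APART as used above):

#include <stdio.h>

#define CH_10MHZ_APART 2
#define CH_30MHZ_APART 6

enum sb { SB_LL, SB_LU, SB_UL, SB_UU };

/* 80 MHz: the control channel sits 30 or 10 MHz below/above the center. */
static int control_ch(int center, enum sb sb)
{
	switch (sb) {
	case SB_LL: return center - CH_30MHZ_APART;
	case SB_LU: return center - CH_10MHZ_APART;
	case SB_UL: return center + CH_10MHZ_APART;
	case SB_UU: return center + CH_30MHZ_APART;
	}
	return center;
}

int main(void)
{
	/* Center channel 42 with the lowest sideband gives control channel 36. */
	printf("%d\n", control_ch(42, SB_LL));
	return 0;
}
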
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index 699f2c2782ee..3cc42bef6245 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -40,7 +40,9 @@
#define BRCM_CC_4339_CHIP_ID 0x4339
#define BRCM_CC_43430_CHIP_ID 43430
#define BRCM_CC_4345_CHIP_ID 0x4345
+#define BRCM_CC_43465_CHIP_ID 43465
#define BRCM_CC_4350_CHIP_ID 0x4350
+#define BRCM_CC_43525_CHIP_ID 43525
#define BRCM_CC_4354_CHIP_ID 0x4354
#define BRCM_CC_4356_CHIP_ID 0x4356
#define BRCM_CC_43566_CHIP_ID 43566
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
index f9745ea8b3e0..8b8b2ecb3199 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
@@ -125,14 +125,36 @@ enum brcmu_chan_sb {
BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU,
};
+/**
+ * struct brcmu_chan - stores channel formats
+ *
+ * This structure can be used with the functions that translate a chanspec
+ * into generic channel info and the other way around.
+ *
+ * @chspec: firmware specific format
+ * @chnum: center channel number
+ * @control_ch_num: control channel number
+ * @band: frequency band
+ * @bw: channel width
+ * @sb: control sideband (location of the control channel relative to the center)
+ */
struct brcmu_chan {
u16 chspec;
u8 chnum;
+ u8 control_ch_num;
u8 band;
enum brcmu_chan_bw bw;
enum brcmu_chan_sb sb;
};
+/**
+ * struct brcmu_d11inf - provides functions translating channel format
+ *
+ * @io_type: determines version of channel format used by firmware
+ * @encchspec: encodes channel info into a chanspec; requires the center
+ * channel number and ignores the control one
+ * @decchspec: decodes chanspec into generic info
+ */
struct brcmu_d11inf {
u8 io_type;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 7bcedbb53d94..209dc9988455 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -1019,12 +1019,13 @@ il3945_hw_txq_ctx_free(struct il_priv *il)
int txq_id;
/* Tx queues */
- if (il->txq)
+ if (il->txq) {
for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
if (txq_id == IL39_CMD_QUEUE_NUM)
il_cmd_queue_free(il);
else
il_tx_queue_free(il, txq_id);
+ }
/* free tx queue structure */
il_free_txq_mem(il);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index e5f267b21316..18a8474b5760 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -3851,8 +3851,8 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
if (idx != 0)
return -ENOENT;
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
mutex_lock(&mvm->mutex);
@@ -3898,8 +3898,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
/* if beacon filtering isn't on mac80211 does it anyway */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index ac2c5718e454..2c61516d06ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -581,7 +581,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
struct iwl_rx_mpdu_desc *desc)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_sta *mvm_sta;
struct iwl_mvm_baid_data *baid_data;
struct iwl_mvm_reorder_buffer *buffer;
struct sk_buff *tail;
@@ -604,6 +604,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
if (WARN_ON(IS_ERR_OR_NULL(sta)))
return false;
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
/* not a data packet */
if (!ieee80211_is_data_qos(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 6f609dd5c222..e78fc567ff7d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1222,7 +1222,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
return -EIO;
}
-#define SCAN_TIMEOUT (16 * HZ)
+#define SCAN_TIMEOUT (20 * HZ)
void iwl_mvm_scan_timeout(unsigned long data)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index fea4d3437e2f..b23ab4a4504f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1852,12 +1852,18 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
u8 sta_id = mvmvif->ap_sta_id;
+ sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
/*
* It is possible that the 'sta' parameter is NULL,
* for example when a GTK is removed - the sta_id will then
* be the AP ID, and no station was passed by mac80211.
*/
- return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
}
return NULL;
@@ -1955,6 +1961,14 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
struct ieee80211_key_seq seq;
const u8 *pn;
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+ break;
+ default:
+ return -EINVAL;
+ }
+
memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
pn = seq.aes_cmac.pn;
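
The key-station fix above looks the pointer up under the protection the table actually requires and rejects error pointers before converting it, rather than trusting a bare staid lookup. A kernel-style sketch of the pattern (the structure and field names are hypothetical):

/* Hypothetical lookup: validate an RCU-protected entry before use. */
static struct example_sta *example_get_sta(struct example_dev *dev, u8 sta_id)
{
	struct ieee80211_sta *sta;

	/* Legal under RCU or while holding dev->mutex. */
	sta = rcu_dereference_check(dev->id_to_sta[sta_id],
				    lockdep_is_held(&dev->mutex));
	if (IS_ERR_OR_NULL(sta))
		return NULL;

	return (struct example_sta *)sta->drv_priv;
}
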
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9ed0ed1bf514..a1e28a4fd658 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -30,6 +30,8 @@
#include <linux/module.h>
#include <linux/ktime.h>
#include <net/genetlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include "mac80211_hwsim.h"
#define WARN_QUEUE 100
@@ -250,6 +252,28 @@ static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c)
cp->magic = 0;
}
+static unsigned int hwsim_net_id;
+
+static int hwsim_netgroup;
+
+struct hwsim_net {
+ int netgroup;
+};
+
+static inline int hwsim_net_get_netgroup(struct net *net)
+{
+ struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
+
+ return hwsim_net->netgroup;
+}
+
+static inline void hwsim_net_set_netgroup(struct net *net)
+{
+ struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
+
+ hwsim_net->netgroup = hwsim_netgroup++;
+}
+
static struct class *hwsim_class;
static struct net_device *hwsim_mon; /* global monitor netdev */
@@ -526,6 +550,9 @@ struct mac80211_hwsim_data {
*/
u64 group;
+ /* group shared by radios created in the same netns */
+ int netgroup;
+
int power_level;
/* difference between this hw's clock and the real clock, in usecs */
@@ -568,6 +595,7 @@ static struct genl_family hwsim_genl_family = {
.name = "MAC80211_HWSIM",
.version = 1,
.maxattr = HWSIM_ATTR_MAX,
+ .netnsok = true,
};
enum hwsim_multicast_groups {
@@ -1202,6 +1230,9 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
if (!(data->group & data2->group))
continue;
+ if (data->netgroup != data2->netgroup)
+ continue;
+
if (!hwsim_chans_compat(chan, data2->tmp_chan) &&
!hwsim_chans_compat(chan, data2->channel)) {
ieee80211_iterate_active_interfaces_atomic(
@@ -2349,6 +2380,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
struct ieee80211_hw *hw;
enum nl80211_band band;
const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
+ struct net *net;
int idx;
if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2366,6 +2398,13 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
err = -ENOMEM;
goto failed;
}
+
+ if (info)
+ net = genl_info_net(info);
+ else
+ net = &init_net;
+ wiphy_net_set(hw->wiphy, net);
+
data = hw->priv;
data->hw = hw;
@@ -2541,6 +2580,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
data->group = 1;
mutex_init(&data->mutex);
+ data->netgroup = hwsim_net_get_netgroup(net);
+
/* Enable frame retransmissions for lossy channels */
hw->max_rates = 4;
hw->max_rate_tries = 11;
@@ -2776,6 +2817,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
!info->attrs[HWSIM_ATTR_FLAGS] ||
!info->attrs[HWSIM_ATTR_COOKIE] ||
+ !info->attrs[HWSIM_ATTR_SIGNAL] ||
!info->attrs[HWSIM_ATTR_TX_INFO])
goto out;
@@ -3013,6 +3055,9 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
continue;
}
+ if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
+ continue;
+
list_del(&data->list);
spin_unlock_bh(&hwsim_radio_lock);
mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
@@ -3039,6 +3084,9 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (data->idx != idx)
continue;
+ if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
+ continue;
+
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb) {
res = -ENOMEM;
@@ -3078,6 +3126,9 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
if (data->idx < idx)
continue;
+ if (!net_eq(wiphy_net(data->hw->wiphy), sock_net(skb->sk)))
+ continue;
+
res = mac80211_hwsim_get_radio(skb, data,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, cb,
@@ -3117,13 +3168,13 @@ static const struct genl_ops hwsim_ops[] = {
.cmd = HWSIM_CMD_NEW_RADIO,
.policy = hwsim_genl_policy,
.doit = hwsim_new_radio_nl,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_DEL_RADIO,
.policy = hwsim_genl_policy,
.doit = hwsim_del_radio_nl,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_GET_RADIO,
@@ -3205,6 +3256,40 @@ failure:
return -EINVAL;
}
+static __net_init int hwsim_init_net(struct net *net)
+{
+ hwsim_net_set_netgroup(net);
+
+ return 0;
+}
+
+static void __net_exit hwsim_exit_net(struct net *net)
+{
+ struct mac80211_hwsim_data *data, *tmp;
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
+ if (!net_eq(wiphy_net(data->hw->wiphy), net))
+ continue;
+
+ /* Radios created in init_net are returned to init_net. */
+ if (data->netgroup == hwsim_net_get_netgroup(&init_net))
+ continue;
+
+ list_del(&data->list);
+ INIT_WORK(&data->destroy_work, destroy_radio);
+ schedule_work(&data->destroy_work);
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+}
+
+static struct pernet_operations hwsim_net_ops = {
+ .init = hwsim_init_net,
+ .exit = hwsim_exit_net,
+ .id = &hwsim_net_id,
+ .size = sizeof(struct hwsim_net),
+};
+
static void hwsim_exit_netlink(void)
{
/* unregister the notifier */
@@ -3241,10 +3326,14 @@ static int __init init_mac80211_hwsim(void)
spin_lock_init(&hwsim_radio_lock);
INIT_LIST_HEAD(&hwsim_radios);
- err = platform_driver_register(&mac80211_hwsim_driver);
+ err = register_pernet_device(&hwsim_net_ops);
if (err)
return err;
+ err = platform_driver_register(&mac80211_hwsim_driver);
+ if (err)
+ goto out_unregister_pernet;
+
hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
if (IS_ERR(hwsim_class)) {
err = PTR_ERR(hwsim_class);
@@ -3362,6 +3451,8 @@ out_free_radios:
mac80211_hwsim_free();
out_unregister_driver:
platform_driver_unregister(&mac80211_hwsim_driver);
+out_unregister_pernet:
+ unregister_pernet_device(&hwsim_net_ops);
return err;
}
module_init(init_mac80211_hwsim);
@@ -3375,5 +3466,6 @@ static void __exit exit_mac80211_hwsim(void)
mac80211_hwsim_free();
unregister_netdev(hwsim_mon);
platform_driver_unregister(&mac80211_hwsim_driver);
+ unregister_pernet_device(&hwsim_net_ops);
}
module_exit(exit_mac80211_hwsim);
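
The hwsim changes follow the standard per-netns pattern: reserve private storage via .id/.size, initialize it in .init, and tear down namespace-owned objects in .exit. A condensed kernel-style sketch of the same shape (names hypothetical):

/* Hypothetical per-netns state, allocated for us by the pernet core. */
struct example_net {
	int group;
};

static unsigned int example_net_id;

static __net_init int example_init_net(struct net *net)
{
	struct example_net *en = net_generic(net, example_net_id);

	en->group = 0; /* e.g. hand out a per-namespace radio group here */
	return 0;
}

static void __net_exit example_exit_net(struct net *net)
{
	/* destroy objects whose wiphy_net() matches @net, as above */
}

static struct pernet_operations example_net_ops = {
	.init = example_init_net,
	.exit = example_exit_net,
	.id   = &example_net_id,
	.size = sizeof(struct example_net),
};
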
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 13eae9ff8c35..47f4a14c84fe 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1228,7 +1228,7 @@ static int if_sdio_probe(struct sdio_func *func,
}
spin_lock_init(&card->lock);
- card->workqueue = create_workqueue("libertas_sdio");
+ card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0);
INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
init_waitqueue_head(&card->pwron_waitq);
@@ -1326,7 +1326,6 @@ static void if_sdio_remove(struct sdio_func *func)
lbs_stop_card(card->priv);
lbs_remove_card(card->priv);
- flush_workqueue(card->workqueue);
destroy_workqueue(card->workqueue);
while (card->packets) {
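
create_workqueue() is the deprecated multi-threaded interface; alloc_workqueue() with WQ_MEM_RECLAIM preserves the guarantee that the queue can make forward progress under memory pressure, which matters on an I/O path. destroy_workqueue() already drains pending work, so the explicit flush before it is redundant. A kernel-style sketch (names hypothetical):

/* Hypothetical driver setup/teardown for a reclaim-safe workqueue. */
static struct workqueue_struct *example_wq;

static int example_init(void)
{
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void example_exit(void)
{
	/* destroy_workqueue() drains the queue; no flush needed first. */
	destroy_workqueue(example_wq);
}
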
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index 82c0796377aa..c3a53cd6988e 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1180,7 +1180,7 @@ static int if_spi_probe(struct spi_device *spi)
priv->fw_ready = 1;
/* Initialize interrupt handling stuff. */
- card->workqueue = create_workqueue("libertas_spi");
+ card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0);
INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
INIT_WORK(&card->resume_work, if_spi_resume_worker);
@@ -1208,7 +1208,6 @@ static int if_spi_probe(struct spi_device *spi)
release_irq:
free_irq(spi->irq, card);
terminate_workqueue:
- flush_workqueue(card->workqueue);
destroy_workqueue(card->workqueue);
lbs_remove_card(priv); /* will call free_netdev */
free_card:
@@ -1235,7 +1234,6 @@ static int libertas_spi_remove(struct spi_device *spi)
lbs_remove_card(priv); /* will call free_netdev */
free_irq(spi->irq, card);
- flush_workqueue(card->workqueue);
destroy_workqueue(card->workqueue);
if (card->pdata->teardown)
card->pdata->teardown(spi);
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 0bf8916a02cf..75bf0c8a2f6f 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include "libertas_tf.h"
-#define DRIVER_RELEASE_VERSION "004.p0"
/* thinfirm version: 5.132.X.pX */
#define LBTF_FW_VER_MIN 0x05840300
#define LBTF_FW_VER_MAX 0x0584ffff
@@ -27,12 +26,6 @@ unsigned int lbtf_debug;
EXPORT_SYMBOL_GPL(lbtf_debug);
module_param_named(libertas_tf_debug, lbtf_debug, int, 0644);
-static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION
-#ifdef DEBUG
- "-dbg"
-#endif
- "";
-
struct workqueue_struct *lbtf_wq;
static const struct ieee80211_channel lbtf_channels[] = {
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index 1efef3b8273d..dc49c3de1f25 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -184,7 +184,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
- GFP_ATOMIC | GFP_DMA);
+ GFP_ATOMIC);
if (!skb_aggr) {
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 78c532f0d286..a6d86d4ccd22 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -788,3 +788,4 @@ poll_fw:
return ret;
}
+EXPORT_SYMBOL_GPL(mwifiex_dnld_fw);
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index 62211fca91b7..a4b773d102b3 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -1281,7 +1281,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
if (result) {
mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n");
if (priv->media_connected)
- mwifiex_reset_connect_state(priv, result);
+ mwifiex_reset_connect_state(priv, result, true);
memset(&priv->curr_bss_params.bss_descriptor,
0x00, sizeof(struct mwifiex_bssdescriptor));
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 8b67a552a690..0e280f879b58 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -526,10 +526,12 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
fw.fw_buf = (u8 *) adapter->firmware->data;
fw.fw_len = adapter->firmware->size;
- if (adapter->if_ops.dnld_fw)
+ if (adapter->if_ops.dnld_fw) {
ret = adapter->if_ops.dnld_fw(adapter, &fw);
- else
+ } else {
ret = mwifiex_dnld_fw(adapter, &fw);
+ }
+
if (ret == -1)
goto err_dnld_fw;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 0207af00be42..f0cd055c0b2c 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1128,7 +1128,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
-void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason);
+void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason,
+ bool from_ap);
u8 mwifiex_band_to_radio_type(u8 band);
int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 0c7937eb6b77..1b1e266ce00f 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -507,7 +507,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
/* Allocate skb here so that firmware can DMA data from it */
skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
- GFP_KERNEL | GFP_DMA);
+ GFP_KERNEL);
if (!skb) {
mwifiex_dbg(adapter, ERROR,
"Unable to allocate skb for RX ring.\n");
@@ -1319,7 +1319,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
}
skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
- GFP_KERNEL | GFP_DMA);
+ GFP_KERNEL);
if (!skb_tmp) {
mwifiex_dbg(adapter, ERROR,
"Unable to allocate skb.\n");
@@ -2804,7 +2804,7 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
}
/*
- * This function get firmare name for downloading by revision id
+ * This function gets the firmware name for downloading by revision id
*
* Read revision id register to get revision id
*/
@@ -2901,10 +2901,11 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg;
- struct pci_dev *pdev = card->dev;
+ struct pci_dev *pdev;
int i;
if (card) {
+ pdev = card->dev;
if (card->msix_enable) {
for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
synchronize_irq(card->msix_entries[i].vector);
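
The unregister fix above is the classic dereference-before-check bug: the old code read card->dev in the initializer even though the following if guarded against a NULL card. A minimal user-space sketch of the corrected shape:

#include <stddef.h>

struct card { int *dev; };

static void unregister(struct card *card)
{
	int *dev;

	if (card) {
		dev = card->dev;	/* only dereference after the check */
		(void)dev;
	}
}

int main(void)
{
	unregister(NULL); /* safe: nothing is touched */
	return 0;
}
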
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index bdc51ffd43ec..d3e1561ca075 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -102,10 +102,9 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
struct mwifiex_plt_wake_cfg *cfg;
int ret;
- if (!dev->of_node ||
- !of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) {
- dev_err(dev, "sdio platform data not available\n");
- return -1;
+ if (!of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) {
+ dev_err(dev, "required compatible string missing\n");
+ return -EINVAL;
}
card->plt_of_node = dev->of_node;
@@ -115,7 +114,7 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
if (cfg && card->plt_of_node) {
cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0);
if (!cfg->irq_wifi) {
- dev_err(dev,
+ dev_dbg(dev,
"fail to parse irq_wifi from device tree\n");
} else {
ret = devm_request_irq(dev, cfg->irq_wifi,
@@ -183,24 +182,35 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
sdio_release_host(func);
if (ret) {
- pr_err("%s: failed to enable function\n", __func__);
- kfree(card);
- return -EIO;
+ dev_err(&func->dev, "failed to enable function\n");
+ goto err_free;
}
/* device tree node parsing and platform specific configuration*/
- mwifiex_sdio_probe_of(&func->dev, card);
-
- if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops,
- MWIFIEX_SDIO)) {
- pr_err("%s: add card failed\n", __func__);
- kfree(card);
- sdio_claim_host(func);
- ret = sdio_disable_func(func);
- sdio_release_host(func);
- ret = -1;
+ if (func->dev.of_node) {
+ ret = mwifiex_sdio_probe_of(&func->dev, card);
+ if (ret) {
+ dev_err(&func->dev, "SDIO dt node parse failed\n");
+ goto err_disable;
+ }
+ }
+
+ ret = mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops,
+ MWIFIEX_SDIO);
+ if (ret) {
+ dev_err(&func->dev, "add card failed\n");
+ goto err_disable;
}
+ return 0;
+
+err_disable:
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+err_free:
+ kfree(card);
+
return ret;
}
@@ -544,6 +554,19 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0);
}
+static int mwifiex_sdio_dnld_fw(struct mwifiex_adapter *adapter,
+ struct mwifiex_fw_image *fw)
+{
+ struct sdio_mmc_card *card = adapter->card;
+ int ret;
+
+ sdio_claim_host(card->func);
+ ret = mwifiex_dnld_fw(adapter, fw);
+ sdio_release_host(card->func);
+
+ return ret;
+}
+
/*
* This function is used to initialize IO ports for the
* chipsets supporting SDIO new mode eg SD8897.
@@ -1492,7 +1515,7 @@ rx_curr_single:
mwifiex_dbg(adapter, INFO, "info: RX: port: %d, rx_len: %d\n",
port, rx_len);
- skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
+ skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL);
if (!skb) {
mwifiex_dbg(adapter, ERROR,
"single skb allocated fail,\t"
@@ -1597,7 +1620,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len);
- skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
+ skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL);
if (!skb)
return -1;
@@ -2732,6 +2755,7 @@ static struct mwifiex_if_ops sdio_ops = {
.cleanup_mpa_buf = mwifiex_cleanup_mpa_buf,
.cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
.event_complete = mwifiex_sdio_event_complete,
+ .dnld_fw = mwifiex_sdio_dnld_fw,
.card_reset = mwifiex_sdio_card_reset,
.reg_dump = mwifiex_sdio_reg_dump,
.device_dump = mwifiex_sdio_device_dump,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index d18c7979d723..bcfd4b743145 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -553,7 +553,8 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv,
if (!memcmp(resp->params.deauth.mac_addr,
&priv->curr_bss_params.bss_descriptor.mac_address,
sizeof(resp->params.deauth.mac_addr)))
- mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING);
+ mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING,
+ false);
return 0;
}
@@ -566,7 +567,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv,
static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
- mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING);
+ mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING, false);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 0104108b4ea2..0cefd40b2762 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -40,8 +40,8 @@
* - Erases current SSID and BSSID information
* - Sends a disconnect event to upper layers/applications.
*/
-void
-mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
+void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
+ bool from_ap)
{
struct mwifiex_adapter *adapter = priv->adapter;
@@ -140,7 +140,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
- false, GFP_KERNEL);
+ !from_ap, GFP_KERNEL);
}
eth_zero_addr(priv->cfg_bssid);
@@ -574,7 +574,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (priv->media_connected) {
reason_code =
le16_to_cpu(*(__le16 *)adapter->event_body);
- mwifiex_reset_connect_state(priv, reason_code);
+ mwifiex_reset_connect_state(priv, reason_code, true);
}
break;
@@ -589,7 +589,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (priv->media_connected) {
reason_code =
le16_to_cpu(*(__le16 *)adapter->event_body);
- mwifiex_reset_connect_state(priv, reason_code);
+ mwifiex_reset_connect_state(priv, reason_code, true);
}
break;
@@ -599,7 +599,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (priv->media_connected) {
reason_code =
le16_to_cpu(*(__le16 *)adapter->event_body);
- mwifiex_reset_connect_state(priv, reason_code);
+ mwifiex_reset_connect_state(priv, reason_code, true);
}
break;
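
Threading from_ap through to cfg80211_disconnected() lets the driver report who ended the association: the argument in that slot is locally_generated, so an AP-initiated deauth passes !from_ap == false, while a host-issued deauth passes true. A tiny sketch of the mapping:

#include <stdbool.h>
#include <stdio.h>

/* locally_generated is true only when the disconnect did not come
 * from the AP (e.g. the host issued the deauth itself).
 */
static bool locally_generated(bool from_ap)
{
	return !from_ap;
}

int main(void)
{
	printf("%d\n", locally_generated(true)); /* AP deauth -> 0 */
	return 0;
}
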
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 666e91af59d7..bf5660eb27d3 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -272,7 +272,7 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
struct sk_buff *skb)
{
- struct mwifiex_adapter *adapter = adapter;
+ struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_sta_node *src_node;
struct ethhdr *p_ethhdr;
struct sk_buff *skb_uap;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index fe19ace0d6a0..b04cf30f3959 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1149,7 +1149,7 @@ static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
for (i = 0; i < retry; i++) {
path_b_ok = rtl8192eu_rx_iqk_path_b(priv);
- if (path_a_ok == 0x03) {
+ if (path_b_ok == 0x03) {
val32 = rtl8xxxu_read32(priv,
REG_RX_POWER_BEFORE_IQK_B_2);
result[t][6] = (val32 >> 16) & 0x3ff;
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 0f48048b8654..3a0faa8fe9d4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m);
void rtl_addr_delay(u32 addr)
{
if (addr == 0xfe)
- msleep(50);
+ mdelay(50);
else if (addr == 0xfd)
msleep(5);
else if (addr == 0xfc)
@@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
rtl_addr_delay(addr);
} else {
rtl_set_rfreg(hw, rfpath, addr, mask, data);
- usleep_range(1, 2);
+ udelay(1);
}
}
EXPORT_SYMBOL(rtl_rfreg_delay);
@@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
rtl_addr_delay(addr);
} else {
rtl_set_bbreg(hw, addr, MASKDWORD, data);
- usleep_range(1, 2);
+ udelay(1);
}
}
EXPORT_SYMBOL(rtl_bb_delay);
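
These helpers can be reached from atomic context (e.g. under a spinlock in the rf programming path), where sleeping primitives such as msleep() and usleep_range() are illegal; mdelay() and udelay() busy-wait instead. A kernel-style sketch of choosing a delay primitive (the helper name is hypothetical):

/* Hypothetical helper: pick a context-appropriate delay. */
static void example_delay_us(unsigned int us, bool atomic_ctx)
{
	if (atomic_ctx)
		udelay(us);		  /* busy-wait: safe in atomic context */
	else
		usleep_range(us, us + 1); /* may sleep: process context only */
}
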
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 8ee83b093c0d..e26a233684bb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1839,20 +1839,22 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
u8 hwinfo[HWSET_MAX_SIZE];
u16 eeprom_id;
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ switch (rtlefuse->epromtype) {
+ case EEPROM_BOOT_EFUSE:
rtl_efuse_shadow_map_update(hw);
+ break;
- memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ case EEPROM_93C46:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"RTL819X Not boot from eeprom, check it !!");
return;
- } else {
+
+ default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"boot from neither eeprom nor efuse, check it !!");
return;
}
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
hwinfo, HWSET_MAX_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 04eb5c3f8464..58b7ac6899ef 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1680,21 +1680,28 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
u16 i, usvalue;
u8 hwinfo[HWSET_MAX_SIZE];
u16 eeprom_id;
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ switch (rtlefuse->epromtype) {
+ case EEPROM_BOOT_EFUSE:
rtl_efuse_shadow_map_update(hw);
+ break;
- memcpy((void *)hwinfo,
- (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ case EEPROM_93C46:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"RTL819X Not boot from eeprom, check it !!");
+ return;
+
+ default:
+ dev_warn(dev, "no efuse data\n");
+ return;
}
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
+
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
hwinfo, HWSET_MAX_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 34ce06441d1b..ae1129f916d5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -351,15 +351,21 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
u8 hwinfo[HWSET_MAX_SIZE] = {0};
u16 eeprom_id;
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ switch (rtlefuse->epromtype) {
+ case EEPROM_BOOT_EFUSE:
rtl_efuse_shadow_map_update(hw);
- memcpy((void *)hwinfo,
- (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ break;
+
+ case EEPROM_93C46:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"RTL819X Not boot from eeprom, check it !!\n");
+ return;
+
+ default:
+ pr_warn("rtl92cu: no efuse data\n\n");
+ return;
}
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, "MAP",
hwinfo, HWSET_MAX_SIZE);
eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0]));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index f49b60d31450..8618c322a3f8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1744,23 +1744,29 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
u16 i, usvalue;
u8 hwinfo[HWSET_MAX_SIZE];
u16 eeprom_id;
unsigned long flags;
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ switch (rtlefuse->epromtype) {
+ case EEPROM_BOOT_EFUSE:
spin_lock_irqsave(&globalmutex_for_power_and_efuse, flags);
rtl_efuse_shadow_map_update(hw);
_rtl92de_efuse_update_chip_version(hw);
spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags);
- memcpy((void *)hwinfo, (void *)&rtlefuse->efuse_map
- [EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ break;
+ case EEPROM_93C46:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"RTL819X Not boot from eeprom, check it !!\n");
+ return;
+ default:
+ dev_warn(dev, "no efuse data\n");
+ return;
}
+
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
hwinfo, HWSET_MAX_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 9fd3f1b6e4a8..28c260dd11ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -2102,20 +2102,22 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw)
u8 hwinfo[HWSET_MAX_SIZE];
u16 eeprom_id;
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ switch (rtlefuse->epromtype) {
+ case EEPROM_BOOT_EFUSE:
rtl_efuse_shadow_map_update(hw);
+ break;
- memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ case EEPROM_93C46:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"RTL819X Not boot from eeprom, check it !!");
return;
- } else {
+
+ default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"boot from neither eeprom nor efuse, check it !!");
return;
}
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
hwinfo, HWSET_MAX_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index 018340aedf09..c2bf8d1a7af3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -2414,19 +2414,10 @@ static void _rtl92ee_phy_reload_mac_registers(struct ieee80211_hw *hw,
static void _rtl92ee_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg,
bool is_patha_on, bool is2t)
{
- u32 pathon;
u32 i;
- pathon = is_patha_on ? 0x0fc01616 : 0x0fc01616;
- if (!is2t) {
- pathon = 0x0fc01616;
- rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0fc01616);
- } else {
- rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon);
- }
-
- for (i = 1; i < IQK_ADDA_REG_NUM; i++)
- rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon);
+ for (i = 0; i < IQK_ADDA_REG_NUM; i++)
+ rtl_set_bbreg(hw, addareg[i], MASKDWORD, 0x0fc01616);
}
static void _rtl92ee_phy_mac_setting_calibration(struct ieee80211_hw *hw,
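
[Note: in the deleted code, both arms of the ternary were the same constant
("pathon = is_patha_on ? 0x0fc01616 : 0x0fc01616"), and the !is2t special
case wrote that identical constant as well:

	/* every pre-existing path reduced to the same store */
	pathon = 0x0fc01616;
	rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0fc01616);

so the body collapses to a single loop writing 0x0fc01616 to all
IQK_ADDA_REG_NUM registers; the is_patha_on and is2t flags become dead and
are kept only to preserve the call signature.]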
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 12b0978ba4fa..442f2b68ee58 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1673,23 +1673,31 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
u16 i, usvalue;
u16 eeprom_id;
u8 tempval;
u8 hwinfo[HWSET_MAX_SIZE_92S];
u8 rf_path, index;
- if (rtlefuse->epromtype == EEPROM_93C46) {
+ switch (rtlefuse->epromtype) {
+ case EEPROM_BOOT_EFUSE:
+ rtl_efuse_shadow_map_update(hw);
+ break;
+
+ case EEPROM_93C46:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"RTL819X Not boot from eeprom, check it !!\n");
- } else if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
- rtl_efuse_shadow_map_update(hw);
+ return;
- memcpy((void *)hwinfo, (void *)
- &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE_92S);
+ default:
+ dev_warn(dev, "no efuse data\n");
+ return;
}
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ HWSET_MAX_SIZE_92S);
+
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
hwinfo, HWSET_MAX_SIZE_92S);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index a4b7eac6856f..57a1ba8822b1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1630,6 +1630,7 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
u16 i, usvalue;
u8 hwinfo[HWSET_MAX_SIZE];
u16 eeprom_id;
@@ -1638,15 +1639,19 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
/* need add */
return;
}
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ switch (rtlefuse->epromtype) {
+ case EEPROM_BOOT_EFUSE:
rtl_efuse_shadow_map_update(hw);
+ break;
+
- memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ case EEPROM_93C46:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"RTL819X Not boot from eeprom, check it !!");
+ return;
+
+ default:
+ dev_warn(dev, "no efuse data\n");
+ return;
}
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
hwinfo, HWSET_MAX_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 5a3df9198ddf..08288ac9020a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -2026,6 +2026,7 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
u16 i, usvalue;
u8 hwinfo[HWSET_MAX_SIZE];
u16 eeprom_id;
@@ -2055,15 +2056,22 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
/* needs to be added */
return;
}
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+
+ switch (rtlefuse->epromtype) {
+ case EEPROM_BOOT_EFUSE:
rtl_efuse_shadow_map_update(hw);
+ break;
- memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ case EEPROM_93C46:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"RTL819X Not boot from eeprom, check it !!");
+ return;
+
+ default:
+ dev_warn(dev, "no efuse data\n");
+ return;
}
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
hwinfo, HWSET_MAX_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 445f681d08c0..c5ca9dfb445c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -1019,7 +1019,7 @@ static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 index = (channel - 1);
- u8 txpower;
+ u8 txpower = 0;
u8 power_diff_byrate = 0;
if (channel > 14 || channel < 1) {
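
[Note: the "= 0" initializer gives txpower a defined value on paths where no
assignment branch fires. A minimal sketch of the bug class, not the exact
control flow of _rtl8723be_get_txpower_index():

	u8 txpower;			/* before: indeterminate */
	if (is_cck_rate)		/* hypothetical condition names */
		txpower = cck_table[index];
	else if (is_ofdm_rate)
		txpower = ofdm_table[index];
	/* no final else: returning txpower here reads garbage
	 * whenever neither branch matches */
	return txpower;

With "= 0", the fall-through case returns a harmless zero instead.]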
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 71e4dd9965bb..b9436df9e1ec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -3101,6 +3101,7 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
u16 i, usvalue;
u8 hwinfo[HWSET_MAX_SIZE];
u16 eeprom_id;
@@ -3109,14 +3110,20 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
;/* need add */
}
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ switch (rtlefuse->epromtype) {
+ case EEPROM_BOOT_EFUSE:
rtl_efuse_shadow_map_update(hw);
- memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ break;
+
+ case EEPROM_93C46:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"RTL819X Not boot from eeprom, check it !!");
+ return;
+
+ default:
+ dev_warn(dev, "no efuse data\n");
+ return;
}
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
hwinfo, HWSET_MAX_SIZE);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 40658b62d077..35c14cc3f0d2 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -398,7 +398,7 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common,
return -ENOLINK;
msg_len -= pad_bytes;
- if ((msg_len <= 0) || (!msg)) {
+ if (msg_len <= 0) {
rsi_dbg(MGMT_RX_ZONE,
"%s: Invalid rx msg of len = %d\n",
__func__, msg_len);
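
[Note: the removed "!msg" test was dead code: msg has already been
dereferenced by this point ("msg_len -= pad_bytes" above, where pad_bytes is
read out of the message header in the mainline driver), so a NULL pointer
would have faulted long before the check. Sketch of the antipattern:

	u8 pad_bytes = msg[4];		/* pointer used here first... */
	msg_len -= pad_bytes;
	if (msg_len <= 0 || !msg)	/* ...so !msg can never trigger here */
		return -EINVAL;

The msg[4] detail is taken from the surrounding driver code, not this hunk;
the msg_len <= 0 length check is what actually protects the code below.]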
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 13fd734b61ec..82d94f83b6b4 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -378,8 +378,7 @@ static int wl3501_esbq_exec(struct wl3501_card *this, void *sig, int sig_size)
return rc;
}
-static int wl3501_get_mib_value(struct wl3501_card *this, u8 index,
- void *bf, int size)
+static int wl3501_request_mib(struct wl3501_card *this, u8 index, void *bf)
{
struct wl3501_get_req sig = {
.sig_id = WL3501_SIG_GET_REQ,
@@ -395,20 +394,32 @@ static int wl3501_get_mib_value(struct wl3501_card *this, u8 index,
wl3501_set_to_wla(this, ptr, &sig, sizeof(sig));
wl3501_esbq_req(this, &ptr);
this->sig_get_confirm.mib_status = 255;
- spin_unlock_irqrestore(&this->lock, flags);
- rc = wait_event_interruptible(this->wait,
- this->sig_get_confirm.mib_status != 255);
- if (!rc)
- memcpy(bf, this->sig_get_confirm.mib_value,
- size);
- goto out;
+ rc = 0;
}
}
spin_unlock_irqrestore(&this->lock, flags);
-out:
+
return rc;
}
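+
+/*
+ * Split out of wl3501_request_mib() so that function keeps a single
+ * spin_unlock_irqrestore() site: the request is issued under this->lock,
+ * while the sleeping wait_event_interruptible() and the result copy
+ * happen here, outside the lock, replacing the old early-unlock/goto flow.
+ */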
+static int wl3501_get_mib_value(struct wl3501_card *this, u8 index,
+ void *bf, int size)
+{
+ int rc;
+
+ rc = wl3501_request_mib(this, index, bf);
+ if (rc)
+ return rc;
+
+ rc = wait_event_interruptible(this->wait,
+ this->sig_get_confirm.mib_status != 255);
+ if (rc)
+ return rc;
+
+ memcpy(bf, this->sig_get_confirm.mib_value, size);
+ return 0;
+}
+
static int wl3501_pwr_mgmt(struct wl3501_card *this, int suspend)
{
struct wl3501_pwr_mgmt_req sig = {