Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/8390/8390.c | 14
-rw-r--r-- drivers/net/ethernet/8390/8390p.c | 11
-rw-r--r-- drivers/net/ethernet/Kconfig | 13
-rw-r--r-- drivers/net/ethernet/Makefile | 2
-rw-r--r-- drivers/net/ethernet/adi/adin1110.c | 2
-rw-r--r-- drivers/net/ethernet/airoha/airoha_eth.c | 41
-rw-r--r-- drivers/net/ethernet/airoha/airoha_eth.h | 2
-rw-r--r-- drivers/net/ethernet/airoha/airoha_npu.c | 60
-rw-r--r-- drivers/net/ethernet/alacritech/slic.h | 50
-rw-r--r-- drivers/net/ethernet/alacritech/slicoss.c | 8
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 22
-rw-r--r-- drivers/net/ethernet/amd/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/amd/declance.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 3
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 8
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 64
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 1
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 18
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp.c | 32
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp.h | 36
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 58
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/Makefile | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/bnge.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/bnge_auxr.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/bnge_devlink.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h | 446
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c | 67
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/bnge_netdev.c | 409
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/bnge_netdev.h | 123
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/bnge_rmem.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/bnge_txrx.c | 1642
-rw-r--r-- drivers/net/ethernet/broadcom/bnge/bnge_txrx.h | 126
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 253
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 14
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 131
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 51
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 2
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 40
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 17
-rw-r--r-- drivers/net/ethernet/dnet.c | 877
-rw-r--r-- drivers/net/ethernet/dnet.h | 220
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 8
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 6
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 6
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 43
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_hw.h | 6
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 16
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 356
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 14
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 1601
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth_ethtool.c | 14
-rw-r--r-- drivers/net/ethernet/google/gve/gve_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 9
-rw-r--r-- drivers/net/ethernet/google/gve/gve_tx_dqo.c | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 16
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 23
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 11
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 11
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/Makefile | 1
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c | 3
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_csr.h | 6
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c | 3
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_filter.c | 417
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c | 115
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h | 6
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h | 24
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c | 99
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h | 21
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c | 90
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h | 23
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_irq.c | 97
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_lld.c | 58
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_main.c | 186
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c | 55
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h | 2
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c | 313
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h | 53
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h | 69
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c | 377
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c | 290
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h | 47
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h | 60
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c | 6
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_rx.c | 27
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_rx.h | 21
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_tx.c | 34
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_tx.h | 16
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_base.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 79
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dpll.c | 758
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dpll.h | 30
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 31
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_irq.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 71
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 196
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.c | 32
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_tspll.c | 217
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_tspll.h | 13
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 49
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 199
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.c | 4
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf.h | 179
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_dev.c | 18
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 93
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_lib.c | 238
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_ptp.c | 17
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 810
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_txrx.h | 44
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_vf_dev.c | 21
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 1096
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_virtchnl.h | 88
-rw-r--r-- drivers/net/ethernet/intel/idpf/xdp.c | 79
-rw-r--r-- drivers/net/ethernet/intel/idpf/xdp.h | 26
-rw-r--r-- drivers/net/ethernet/intel/idpf/xsk.c | 12
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 7
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 45
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c | 47
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c | 66
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.h | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h | 31
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_rx.c | 8
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c | 39
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c | 8
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 11
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 12
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 13
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 22
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_pci.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 1
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 42
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dpll.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ecpf.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 66
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 429
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 382
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 75
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 55
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 47
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 215
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 74
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 12
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_csr.h | 12
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c | 407
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_devlink.c | 8
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c | 14
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 44
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_fw.h | 10
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_mac.c | 24
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_pci.c | 4
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 28
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_txrx.h | 6
-rw-r--r-- drivers/net/ethernet/micrel/ks8842.c | 4
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_common.c | 2
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_spi.c | 4
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 4
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ethtool.c | 13
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_en.c | 80
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 13
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 28
-rw-r--r-- drivers/net/ethernet/neterion/Kconfig | 35
-rw-r--r-- drivers/net/ethernet/neterion/Makefile | 6
-rw-r--r-- drivers/net/ethernet/neterion/s2io-regs.h | 958
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 8572
-rw-r--r-- drivers/net/ethernet/neterion/s2io.h | 1124
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 25
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/realtek/8139too.c | 40
-rw-r--r-- drivers/net/ethernet/realtek/Kconfig | 16
-rw-r--r-- drivers/net/ethernet/realtek/Makefile | 1
-rw-r--r-- drivers/net/ethernet/realtek/atp.c | 886
-rw-r--r-- drivers/net/ethernet/realtek/atp.h | 262
-rw-r--r-- drivers/net/ethernet/realtek/r8169.h | 3
-rw-r--r-- drivers/net/ethernet/realtek/r8169_firmware.c | 2
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 243
-rw-r--r-- drivers/net/ethernet/renesas/rcar_gen4_ptp.c | 34
-rw-r--r-- drivers/net/ethernet/renesas/rcar_gen4_ptp.h | 18
-rw-r--r-- drivers/net/ethernet/renesas/rswitch_l2.c | 15
-rw-r--r-- drivers/net/ethernet/renesas/rswitch_main.c | 11
-rw-r--r-- drivers/net/ethernet/renesas/rtsn.c | 30
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/ef100_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/ethtool_common.c | 11
-rw-r--r-- drivers/net/ethernet/sfc/ethtool_common.h | 1
-rw-r--r-- drivers/net/ethernet/sfc/falcon/ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/sfc/nic.h | 7
-rw-r--r-- drivers/net/ethernet/sfc/siena/ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/siena/ethtool_common.c | 11
-rw-r--r-- drivers/net/ethernet/sfc/siena/ethtool_common.h | 1
-rw-r--r-- drivers/net/ethernet/sis/sis900.c | 31
-rw-r--r-- drivers/net/ethernet/smsc/epic100.c | 35
-rw-r--r-- drivers/net/ethernet/socionext/sni_ave.c | 4
-rw-r--r-- drivers/net/ethernet/spacemit/k1_emac.c | 111
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/descs.h | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/descs_com.h | 48
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 43
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 22
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-motorcomm.c | 384
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 1245
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c | 28
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100.h | 36
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 72
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 28
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 18
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 79
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 35
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 70
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 46
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 167
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 172
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 41
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 27
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 39
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 76
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 29
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 25
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 190
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c | 45
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h | 68
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 2
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 11
-rw-r--r-- drivers/net/ethernet/sun/sunhme.c | 3
-rw-r--r-- drivers/net/ethernet/ti/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/ti/Makefile | 2
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.c | 5
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/ti/cpsw_new.c | 15
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_common.c | 1
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 13
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.h | 1
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c | 9
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_prueth.c | 545
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_prueth.h | 20
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_prueth_fdb_tbl.h | 76
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c | 1065
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_prueth_switch.h | 37
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_switch.h | 103
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_switchdev.c | 333
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_switchdev.h | 13
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_vlan_mcast_filter_mmap.h | 120
-rw-r--r-- drivers/net/ethernet/ti/netcp.h | 8
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 16
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 17
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 83
322 files changed, 16463 insertions, 18694 deletions
diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c
index c5636245f1ca..8e4354568f04 100644
--- a/drivers/net/ethernet/8390/8390.c
+++ b/drivers/net/ethernet/8390/8390.c
@@ -86,19 +86,5 @@ void NS8390_init(struct net_device *dev, int startp)
}
EXPORT_SYMBOL(NS8390_init);
-#if defined(MODULE)
-
-static int __init ns8390_module_init(void)
-{
- return 0;
-}
-
-static void __exit ns8390_module_exit(void)
-{
-}
-
-module_init(ns8390_module_init);
-module_exit(ns8390_module_exit);
-#endif /* MODULE */
MODULE_DESCRIPTION("National Semiconductor 8390 core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
index 6d429b11e9c6..a0bfc8e34f79 100644
--- a/drivers/net/ethernet/8390/8390p.c
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -91,16 +91,5 @@ void NS8390p_init(struct net_device *dev, int startp)
}
EXPORT_SYMBOL(NS8390p_init);
-static int __init NS8390p_init_module(void)
-{
- return 0;
-}
-
-static void __exit NS8390p_cleanup_module(void)
-{
-}
-
-module_init(NS8390p_init_module);
-module_exit(NS8390p_cleanup_module);
MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver");
MODULE_LICENSE("GPL");
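Both 8390 hunks above delete module_init()/module_exit() pairs that did nothing: the module loader treats a missing init or exit callback as a no-op, so a library-style module only needs its MODULE_* metadata. A minimal sketch of what remains (module description hypothetical):

    #include <linux/module.h>

    /* No module_init()/module_exit() required: the loader skips the
     * missing callbacks, and the exported symbols are all the module
     * provides.
     */
    MODULE_DESCRIPTION("Example library-style core module");
    MODULE_LICENSE("GPL");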
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 4a1b368ca7e6..aa7103e7f47f 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -55,18 +55,6 @@ source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
source "drivers/net/ethernet/cortina/Kconfig"
source "drivers/net/ethernet/davicom/Kconfig"
-
-config DNET
- tristate "Dave ethernet support (DNET)"
- depends on HAS_IOMEM
- select PHYLIB
- help
- The Dave ethernet interface (DNET) is found on Qong Board FPGA.
- Say Y to include support for the DNET chip.
-
- To compile this driver as a module, choose M here: the module
- will be called dnet.
-
source "drivers/net/ethernet/dec/Kconfig"
source "drivers/net/ethernet/dlink/Kconfig"
source "drivers/net/ethernet/emulex/Kconfig"
@@ -143,7 +131,6 @@ config FEALNX
source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/natsemi/Kconfig"
-source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 2e18df8ca8ec..6615a67a63d5 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -34,7 +34,6 @@ obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
obj-$(CONFIG_NET_VENDOR_CORTINA) += cortina/
obj-$(CONFIG_CX_ECAT) += ec_bhf.o
obj-$(CONFIG_DM9000) += davicom/
-obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_NET_VENDOR_DEC) += dec/
obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
@@ -69,7 +68,6 @@ obj-$(CONFIG_NET_VENDOR_MUCSE) += mucse/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
-obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
obj-$(CONFIG_NET_VENDOR_NI) += ni/
obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 71a2397edf2b..1b4e37d000b9 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -123,7 +123,7 @@ enum adin1110_chips_id {
struct adin1110_cfg {
enum adin1110_chips_id id;
- char name[MDIO_NAME_SIZE];
+ const char *name;
u32 phy_ids[PHY_MAX_ADDR];
u32 ports_nr;
u32 phy_id_val;
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 315d97036ac1..62bcbbbe2a95 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -108,11 +108,11 @@ static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
u32 vip_port;
switch (port->id) {
- case 3:
+ case AIROHA_GDM3_IDX:
/* FIXME: handle XSI_PCIE1_PORT */
vip_port = XSI_PCIE0_VIP_PORT_MASK;
break;
- case 4:
+ case AIROHA_GDM4_IDX:
/* FIXME: handle XSI_USB_PORT */
vip_port = XSI_ETH_VIP_PORT_MASK;
break;
@@ -514,8 +514,8 @@ static int airoha_fe_init(struct airoha_eth *eth)
FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
- airoha_fe_set(eth, REG_GDM_FWD_CFG(3), GDM_PAD_EN_MASK);
- airoha_fe_set(eth, REG_GDM_FWD_CFG(4), GDM_PAD_EN_MASK);
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(AIROHA_GDM3_IDX), GDM_PAD_EN_MASK);
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(AIROHA_GDM4_IDX), GDM_PAD_EN_MASK);
airoha_fe_crsn_qsel_init(eth);
@@ -1690,27 +1690,29 @@ static int airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
/* Forward the traffic to the proper GDM port */
pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
: FE_PSE_PORT_GDM4;
- airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port);
- airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC_MASK);
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(AIROHA_GDM2_IDX),
+ pse_port);
+ airoha_fe_clear(eth, REG_GDM_FWD_CFG(AIROHA_GDM2_IDX),
+ GDM_STRIP_CRC_MASK);
/* Enable GDM2 loopback */
- airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff);
- airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
+ airoha_fe_wr(eth, REG_GDM_TXCHN_EN(AIROHA_GDM2_IDX), 0xffffffff);
+ airoha_fe_wr(eth, REG_GDM_RXCHN_EN(AIROHA_GDM2_IDX), 0xffff);
chan = port->id == AIROHA_GDM3_IDX ? airoha_is_7581(eth) ? 4 : 3 : 0;
- airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
+ airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(AIROHA_GDM2_IDX),
LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
FIELD_PREP(LPBK_CHAN_MASK, chan) |
LBK_GAP_MODE_MASK | LBK_LEN_MODE_MASK |
LBK_CHAN_MODE_MASK | LPBK_EN_MASK);
- airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(AIROHA_GDM2_IDX),
GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
/* Disable VIP and IFC for GDM2 */
- airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2));
- airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2));
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(AIROHA_GDM2_IDX));
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(AIROHA_GDM2_IDX));
/* XXX: handle XSI_USB_PORT and XSI_PCE1_PORT */
nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
@@ -1746,8 +1748,8 @@ static int airoha_dev_init(struct net_device *dev)
airoha_set_macaddr(port, dev->dev_addr);
switch (port->id) {
- case 3:
- case 4:
+ case AIROHA_GDM3_IDX:
+ case AIROHA_GDM4_IDX:
/* If GDM2 is active we can't enable loopback */
if (!eth->ports[1]) {
int err;
@@ -1757,7 +1759,7 @@ static int airoha_dev_init(struct net_device *dev)
return err;
}
fallthrough;
- case 2:
+ case AIROHA_GDM2_IDX:
if (airoha_ppe_is_enabled(eth, 1)) {
/* For PPE2 always use secondary cpu port. */
fe_cpu_port = FE_PSE_PORT_CDM2;
@@ -2803,6 +2805,7 @@ static const struct ethtool_ops airoha_ethtool_ops = {
.get_drvinfo = airoha_ethtool_get_drvinfo,
.get_eth_mac_stats = airoha_ethtool_get_mac_stats,
.get_rmon_stats = airoha_ethtool_get_rmon_stats,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
@@ -3101,14 +3104,14 @@ static const char * const en7581_xsi_rsts_names[] = {
static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, int nbq)
{
switch (port->id) {
- case 3:
+ case AIROHA_GDM3_IDX:
/* 7581 SoC supports PCIe serdes on GDM3 port */
if (nbq == 4)
return HSGMII_LAN_7581_PCIE0_SRCPORT;
if (nbq == 5)
return HSGMII_LAN_7581_PCIE1_SRCPORT;
break;
- case 4:
+ case AIROHA_GDM4_IDX:
/* 7581 SoC supports eth and usb serdes on GDM4 port */
if (!nbq)
return HSGMII_LAN_7581_ETH_SRCPORT;
@@ -3132,12 +3135,12 @@ static const char * const an7583_xsi_rsts_names[] = {
static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq)
{
switch (port->id) {
- case 3:
+ case AIROHA_GDM3_IDX:
/* 7583 SoC supports eth serdes on GDM3 port */
if (!nbq)
return HSGMII_LAN_7583_ETH_SRCPORT;
break;
- case 4:
+ case AIROHA_GDM4_IDX:
/* 7583 SoC supports PCIe and USB serdes on GDM4 port */
if (!nbq)
return HSGMII_LAN_7583_PCIE_SRCPORT;
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index fbbc58133364..20e602d61e61 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -21,7 +21,7 @@
#define AIROHA_MAX_NUM_IRQ_BANKS 4
#define AIROHA_MAX_DSA_PORTS 7
#define AIROHA_MAX_NUM_RSTS 3
-#define AIROHA_MAX_MTU 9216
+#define AIROHA_MAX_MTU 9220
#define AIROHA_MAX_PACKET_SIZE 2048
#define AIROHA_NUM_QOS_CHANNELS 4
#define AIROHA_NUM_QOS_QUEUES 8
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index 68b7f9684dc7..89f22f3f47dc 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -16,6 +16,8 @@
#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
+#define NPU_EN7581_7996_FIRMWARE_DATA "airoha/en7581_MT7996_npu_data.bin"
+#define NPU_EN7581_7996_FIRMWARE_RV32 "airoha/en7581_MT7996_npu_rv32.bin"
#define NPU_AN7583_FIRMWARE_DATA "airoha/an7583_npu_data.bin"
#define NPU_AN7583_FIRMWARE_RV32 "airoha/an7583_npu_rv32.bin"
#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000
@@ -195,18 +197,18 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
}
static int airoha_npu_load_firmware(struct device *dev, void __iomem *addr,
- const struct airoha_npu_fw *fw_info)
+ const char *fw_name, int fw_max_size)
{
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, fw_info->name, dev);
+ ret = request_firmware(&fw, fw_name, dev);
if (ret)
return ret == -ENOENT ? -EPROBE_DEFER : ret;
- if (fw->size > fw_info->max_size) {
+ if (fw->size > fw_max_size) {
dev_err(dev, "%s: fw size too overlimit (%zu)\n",
- fw_info->name, fw->size);
+ fw_name, fw->size);
ret = -E2BIG;
goto out;
}
@@ -218,6 +220,28 @@ out:
return ret;
}
+static int
+airoha_npu_load_firmware_from_dts(struct device *dev, void __iomem *addr,
+ void __iomem *base)
+{
+ const char *fw_names[2];
+ int ret;
+
+ ret = of_property_read_string_array(dev->of_node, "firmware-name",
+ fw_names, ARRAY_SIZE(fw_names));
+ if (ret != ARRAY_SIZE(fw_names))
+ return -EINVAL;
+
+ ret = airoha_npu_load_firmware(dev, addr, fw_names[0],
+ NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
+ if (ret)
+ return ret;
+
+ return airoha_npu_load_firmware(dev, base + REG_NPU_LOCAL_SRAM,
+ fw_names[1],
+ NPU_EN7581_FIRMWARE_DATA_MAX_SIZE);
+}
+
static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
struct resource *res)
{
@@ -233,14 +257,22 @@ static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
if (IS_ERR(addr))
return PTR_ERR(addr);
+ /* Try to load firmware images using the firmware names provided via
+ * dts if available.
+ */
+ if (of_find_property(dev->of_node, "firmware-name", NULL))
+ return airoha_npu_load_firmware_from_dts(dev, addr, base);
+
/* Load rv32 npu firmware */
- ret = airoha_npu_load_firmware(dev, addr, &soc->fw_rv32);
+ ret = airoha_npu_load_firmware(dev, addr, soc->fw_rv32.name,
+ soc->fw_rv32.max_size);
if (ret)
return ret;
/* Load data npu firmware */
return airoha_npu_load_firmware(dev, base + REG_NPU_LOCAL_SRAM,
- &soc->fw_data);
+ soc->fw_data.name,
+ soc->fw_data.max_size);
}
static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
@@ -519,6 +551,14 @@ static int airoha_npu_wlan_init_memory(struct airoha_npu *npu)
if (err)
return err;
+ if (of_property_match_string(npu->dev->of_node, "memory-region-names",
+ "ba") >= 0) {
+ cmd = WLAN_FUNC_SET_WAIT_DRAM_BA_NODE_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "ba", cmd);
+ if (err)
+ return err;
+ }
+
cmd = WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU;
return airoha_npu_wlan_msg_send(npu, 0, cmd, &val, sizeof(val),
GFP_KERNEL);
@@ -657,6 +697,7 @@ static int airoha_npu_probe(struct platform_device *pdev)
struct resource res;
void __iomem *base;
int i, irq, err;
+ u32 val;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -750,6 +791,11 @@ static int airoha_npu_probe(struct platform_device *pdev)
regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
msleep(100);
+ if (!airoha_npu_wlan_msg_get(npu, 0, WLAN_FUNC_GET_WAIT_NPU_VERSION,
+ &val, sizeof(val), GFP_KERNEL))
+ dev_info(dev, "NPU fw version: %0d.%d\n",
+ (val >> 16) & 0xffff, val & 0xffff);
+
platform_set_drvdata(pdev, npu);
return 0;
@@ -776,6 +822,8 @@ module_platform_driver(airoha_npu_driver);
MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA);
MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32);
+MODULE_FIRMWARE(NPU_EN7581_7996_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_EN7581_7996_FIRMWARE_RV32);
MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_DATA);
MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_RV32);
MODULE_LICENSE("GPL");
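The new airoha_npu_load_firmware_from_dts() path above reads a two-entry "firmware-name" string array from the device node and falls back to the per-SoC defaults when the property is absent. A hedged devicetree fragment showing the expected shape (node name, unit address, and compatible are illustrative; the image names match the NPU_EN7581_7996_FIRMWARE_* defines added above, rv32 image first, data image second, as required by the ARRAY_SIZE(fw_names) check):

    npu@1e900000 {
            compatible = "airoha,en7581-npu";
            firmware-name = "airoha/en7581_MT7996_npu_rv32.bin",
                            "airoha/en7581_MT7996_npu_data.bin";
    };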
diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
index 82071d0e5f7f..f5bb2d9a61be 100644
--- a/drivers/net/ethernet/alacritech/slic.h
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -284,7 +284,7 @@
#define SLIC_INC_STATS_COUNTER(st, counter) \
do { \
u64_stats_update_begin(&(st)->syncp); \
- (st)->counter++; \
+ u64_stats_inc(&(st)->counter); \
u64_stats_update_end(&(st)->syncp); \
} while (0)
@@ -293,7 +293,7 @@ do { \
unsigned int start; \
do { \
start = u64_stats_fetch_begin(&(st)->syncp); \
- newst = (st)->counter; \
+ newst = u64_stats_read(&(st)->counter); \
} while (u64_stats_fetch_retry(&(st)->syncp, start)); \
}
@@ -407,34 +407,34 @@ struct slic_oasis_eeprom {
};
struct slic_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_mcasts;
- u64 rx_errors;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_mcasts;
+ u64_stats_t rx_errors;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
/* HW STATS */
- u64 rx_buff_miss;
- u64 tx_dropped;
- u64 irq_errs;
+ u64_stats_t rx_buff_miss;
+ u64_stats_t tx_dropped;
+ u64_stats_t irq_errs;
/* transport layer */
- u64 rx_tpcsum;
- u64 rx_tpoflow;
- u64 rx_tphlen;
+ u64_stats_t rx_tpcsum;
+ u64_stats_t rx_tpoflow;
+ u64_stats_t rx_tphlen;
/* ip layer */
- u64 rx_ipcsum;
- u64 rx_iplen;
- u64 rx_iphlen;
+ u64_stats_t rx_ipcsum;
+ u64_stats_t rx_iplen;
+ u64_stats_t rx_iphlen;
/* link layer */
- u64 rx_early;
- u64 rx_buffoflow;
- u64 rx_lcode;
- u64 rx_drbl;
- u64 rx_crc;
- u64 rx_oflow802;
- u64 rx_uflow802;
+ u64_stats_t rx_early;
+ u64_stats_t rx_buffoflow;
+ u64_stats_t rx_lcode;
+ u64_stats_t rx_drbl;
+ u64_stats_t rx_crc;
+ u64_stats_t rx_oflow802;
+ u64_stats_t rx_uflow802;
/* oasis only */
- u64 tx_carrier;
+ u64_stats_t tx_carrier;
struct u64_stats_sync syncp;
};
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index f62851708d4f..7488fb6ace0b 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -378,8 +378,8 @@ static void slic_xmit_complete(struct slic_device *sdev)
smp_wmb();
u64_stats_update_begin(&sdev->stats.syncp);
- sdev->stats.tx_bytes += bytes;
- sdev->stats.tx_packets += frames;
+ u64_stats_add(&sdev->stats.tx_bytes, bytes);
+ u64_stats_add(&sdev->stats.tx_packets, frames);
u64_stats_update_end(&sdev->stats.syncp);
netif_tx_lock(dev);
@@ -615,8 +615,8 @@ static void slic_handle_receive(struct slic_device *sdev, unsigned int todo,
}
u64_stats_update_begin(&sdev->stats.syncp);
- sdev->stats.rx_bytes += bytes;
- sdev->stats.rx_packets += frames;
+ u64_stats_add(&sdev->stats.rx_bytes, bytes);
+ u64_stats_add(&sdev->stats.rx_packets, frames);
u64_stats_update_end(&sdev->stats.syncp);
slic_refill_rx_queue(sdev, GFP_ATOMIC);
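The slic.h/slicoss.c changes above convert plain u64 counters to u64_stats_t so every access goes through the u64_stats helpers; paired with the existing u64_stats_sync seqcount, this prevents torn 64-bit reads on 32-bit kernels while compiling down to plain loads and stores on 64-bit ones. A self-contained sketch of the pattern the conversion enforces (struct and function names illustrative, not from slicoss):

    #include <linux/u64_stats_sync.h>

    struct example_stats {
            u64_stats_t packets;
            struct u64_stats_sync syncp;
    };

    static void example_update(struct example_stats *st)
    {
            u64_stats_update_begin(&st->syncp);
            u64_stats_inc(&st->packets);
            u64_stats_update_end(&st->syncp);
    }

    static u64 example_read(struct example_stats *st)
    {
            unsigned int start;
            u64 packets;

            /* Retry until a consistent snapshot is observed */
            do {
                    start = u64_stats_fetch_begin(&st->syncp);
                    packets = u64_stats_read(&st->packets);
            } while (u64_stats_fetch_retry(&st->syncp, start));

            return packets;
    }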
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index fe3479b84a1f..2455d6dddc26 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -835,27 +835,11 @@ static int ena_set_rxfh_fields(struct net_device *netdev,
return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}
-static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
- u32 *rules)
+static u32 ena_get_rx_ring_count(struct net_device *netdev)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- int rc = 0;
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = adapter->num_io_queues;
- rc = 0;
- break;
- case ETHTOOL_GRXCLSRLCNT:
- case ETHTOOL_GRXCLSRULE:
- case ETHTOOL_GRXCLSRLALL:
- default:
- netif_err(adapter, drv, netdev,
- "Command parameter %d is not supported\n", info->cmd);
- rc = -EOPNOTSUPP;
- }
-
- return rc;
+ return adapter->num_io_queues;
}
static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
@@ -1096,7 +1080,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_sset_count = ena_get_sset_count,
.get_strings = ena_get_ethtool_strings,
.get_ethtool_stats = ena_get_ethtool_stats,
- .get_rxnfc = ena_get_rxnfc,
+ .get_rx_ring_count = ena_get_rx_ring_count,
.get_rxfh_indir_size = ena_get_rxfh_indir_size,
.get_rxfh_key_size = ena_get_rxfh_key_size,
.get_rxfh = ena_get_rxfh,
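ena is one of several drivers in this series converted to the dedicated .get_rx_ring_count ethtool callback: the core now answers ETHTOOL_GRXRINGS through it, so a get_rxnfc implementation that only reported the ring count can be dropped entirely, while drivers with real classifier rules (such as atlantic below) keep get_rxnfc for the remaining commands. The minimal shape of the new callback, sketched with hypothetical driver names:

    static u32 foo_get_rx_ring_count(struct net_device *netdev)
    {
            struct foo_priv *priv = netdev_priv(netdev);

            /* 'num_rx_rings' stands in for whatever the driver tracks */
            return priv->num_rx_rings;
    }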
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index d54dca3074eb..45e8d698781c 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -165,7 +165,7 @@ config AMD_XGBE
select CRC32
select PHYLIB
select AMD_XGBE_HAVE_ECC if X86
- select NET_SELFTESTS
+ imply NET_SELFTESTS
help
This driver supports the AMD 10GbE Ethernet device found on an
AMD SoC.
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 8d05a0c5f2d5..e6d56fcdc1dd 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -813,7 +813,7 @@ static int lance_open(struct net_device *dev)
if (lp->dma_irq >= 0) {
unsigned long flags;
- if (request_irq(lp->dma_irq, lance_dma_merr_int, IRQF_ONESHOT,
+ if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
"lance error", dev)) {
free_irq(dev->irq, dev);
printk("%s: Can't get DMA IRQ %d\n", dev->name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 62b01de93db4..711f295eb777 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -598,6 +598,7 @@
#define MMC_RXVLANFRAMES_GB_LO 0x0998
#define MMC_RXVLANFRAMES_GB_HI 0x099c
#define MMC_RXWATCHDOGERROR 0x09a0
+#define MMC_RXALIGNMENTERROR 0x09bc
/* MMC register entry bit positions and sizes */
#define MMC_CR_CR_INDEX 0
@@ -658,6 +659,8 @@
#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
+#define MMC_RISR_RXALIGNMENTERROR_INDEX 27
+#define MMC_RISR_RXALIGNMENTERROR_WIDTH 1
#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index b646ae575e6a..c04a9c76bd40 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2794,6 +2794,7 @@ static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
case MMC_RXUNDERSIZE_G:
case MMC_RXOVERSIZE_G:
case MMC_RXWATCHDOGERROR:
+ case MMC_RXALIGNMENTERROR:
read_hi = false;
break;
@@ -2997,6 +2998,10 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
stats->rxwatchdogerror +=
xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXALIGNMENTERROR))
+ stats->rxalignmenterror +=
+ xgbe_mmc_read(pdata, MMC_RXALIGNMENTERROR);
}
static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
@@ -3129,6 +3134,9 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
stats->rxwatchdogerror +=
xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+ stats->rxalignmenterror +=
+ xgbe_mmc_read(pdata, MMC_RXALIGNMENTERROR);
+
/* Un-freeze counters */
XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b5a60a048896..62bb4b8a68e1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1838,6 +1838,7 @@ static void xgbe_get_stats64(struct net_device *netdev,
s->rx_length_errors = pstats->rxlengtherror;
s->rx_crc_errors = pstats->rxcrcerror;
s->rx_over_errors = pstats->rxfifooverflow;
+ s->rx_frame_errors = pstats->rxalignmenterror;
s->tx_packets = pstats->txframecount_gb;
s->tx_bytes = pstats->txoctetcount_gb;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 0d19b09497a0..a9f4fcc4daae 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -362,13 +362,16 @@ static int xgbe_set_coalesce(struct net_device *netdev,
/* Check the bounds of values for Rx */
if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
- netdev_err(netdev, "rx-usec is limited to %d usecs\n",
- hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "rx-usec is limited to %d usecs",
+ hw_if->riwt_to_usec(pdata,
+ XGMAC_MAX_DMA_RIWT));
return -EINVAL;
}
if (rx_frames > pdata->rx_desc_count) {
- netdev_err(netdev, "rx-frames is limited to %d frames\n",
- pdata->rx_desc_count);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "rx-frames is limited to %d frames",
+ pdata->rx_desc_count);
return -EINVAL;
}
@@ -377,8 +380,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
/* Check the bounds of values for Tx */
if (!tx_usecs) {
- NL_SET_ERR_MSG_FMT_MOD(extack,
- "tx-usecs must not be 0");
+ NL_SET_ERR_MSG_MOD(extack, "tx-usecs must not be 0");
return -EINVAL;
}
if (tx_usecs > XGMAC_MAX_COAL_TX_TICK) {
@@ -387,8 +389,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
if (tx_frames > pdata->tx_desc_count) {
- netdev_err(netdev, "tx-frames is limited to %d frames\n",
- pdata->tx_desc_count);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx-frames is limited to %d frames",
+ pdata->tx_desc_count);
return -EINVAL;
}
@@ -414,20 +417,11 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return 0;
}
-static int xgbe_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+static u32 xgbe_get_rx_ring_count(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- switch (rxnfc->cmd) {
- case ETHTOOL_GRXRINGS:
- rxnfc->data = pdata->rx_ring_count;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
+ return pdata->rx_ring_count;
}
static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
@@ -474,7 +468,7 @@ static int xgbe_set_rxfh(struct net_device *netdev,
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP) {
- netdev_err(netdev, "unsupported hash function\n");
+ NL_SET_ERR_MSG_MOD(extack, "unsupported hash function");
return -EOPNOTSUPP;
}
@@ -561,37 +555,39 @@ static int xgbe_set_ringparam(struct net_device *netdev,
unsigned int rx, tx;
if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) {
- netdev_err(netdev, "unsupported ring parameter\n");
+ NL_SET_ERR_MSG_MOD(extack, "unsupported ring parameter");
return -EINVAL;
}
if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) ||
(ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) {
- netdev_err(netdev,
- "rx ring parameter must be between %u and %u\n",
- XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "rx ring parameter must be between %u and %u",
+ XGBE_RX_DESC_CNT_MIN,
+ XGBE_RX_DESC_CNT_MAX);
return -EINVAL;
}
if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) ||
(ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) {
- netdev_err(netdev,
- "tx ring parameter must be between %u and %u\n",
- XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx ring parameter must be between %u and %u",
+ XGBE_TX_DESC_CNT_MIN,
+ XGBE_TX_DESC_CNT_MAX);
return -EINVAL;
}
rx = __rounddown_pow_of_two(ringparam->rx_pending);
if (rx != ringparam->rx_pending)
- netdev_notice(netdev,
- "rx ring parameter rounded to power of two: %u\n",
- rx);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "rx ring parameter rounded to power of two: %u",
+ rx);
tx = __rounddown_pow_of_two(ringparam->tx_pending);
if (tx != ringparam->tx_pending)
- netdev_notice(netdev,
- "tx ring parameter rounded to power of two: %u\n",
- tx);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx ring parameter rounded to power of two: %u",
+ tx);
if ((rx == pdata->rx_desc_count) &&
(tx == pdata->tx_desc_count))
@@ -752,7 +748,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_strings = xgbe_get_strings,
.get_ethtool_stats = xgbe_get_ethtool_stats,
.get_sset_count = xgbe_get_sset_count,
- .get_rxnfc = xgbe_get_rxnfc,
+ .get_rx_ring_count = xgbe_get_rx_ring_count,
.get_rxfh_key_size = xgbe_get_rxfh_key_size,
.get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
.get_rxfh = xgbe_get_rxfh,
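The xgbe ethtool conversions above switch from netdev_err()/netdev_notice() to extack reporting, so the message travels back over netlink and is printed by the ethtool utility itself instead of landing only in the kernel log. A minimal sketch of the pattern in a set_ringparam handler (driver name and limit macro hypothetical):

    static int foo_set_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring,
                                 struct kernel_ethtool_ringparam *kernel_ring,
                                 struct netlink_ext_ack *extack)
    {
            if (ring->rx_pending > FOO_RX_DESC_MAX) {
                    /* Reported back to userspace via the netlink ack */
                    NL_SET_ERR_MSG_FMT_MOD(extack,
                                           "rx ring limited to %u descriptors",
                                           FOO_RX_DESC_MAX);
                    return -EINVAL;
            }

            return 0;
    }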
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 03ef0f548483..1269b8ce9249 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -659,6 +659,7 @@ struct xgbe_mmc_stats {
u64 rxfifooverflow;
u64 rxvlanframes_gb;
u64 rxwatchdogerror;
+ u64 rxalignmenterror;
};
struct xgbe_ext_stats {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 6fef47ba0a59..a6e1826dd5d7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -500,20 +500,25 @@ static int aq_ethtool_set_rss(struct net_device *netdev,
return err;
}
+static u32 aq_ethtool_get_rx_ring_count(struct net_device *ndev)
+{
+ struct aq_nic_cfg_s *cfg;
+ struct aq_nic_s *aq_nic;
+
+ aq_nic = netdev_priv(ndev);
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ return cfg->vecs;
+}
+
static int aq_ethtool_get_rxnfc(struct net_device *ndev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg;
int err = 0;
- cfg = aq_nic_get_cfg(aq_nic);
-
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = cfg->vecs;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = aq_get_rxnfc_count_all_rules(aq_nic);
break;
@@ -1072,6 +1077,7 @@ const struct ethtool_ops aq_ethtool_ops = {
.set_rxfh = aq_ethtool_set_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
.set_rxnfc = aq_ethtool_set_rxnfc,
+ .get_rx_ring_count = aq_ethtool_get_rx_ring_count,
.get_msglevel = aq_get_msg_level,
.set_msglevel = aq_set_msg_level,
.get_sset_count = aq_ethtool_get_sset_count,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index b24eaa5283fa..ef9447810071 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -701,9 +701,6 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
} else if (l4proto == IPPROTO_UDP) {
dx_buff->is_gso_udp = 1U;
dx_buff->len_l4 = sizeof(struct udphdr);
- /* UDP GSO Hardware does not replace packet length. */
- udp_hdr(skb)->len = htons(dx_buff->mss +
- dx_buff->len_l4);
} else {
WARN_ONCE(true, "Bad GSO mode");
goto exit;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 014340f33345..aa6d8606849f 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -1081,15 +1081,10 @@ static irqreturn_t bcmasp_isr_wol(int irq, void *data)
struct bcmasp_priv *priv = data;
u32 status;
- /* No L3 IRQ, so we good */
- if (priv->wol_irq <= 0)
- goto irq_handled;
-
status = wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_STATUS) &
~wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_MASK_STATUS);
wakeup_intr2_core_wl(priv, status, ASP_WAKEUP_INTR2_CLEAR);
-irq_handled:
pm_wakeup_event(&priv->pdev->dev, 0);
return IRQ_HANDLED;
}
@@ -1322,6 +1317,8 @@ static int bcmasp_probe(struct platform_device *pdev)
bcmasp_core_init_filters(priv);
+ bcmasp_init_wol(priv);
+
ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
if (!ports_node) {
dev_warn(dev, "No ports found\n");
@@ -1333,16 +1330,14 @@ static int bcmasp_probe(struct platform_device *pdev)
intf = bcmasp_interface_create(priv, intf_node, i);
if (!intf) {
dev_err(dev, "Cannot create eth interface %d\n", i);
- bcmasp_remove_intfs(priv);
- ret = -ENOMEM;
- goto of_put_exit;
+ of_node_put(ports_node);
+ ret = -EINVAL;
+ goto err_cleanup;
}
list_add_tail(&intf->list, &priv->intfs);
i++;
}
-
- /* Check and enable WoL */
- bcmasp_init_wol(priv);
+ of_node_put(ports_node);
/* Drop the clock reference count now and let ndo_open()/ndo_close()
* manage it for us from now on.
@@ -1357,19 +1352,20 @@ static int bcmasp_probe(struct platform_device *pdev)
list_for_each_entry(intf, &priv->intfs, list) {
ret = register_netdev(intf->ndev);
if (ret) {
- netdev_err(intf->ndev,
- "failed to register net_device: %d\n", ret);
- bcmasp_wol_irq_destroy(priv);
- bcmasp_remove_intfs(priv);
- goto of_put_exit;
+ dev_err(dev, "failed to register net_device: %d\n", ret);
+ goto err_cleanup;
}
count++;
}
dev_info(dev, "Initialized %d port(s)\n", count);
-of_put_exit:
- of_node_put(ports_node);
+ return ret;
+
+err_cleanup:
+ bcmasp_wol_irq_destroy(priv);
+ bcmasp_remove_intfs(priv);
+
return ret;
}
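The bcmasp probe rework above consolidates the scattered failure handling into a single err_cleanup label and releases the ports_node reference on every path out of the interface loop. This is the canonical goto-based unwind shape, sketched with placeholder function names:

    static int foo_probe(struct platform_device *pdev)
    {
            int ret;

            ret = foo_init_hw(pdev);
            if (ret)
                    return ret;

            ret = foo_register_ports(pdev);
            if (ret)
                    goto err_cleanup;

            return 0;

    err_cleanup:
            /* Undo everything acquired before the failure point */
            foo_teardown_hw(pdev);
            return ret;
    }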
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index e238507be40a..29cd87335ec8 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -268,13 +268,6 @@ struct bcmasp_mib_counters {
u32 tx_timeout_cnt;
};
-struct bcmasp_intf_ops {
- unsigned long (*rx_desc_read)(struct bcmasp_intf *intf);
- void (*rx_buffer_write)(struct bcmasp_intf *intf, dma_addr_t addr);
- void (*rx_desc_write)(struct bcmasp_intf *intf, dma_addr_t addr);
- unsigned long (*tx_read)(struct bcmasp_intf *intf);
- void (*tx_write)(struct bcmasp_intf *intf, dma_addr_t addr);
-};
struct bcmasp_priv;
@@ -286,7 +279,6 @@ struct bcmasp_intf {
/* ASP Ch */
int channel;
int port;
- const struct bcmasp_intf_ops *ops;
/* Used for splitting shared resources */
int index;
@@ -407,34 +399,6 @@ struct bcmasp_priv {
struct mutex net_lock;
};
-static inline unsigned long bcmasp_intf_rx_desc_read(struct bcmasp_intf *intf)
-{
- return intf->ops->rx_desc_read(intf);
-}
-
-static inline void bcmasp_intf_rx_buffer_write(struct bcmasp_intf *intf,
- dma_addr_t addr)
-{
- intf->ops->rx_buffer_write(intf, addr);
-}
-
-static inline void bcmasp_intf_rx_desc_write(struct bcmasp_intf *intf,
- dma_addr_t addr)
-{
- intf->ops->rx_desc_write(intf, addr);
-}
-
-static inline unsigned long bcmasp_intf_tx_read(struct bcmasp_intf *intf)
-{
- return intf->ops->tx_read(intf);
-}
-
-static inline void bcmasp_intf_tx_write(struct bcmasp_intf *intf,
- dma_addr_t addr)
-{
- intf->ops->tx_write(intf, addr);
-}
-
#define __BCMASP_IO_MACRO(name, m) \
static inline u32 name##_rl(struct bcmasp_intf *intf, u32 off) \
{ \
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index ceb6c11431dd..d0a480430a95 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -231,39 +231,6 @@ help:
return skb;
}
-static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
-{
- return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
-}
-
-static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
-{
- rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
-}
-
-static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
-{
- rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
-}
-
-static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
-{
- return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
-}
-
-static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
-{
- tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
-}
-
-static const struct bcmasp_intf_ops bcmasp_intf_ops = {
- .rx_desc_read = bcmasp_rx_edpkt_dma_rq,
- .rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
- .rx_desc_write = bcmasp_rx_edpkt_dma_wq,
- .tx_read = bcmasp_tx_spb_dma_rq,
- .tx_write = bcmasp_tx_spb_dma_wq,
-};
-
static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcmasp_intf *intf = netdev_priv(dev);
@@ -368,7 +335,7 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
- bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
@@ -449,7 +416,7 @@ static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
struct bcmasp_desc *desc;
dma_addr_t mapping;
- read = bcmasp_intf_tx_read(intf);
+ read = tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
while (intf->tx_spb_dma_read != read) {
txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
mapping = dma_unmap_addr(txcb, dma_addr);
@@ -519,7 +486,7 @@ static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
u64 flags;
u32 len;
- valid = bcmasp_intf_rx_desc_read(intf) + 1;
+ valid = rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID) + 1;
if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
valid = intf->rx_edpkt_dma_addr;
@@ -591,8 +558,8 @@ static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
u64_stats_update_end(&stats->syncp);
next:
- bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
- desc->size));
+ rx_edpkt_cfg_wq(intf, (DESC_ADDR(desc->buf) + desc->size),
+ RX_EDPKT_RING_BUFFER_READ);
processed++;
intf->rx_edpkt_dma_read =
@@ -603,7 +570,7 @@ next:
DESC_RING_COUNT);
}
- bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_read, RX_EDPKT_DMA_READ);
if (processed < budget && napi_complete_done(&intf->rx_napi, processed))
bcmasp_enable_rx_irq(intf, 1);
@@ -1271,7 +1238,6 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
}
SET_NETDEV_DEV(ndev, dev);
- intf->ops = &bcmasp_intf_ops;
ndev->netdev_ops = &bcmasp_netdev_ops;
ndev->ethtool_ops = &bcmasp_ethtool_ops;
intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
@@ -1336,10 +1302,8 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
umac_enable_set(intf, UMC_CMD_RX_EN, 1);
- if (intf->parent->wol_irq > 0) {
- wakeup_intr2_core_wl(intf->parent, 0xffffffff,
- ASP_WAKEUP_INTR2_MASK_CLEAR);
- }
+ wakeup_intr2_core_wl(intf->parent, 0xffffffff,
+ ASP_WAKEUP_INTR2_MASK_CLEAR);
if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
intf->parent->eee_fixup)
@@ -1392,10 +1356,8 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
reg &= ~UMC_MPD_CTRL_MPD_EN;
umac_wl(intf, reg, UMC_MPD_CTRL);
- if (intf->parent->wol_irq > 0) {
- wakeup_intr2_core_wl(intf->parent, 0xffffffff,
- ASP_WAKEUP_INTR2_MASK_SET);
- }
+ wakeup_intr2_core_wl(intf->parent, 0xffffffff,
+ ASP_WAKEUP_INTR2_MASK_SET);
}
int bcmasp_interface_resume(struct bcmasp_intf *intf)
diff --git a/drivers/net/ethernet/broadcom/bnge/Makefile b/drivers/net/ethernet/broadcom/bnge/Makefile
index ea6596854e5c..fa604ee20264 100644
--- a/drivers/net/ethernet/broadcom/bnge/Makefile
+++ b/drivers/net/ethernet/broadcom/bnge/Makefile
@@ -10,4 +10,5 @@ bng_en-y := bnge_core.o \
bnge_resc.o \
bnge_netdev.o \
bnge_ethtool.o \
- bnge_auxr.o
+ bnge_auxr.o \
+ bnge_txrx.o
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
index 32fc16a37d02..f376913aa321 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -8,7 +8,7 @@
#define DRV_SUMMARY "Broadcom ThorUltra NIC Ethernet Driver"
#include <linux/etherdevice.h>
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
#include "bnge_rmem.h"
#include "bnge_resc.h"
#include "bnge_auxr.h"
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
index d64592b64e17..5f4cb4991964 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
@@ -14,7 +14,7 @@
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
#include "bnge.h"
#include "bnge_hwrm.h"
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
index a987afebd64d..f3a984d4d5f1 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
@@ -221,7 +221,7 @@ static int bnge_devlink_info_get(struct devlink *devlink,
DEVLINK_INFO_VERSION_GENERIC_FW, buf);
if (rc) {
NL_SET_ERR_MSG_MOD(extack,
- "Failed to set roce firmware version");
+ "Failed to set firmware version");
return rc;
}
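The new bnge_hw_def.h below declares TX/RX descriptor fields with BIT()/GENMASK(), which pairs naturally with the <linux/bitfield.h> accessors. A sketch of how such a mask is typically consumed (helper name illustrative; RX_CMP_LEN and struct rx_cmp are taken from the definitions below):

    #include <linux/bitfield.h>

    static u16 example_rx_cmp_len(const struct rx_cmp *rxcmp)
    {
            u32 len_flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);

            /* Extract the packet length from bits 31:16 */
            return FIELD_GET(RX_CMP_LEN, len_flags);
    }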
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h b/drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h
new file mode 100644
index 000000000000..49828dc05514
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h
@@ -0,0 +1,446 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_HW_DEF_H_
+#define _BNGE_HW_DEF_H_
+
+#define TX_BD_FLAGS_TCP_UDP_CHKSUM BIT(0)
+#define TX_BD_FLAGS_IP_CKSUM BIT(1)
+#define TX_BD_FLAGS_NO_CRC BIT(2)
+#define TX_BD_FLAGS_STAMP BIT(3)
+#define TX_BD_FLAGS_T_IP_CHKSUM BIT(4)
+#define TX_BD_FLAGS_LSO BIT(5)
+#define TX_BD_FLAGS_IPID_FMT BIT(6)
+#define TX_BD_FLAGS_T_IPID BIT(7)
+#define TX_BD_HSIZE GENMASK(23, 16)
+#define TX_BD_HSIZE_SHIFT 16
+
+#define TX_BD_CFA_ACTION GENMASK(31, 16)
+#define TX_BD_CFA_ACTION_SHIFT 16
+
+#define TX_BD_CFA_META_MASK 0xfffffff
+#define TX_BD_CFA_META_VID_MASK 0xfff
+#define TX_BD_CFA_META_PRI_MASK GENMASK(15, 12)
+#define TX_BD_CFA_META_PRI_SHIFT 12
+#define TX_BD_CFA_META_TPID_MASK GENMASK(17, 16)
+#define TX_BD_CFA_META_TPID_SHIFT 16
+#define TX_BD_CFA_META_KEY GENMASK(31, 28)
+#define TX_BD_CFA_META_KEY_SHIFT 28
+#define TX_BD_CFA_META_KEY_VLAN BIT(28)
+
+struct tx_bd_ext {
+ __le32 tx_bd_hsize_lflags;
+ __le32 tx_bd_mss;
+ __le32 tx_bd_cfa_action;
+ __le32 tx_bd_cfa_meta;
+};
+
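For orientation, here is how the flag and header-size fields above compose into the first word of this extended BD. This is a hedged, user-space sketch: the masks are copied from the definitions above, but the header length, and whether hardware expects it in bytes or some other unit, are illustrative assumptions only.

#include <stdint.h>
#include <stdio.h>

/* Copied from the definitions above so the sketch compiles standalone. */
#define TX_BD_FLAGS_LSO   (1u << 5)
#define TX_BD_HSIZE_SHIFT 16

int main(void)
{
	uint32_t hdr_len = 54;	/* illustrative: Ethernet + IPv4 + TCP */
	uint32_t word;

	/* LSO flag in the low byte, header size in bits 23:16. */
	word = TX_BD_FLAGS_LSO | (hdr_len << TX_BD_HSIZE_SHIFT);
	printf("tx_bd_hsize_lflags = 0x%08x\n", word);
	return 0;
}
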
+#define TX_CMP_SQ_CONS_IDX(txcmp) \
+ (le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK)
+
+#define RX_CMP_CMP_TYPE GENMASK(5, 0)
+#define RX_CMP_FLAGS_ERROR BIT(6)
+#define RX_CMP_FLAGS_PLACEMENT GENMASK(9, 7)
+#define RX_CMP_FLAGS_RSS_VALID BIT(10)
+#define RX_CMP_FLAGS_PKT_METADATA_PRESENT BIT(11)
+#define RX_CMP_FLAGS_ITYPES_SHIFT 12
+#define RX_CMP_FLAGS_ITYPES_MASK 0xf000
+#define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
+#define RX_CMP_FLAGS_ITYPE_IP (1 << 12)
+#define RX_CMP_FLAGS_ITYPE_TCP (2 << 12)
+#define RX_CMP_FLAGS_ITYPE_UDP (3 << 12)
+#define RX_CMP_FLAGS_ITYPE_FCOE (4 << 12)
+#define RX_CMP_FLAGS_ITYPE_ROCE (5 << 12)
+#define RX_CMP_FLAGS_ITYPE_PTP_WO_TS (8 << 12)
+#define RX_CMP_FLAGS_ITYPE_PTP_W_TS (9 << 12)
+#define RX_CMP_LEN GENMASK(31, 16)
+#define RX_CMP_LEN_SHIFT 16
+
+#define RX_CMP_V1 BIT(0)
+#define RX_CMP_AGG_BUFS GENMASK(5, 1)
+#define RX_CMP_AGG_BUFS_SHIFT 1
+#define RX_CMP_RSS_HASH_TYPE GENMASK(15, 9)
+#define RX_CMP_RSS_HASH_TYPE_SHIFT 9
+#define RX_CMP_V3_RSS_EXT_OP_LEGACY GENMASK(15, 12)
+#define RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT 12
+#define RX_CMP_V3_RSS_EXT_OP_NEW GENMASK(11, 8)
+#define RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT 8
+#define RX_CMP_PAYLOAD_OFFSET GENMASK(23, 16)
+#define RX_CMP_PAYLOAD_OFFSET_SHIFT 16
+#define RX_CMP_SUB_NS_TS GENMASK(19, 16)
+#define RX_CMP_SUB_NS_TS_SHIFT 16
+#define RX_CMP_METADATA1 GENMASK(31, 28)
+#define RX_CMP_METADATA1_SHIFT 28
+#define RX_CMP_METADATA1_TPID_SEL GENMASK(30, 28)
+#define RX_CMP_METADATA1_TPID_8021Q BIT(28)
+#define RX_CMP_METADATA1_TPID_8021AD (0x0 << 28)
+#define RX_CMP_METADATA1_VALID BIT(31)
+
+struct rx_cmp {
+ __le32 rx_cmp_len_flags_type;
+ u32 rx_cmp_opaque;
+ __le32 rx_cmp_misc_v1;
+ __le32 rx_cmp_rss_hash;
+};
+
+#define RX_CMP_FLAGS2_IP_CS_CALC BIT(0)
+#define RX_CMP_FLAGS2_L4_CS_CALC BIT(1)
+#define RX_CMP_FLAGS2_T_IP_CS_CALC BIT(2)
+#define RX_CMP_FLAGS2_T_L4_CS_CALC BIT(3)
+#define RX_CMP_FLAGS2_META_FORMAT_VLAN BIT(4)
+
+#define RX_CMP_FLAGS2_METADATA_TCI_MASK GENMASK(15, 0)
+#define RX_CMP_FLAGS2_METADATA_VID_MASK GENMASK(11, 0)
+#define RX_CMP_FLAGS2_METADATA_TPID_MASK GENMASK(31, 16)
+#define RX_CMP_FLAGS2_METADATA_TPID_SFT 16
+
+#define RX_CMP_V BIT(0)
+#define RX_CMPL_ERRORS_MASK GENMASK(15, 1)
+#define RX_CMPL_ERRORS_SFT 1
+#define RX_CMPL_ERRORS_BUFFER_ERROR_MASK GENMASK(3, 1)
+#define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+#define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1 << 1)
+#define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+#define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+#define RX_CMPL_ERRORS_IP_CS_ERROR BIT(4)
+#define RX_CMPL_ERRORS_L4_CS_ERROR BIT(5)
+#define RX_CMPL_ERRORS_T_IP_CS_ERROR BIT(6)
+#define RX_CMPL_ERRORS_T_L4_CS_ERROR BIT(7)
+#define RX_CMPL_ERRORS_CRC_ERROR BIT(8)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_MASK GENMASK(11, 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (0x0 << 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1 << 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2 << 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3 << 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4 << 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5 << 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6 << 9)
+#define RX_CMPL_ERRORS_PKT_ERROR_MASK GENMASK(15, 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR (0x0 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8 << 12)
+
+#define RX_CMPL_CFA_CODE_MASK GENMASK(31, 16)
+#define RX_CMPL_CFA_CODE_SFT 16
+#define RX_CMPL_METADATA0_TCI_MASK GENMASK(31, 16)
+#define RX_CMPL_METADATA0_VID_MASK GENMASK(27, 16)
+#define RX_CMPL_METADATA0_SFT 16
+
+struct rx_cmp_ext {
+ __le32 rx_cmp_flags2;
+ __le32 rx_cmp_meta_data;
+ __le32 rx_cmp_cfa_code_errors_v2;
+ __le32 rx_cmp_timestamp;
+};
+
+#define RX_AGG_CMP_TYPE GENMASK(5, 0)
+#define RX_AGG_CMP_LEN GENMASK(31, 16)
+#define RX_AGG_CMP_LEN_SHIFT 16
+#define RX_AGG_CMP_V BIT(0)
+#define RX_AGG_CMP_AGG_ID GENMASK(25, 16)
+#define RX_AGG_CMP_AGG_ID_SHIFT 16
+
+struct rx_agg_cmp {
+ __le32 rx_agg_cmp_len_flags_type;
+ u32 rx_agg_cmp_opaque;
+ __le32 rx_agg_cmp_v;
+ __le32 rx_agg_cmp_unused;
+};
+
+#define RX_CMP_L2_ERRORS \
+ cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
+
+#define RX_CMP_L4_CS_BITS \
+ (cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
+
+#define RX_CMP_L4_CS_ERR_BITS \
+ (cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
+
+#define RX_CMP_L4_CS_OK(rxcmp1) \
+ (((rxcmp1)->rx_cmp_flags2 & RX_CMP_L4_CS_BITS) && \
+ !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+#define RX_CMP_METADATA0_TCI(rxcmp1) \
+ ((le32_to_cpu((rxcmp1)->rx_cmp_cfa_code_errors_v2) & \
+ RX_CMPL_METADATA0_TCI_MASK) >> RX_CMPL_METADATA0_SFT)
+
+#define RX_CMP_ENCAP(rxcmp1) \
+ ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \
+ RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
+
+#define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & \
+ RX_CMP_V3_RSS_EXT_OP_LEGACY) >> RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT)
+
+#define RX_CMP_V3_HASH_TYPE_NEW(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_NEW) >>\
+ RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT)
+
+#define RX_CMP_V3_HASH_TYPE(bd, rxcmp) \
+ (((bd)->rss_cap & BNGE_RSS_CAP_RSS_TCAM) ? \
+ RX_CMP_V3_HASH_TYPE_NEW(rxcmp) : \
+ RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp))
+
+#define EXT_OP_INNER_4 0x0
+#define EXT_OP_OUTER_4 0x2
+#define EXT_OP_INNFL_3 0x8
+#define EXT_OP_OUTFL_3 0xa
+
+#define RX_CMP_VLAN_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_misc_v1 & cpu_to_le32(RX_CMP_METADATA1_VALID))
+
+#define RX_CMP_VLAN_TPID_SEL(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_METADATA1_TPID_SEL)
+
+#define RSS_PROFILE_ID_MASK GENMASK(4, 0)
+
+#define RX_CMP_HASH_TYPE(rxcmp) \
+ (((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
+ RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
+#define RX_CMP_HASH_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+
+#define TPA_AGG_AGG_ID(rx_agg) \
+ ((le32_to_cpu((rx_agg)->rx_agg_cmp_v) & \
+ RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT)
+
+#define RX_TPA_START_CMP_TYPE GENMASK(5, 0)
+#define RX_TPA_START_CMP_FLAGS GENMASK(15, 6)
+#define RX_TPA_START_CMP_FLAGS_SHIFT 6
+#define RX_TPA_START_CMP_FLAGS_ERROR BIT(6)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT GENMASK(9, 7)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO BIT(7)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
+#define RX_TPA_START_CMP_FLAGS_RSS_VALID BIT(10)
+#define RX_TPA_START_CMP_FLAGS_TIMESTAMP BIT(11)
+#define RX_TPA_START_CMP_FLAGS_ITYPES GENMASK(15, 12)
+#define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12
+#define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
+#define RX_TPA_START_CMP_LEN GENMASK(31, 16)
+#define RX_TPA_START_CMP_LEN_SHIFT 16
+#define RX_TPA_START_CMP_V1 BIT(0)
+#define RX_TPA_START_CMP_RSS_HASH_TYPE GENMASK(15, 9)
+#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
+#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE GENMASK(15, 7)
+#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7
+#define RX_TPA_START_CMP_AGG_ID GENMASK(25, 16)
+#define RX_TPA_START_CMP_AGG_ID_SHIFT 16
+#define RX_TPA_START_CMP_METADATA1 GENMASK(31, 28)
+#define RX_TPA_START_CMP_METADATA1_SHIFT 28
+#define RX_TPA_START_METADATA1_TPID_SEL GENMASK(30, 28)
+#define RX_TPA_START_METADATA1_TPID_8021Q BIT(28)
+#define RX_TPA_START_METADATA1_TPID_8021AD (0x0 << 28)
+#define RX_TPA_START_METADATA1_VALID BIT(31)
+
+struct rx_tpa_start_cmp {
+ __le32 rx_tpa_start_cmp_len_flags_type;
+ u32 rx_tpa_start_cmp_opaque;
+ __le32 rx_tpa_start_cmp_misc_v1;
+ __le32 rx_tpa_start_cmp_rss_hash;
+};
+
+#define TPA_START_HASH_VALID(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
+
+#define TPA_START_HASH_TYPE(rx_tpa_start) \
+ (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
+ RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
+#define TPA_START_V3_HASH_TYPE(rx_tpa_start) \
+ (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_V3_RSS_HASH_TYPE) >> \
+ RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
+#define TPA_START_AGG_ID(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+
+#define TPA_START_ERROR(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
+
+#define TPA_START_VLAN_VALID(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_misc_v1 & \
+ cpu_to_le32(RX_TPA_START_METADATA1_VALID))
+
+#define TPA_START_VLAN_TPID_SEL(rx_tpa_start) \
+ (le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_METADATA1_TPID_SEL)
+
+#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC BIT(0)
+#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC BIT(1)
+#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC BIT(2)
+#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC BIT(3)
+#define RX_TPA_START_CMP_FLAGS2_IP_TYPE BIT(8)
+#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID BIT(9)
+#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT GENMASK(11, 10)
+#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10
+#define RX_TPA_START_CMP_V3_FLAGS2_T_IP_TYPE BIT(10)
+#define RX_TPA_START_CMP_V3_FLAGS2_AGG_GRO BIT(11)
+#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL GENMASK(31, 16)
+#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16
+#define RX_TPA_START_CMP_V2 BIT(0)
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK GENMASK(3, 1)
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT 1
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
+#define RX_TPA_START_CMP_CFA_CODE GENMASK(31, 16)
+#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
+#define RX_TPA_START_CMP_METADATA0_TCI_MASK GENMASK(31, 16)
+#define RX_TPA_START_CMP_METADATA0_VID_MASK GENMASK(27, 16)
+#define RX_TPA_START_CMP_METADATA0_SFT 16
+
+struct rx_tpa_start_cmp_ext {
+ __le32 rx_tpa_start_cmp_flags2;
+ __le32 rx_tpa_start_cmp_metadata;
+ __le32 rx_tpa_start_cmp_cfa_code_v2;
+ __le32 rx_tpa_start_cmp_hdr_info;
+};
+
+#define TPA_START_CFA_CODE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
+
+#define TPA_START_IS_IPV6(rx_tpa_start) \
+ (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
+
+#define TPA_START_ERROR_CODE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \
+ RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
+
+#define TPA_START_METADATA0_TCI(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_METADATA0_TCI_MASK) >> \
+ RX_TPA_START_CMP_METADATA0_SFT)
+
+#define RX_TPA_END_CMP_TYPE GENMASK(5, 0)
+#define RX_TPA_END_CMP_FLAGS GENMASK(15, 6)
+#define RX_TPA_END_CMP_FLAGS_SHIFT 6
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT GENMASK(9, 7)
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT 7
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO BIT(7)
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
+#define RX_TPA_END_CMP_FLAGS_RSS_VALID BIT(10)
+#define RX_TPA_END_CMP_FLAGS_ITYPES GENMASK(15, 12)
+#define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT 12
+#define RX_TPA_END_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
+#define RX_TPA_END_CMP_LEN GENMASK(31, 16)
+#define RX_TPA_END_CMP_LEN_SHIFT 16
+#define RX_TPA_END_CMP_V1 BIT(0)
+#define RX_TPA_END_CMP_TPA_SEGS GENMASK(15, 8)
+#define RX_TPA_END_CMP_TPA_SEGS_SHIFT 8
+#define RX_TPA_END_CMP_AGG_ID GENMASK(25, 16)
+#define RX_TPA_END_CMP_AGG_ID_SHIFT 16
+#define RX_TPA_END_GRO_TS BIT(31)
+
+struct rx_tpa_end_cmp {
+ __le32 rx_tpa_end_cmp_len_flags_type;
+ u32 rx_tpa_end_cmp_opaque;
+ __le32 rx_tpa_end_cmp_misc_v1;
+ __le32 rx_tpa_end_cmp_tsdelta;
+};
+
+#define TPA_END_AGG_ID(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+
+#define TPA_END_TPA_SEGS(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
+
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO \
+ cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO & \
+ RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
+
+#define TPA_END_GRO(rx_tpa_end) \
+ ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type & \
+ RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
+
+#define TPA_END_GRO_TS(rx_tpa_end) \
+ (!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & \
+ cpu_to_le32(RX_TPA_END_GRO_TS)))
+
+#define RX_TPA_END_CMP_TPA_DUP_ACKS GENMASK(3, 0)
+#define RX_TPA_END_CMP_PAYLOAD_OFFSET GENMASK(23, 16)
+#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
+#define RX_TPA_END_CMP_AGG_BUFS GENMASK(31, 24)
+#define RX_TPA_END_CMP_AGG_BUFS_SHIFT 24
+#define RX_TPA_END_CMP_TPA_SEG_LEN GENMASK(15, 0)
+#define RX_TPA_END_CMP_V2 BIT(0)
+#define RX_TPA_END_CMP_ERRORS GENMASK(2, 1)
+#define RX_TPA_END_CMPL_ERRORS_SHIFT 1
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR (0x4 << 1)
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
+
+struct rx_tpa_end_cmp_ext {
+ __le32 rx_tpa_end_cmp_dup_acks;
+ __le32 rx_tpa_end_cmp_seg_len;
+ __le32 rx_tpa_end_cmp_errors_v2;
+ u32 rx_tpa_end_cmp_start_opaque;
+};
+
+#define TPA_END_ERRORS(rx_tpa_end_ext) \
+ ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \
+ cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+
+#define TPA_END_PAYLOAD_OFF(rx_tpa_end_ext) \
+ ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >> \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT)
+
+#define TPA_END_AGG_BUFS(rx_tpa_end_ext) \
+ ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
+ RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT)
+
+#define EVENT_DATA1_RESET_NOTIFY_FATAL(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
+
+#define EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION)
+
+#define EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2) \
+ ((data2) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK)
+
+#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \
+ (!!((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC))
+
+#define EVENT_DATA1_RECOVERY_ENABLED(data1) \
+ (!!((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED))
+
+#define BNGE_EVENT_ERROR_REPORT_TYPE(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT)
+
+#define BNGE_EVENT_INVALID_SIGNAL_DATA(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT)
+#endif /* _BNGE_HW_DEF_H_ */
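A quick illustration of how receive-path code consumes these definitions: the sketch below pulls the completion type, packet length, and aggregation-buffer count out of fabricated completion words, using the same mask values defined in this header (user-space only, compiled standalone; the sample words are made up for illustration).

#include <stdint.h>
#include <stdio.h>

/* Mask values copied from the header above. */
#define RX_CMP_CMP_TYPE        0x3fu          /* GENMASK(5, 0)   */
#define RX_CMP_LEN             0xffff0000u    /* GENMASK(31, 16) */
#define RX_CMP_LEN_SHIFT       16
#define RX_CMP_AGG_BUFS        0x3eu          /* GENMASK(5, 1)   */
#define RX_CMP_AGG_BUFS_SHIFT  1

int main(void)
{
	/* Fabricated CPU-order words for illustration only. */
	uint32_t len_flags_type = (1500u << RX_CMP_LEN_SHIFT) | 0x11u;
	uint32_t misc_v1 = (2u << RX_CMP_AGG_BUFS_SHIFT) | 1u;

	printf("type=0x%x len=%u agg_bufs=%u\n",
	       len_flags_type & RX_CMP_CMP_TYPE,
	       (len_flags_type & RX_CMP_LEN) >> RX_CMP_LEN_SHIFT,
	       (misc_v1 & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT);
	return 0;
}
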
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
index 6df629761d95..2ed9c92c8c30 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
@@ -4,7 +4,7 @@
#ifndef _BNGE_HWRM_H_
#define _BNGE_HWRM_H_
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
enum bnge_hwrm_ctx_flags {
BNGE_HWRM_INTERNAL_CTX_OWNED = BIT(0),
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
index 198f49b40dbf..84c90a957719 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
@@ -5,7 +5,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
#include <linux/if_vlan.h>
#include <net/netdev_queues.h>
@@ -1183,3 +1183,68 @@ int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx)
req->async_event_cr = cpu_to_le16(idx);
return bnge_hwrm_req_send(bd, req);
}
+
+#define BNGE_DFLT_TUNL_TPA_BMAP \
+ (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
+ VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
+ VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
+
+static void bnge_hwrm_vnic_update_tunl_tpa(struct bnge_dev *bd,
+ struct hwrm_vnic_tpa_cfg_input *req)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+ u32 tunl_tpa_bmap = BNGE_DFLT_TUNL_TPA_BMAP;
+
+ if (!(bd->fw_cap & BNGE_FW_CAP_VNIC_TUNNEL_TPA))
+ return;
+
+ if (bn->vxlan_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
+ if (bn->vxlan_gpe_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
+ if (bn->nge_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
+
+ req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
+ req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
+}
+
+int bnge_hwrm_vnic_set_tpa(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ u32 tpa_flags)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+ struct hwrm_vnic_tpa_cfg_input *req;
+ int rc;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ return 0;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_TPA_CFG);
+ if (rc)
+ return rc;
+
+ if (tpa_flags) {
+ u32 flags;
+
+ flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
+ VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
+ VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
+ VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
+ VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
+ if (tpa_flags & BNGE_NET_EN_GRO)
+ flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
+
+ req->flags = cpu_to_le32(flags);
+ req->enables =
+ cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
+ VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
+ VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
+ req->max_agg_segs = cpu_to_le16(MAX_TPA_SEGS);
+ req->max_aggs = cpu_to_le16(bn->max_tpa);
+ req->min_agg_len = cpu_to_le32(512);
+ bnge_hwrm_vnic_update_tunl_tpa(bd, req);
+ }
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+ return bnge_hwrm_req_send(bd, req);
+}
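The tunnel-TPA helper above ORs per-tunnel enable bits into a default GRE/IPv4/IPv6 bitmap depending on which UDP tunnel ports are configured. A hedged sketch of the same selection logic follows; the bit positions are placeholders, since the real VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_* values come from the firmware interface header not shown here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions -- hypothetical, for illustration only. */
#define TNL_TPA_GRE       (1u << 0)
#define TNL_TPA_IPV4      (1u << 1)
#define TNL_TPA_IPV6      (1u << 2)
#define TNL_TPA_VXLAN     (1u << 3)
#define TNL_TPA_VXLAN_GPE (1u << 4)
#define TNL_TPA_GENEVE    (1u << 5)

static uint32_t tunl_tpa_bitmap(bool vxlan, bool vxlan_gpe, bool geneve)
{
	/* Always-on default set, mirroring BNGE_DFLT_TUNL_TPA_BMAP. */
	uint32_t bmap = TNL_TPA_GRE | TNL_TPA_IPV4 | TNL_TPA_IPV6;

	if (vxlan)
		bmap |= TNL_TPA_VXLAN;
	if (vxlan_gpe)
		bmap |= TNL_TPA_VXLAN_GPE;
	if (geneve)
		bmap |= TNL_TPA_GENEVE;
	return bmap;
}

int main(void)
{
	/* e.g. only a VXLAN port is configured */
	printf("bitmap=0x%x\n", tunl_tpa_bitmap(true, false, false));
	return 0;
}
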
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
index 042f28e84a05..38b046237feb 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
@@ -55,4 +55,6 @@ int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
struct bnge_ring_struct *ring,
u32 ring_type, u32 map_index);
int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx);
+int bnge_hwrm_vnic_set_tpa(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ u32 tpa_flags);
#endif /* _BNGE_HWRM_LIB_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
index 832eeb960bd2..6ab317f1c16e 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
@@ -10,9 +10,12 @@
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <net/netdev_lock.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <net/ip.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
@@ -20,6 +24,7 @@
#include "bnge_hwrm_lib.h"
#include "bnge_ethtool.h"
#include "bnge_rmem.h"
+#include "bnge_txrx.h"
#define BNGE_RING_TO_TC_OFF(bd, tx) \
((tx) % (bd)->tx_nr_rings_per_tc)
@@ -372,11 +377,37 @@ static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
}
}
+static void bnge_free_one_tpa_info_data(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ int i;
+
+ for (i = 0; i < bn->max_tpa; i++) {
+ struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[i];
+ u8 *data = tpa_info->data;
+
+ if (!data)
+ continue;
+
+ tpa_info->data = NULL;
+ page_pool_free_va(rxr->head_pool, data, false);
+ }
+}
+
static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
struct bnge_rx_ring_info *rxr)
{
+ struct bnge_tpa_idx_map *map;
+
+ if (rxr->rx_tpa)
+ bnge_free_one_tpa_info_data(bn, rxr);
+
bnge_free_one_rx_ring_bufs(bn, rxr);
bnge_free_one_agg_ring_bufs(bn, rxr);
+
+ map = rxr->rx_tpa_idx_map;
+ if (map)
+ memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
}
static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
@@ -391,9 +422,118 @@ static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
}
+static void bnge_free_tx_skbs(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ u16 max_idx;
+ int i;
+
+ max_idx = bn->tx_nr_pages * TX_DESC_CNT;
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ int j;
+
+ if (!txr->tx_buf_ring)
+ continue;
+
+ for (j = 0; j < max_idx;) {
+ struct bnge_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+ struct sk_buff *skb;
+ int k, last;
+
+ skb = tx_buf->skb;
+ if (!skb) {
+ j++;
+ continue;
+ }
+
+ tx_buf->skb = NULL;
+
+ dma_unmap_single(bd->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+
+ last = tx_buf->nr_frags;
+ j += 2;
+ for (k = 0; k < last; k++, j++) {
+ int ring_idx = j & bn->tx_ring_mask;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
+
+ tx_buf = &txr->tx_buf_ring[ring_idx];
+ dma_unmap_page(bd->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ }
+ dev_kfree_skb(skb);
+ }
+ netdev_tx_reset_queue(netdev_get_tx_queue(bd->netdev, i));
+ }
+}
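The j += 2 in the cleanup loop reflects the descriptor layout on this ring: each packet starts with a long first BD that occupies two ring slots (the base tx_bd plus the tx_bd_ext shown earlier), followed by one BD per page fragment. A minimal sketch of that slot accounting, with a hypothetical ring size:

#include <stdint.h>
#include <stdio.h>

/* Slots consumed by one packet: 2 for the long first BD (base + ext),
 * plus one per fragment. Mirrors the j += 2 / per-frag j++ walk above.
 */
static unsigned int tx_slots_for_pkt(unsigned int nr_frags)
{
	return 2 + nr_frags;
}

int main(void)
{
	uint32_t ring_mask = 511;	/* hypothetical: 512-entry ring */
	uint32_t j = 510;		/* near the wrap point */
	unsigned int nr_frags = 3;

	/* Advance past one packet, wrapping with the ring mask the same
	 * way the cleanup loop indexes tx_buf_ring.
	 */
	j = (j + tx_slots_for_pkt(nr_frags)) & ring_mask;
	printf("next slot after packet: %u\n", j);
	return 0;
}
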
+
static void bnge_free_all_rings_bufs(struct bnge_net *bn)
{
bnge_free_rx_ring_pair_bufs(bn);
+ bnge_free_tx_skbs(bn);
+}
+
+static void bnge_free_tpa_info(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+
+ kfree(rxr->rx_tpa_idx_map);
+ rxr->rx_tpa_idx_map = NULL;
+ if (rxr->rx_tpa) {
+ for (j = 0; j < bn->max_tpa; j++) {
+ kfree(rxr->rx_tpa[j].agg_arr);
+ rxr->rx_tpa[j].agg_arr = NULL;
+ }
+ }
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+ }
+}
+
+static int bnge_alloc_tpa_info(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ if (!bd->max_tpa_v2)
+ return 0;
+
+ bn->max_tpa = max_t(u16, bd->max_tpa_v2, MAX_TPA);
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+
+ rxr->rx_tpa = kcalloc(bn->max_tpa, sizeof(struct bnge_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ goto err_free_tpa_info;
+
+ for (j = 0; j < bn->max_tpa; j++) {
+ struct rx_agg_cmp *agg;
+
+ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+ if (!agg)
+ goto err_free_tpa_info;
+ rxr->rx_tpa[j].agg_arr = agg;
+ }
+ rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa_idx_map)
+ goto err_free_tpa_info;
+ }
+ return 0;
+
+err_free_tpa_info:
+ bnge_free_tpa_info(bn);
+ return -ENOMEM;
}
static void bnge_free_rx_rings(struct bnge_net *bn)
@@ -401,6 +541,7 @@ static void bnge_free_rx_rings(struct bnge_net *bn)
struct bnge_dev *bd = bn->bd;
int i;
+ bnge_free_tpa_info(bn);
for (i = 0; i < bd->rx_nr_rings; i++) {
struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
struct bnge_ring_struct *ring;
@@ -525,6 +666,12 @@ static int bnge_alloc_rx_rings(struct bnge_net *bn)
goto err_free_rx_rings;
}
}
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA) {
+ rc = bnge_alloc_tpa_info(bn);
+ if (rc)
+ goto err_free_rx_rings;
+ }
return rc;
err_free_rx_rings:
@@ -856,6 +1003,13 @@ u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr)
return txr->tx_cpr->ring_struct.fw_ring_id;
}
+static void bnge_db_nq_arm(struct bnge_net *bn,
+ struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_ARM |
+ DB_RING_IDX(db, idx), db->doorbell);
+}
+
static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
{
bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK |
@@ -878,12 +1032,6 @@ static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
return nqr->ring_struct.map_idx;
}
-static irqreturn_t bnge_msix(int irq, void *dev_instance)
-{
- /* NAPI scheduling to be added in a future patch */
- return IRQ_HANDLED;
-}
-
static void bnge_init_nq_tree(struct bnge_net *bn)
{
struct bnge_dev *bd = bn->bd;
@@ -925,9 +1073,9 @@ static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
return netmem;
}
-static u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
- struct bnge_rx_ring_info *rxr,
- gfp_t gfp)
+u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr,
+ gfp_t gfp)
{
unsigned int offset;
struct page *page;
@@ -941,9 +1089,8 @@ static u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
return page_address(page) + offset;
}
-static int bnge_alloc_rx_data(struct bnge_net *bn,
- struct bnge_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+int bnge_alloc_rx_data(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
struct rx_bd *rxbd;
@@ -995,7 +1142,7 @@ static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
return 0;
}
-static u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
+u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
{
u16 next, max = rxr->rx_agg_bmap_size;
@@ -1005,9 +1152,9 @@ static u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
return next;
}
-static int bnge_alloc_rx_netmem(struct bnge_net *bn,
- struct bnge_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+int bnge_alloc_rx_netmem(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct bnge_sw_rx_agg_bd *rx_agg_buf;
u16 sw_prod = rxr->rx_sw_agg_prod;
@@ -1070,6 +1217,30 @@ err_free_one_agg_ring_bufs:
return -ENOMEM;
}
+static int bnge_alloc_one_tpa_info_data(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ dma_addr_t mapping;
+ u8 *data;
+ int i;
+
+ for (i = 0; i < bn->max_tpa; i++) {
+ data = __bnge_alloc_rx_frag(bn, &mapping, rxr,
+ GFP_KERNEL);
+ if (!data)
+ goto err_free_tpa_info_data;
+
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bn->rx_offset;
+ rxr->rx_tpa[i].mapping = mapping;
+ }
+ return 0;
+
+err_free_tpa_info_data:
+ bnge_free_one_tpa_info_data(bn, rxr);
+ return -ENOMEM;
+}
+
static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
{
struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
@@ -1084,8 +1255,17 @@ static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
if (rc)
goto err_free_one_rx_ring_bufs;
}
+
+ if (rxr->rx_tpa) {
+ rc = bnge_alloc_one_tpa_info_data(bn, rxr);
+ if (rc)
+ goto err_free_one_agg_ring_bufs;
+ }
+
return 0;
+err_free_one_agg_ring_bufs:
+ bnge_free_one_agg_ring_bufs(bn, rxr);
err_free_one_rx_ring_bufs:
bnge_free_one_rx_ring_bufs(bn, rxr);
return rc;
@@ -1755,6 +1935,85 @@ skip_uc:
return rc;
}
+static void bnge_disable_int(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID)
+ bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
+ }
+}
+
+static void bnge_disable_int_sync(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ bnge_disable_int(bn);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+
+ synchronize_irq(bd->irq_tbl[map_idx].vector);
+ }
+}
+
+static void bnge_enable_int(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+
+ nqr = &bnapi->nq_ring;
+ bnge_db_nq_arm(bn, &nqr->nq_db, nqr->nq_raw_cons);
+ }
+}
+
+static void bnge_disable_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (test_and_set_bit(BNGE_STATE_NAPI_DISABLED, &bn->state))
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ napi_disable_locked(&bnapi->napi);
+ }
+}
+
+static void bnge_enable_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ clear_bit(BNGE_STATE_NAPI_DISABLED, &bn->state);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ bnapi->in_reset = false;
+ bnapi->tx_fault = 0;
+
+ napi_enable_locked(&bnapi->napi);
+ }
+}
+
static void bnge_hwrm_vnic_free(struct bnge_net *bn)
{
int i;
@@ -1886,6 +2145,12 @@ static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path)
bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path);
}
+ /* The completion rings are about to be freed. Once they are gone,
+ * the IRQ doorbells no longer work, so disable and synchronize
+ * IRQs here first.
+ */
+ bnge_disable_int_sync(bn);
+
for (i = 0; i < bd->nq_nr_rings; i++) {
struct bnge_napi *bnapi = bn->bnapi[i];
struct bnge_nq_ring_info *nqr;
@@ -2015,6 +2280,27 @@ err_free_irq:
return rc;
}
+static int bnge_set_tpa(struct bnge_net *bn, bool set_tpa)
+{
+ u32 tpa_flags = 0;
+ int rc, i;
+
+ if (set_tpa)
+ tpa_flags = bn->priv_flags & BNGE_NET_EN_TPA;
+ else if (BNGE_NO_FW_ACCESS(bn->bd))
+ return 0;
+ for (i = 0; i < bn->nr_vnics; i++) {
+ rc = bnge_hwrm_vnic_set_tpa(bn->bd, &bn->vnic_info[i],
+ tpa_flags);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic set tpa failure for vnic %d: rc=%x\n",
+ i, rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
static int bnge_init_chip(struct bnge_net *bn)
{
struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
@@ -2049,6 +2335,12 @@ static int bnge_init_chip(struct bnge_net *bn)
if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA)
bnge_hwrm_update_rss_hash_cfg(bn);
+ if (bn->priv_flags & BNGE_NET_EN_TPA) {
+ rc = bnge_set_tpa(bn, true);
+ if (rc)
+ goto err_out;
+ }
+
/* Filter for default vnic 0 */
rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr);
if (rc) {
@@ -2085,16 +2377,6 @@ err_out:
return rc;
}
-static int bnge_napi_poll(struct napi_struct *napi, int budget)
-{
- int work_done = 0;
-
- /* defer NAPI implementation to next patch series */
- napi_complete_done(napi, work_done);
-
- return work_done;
-}
-
static void bnge_init_napi(struct bnge_net *bn)
{
struct bnge_dev *bd = bn->bd;
@@ -2161,6 +2443,42 @@ err_free_rx_ring_pair_bufs:
return rc;
}
+static void bnge_tx_disable(struct bnge_net *bn)
+{
+ struct bnge_tx_ring_info *txr;
+ int i;
+
+ if (bn->tx_ring) {
+ for (i = 0; i < bn->bd->tx_nr_rings; i++) {
+ txr = &bn->tx_ring[i];
+ WRITE_ONCE(txr->dev_state, BNGE_DEV_STATE_CLOSING);
+ }
+ }
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
+
+ if (!bn->netdev)
+ return;
+ /* Drop carrier first to prevent TX timeout */
+ netif_carrier_off(bn->netdev);
+ /* Stop all TX queues */
+ netif_tx_disable(bn->netdev);
+}
+
+static void bnge_tx_enable(struct bnge_net *bn)
+{
+ struct bnge_tx_ring_info *txr;
+ int i;
+
+ for (i = 0; i < bn->bd->tx_nr_rings; i++) {
+ txr = &bn->tx_ring[i];
+ WRITE_ONCE(txr->dev_state, 0);
+ }
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
+ netif_tx_wake_all_queues(bn->netdev);
+}
+
static int bnge_open_core(struct bnge_net *bn)
{
struct bnge_dev *bd = bn->bd;
@@ -2192,7 +2510,14 @@ static int bnge_open_core(struct bnge_net *bn)
netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc);
goto err_free_irq;
}
+
+ bnge_enable_napi(bn);
+
set_bit(BNGE_STATE_OPEN, &bd->state);
+
+ bnge_enable_int(bn);
+
+ bnge_tx_enable(bn);
return 0;
err_free_irq:
@@ -2203,13 +2528,6 @@ err_del_napi:
return rc;
}
-static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
-}
-
static int bnge_open(struct net_device *dev)
{
struct bnge_net *bn = netdev_priv(dev);
@@ -2224,8 +2542,7 @@ static int bnge_open(struct net_device *dev)
static int bnge_shutdown_nic(struct bnge_net *bn)
{
- /* TODO: close_path = 0 until we make NAPI functional */
- bnge_hwrm_resource_free(bn, 0);
+ bnge_hwrm_resource_free(bn, 1);
return 0;
}
@@ -2233,8 +2550,11 @@ static void bnge_close_core(struct bnge_net *bn)
{
struct bnge_dev *bd = bn->bd;
+ bnge_tx_disable(bn);
+
clear_bit(BNGE_STATE_OPEN, &bd->state);
bnge_shutdown_nic(bn);
+ bnge_disable_napi(bn);
bnge_free_all_rings_bufs(bn);
bnge_free_irq(bn);
bnge_del_napi(bn);
@@ -2255,6 +2575,7 @@ static const struct net_device_ops bnge_netdev_ops = {
.ndo_open = bnge_open,
.ndo_stop = bnge_close,
.ndo_start_xmit = bnge_start_xmit,
+ .ndo_features_check = bnge_features_check,
};
static void bnge_init_mac_addr(struct bnge_dev *bd)
@@ -2295,7 +2616,6 @@ void bnge_set_ring_params(struct bnge_dev *bd)
rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- bn->rx_copy_thresh = BNGE_RX_COPY_THRESH;
ring_size = bn->rx_ring_size;
bn->rx_agg_ring_size = 0;
bn->rx_agg_nr_pages = 0;
@@ -2334,7 +2654,10 @@ void bnge_set_ring_params(struct bnge_dev *bd)
bn->rx_agg_ring_size = agg_ring_size;
bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;
- rx_size = SKB_DATA_ALIGN(BNGE_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_size = max3(BNGE_DEFAULT_RX_COPYBREAK,
+ bn->rx_copybreak,
+ bn->netdev->cfg_pending->hds_thresh);
+ rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
@@ -2367,6 +2690,17 @@ void bnge_set_ring_params(struct bnge_dev *bd)
bn->cp_ring_mask = bn->cp_bit - 1;
}
+static void bnge_init_ring_params(struct bnge_net *bn)
+{
+ u32 rx_size;
+
+ bn->rx_copybreak = BNGE_DEFAULT_RX_COPYBREAK;
+ /* Try to fit 4 chunks into a 4k page */
+ rx_size = SZ_1K -
+ NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bn->netdev->cfg->hds_thresh = max(BNGE_DEFAULT_RX_COPYBREAK, rx_size);
+}
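To make the "4 chunks into a 4k page" target concrete with illustrative, config-dependent numbers: if NET_SKB_PAD is 64 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) comes to 320, then rx_size = 1024 - 64 - 320 = 640 bytes, so each buffer plus its pad and shared-info tail occupies roughly one 1 KB chunk and four such chunks tile a 4 KB page; the default hds_thresh is then max(256, 640) = 640.
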
+
int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
{
struct net_device *netdev;
@@ -2456,6 +2790,7 @@ int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
bn->rx_dir = DMA_FROM_DEVICE;
bnge_set_tpa_flags(bd);
+ bnge_init_ring_params(bn);
bnge_set_ring_params(bd);
bnge_init_l2_fltr_tbl(bn);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
index fb3b961536ba..70f1a7c24814 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
@@ -4,10 +4,11 @@
#ifndef _BNGE_NETDEV_H_
#define _BNGE_NETDEV_H_
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/refcount.h>
#include "bnge_db.h"
+#include "bnge_hw_def.h"
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -76,6 +77,7 @@ struct tx_cmp {
#define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL
#define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL
#define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define CMPL_BA_TY_HWRM_ASY_EVT CMPL_BASE_TYPE_HWRM_ASYNC_EVENT
#define TX_CMP_FLAGS_ERROR (1 << 6)
#define TX_CMP_FLAGS_PUSH (1 << 7)
u32 tx_cmp_opaque;
@@ -135,7 +137,8 @@ struct bnge_ring_grp_info {
u16 nq_fw_ring_id;
};
-#define BNGE_RX_COPY_THRESH 256
+#define BNGE_DEFAULT_RX_COPYBREAK 256
+#define BNGE_MAX_RX_COPYBREAK 1024
#define BNGE_HW_FEATURE_VLAN_ALL_RX \
(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
@@ -150,6 +153,45 @@ enum {
#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
+#define BNGE_NO_FW_ACCESS(bd) (pci_channel_offline((bd)->pdev))
+
+#define MAX_TPA 256
+#define MAX_TPA_MASK (MAX_TPA - 1)
+#define MAX_TPA_SEGS 0x3f
+
+#define BNGE_TPA_INNER_L3_OFF(hdr_info) \
+ (((hdr_info) >> 18) & 0x1ff)
+
+#define BNGE_TPA_INNER_L2_OFF(hdr_info) \
+ (((hdr_info) >> 9) & 0x1ff)
+
+#define BNGE_TPA_OUTER_L3_OFF(hdr_info) \
+ ((hdr_info) & 0x1ff)
+
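These three extractors unpack the packed header-offset word carried by TPA-start completions (rx_tpa_start_cmp_hdr_info, stored in tpa_info->hdr_info). A small user-space sketch using the same shifts and 9-bit masks, run on a fabricated hdr_info value:

#include <stdint.h>
#include <stdio.h>

/* Same packing as the macros above: three 9-bit offsets. */
#define INNER_L3_OFF(h) (((h) >> 18) & 0x1ff)
#define INNER_L2_OFF(h) (((h) >> 9) & 0x1ff)
#define OUTER_L3_OFF(h) ((h) & 0x1ff)

int main(void)
{
	/* Fabricated: outer L3 at 14, inner L2 at 50, inner L3 at 64. */
	uint32_t hdr_info = (64u << 18) | (50u << 9) | 14u;

	printf("outer_l3=%u inner_l2=%u inner_l3=%u\n",
	       OUTER_L3_OFF(hdr_info), INNER_L2_OFF(hdr_info),
	       INNER_L3_OFF(hdr_info));
	return 0;
}
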
+struct bnge_tpa_idx_map {
+ u16 agg_id_tbl[1024];
+ DECLARE_BITMAP(agg_idx_bmap, MAX_TPA);
+};
+
+struct bnge_tpa_info {
+ void *data;
+ u8 *data_ptr;
+ dma_addr_t mapping;
+ u16 len;
+ unsigned short gso_type;
+ u32 flags2;
+ u32 metadata;
+ enum pkt_hash_types hash_type;
+ u32 rss_hash;
+ u32 hdr_info;
+
+ u16 cfa_code; /* cfa_code in TPA start compl */
+ u8 agg_count;
+ bool vlan_valid;
+ bool cfa_code_valid;
+ struct rx_agg_cmp *agg_arr;
+};
+
/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
* BD because the first TX BD is always a long BD.
*/
@@ -172,10 +214,16 @@ enum {
#define RING_RX_AGG(bn, idx) ((idx) & (bn)->rx_agg_ring_mask)
#define NEXT_RX_AGG(idx) ((idx) + 1)
+#define BNGE_NQ_HDL_IDX_MASK 0x00ffffff
+#define BNGE_NQ_HDL_TYPE_MASK 0xff000000
#define BNGE_NQ_HDL_TYPE_SHIFT 24
#define BNGE_NQ_HDL_TYPE_RX 0x00
#define BNGE_NQ_HDL_TYPE_TX 0x01
+#define BNGE_NQ_HDL_IDX(hdl) ((hdl) & BNGE_NQ_HDL_IDX_MASK)
+#define BNGE_NQ_HDL_TYPE(hdl) (((hdl) & BNGE_NQ_HDL_TYPE_MASK) >> \
+ BNGE_NQ_HDL_TYPE_SHIFT)
+
struct bnge_net {
struct bnge_dev *bd;
struct net_device *netdev;
@@ -186,7 +234,7 @@ struct bnge_net {
u32 rx_buf_size;
u32 rx_buf_use_size; /* usable size */
u32 rx_agg_ring_size;
- u32 rx_copy_thresh;
+ u32 rx_copybreak;
u32 rx_ring_mask;
u32 rx_agg_ring_mask;
u16 rx_nr_pages;
@@ -231,6 +279,15 @@ struct bnge_net {
u8 rss_hash_key_updated:1;
int rsscos_nr_ctxs;
u32 stats_coal_ticks;
+
+ unsigned long state;
+#define BNGE_STATE_NAPI_DISABLED 0
+
+ u32 msg_enable;
+ u16 max_tpa;
+ __be16 vxlan_port;
+ __be16 nge_port;
+ __be16 vxlan_gpe_port;
};
#define BNGE_DEFAULT_RX_RING_SIZE 511
@@ -277,9 +334,25 @@ void bnge_set_ring_params(struct bnge_dev *bd);
txr = (iter < BNGE_MAX_TXR_PER_NAPI - 1) ? \
(bnapi)->tx_ring[++iter] : NULL)
+#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
+ ((db)->db_epoch_shift))
+
+#define DB_TOGGLE(tgl) ((tgl) << DBR_TOGGLE_SFT)
+
+#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
+ DB_EPOCH(db, idx))
+
#define BNGE_SET_NQ_HDL(cpr) \
(((cpr)->cp_ring_type << BNGE_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)
+#define BNGE_DB_NQ(bd, db, idx) \
+ bnge_writeq(bd, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
+ (db)->doorbell)
+
+#define BNGE_DB_NQ_ARM(bd, db, idx) \
+ bnge_writeq(bd, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
+ DB_RING_IDX(db, idx), (db)->doorbell)
+
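DB_RING_IDX combines the masked ring index with an epoch bit that flips each time the index wraps, letting hardware detect stale doorbell writes. A sketch of the composition with hypothetical mask and shift values (the real geometry lives in bnge_db_info):

#include <stdint.h>
#include <stdio.h>

#define DB_RING_MASK   0xfffu   /* hypothetical: 4096-entry ring */
#define DB_EPOCH_MASK  0x1000u  /* first bit above the ring index */
#define DB_EPOCH_SHIFT 12       /* hypothetical: epoch lands in bit 24 */

static uint32_t db_ring_idx(uint32_t idx)
{
	/* Same form as DB_EPOCH()/DB_RING_IDX() above: the wrap bit is
	 * relocated left into the doorbell's epoch position and ORed
	 * with the masked ring index.
	 */
	return (idx & DB_RING_MASK) |
	       ((idx & DB_EPOCH_MASK) << DB_EPOCH_SHIFT);
}

int main(void)
{
	printf("idx 4095 -> 0x%08x\n", db_ring_idx(4095)); /* epoch 0 */
	printf("idx 4096 -> 0x%08x\n", db_ring_idx(4096)); /* epoch 1 */
	return 0;
}
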
struct bnge_stats_mem {
u64 *sw_stats;
u64 *hw_masks;
@@ -288,6 +361,25 @@ struct bnge_stats_mem {
int len;
};
+struct nqe_cn {
+ __le16 type;
+ #define NQ_CN_TYPE_MASK 0x3fUL
+ #define NQ_CN_TYPE_SFT 0
+ #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
+ #define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
+ #define NQ_CN_TOGGLE_MASK 0xc0UL
+ #define NQ_CN_TOGGLE_SFT 6
+ __le16 reserved16;
+ __le32 cq_handle_low;
+ __le32 v;
+ #define NQ_CN_V 0x1UL
+ __le32 cq_handle_high;
+};
+
+#define NQE_CN_TYPE(type) ((type) & NQ_CN_TYPE_MASK)
+#define NQE_CN_TOGGLE(type) (((type) & NQ_CN_TOGGLE_MASK) >> \
+ NQ_CN_TOGGLE_SFT)
+
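Each NQ entry encodes its type in bits 5:0 and a two-bit toggle in bits 7:6 of the type word, with the target completion-queue handle split across two 32-bit halves. A user-space sketch of decoding one fabricated entry with the masks above:

#include <stdint.h>
#include <stdio.h>

/* Field layout copied from struct nqe_cn above. */
#define NQ_CN_TYPE_MASK            0x3fu
#define NQ_CN_TOGGLE_MASK          0xc0u
#define NQ_CN_TOGGLE_SFT           6
#define NQ_CN_TYPE_CQ_NOTIFICATION 0x30u

int main(void)
{
	/* Fabricated entry: CQ notification, toggle = 1. */
	uint16_t type = NQ_CN_TYPE_CQ_NOTIFICATION | (1u << NQ_CN_TOGGLE_SFT);
	uint32_t cq_handle_low = 0x00000001u;   /* illustrative handle */
	uint32_t cq_handle_high = 0;
	uint64_t handle = ((uint64_t)cq_handle_high << 32) | cq_handle_low;

	printf("type=0x%x toggle=%u cq_handle=0x%llx\n",
	       type & NQ_CN_TYPE_MASK,
	       (type & NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT,
	       (unsigned long long)handle);
	return 0;
}
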
struct bnge_cp_ring_info {
struct bnge_napi *bnapi;
dma_addr_t *desc_mapping;
@@ -297,6 +389,10 @@ struct bnge_cp_ring_info {
u8 cp_idx;
u32 cp_raw_cons;
struct bnge_db_info cp_db;
+ bool had_work_done;
+ bool has_more_work;
+ bool had_nqe_notify;
+ u8 toggle;
};
struct bnge_nq_ring_info {
@@ -309,8 +405,9 @@ struct bnge_nq_ring_info {
struct bnge_stats_mem stats;
u32 hw_stats_ctx_id;
+ bool has_more_work;
- int cp_ring_count;
+ u16 cp_ring_count;
struct bnge_cp_ring_info *cp_ring_arr;
};
@@ -336,6 +433,9 @@ struct bnge_rx_ring_info {
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+ struct bnge_tpa_info *rx_tpa;
+ struct bnge_tpa_idx_map *rx_tpa_idx_map;
+
struct bnge_ring_struct rx_ring_struct;
struct bnge_ring_struct rx_agg_ring_struct;
struct page_pool *page_pool;
@@ -373,6 +473,14 @@ struct bnge_napi {
struct bnge_nq_ring_info nq_ring;
struct bnge_rx_ring_info *rx_ring;
struct bnge_tx_ring_info *tx_ring[BNGE_MAX_TXR_PER_NAPI];
+ u8 events;
+#define BNGE_RX_EVENT 1
+#define BNGE_AGG_EVENT 2
+#define BNGE_TX_EVENT 4
+#define BNGE_REDIRECT_EVENT 8
+#define BNGE_TX_CMP_EVENT 0x10
+ bool in_reset;
+ bool tx_fault;
};
#define INVALID_STATS_CTX_ID -1
@@ -451,4 +559,11 @@ struct bnge_l2_filter {
u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr);
u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr);
void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+int bnge_alloc_rx_data(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp);
+u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx);
+u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr, gfp_t gfp);
+int bnge_alloc_rx_netmem(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp);
#endif /* _BNGE_NETDEV_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
index 79f5ce2e5d08..ee97be440c33 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
@@ -9,7 +9,7 @@
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
#include "bnge.h"
#include "bnge_hwrm_lib.h"
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_txrx.c b/drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
new file mode 100644
index 000000000000..a2616f037557
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
@@ -0,0 +1,1642 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/gro.h>
+#include <linux/skbuff.h>
+#include <net/page_pool/helpers.h>
+#include <linux/if_vlan.h>
+#include <net/udp_tunnel.h>
+#include <net/dst_metadata.h>
+#include <net/netdev_queues.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_netdev.h"
+#include "bnge_rmem.h"
+#include "bnge_txrx.h"
+
+irqreturn_t bnge_msix(int irq, void *dev_instance)
+{
+ struct bnge_napi *bnapi = dev_instance;
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_net *bn;
+ u32 cons;
+
+ bn = bnapi->bn;
+ nqr = &bnapi->nq_ring;
+ cons = RING_CMP(bn, nqr->nq_raw_cons);
+
+ prefetch(&nqr->desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+ napi_schedule(&bnapi->napi);
+ return IRQ_HANDLED;
+}
+
+static struct rx_agg_cmp *bnge_get_tpa_agg(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 agg_id, u16 curr)
+{
+ struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
+
+ return &tpa_info->agg_arr[curr];
+}
+
+static struct rx_agg_cmp *bnge_get_agg(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u16 cp_cons, u16 curr)
+{
+ struct rx_agg_cmp *agg;
+
+ cp_cons = RING_CMP(bn, ADV_RAW_CMP(cp_cons, curr));
+ agg = (struct rx_agg_cmp *)
+ &cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ return agg;
+}
+
+static void bnge_reuse_rx_agg_bufs(struct bnge_cp_ring_info *cpr, u16 idx,
+ u16 start, u32 agg_bufs, bool tpa)
+{
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct bnge_net *bn = bnapi->bn;
+ struct bnge_rx_ring_info *rxr;
+ u16 prod, sw_prod;
+ u32 i;
+
+ rxr = bnapi->rx_ring;
+ sw_prod = rxr->rx_sw_agg_prod;
+ prod = rxr->rx_agg_prod;
+
+ for (i = 0; i < agg_bufs; i++) {
+ struct bnge_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_agg_cmp *agg;
+ struct rx_bd *prod_bd;
+ netmem_ref netmem;
+ u16 cons;
+
+ if (tpa)
+ agg = bnge_get_tpa_agg(bn, rxr, idx, start + i);
+ else
+ agg = bnge_get_agg(bn, cpr, idx, start + i);
+ cons = agg->rx_agg_cmp_opaque;
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ prod_rx_buf = &rxr->rx_agg_buf_ring[sw_prod];
+ cons_rx_buf = &rxr->rx_agg_buf_ring[cons];
+
+ /* It is possible for sw_prod to be equal to cons, so
+ * set cons_rx_buf->netmem to 0 first.
+ */
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+ prod_rx_buf->netmem = netmem;
+ prod_rx_buf->offset = cons_rx_buf->offset;
+
+ prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+ prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)]
+ [RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
+ prod_bd->rx_bd_opaque = sw_prod;
+
+ prod = NEXT_RX_AGG(prod);
+ sw_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));
+ }
+ rxr->rx_agg_prod = prod;
+ rxr->rx_sw_agg_prod = sw_prod;
+}
+
+static int bnge_agg_bufs_valid(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u8 agg_bufs, u32 *raw_cons)
+{
+ struct rx_agg_cmp *agg;
+ u16 last;
+
+ *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
+ last = RING_CMP(bn, *raw_cons);
+ agg = (struct rx_agg_cmp *)
+ &cpr->desc_ring[CP_RING(last)][CP_IDX(last)];
+ return RX_AGG_CMP_VALID(bn, agg, *raw_cons);
+}
+
+static int bnge_discard_rx(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
+ u32 *raw_cons, void *cmp)
+{
+ u32 tmp_raw_cons = *raw_cons;
+ struct rx_cmp *rxcmp = cmp;
+ u8 cmp_type, agg_bufs = 0;
+
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
+ RX_CMP_AGG_BUFS) >>
+ RX_CMP_AGG_BUFS_SHIFT;
+ } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+ return 0;
+ }
+
+ if (agg_bufs) {
+ if (!bnge_agg_bufs_valid(bn, cpr, agg_bufs, &tmp_raw_cons))
+ return -EBUSY;
+ }
+ *raw_cons = tmp_raw_cons;
+ return 0;
+}
+
+static u32 __bnge_rx_agg_netmems(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct sk_buff *skb)
+{
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct skb_shared_info *shinfo;
+ struct bnge_rx_ring_info *rxr;
+ u32 i, total_frag_len = 0;
+ u16 prod;
+
+ rxr = bnapi->rx_ring;
+ prod = rxr->rx_agg_prod;
+ shinfo = skb_shinfo(skb);
+
+ for (i = 0; i < agg_bufs; i++) {
+ struct bnge_sw_rx_agg_bd *cons_rx_buf;
+ struct rx_agg_cmp *agg;
+ u16 cons, frag_len;
+ netmem_ref netmem;
+
+ if (tpa)
+ agg = bnge_get_tpa_agg(bn, rxr, idx, i);
+ else
+ agg = bnge_get_agg(bn, cpr, idx, i);
+ cons = agg->rx_agg_cmp_opaque;
+ frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
+ RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
+
+ cons_rx_buf = &rxr->rx_agg_buf_ring[cons];
+ skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len, BNGE_RX_PAGE_SIZE);
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ /* It is possible for bnge_alloc_rx_netmem() to allocate
+ * a sw_prod index that equals the cons index, so we
+ * need to clear the cons entry now.
+ */
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+
+ if (bnge_alloc_rx_netmem(bn, rxr, prod, GFP_ATOMIC) != 0) {
+ skb->len -= frag_len;
+ skb->data_len -= frag_len;
+ skb->truesize -= BNGE_RX_PAGE_SIZE;
+
+ --shinfo->nr_frags;
+ cons_rx_buf->netmem = netmem;
+
+ /* Update prod since possibly some netmems have been
+ * allocated already.
+ */
+ rxr->rx_agg_prod = prod;
+ bnge_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
+ return 0;
+ }
+
+ page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
+ BNGE_RX_PAGE_SIZE);
+
+ total_frag_len += frag_len;
+ prod = NEXT_RX_AGG(prod);
+ }
+ rxr->rx_agg_prod = prod;
+ return total_frag_len;
+}
+
+static struct sk_buff *bnge_rx_agg_netmems_skb(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ u32 total_frag_len;
+
+ total_frag_len = __bnge_rx_agg_netmems(bn, cpr, idx, agg_bufs,
+ tpa, skb);
+ if (!total_frag_len) {
+ skb_mark_for_recycle(skb);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ return skb;
+}
+
+static void bnge_sched_reset_rxr(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ if (!rxr->bnapi->in_reset) {
+ rxr->bnapi->in_reset = true;
+
+ /* TODO: Initiate reset task */
+ }
+ rxr->rx_next_cons = 0xffff;
+}
+
+static void bnge_sched_reset_txr(struct bnge_net *bn,
+ struct bnge_tx_ring_info *txr,
+ u16 curr)
+{
+ struct bnge_napi *bnapi = txr->bnapi;
+
+ if (bnapi->tx_fault)
+ return;
+
+ netdev_err(bn->netdev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)\n",
+ txr->txq_index, txr->tx_hw_cons,
+ txr->tx_cons, txr->tx_prod, curr);
+ WARN_ON_ONCE(1);
+ bnapi->tx_fault = 1;
+ /* TODO: Initiate reset task */
+}
+
+static u16 bnge_tpa_alloc_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
+{
+ struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+ u16 idx = agg_id & MAX_TPA_MASK;
+
+ if (test_bit(idx, map->agg_idx_bmap)) {
+ idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA);
+ if (idx >= MAX_TPA)
+ return INVALID_HW_RING_ID;
+ }
+ __set_bit(idx, map->agg_idx_bmap);
+ map->agg_id_tbl[agg_id] = idx;
+ return idx;
+}
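The allocator first tries the low bits of the hardware agg_id as a direct slot and only scans the bitmap for a free slot on collision; agg_id_tbl remembers the final mapping so later TPA-agg and TPA-end completions can find the same state. A compact user-space sketch of the same scheme (256-slot table, as above; an array stands in for the bitmap):

#include <stdint.h>
#include <stdio.h>

#define MAX_TPA      256
#define MAX_TPA_MASK (MAX_TPA - 1)
#define INVALID_IDX  0xffff

static uint16_t agg_id_tbl[1024];
static uint8_t  agg_idx_used[MAX_TPA];	/* stand-in for agg_idx_bmap */

static uint16_t alloc_agg_idx(uint16_t agg_id)
{
	uint16_t idx = agg_id & MAX_TPA_MASK;

	if (agg_idx_used[idx]) {
		/* Collision: fall back to scanning for a free slot. */
		for (idx = 0; idx < MAX_TPA && agg_idx_used[idx]; idx++)
			;
		if (idx >= MAX_TPA)
			return INVALID_IDX;
	}
	agg_idx_used[idx] = 1;
	agg_id_tbl[agg_id] = idx;	/* later lookups use this table */
	return idx;
}

int main(void)
{
	printf("agg_id 0x101 -> slot %u\n", alloc_agg_idx(0x101)); /* 1 */
	printf("agg_id 0x201 -> slot %u\n", alloc_agg_idx(0x201)); /* 0 */
	return 0;
}
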
+
+static void bnge_free_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
+{
+ struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+ __clear_bit(idx, map->agg_idx_bmap);
+}
+
+static u16 bnge_lookup_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
+{
+ struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+ return map->agg_id_tbl[agg_id];
+}
+
+static void bnge_tpa_metadata(struct bnge_tpa_info *tpa_info,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ tpa_info->cfa_code_valid = 1;
+ tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
+ tpa_info->vlan_valid = 0;
+ if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+ tpa_info->vlan_valid = 1;
+ tpa_info->metadata =
+ le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+ }
+}
+
+static void bnge_tpa_metadata_v2(struct bnge_tpa_info *tpa_info,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ tpa_info->vlan_valid = 0;
+ if (TPA_START_VLAN_VALID(tpa_start)) {
+ u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
+ u32 vlan_proto = ETH_P_8021Q;
+
+ tpa_info->vlan_valid = 1;
+ if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
+ vlan_proto = ETH_P_8021AD;
+ tpa_info->metadata = vlan_proto << 16 |
+ TPA_START_METADATA0_TCI(tpa_start1);
+ }
+}
+
+static void bnge_tpa_start(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
+ u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ struct bnge_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnge_tpa_info *tpa_info;
+ u16 cons, prod, agg_id;
+ struct rx_bd *prod_bd;
+ dma_addr_t mapping;
+
+ agg_id = TPA_START_AGG_ID(tpa_start);
+ agg_id = bnge_tpa_alloc_agg_idx(rxr, agg_id);
+ if (unlikely(agg_id == INVALID_HW_RING_ID)) {
+ netdev_warn(bn->netdev, "Unable to allocate agg ID for ring %d, agg 0x%lx\n",
+ rxr->bnapi->index, TPA_START_AGG_ID(tpa_start));
+ bnge_sched_reset_rxr(bn, rxr);
+ return;
+ }
+ cons = tpa_start->rx_tpa_start_cmp_opaque;
+ prod = rxr->rx_prod;
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
+ tpa_info = &rxr->rx_tpa[agg_id];
+
+ if (unlikely(cons != rxr->rx_next_cons ||
+ TPA_START_ERROR(tpa_start))) {
+ netdev_warn(bn->netdev, "TPA cons %x, expected cons %x, error code %lx\n",
+ cons, rxr->rx_next_cons,
+ TPA_START_ERROR_CODE(tpa_start1));
+ bnge_sched_reset_rxr(bn, rxr);
+ return;
+ }
+ prod_rx_buf->data = tpa_info->data;
+ prod_rx_buf->data_ptr = tpa_info->data_ptr;
+
+ mapping = tpa_info->mapping;
+ prod_rx_buf->mapping = mapping;
+
+ prod_bd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ tpa_info->data = cons_rx_buf->data;
+ tpa_info->data_ptr = cons_rx_buf->data_ptr;
+ cons_rx_buf->data = NULL;
+ tpa_info->mapping = cons_rx_buf->mapping;
+
+ tpa_info->len =
+ le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
+ RX_TPA_START_CMP_LEN_SHIFT;
+ if (likely(TPA_START_HASH_VALID(tpa_start))) {
+ tpa_info->hash_type = PKT_HASH_TYPE_L4;
+ if (TPA_START_IS_IPV6(tpa_start1))
+ tpa_info->gso_type = SKB_GSO_TCPV6;
+ else
+ tpa_info->gso_type = SKB_GSO_TCPV4;
+ tpa_info->rss_hash =
+ le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
+ } else {
+ tpa_info->hash_type = PKT_HASH_TYPE_NONE;
+ tpa_info->gso_type = 0;
+ netif_warn(bn, rx_err, bn->netdev, "TPA packet without valid hash\n");
+ }
+ tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
+ tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
+ bnge_tpa_metadata(tpa_info, tpa_start, tpa_start1);
+ else
+ bnge_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
+ tpa_info->agg_count = 0;
+
+ rxr->rx_prod = NEXT_RX(prod);
+ cons = RING_RX(bn, NEXT_RX(cons));
+ rxr->rx_next_cons = RING_RX(bn, NEXT_RX(cons));
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+ bnge_reuse_rx_data(rxr, cons, cons_rx_buf->data);
+ rxr->rx_prod = NEXT_RX(rxr->rx_prod);
+ cons_rx_buf->data = NULL;
+}
+
+static void bnge_abort_tpa(struct bnge_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
+{
+ if (agg_bufs)
+ bnge_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
+}
+
+static void bnge_tpa_agg(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
+ struct rx_agg_cmp *rx_agg)
+{
+ u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
+ struct bnge_tpa_info *tpa_info;
+
+ agg_id = bnge_lookup_agg_idx(rxr, agg_id);
+ tpa_info = &rxr->rx_tpa[agg_id];
+
+ if (unlikely(tpa_info->agg_count >= MAX_SKB_FRAGS)) {
+ netdev_warn(bn->netdev,
+ "TPA completion count %d exceeds limit for ring %d\n",
+ tpa_info->agg_count, rxr->bnapi->index);
+
+ bnge_sched_reset_rxr(bn, rxr);
+ return;
+ }
+
+ tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
+}
+
+void bnge_reuse_rx_data(struct bnge_rx_ring_info *rxr, u16 cons, void *data)
+{
+ struct bnge_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnge_net *bn = rxr->bnapi->bn;
+ struct rx_bd *cons_bd, *prod_bd;
+ u16 prod = rxr->rx_prod;
+
+ prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+ prod_rx_buf->data = data;
+ prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
+
+ prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+ prod_bd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[RX_RING(bn, cons)][RX_IDX(cons)];
+
+ prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
+}
+
+static void bnge_deliver_skb(struct bnge_net *bn, struct bnge_napi *bnapi,
+ struct sk_buff *skb)
+{
+ skb_mark_for_recycle(skb);
+ skb_record_rx_queue(skb, bnapi->index);
+ napi_gro_receive(&bnapi->napi, skb);
+}
+
+static struct sk_buff *bnge_copy_skb(struct bnge_napi *bnapi, u8 *data,
+ unsigned int len, dma_addr_t mapping)
+{
+ struct bnge_net *bn = bnapi->bn;
+ struct bnge_dev *bd = bn->bd;
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(&bnapi->napi, len);
+ if (!skb)
+ return NULL;
+
+ dma_sync_single_for_cpu(bd->dev, mapping, len, bn->rx_dir);
+
+ memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
+ len + NET_IP_ALIGN);
+
+ dma_sync_single_for_device(bd->dev, mapping, len, bn->rx_dir);
+
+ skb_put(skb, len);
+
+ return skb;
+}
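bnge_copy_skb backs the copybreak path used by the TPA-end and RX handlers below: short packets are copied into a fresh skb so the original DMA buffer stays on the ring, while longer packets turn the buffer itself into an skb and replace it. A sketch of that decision, assuming a hypothetical threshold of 256 (the driver reads bn->rx_copybreak, which defaults to BNGE_DEFAULT_RX_COPYBREAK):

#include <stdbool.h>
#include <stdio.h>

#define RX_COPYBREAK 256	/* hypothetical threshold for the sketch */

/* Returns true when the payload should be memcpy'd into a small skb
 * (buffer recycled in place), false when the buffer itself should be
 * flipped into the skb and replaced on the ring.
 */
static bool rx_should_copy(unsigned int len)
{
	return len <= RX_COPYBREAK;
}

int main(void)
{
	printf("64B  -> %s\n", rx_should_copy(64) ? "copy" : "flip buffer");
	printf("1500 -> %s\n", rx_should_copy(1500) ? "copy" : "flip buffer");
	return 0;
}
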
+
+#ifdef CONFIG_INET
+static void bnge_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
+{
+ struct udphdr *uh = NULL;
+
+ if (ip_proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+}
+
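+/* Set the network and transport header offsets from the TPA start
+ * header info so that tcp_gro_complete() can finalize the aggregated
+ * packet.
+ */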
+static struct sk_buff *bnge_gro_func(struct bnge_tpa_info *tpa_info,
+ int payload_off, int tcp_ts,
+ struct sk_buff *skb)
+{
+ u16 outer_ip_off, inner_ip_off, inner_mac_off;
+ u32 hdr_info = tpa_info->hdr_info;
+ int iphdr_len, nw_off;
+
+ inner_ip_off = BNGE_TPA_INNER_L3_OFF(hdr_info);
+ inner_mac_off = BNGE_TPA_INNER_L2_OFF(hdr_info);
+ outer_ip_off = BNGE_TPA_OUTER_L3_OFF(hdr_info);
+
+ nw_off = inner_ip_off - ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
+ sizeof(struct ipv6hdr) : sizeof(struct iphdr);
+ skb_set_transport_header(skb, nw_off + iphdr_len);
+
+ if (inner_mac_off) { /* tunnel */
+ __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
+ ETH_HLEN - 2));
+
+ bnge_gro_tunnel(skb, proto);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *bnge_gro_skb(struct bnge_net *bn,
+ struct bnge_tpa_info *tpa_info,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ struct sk_buff *skb)
+{
+ int payload_off;
+ u16 segs;
+
+ segs = TPA_END_TPA_SEGS(tpa_end);
+ if (segs == 1)
+ return skb;
+
+ NAPI_GRO_CB(skb)->count = segs;
+ skb_shinfo(skb)->gso_size =
+ le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+ skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+ payload_off = TPA_END_PAYLOAD_OFF(tpa_end1);
+ skb = bnge_gro_func(tpa_info, payload_off,
+ TPA_END_GRO_TS(tpa_end), skb);
+ if (likely(skb))
+ tcp_gro_complete(skb);
+
+ return skb;
+}
+#endif
+
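+/* Handle the TPA end completion: reclaim the TPA buffer, build the skb
+ * (copybreak or zero-copy), attach any aggregation fragments, and fill
+ * in hash, VLAN, checksum and GRO metadata.
+ */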
+static struct sk_buff *bnge_tpa_end(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u32 *raw_cons,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ u8 *event)
+{
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct net_device *dev = bn->netdev;
+ struct bnge_tpa_info *tpa_info;
+ struct bnge_rx_ring_info *rxr;
+ u8 *data_ptr, agg_bufs;
+ struct sk_buff *skb;
+ u16 idx = 0, agg_id;
+ dma_addr_t mapping;
+ unsigned int len;
+ void *data;
+
+ if (unlikely(bnapi->in_reset)) {
+ int rc = bnge_discard_rx(bn, cpr, raw_cons, tpa_end);
+
+ if (rc < 0)
+ return ERR_PTR(-EBUSY);
+ return NULL;
+ }
+
+ rxr = bnapi->rx_ring;
+ agg_id = TPA_END_AGG_ID(tpa_end);
+ agg_id = bnge_lookup_agg_idx(rxr, agg_id);
+ agg_bufs = TPA_END_AGG_BUFS(tpa_end1);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ if (unlikely(agg_bufs != tpa_info->agg_count)) {
+ netdev_warn(bn->netdev, "TPA end agg_buf %d != expected agg_bufs %d\n",
+ agg_bufs, tpa_info->agg_count);
+ agg_bufs = tpa_info->agg_count;
+ }
+ tpa_info->agg_count = 0;
+ *event |= BNGE_AGG_EVENT;
+ bnge_free_agg_idx(rxr, agg_id);
+ idx = agg_id;
+ data = tpa_info->data;
+ data_ptr = tpa_info->data_ptr;
+ prefetch(data_ptr);
+ len = tpa_info->len;
+ mapping = tpa_info->mapping;
+
+ if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
+ bnge_abort_tpa(cpr, idx, agg_bufs);
+ if (agg_bufs > MAX_SKB_FRAGS)
+ netdev_warn(bn->netdev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+ agg_bufs, (int)MAX_SKB_FRAGS);
+ return NULL;
+ }
+
+ if (len <= bn->rx_copybreak) {
+ skb = bnge_copy_skb(bnapi, data_ptr, len, mapping);
+ if (!skb) {
+ bnge_abort_tpa(cpr, idx, agg_bufs);
+ return NULL;
+ }
+ } else {
+ dma_addr_t new_mapping;
+ u8 *new_data;
+
+ new_data = __bnge_alloc_rx_frag(bn, &new_mapping, rxr,
+ GFP_ATOMIC);
+ if (!new_data) {
+ bnge_abort_tpa(cpr, idx, agg_bufs);
+ return NULL;
+ }
+
+ tpa_info->data = new_data;
+ tpa_info->data_ptr = new_data + bn->rx_offset;
+ tpa_info->mapping = new_mapping;
+
+ skb = napi_build_skb(data, bn->rx_buf_size);
+ dma_sync_single_for_cpu(bn->bd->dev, mapping,
+ bn->rx_buf_use_size, bn->rx_dir);
+
+ if (!skb) {
+ page_pool_free_va(rxr->head_pool, data, true);
+ bnge_abort_tpa(cpr, idx, agg_bufs);
+ return NULL;
+ }
+ skb_mark_for_recycle(skb);
+ skb_reserve(skb, bn->rx_offset);
+ skb_put(skb, len);
+ }
+
+ if (agg_bufs) {
+ skb = bnge_rx_agg_netmems_skb(bn, cpr, skb, idx, agg_bufs,
+ true);
+ /* Page reuse already handled by bnge_rx_agg_netmems_skb(). */
+ if (!skb)
+ return NULL;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
+ skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
+
+ if (tpa_info->vlan_valid &&
+ (dev->features & BNGE_HW_FEATURE_VLAN_ALL_RX)) {
+ __be16 vlan_proto = htons(tpa_info->metadata >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT);
+ u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
+
+ if (eth_type_vlan(vlan_proto)) {
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ } else {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ }
+
+ skb_checksum_none_assert(skb);
+ if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level =
+ (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
+ }
+
+#ifdef CONFIG_INET
+ if (bn->priv_flags & BNGE_NET_EN_GRO)
+ skb = bnge_gro_skb(bn, tpa_info, tpa_end, tpa_end1, skb);
+#endif
+
+ return skb;
+}
+
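+/* Map the RSS hash-type op in a V3 RX completion to a kernel hash
+ * type: 4-tuple and flow-label ops are reported as L4, everything else
+ * as L3.
+ */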
+static enum pkt_hash_types bnge_rss_ext_op(struct bnge_net *bn,
+ struct rx_cmp *rxcmp)
+{
+ u8 ext_op = RX_CMP_V3_HASH_TYPE(bn->bd, rxcmp);
+
+ switch (ext_op) {
+ case EXT_OP_INNER_4:
+ case EXT_OP_OUTER_4:
+ case EXT_OP_INNFL_3:
+ case EXT_OP_OUTFL_3:
+ return PKT_HASH_TYPE_L4;
+ default:
+ return PKT_HASH_TYPE_L3;
+ }
+}
+
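+/* Extract the VLAN TCI and TPID from the RX completion and attach the
+ * tag to the skb; drop the packet if the TPID is not a VLAN ethertype.
+ */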
+static struct sk_buff *bnge_rx_vlan(struct sk_buff *skb, u8 cmp_type,
+ struct rx_cmp *rxcmp,
+ struct rx_cmp_ext *rxcmp1)
+{
+ __be16 vlan_proto;
+ u16 vtag;
+
+ if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ __le32 flags2 = rxcmp1->rx_cmp_flags2;
+ u32 meta_data;
+
+ if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
+ return skb;
+
+ meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
+ vlan_proto =
+ htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
+ if (eth_type_vlan(vlan_proto))
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ else
+ goto vlan_err;
+ } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ if (RX_CMP_VLAN_VALID(rxcmp)) {
+ u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
+
+ if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
+ vlan_proto = htons(ETH_P_8021Q);
+ else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
+ vlan_proto = htons(ETH_P_8021AD);
+ else
+ goto vlan_err;
+ vtag = RX_CMP_METADATA0_TCI(rxcmp1);
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ }
+ }
+ return skb;
+
+vlan_err:
+ skb_mark_for_recycle(skb);
+ dev_kfree_skb(skb);
+ return NULL;
+}
+
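+/* Build a full-size skb around the ring buffer itself. A replacement
+ * buffer is allocated for the producer slot first; if that fails, the
+ * old buffer is reposted and the packet is dropped.
+ */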
+static struct sk_buff *bnge_rx_skb(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr, u16 cons,
+ void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int len)
+{
+ struct bnge_dev *bd = bn->bd;
+ u16 prod = rxr->rx_prod;
+ struct sk_buff *skb;
+ int err;
+
+ err = bnge_alloc_rx_data(bn, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnge_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+
+ dma_sync_single_for_cpu(bd->dev, dma_addr, len, bn->rx_dir);
+ skb = napi_build_skb(data, bn->rx_buf_size);
+ if (!skb) {
+ page_pool_free_va(rxr->head_pool, data, true);
+ return NULL;
+ }
+
+ skb_mark_for_recycle(skb);
+ skb_reserve(skb, bn->rx_offset);
+ skb_put(skb, len);
+ return skb;
+}
+
+/* returns the following:
+ * 1 - 1 packet successfully received
+ * 0 - successful TPA_START, packet not completed yet
+ * -EBUSY - completion ring does not have all the agg buffers yet
+ * -ENOMEM - packet aborted due to out of memory
+ * -EIO - packet aborted due to hw error indicated in BD
+ */
+static int bnge_rx_pkt(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
+ u32 *raw_cons, u8 *event)
+{
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct net_device *dev = bn->netdev;
+ struct bnge_rx_ring_info *rxr;
+ u32 tmp_raw_cons, flags, misc;
+ struct bnge_sw_rx_bd *rx_buf;
+ struct rx_cmp_ext *rxcmp1;
+ u16 cons, prod, cp_cons;
+ u8 *data_ptr, cmp_type;
+ struct rx_cmp *rxcmp;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ unsigned int len;
+ u8 agg_bufs;
+ void *data;
+ int rc = 0;
+
+ rxr = bnapi->rx_ring;
+
+ tmp_raw_cons = *raw_cons;
+ cp_cons = RING_CMP(bn, tmp_raw_cons);
+ rxcmp = (struct rx_cmp *)
+ &cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
+ bnge_tpa_agg(bn, rxr, (struct rx_agg_cmp *)rxcmp);
+ goto next_rx_no_prod_no_len;
+ }
+
+ tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+ cp_cons = RING_CMP(bn, tmp_raw_cons);
+ rxcmp1 = (struct rx_cmp_ext *)
+ &cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!RX_CMP_VALID(bn, rxcmp1, tmp_raw_cons))
+ return -EBUSY;
+
+ /* The entry's valid bit must be checked before reading any other
+ * fields.
+ */
+ dma_rmb();
+ prod = rxr->rx_prod;
+
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
+ cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
+ bnge_tpa_start(bn, rxr, cmp_type,
+ (struct rx_tpa_start_cmp *)rxcmp,
+ (struct rx_tpa_start_cmp_ext *)rxcmp1);
+
+ *event |= BNGE_RX_EVENT;
+ goto next_rx_no_prod_no_len;
+
+ } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+ skb = bnge_tpa_end(bn, cpr, &tmp_raw_cons,
+ (struct rx_tpa_end_cmp *)rxcmp,
+ (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
+ if (IS_ERR(skb))
+ return -EBUSY;
+
+ rc = -ENOMEM;
+ if (likely(skb)) {
+ bnge_deliver_skb(bn, bnapi, skb);
+ rc = 1;
+ }
+ *event |= BNGE_RX_EVENT;
+ goto next_rx_no_prod_no_len;
+ }
+
+ cons = rxcmp->rx_cmp_opaque;
+ if (unlikely(cons != rxr->rx_next_cons)) {
+ int rc1 = bnge_discard_rx(bn, cpr, &tmp_raw_cons, rxcmp);
+
+ /* 0xffff is forced error, don't print it */
+ if (rxr->rx_next_cons != 0xffff)
+ netdev_warn(bn->netdev, "RX cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
+ bnge_sched_reset_rxr(bn, rxr);
+ if (rc1)
+ return rc1;
+ goto next_rx_no_prod_no_len;
+ }
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data;
+ data_ptr = rx_buf->data_ptr;
+ prefetch(data_ptr);
+
+ misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+ agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
+
+ if (agg_bufs) {
+ if (!bnge_agg_bufs_valid(bn, cpr, agg_bufs, &tmp_raw_cons))
+ return -EBUSY;
+
+ cp_cons = NEXT_CMP(bn, cp_cons);
+ *event |= BNGE_AGG_EVENT;
+ }
+ *event |= BNGE_RX_EVENT;
+
+ rx_buf->data = NULL;
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+ bnge_reuse_rx_data(rxr, cons, data);
+ if (agg_bufs)
+ bnge_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
+ false);
+ rc = -EIO;
+ goto next_rx_no_len;
+ }
+
+ flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
+ len = flags >> RX_CMP_LEN_SHIFT;
+ dma_addr = rx_buf->mapping;
+
+ if (len <= bn->rx_copybreak) {
+ skb = bnge_copy_skb(bnapi, data_ptr, len, dma_addr);
+ bnge_reuse_rx_data(rxr, cons, data);
+ } else {
+ skb = bnge_rx_skb(bn, rxr, cons, data, data_ptr, dma_addr, len);
+ }
+
+ if (!skb) {
+ if (agg_bufs)
+ bnge_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+ agg_bufs, false);
+ goto oom_next_rx;
+ }
+
+ if (agg_bufs) {
+ skb = bnge_rx_agg_netmems_skb(bn, cpr, skb, cp_cons,
+ agg_bufs, false);
+ if (!skb)
+ goto oom_next_rx;
+ }
+
+ if (RX_CMP_HASH_VALID(rxcmp)) {
+ enum pkt_hash_types type;
+
+ if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ type = bnge_rss_ext_op(bn, rxcmp);
+ } else {
+ u32 itypes = RX_CMP_ITYPES(rxcmp);
+
+ if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
+ itypes == RX_CMP_FLAGS_ITYPE_UDP)
+ type = PKT_HASH_TYPE_L4;
+ else
+ type = PKT_HASH_TYPE_L3;
+ }
+ skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (skb->dev->features & BNGE_HW_FEATURE_VLAN_ALL_RX) {
+ skb = bnge_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
+ if (!skb)
+ goto next_rx;
+ }
+
+ skb_checksum_none_assert(skb);
+ if (RX_CMP_L4_CS_OK(rxcmp1)) {
+ if (dev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+ }
+ }
+
+ bnge_deliver_skb(bn, bnapi, skb);
+ rc = 1;
+
+next_rx:
+ /* Update Stats */
+next_rx_no_len:
+ rxr->rx_prod = NEXT_RX(prod);
+ rxr->rx_next_cons = RING_RX(bn, NEXT_RX(cons));
+
+next_rx_no_prod_no_len:
+ *raw_cons = tmp_raw_cons;
+ return rc;
+
+oom_next_rx:
+ rc = -ENOMEM;
+ goto next_rx;
+}
+
+/* In netpoll mode, if we are using a combined completion ring, we need to
+ * discard the rx packets and recycle the buffers.
+ */
+static int bnge_force_rx_discard(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u32 *raw_cons, u8 *event)
+{
+ u32 tmp_raw_cons = *raw_cons;
+ struct rx_cmp_ext *rxcmp1;
+ struct rx_cmp *rxcmp;
+ u16 cp_cons;
+ u8 cmp_type;
+ int rc;
+
+ cp_cons = RING_CMP(bn, tmp_raw_cons);
+ rxcmp = (struct rx_cmp *)
+ &cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+ cp_cons = RING_CMP(bn, tmp_raw_cons);
+ rxcmp1 = (struct rx_cmp_ext *)
+ &cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!RX_CMP_VALID(bn, rxcmp1, tmp_raw_cons))
+ return -EBUSY;
+
+ /* The entry's valid bit must be checked before reading any other
+ * fields.
+ */
+ dma_rmb();
+ cmp_type = RX_CMP_TYPE(rxcmp);
+ if (cmp_type == CMP_TYPE_RX_L2_CMP ||
+ cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+ cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+ } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+ struct rx_tpa_end_cmp_ext *tpa_end1;
+
+ tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
+ tpa_end1->rx_tpa_end_cmp_errors_v2 |=
+ cpu_to_le32(RX_TPA_END_CMP_ERRORS);
+ }
+ rc = bnge_rx_pkt(bn, cpr, raw_cons, event);
+ return rc;
+}
+
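+/* Reclaim completed TX descriptors up to the hardware consumer index:
+ * unmap head and fragment buffers, free the skbs, and wake the queue
+ * when enough descriptors become available.
+ */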
+static void __bnge_tx_int(struct bnge_net *bn, struct bnge_tx_ring_info *txr,
+ int budget)
+{
+ u16 hw_cons = txr->tx_hw_cons;
+ struct bnge_dev *bd = bn->bd;
+ unsigned int tx_bytes = 0;
+ unsigned int tx_pkts = 0;
+ struct netdev_queue *txq;
+ u16 cons = txr->tx_cons;
+ skb_frag_t *frag;
+
+ txq = netdev_get_tx_queue(bn->netdev, txr->txq_index);
+
+ while (SW_TX_RING(bn, cons) != hw_cons) {
+ struct bnge_sw_tx_bd *tx_buf;
+ struct sk_buff *skb;
+ int j, last;
+
+ tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, cons)];
+ skb = tx_buf->skb;
+ if (unlikely(!skb)) {
+ bnge_sched_reset_txr(bn, txr, cons);
+ return;
+ }
+
+ cons = NEXT_TX(cons);
+ tx_pkts++;
+ tx_bytes += skb->len;
+ tx_buf->skb = NULL;
+
+ dma_unmap_single(bd->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), DMA_TO_DEVICE);
+ last = tx_buf->nr_frags;
+
+ for (j = 0; j < last; j++) {
+ frag = &skb_shinfo(skb)->frags[j];
+ cons = NEXT_TX(cons);
+ tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, cons)];
+ netmem_dma_unmap_page_attrs(bd->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
+ }
+
+ cons = NEXT_TX(cons);
+
+ napi_consume_skb(skb, budget);
+ }
+
+ WRITE_ONCE(txr->tx_cons, cons);
+
+ __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
+ bnge_tx_avail(bn, txr), bn->tx_wake_thresh,
+ (READ_ONCE(txr->dev_state) ==
+ BNGE_DEV_STATE_CLOSING));
+}
+
+static void bnge_tx_int(struct bnge_net *bn, struct bnge_napi *bnapi,
+ int budget)
+{
+ struct bnge_tx_ring_info *txr;
+ int i;
+
+ bnge_for_each_napi_tx(i, bnapi, txr) {
+ if (txr->tx_hw_cons != SW_TX_RING(bn, txr->tx_cons))
+ __bnge_tx_int(bn, txr, budget);
+ }
+
+ bnapi->events &= ~BNGE_TX_CMP_EVENT;
+}
+
+static void __bnge_poll_work_done(struct bnge_net *bn, struct bnge_napi *bnapi,
+ int budget)
+{
+ struct bnge_rx_ring_info *rxr = bnapi->rx_ring;
+
+ if ((bnapi->events & BNGE_TX_CMP_EVENT) && !bnapi->tx_fault)
+ bnge_tx_int(bn, bnapi, budget);
+
+ if ((bnapi->events & BNGE_RX_EVENT)) {
+ bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
+ bnapi->events &= ~BNGE_RX_EVENT;
+ }
+
+ if (bnapi->events & BNGE_AGG_EVENT) {
+ bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnapi->events &= ~BNGE_AGG_EVENT;
+ }
+}
+
+static void
+bnge_hwrm_update_token(struct bnge_dev *bd, u16 seq_id,
+ enum bnge_hwrm_wait_state state)
+{
+ struct bnge_hwrm_wait_token *token;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(token, &bd->hwrm_pending_list, node) {
+ if (token->seq_id == seq_id) {
+ WRITE_ONCE(token->state, state);
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+ dev_err(bd->dev, "Invalid hwrm seq id %d\n", seq_id);
+}
+
+static int bnge_hwrm_handler(struct bnge_dev *bd, struct tx_cmp *txcmp)
+{
+ struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
+ u16 cmpl_type = TX_CMP_TYPE(txcmp), seq_id;
+
+ switch (cmpl_type) {
+ case CMPL_BASE_TYPE_HWRM_DONE:
+ seq_id = le16_to_cpu(h_cmpl->sequence_id);
+ bnge_hwrm_update_token(bd, seq_id, BNGE_HWRM_COMPLETE);
+ break;
+
+ case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
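+/* Process one completion ring: reclaim TX completions, receive RX
+ * packets, and hand HWRM completions to the handler, stopping when the
+ * ring is empty or the NAPI budget is exhausted.
+ */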
+static int __bnge_poll_work(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
+ int budget)
+{
+ struct bnge_napi *bnapi = cpr->bnapi;
+ u32 raw_cons = cpr->cp_raw_cons;
+ struct tx_cmp *txcmp;
+ int rx_pkts = 0;
+ u8 event = 0;
+ u32 cons;
+
+ cpr->has_more_work = 0;
+ cpr->had_work_done = 1;
+ while (1) {
+ u8 cmp_type;
+ int rc;
+
+ cons = RING_CMP(bn, raw_cons);
+ txcmp = &cpr->desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!TX_CMP_VALID(bn, txcmp, raw_cons))
+ break;
+
+ /* The entry's valid bit must be checked before reading any other
+ * fields.
+ */
+ dma_rmb();
+ cmp_type = TX_CMP_TYPE(txcmp);
+ if (cmp_type == CMP_TYPE_TX_L2_CMP ||
+ cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
+ u32 opaque = txcmp->tx_cmp_opaque;
+ struct bnge_tx_ring_info *txr;
+ u16 tx_freed;
+
+ txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
+ event |= BNGE_TX_CMP_EVENT;
+ if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
+ txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
+ else
+ txr->tx_hw_cons = TX_OPAQUE_PROD(bn, opaque);
+ tx_freed = ((txr->tx_hw_cons - txr->tx_cons) &
+ bn->tx_ring_mask);
+ /* return full budget so NAPI will complete. */
+ if (unlikely(tx_freed >= bn->tx_wake_thresh)) {
+ rx_pkts = budget;
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ if (budget)
+ cpr->has_more_work = 1;
+ break;
+ }
+ } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
+ cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
+ if (likely(budget))
+ rc = bnge_rx_pkt(bn, cpr, &raw_cons, &event);
+ else
+ rc = bnge_force_rx_discard(bn, cpr, &raw_cons,
+ &event);
+ if (likely(rc >= 0))
+ rx_pkts += rc;
+ /* Increment rx_pkts when rc is -ENOMEM to count towards
+ * the NAPI budget. Otherwise, we may potentially loop
+ * here forever if we consistently cannot allocate
+ * buffers.
+ */
+ else if (rc == -ENOMEM && budget)
+ rx_pkts++;
+ else if (rc == -EBUSY) /* partial completion */
+ break;
+ } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
+ cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
+ cmp_type == CMPL_BA_TY_HWRM_ASY_EVT)) {
+ bnge_hwrm_handler(bn->bd, txcmp);
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+
+ if (rx_pkts && rx_pkts == budget) {
+ cpr->has_more_work = 1;
+ break;
+ }
+ }
+
+ cpr->cp_raw_cons = raw_cons;
+ bnapi->events |= event;
+ return rx_pkts;
+}
+
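+/* Ring the completion ring doorbell for each ring that did work,
+ * re-arming when @dbr_type is DBR_TYPE_CQ_ARMALL, then flush deferred
+ * TX reclaim and RX doorbell updates.
+ */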
+static void __bnge_poll_cqs_done(struct bnge_net *bn, struct bnge_napi *bnapi,
+ u64 dbr_type, int budget)
+{
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+ int i;
+
+ for (i = 0; i < nqr->cp_ring_count; i++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[i];
+ struct bnge_db_info *db;
+
+ if (cpr->had_work_done) {
+ u32 tgl = 0;
+
+ if (dbr_type == DBR_TYPE_CQ_ARMALL) {
+ cpr->had_nqe_notify = 0;
+ tgl = cpr->toggle;
+ }
+ db = &cpr->cp_db;
+ bnge_writeq(bn->bd,
+ db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
+ DB_RING_IDX(db, cpr->cp_raw_cons),
+ db->doorbell);
+ cpr->had_work_done = 0;
+ }
+ }
+ __bnge_poll_work_done(bn, bnapi, budget);
+}
+
+static int __bnge_poll_cqs(struct bnge_net *bn, struct bnge_napi *bnapi,
+ int budget)
+{
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+ int i, work_done = 0;
+
+ for (i = 0; i < nqr->cp_ring_count; i++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[i];
+
+ if (cpr->had_nqe_notify) {
+ work_done += __bnge_poll_work(bn, cpr,
+ budget - work_done);
+ nqr->has_more_work |= cpr->has_more_work;
+ }
+ }
+ return work_done;
+}
+
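+/* NAPI handler: drain the notification queue, polling each completion
+ * ring that posted a notification, and re-arm the NQ once all work
+ * fits within the budget.
+ */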
+int bnge_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct bnge_napi *bnapi = container_of(napi, struct bnge_napi, napi);
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+ u32 raw_cons = nqr->nq_raw_cons;
+ struct bnge_net *bn = bnapi->bn;
+ struct bnge_dev *bd = bn->bd;
+ struct nqe_cn *nqcmp;
+ int work_done = 0;
+ u32 cons;
+
+ if (nqr->has_more_work) {
+ nqr->has_more_work = 0;
+ work_done = __bnge_poll_cqs(bn, bnapi, budget);
+ }
+
+ while (1) {
+ u16 type;
+
+ cons = RING_CMP(bn, raw_cons);
+ nqcmp = &nqr->desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!NQ_CMP_VALID(bn, nqcmp, raw_cons)) {
+ if (nqr->has_more_work)
+ break;
+
+ __bnge_poll_cqs_done(bn, bnapi, DBR_TYPE_CQ_ARMALL,
+ budget);
+ nqr->nq_raw_cons = raw_cons;
+ if (napi_complete_done(napi, work_done))
+ BNGE_DB_NQ_ARM(bd, &nqr->nq_db,
+ nqr->nq_raw_cons);
+ goto poll_done;
+ }
+
+ /* The entry's valid bit must be checked before reading any other
+ * fields.
+ */
+ dma_rmb();
+
+ type = le16_to_cpu(nqcmp->type);
+ if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
+ u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
+ u32 cq_type = BNGE_NQ_HDL_TYPE(idx);
+ struct bnge_cp_ring_info *cpr;
+
+ /* No more budget for RX work */
+ if (budget && work_done >= budget &&
+ cq_type == BNGE_NQ_HDL_TYPE_RX)
+ break;
+
+ idx = BNGE_NQ_HDL_IDX(idx);
+ cpr = &nqr->cp_ring_arr[idx];
+ cpr->had_nqe_notify = 1;
+ cpr->toggle = NQE_CN_TOGGLE(type);
+ work_done += __bnge_poll_work(bn, cpr,
+ budget - work_done);
+ nqr->has_more_work |= cpr->has_more_work;
+ } else {
+ bnge_hwrm_handler(bn->bd, (struct tx_cmp *)nqcmp);
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ }
+
+ __bnge_poll_cqs_done(bn, bnapi, DBR_TYPE_CQ, budget);
+ if (raw_cons != nqr->nq_raw_cons) {
+ nqr->nq_raw_cons = raw_cons;
+ BNGE_DB_NQ(bd, &nqr->nq_db, raw_cons);
+ }
+poll_done:
+ return work_done;
+}
+
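+/* Return the destination port id from the skb's HW port mux metadata,
+ * or 0 when no such metadata is attached.
+ */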
+static u16 bnge_xmit_get_cfa_action(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+
+ if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
+ return 0;
+
+ return md_dst->u.port_info.port_id;
+}
+
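+/* TX buffer length hint flags, indexed by packet length in 512-byte
+ * units (length >> 9).
+ */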
+static const u16 bnge_lhint_arr[] = {
+ TX_BD_FLAGS_LHINT_512_AND_SMALLER,
+ TX_BD_FLAGS_LHINT_512_TO_1023,
+ TX_BD_FLAGS_LHINT_1024_TO_2047,
+ TX_BD_FLAGS_LHINT_1024_TO_2047,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+};
+
+static void bnge_txr_db_kick(struct bnge_net *bn, struct bnge_tx_ring_info *txr,
+ u16 prod)
+{
+ /* Sync BD data before updating doorbell */
+ wmb();
+ bnge_db_write(bn->bd, &txr->tx_db, prod);
+ txr->kick_pending = 0;
+}
+
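+/* Header length for LSO: everything up to and including the inner TCP
+ * or UDP header, for both plain and encapsulated packets.
+ */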
+static u32 bnge_get_gso_hdr_len(struct sk_buff *skb)
+{
+ bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
+ u32 hdr_len;
+
+ if (skb->encapsulation) {
+ if (udp_gso)
+ hdr_len = skb_inner_transport_offset(skb) +
+ sizeof(struct udphdr);
+ else
+ hdr_len = skb_inner_tcp_all_headers(skb);
+ } else if (udp_gso) {
+ hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
+ } else {
+ hdr_len = skb_tcp_all_headers(skb);
+ }
+
+ return hdr_len;
+}
+
+netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u32 len, free_size, vlan_tag_flags, cfa_action, flags;
+ struct bnge_net *bn = netdev_priv(dev);
+ struct bnge_tx_ring_info *txr;
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_sw_tx_bd *tx_buf;
+ struct tx_bd *txbd, *txbd0;
+ struct netdev_queue *txq;
+ struct tx_bd_ext *txbd1;
+ u16 prod, last_frag;
+ unsigned int length;
+ dma_addr_t mapping;
+ __le32 lflags = 0;
+ skb_frag_t *frag;
+ int i;
+
+ i = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, i);
+ txr = &bn->tx_ring[bn->tx_ring_map[i]];
+ prod = txr->tx_prod;
+
+ free_size = bnge_tx_avail(bn, txr);
+ if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
+ /* We must have raced with NAPI cleanup */
+ if (net_ratelimit() && txr->kick_pending)
+ netif_warn(bn, tx_err, dev,
+ "bnge: ring busy w/ flush pending!\n");
+ if (!netif_txq_try_stop(txq, bnge_tx_avail(bn, txr),
+ bn->tx_wake_thresh))
+ return NETDEV_TX_BUSY;
+ }
+
+ last_frag = skb_shinfo(skb)->nr_frags;
+
+ txbd = &txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)];
+
+ tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)];
+ tx_buf->skb = skb;
+ tx_buf->nr_frags = last_frag;
+
+ vlan_tag_flags = 0;
+ cfa_action = bnge_xmit_get_cfa_action(skb);
+ if (skb_vlan_tag_present(skb)) {
+ vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
+ skb_vlan_tag_get(skb);
+ /* Currently supports 8021Q, 8021AD vlan offloads
+ * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
+ */
+ if (skb->vlan_proto == htons(ETH_P_8021Q))
+ vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
+ }
+
+ if (unlikely(skb->no_fcs))
+ lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
+
+ if (eth_skb_pad(skb))
+ goto tx_kick_pending;
+
+ len = skb_headlen(skb);
+
+ mapping = dma_map_single(bd->dev, skb->data, len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(bd->dev, mapping)))
+ goto tx_free;
+
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+ TX_BD_CNT(last_frag + 2);
+
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bn, txr, prod, 2 + last_frag);
+
+ prod = NEXT_TX(prod);
+ txbd1 = (struct tx_bd_ext *)
+ &txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)];
+
+ if (skb_is_gso(skb)) {
+ u32 hdr_len = bnge_get_gso_hdr_len(skb);
+
+ lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | TX_BD_FLAGS_T_IPID |
+ (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
+ length = skb_shinfo(skb)->gso_size;
+ txbd1->tx_bd_mss = cpu_to_le32(length);
+ length += hdr_len;
+ } else {
+ length = skb->len;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ lflags |= cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+ txbd1->tx_bd_mss = 0;
+ }
+ }
+
+ flags |= bnge_lhint_arr[length >> 9];
+
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ txbd1->tx_bd_hsize_lflags = lflags;
+ txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+ txbd1->tx_bd_cfa_action =
+ cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
+ txbd0 = txbd;
+ for (i = 0; i < last_frag; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+
+ prod = NEXT_TX(prod);
+ txbd = &txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)];
+
+ len = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(bd->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(bd->dev, mapping)))
+ goto tx_dma_error;
+
+ tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)];
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
+ mapping, mapping);
+
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ flags = len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ }
+
+ flags &= ~TX_BD_LEN;
+ txbd->tx_bd_len_flags_type =
+ cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
+ TX_BD_FLAGS_PACKET_END);
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ prod = NEXT_TX(prod);
+ WRITE_ONCE(txr->tx_prod, prod);
+
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
+ bnge_txr_db_kick(bn, txr, prod);
+ } else {
+ if (free_size >= bn->tx_wake_thresh)
+ txbd0->tx_bd_len_flags_type |=
+ cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
+ txr->kick_pending = 1;
+ }
+
+ if (unlikely(bnge_tx_avail(bn, txr) <= MAX_SKB_FRAGS + 1)) {
+ if (netdev_xmit_more()) {
+ txbd0->tx_bd_len_flags_type &=
+ cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
+ bnge_txr_db_kick(bn, txr, prod);
+ }
+
+ netif_txq_try_stop(txq, bnge_tx_avail(bn, txr),
+ bn->tx_wake_thresh);
+ }
+ return NETDEV_TX_OK;
+
+tx_dma_error:
+ last_frag = i;
+
+ /* start back at the beginning and unmap the skb head */
+ prod = txr->tx_prod;
+ tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)];
+ dma_unmap_single(bd->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), DMA_TO_DEVICE);
+ prod = NEXT_TX(prod);
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < last_frag; i++) {
+ prod = NEXT_TX(prod);
+ tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)];
+ frag = &skb_shinfo(skb)->frags[i];
+ netmem_dma_unmap_page_attrs(bd->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
+ }
+
+tx_free:
+ dev_kfree_skb_any(skb);
+
+tx_kick_pending:
+ if (txr->kick_pending)
+ bnge_txr_db_kick(bn, txr, txr->tx_prod);
+ txr->tx_buf_ring[SW_TX_RING(bn, txr->tx_prod)].skb = NULL;
+ dev_core_stats_tx_dropped_inc(dev);
+ return NETDEV_TX_OK;
+}
+
+netdev_features_t bnge_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ u32 len;
+
+ features = vlan_features_check(skb, features);
+#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
+ if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS)
+ features &= ~NETIF_F_SG;
+#endif
+
+ if (skb_is_gso(skb))
+ len = bnge_get_gso_hdr_len(skb) + skb_shinfo(skb)->gso_size;
+ else
+ len = skb->len;
+
+ len >>= 9;
+ if (unlikely(len >= ARRAY_SIZE(bnge_lhint_arr)))
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_txrx.h b/drivers/net/ethernet/broadcom/bnge/bnge_txrx.h
new file mode 100644
index 000000000000..bd0aa6c221a4
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_txrx.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_TXRX_H_
+#define _BNGE_TXRX_H_
+
+#include <linux/bnge/hsi.h>
+#include "bnge_netdev.h"
+
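+/* Number of TX descriptors still available, derived from the
+ * producer/consumer delta masked to the ring size.
+ */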
+static inline u32 bnge_tx_avail(struct bnge_net *bn,
+ const struct bnge_tx_ring_info *txr)
+{
+ u32 used = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
+
+ return bn->tx_ring_size - (used & bn->tx_ring_mask);
+}
+
+static inline void bnge_writeq_relaxed(struct bnge_dev *bd, u64 val,
+ void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+ spin_lock(&bd->db_lock);
+ lo_hi_writeq_relaxed(val, addr);
+ spin_unlock(&bd->db_lock);
+#else
+ writeq_relaxed(val, addr);
+#endif
+}
+
+/* For TX and RX ring doorbells with no ordering guarantee */
+static inline void bnge_db_write_relaxed(struct bnge_net *bn,
+ struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq_relaxed(bn->bd, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
+}
+
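+/* The opaque field of a TX BD encodes the NAPI ring index, the number
+ * of BDs in the packet, and the producer index, so a completion can be
+ * matched back to its ring and descriptors.
+ */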
+#define TX_OPAQUE_IDX_MASK 0x0000ffff
+#define TX_OPAQUE_BDS_MASK 0x00ff0000
+#define TX_OPAQUE_BDS_SHIFT 16
+#define TX_OPAQUE_RING_MASK 0xff000000
+#define TX_OPAQUE_RING_SHIFT 24
+
+#define SET_TX_OPAQUE(bn, txr, idx, bds) \
+ (((txr)->tx_napi_idx << TX_OPAQUE_RING_SHIFT) | \
+ ((bds) << TX_OPAQUE_BDS_SHIFT) | ((idx) & (bn)->tx_ring_mask))
+
+#define TX_OPAQUE_IDX(opq) ((opq) & TX_OPAQUE_IDX_MASK)
+#define TX_OPAQUE_RING(opq) (((opq) & TX_OPAQUE_RING_MASK) >> \
+ TX_OPAQUE_RING_SHIFT)
+#define TX_OPAQUE_BDS(opq) (((opq) & TX_OPAQUE_BDS_MASK) >> \
+ TX_OPAQUE_BDS_SHIFT)
+#define TX_OPAQUE_PROD(bn, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
+ (bn)->tx_ring_mask)
+#define TX_BD_CNT(n) (((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT)
+
+#define TX_MAX_BD_CNT 32
+
+#define TX_MAX_FRAGS (TX_MAX_BD_CNT - 2)
+
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
+ * BD because the first TX BD is always a long BD.
+ */
+#define BNGE_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
+
+#define RX_RING(bn, x) (((x) & (bn)->rx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define RX_AGG_RING(bn, x) (((x) & (bn)->rx_agg_ring_mask) >> \
+ (BNGE_PAGE_SHIFT - 4))
+#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(bn, x) (((x) & (bn)->tx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNGE_PAGE_SHIFT - 4))
+#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
+
+#define TX_CMP_VALID(bn, txcmp, raw_cons) \
+ (!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) == \
+ !((raw_cons) & (bn)->cp_bit))
+
+#define RX_CMP_VALID(bn, rxcmp1, raw_cons) \
+ (!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
+ !((raw_cons) & (bn)->cp_bit))
+
+#define RX_AGG_CMP_VALID(bn, agg, raw_cons) \
+ (!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \
+ !((raw_cons) & (bn)->cp_bit))
+
+#define NQ_CMP_VALID(bn, nqcmp, raw_cons) \
+ (!!((nqcmp)->v & cpu_to_le32(NQ_CN_V)) == !((raw_cons) & (bn)->cp_bit))
+
+#define TX_CMP_TYPE(txcmp) \
+ (le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
+
+#define RX_CMP_TYPE(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
+
+#define RING_RX(bn, idx) ((idx) & (bn)->rx_ring_mask)
+#define NEXT_RX(idx) ((idx) + 1)
+
+#define RING_RX_AGG(bn, idx) ((idx) & (bn)->rx_agg_ring_mask)
+#define NEXT_RX_AGG(idx) ((idx) + 1)
+
+#define SW_TX_RING(bn, idx) ((idx) & (bn)->tx_ring_mask)
+#define NEXT_TX(idx) ((idx) + 1)
+
+#define ADV_RAW_CMP(idx, n) ((idx) + (n))
+#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
+#define RING_CMP(bn, idx) ((idx) & (bn)->cp_ring_mask)
+#define NEXT_CMP(bn, idx) RING_CMP(bn, ADV_RAW_CMP(idx, 1))
+
+#define RX_CMP_ITYPES(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_FLAGS_ITYPES_MASK)
+
+#define RX_CMP_CFA_CODE(rxcmpl1) \
+ ((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \
+ RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT)
+
+irqreturn_t bnge_msix(int irq, void *dev_instance);
+netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void bnge_reuse_rx_data(struct bnge_rx_ring_info *rxr, u16 cons, void *data);
+int bnge_napi_poll(struct napi_struct *napi, int budget);
+netdev_features_t bnge_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
+#endif /* _BNGE_TXRX_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8419d1eb4035..fb45e1dd1dd7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -517,9 +517,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto tx_free;
-
length = skb->len;
len = skb_headlen(skb);
last_frag = skb_shinfo(skb)->nr_frags;
@@ -905,7 +902,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
{
- return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
+ return rxr->need_head_pool || rxr->rx_page_size < PAGE_SIZE;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -915,9 +912,9 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
{
struct page *page;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ if (rxr->rx_page_size < PAGE_SIZE) {
page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
- BNXT_RX_PAGE_SIZE);
+ rxr->rx_page_size);
} else {
page = page_pool_dev_alloc_pages(rxr->page_pool);
*offset = 0;
@@ -936,8 +933,9 @@ static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
{
netmem_ref netmem;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
- netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp);
+ if (rxr->rx_page_size < PAGE_SIZE) {
+ netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
+ rxr->rx_page_size, gfp);
} else {
netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
*offset = 0;
@@ -1155,9 +1153,9 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
bp->rx_dir);
- skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
+ skb = napi_build_skb(data_ptr - bp->rx_offset, rxr->rx_page_size);
if (!skb) {
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
@@ -1189,7 +1187,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
bp->rx_dir);
if (unlikely(!payload))
@@ -1203,7 +1201,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
skb_mark_for_recycle(skb);
off = (void *)data_ptr - page_address(page);
- skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
+ skb_add_rx_frag(skb, 0, page, off, len, rxr->rx_page_size);
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
payload + NET_IP_ALIGN);
@@ -1288,7 +1286,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
if (skb) {
skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
cons_rx_buf->offset,
- frag_len, BNXT_RX_PAGE_SIZE);
+ frag_len, rxr->rx_page_size);
} else {
skb_frag_t *frag = &shinfo->frags[i];
@@ -1313,7 +1311,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
if (skb) {
skb->len -= frag_len;
skb->data_len -= frag_len;
- skb->truesize -= BNXT_RX_PAGE_SIZE;
+ skb->truesize -= rxr->rx_page_size;
}
--shinfo->nr_frags;
@@ -1328,7 +1326,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
}
page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
- BNXT_RX_PAGE_SIZE);
+ rxr->rx_page_size);
total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
@@ -1803,7 +1801,8 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
struct bnxt_tpa_info *tpa_info,
struct rx_tpa_end_cmp *tpa_end,
struct rx_tpa_end_cmp_ext *tpa_end1,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct bnxt_rx_sw_stats *rx_stats)
{
#ifdef CONFIG_INET
int payload_off;
@@ -1813,6 +1812,9 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
if (segs == 1)
return skb;
+ rx_stats->rx_hw_gro_packets++;
+ rx_stats->rx_hw_gro_wire_packets += segs;
+
NAPI_GRO_CB(skb)->count = segs;
skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
@@ -1986,7 +1988,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (gro)
- skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
+ skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb,
+ &cpr->sw_stats->rx);
return skb;
}
@@ -2290,8 +2293,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!skb)
goto oom_next_rx;
} else {
- skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
- rxr->page_pool, &xdp);
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr, &xdp);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
@@ -3825,23 +3827,40 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
}
}
+static int bnxt_rx_agg_ring_fill_level(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ /* The user may have chosen a larger-than-default rx_page_size.
+ * We keep the ring sizes uniform and also want a uniform number of
+ * bytes consumed per ring, so cap how much of the rings we fill.
+ */
+ int fill_level = bp->rx_agg_ring_size;
+
+ if (rxr->rx_page_size > BNXT_RX_PAGE_SIZE)
+ fill_level /= rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
+
+ return fill_level;
+}
+
static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
int numa_node)
{
- const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
+ unsigned int agg_size_fac = rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
struct page_pool_params pp = { 0 };
struct page_pool *pool;
- pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
+ pp.pool_size = bnxt_rx_agg_ring_fill_level(bp, rxr) / agg_size_fac;
if (BNXT_RX_PAGE_MODE(bp))
pp.pool_size += bp->rx_ring_size / rx_size_fac;
+
+ pp.order = get_order(rxr->rx_page_size);
pp.nid = numa_node;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
- pp.max_len = PAGE_SIZE;
+ pp.max_len = PAGE_SIZE << pp.order;
pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
PP_FLAG_ALLOW_UNREADABLE_NETMEM;
pp.queue_idx = rxr->bnapi->index;
@@ -3852,7 +3871,10 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
rxr->page_pool = pool;
rxr->need_head_pool = page_pool_is_unreadable(pool);
+ rxr->need_head_pool |= !!pp.order;
if (bnxt_separate_head_pool(rxr)) {
+ pp.order = 0;
+ pp.max_len = PAGE_SIZE;
pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pool = page_pool_create(&pp);
@@ -4306,6 +4328,7 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct netdev_queue_config qcfg;
struct bnxt_ring_mem_info *rmem;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
@@ -4328,6 +4351,9 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
if (!rxr)
goto skip_rx;
+ netdev_queue_config(bp->dev, i, &qcfg);
+ rxr->rx_page_size = qcfg.rx_page_size;
+
ring = &rxr->rx_ring_struct;
rmem = &ring->ring_mem;
rmem->nr_pages = bp->rx_nr_pages;
@@ -4405,11 +4431,13 @@ static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
int ring_nr)
{
+ int fill_level, i;
u32 prod;
- int i;
+
+ fill_level = bnxt_rx_agg_ring_fill_level(bp, rxr);
prod = rxr->rx_agg_prod;
- for (i = 0; i < bp->rx_agg_ring_size; i++) {
+ for (i = 0; i < fill_level; i++) {
if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
ring_nr, i, bp->rx_agg_ring_size);
@@ -4487,7 +4515,7 @@ static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
ring = &rxr->rx_agg_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
- type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ type = ((u32)rxr->rx_page_size << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD;
/* On P7, setting EOP will cause the chip to disable
@@ -6567,6 +6595,9 @@ int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (!rx_rings)
return 0;
+ if (bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX)
+ return BNXT_RSS_TABLE_MAX_TBL_P5;
+
return bnxt_calc_nr_ring_pages(rx_rings - 1,
BNXT_RSS_TABLE_ENTRIES_P5);
}
@@ -7065,6 +7096,7 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
struct hwrm_ring_alloc_input *req,
+ struct bnxt_rx_ring_info *rxr,
struct bnxt_ring_struct *ring)
{
struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
@@ -7074,7 +7106,7 @@ static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
if (ring_type == HWRM_RING_ALLOC_AGG) {
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
- req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ req->rx_buf_size = cpu_to_le16(rxr->rx_page_size);
enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
} else {
req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
@@ -7088,6 +7120,7 @@ static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
}
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
struct bnxt_ring_struct *ring,
u32 ring_type, u32 map_index)
{
@@ -7144,7 +7177,8 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
cpu_to_le32(bp->rx_ring_mask + 1) :
cpu_to_le32(bp->rx_agg_ring_mask + 1);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
+ bnxt_set_rx_ring_params_p5(bp, ring_type, req,
+ rxr, ring);
break;
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -7292,7 +7326,7 @@ static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
u32 map_idx = bnapi->index;
int rc;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
if (rc)
return rc;
@@ -7312,7 +7346,7 @@ static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
int rc;
map_idx = grp_idx + bp->rx_nr_rings;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
if (rc)
return rc;
@@ -7336,7 +7370,7 @@ static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
ring = &cpr->cp_ring_struct;
ring->handle = BNXT_SET_NQ_HDL(cpr);
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
if (rc)
return rc;
bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
@@ -7351,7 +7385,7 @@ static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
const u32 type = HWRM_RING_ALLOC_TX;
int rc;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, tx_idx);
if (rc)
return rc;
bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
@@ -7377,7 +7411,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
vector = bp->irq_tbl[map_idx].vector;
disable_irq_nosync(vector);
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
if (rc) {
enable_irq(vector);
goto err_out;
@@ -7917,13 +7951,28 @@ static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
return 1;
}
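+/* Collect all hardware resources required by the current configuration
+ * into one structure.
+ */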
+static void bnxt_get_total_resources(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ hwr->cp = bnxt_nq_rings_in_use(bp);
+ hwr->cp_p5 = 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr->cp_p5 = bnxt_cp_rings_in_use(bp);
+ hwr->tx = bp->tx_nr_rings;
+ hwr->rx = bp->rx_nr_rings;
+ hwr->grp = hwr->rx;
+ hwr->vnic = bnxt_get_total_vnics(bp, hwr->rx);
+ hwr->rss_ctx = bnxt_get_total_rss_ctxs(bp, hwr);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ hwr->rx <<= 1;
+ hwr->stat = bnxt_get_func_stat_ctxs(bp);
+}
+
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int cp = bnxt_cp_rings_in_use(bp);
- int nq = bnxt_nq_rings_in_use(bp);
- int rx = bp->rx_nr_rings, stat;
- int vnic, grp = rx;
+ struct bnxt_hw_rings hwr;
+
+ bnxt_get_total_resources(bp, &hwr);
/* Old firmware does not need RX ring reservations but we still
* need to setup a default RSS map when needed. With new firmware
@@ -7933,25 +7982,27 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
if (!BNXT_NEW_RM(bp))
bnxt_check_rss_tbl_no_rmgr(bp);
- if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
- bp->hwrm_spec_code >= 0x10601)
+ if (hw_resc->resv_tx_rings != hwr.tx && bp->hwrm_spec_code >= 0x10601)
return true;
if (!BNXT_NEW_RM(bp))
return false;
- vnic = bnxt_get_total_vnics(bp, rx);
-
- if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx <<= 1;
- stat = bnxt_get_func_stat_ctxs(bp);
- if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
- hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
- (hw_resc->resv_hw_ring_grps != grp &&
+ if (hw_resc->resv_rx_rings != hwr.rx ||
+ hw_resc->resv_vnics != hwr.vnic ||
+ hw_resc->resv_stat_ctxs != hwr.stat ||
+ hw_resc->resv_rsscos_ctxs != hwr.rss_ctx ||
+ (hw_resc->resv_hw_ring_grps != hwr.grp &&
!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
return true;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ if (hw_resc->resv_cp_rings != hwr.cp_p5)
+ return true;
+ } else if (hw_resc->resv_cp_rings != hwr.cp) {
+ return true;
+ }
if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
- hw_resc->resv_irqs != nq)
+ hw_resc->resv_irqs != hwr.cp)
return true;
return false;
}
@@ -8077,6 +8128,11 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
bp->rx_nr_rings = rx_rings;
bp->cp_nr_rings = hwr.cp;
+ /* Fall back if we cannot reserve enough HW RSS contexts */
+ if ((bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX) &&
+ hwr.rss_ctx < bnxt_get_total_rss_ctxs(bp, &hwr))
+ bp->rss_cap &= ~BNXT_RSS_CAP_LARGE_RSS_CTX;
+
if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
@@ -9567,6 +9623,10 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ if (hw_resc->max_rsscos_ctxs >=
+ hw_resc->max_vnics * BNXT_LARGE_RSS_TO_VNIC_RATIO)
+ bp->rss_cap |= BNXT_RSS_CAP_LARGE_RSS_CTX;
+
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 max_msix = le16_to_cpu(resp->max_msix);
@@ -9700,6 +9760,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_PTP_PTM;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
@@ -11901,6 +11963,26 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info)
}
}
+static char *bnxt_link_down_reason(struct bnxt_link_info *link_info)
+{
+ u8 reason = link_info->link_down_reason;
+
+ /* Multiple bits can be set; report only the highest-priority
+ * one.
+ */
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF)
+ return "(Remote fault)";
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION)
+ return "(OTP Speed limit violation)";
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED)
+ return "(Cable removed)";
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT)
+ return "(Module fault)";
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST)
+ return "(BMC request down)";
+ return "";
+}
+
void bnxt_report_link(struct bnxt *bp)
{
if (BNXT_LINK_IS_UP(bp)) {
@@ -11958,8 +12040,10 @@ void bnxt_report_link(struct bnxt *bp)
(fec & BNXT_FEC_AUTONEG) ? "on" : "off",
bnxt_report_fec(&bp->link_info));
} else {
+ char *str = bnxt_link_down_reason(&bp->link_info);
+
netif_carrier_off(bp->dev);
- netdev_err(bp->dev, "NIC Link is Down\n");
+ netdev_err(bp->dev, "NIC Link is Down %s\n", str);
}
}
@@ -12159,6 +12243,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->phy_addr = resp->eee_config_phy_addr &
PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
link_info->module_status = resp->module_status;
+ link_info->link_down_reason = resp->link_down_reason;
if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
struct ethtool_keee *eee = &bp->eee;
@@ -13426,6 +13511,8 @@ static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
stats->rx_total_ring_discards +=
BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
+ stats->rx_total_hw_gro_packets += sw_stats->rx.rx_hw_gro_packets;
+ stats->rx_total_hw_gro_wire_packets += sw_stats->rx.rx_hw_gro_wire_packets;
stats->tx_total_resets += sw_stats->tx.tx_resets;
stats->tx_total_ring_discards +=
BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
@@ -13815,7 +13902,6 @@ static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
u8 **nextp)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
- struct hop_jumbo_hdr *jhdr;
int hdr_count = 0;
u8 *nexthdr;
int start;
@@ -13844,24 +13930,7 @@ static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
if (hdrlen > 64)
return false;
- /* The ext header may be a hop-by-hop header inserted for
- * big TCP purposes. This will be removed before sending
- * from NIC, so do not count it.
- */
- if (*nexthdr == NEXTHDR_HOP) {
- if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
- goto increment_hdr;
-
- jhdr = (struct hop_jumbo_hdr *)hp;
- if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
- jhdr->nexthdr != IPPROTO_TCP)
- goto increment_hdr;
-
- goto next_hdr;
- }
-increment_hdr:
hdr_count++;
-next_hdr:
nexthdr = &hp->nexthdr;
start += hdrlen;
}
@@ -15865,6 +15934,8 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
+ stats->hw_gro_packets = cpr->sw_stats->rx.rx_hw_gro_packets;
+ stats->hw_gro_wire_packets = cpr->sw_stats->rx.rx_hw_gro_wire_packets;
}
static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
@@ -15900,6 +15971,8 @@ static void bnxt_get_base_stats(struct net_device *dev,
rx->packets = bp->net_stats_prev.rx_packets;
rx->bytes = bp->net_stats_prev.rx_bytes;
rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
+ rx->hw_gro_packets = bp->ring_err_stats_prev.rx_total_hw_gro_packets;
+ rx->hw_gro_wire_packets = bp->ring_err_stats_prev.rx_total_hw_gro_wire_packets;
tx->packets = bp->net_stats_prev.tx_packets;
tx->bytes = bp->net_stats_prev.tx_bytes;
@@ -15911,7 +15984,36 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
.get_base_stats = bnxt_get_base_stats,
};
-static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+static void bnxt_queue_default_qcfg(struct net_device *dev,
+ struct netdev_queue_config *qcfg)
+{
+ qcfg->rx_page_size = BNXT_RX_PAGE_SIZE;
+}
+
+static int bnxt_validate_qcfg(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ /* Older chips need MSS calc so rx_page_size is not supported */
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ qcfg->rx_page_size != BNXT_RX_PAGE_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(qcfg->rx_page_size))
+ return -ERANGE;
+
+ if (qcfg->rx_page_size < BNXT_RX_PAGE_SIZE ||
+ qcfg->rx_page_size > BNXT_MAX_RX_PAGE_SIZE)
+ return -ERANGE;
+
+ return 0;
+}
+
+static int bnxt_queue_mem_alloc(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *qmem, int idx)
{
struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt *bp = netdev_priv(dev);
@@ -15932,6 +16034,7 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
clone->rx_sw_agg_prod = 0;
clone->rx_next_cons = 0;
clone->need_head_pool = false;
+ clone->rx_page_size = qcfg->rx_page_size;
rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
if (rc)
@@ -16058,6 +16161,8 @@ static void bnxt_copy_rx_ring(struct bnxt *bp,
src_ring = &src->rx_agg_ring_struct;
src_rmem = &src_ring->ring_mem;
+ dst->rx_page_size = src->rx_page_size;
+
WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
WARN_ON(dst_rmem->page_size != src_rmem->page_size);
WARN_ON(dst_rmem->flags != src_rmem->flags);
@@ -16077,7 +16182,9 @@ static void bnxt_copy_rx_ring(struct bnxt *bp,
dst->rx_agg_bmap = src->rx_agg_bmap;
}
-static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+static int bnxt_queue_start(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *qmem, int idx)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr, *clone;
@@ -16210,6 +16317,13 @@ static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
.ndo_queue_mem_free = bnxt_queue_mem_free,
.ndo_queue_start = bnxt_queue_start,
.ndo_queue_stop = bnxt_queue_stop,
+ .ndo_default_qcfg = bnxt_queue_default_qcfg,
+ .ndo_validate_qcfg = bnxt_validate_qcfg,
+ .supported_params = QCFG_RX_PAGE_SIZE,
+};
+
+static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops_unsupp = {
+ .ndo_default_qcfg = bnxt_queue_default_qcfg,
};
static void bnxt_remove_one(struct pci_dev *pdev)
@@ -16864,9 +16978,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+
+ dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops_unsupp;
if (BNXT_SUPPORTS_QUEUE_API(bp))
dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
- dev->request_ops_lock = true;
dev->netmem_tx = true;
rc = register_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f88e7769a838..9a41b9e0423c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -760,6 +760,7 @@ struct nqe_cn {
#endif
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+#define BNXT_MAX_RX_PAGE_SIZE BIT(15)
#define BNXT_MAX_MTU 9500
@@ -1105,6 +1106,7 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
+ u32 rx_page_size;
bool need_head_pool;
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
@@ -1124,8 +1126,11 @@ struct bnxt_rx_sw_stats {
u64 rx_l4_csum_errors;
u64 rx_resets;
u64 rx_buf_errors;
+ /* end of ethtool -S stats */
u64 rx_oom_discards;
u64 rx_netpoll_discards;
+ u64 rx_hw_gro_packets;
+ u64 rx_hw_gro_wire_packets;
};
struct bnxt_tx_sw_stats {
@@ -1152,6 +1157,9 @@ struct bnxt_total_ring_err_stats {
u64 tx_total_resets;
u64 tx_total_ring_discards;
u64 total_missed_irqs;
+ /* end of ethtool -S stats */
+ u64 rx_total_hw_gro_packets;
+ u64 rx_total_hw_gro_wire_packets;
};
struct bnxt_stats_mem {
@@ -1367,6 +1375,8 @@ struct bnxt_hw_resc {
u32 max_rx_wm_flows;
};
+#define BNXT_LARGE_RSS_TO_VNIC_RATIO 7
+
#if defined(CONFIG_BNXT_SRIOV)
struct bnxt_vf_info {
u16 fw_fid;
@@ -1551,6 +1561,7 @@ struct bnxt_link_info {
#define BNXT_LINK_STATE_DOWN 1
#define BNXT_LINK_STATE_UP 2
#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP)
+ u8 link_down_reason;
u8 active_lanes;
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
@@ -2410,6 +2421,7 @@ struct bnxt {
#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8)
#define BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP BIT(9)
+#define BNXT_RSS_CAP_LARGE_RSS_CTX BIT(10)
u8 rss_hash_key[HW_HASH_KEY_SIZE];
u8 rss_hash_key_valid:1;
@@ -2516,6 +2528,7 @@ struct bnxt {
#define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
#define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42)
#define BNXT_FW_CAP_MIRROR_ON_ROCE BIT_ULL(43)
+ #define BNXT_FW_CAP_PTP_PTM BIT_ULL(44)
u32 fw_dbg_cap;
@@ -2701,6 +2714,7 @@ struct bnxt {
#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8)
#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
+#define BNXT_PHY_FL_FDRSTATS (PORT_PHY_QCAPS_RESP_FLAGS2_FDRSTAT_CMD_SUPPORTED << 8)
/* copied from flags in hwrm_port_mac_qcaps_output */
u8 mac_flags;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 068e191ede19..53a83b6680c4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1346,16 +1346,17 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
struct bnxt_l2_filter *l2_fltr;
struct bnxt_flow_masks *fmasks;
struct flow_keys *fkeys;
- u32 idx, ring;
+ u32 idx;
int rc;
- u8 vf;
if (!bp->vnic_info)
return -EAGAIN;
- vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
- ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
- if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
+ if (fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT))
+ return -EOPNOTSUPP;
+
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+ ethtool_get_flow_spec_ring_vf(fs->ring_cookie))
return -EOPNOTSUPP;
if (flow_type == IP_USER_FLOW) {
@@ -1481,7 +1482,7 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
if (fs->ring_cookie == RX_CLS_FLOW_DISC)
new_fltr->base.flags |= BNXT_ACT_DROP;
else
- new_fltr->base.rxq = ring;
+ new_fltr->base.rxq = ethtool_get_flow_spec_ring(fs->ring_cookie);
__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
if (!rc) {
@@ -3216,6 +3217,56 @@ static int bnxt_get_fecparam(struct net_device *dev,
return 0;
}
+static const struct ethtool_fec_hist_range bnxt_fec_ranges[] = {
+ { 0, 0},
+ { 1, 1},
+ { 2, 2},
+ { 3, 3},
+ { 4, 4},
+ { 5, 5},
+ { 6, 6},
+ { 7, 7},
+ { 8, 8},
+ { 9, 9},
+ { 10, 10},
+ { 11, 11},
+ { 12, 12},
+ { 13, 13},
+ { 14, 14},
+ { 15, 15},
+ { 0, 0},
+};
+
+static void bnxt_hwrm_port_phy_fdrstat(struct bnxt *bp,
+ struct ethtool_fec_hist *hist)
+{
+ struct ethtool_fec_hist_value *values = hist->values;
+ struct hwrm_port_phy_fdrstat_output *resp;
+ struct hwrm_port_phy_fdrstat_input *req;
+ int rc, i;
+
+ if (!(bp->phy_flags & BNXT_PHY_FL_FDRSTATS))
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_FDRSTAT);
+ if (rc)
+ return;
+
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->ops = cpu_to_le16(PORT_PHY_FDRSTAT_REQ_OPS_COUNTER);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ hist->ranges = bnxt_fec_ranges;
+ for (i = 0; i <= 15; i++) {
+ __le64 sum = resp->accumulated_codewords_err_s[i];
+
+ values[i].sum = le64_to_cpu(sum);
+ }
+ }
+ hwrm_req_drop(bp, req);
+}
+
static void bnxt_get_fec_stats(struct net_device *dev,
struct ethtool_fec_stats *fec_stats,
struct ethtool_fec_hist *hist)
@@ -3237,6 +3288,7 @@ static void bnxt_get_fec_stats(struct net_device *dev,
*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
fec_stats->uncorrectable_blocks.total =
*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
+ bnxt_hwrm_port_phy_fdrstat(bp, hist);
}
static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
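Each entry in bnxt_fec_ranges above is a single-value bin: {i, i} counts FEC codewords that needed exactly i symbol corrections, as reported by the FDRSTAT firmware command. That makes aggregate figures easy to derive from the filled-in histogram; an illustrative helper, not part of this patch:

/* Illustrative only: total corrected symbols across all histogram bins. */
static u64 bnxt_fec_hist_corrected_symbols(const struct ethtool_fec_hist *hist)
{
	u64 total = 0;
	int i;

	/* bin i holds the count of codewords with exactly i corrections */
	for (i = 0; i <= 15; i++)
		total += (u64)i * hist->values[i].sum;
	return total;
}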
@@ -3381,6 +3433,40 @@ static u32 bnxt_get_link(struct net_device *dev)
return BNXT_LINK_IS_UP(bp);
}
+static int bnxt_get_link_ext_state(struct net_device *dev,
+ struct ethtool_link_ext_state_info *info)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u8 reason;
+
+ if (BNXT_LINK_IS_UP(bp))
+ return -ENODATA;
+
+ reason = bp->link_info.link_down_reason;
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF) {
+ info->link_ext_state = ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE;
+ info->link_training = ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT;
+ return 0;
+ }
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED) {
+ info->link_ext_state = ETHTOOL_LINK_EXT_STATE_NO_CABLE;
+ return 0;
+ }
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION) {
+ info->link_ext_state = ETHTOOL_LINK_EXT_STATE_OTP_SPEED_VIOLATION;
+ return 0;
+ }
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT) {
+ info->link_ext_state = ETHTOOL_LINK_EXT_STATE_MODULE;
+ return 0;
+ }
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST) {
+ info->link_ext_state = ETHTOOL_LINK_EXT_STATE_BMC_REQUEST_DOWN;
+ return 0;
+ }
+ return -ENODATA;
+}
+
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
{
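bnxt_get_link_ext_state() deliberately answers -ENODATA both when the link is up and when no down-reason is latched. A sketch of the assumed core-side contract (modeled on net/ethtool/linkstate.c, not code from this series): the op is only consulted while carrier is down, and -ENODATA is treated as "no extended reason available", not as a failure.

/* Assumed core usage of the new op; names here are illustrative. */
static int linkstate_ext_state_sketch(struct net_device *dev,
				      struct ethtool_link_ext_state_info *info)
{
	if (netif_carrier_ok(dev) || !dev->ethtool_ops->get_link_ext_state)
		return -EOPNOTSUPP;

	return dev->ethtool_ops->get_link_ext_state(dev, info);
}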
@@ -3797,9 +3883,25 @@ static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
#define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
+static int bnxt_hwrm_nvm_defrag(struct bnxt *bp)
+{
+ struct hwrm_nvm_defrag_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_DEFRAG);
+ if (rc)
+ return rc;
+ req->flags = cpu_to_le32(NVM_DEFRAG_REQ_FLAGS_DEFRAG);
+ hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
+
+ return hwrm_req_send(bp, req);
+}
+
static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
struct netlink_ext_ack *extack)
{
+ struct bnxt *bp = netdev_priv(dev);
+ bool retry = false;
u32 item_len;
int rc;
@@ -3812,9 +3914,19 @@ static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
}
if (fw_size > item_len) {
- rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
- BNX_DIR_ORDINAL_FIRST, 0, 1,
- round_up(fw_size, 4096), NULL, 0);
+ do {
+ rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, 0, 1,
+ round_up(fw_size, 4096), NULL,
+ 0);
+
+ if (rc == -ENOSPC) {
+ if (retry || bnxt_hwrm_nvm_defrag(bp))
+ break;
+ retry = true;
+ }
+ } while (rc == -ENOSPC);
+
if (rc) {
BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
return rc;
@@ -5634,6 +5746,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_eeprom = bnxt_get_eeprom,
.set_eeprom = bnxt_set_eeprom,
.get_link = bnxt_get_link,
+ .get_link_ext_state = bnxt_get_link_ext_state,
.get_link_ext_stats = bnxt_get_link_ext_stats,
.get_eee = bnxt_get_eee,
.set_eee = bnxt_set_eee,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index a8a74f07bb54..ad89c5fa9b40 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -882,6 +882,51 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
}
}
+#ifdef CONFIG_X86
+static int bnxt_phc_get_syncdevicetime(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct bnxt_ptp_cfg *ptp = (struct bnxt_ptp_cfg *)ctx;
+ struct hwrm_func_ptp_ts_query_output *resp;
+ struct hwrm_func_ptp_ts_query_input *req;
+ struct bnxt *bp = ptp->bp;
+ u64 ptm_local_ts;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_TS_QUERY);
+ if (rc)
+ return rc;
+ req->flags = cpu_to_le32(FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc) {
+ hwrm_req_drop(bp, req);
+ return rc;
+ }
+ ptm_local_ts = le64_to_cpu(resp->ptm_local_ts);
+ *device = ns_to_ktime(bnxt_timecounter_cyc2time(ptp, ptm_local_ts));
+ /* ptm_system_ts is the 64-bit ART time; use_nsecs marks it as ns */
+ system->cycles = le64_to_cpu(resp->ptm_system_ts);
+ system->cs_id = CSID_X86_ART;
+ system->use_nsecs = true;
+
+ hwrm_req_drop(bp, req);
+
+ return 0;
+}
+
+static int bnxt_ptp_getcrosststamp(struct ptp_clock_info *ptp_info,
+ struct system_device_crosststamp *xtstamp)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+
+ return get_device_system_crosststamp(bnxt_phc_get_syncdevicetime,
+ ptp, NULL, xtstamp);
+}
+#endif /* CONFIG_X86 */
+
static const struct ptp_clock_info bnxt_ptp_caps = {
.owner = THIS_MODULE,
.name = "bnxt clock",
@@ -1094,6 +1139,12 @@ int bnxt_ptp_init(struct bnxt *bp)
if (bnxt_ptp_pps_init(bp))
netdev_err(bp->dev, "1pps not initialized, continuing without 1pps support\n");
}
+#ifdef CONFIG_X86
+ if ((bp->fw_cap & BNXT_FW_CAP_PTP_PTM) && pcie_ptm_enabled(bp->pdev) &&
+ boot_cpu_has(X86_FEATURE_ART))
+ ptp->ptp_info.getcrosststamp = bnxt_ptp_getcrosststamp;
+#endif /* CONFIG_X86 */
+
ptp->ptp_clock = ptp_clock_register(&ptp->ptp_info, &bp->pdev->dev);
if (IS_ERR(ptp->ptp_clock)) {
int err = PTR_ERR(ptp->ptp_clock);
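With getcrosststamp populated, the PHC advertises precise device/system cross-timestamping, which userspace reaches through the long-standing PTP_SYS_OFFSET_PRECISE ioctl. A self-contained example against the generic UAPI (the /dev/ptp0 path is an assumption; nothing here is bnxt-specific):

/* Query one PTM-backed cross-timestamp from a PHC character device. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise xts = { 0 };
	int fd = open("/dev/ptp0", O_RDONLY);	/* assumed device node */

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &xts) < 0) {
		perror("PTP_SYS_OFFSET_PRECISE");
		return 1;
	}
	printf("device %lld.%09u system %lld.%09u\n",
	       (long long)xts.device.sec, xts.device.nsec,
	       (long long)xts.sys_realtime.sec, xts.sys_realtime.nsec);
	return 0;
}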
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c94a391b1ba5..85cbeb35681c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -183,7 +183,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 cons, u8 *data_ptr, unsigned int len,
struct xdp_buff *xdp)
{
- u32 buflen = BNXT_RX_PAGE_SIZE;
+ u32 buflen = rxr->rx_page_size;
struct bnxt_sw_rx_bd *rx_buf;
struct pci_dev *pdev;
dma_addr_t mapping;
@@ -460,7 +460,7 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
- struct page_pool *pool, struct xdp_buff *xdp)
+ struct bnxt_rx_ring_info *rxr, struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -468,7 +468,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
return NULL;
xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * num_frags,
+ rxr->rx_page_size * num_frags,
xdp_buff_get_skb_flags(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 220285e190fc..8933a0dec09a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -32,6 +32,6 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
- u8 num_frags, struct page_pool *pool,
+ u8 num_frags, struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
#endif
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6511ecd5856b..43cd013bb70e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -705,14 +705,12 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);
- /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
- * cleared the pipeline and control registers.
- */
- macb_init_buffers(bp);
-
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue->tx_head = 0;
+ queue->tx_tail = 0;
queue_writel(queue, IER,
bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+ }
}
macb_or_gem_writel(bp, NCFGR, ctrl);
@@ -2954,6 +2952,7 @@ static int macb_open(struct net_device *dev)
}
bp->macbgem_ops.mog_init_rings(bp);
+ macb_init_buffers(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_enable(&queue->napi_rx);
@@ -3850,6 +3849,13 @@ static int gem_get_all_flow_entries(struct net_device *netdev,
return 0;
}
+static u32 gem_get_rx_ring_count(struct net_device *netdev)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ return bp->num_queues;
+}
+
static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -3857,9 +3863,6 @@ static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = bp->num_queues;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->rx_fs_list.count;
break;
@@ -3941,6 +3944,7 @@ static const struct ethtool_ops gem_ethtool_ops = {
.set_ringparam = macb_set_ringparam,
.get_rxnfc = gem_get_rxnfc,
.set_rxnfc = gem_set_rxnfc,
+ .get_rx_ring_count = gem_get_rx_ring_count,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -4811,7 +4815,9 @@ static int at91ether_close(struct net_device *dev)
at91ether_stop(lp);
- return pm_runtime_put(&lp->pdev->dev);
+ pm_runtime_put(&lp->pdev->dev);
+
+ return 0;
}
/* Transmit packet */
@@ -5431,9 +5437,9 @@ static const struct macb_config default_gem_config = {
static int macb_probe(struct platform_device *pdev)
{
- const struct macb_config *macb_config = &default_gem_config;
- struct device_node *np = pdev->dev.of_node;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
+ struct device_node *np = pdev->dev.of_node;
+ const struct macb_config *macb_config;
struct clk *tsu_clk = NULL;
phy_interface_t interface;
struct net_device *dev;
@@ -5449,13 +5455,9 @@ static int macb_probe(struct platform_device *pdev)
if (IS_ERR(mem))
return PTR_ERR(mem);
- if (np) {
- const struct of_device_id *match;
-
- match = of_match_node(macb_dt_ids, np);
- if (match && match->data)
- macb_config = match->data;
- }
+ macb_config = of_device_get_match_data(&pdev->dev);
+ if (!macb_config)
+ macb_config = &default_gem_config;
err = macb_config->clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
if (err)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 23326235d4ab..faf8f7e86520 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1784,6 +1784,13 @@ static int cxgb4_get_rxfh_fields(struct net_device *dev,
return 0;
}
+static u32 get_rx_ring_count(struct net_device *dev)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ return pi->nqsets;
+}
+
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -1793,9 +1800,6 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
int ret = 0;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = pi->nqsets;
- return 0;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt =
adap->ethtool_filters->port[pi->port_id].in_use;
@@ -2200,6 +2204,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_regs = get_regs,
.get_rxnfc = get_rxnfc,
.set_rxnfc = set_rxnfc,
+ .get_rx_ring_count = get_rx_ring_count,
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh = get_rss_table,
.set_rxfh = set_rss_table,
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index a50f5dad34d5..471613899ec0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -573,6 +573,13 @@ static int enic_get_rx_flow_hash(struct net_device *dev,
return 0;
}
+static u32 enic_get_rx_ring_count(struct net_device *dev)
+{
+ struct enic *enic = netdev_priv(dev);
+
+ return enic->rq_count;
+}
+
static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -580,9 +587,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = enic->rq_count;
- break;
case ETHTOOL_GRXCLSRLCNT:
spin_lock_bh(&enic->rfs_h.lock);
cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
@@ -689,6 +693,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
.get_rxnfc = enic_get_rxnfc,
+ .get_rx_ring_count = enic_get_rx_ring_count,
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
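cxgb4, enic and macb above, and benet and tsnep further below, all make the same mechanical conversion: the ETHTOOL_GRXRINGS case leaves get_rxnfc() and becomes a dedicated get_rx_ring_count() op. The core-side dispatch is presumably along these lines (a sketch of assumed behavior, not code from this series):

/* Sketch of the assumed ethtool-core handling of ETHTOOL_GRXRINGS. */
static int ethtool_rxrings_sketch(struct net_device *dev,
				  struct ethtool_rxnfc *info)
{
	const struct ethtool_ops *ops = dev->ethtool_ops;

	if (ops->get_rx_ring_count) {
		info->data = ops->get_rx_ring_count(dev);
		return 0;
	}
	/* unconverted drivers still answer via get_rxnfc() */
	if (ops->get_rxnfc)
		return ops->get_rxnfc(dev, info, NULL);
	return -EOPNOTSUPP;
}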
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 846d58c769ea..69bfb8265d57 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -279,18 +279,15 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
card_idx++;
- printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
- dev->name, np->name, dev->dev_addr, irq);
+ netdev_info(dev, "%s, %pM, IRQ %d\n", np->name, dev->dev_addr, irq);
if (tx_coalesce > 1)
- printk(KERN_INFO "tx_coalesce:\t%d packets\n",
- tx_coalesce);
- if (np->coalesce)
- printk(KERN_INFO
- "rx_coalesce:\t%d packets\n"
- "rx_timeout: \t%d ns\n",
- np->rx_coalesce, np->rx_timeout*640);
+ netdev_dbg(dev, "tx_coalesce:\t%d packets", tx_coalesce);
+ if (np->coalesce) {
+ netdev_dbg(dev, "rx_coalesce:\t%d packets", np->rx_coalesce);
+ netdev_dbg(dev, "rx_timeout: \t%d ns", np->rx_timeout * 640);
+ }
if (np->vlan)
- printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
+ netdev_dbg(dev, "vlan(id):\t%d", np->vlan);
return 0;
err_out_unmap_rx:
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
deleted file mode 100644
index 0de3cd660ec8..000000000000
--- a/drivers/net/ethernet/dnet.c
+++ /dev/null
@@ -1,877 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Dave DNET Ethernet Controller driver
- *
- * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
- * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
- */
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-
-#include "dnet.h"
-
-#undef DEBUG
-
-/* function for reading internal MAC register */
-static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
-{
- u16 data_read;
-
- /* issue a read */
- dnet_writel(bp, reg, MACREG_ADDR);
-
- /* since a read/write op to the MAC is very slow,
- * we must wait before reading the data */
- ndelay(500);
-
- /* read data read from the MAC register */
- data_read = dnet_readl(bp, MACREG_DATA);
-
- /* all done */
- return data_read;
-}
-
-/* function for writing internal MAC register */
-static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
-{
- /* load data to write */
- dnet_writel(bp, val, MACREG_DATA);
-
- /* issue a write */
- dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);
-
- /* since a read/write op to the MAC is very slow,
- * we must wait before exiting */
- ndelay(500);
-}
-
-static void __dnet_set_hwaddr(struct dnet *bp)
-{
- u16 tmp;
-
- tmp = be16_to_cpup((const __be16 *)bp->dev->dev_addr);
- dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
- tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 2));
- dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
- tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 4));
- dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
-}
-
-static void dnet_get_hwaddr(struct dnet *bp)
-{
- u16 tmp;
- u8 addr[6];
-
- /*
- * from MAC docs:
- * "Note that the MAC address is stored in the registers in Hexadecimal
- * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
- * would require writing 0xAC (octet 0) to address 0x0B (high byte of
- * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
- * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
- * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of
- * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of
- * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of
- * Mac_addr[15:0]).
- */
- tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
- *((__be16 *)addr) = cpu_to_be16(tmp);
- tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
- *((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
- tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
- *((__be16 *)(addr + 4)) = cpu_to_be16(tmp);
-
- if (is_valid_ether_addr(addr))
- eth_hw_addr_set(bp->dev, addr);
-}
-
-static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
- struct dnet *bp = bus->priv;
- u16 value;
-
- while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
- & DNET_INTERNAL_GMII_MNG_CMD_FIN))
- cpu_relax();
-
- /* only 5 bits allowed for phy-addr and reg_offset */
- mii_id &= 0x1f;
- regnum &= 0x1f;
-
- /* prepare reg_value for a read */
- value = (mii_id << 8);
- value |= regnum;
-
- /* write control word */
- dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);
-
- /* wait for end of transfer */
- while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
- & DNET_INTERNAL_GMII_MNG_CMD_FIN))
- cpu_relax();
-
- value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);
-
- pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);
-
- return value;
-}
-
-static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
-{
- struct dnet *bp = bus->priv;
- u16 tmp;
-
- pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);
-
- while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
- & DNET_INTERNAL_GMII_MNG_CMD_FIN))
- cpu_relax();
-
- /* prepare for a write operation */
- tmp = (1 << 13);
-
- /* only 5 bits allowed for phy-addr and reg_offset */
- mii_id &= 0x1f;
- regnum &= 0x1f;
-
- /* only 16 bits on data */
- value &= 0xffff;
-
- /* prepare reg_value for a write */
- tmp |= (mii_id << 8);
- tmp |= regnum;
-
- /* write data to write first */
- dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);
-
- /* write control word */
- dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);
-
- while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
- & DNET_INTERNAL_GMII_MNG_CMD_FIN))
- cpu_relax();
-
- return 0;
-}
-
-static void dnet_handle_link_change(struct net_device *dev)
-{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- unsigned long flags;
- u32 mode_reg, ctl_reg;
-
- int status_change = 0;
-
- spin_lock_irqsave(&bp->lock, flags);
-
- mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
- ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);
-
- if (phydev->link) {
- if (bp->duplex != phydev->duplex) {
- if (phydev->duplex)
- ctl_reg &=
- ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
- else
- ctl_reg |=
- DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;
-
- bp->duplex = phydev->duplex;
- status_change = 1;
- }
-
- if (bp->speed != phydev->speed) {
- status_change = 1;
- switch (phydev->speed) {
- case 1000:
- mode_reg |= DNET_INTERNAL_MODE_GBITEN;
- break;
- case 100:
- case 10:
- mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
- break;
- default:
- printk(KERN_WARNING
- "%s: Ack! Speed (%d) is not "
- "10/100/1000!\n", dev->name,
- phydev->speed);
- break;
- }
- bp->speed = phydev->speed;
- }
- }
-
- if (phydev->link != bp->link) {
- if (phydev->link) {
- mode_reg |=
- (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
- } else {
- mode_reg &=
- ~(DNET_INTERNAL_MODE_RXEN |
- DNET_INTERNAL_MODE_TXEN);
- bp->speed = 0;
- bp->duplex = -1;
- }
- bp->link = phydev->link;
-
- status_change = 1;
- }
-
- if (status_change) {
- dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
- dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
- }
-
- spin_unlock_irqrestore(&bp->lock, flags);
-
- if (status_change) {
- if (phydev->link)
- printk(KERN_INFO "%s: link up (%d/%s)\n",
- dev->name, phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
- else
- printk(KERN_INFO "%s: link down\n", dev->name);
- }
-}
-
-static int dnet_mii_probe(struct net_device *dev)
-{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = NULL;
-
- /* find the first phy */
- phydev = phy_find_first(bp->mii_bus);
-
- if (!phydev) {
- printk(KERN_ERR "%s: no PHY found\n", dev->name);
- return -ENODEV;
- }
-
- /* TODO : add pin_irq */
-
- /* attach the mac to the phy */
- if (bp->capabilities & DNET_HAS_RMII) {
- phydev = phy_connect(dev, phydev_name(phydev),
- &dnet_handle_link_change,
- PHY_INTERFACE_MODE_RMII);
- } else {
- phydev = phy_connect(dev, phydev_name(phydev),
- &dnet_handle_link_change,
- PHY_INTERFACE_MODE_MII);
- }
-
- if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(phydev);
- }
-
- /* mask with MAC supported features */
- if (bp->capabilities & DNET_HAS_GIGABIT)
- phy_set_max_speed(phydev, SPEED_1000);
- else
- phy_set_max_speed(phydev, SPEED_100);
-
- phy_support_asym_pause(phydev);
-
- bp->link = 0;
- bp->speed = 0;
- bp->duplex = -1;
-
- return 0;
-}
-
-static int dnet_mii_init(struct dnet *bp)
-{
- int err;
-
- bp->mii_bus = mdiobus_alloc();
- if (bp->mii_bus == NULL)
- return -ENOMEM;
-
- bp->mii_bus->name = "dnet_mii_bus";
- bp->mii_bus->read = &dnet_mdio_read;
- bp->mii_bus->write = &dnet_mdio_write;
-
- snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- bp->pdev->name, bp->pdev->id);
-
- bp->mii_bus->priv = bp;
-
- if (mdiobus_register(bp->mii_bus)) {
- err = -ENXIO;
- goto err_out;
- }
-
- if (dnet_mii_probe(bp->dev) != 0) {
- err = -ENXIO;
- goto err_out_unregister_bus;
- }
-
- return 0;
-
-err_out_unregister_bus:
- mdiobus_unregister(bp->mii_bus);
-err_out:
- mdiobus_free(bp->mii_bus);
- return err;
-}
-
-/* For Neptune board: LINK1000 as Link LED and TX as activity LED */
-static int dnet_phy_marvell_fixup(struct phy_device *phydev)
-{
- return phy_write(phydev, 0x18, 0x4148);
-}
-
-static void dnet_update_stats(struct dnet *bp)
-{
- u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
- u32 *p = &bp->hw_stats.rx_pkt_ignr;
- u32 *end = &bp->hw_stats.rx_byte + 1;
-
- WARN_ON((unsigned long)(end - p - 1) !=
- (DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);
-
- for (; p < end; p++, reg++)
- *p += readl(reg);
-
- reg = bp->regs + DNET_TX_UNICAST_CNT;
- p = &bp->hw_stats.tx_unicast;
- end = &bp->hw_stats.tx_byte + 1;
-
- WARN_ON((unsigned long)(end - p - 1) !=
- (DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);
-
- for (; p < end; p++, reg++)
- *p += readl(reg);
-}
-
-static int dnet_poll(struct napi_struct *napi, int budget)
-{
- struct dnet *bp = container_of(napi, struct dnet, napi);
- struct net_device *dev = bp->dev;
- int npackets = 0;
- unsigned int pkt_len;
- struct sk_buff *skb;
- unsigned int *data_ptr;
- u32 int_enable;
- u32 cmd_word;
- int i;
-
- while (npackets < budget) {
- /*
- * break out of while loop if there are no more
- * packets waiting
- */
- if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
- break;
-
- cmd_word = dnet_readl(bp, RX_LEN_FIFO);
- pkt_len = cmd_word & 0xFFFF;
-
- if (cmd_word & 0xDF180000)
- printk(KERN_ERR "%s packet receive error %x\n",
- __func__, cmd_word);
-
- skb = netdev_alloc_skb(dev, pkt_len + 5);
- if (skb != NULL) {
- /* Align IP on 16 byte boundaries */
- skb_reserve(skb, 2);
- /*
- * 'skb_put()' points to the start of sk_buff
- * data area.
- */
- data_ptr = skb_put(skb, pkt_len);
- for (i = 0; i < (pkt_len + 3) >> 2; i++)
- *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
- skb->protocol = eth_type_trans(skb, dev);
- netif_receive_skb(skb);
- npackets++;
- } else
- printk(KERN_NOTICE
- "%s: No memory to allocate a sk_buff of "
- "size %u.\n", dev->name, pkt_len);
- }
-
- if (npackets < budget) {
- /* We processed all packets available. Tell NAPI it can
- * stop polling then re-enable rx interrupts.
- */
- napi_complete_done(napi, npackets);
- int_enable = dnet_readl(bp, INTR_ENB);
- int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
- dnet_writel(bp, int_enable, INTR_ENB);
- }
-
- return npackets;
-}
-
-static irqreturn_t dnet_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct dnet *bp = netdev_priv(dev);
- u32 int_src, int_enable, int_current;
- unsigned long flags;
- unsigned int handled = 0;
-
- spin_lock_irqsave(&bp->lock, flags);
-
- /* read and clear the DNET irq (clear on read) */
- int_src = dnet_readl(bp, INTR_SRC);
- int_enable = dnet_readl(bp, INTR_ENB);
- int_current = int_src & int_enable;
-
- /* restart the queue if we had stopped it for TX fifo almost full */
- if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
- int_enable = dnet_readl(bp, INTR_ENB);
- int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
- dnet_writel(bp, int_enable, INTR_ENB);
- netif_wake_queue(dev);
- handled = 1;
- }
-
- /* RX FIFO error checking */
- if (int_current &
- (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
- printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
- dnet_readl(bp, RX_STATUS), int_current);
- /* we can only flush the RX FIFOs */
- dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
- ndelay(500);
- dnet_writel(bp, 0, SYS_CTL);
- handled = 1;
- }
-
- /* TX FIFO error checking */
- if (int_current &
- (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
- printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
- dnet_readl(bp, TX_STATUS), int_current);
- /* we can only flush the TX FIFOs */
- dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
- ndelay(500);
- dnet_writel(bp, 0, SYS_CTL);
- handled = 1;
- }
-
- if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
- if (napi_schedule_prep(&bp->napi)) {
- /*
- * There's no point taking any more interrupts
- * until we have processed the buffers
- */
- /* Disable Rx interrupts and schedule NAPI poll */
- int_enable = dnet_readl(bp, INTR_ENB);
- int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
- dnet_writel(bp, int_enable, INTR_ENB);
- __napi_schedule(&bp->napi);
- }
- handled = 1;
- }
-
- if (!handled)
- pr_debug("%s: irq %x remains\n", __func__, int_current);
-
- spin_unlock_irqrestore(&bp->lock, flags);
-
- return IRQ_RETVAL(handled);
-}
-
-#ifdef DEBUG
-static inline void dnet_print_skb(struct sk_buff *skb)
-{
- int k;
- printk(KERN_DEBUG PFX "data:");
- for (k = 0; k < skb->len; k++)
- printk(" %02x", (unsigned int)skb->data[k]);
- printk("\n");
-}
-#else
-#define dnet_print_skb(skb) do {} while (0)
-#endif
-
-static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-
- struct dnet *bp = netdev_priv(dev);
- unsigned int i, tx_cmd, wrsz;
- unsigned long flags;
- unsigned int *bufp;
- u32 irq_enable;
-
- dnet_readl(bp, TX_STATUS);
-
- pr_debug("start_xmit: len %u head %p data %p\n",
- skb->len, skb->head, skb->data);
- dnet_print_skb(skb);
-
- spin_lock_irqsave(&bp->lock, flags);
-
- dnet_readl(bp, TX_STATUS);
-
- bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
- wrsz = (u32) skb->len + 3;
- wrsz += ((unsigned long) skb->data) & 0x3;
- wrsz >>= 2;
- tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;
-
- /* check if there is enough room for the current frame */
- if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
- for (i = 0; i < wrsz; i++)
- dnet_writel(bp, *bufp++, TX_DATA_FIFO);
-
- /*
- * inform MAC that a packet's written and ready to be
- * shipped out
- */
- dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
- }
-
- if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
- netif_stop_queue(dev);
- dnet_readl(bp, INTR_SRC);
- irq_enable = dnet_readl(bp, INTR_ENB);
- irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
- dnet_writel(bp, irq_enable, INTR_ENB);
- }
-
- skb_tx_timestamp(skb);
-
- spin_unlock_irqrestore(&bp->lock, flags);
-
- /* free the buffer */
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-static void dnet_reset_hw(struct dnet *bp)
-{
- /* put ts_mac in IDLE state i.e. disable rx/tx */
- dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);
-
- /*
- * RX FIFO almost full threshold: only cmd FIFO almost full is
- * implemented for RX side
- */
- dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
- /*
- * TX FIFO almost empty threshold: only data FIFO almost empty
- * is implemented for TX side
- */
- dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);
-
- /* flush rx/tx fifos */
- dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
- SYS_CTL);
- msleep(1);
- dnet_writel(bp, 0, SYS_CTL);
-}
-
-static void dnet_init_hw(struct dnet *bp)
-{
- u32 config;
-
- dnet_reset_hw(bp);
- __dnet_set_hwaddr(bp);
-
- config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);
-
- if (bp->dev->flags & IFF_PROMISC)
- /* Copy All Frames */
- config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
- if (!(bp->dev->flags & IFF_BROADCAST))
- /* No BroadCast */
- config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;
-
- config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
- DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
- DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
- DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;
-
- dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);
-
- /* clear irq before enabling them */
- config = dnet_readl(bp, INTR_SRC);
-
- /* enable RX/TX interrupt, recv packet ready interrupt */
- dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
- DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
- DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
- DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
- DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
-}
-
-static int dnet_open(struct net_device *dev)
-{
- struct dnet *bp = netdev_priv(dev);
-
- /* if the phy is not yet register, retry later */
- if (!dev->phydev)
- return -EAGAIN;
-
- napi_enable(&bp->napi);
- dnet_init_hw(bp);
-
- phy_start_aneg(dev->phydev);
-
- /* schedule a link state check */
- phy_start(dev->phydev);
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-static int dnet_close(struct net_device *dev)
-{
- struct dnet *bp = netdev_priv(dev);
-
- netif_stop_queue(dev);
- napi_disable(&bp->napi);
-
- if (dev->phydev)
- phy_stop(dev->phydev);
-
- dnet_reset_hw(bp);
- netif_carrier_off(dev);
-
- return 0;
-}
-
-static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
-{
- pr_debug("%s\n", __func__);
- pr_debug("----------------------------- RX statistics "
- "-------------------------------\n");
- pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
- pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
- pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
- pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
- pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
- pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
- pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
- pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
- pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
- pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
- pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
- pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
- pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
- pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
- pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
- pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
- pr_debug("----------------------------- TX statistics "
- "-------------------------------\n");
- pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
- pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
- pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
- pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
- pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
- pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
- pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
- pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
-}
-
-static struct net_device_stats *dnet_get_stats(struct net_device *dev)
-{
-
- struct dnet *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &dev->stats;
- struct dnet_stats *hwstat = &bp->hw_stats;
-
- /* read stats from hardware */
- dnet_update_stats(bp);
-
- /* Convert HW stats into netdevice stats */
- nstat->rx_errors = (hwstat->rx_len_chk_err +
- hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
- /* ignore IGP violation error
- hwstat->rx_ipg_viol + */
- hwstat->rx_crc_err +
- hwstat->rx_pre_shrink +
- hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
- nstat->tx_errors = hwstat->tx_bad_fcs;
- nstat->rx_length_errors = (hwstat->rx_len_chk_err +
- hwstat->rx_lng_frm +
- hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
- nstat->rx_crc_errors = hwstat->rx_crc_err;
- nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
- nstat->rx_packets = hwstat->rx_ok_pkt;
- nstat->tx_packets = (hwstat->tx_unicast +
- hwstat->tx_multicast + hwstat->tx_brdcast);
- nstat->rx_bytes = hwstat->rx_byte;
- nstat->tx_bytes = hwstat->tx_byte;
- nstat->multicast = hwstat->rx_multicast;
- nstat->rx_missed_errors = hwstat->rx_pkt_ignr;
-
- dnet_print_pretty_hwstats(hwstat);
-
- return nstat;
-}
-
-static void dnet_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strscpy(info->driver, DRV_NAME, sizeof(info->driver));
- strscpy(info->bus_info, "0", sizeof(info->bus_info));
-}
-
-static const struct ethtool_ops dnet_ethtool_ops = {
- .get_drvinfo = dnet_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_ts_info = ethtool_op_get_ts_info,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-static const struct net_device_ops dnet_netdev_ops = {
- .ndo_open = dnet_open,
- .ndo_stop = dnet_close,
- .ndo_get_stats = dnet_get_stats,
- .ndo_start_xmit = dnet_start_xmit,
- .ndo_eth_ioctl = phy_do_ioctl_running,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int dnet_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct net_device *dev;
- struct dnet *bp;
- struct phy_device *phydev;
- int err;
- unsigned int irq;
-
- irq = platform_get_irq(pdev, 0);
-
- dev = alloc_etherdev(sizeof(*bp));
- if (!dev)
- return -ENOMEM;
-
- /* TODO: Actually, we have some interesting features... */
- dev->features |= 0;
-
- bp = netdev_priv(dev);
- bp->dev = dev;
-
- platform_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- spin_lock_init(&bp->lock);
-
- bp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
- if (IS_ERR(bp->regs)) {
- err = PTR_ERR(bp->regs);
- goto err_out_free_dev;
- }
-
- dev->irq = irq;
- err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
- if (err) {
- dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
- irq, err);
- goto err_out_free_dev;
- }
-
- dev->netdev_ops = &dnet_netdev_ops;
- netif_napi_add(dev, &bp->napi, dnet_poll);
- dev->ethtool_ops = &dnet_ethtool_ops;
-
- dev->base_addr = (unsigned long)bp->regs;
-
- bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;
-
- dnet_get_hwaddr(bp);
-
- if (!is_valid_ether_addr(dev->dev_addr)) {
- /* choose a random ethernet address */
- eth_hw_addr_random(dev);
- __dnet_set_hwaddr(bp);
- }
-
- err = register_netdev(dev);
- if (err) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_free_irq;
- }
-
- /* register the PHY board fixup (for Marvell 88E1111) */
- err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
- dnet_phy_marvell_fixup);
- /* we can live without it, so just issue a warning */
- if (err)
- dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");
-
- err = dnet_mii_init(bp);
- if (err)
- goto err_out_unregister_netdev;
-
- dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
- bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr);
- dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
- (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
- (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
- (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
- (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
- phydev = dev->phydev;
- phy_attached_info(phydev);
-
- return 0;
-
-err_out_unregister_netdev:
- unregister_netdev(dev);
-err_out_free_irq:
- free_irq(dev->irq, dev);
-err_out_free_dev:
- free_netdev(dev);
- return err;
-}
-
-static void dnet_remove(struct platform_device *pdev)
-{
-
- struct net_device *dev;
- struct dnet *bp;
-
- dev = platform_get_drvdata(pdev);
-
- if (dev) {
- bp = netdev_priv(dev);
- if (dev->phydev)
- phy_disconnect(dev->phydev);
- mdiobus_unregister(bp->mii_bus);
- mdiobus_free(bp->mii_bus);
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- free_netdev(dev);
- }
-}
-
-static struct platform_driver dnet_driver = {
- .probe = dnet_probe,
- .remove = dnet_remove,
- .driver = {
- .name = "dnet",
- },
-};
-
-module_platform_driver(dnet_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Dave DNET Ethernet driver");
-MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
- "Matteo Vit <matteo.vit@dave.eu>");
diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h
deleted file mode 100644
index 030724484b49..000000000000
--- a/drivers/net/ethernet/dnet.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Dave DNET Ethernet Controller driver
- *
- * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
- */
-#ifndef _DNET_H
-#define _DNET_H
-
-#define DRV_NAME "dnet"
-#define PFX DRV_NAME ": "
-
-/* Register access macros */
-#define dnet_writel(port, value, reg) \
- writel((value), (port)->regs + DNET_##reg)
-#define dnet_readl(port, reg) readl((port)->regs + DNET_##reg)
-
-/* ALL DNET FIFO REGISTERS */
-#define DNET_RX_LEN_FIFO 0x000 /* RX_LEN_FIFO */
-#define DNET_RX_DATA_FIFO 0x004 /* RX_DATA_FIFO */
-#define DNET_TX_LEN_FIFO 0x008 /* TX_LEN_FIFO */
-#define DNET_TX_DATA_FIFO 0x00C /* TX_DATA_FIFO */
-
-/* ALL DNET CONTROL/STATUS REGISTERS OFFSETS */
-#define DNET_VERCAPS 0x100 /* VERCAPS */
-#define DNET_INTR_SRC 0x104 /* INTR_SRC */
-#define DNET_INTR_ENB 0x108 /* INTR_ENB */
-#define DNET_RX_STATUS 0x10C /* RX_STATUS */
-#define DNET_TX_STATUS 0x110 /* TX_STATUS */
-#define DNET_RX_FRAMES_CNT 0x114 /* RX_FRAMES_CNT */
-#define DNET_TX_FRAMES_CNT 0x118 /* TX_FRAMES_CNT */
-#define DNET_RX_FIFO_TH 0x11C /* RX_FIFO_TH */
-#define DNET_TX_FIFO_TH 0x120 /* TX_FIFO_TH */
-#define DNET_SYS_CTL 0x124 /* SYS_CTL */
-#define DNET_PAUSE_TMR 0x128 /* PAUSE_TMR */
-#define DNET_RX_FIFO_WCNT 0x12C /* RX_FIFO_WCNT */
-#define DNET_TX_FIFO_WCNT 0x130 /* TX_FIFO_WCNT */
-
-/* ALL DNET MAC REGISTERS */
-#define DNET_MACREG_DATA 0x200 /* Mac-Reg Data */
-#define DNET_MACREG_ADDR 0x204 /* Mac-Reg Addr */
-
-/* ALL DNET RX STATISTICS COUNTERS */
-#define DNET_RX_PKT_IGNR_CNT 0x300
-#define DNET_RX_LEN_CHK_ERR_CNT 0x304
-#define DNET_RX_LNG_FRM_CNT 0x308
-#define DNET_RX_SHRT_FRM_CNT 0x30C
-#define DNET_RX_IPG_VIOL_CNT 0x310
-#define DNET_RX_CRC_ERR_CNT 0x314
-#define DNET_RX_OK_PKT_CNT 0x318
-#define DNET_RX_CTL_FRM_CNT 0x31C
-#define DNET_RX_PAUSE_FRM_CNT 0x320
-#define DNET_RX_MULTICAST_CNT 0x324
-#define DNET_RX_BROADCAST_CNT 0x328
-#define DNET_RX_VLAN_TAG_CNT 0x32C
-#define DNET_RX_PRE_SHRINK_CNT 0x330
-#define DNET_RX_DRIB_NIB_CNT 0x334
-#define DNET_RX_UNSUP_OPCD_CNT 0x338
-#define DNET_RX_BYTE_CNT 0x33C
-
-/* DNET TX STATISTICS COUNTERS */
-#define DNET_TX_UNICAST_CNT 0x400
-#define DNET_TX_PAUSE_FRM_CNT 0x404
-#define DNET_TX_MULTICAST_CNT 0x408
-#define DNET_TX_BRDCAST_CNT 0x40C
-#define DNET_TX_VLAN_TAG_CNT 0x410
-#define DNET_TX_BAD_FCS_CNT 0x414
-#define DNET_TX_JUMBO_CNT 0x418
-#define DNET_TX_BYTE_CNT 0x41C
-
-/* SOME INTERNAL MAC-CORE REGISTER */
-#define DNET_INTERNAL_MODE_REG 0x0
-#define DNET_INTERNAL_RXTX_CONTROL_REG 0x2
-#define DNET_INTERNAL_MAX_PKT_SIZE_REG 0x4
-#define DNET_INTERNAL_IGP_REG 0x8
-#define DNET_INTERNAL_MAC_ADDR_0_REG 0xa
-#define DNET_INTERNAL_MAC_ADDR_1_REG 0xc
-#define DNET_INTERNAL_MAC_ADDR_2_REG 0xe
-#define DNET_INTERNAL_TX_RX_STS_REG 0x12
-#define DNET_INTERNAL_GMII_MNG_CTL_REG 0x14
-#define DNET_INTERNAL_GMII_MNG_DAT_REG 0x16
-
-#define DNET_INTERNAL_GMII_MNG_CMD_FIN (1 << 14)
-
-#define DNET_INTERNAL_WRITE (1 << 31)
-
-/* MAC-CORE REGISTER FIELDS */
-
-/* MAC-CORE MODE REGISTER FIELDS */
-#define DNET_INTERNAL_MODE_GBITEN (1 << 0)
-#define DNET_INTERNAL_MODE_FCEN (1 << 1)
-#define DNET_INTERNAL_MODE_RXEN (1 << 2)
-#define DNET_INTERNAL_MODE_TXEN (1 << 3)
-
-/* MAC-CORE RXTX CONTROL REGISTER FIELDS */
-#define DNET_INTERNAL_RXTX_CONTROL_RXSHORTFRAME (1 << 8)
-#define DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST (1 << 7)
-#define DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST (1 << 4)
-#define DNET_INTERNAL_RXTX_CONTROL_RXPAUSE (1 << 3)
-#define DNET_INTERNAL_RXTX_CONTROL_DISTXFCS (1 << 2)
-#define DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS (1 << 1)
-#define DNET_INTERNAL_RXTX_CONTROL_ENPROMISC (1 << 0)
-#define DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL (1 << 6)
-#define DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP (1 << 5)
-
-/* SYSTEM CONTROL REGISTER FIELDS */
-#define DNET_SYS_CTL_IGNORENEXTPKT (1 << 0)
-#define DNET_SYS_CTL_SENDPAUSE (1 << 2)
-#define DNET_SYS_CTL_RXFIFOFLUSH (1 << 3)
-#define DNET_SYS_CTL_TXFIFOFLUSH (1 << 4)
-
-/* TX STATUS REGISTER FIELDS */
-#define DNET_TX_STATUS_FIFO_ALMOST_EMPTY (1 << 2)
-#define DNET_TX_STATUS_FIFO_ALMOST_FULL (1 << 1)
-
-/* INTERRUPT SOURCE REGISTER FIELDS */
-#define DNET_INTR_SRC_TX_PKTSENT (1 << 0)
-#define DNET_INTR_SRC_TX_FIFOAF (1 << 1)
-#define DNET_INTR_SRC_TX_FIFOAE (1 << 2)
-#define DNET_INTR_SRC_TX_DISCFRM (1 << 3)
-#define DNET_INTR_SRC_TX_FIFOFULL (1 << 4)
-#define DNET_INTR_SRC_RX_CMDFIFOAF (1 << 8)
-#define DNET_INTR_SRC_RX_CMDFIFOFF (1 << 9)
-#define DNET_INTR_SRC_RX_DATAFIFOFF (1 << 10)
-#define DNET_INTR_SRC_TX_SUMMARY (1 << 16)
-#define DNET_INTR_SRC_RX_SUMMARY (1 << 17)
-#define DNET_INTR_SRC_PHY (1 << 19)
-
-/* INTERRUPT ENABLE REGISTER FIELDS */
-#define DNET_INTR_ENB_TX_PKTSENT (1 << 0)
-#define DNET_INTR_ENB_TX_FIFOAF (1 << 1)
-#define DNET_INTR_ENB_TX_FIFOAE (1 << 2)
-#define DNET_INTR_ENB_TX_DISCFRM (1 << 3)
-#define DNET_INTR_ENB_TX_FIFOFULL (1 << 4)
-#define DNET_INTR_ENB_RX_PKTRDY (1 << 8)
-#define DNET_INTR_ENB_RX_FIFOAF (1 << 9)
-#define DNET_INTR_ENB_RX_FIFOERR (1 << 10)
-#define DNET_INTR_ENB_RX_ERROR (1 << 11)
-#define DNET_INTR_ENB_RX_FIFOFULL (1 << 12)
-#define DNET_INTR_ENB_RX_FIFOAE (1 << 13)
-#define DNET_INTR_ENB_TX_SUMMARY (1 << 16)
-#define DNET_INTR_ENB_RX_SUMMARY (1 << 17)
-#define DNET_INTR_ENB_GLOBAL_ENABLE (1 << 18)
-
-/* default values:
- * almost empty = less than one full sized ethernet frame (no jumbo) inside
- * the fifo almost full = can write less than one full sized ethernet frame
- * (no jumbo) inside the fifo
- */
-#define DNET_CFG_TX_FIFO_FULL_THRES 25
-#define DNET_CFG_RX_FIFO_FULL_THRES 20
-
-/*
- * Capabilities. Used by the driver to know the capabilities that the ethernet
- * controller inside the FPGA have.
- */
-
-#define DNET_HAS_MDIO (1 << 0)
-#define DNET_HAS_IRQ (1 << 1)
-#define DNET_HAS_GIGABIT (1 << 2)
-#define DNET_HAS_DMA (1 << 3)
-
-#define DNET_HAS_MII (1 << 4) /* or GMII */
-#define DNET_HAS_RMII (1 << 5) /* or RGMII */
-
-#define DNET_CAPS_MASK 0xFFFF
-
-#define DNET_FIFO_SIZE 1024 /* 1K x 32 bit */
-#define DNET_FIFO_TX_DATA_AF_TH (DNET_FIFO_SIZE - 384) /* 384 = 1536 / 4 */
-#define DNET_FIFO_TX_DATA_AE_TH 384
-
-#define DNET_FIFO_RX_CMD_AF_TH (1 << 16) /* just one frame inside the FIFO */
-
-/*
- * Hardware-collected statistics.
- */
-struct dnet_stats {
- u32 rx_pkt_ignr;
- u32 rx_len_chk_err;
- u32 rx_lng_frm;
- u32 rx_shrt_frm;
- u32 rx_ipg_viol;
- u32 rx_crc_err;
- u32 rx_ok_pkt;
- u32 rx_ctl_frm;
- u32 rx_pause_frm;
- u32 rx_multicast;
- u32 rx_broadcast;
- u32 rx_vlan_tag;
- u32 rx_pre_shrink;
- u32 rx_drib_nib;
- u32 rx_unsup_opcd;
- u32 rx_byte;
- u32 tx_unicast;
- u32 tx_pause_frm;
- u32 tx_multicast;
- u32 tx_brdcast;
- u32 tx_vlan_tag;
- u32 tx_bad_fcs;
- u32 tx_jumbo;
- u32 tx_byte;
-};
-
-struct dnet {
- void __iomem *regs;
- spinlock_t lock;
- struct platform_device *pdev;
- struct net_device *dev;
- struct dnet_stats hw_stats;
- unsigned int capabilities; /* read from FPGA */
- struct napi_struct napi;
-
- /* PHY stuff */
- struct mii_bus *mii_bus;
- unsigned int link;
- unsigned int speed;
- unsigned int duplex;
-};
-
-#endif /* _DNET_H */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 270ff9aab335..d2623e35de43 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -672,7 +672,7 @@ struct be_adapter {
struct be_error_recovery error_recovery;
};
-/* Used for defered FW config cmds. Add fields to this struct as reqd */
+/* Used for deferred FW config cmds. Add fields to this struct as reqd */
struct be_cmd_work {
struct work_struct work;
struct be_adapter *adapter;
@@ -700,19 +700,19 @@ struct be_cmd_work {
#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
/* Max number of EQs available for the function (NIC + RoCE (if enabled)) */
#define be_max_func_eqs(adapter) (adapter->res.max_evt_qs)
-/* Max number of EQs available avaialble only for NIC */
+/* Max number of EQs available only for NIC */
#define be_max_nic_eqs(adapter) (adapter->res.max_nic_evt_qs)
#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
#define be_max_pf_pool_rss_tables(adapter) \
(adapter->pool_res.max_rss_tables)
-/* Max irqs avaialble for NIC */
+/* Max irqs available for NIC */
#define be_max_irqs(adapter) \
(min_t(u16, be_max_nic_eqs(adapter), num_online_cpus()))
/* Max irqs *needed* for RX queues */
static inline u16 be_max_rx_irqs(struct be_adapter *adapter)
{
- /* If no RSS, need atleast one irq for def-RXQ */
+ /* If no RSS, need at least one irq for def-RXQ */
u16 num = max_t(u16, be_max_rss(adapter), 1);
return min_t(u16, num, be_max_irqs(adapter));
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8ed45bceb537..eab81e073e1e 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1941,7 +1941,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
return 0;
}
-/* Uses sycnhronous mcc */
+/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
u32 num, u32 domain)
{
@@ -2035,7 +2035,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
return __be_cmd_rx_filter(adapter, flags, value);
}
-/* Uses synchrounous mcc */
+/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
struct be_mcc_wrb *wrb;
@@ -2074,7 +2074,7 @@ err:
return status;
}
-/* Uses sycn mcc */
+/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
struct be_mcc_wrb *wrb;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 5e2d3ddb5d43..fcc298ce2c77 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1134,14 +1134,14 @@ struct be_cmd_resp_get_fw_version {
u8 fw_on_flash_version_string[FW_VER_LEN];
} __packed;
-/******************** Set Flow Contrl *******************/
+/******************** Set Flow Control *******************/
struct be_cmd_req_set_flow_control {
struct be_cmd_req_hdr hdr;
u16 tx_flow_control;
u16 rx_flow_control;
} __packed;
-/******************** Get Flow Contrl *******************/
+/******************** Get Flow Control *******************/
struct be_cmd_req_get_flow_control {
struct be_cmd_req_hdr hdr;
u32 rsvd;
@@ -2069,7 +2069,7 @@ struct be_cmd_resp_get_stats_v2 {
struct be_hw_stats_v2 hw_stats;
};
-/************** get fat capabilites *******************/
+/************** get fat capabilities *******************/
#define MAX_MODULES 27
#define MAX_MODES 4
#define MODE_UART 0
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f9216326bdfe..87dbbd5b7f4e 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -142,7 +142,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
* to HW.
*/
{DRVSTAT_RX_INFO(rx_post_fail)},
- /* Recevied packets dropped due to skb allocation failure */
+ /* Received packets dropped due to skb allocation failure */
{DRVSTAT_RX_INFO(rx_drops_no_skbs)},
/* Received packets dropped due to lack of available fetched buffers
* posted by the driver.
@@ -189,7 +189,7 @@ static const struct be_ethtool_stat et_tx_stats[] = {
{DRVSTAT_TX_INFO(tx_bytes)},
{DRVSTAT_TX_INFO(tx_pkts)},
{DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
- /* Number of skbs queued for trasmission by the driver */
+ /* Number of skbs queued for transmission by the driver */
{DRVSTAT_TX_INFO(tx_reqs)},
/* Number of times the TX queue was stopped due to lack
* of spaces in the TXQ.
@@ -1073,6 +1073,13 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
adapter->msg_enable = level;
}
+static u32 be_get_rx_ring_count(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->num_rx_qs;
+}
+
static int be_get_rxfh_fields(struct net_device *netdev,
struct ethtool_rxfh_fields *cmd)
{
@@ -1117,28 +1124,6 @@ static int be_get_rxfh_fields(struct net_device *netdev,
return 0;
}
-static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- if (!be_multi_rxq(adapter)) {
- dev_info(&adapter->pdev->dev,
- "ethtool::get_rxnfc: RX flow hashing is disabled\n");
- return -EINVAL;
- }
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_qs;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int be_set_rxfh_fields(struct net_device *netdev,
const struct ethtool_rxfh_fields *cmd,
struct netlink_ext_ack *extack)
@@ -1222,7 +1207,7 @@ static void be_get_channels(struct net_device *netdev,
ch->tx_count = adapter->num_tx_qs - ch->combined_count;
ch->max_combined = be_max_qp_irqs(adapter);
- /* The user must create atleast one combined channel */
+ /* The user must create at least one combined channel */
ch->max_rx = be_max_rx_irqs(adapter) - 1;
ch->max_tx = be_max_tx_irqs(adapter) - 1;
}
@@ -1293,6 +1278,12 @@ static int be_set_rxfh(struct net_device *netdev,
u8 *hkey = rxfh->key;
u8 rsstable[RSS_INDIR_TABLE_LEN];
+ if (!be_multi_rxq(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "ethtool::set_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
+
/* We do not allow change in unsupported parameters */
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
@@ -1441,7 +1432,7 @@ const struct ethtool_ops be_ethtool_ops = {
.get_ethtool_stats = be_get_ethtool_stats,
.flash_device = be_do_flash,
.self_test = be_self_test,
- .get_rxnfc = be_get_rxnfc,
+ .get_rx_ring_count = be_get_rx_ring_count,
.get_rxfh_fields = be_get_rxfh_fields,
.set_rxfh_fields = be_set_rxfh_fields,
.get_rxfh_indir_size = be_get_rxfh_indir_size,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3476194f0855..42e83ff9c52f 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -16,7 +16,7 @@
* The software must write this register twice to post any command. First,
* it writes the register with hi=1 and the upper bits of the physical address
* for the MAILBOX structure. Software must poll the ready bit until this
- * is acknowledged. Then, sotware writes the register with hi=0 with the lower
+ * is acknowledged. Then, software writes the register with hi=0 with the lower
* bits in the address. It must poll the ready bit until the command is
* complete. Upon completion, the MAILBOX will contain a valid completion
* queue entry.
@@ -27,7 +27,7 @@
#define MPU_EP_CONTROL 0
-/********** MPU semphore: used for SH & BE *************/
+/********** MPU semaphore: used for SH & BE *************/
#define SLIPORT_SOFTRESET_OFFSET 0x5c /* CSR BAR offset */
#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
@@ -39,7 +39,7 @@
/* Soft Reset register masks */
#define SLIPORT_SOFTRESET_SR_MASK 0x00000080 /* SR bit */
-/* MPU semphore POST stage values */
+/* MPU semaphore POST stage values */
#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 995c159003d7..52e10467b3e4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -61,7 +61,7 @@ static const struct pci_device_id be_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
-/* Workqueue used by all functions for defering cmd calls to the adapter */
+/* Workqueue used by all functions for deferring cmd calls to the adapter */
static struct workqueue_struct *be_wq;
/* UE Status Low CSR */
@@ -1129,7 +1129,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
struct iphdr *ip;
/* For padded packets, BE HW modifies tot_len field in IP header
- * incorrecly when VLAN tag is inserted by HW.
+ * incorrectly when VLAN tag is inserted by HW.
* For padded packets, Lancer computes incorrect checksum.
*/
eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
@@ -2570,7 +2570,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
rxcp->vlanf = 0;
}
- /* As the compl has been parsed, reset it; we wont touch it again */
+ /* As the compl has been parsed, reset it; we won't touch it again */
compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
queue_tail_inc(&rxo->cq);
@@ -2729,7 +2729,7 @@ static struct be_tx_compl_info *be_tx_compl_get(struct be_adapter *adapter,
if (txcp->status) {
if (lancer_chip(adapter)) {
lancer_update_tx_err(txo, txcp->status);
- /* Reset the adapter incase of TSO,
+ /* Reset the adapter in case of TSO,
* SGE or Parity error
*/
if (txcp->status == LANCER_TX_COMP_LSO_ERR ||
@@ -3127,7 +3127,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
adapter->num_rss_qs =
min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
- /* We'll use RSS only if atleast 2 RSS rings are supported. */
+ /* We'll use RSS only if at least 2 RSS rings are supported. */
if (adapter->num_rss_qs < 2)
adapter->num_rss_qs = 0;
@@ -3169,7 +3169,7 @@ static irqreturn_t be_intx(int irq, void *dev)
/* IRQ is not expected when NAPI is scheduled as the EQ
* will not be armed.
* But, this can happen on Lancer INTx where it takes
- * a while to de-assert INTx or in BE2 where occasionaly
+ * a while to de-assert INTx or in BE2 where occasionally
* an interrupt may be raised even when EQ is unarmed.
* If NAPI is already scheduled, then counting & notifying
* events will orphan them.
@@ -4417,7 +4417,7 @@ static void be_setup_init(struct be_adapter *adapter)
/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
* However, this HW limitation is not exposed to the host via any SLI cmd.
* As a result, in the case of SRIOV and in particular multi-partition configs
- * the driver needs to calcuate a proportional share of RSS Tables per PF-pool
+ * the driver needs to calculate a proportional share of RSS Tables per PF-pool
* for distribution between the VFs. This self-imposed limit will determine the
* no: of VFs for which RSS can be enabled.
*/
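
The proportional share itself is simple arithmetic; a hedged sketch (the macro value and helper name are placeholders, not be_main.c symbols):

#include <linux/types.h>

#define MAX_PORT_RSS_TABLES	15	/* assumed value for illustration */

/* Each PF pool gets an equal slice of the port's RSS Policy Tables,
 * which in turn caps how many VFs in that pool can have RSS enabled.
 */
static u16 rss_tables_per_pf_pool(u16 num_pf_pools)
{
	return num_pf_pools ? MAX_PORT_RSS_TABLES / num_pf_pools : 0;
}

For example, 15 tables split across three PF pools would allow at most five RSS-capable functions per pool.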
@@ -4521,7 +4521,7 @@ static int be_get_resources(struct be_adapter *adapter)
if (status)
return status;
- /* If a deafault RXQ must be created, we'll use up one RSSQ*/
+ /* If a default RXQ must be created, we'll use up one RSSQ */
if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
!(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
res.max_rss_qs -= 1;
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index 228a638eae16..d11168278515 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -257,15 +257,19 @@ static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
}
}
+static u32 tsnep_ethtool_get_rx_ring_count(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->num_rx_queues;
+}
+
static int tsnep_ethtool_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
- return 0;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->rxnfc_count;
cmd->data = adapter->rxnfc_max;
@@ -469,6 +473,7 @@ const struct ethtool_ops tsnep_ethtool_ops = {
.get_sset_count = tsnep_ethtool_get_sset_count,
.get_rxnfc = tsnep_ethtool_get_rxnfc,
.set_rxnfc = tsnep_ethtool_set_rxnfc,
+ .get_rx_ring_count = tsnep_ethtool_get_rx_ring_count,
.get_channels = tsnep_ethtool_get_channels,
.get_ts_info = tsnep_ethtool_get_ts_info,
.get_coalesce = tsnep_ethtool_get_coalesce,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index a863f7841210..1e91e79c8134 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -33,6 +33,17 @@
#define DRV_NAME "ftgmac100"
+enum ftgmac100_mac_id {
+ FTGMAC100_FARADAY = 1,
+ FTGMAC100_AST2400,
+ FTGMAC100_AST2500,
+ FTGMAC100_AST2600
+};
+
+struct ftgmac100_match_data {
+ enum ftgmac100_mac_id mac_id;
+};
+
/* Arbitrary values, I am not sure the HW has limits */
#define MAX_RX_QUEUE_ENTRIES 1024
#define MAX_TX_QUEUE_ENTRIES 1024
@@ -66,6 +77,8 @@ struct ftgmac100 {
struct resource *res;
void __iomem *base;
+ enum ftgmac100_mac_id mac_id;
+
/* Rx ring */
unsigned int rx_q_entries;
struct ftgmac100_rxdes *rxdes;
@@ -1470,6 +1483,11 @@ static int ftgmac100_mii_probe(struct net_device *netdev)
phy_interface_t phy_intf;
int err;
+ if (!priv->mii_bus) {
+ dev_err(priv->dev, "No MDIO bus available\n");
+ return -ENODEV;
+ }
+
/* Default to RGMII. It's a gigabit part after all */
err = of_get_phy_mode(np, &phy_intf);
if (err)
@@ -1699,16 +1717,16 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
struct platform_device *pdev = to_platform_device(priv->dev);
struct device_node *np = pdev->dev.of_node;
struct device_node *mdio_np;
- int i, err = 0;
+ int err = 0;
u32 reg;
/* initialize mdio bus */
- priv->mii_bus = mdiobus_alloc();
+ priv->mii_bus = devm_mdiobus_alloc(priv->dev);
if (!priv->mii_bus)
return -EIO;
- if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac")) {
+ if (priv->mac_id == FTGMAC100_AST2400 ||
+ priv->mac_id == FTGMAC100_AST2500) {
/* The AST2600 has a separate MDIO controller */
/* For the AST2400 and AST2500 this driver only supports the
@@ -1727,24 +1745,16 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
priv->mii_bus->read = ftgmac100_mdiobus_read;
priv->mii_bus->write = ftgmac100_mdiobus_write;
- for (i = 0; i < PHY_MAX_ADDR; i++)
- priv->mii_bus->irq[i] = PHY_POLL;
-
mdio_np = of_get_child_by_name(np, "mdio");
- err = of_mdiobus_register(priv->mii_bus, mdio_np);
+ err = devm_of_mdiobus_register(priv->dev, priv->mii_bus, mdio_np);
+ of_node_put(mdio_np);
if (err) {
dev_err(priv->dev, "Cannot register MDIO bus!\n");
- goto err_register_mdiobus;
+ return err;
}
- of_node_put(mdio_np);
-
return 0;
-
-err_register_mdiobus:
- mdiobus_free(priv->mii_bus);
- return err;
}
static void ftgmac100_phy_disconnect(struct net_device *netdev)
@@ -1763,17 +1773,6 @@ static void ftgmac100_phy_disconnect(struct net_device *netdev)
fixed_phy_unregister(phydev);
}
-static void ftgmac100_destroy_mdio(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
-
- if (!priv->mii_bus)
- return;
-
- mdiobus_unregister(priv->mii_bus);
- mdiobus_free(priv->mii_bus);
-}
-
static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
{
if (unlikely(nd->state != ncsi_dev_state_functional))
@@ -1788,13 +1787,10 @@ static int ftgmac100_setup_clk(struct ftgmac100 *priv)
struct clk *clk;
int rc;
- clk = devm_clk_get(priv->dev, NULL /* MACCLK */);
+ clk = devm_clk_get_enabled(priv->dev, NULL /* MACCLK */);
if (IS_ERR(clk))
return PTR_ERR(clk);
priv->clk = clk;
- rc = clk_prepare_enable(priv->clk);
- if (rc)
- return rc;
/* Aspeed specifies a 100MHz clock is required for up to
* 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz
@@ -1803,21 +1799,17 @@ static int ftgmac100_setup_clk(struct ftgmac100 *priv)
rc = clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
FTGMAC_100MHZ);
if (rc)
- goto cleanup_clk;
+ return rc;
/* RCLK is for RMII, typically used for NCSI. Optional because it's not
* necessary if it's the AST2400 MAC, or the MAC is configured for
* RGMII, or the controller is not an ASPEED-based controller.
*/
- priv->rclk = devm_clk_get_optional(priv->dev, "RCLK");
- rc = clk_prepare_enable(priv->rclk);
- if (!rc)
- return 0;
-
-cleanup_clk:
- clk_disable_unprepare(priv->clk);
+ priv->rclk = devm_clk_get_optional_enabled(priv->dev, "RCLK");
+ if (IS_ERR(priv->rclk))
+ return PTR_ERR(priv->rclk);
- return rc;
+ return 0;
}
static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
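
The hunk above relies on the managed clock helpers; a minimal sketch of that pattern under assumed names (the device and clock identifiers are placeholders):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_setup_clk(struct device *dev, struct clk **out)
{
	struct clk *clk;

	/* Acquire and enable in one call; the devm core disables and
	 * releases the clock automatically on driver detach, so no
	 * clk_disable_unprepare() error path is needed.
	 */
	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*out = clk;
	return 0;
}

devm_clk_get_optional_enabled() follows the same shape and simply returns NULL when the clock is absent.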
@@ -1833,16 +1825,121 @@ static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
return ret;
}
+static int ftgmac100_probe_ncsi(struct net_device *netdev,
+ struct ftgmac100 *priv,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct phy_device *phydev;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_NET_NCSI)) {
+ dev_err(&pdev->dev, "NCSI stack not enabled\n");
+ return -EINVAL;
+ }
+
+ dev_info(&pdev->dev, "Using NCSI interface\n");
+ priv->use_ncsi = true;
+ priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
+ if (!priv->ndev)
+ return -EINVAL;
+
+ phydev = fixed_phy_register(&ncsi_phy_status, np);
+ if (IS_ERR(phydev)) {
+ dev_err(&pdev->dev, "failed to register fixed PHY device\n");
+ err = PTR_ERR(phydev);
+ goto err_register_ndev;
+ }
+ err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
+ PHY_INTERFACE_MODE_RMII);
+ if (err) {
+ dev_err(&pdev->dev, "Connecting PHY failed\n");
+ goto err_register_phy;
+ }
+
+ return 0;
+err_register_phy:
+ fixed_phy_unregister(phydev);
+err_register_ndev:
+ if (priv->ndev)
+ ncsi_unregister_dev(priv->ndev);
+ priv->ndev = NULL;
+ return err;
+}
+
+static int ftgmac100_probe_dt(struct net_device *netdev,
+ struct platform_device *pdev,
+ struct ftgmac100 *priv,
+ struct device_node *np)
+{
+ struct phy_device *phy;
+ int err;
+
+ if (of_get_property(np, "use-ncsi", NULL))
+ return ftgmac100_probe_ncsi(netdev, priv, pdev);
+
+ if (of_phy_is_fixed_link(np) ||
+ of_get_property(np, "phy-handle", NULL)) {
+ /* Support "mdio"/"phy" child nodes for ast2400/2500
+ * with an embedded MDIO controller. Automatically
+ * scan the DTS for available PHYs and register
+ * them. The AST2600 has an independent MDIO controller,
+ * not part of the MAC.
+ */
+ phy = of_phy_get_and_connect(priv->netdev, np,
+ &ftgmac100_adjust_link);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to connect to phy\n");
+ return -EINVAL;
+ }
+
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.rst)
+ */
+ phy_support_asym_pause(phy);
+
+ /* Display what we found */
+ phy_attached_info(phy);
+ return 0;
+ }
+
+ if (!ftgmac100_has_child_node(np, "mdio")) {
+ /* Support legacy ASPEED devicetree descriptions that
+ * describe a MAC with an embedded MDIO controller but
+ * have no "mdio" child node. Automatically scan the
+ * MDIO bus for available PHYs.
+ */
+ err = ftgmac100_mii_probe(netdev);
+ if (err) {
+ dev_err(priv->dev, "MII probe failed!\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int ftgmac100_probe(struct platform_device *pdev)
{
+ const struct ftgmac100_match_data *match_data;
+ enum ftgmac100_mac_id mac_id;
struct resource *res;
int irq;
struct net_device *netdev;
- struct phy_device *phydev;
struct ftgmac100 *priv;
struct device_node *np;
int err = 0;
+ np = pdev->dev.of_node;
+ if (np) {
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
+ return -EINVAL;
+ mac_id = match_data->mac_id;
+ } else {
+ mac_id = FTGMAC100_FARADAY;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
@@ -1852,11 +1949,9 @@ static int ftgmac100_probe(struct platform_device *pdev)
return irq;
/* setup net_device */
- netdev = alloc_etherdev(sizeof(*priv));
- if (!netdev) {
- err = -ENOMEM;
- goto err_alloc_etherdev;
- }
+ netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
+ if (!netdev)
+ return -ENOMEM;
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -1870,22 +1965,22 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv = netdev_priv(netdev);
priv->netdev = netdev;
priv->dev = &pdev->dev;
+ priv->mac_id = mac_id;
INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
/* map io memory */
- priv->res = request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev));
+ priv->res = devm_request_mem_region(&pdev->dev,
+ res->start, resource_size(res),
+ dev_name(&pdev->dev));
if (!priv->res) {
dev_err(&pdev->dev, "Could not reserve memory region\n");
- err = -ENOMEM;
- goto err_req_mem;
+ return -ENOMEM;
}
- priv->base = ioremap(res->start, resource_size(res));
+ priv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!priv->base) {
dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
- err = -EIO;
- goto err_ioremap;
+ return -EIO;
}
netdev->irq = irq;
@@ -1898,12 +1993,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* MAC address from chip or random one */
err = ftgmac100_initial_mac(priv);
if (err)
- goto err_phy_connect;
+ return err;
- np = pdev->dev.of_node;
- if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac") ||
- of_device_is_compatible(np, "aspeed,ast2600-mac"))) {
+ if (priv->mac_id == FTGMAC100_AST2400 ||
+ priv->mac_id == FTGMAC100_AST2500 ||
+ priv->mac_id == FTGMAC100_AST2600) {
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
@@ -1912,100 +2006,37 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->txdes0_edotr_mask = BIT(15);
}
- if (np && of_get_property(np, "use-ncsi", NULL)) {
- if (!IS_ENABLED(CONFIG_NET_NCSI)) {
- dev_err(&pdev->dev, "NCSI stack not enabled\n");
- err = -EINVAL;
- goto err_phy_connect;
- }
-
- dev_info(&pdev->dev, "Using NCSI interface\n");
- priv->use_ncsi = true;
- priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
- if (!priv->ndev) {
- err = -EINVAL;
- goto err_phy_connect;
- }
-
- phydev = fixed_phy_register(&ncsi_phy_status, np);
- if (IS_ERR(phydev)) {
- dev_err(&pdev->dev, "failed to register fixed PHY device\n");
- err = PTR_ERR(phydev);
- goto err_phy_connect;
- }
- err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
- PHY_INTERFACE_MODE_RMII);
- if (err) {
- dev_err(&pdev->dev, "Connecting PHY failed\n");
- goto err_phy_connect;
- }
- } else if (np && (of_phy_is_fixed_link(np) ||
- of_get_property(np, "phy-handle", NULL))) {
- struct phy_device *phy;
-
- /* Support "mdio"/"phy" child nodes for ast2400/2500 with
- * an embedded MDIO controller. Automatically scan the DTS for
- * available PHYs and register them.
- */
- if (of_get_property(np, "phy-handle", NULL) &&
- (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
- err = ftgmac100_setup_mdio(netdev);
- if (err)
- goto err_setup_mdio;
- }
-
- phy = of_phy_get_and_connect(priv->netdev, np,
- &ftgmac100_adjust_link);
- if (!phy) {
- dev_err(&pdev->dev, "Failed to connect to phy\n");
- err = -EINVAL;
- goto err_phy_connect;
- }
-
- /* Indicate that we support PAUSE frames (see comment in
- * Documentation/networking/phy.rst)
- */
- phy_support_asym_pause(phy);
-
- /* Display what we found */
- phy_attached_info(phy);
- } else if (np && !ftgmac100_has_child_node(np, "mdio")) {
- /* Support legacy ASPEED devicetree descriptions that decribe a
- * MAC with an embedded MDIO controller but have no "mdio"
- * child node. Automatically scan the MDIO bus for available
- * PHYs.
- */
- priv->use_ncsi = false;
+ if (priv->mac_id == FTGMAC100_FARADAY ||
+ priv->mac_id == FTGMAC100_AST2400 ||
+ priv->mac_id == FTGMAC100_AST2500) {
err = ftgmac100_setup_mdio(netdev);
if (err)
- goto err_setup_mdio;
-
- err = ftgmac100_mii_probe(netdev);
- if (err) {
- dev_err(priv->dev, "MII probe failed!\n");
- goto err_ncsi_dev;
- }
+ return err;
+ }
+ if (np) {
+ err = ftgmac100_probe_dt(netdev, pdev, priv, np);
+ if (err)
+ goto err;
}
priv->rst = devm_reset_control_get_optional_exclusive(priv->dev, NULL);
if (IS_ERR(priv->rst)) {
err = PTR_ERR(priv->rst);
- goto err_phy_connect;
+ goto err;
}
if (priv->is_aspeed) {
err = ftgmac100_setup_clk(priv);
if (err)
- goto err_phy_connect;
-
- /* Disable ast2600 problematic HW arbitration */
- if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
- iowrite32(FTGMAC100_TM_DEFAULT,
- priv->base + FTGMAC100_OFFSET_TM);
+ goto err;
}
+ /* Disable ast2600 problematic HW arbitration */
+ if (priv->mac_id == FTGMAC100_AST2600)
+ iowrite32(FTGMAC100_TM_DEFAULT,
+ priv->base + FTGMAC100_OFFSET_TM);
+
/* Default ring sizes */
priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;
@@ -2019,11 +2050,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
/* AST2400 doesn't have working HW checksum generation */
- if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
+ if (priv->mac_id == FTGMAC100_AST2400)
netdev->hw_features &= ~NETIF_F_HW_CSUM;
/* AST2600 tx checksum with NCSI is broken */
- if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ if (priv->use_ncsi && priv->mac_id == FTGMAC100_AST2600)
netdev->hw_features &= ~NETIF_F_HW_CSUM;
if (np && of_get_property(np, "no-hw-checksum", NULL))
@@ -2034,29 +2065,17 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
- goto err_register_netdev;
+ goto err;
}
netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
return 0;
-err_register_netdev:
- clk_disable_unprepare(priv->rclk);
- clk_disable_unprepare(priv->clk);
-err_phy_connect:
+err:
ftgmac100_phy_disconnect(netdev);
-err_ncsi_dev:
if (priv->ndev)
ncsi_unregister_dev(priv->ndev);
- ftgmac100_destroy_mdio(netdev);
-err_setup_mdio:
- iounmap(priv->base);
-err_ioremap:
- release_resource(priv->res);
-err_req_mem:
- free_netdev(netdev);
-err_alloc_etherdev:
return err;
}
@@ -2072,26 +2091,39 @@ static void ftgmac100_remove(struct platform_device *pdev)
ncsi_unregister_dev(priv->ndev);
unregister_netdev(netdev);
- clk_disable_unprepare(priv->rclk);
- clk_disable_unprepare(priv->clk);
-
/* There's a small chance the reset task will have been re-queued,
* during stop, make sure it's gone before we free the structure.
*/
cancel_work_sync(&priv->reset_task);
ftgmac100_phy_disconnect(netdev);
- ftgmac100_destroy_mdio(netdev);
+}
- iounmap(priv->base);
- release_resource(priv->res);
+static const struct ftgmac100_match_data ftgmac100_match_data_ast2400 = {
+ .mac_id = FTGMAC100_AST2400
+};
- netif_napi_del(&priv->napi);
- free_netdev(netdev);
-}
+static const struct ftgmac100_match_data ftgmac100_match_data_ast2500 = {
+ .mac_id = FTGMAC100_AST2500
+};
+
+static const struct ftgmac100_match_data ftgmac100_match_data_ast2600 = {
+ .mac_id = FTGMAC100_AST2600
+};
+
+static const struct ftgmac100_match_data ftgmac100_match_data_faraday = {
+ .mac_id = FTGMAC100_FARADAY
+};
static const struct of_device_id ftgmac100_of_match[] = {
- { .compatible = "faraday,ftgmac100" },
+ { .compatible = "aspeed,ast2400-mac",
.data = &ftgmac100_match_data_ast2400 },
+ { .compatible = "aspeed,ast2500-mac",
+ .data = &ftgmac100_match_data_ast2500 },
+ { .compatible = "aspeed,ast2600-mac",
+ .data = &ftgmac100_match_data_ast2600 },
+ { .compatible = "faraday,ftgmac100",
+ .data = &ftgmac100_match_data_faraday },
{ }
};
MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
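
For reference, the match-data pattern adopted above in self-contained form; every name here is a placeholder for illustration, not an ftgmac100 symbol:

#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_match_data {
	int mac_id;
};

static const struct example_match_data example_data_a = { .mac_id = 1 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-a", .data = &example_data_a },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_match_data *md;

	/* One lookup replaces repeated of_device_is_compatible() calls */
	md = of_device_get_match_data(&pdev->dev);
	if (!md)
		return -EINVAL;

	dev_info(&pdev->dev, "mac_id %d\n", md->mac_id);
	return 0;
}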
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index fd9a93d02f8e..7176803146f3 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -340,6 +340,7 @@ struct bufdesc_ex {
#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE 1024 /* Must be power of two */
#define TX_RING_MOD_MASK 511 /* for this to work */
+#define FEC_XSK_TX_BUDGET_MAX 256
#define BD_ENET_RX_INT 0x00800000
#define BD_ENET_RX_PTP ((ushort)0x0400)
@@ -528,6 +529,8 @@ enum fec_txbuf_type {
FEC_TXBUF_T_SKB,
FEC_TXBUF_T_XDP_NDO,
FEC_TXBUF_T_XDP_TX,
+ FEC_TXBUF_T_XSK_XMIT,
+ FEC_TXBUF_T_XSK_TX,
};
struct fec_tx_buffer {
@@ -539,6 +542,7 @@ struct fec_enet_priv_tx_q {
struct bufdesc_prop bd;
unsigned char *tx_bounce[TX_RING_SIZE];
struct fec_tx_buffer tx_buf[TX_RING_SIZE];
+ struct xsk_buff_pool *xsk_pool;
unsigned short tx_stop_threshold;
unsigned short tx_wake_threshold;
@@ -548,9 +552,16 @@ struct fec_enet_priv_tx_q {
dma_addr_t tso_hdrs_dma;
};
+union fec_rx_buffer {
+ void *buf_p;
+ struct page *page;
+ struct xdp_buff *xdp;
+};
+
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
- struct page *rx_buf[RX_RING_SIZE];
+ union fec_rx_buffer rx_buf[RX_RING_SIZE];
+ struct xsk_buff_pool *xsk_pool;
/* page_pool */
struct page_pool *page_pool;
@@ -643,6 +654,7 @@ struct fec_enet_private {
struct pm_qos_request pm_qos_req;
unsigned int tx_align;
+ unsigned int rx_shift;
/* hw interrupt coalesce */
unsigned int rx_pkts_itr;
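
A short sketch of how the new union is meant to be read: the queue's xsk_pool pointer discriminates whether a ring slot holds a page_pool page or an XSK buffer. Simplified, with placeholder names:

#include <net/page_pool/helpers.h>
#include <net/xdp_sock_drv.h>

union example_rx_buffer {
	void *buf_p;		/* generic view, used for NULL checks */
	struct page *page;	/* copy mode: page_pool backed */
	struct xdp_buff *xdp;	/* zero copy: xsk_buff_pool backed */
};

static void example_free_slot(union example_rx_buffer *buf,
			      struct xsk_buff_pool *xsk_pool,
			      struct page_pool *pp)
{
	if (!buf->buf_p)
		return;

	if (xsk_pool)			/* non-NULL pool selects .xdp */
		xsk_buff_free(buf->xdp);
	else
		page_pool_put_full_page(pp, buf->page, false);

	buf->buf_p = NULL;
}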
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 797ef6899657..0d926bf18195 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -71,6 +71,7 @@
#include <net/page_pool/helpers.h>
#include <net/selftests.h>
#include <net/tso.h>
+#include <net/xdp_sock_drv.h>
#include <soc/imx/cpuidle.h>
#include "fec.h"
@@ -79,7 +80,7 @@ static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_set(struct net_device *ndev);
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
int cpu, struct xdp_buff *xdp,
- u32 dma_sync_len);
+ u32 dma_sync_len, int queue);
#define DRIVER_NAME "fec"
@@ -467,13 +468,13 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
static int
fec_enet_create_page_pool(struct fec_enet_private *fep,
- struct fec_enet_priv_rx_q *rxq, int size)
+ struct fec_enet_priv_rx_q *rxq)
{
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
struct page_pool_params pp_params = {
.order = fep->pagepool_order,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
- .pool_size = size,
+ .pool_size = rxq->bd.ring_size,
.nid = dev_to_node(&fep->pdev->dev),
.dev = &fep->pdev->dev,
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
@@ -489,23 +490,18 @@ fec_enet_create_page_pool(struct fec_enet_private *fep,
return err;
}
- err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
- if (err < 0)
- goto err_free_pp;
-
- err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
- rxq->page_pool);
- if (err)
- goto err_unregister_rxq;
-
return 0;
+}
-err_unregister_rxq:
- xdp_rxq_info_unreg(&rxq->xdp_rxq);
-err_free_pp:
- page_pool_destroy(rxq->page_pool);
- rxq->page_pool = NULL;
- return err;
+static void fec_txq_trigger_xmit(struct fec_enet_private *fep,
+ struct fec_enet_priv_tx_q *txq)
+{
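+ /* ERR007885 workaround: when the quirk applies, only re-arm the
+ * descriptor-active register after one of several reads observes
+ * it cleared, since writing it while still set can stall TX.
+ */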
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active))
+ writel(0, txq->bd.reg_desc_active);
}
static struct bufdesc *
@@ -717,12 +713,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
txq->bd.cur = bdp;
/* Trigger transmission start */
- if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active))
- writel(0, txq->bd.reg_desc_active);
+ fec_txq_trigger_xmit(fep, txq);
return 0;
}
@@ -913,12 +904,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
txq->bd.cur = bdp;
/* Trigger transmission start */
- if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active))
- writel(0, txq->bd.reg_desc_active);
+ fec_txq_trigger_xmit(fep, txq);
return 0;
@@ -1005,6 +991,13 @@ static void fec_enet_bd_init(struct net_device *dev)
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
else
bdp->cbd_sc = cpu_to_fec16(0);
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ }
+
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
}
@@ -1022,33 +1015,38 @@ static void fec_enet_bd_init(struct net_device *dev)
txq->bd.cur = bdp;
for (i = 0; i < txq->bd.ring_size; i++) {
+ struct page *page;
+
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = cpu_to_fec16(0);
- if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+
+ switch (txq->tx_buf[i].type) {
+ case FEC_TXBUF_T_SKB:
if (bdp->cbd_bufaddr &&
!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
- if (txq->tx_buf[i].buf_p)
- dev_kfree_skb_any(txq->tx_buf[i].buf_p);
- } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
- if (bdp->cbd_bufaddr)
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
-
- if (txq->tx_buf[i].buf_p)
- xdp_return_frame(txq->tx_buf[i].buf_p);
- } else {
- struct page *page = txq->tx_buf[i].buf_p;
-
- if (page)
- page_pool_put_page(pp_page_to_nmdesc(page)->pp,
- page, 0,
- false);
+ dev_kfree_skb_any(txq->tx_buf[i].buf_p);
+ break;
+ case FEC_TXBUF_T_XDP_NDO:
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+ xdp_return_frame(txq->tx_buf[i].buf_p);
+ break;
+ case FEC_TXBUF_T_XDP_TX:
+ page = txq->tx_buf[i].buf_p;
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0, false);
+ break;
+ case FEC_TXBUF_T_XSK_TX:
+ xsk_buff_free(txq->tx_buf[i].buf_p);
+ break;
+ default:
+ break;
}
txq->tx_buf[i].buf_p = NULL;
@@ -1335,7 +1333,9 @@ fec_restart(struct net_device *ndev)
static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
{
if (!(of_machine_is_compatible("fsl,imx8qm") ||
+ of_machine_is_compatible("fsl,imx8qp") ||
of_machine_is_compatible("fsl,imx8qxp") ||
+ of_machine_is_compatible("fsl,imx8dx") ||
of_machine_is_compatible("fsl,imx8dxl")))
return 0;
@@ -1479,27 +1479,102 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
hwtstamps->hwtstamp = ns_to_ktime(ns);
}
-static void
-fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
+static bool fec_enet_xsk_xmit(struct fec_enet_private *fep,
+ struct xsk_buff_pool *pool,
+ u32 queue)
{
- struct fec_enet_private *fep;
- struct xdp_frame *xdpf;
- struct bufdesc *bdp;
- unsigned short status;
- struct sk_buff *skb;
- struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ struct xdp_desc *xsk_desc = pool->tx_descs;
+ int cpu = smp_processor_id();
+ int free_bds, budget, batch;
struct netdev_queue *nq;
- int index = 0;
- int entries_free;
- struct page *page;
- int frame_len;
+ struct bufdesc *bdp;
+ dma_addr_t dma;
+ u32 estatus;
+ u16 status;
+ int i, j;
- fep = netdev_priv(ndev);
+ nq = netdev_get_tx_queue(fep->netdev, queue);
+ __netif_tx_lock(nq, cpu);
- txq = fep->tx_queue[queue_id];
- /* get next bdp of dirty_tx */
- nq = netdev_get_tx_queue(ndev, queue_id);
- bdp = txq->dirty_tx;
+ txq_trans_cond_update(nq);
+ free_bds = fec_enet_get_free_txdesc_num(txq);
+ if (!free_bds)
+ goto tx_unlock;
+
+ budget = min(free_bds, FEC_XSK_TX_BUDGET_MAX);
+ batch = xsk_tx_peek_release_desc_batch(pool, budget);
+ if (!batch)
+ goto tx_unlock;
+
+ bdp = txq->bd.cur;
+ for (i = 0; i < batch; i++) {
+ dma = xsk_buff_raw_get_dma(pool, xsk_desc[i].addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma, xsk_desc[i].len);
+
+ j = fec_enet_get_bd_index(bdp, &txq->bd);
+ txq->tx_buf[j].type = FEC_TXBUF_T_XSK_XMIT;
+ txq->tx_buf[j].buf_p = NULL;
+
+ status = fec16_to_cpu(bdp->cbd_sc);
+ status &= ~BD_ENET_TX_STATS;
+ status |= BD_ENET_TX_INTR | BD_ENET_TX_LAST;
+ bdp->cbd_datlen = cpu_to_fec16(xsk_desc[i].len);
+ bdp->cbd_bufaddr = cpu_to_fec32(dma);
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ estatus = BD_ENET_TX_INT;
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
+ }
+
+ /* Make sure the updates to rest of the descriptor are performed
+ * before transferring ownership.
+ */
+ dma_wmb();
+
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ status |= BD_ENET_TX_READY | BD_ENET_TX_TC;
+ bdp->cbd_sc = cpu_to_fec16(status);
+ dma_wmb();
+
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+ txq->bd.cur = bdp;
+ }
+
+ /* Trigger transmission start */
+ fec_txq_trigger_xmit(fep, txq);
+
+ __netif_tx_unlock(nq);
+
+ return batch < budget;
+
+tx_unlock:
+ __netif_tx_unlock(nq);
+
+ return true;
+}
+
+static int fec_enet_tx_queue(struct fec_enet_private *fep,
+ u16 queue, int budget)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(fep->netdev, queue);
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ struct net_device *ndev = fep->netdev;
+ struct bufdesc *bdp = txq->dirty_tx;
+ int index, frame_len, entries_free;
+ struct fec_tx_buffer *tx_buf;
+ unsigned short status;
+ struct sk_buff *skb;
+ struct page *page;
+ int xsk_cnt = 0;
/* get next bdp of dirty_tx */
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
@@ -1512,45 +1587,77 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
break;
index = fec_enet_get_bd_index(bdp, &txq->bd);
+ tx_buf = &txq->tx_buf[index];
+ frame_len = fec16_to_cpu(bdp->cbd_datlen);
- if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
- skb = txq->tx_buf[index].buf_p;
+ switch (tx_buf->type) {
+ case FEC_TXBUF_T_SKB:
if (bdp->cbd_bufaddr &&
!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
+ frame_len, DMA_TO_DEVICE);
+
bdp->cbd_bufaddr = cpu_to_fec32(0);
+ skb = tx_buf->buf_p;
if (!skb)
goto tx_buf_done;
- } else {
+
+ frame_len = skb->len;
+
+ /* NOTE: SKBTX_IN_PROGRESS being set does not imply that we are
+ * the ones to timestamp the packet, so we still need to check
+ * the timestamping enabled flag.
+ */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ fep->hwts_tx_en) && fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ struct skb_shared_hwtstamps shhwtstamps;
+
+ fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+ /* Free the sk buffer associated with this last transmit */
+ napi_consume_skb(skb, budget);
+ break;
+ case FEC_TXBUF_T_XDP_NDO:
/* Tx processing cannot call any XDP (or page pool) APIs if
* the "budget" is 0. Because NAPI is called with budget of
* 0 (such as netpoll) indicates we may be in an IRQ context,
* however, we can't use the page pool from IRQ context.
*/
if (unlikely(!budget))
- break;
-
- if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
- xdpf = txq->tx_buf[index].buf_p;
- if (bdp->cbd_bufaddr)
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
- } else {
- page = txq->tx_buf[index].buf_p;
- }
+ goto out;
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ frame_len, DMA_TO_DEVICE);
bdp->cbd_bufaddr = cpu_to_fec32(0);
- if (unlikely(!txq->tx_buf[index].buf_p)) {
- txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
- goto tx_buf_done;
- }
+ xdp_return_frame_rx_napi(tx_buf->buf_p);
+ break;
+ case FEC_TXBUF_T_XDP_TX:
+ if (unlikely(!budget))
+ goto out;
- frame_len = fec16_to_cpu(bdp->cbd_datlen);
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+ page = tx_buf->buf_p;
+ /* The dma_sync_size is 0 because XDP_TX has already synced
+ * the DMA for_device.
+ */
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
+ 0, true);
+ break;
+ case FEC_TXBUF_T_XSK_XMIT:
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+ xsk_cnt++;
+ break;
+ case FEC_TXBUF_T_XSK_TX:
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+ xsk_buff_free(tx_buf->buf_p);
+ break;
+ default:
+ break;
}
/* Check for errors. */
@@ -1570,11 +1677,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
ndev->stats.tx_carrier_errors++;
} else {
ndev->stats.tx_packets++;
-
- if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
- ndev->stats.tx_bytes += skb->len;
- else
- ndev->stats.tx_bytes += frame_len;
+ ndev->stats.tx_bytes += frame_len;
}
/* Deferred means some collisions occurred during transmit,
@@ -1583,33 +1686,9 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
if (status & BD_ENET_TX_DEF)
ndev->stats.collisions++;
- if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
- /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
- * are to time stamp the packet, so we still need to check time
- * stamping enabled flag.
- */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
- fep->hwts_tx_en) && fep->bufdesc_ex) {
- struct skb_shared_hwtstamps shhwtstamps;
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-
- fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
- skb_tstamp_tx(skb, &shhwtstamps);
- }
-
- /* Free the sk buffer associated with this last transmit */
- napi_consume_skb(skb, budget);
- } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
- xdp_return_frame_rx_napi(xdpf);
- } else { /* recycle pages of XDP_TX frames */
- /* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
- page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
- 0, true);
- }
-
- txq->tx_buf[index].buf_p = NULL;
+ tx_buf->buf_p = NULL;
/* restore default tx buffer type: FEC_TXBUF_T_SKB */
- txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+ tx_buf->type = FEC_TXBUF_T_SKB;
tx_buf_done:
/* Make sure the update to bdp and tx_buf are performed
@@ -1630,20 +1709,43 @@ tx_buf_done:
}
}
+out:
+
/* ERR006358: Keep the transmitter going */
if (bdp != txq->bd.cur &&
readl(txq->bd.reg_desc_active) == 0)
writel(0, txq->bd.reg_desc_active);
+
+ if (txq->xsk_pool) {
+ struct xsk_buff_pool *pool = txq->xsk_pool;
+
+ if (xsk_cnt)
+ xsk_tx_completed(pool, xsk_cnt);
+
+ if (xsk_uses_need_wakeup(pool))
+ xsk_set_tx_need_wakeup(pool);
+
+ /* If the condition is true, it indicates that there are still
+ * packets to be transmitted, so return "budget" to make the
+ * NAPI continue polling.
+ */
+ if (!fec_enet_xsk_xmit(fep, pool, queue))
+ return budget;
+ }
+
+ return 0;
}
-static void fec_enet_tx(struct net_device *ndev, int budget)
+static int fec_enet_tx(struct net_device *ndev, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int i;
+ int i, count = 0;
/* Make sure that AVB queues are processed first. */
for (i = fep->num_tx_queues - 1; i >= 0; i--)
- fec_enet_tx_queue(ndev, i, budget);
+ count += fec_enet_tx_queue(fep, i, budget);
+
+ return count;
}
static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
@@ -1656,76 +1758,28 @@ static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
if (unlikely(!new_page))
return -ENOMEM;
- rxq->rx_buf[index] = new_page;
+ rxq->rx_buf[index].page = new_page;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
return 0;
}
-static u32
-fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
- struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
+static int fec_enet_update_cbd_zc(struct fec_enet_priv_rx_q *rxq,
+ struct bufdesc *bdp, int index)
{
- unsigned int sync, len = xdp->data_end - xdp->data;
- u32 ret = FEC_ENET_XDP_PASS;
- struct page *page;
- int err;
- u32 act;
-
- act = bpf_prog_run_xdp(prog, xdp);
-
- /* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover
- * max len CPU touch
- */
- sync = xdp->data_end - xdp->data;
- sync = max(sync, len);
-
- switch (act) {
- case XDP_PASS:
- rxq->stats[RX_XDP_PASS]++;
- ret = FEC_ENET_XDP_PASS;
- break;
-
- case XDP_REDIRECT:
- rxq->stats[RX_XDP_REDIRECT]++;
- err = xdp_do_redirect(fep->netdev, xdp, prog);
- if (unlikely(err))
- goto xdp_err;
-
- ret = FEC_ENET_XDP_REDIR;
- break;
-
- case XDP_TX:
- rxq->stats[RX_XDP_TX]++;
- err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
- if (unlikely(err)) {
- rxq->stats[RX_XDP_TX_ERRORS]++;
- goto xdp_err;
- }
+ struct xdp_buff *new_xdp;
+ dma_addr_t phys_addr;
- ret = FEC_ENET_XDP_TX;
- break;
+ new_xdp = xsk_buff_alloc(rxq->xsk_pool);
+ if (unlikely(!new_xdp))
+ return -ENOMEM;
- default:
- bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
- fallthrough;
-
- case XDP_ABORTED:
- fallthrough; /* handle aborts by dropping packet */
-
- case XDP_DROP:
- rxq->stats[RX_XDP_DROP]++;
-xdp_err:
- ret = FEC_ENET_XDP_CONSUMED;
- page = virt_to_head_page(xdp->data);
- page_pool_put_page(rxq->page_pool, page, sync, true);
- if (act != XDP_DROP)
- trace_xdp_exception(fep->netdev, prog, act);
- break;
- }
+ rxq->rx_buf[index].xdp = new_xdp;
+ phys_addr = xsk_buff_xdp_get_dma(new_xdp);
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
- return ret;
+ return 0;
}
static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb)
@@ -1744,40 +1798,113 @@ static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb)
}
}
+static int fec_rx_error_check(struct net_device *ndev, u16 status)
+{
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+ BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
+ BD_ENET_RX_CL)) {
+ ndev->stats.rx_errors++;
+
+ if (status & BD_ENET_RX_OV) {
+ /* FIFO overrun */
+ ndev->stats.rx_fifo_errors++;
+ return -EIO;
+ }
+
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH |
+ BD_ENET_RX_LAST)) {
+ /* Frame too long or too short. */
+ ndev->stats.rx_length_errors++;
+ if ((status & BD_ENET_RX_LAST) && net_ratelimit())
+ netdev_err(ndev, "rcv is not +last\n");
+ }
+
+ /* CRC Error */
+ if (status & BD_ENET_RX_CR)
+ ndev->stats.rx_crc_errors++;
+
+ /* Report late collisions as a frame error. */
+ if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+ ndev->stats.rx_frame_errors++;
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *fec_build_skb(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq,
+ struct bufdesc *bdp,
+ struct page *page, u32 len)
+{
+ struct net_device *ndev = fep->netdev;
+ struct bufdesc_ex *ebdp;
+ struct sk_buff *skb;
+
+ skb = build_skb(page_address(page),
+ PAGE_SIZE << fep->pagepool_order);
+ if (unlikely(!skb)) {
+ page_pool_recycle_direct(rxq->page_pool, page);
+ ndev->stats.rx_dropped++;
+ if (net_ratelimit())
+ netdev_err(ndev, "build_skb failed\n");
+
+ return NULL;
+ }
+
+ skb_reserve(skb, FEC_ENET_XDP_HEADROOM + fep->rx_shift);
+ skb_put(skb, len);
+ skb_mark_for_recycle(skb);
+
+ /* Get offloads from the enhanced buffer descriptor */
+ if (fep->bufdesc_ex) {
+ ebdp = (struct bufdesc_ex *)bdp;
+
+ /* If this is a VLAN packet remove the VLAN Tag */
+ if (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))
+ fec_enet_rx_vlan(ndev, skb);
+
+ /* Get receive timestamp from the skb */
+ if (fep->hwts_rx_en)
+ fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
+ skb_hwtstamps(skb));
+
+ if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) {
+ if (!(ebdp->cbd_esc &
+ cpu_to_fec32(FLAG_RX_CSUM_ERROR)))
+ /* don't check it */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+ }
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb_record_rx_queue(skb, rxq->bd.qid);
+
+ return skb;
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
* effectively tossing the packet.
*/
-static int
-fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
+static int fec_enet_rx_queue(struct fec_enet_private *fep,
+ u16 queue, int budget)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_enet_priv_rx_q *rxq;
- struct bufdesc *bdp;
- unsigned short status;
- struct sk_buff *skb;
- ushort pkt_len;
- int pkt_received = 0;
- struct bufdesc_ex *ebdp = NULL;
- int index = 0;
- bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
- struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
- u32 ret, xdp_result = FEC_ENET_XDP_PASS;
- u32 data_start = FEC_ENET_XDP_HEADROOM;
- int cpu = smp_processor_id();
- struct xdp_buff xdp;
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
+ bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+ struct net_device *ndev = fep->netdev;
+ struct bufdesc *bdp = rxq->bd.cur;
+ u32 sub_len = 4 + fep->rx_shift;
+ int pkt_received = 0;
+ u16 status, pkt_len;
+ struct sk_buff *skb;
struct page *page;
- __fec32 cbd_bufaddr;
- u32 sub_len = 4;
-
- /*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of
- * FEC_RACC_SHIFT16 is set by default in the probe function.
- */
- if (fep->quirks & FEC_QUIRK_HAS_RACC) {
- data_start += 2;
- sub_len += 2;
- }
+ dma_addr_t dma;
+ int index;
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
/*
@@ -1786,139 +1913,497 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
*/
flush_cache_all();
#endif
- rxq = fep->rx_queue[queue_id];
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
- bdp = rxq->bd.cur;
- xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
-
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
if (pkt_received >= budget)
break;
pkt_received++;
- writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
+ writel(FEC_ENET_RXF_GET(queue), fep->hwp + FEC_IEVENT);
/* Check for errors. */
status ^= BD_ENET_RX_LAST;
- if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
- BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
- BD_ENET_RX_CL)) {
- ndev->stats.rx_errors++;
- if (status & BD_ENET_RX_OV) {
- /* FIFO overrun */
- ndev->stats.rx_fifo_errors++;
- goto rx_processing_done;
- }
- if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
- | BD_ENET_RX_LAST)) {
- /* Frame too long or too short. */
- ndev->stats.rx_length_errors++;
- if (status & BD_ENET_RX_LAST)
- netdev_err(ndev, "rcv is not +last\n");
- }
- if (status & BD_ENET_RX_CR) /* CRC Error */
- ndev->stats.rx_crc_errors++;
- /* Report late collisions as a frame error. */
- if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
- ndev->stats.rx_frame_errors++;
+ if (unlikely(fec_rx_error_check(ndev, status)))
goto rx_processing_done;
- }
/* Process the incoming frame. */
ndev->stats.rx_packets++;
pkt_len = fec16_to_cpu(bdp->cbd_datlen);
- ndev->stats.rx_bytes += pkt_len;
- if (fep->quirks & FEC_QUIRK_HAS_RACC)
- ndev->stats.rx_bytes -= 2;
+ ndev->stats.rx_bytes += pkt_len - fep->rx_shift;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
- page = rxq->rx_buf[index];
- cbd_bufaddr = bdp->cbd_bufaddr;
+ page = rxq->rx_buf[index].page;
+ dma = fec32_to_cpu(bdp->cbd_bufaddr);
if (fec_enet_update_cbd(rxq, bdp, index)) {
ndev->stats.rx_dropped++;
goto rx_processing_done;
}
- dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(cbd_bufaddr),
- pkt_len,
+ dma_sync_single_for_cpu(&fep->pdev->dev, dma, pkt_len,
DMA_FROM_DEVICE);
prefetch(page_address(page));
- if (xdp_prog) {
- xdp_buff_clear_frags_flag(&xdp);
- /* subtract 16bit shift and FCS */
- xdp_prepare_buff(&xdp, page_address(page),
- data_start, pkt_len - sub_len, false);
- ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
- xdp_result |= ret;
- if (ret != FEC_ENET_XDP_PASS)
- goto rx_processing_done;
+ if (unlikely(need_swap)) {
+ u8 *data;
+
+ data = page_address(page) + FEC_ENET_XDP_HEADROOM;
+ swap_buffer(data, pkt_len);
}
/* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
* bridging applications.
*/
- skb = build_skb(page_address(page),
- PAGE_SIZE << fep->pagepool_order);
- if (unlikely(!skb)) {
- page_pool_recycle_direct(rxq->page_pool, page);
- ndev->stats.rx_dropped++;
+ skb = fec_build_skb(fep, rxq, bdp, page, pkt_len - sub_len);
+ if (!skb)
+ goto rx_processing_done;
+
+ napi_gro_receive(&fep->napi, skb);
- netdev_err_once(ndev, "build_skb failed!\n");
+rx_processing_done:
+ /* Clear the status flags for this buffer */
+ status &= ~BD_ENET_RX_STATS;
+
+ /* Mark the buffer empty */
+ status |= BD_ENET_RX_EMPTY;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
+ }
+ /* Make sure the updates to rest of the descriptor are
+ * performed before transferring ownership.
+ */
+ wmb();
+ bdp->cbd_sc = cpu_to_fec16(status);
+
+ /* Update BD pointer to next entry */
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ writel(0, rxq->bd.reg_desc_active);
+ }
+ rxq->bd.cur = bdp;
+
+ return pkt_received;
+}
+
+static void fec_xdp_drop(struct fec_enet_priv_rx_q *rxq,
+ struct xdp_buff *xdp, u32 sync)
+{
+ struct page *page = virt_to_head_page(xdp->data);
+
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+}
+
+static int
+fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
+{
+ if (unlikely(index < 0))
+ return 0;
+
+ return (index % fep->num_tx_queues);
+}
+
+static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
+ int budget, struct bpf_prog *prog)
+{
+ u32 data_start = FEC_ENET_XDP_HEADROOM + fep->rx_shift;
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
+ struct net_device *ndev = fep->netdev;
+ struct bufdesc *bdp = rxq->bd.cur;
+ u32 sub_len = 4 + fep->rx_shift;
+ int cpu = smp_processor_id();
+ int pkt_received = 0;
+ struct sk_buff *skb;
+ u16 status, pkt_len;
+ struct xdp_buff xdp;
+ int tx_qid = queue;
+ struct page *page;
+ u32 xdp_res = 0;
+ dma_addr_t dma;
+ int index, err;
+ u32 act, sync;
+
+#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
+ /*
+ * Hacky flush of all caches instead of using the DMA API for the TSO
+ * headers.
+ */
+ flush_cache_all();
+#endif
+
+ if (unlikely(tx_qid >= fep->num_tx_queues))
+ tx_qid = fec_enet_xdp_get_tx_queue(fep, cpu);
+
+ xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
+
+ while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
+ if (pkt_received >= budget)
+ break;
+ pkt_received++;
+
+ writel(FEC_ENET_RXF_GET(queue), fep->hwp + FEC_IEVENT);
+
+ /* Check for errors. */
+ status ^= BD_ENET_RX_LAST;
+ if (unlikely(fec_rx_error_check(ndev, status)))
+ goto rx_processing_done;
+
+ /* Process the incoming frame. */
+ ndev->stats.rx_packets++;
+ pkt_len = fec16_to_cpu(bdp->cbd_datlen);
+ ndev->stats.rx_bytes += pkt_len - fep->rx_shift;
+
+ index = fec_enet_get_bd_index(bdp, &rxq->bd);
+ page = rxq->rx_buf[index].page;
+ dma = fec32_to_cpu(bdp->cbd_bufaddr);
+
+ if (fec_enet_update_cbd(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
goto rx_processing_done;
}
- skb_reserve(skb, data_start);
- skb_put(skb, pkt_len - sub_len);
- skb_mark_for_recycle(skb);
+ dma_sync_single_for_cpu(&fep->pdev->dev, dma, pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(page_address(page));
- if (unlikely(need_swap)) {
- u8 *data;
+ xdp_buff_clear_frags_flag(&xdp);
+ /* subtract 16bit shift and FCS */
+ pkt_len -= sub_len;
+ xdp_prepare_buff(&xdp, page_address(page), data_start,
+ pkt_len, false);
- data = page_address(page) + FEC_ENET_XDP_HEADROOM;
- swap_buffer(data, pkt_len);
+ act = bpf_prog_run_xdp(prog, &xdp);
+ /* Due to xdp_adjust_tail and xdp_adjust_head, the DMA sync
+ * for_device must cover the max length the CPU touched.
+ */
+ sync = xdp.data_end - xdp.data;
+ sync = max(sync, pkt_len);
+
+ switch (act) {
+ case XDP_PASS:
+ rxq->stats[RX_XDP_PASS]++;
+ /* The packet length includes FCS, but we don't want to
+ * include that when passing upstream as it messes up
+ * bridging applications.
+ */
+ skb = fec_build_skb(fep, rxq, bdp, page, pkt_len);
+ if (!skb)
+ trace_xdp_exception(ndev, prog, XDP_PASS);
+ else
+ napi_gro_receive(&fep->napi, skb);
+
+ break;
+ case XDP_REDIRECT:
+ rxq->stats[RX_XDP_REDIRECT]++;
+ err = xdp_do_redirect(ndev, &xdp, prog);
+ if (unlikely(err)) {
+ fec_xdp_drop(rxq, &xdp, sync);
+ trace_xdp_exception(ndev, prog, XDP_REDIRECT);
+ } else {
+ xdp_res |= FEC_ENET_XDP_REDIR;
+ }
+ break;
+ case XDP_TX:
+ rxq->stats[RX_XDP_TX]++;
+ err = fec_enet_xdp_tx_xmit(fep, cpu, &xdp, sync, tx_qid);
+ if (unlikely(err)) {
+ rxq->stats[RX_XDP_TX_ERRORS]++;
+ fec_xdp_drop(rxq, &xdp, sync);
+ trace_xdp_exception(ndev, prog, XDP_TX);
+ } else {
+ xdp_res |= FEC_ENET_XDP_TX;
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ /* handle aborts by dropping packet */
+ fallthrough;
+ case XDP_DROP:
+ rxq->stats[RX_XDP_DROP]++;
+ fec_xdp_drop(rxq, &xdp, sync);
+ break;
}
- /* Extract the enhanced buffer descriptor */
- ebdp = NULL;
- if (fep->bufdesc_ex)
- ebdp = (struct bufdesc_ex *)bdp;
+rx_processing_done:
+ /* Clear the status flags for this buffer */
+ status &= ~BD_ENET_RX_STATS;
+ /* Mark the buffer empty */
+ status |= BD_ENET_RX_EMPTY;
- /* If this is a VLAN packet remove the VLAN Tag */
- if (fep->bufdesc_ex &&
- (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN)))
- fec_enet_rx_vlan(ndev, skb);
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- skb->protocol = eth_type_trans(skb, ndev);
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
+ }
- /* Get receive timestamp from the skb */
- if (fep->hwts_rx_en && fep->bufdesc_ex)
- fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
- skb_hwtstamps(skb));
+ /* Make sure the updates to rest of the descriptor are
+ * performed before transferring ownership.
+ */
+ dma_wmb();
+ bdp->cbd_sc = cpu_to_fec16(status);
- if (fep->bufdesc_ex &&
- (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
- if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
- /* don't check it */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- skb_checksum_none_assert(skb);
+ /* Update BD pointer to next entry */
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ writel(0, rxq->bd.reg_desc_active);
+ }
+
+ rxq->bd.cur = bdp;
+
+ if (xdp_res & FEC_ENET_XDP_REDIR)
+ xdp_do_flush();
+
+ if (xdp_res & FEC_ENET_XDP_TX)
+ /* Trigger transmission start */
+ fec_txq_trigger_xmit(fep, fep->tx_queue[tx_qid]);
+
+ return pkt_received;
+}
+
+static struct sk_buff *fec_build_skb_zc(struct xdp_buff *xsk,
+ struct napi_struct *napi)
+{
+ size_t len = xdp_get_buff_len(xsk);
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(napi, len);
+ if (unlikely(!skb)) {
+ xsk_buff_free(xsk);
+ return NULL;
+ }
+
+ skb_put_data(skb, xsk->data, len);
+ xsk_buff_free(xsk);
+
+ return skb;
+}
+
+static int fec_enet_xsk_tx_xmit(struct fec_enet_private *fep,
+ struct xdp_buff *xsk, int cpu,
+ int queue)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(fep->netdev, queue);
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ u32 offset = xsk->data - xsk->data_hard_start;
+ u32 headroom = txq->xsk_pool->headroom;
+ u32 len = xsk->data_end - xsk->data;
+ u32 index, status, estatus;
+ struct bufdesc *bdp;
+ dma_addr_t dma;
+
+ __netif_tx_lock(nq, cpu);
+
+ /* Avoid tx timeout as XDP shares the queue with kernel stack */
+ txq_trans_cond_update(nq);
+
+ if (!fec_enet_get_free_txdesc_num(txq)) {
+ __netif_tx_unlock(nq);
+
+ return -EBUSY;
+ }
+
+ /* Fill in a Tx ring entry */
+ bdp = txq->bd.cur;
+ status = fec16_to_cpu(bdp->cbd_sc);
+ status &= ~BD_ENET_TX_STATS;
+
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
+ dma = xsk_buff_xdp_get_frame_dma(xsk) + headroom + offset;
+
+ xsk_buff_raw_dma_sync_for_device(txq->xsk_pool, dma, len);
+
+ txq->tx_buf[index].buf_p = xsk;
+ txq->tx_buf[index].type = FEC_TXBUF_T_XSK_TX;
+
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex)
+ estatus = BD_ENET_TX_INT;
+
+ bdp->cbd_bufaddr = cpu_to_fec32(dma);
+ bdp->cbd_datlen = cpu_to_fec16(len);
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
+ }
+
+ dma_wmb();
+ status |= BD_ENET_TX_READY | BD_ENET_TX_TC;
+ bdp->cbd_sc = cpu_to_fec16(status);
+ dma_wmb();
+
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+ txq->bd.cur = bdp;
+
+ __netif_tx_unlock(nq);
+
+ return 0;
+}
+
+static int fec_enet_rx_queue_xsk(struct fec_enet_private *fep, int queue,
+ int budget, struct bpf_prog *prog)
+{
+ u32 data_start = FEC_ENET_XDP_HEADROOM + fep->rx_shift;
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
+ struct net_device *ndev = fep->netdev;
+ struct bufdesc *bdp = rxq->bd.cur;
+ u32 sub_len = 4 + fep->rx_shift;
+ int cpu = smp_processor_id();
+ bool wakeup_xsk = false;
+ struct xdp_buff *xsk;
+ int pkt_received = 0;
+ struct sk_buff *skb;
+ u16 status, pkt_len;
+ u32 xdp_res = 0;
+ int index, err;
+ u32 act;
+
+#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
+ /*
+ * Hacky flush of all caches instead of using the DMA API for the TSO
+ * headers.
+ */
+ flush_cache_all();
+#endif
+
+ while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
+ if (unlikely(pkt_received >= budget))
+ break;
+
+ writel(FEC_ENET_RXF_GET(queue), fep->hwp + FEC_IEVENT);
+
+ index = fec_enet_get_bd_index(bdp, &rxq->bd);
+ xsk = rxq->rx_buf[index].xdp;
+ if (unlikely(!xsk)) {
+ if (fec_enet_update_cbd_zc(rxq, bdp, index))
+ break;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
}
+
+ dma_wmb();
+ status &= ~BD_ENET_RX_STATS;
+ status |= BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = cpu_to_fec16(status);
+ break;
}
- skb_record_rx_queue(skb, queue_id);
- napi_gro_receive(&fep->napi, skb);
+ pkt_received++;
+ /* Check for errors. */
+ status ^= BD_ENET_RX_LAST;
+ if (unlikely(fec_rx_error_check(ndev, status)))
+ goto rx_processing_done;
+
+ /* Process the incoming frame. */
+ ndev->stats.rx_packets++;
+ pkt_len = fec16_to_cpu(bdp->cbd_datlen);
+ ndev->stats.rx_bytes += pkt_len - fep->rx_shift;
+
+ if (fec_enet_update_cbd_zc(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
+ pkt_len -= sub_len;
+ xsk->data = xsk->data_hard_start + data_start;
+ /* Subtract FCS and 16bit shift */
+ xsk->data_end = xsk->data + pkt_len;
+ xsk->data_meta = xsk->data;
+ xsk_buff_dma_sync_for_cpu(xsk);
+
+ /* If the XSK pool is enabled before the bpf program is
+ * installed, or the bpf program is uninstalled before
+ * the XSK pool is disabled, prog will be NULL and we
+ * need to default to the XDP_PASS action.
+ */
+ if (unlikely(!prog))
+ act = XDP_PASS;
+ else
+ act = bpf_prog_run_xdp(prog, xsk);
+
+ switch (act) {
+ case XDP_PASS:
+ rxq->stats[RX_XDP_PASS]++;
+ skb = fec_build_skb_zc(xsk, &fep->napi);
+ if (unlikely(!skb)) {
+ ndev->stats.rx_dropped++;
+ trace_xdp_exception(ndev, prog, XDP_PASS);
+ } else {
+ napi_gro_receive(&fep->napi, skb);
+ }
+
+ break;
+ case XDP_TX:
+ rxq->stats[RX_XDP_TX]++;
+ err = fec_enet_xsk_tx_xmit(fep, xsk, cpu, queue);
+ if (unlikely(err)) {
+ rxq->stats[RX_XDP_TX_ERRORS]++;
+ xsk_buff_free(xsk);
+ trace_xdp_exception(ndev, prog, XDP_TX);
+ } else {
+ xdp_res |= FEC_ENET_XDP_TX;
+ }
+ break;
+ case XDP_REDIRECT:
+ rxq->stats[RX_XDP_REDIRECT]++;
+ err = xdp_do_redirect(ndev, xsk, prog);
+ if (unlikely(err)) {
+ if (err == -ENOBUFS)
+ wakeup_xsk = true;
+
+ rxq->stats[RX_XDP_DROP]++;
+ xsk_buff_free(xsk);
+ trace_xdp_exception(ndev, prog, XDP_REDIRECT);
+ } else {
+ xdp_res |= FEC_ENET_XDP_REDIR;
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ rxq->stats[RX_XDP_DROP]++;
+ xsk_buff_free(xsk);
+ break;
+ }
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
-
/* Mark the buffer empty */
status |= BD_ENET_RX_EMPTY;
@@ -1929,37 +2414,59 @@ rx_processing_done:
ebdp->cbd_prot = 0;
ebdp->cbd_bdu = 0;
}
+
/* Make sure the updates to rest of the descriptor are
* performed before transferring ownership.
*/
- wmb();
+ dma_wmb();
bdp->cbd_sc = cpu_to_fec16(status);
/* Update BD pointer to next entry */
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
/* Doing this here will keep the FEC running while we process
- * incoming frames. On a heavily loaded network, we should be
+ * incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
*/
writel(0, rxq->bd.reg_desc_active);
}
+
rxq->bd.cur = bdp;
- if (xdp_result & FEC_ENET_XDP_REDIR)
+ if (xdp_res & FEC_ENET_XDP_REDIR)
xdp_do_flush();
+ if (xdp_res & FEC_ENET_XDP_TX)
+ fec_txq_trigger_xmit(fep, fep->tx_queue[queue]);
+
+ if (rxq->xsk_pool && xsk_uses_need_wakeup(rxq->xsk_pool)) {
+ if (wakeup_xsk)
+ xsk_set_rx_need_wakeup(rxq->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rxq->xsk_pool);
+ }
+
return pkt_received;
}
static int fec_enet_rx(struct net_device *ndev, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct bpf_prog *prog = READ_ONCE(fep->xdp_prog);
int i, done = 0;
/* Make sure that AVB queues are processed first. */
- for (i = fep->num_rx_queues - 1; i >= 0; i--)
- done += fec_enet_rx_queue(ndev, i, budget - done);
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
+ int batch = budget - done;
+
+ if (rxq->xsk_pool)
+ done += fec_enet_rx_queue_xsk(fep, i, batch, prog);
+ else if (prog)
+ done += fec_enet_rx_queue_xdp(fep, i, batch, prog);
+ else
+ done += fec_enet_rx_queue(fep, i, batch);
+ }
return done;
}
@@ -2002,19 +2509,22 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
struct net_device *ndev = napi->dev;
struct fec_enet_private *fep = netdev_priv(ndev);
- int done = 0;
+ int rx_done = 0, tx_done = 0;
+ int max_done;
do {
- done += fec_enet_rx(ndev, budget - done);
- fec_enet_tx(ndev, budget);
- } while ((done < budget) && fec_enet_collect_events(fep));
+ rx_done += fec_enet_rx(ndev, budget - rx_done);
+ tx_done += fec_enet_tx(ndev, budget);
+ max_done = max(rx_done, tx_done);
+ } while ((max_done < budget) && fec_enet_collect_events(fep));
- if (done < budget) {
- napi_complete_done(napi, done);
+ if (max_done < budget) {
+ napi_complete_done(napi, max_done);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ return max_done;
}
- return done;
+ return budget;
}
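
[Editorial note, not part of the patch] The poll above follows the standard
NAPI contract: return the full budget while either ring may still have work so
the core keeps polling, and re-arm interrupts only after napi_complete_done()
succeeds. A reduced sketch (my_clean_rings()/my_unmask_irqs() are placeholders):

	static int my_napi_poll(struct napi_struct *napi, int budget)
	{
		int done = my_clean_rings(napi->dev, budget);

		if (done < budget) {
			napi_complete_done(napi, done);
			my_unmask_irqs(napi->dev); /* interrupts back on */
			return done;
		}

		return budget; /* more work pending, stay in polling mode */
	}
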
/* ------------------------------------------------------------------------- */
@@ -3301,27 +3811,86 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.self_test = net_selftest,
};
+static int fec_xdp_rxq_info_reg(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq)
+{
+ struct net_device *ndev = fep->netdev;
+ void *allocator;
+ int type, err;
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq->id, 0);
+ if (err) {
+ netdev_err(ndev, "Failed to register xdp rxq info\n");
+ return err;
+ }
+
+ allocator = rxq->xsk_pool ? NULL : rxq->page_pool;
+ type = rxq->xsk_pool ? MEM_TYPE_XSK_BUFF_POOL : MEM_TYPE_PAGE_POOL;
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, type, allocator);
+ if (err) {
+ netdev_err(ndev, "Failed to register XDP mem model\n");
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ return err;
+ }
+
+ if (rxq->xsk_pool)
+ xsk_pool_set_rxq_info(rxq->xsk_pool, &rxq->xdp_rxq);
+
+ return 0;
+}
+
+static void fec_xdp_rxq_info_unreg(struct fec_enet_priv_rx_q *rxq)
+{
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) {
+ xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq);
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ }
+}
+
+static void fec_free_rxq_buffers(struct fec_enet_priv_rx_q *rxq)
+{
+ bool xsk = !!rxq->xsk_pool;
+ int i;
+
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+ union fec_rx_buffer *buf = &rxq->rx_buf[i];
+
+ if (!buf->buf_p)
+ continue;
+
+ if (xsk)
+ xsk_buff_free(buf->xdp);
+ else
+ page_pool_put_full_page(rxq->page_pool,
+ buf->page, false);
+
+ rxq->rx_buf[i].buf_p = NULL;
+ }
+
+ if (!xsk) {
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ }
+}
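
[Editorial note, not part of the patch] The buf_p/page/xdp accesses above
suggest a small union added to fec.h (presumed shape, not quoted from the
patch):

	union fec_rx_buffer {
		struct page *page;	/* page-pool mode */
		struct xdp_buff *xdp;	/* XSK zero-copy mode */
		void *buf_p;		/* type-erased occupancy check */
	};
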
+
static void fec_enet_free_buffers(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
+ struct page *page;
unsigned int q;
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
- for (i = 0; i < rxq->bd.ring_size; i++)
- page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
- false);
+
+ fec_xdp_rxq_info_unreg(rxq);
+ fec_free_rxq_buffers(rxq);
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
-
- if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
- xdp_rxq_info_unreg(&rxq->xdp_rxq);
- page_pool_destroy(rxq->page_pool);
- rxq->page_pool = NULL;
}
for (q = 0; q < fep->num_tx_queues; q++) {
@@ -3330,20 +3899,23 @@ static void fec_enet_free_buffers(struct net_device *ndev)
kfree(txq->tx_bounce[i]);
txq->tx_bounce[i] = NULL;
- if (!txq->tx_buf[i].buf_p) {
- txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
- continue;
- }
-
- if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+ switch (txq->tx_buf[i].type) {
+ case FEC_TXBUF_T_SKB:
dev_kfree_skb(txq->tx_buf[i].buf_p);
- } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+ break;
+ case FEC_TXBUF_T_XDP_NDO:
xdp_return_frame(txq->tx_buf[i].buf_p);
- } else {
- struct page *page = txq->tx_buf[i].buf_p;
-
+ break;
+ case FEC_TXBUF_T_XDP_TX:
+ page = txq->tx_buf[i].buf_p;
page_pool_put_page(pp_page_to_nmdesc(page)->pp,
page, 0, false);
+ break;
+ case FEC_TXBUF_T_XSK_TX:
+ xsk_buff_free(txq->tx_buf[i].buf_p);
+ break;
+ default:
+ break;
}
txq->tx_buf[i].buf_p = NULL;
@@ -3420,22 +3992,18 @@ alloc_failed:
return ret;
}
-static int
-fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
+static int fec_alloc_rxq_buffers_pp(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_enet_priv_rx_q *rxq;
+ struct bufdesc *bdp = rxq->bd.base;
dma_addr_t phys_addr;
- struct bufdesc *bdp;
struct page *page;
int i, err;
- rxq = fep->rx_queue[queue];
- bdp = rxq->bd.base;
-
- err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
+ err = fec_enet_create_page_pool(fep, rxq);
if (err < 0) {
- netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+ netdev_err(fep->netdev, "%s failed queue %d (%d)\n",
+ __func__, rxq->bd.qid, err);
return err;
}
@@ -3454,31 +4022,81 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
for (i = 0; i < rxq->bd.ring_size; i++) {
page = page_pool_dev_alloc_pages(rxq->page_pool);
- if (!page)
- goto err_alloc;
+ if (!page) {
+ err = -ENOMEM;
+ goto free_rx_buffers;
+ }
phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+ rxq->rx_buf[i].page = page;
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+ }
- rxq->rx_buf[i] = page;
- bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
+ return 0;
- if (fep->bufdesc_ex) {
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
- }
+free_rx_buffers:
+ fec_free_rxq_buffers(rxq);
+
+ return err;
+}
+static int fec_alloc_rxq_buffers_zc(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq)
+{
+ union fec_rx_buffer *buf = &rxq->rx_buf[0];
+ struct bufdesc *bdp = rxq->bd.base;
+ dma_addr_t phys_addr;
+ int i;
+
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+ buf[i].xdp = xsk_buff_alloc(rxq->xsk_pool);
+ if (!buf[i].xdp)
+ break;
+
+ phys_addr = xsk_buff_xdp_get_dma(buf[i].xdp);
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
}
- /* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
+ for (; i < rxq->bd.ring_size; i++) {
+ buf[i].xdp = NULL;
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+ }
+
+ return 0;
+}
+
+static int
+fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_rx_q *rxq;
+ int err;
+
+ rxq = fep->rx_queue[queue];
+ if (rxq->xsk_pool) {
+ /* The RX XDP ZC buffer pool may not be fully populated,
+ * e.g. for an xdpsock TX-only workload.
+ */
+ fec_alloc_rxq_buffers_zc(fep, rxq);
+ } else {
+ err = fec_alloc_rxq_buffers_pp(fep, rxq);
+ if (err)
+ goto free_buffers;
+ }
+
+ err = fec_xdp_rxq_info_reg(fep, rxq);
+ if (err)
+ goto free_buffers;
+
return 0;
- err_alloc:
+free_buffers:
fec_enet_free_buffers(ndev);
- return -ENOMEM;
+
+ return err;
}
static int
@@ -3792,21 +4410,237 @@ static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
}
+static void fec_free_rxq(struct fec_enet_priv_rx_q *rxq)
+{
+ fec_xdp_rxq_info_unreg(rxq);
+ fec_free_rxq_buffers(rxq);
+ kfree(rxq);
+}
+
+static struct fec_enet_priv_rx_q *
+fec_alloc_new_rxq_xsk(struct fec_enet_private *fep, int queue,
+ struct xsk_buff_pool *pool)
+{
+ struct fec_enet_priv_rx_q *old_rxq = fep->rx_queue[queue];
+ struct fec_enet_priv_rx_q *rxq;
+ union fec_rx_buffer *buf;
+ int i;
+
+ rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
+ if (!rxq)
+ return NULL;
+
+ /* Copy the BD ring to the new rxq */
+ rxq->bd = old_rxq->bd;
+ rxq->id = queue;
+ rxq->xsk_pool = pool;
+ buf = &rxq->rx_buf[0];
+
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+ buf[i].xdp = xsk_buff_alloc(pool);
+ /* The RX XDP ZC buffer pool may not be fully populated,
+ * e.g. for an xdpsock TX-only workload.
+ */
+ if (!buf[i].xdp)
+ break;
+ }
+
+ if (fec_xdp_rxq_info_reg(fep, rxq))
+ goto free_buffers;
+
+ return rxq;
+
+free_buffers:
+ while (--i >= 0)
+ xsk_buff_free(buf[i].xdp);
+
+ kfree(rxq);
+
+ return NULL;
+}
+
+static struct fec_enet_priv_rx_q *
+fec_alloc_new_rxq_pp(struct fec_enet_private *fep, int queue)
+{
+ struct fec_enet_priv_rx_q *old_rxq = fep->rx_queue[queue];
+ struct fec_enet_priv_rx_q *rxq;
+ union fec_rx_buffer *buf;
+ int i = 0;
+
+ rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
+ if (!rxq)
+ return NULL;
+
+ rxq->bd = old_rxq->bd;
+ rxq->id = queue;
+
+ if (fec_enet_create_page_pool(fep, rxq))
+ goto free_rxq;
+
+ buf = &rxq->rx_buf[0];
+ for (; i < rxq->bd.ring_size; i++) {
+ buf[i].page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!buf[i].page)
+ goto free_buffers;
+ }
+
+ if (fec_xdp_rxq_info_reg(fep, rxq))
+ goto free_buffers;
+
+ return rxq;
+
+free_buffers:
+ while (--i >= 0)
+ page_pool_put_full_page(rxq->page_pool,
+ buf[i].page, false);
+
+ page_pool_destroy(rxq->page_pool);
+free_rxq:
+ kfree(rxq);
+
+ return NULL;
+}
+
+static void fec_init_rxq_bd_buffers(struct fec_enet_priv_rx_q *rxq, bool xsk)
+{
+ union fec_rx_buffer *buf = &rxq->rx_buf[0];
+ struct bufdesc *bdp = rxq->bd.base;
+ dma_addr_t dma;
+
+ for (int i = 0; i < rxq->bd.ring_size; i++) {
+ if (xsk)
+ dma = buf[i].xdp ?
+ xsk_buff_xdp_get_dma(buf[i].xdp) : 0;
+ else
+ dma = page_pool_get_dma_addr(buf[i].page) +
+ FEC_ENET_XDP_HEADROOM;
+
+ bdp->cbd_bufaddr = cpu_to_fec32(dma);
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+ }
+}
+
+static int fec_xsk_restart_napi(struct fec_enet_private *fep,
+ struct xsk_buff_pool *pool,
+ u16 queue)
+{
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ struct net_device *ndev = fep->netdev;
+ struct fec_enet_priv_rx_q *rxq;
+ int err;
+
+ napi_disable(&fep->napi);
+ netif_tx_disable(ndev);
+ synchronize_rcu();
+
+ rxq = pool ? fec_alloc_new_rxq_xsk(fep, queue, pool) :
+ fec_alloc_new_rxq_pp(fep, queue);
+ if (!rxq) {
+ err = -ENOMEM;
+ goto err_alloc_new_rxq;
+ }
+
+ /* Replace the old rxq with the new rxq */
+ fec_free_rxq(fep->rx_queue[queue]);
+ fep->rx_queue[queue] = rxq;
+ fec_init_rxq_bd_buffers(rxq, !!pool);
+ txq->xsk_pool = pool;
+
+ fec_restart(ndev);
+ napi_enable(&fep->napi);
+ netif_tx_start_all_queues(ndev);
+
+ return 0;
+
+err_alloc_new_rxq:
+ napi_enable(&fep->napi);
+ netif_tx_start_all_queues(ndev);
+
+ return err;
+}
+
+static int fec_enable_xsk_pool(struct fec_enet_private *fep,
+ struct xsk_buff_pool *pool,
+ u16 queue)
+{
+ int err;
+
+ err = xsk_pool_dma_map(pool, &fep->pdev->dev, 0);
+ if (err) {
+ netdev_err(fep->netdev, "Failed to map xsk pool\n");
+ return err;
+ }
+
+ if (!netif_running(fep->netdev)) {
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+
+ rxq->xsk_pool = pool;
+ txq->xsk_pool = pool;
+
+ return 0;
+ }
+
+ err = fec_xsk_restart_napi(fep, pool, queue);
+ if (err) {
+ xsk_pool_dma_unmap(pool, 0);
+ return err;
+ }
+
+ return 0;
+}
+
+static int fec_disable_xsk_pool(struct fec_enet_private *fep,
+ u16 queue)
+{
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ struct xsk_buff_pool *old_pool = txq->xsk_pool;
+ int err;
+
+ if (!netif_running(fep->netdev)) {
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
+
+ xsk_pool_dma_unmap(old_pool, 0);
+ rxq->xsk_pool = NULL;
+ txq->xsk_pool = NULL;
+
+ return 0;
+ }
+
+ err = fec_xsk_restart_napi(fep, NULL, queue);
+ if (err)
+ return err;
+
+ xsk_pool_dma_unmap(old_pool, 0);
+
+ return 0;
+}
+
+static int fec_setup_xsk_pool(struct fec_enet_private *fep,
+ struct xsk_buff_pool *pool,
+ u16 queue)
+{
+ if (queue >= fep->num_rx_queues || queue >= fep->num_tx_queues)
+ return -ERANGE;
+
+ return pool ? fec_enable_xsk_pool(fep, pool, queue) :
+ fec_disable_xsk_pool(fep, queue);
+}
+
static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
struct fec_enet_private *fep = netdev_priv(dev);
bool is_run = netif_running(dev);
struct bpf_prog *old_prog;
+ /* No need to support SoCs that require frame swapping, as the
+ * performance would be no better than skb mode.
+ */
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ return -EOPNOTSUPP;
+
switch (bpf->command) {
case XDP_SETUP_PROG:
- /* No need to support the SoCs that require to
- * do the frame swap because the performance wouldn't be
- * better than the skb mode.
- */
- if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
- return -EOPNOTSUPP;
-
if (!bpf->prog)
xdp_features_clear_redirect_target(dev);
@@ -3830,24 +4664,14 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
xdp_features_set_redirect_target(dev, false);
return 0;
-
case XDP_SETUP_XSK_POOL:
- return -EOPNOTSUPP;
-
+ return fec_setup_xsk_pool(fep, bpf->xsk.pool,
+ bpf->xsk.queue_id);
default:
return -EOPNOTSUPP;
}
}
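
[Editorial note, not part of the patch] On the userspace side, a zero-copy bind
on a FEC queue is what makes the core issue XDP_SETUP_XSK_POOL to this handler.
A sketch using libxdp's xsk API (error handling omitted; the interface name and
queue id are examples):

	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY,
	};
	struct xsk_socket *xsk;
	int err;

	err = xsk_socket__create(&xsk, "eth0", 0 /* queue */, umem,
				 &rx_ring, &tx_ring, &cfg);
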
-static int
-fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
-{
- if (unlikely(index < 0))
- return 0;
-
- return (index % fep->num_tx_queues);
-}
-
static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
struct fec_enet_priv_tx_q *txq,
void *frame, u32 dma_sync_len,
@@ -3933,28 +4757,16 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
txq->bd.cur = bdp;
- /* Trigger transmission start */
- if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active))
- writel(0, txq->bd.reg_desc_active);
-
return 0;
}
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
int cpu, struct xdp_buff *xdp,
- u32 dma_sync_len)
+ u32 dma_sync_len, int queue)
{
- struct fec_enet_priv_tx_q *txq;
- struct netdev_queue *nq;
- int queue, ret;
-
- queue = fec_enet_xdp_get_tx_queue(fep, cpu);
- txq = fep->tx_queue[queue];
- nq = netdev_get_tx_queue(fep->netdev, queue);
+ struct netdev_queue *nq = netdev_get_tx_queue(fep->netdev, queue);
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ int ret;
__netif_tx_lock(nq, cpu);
@@ -3994,11 +4806,37 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
sent_frames++;
}
+ if (sent_frames)
+ fec_txq_trigger_xmit(fep, txq);
+
__netif_tx_unlock(nq);
return sent_frames;
}
+static int fec_enet_xsk_wakeup(struct net_device *ndev, u32 queue, u32 flags)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_rx_q *rxq;
+
+ if (!netif_running(ndev) || !netif_carrier_ok(ndev))
+ return -ENETDOWN;
+
+ if (queue >= fep->num_rx_queues || queue >= fep->num_tx_queues)
+ return -ERANGE;
+
+ rxq = fep->rx_queue[queue];
+ if (!rxq->xsk_pool)
+ return -EINVAL;
+
+ if (!napi_if_scheduled_mark_missed(&fep->napi)) {
+ if (likely(napi_schedule_prep(&fep->napi)))
+ __napi_schedule(&fep->napi);
+ }
+
+ return 0;
+}
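
[Editorial note, not part of the patch] The userspace half of the need-wakeup
protocol (sketch, libxdp API): only pay for a syscall when the kernel has set
the wakeup flag on the ring.

	/* TX kick */
	if (xsk_ring_prod__needs_wakeup(&tx_ring))
		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);

	/* RX kick: the flag is checked on the fill ring */
	if (xsk_ring_prod__needs_wakeup(&fill_ring))
		recvfrom(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
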
+
static int fec_hwtstamp_get(struct net_device *ndev,
struct kernel_hwtstamp_config *config)
{
@@ -4061,6 +4899,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_set_features = fec_set_features,
.ndo_bpf = fec_enet_bpf,
.ndo_xdp_xmit = fec_enet_xdp_xmit,
+ .ndo_xsk_wakeup = fec_enet_xsk_wakeup,
.ndo_hwtstamp_get = fec_hwtstamp_get,
.ndo_hwtstamp_set = fec_hwtstamp_set,
};
@@ -4188,7 +5027,8 @@ static int fec_enet_init(struct net_device *ndev)
if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
- NETDEV_XDP_ACT_REDIRECT;
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
fec_restart(ndev);
@@ -4591,6 +5431,11 @@ fec_probe(struct platform_device *pdev)
ndev->max_mtu = fep->max_buf_size - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ if (fep->quirks & FEC_QUIRK_HAS_RACC)
+ fep->rx_shift = 2;
+ else
+ fep->rx_shift = 0;
+
ret = register_netdev(ndev);
if (ret)
goto failed_register;
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index 1966dba512f8..106adf7a870f 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -946,17 +946,9 @@ static void fun_get_fec_stats(struct net_device *netdev,
#undef TX_STAT
#undef FEC_STAT
-static int fun_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static u32 fun_get_rx_ring_count(struct net_device *netdev)
{
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = netdev->real_num_rx_queues;
- return 0;
- default:
- break;
- }
- return -EOPNOTSUPP;
+ return netdev->real_num_rx_queues;
}
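
[Editorial note, not part of the patch] With the dedicated op, the ethtool core
can answer ETHTOOL_GRXRINGS itself instead of routing it through get_rxnfc; the
same conversion repeats in the gve, hns, hns3 and hinic hunks below. Roughly
(illustrative, not the verbatim core code):

	if (info.cmd == ETHTOOL_GRXRINGS && ops->get_rx_ring_count) {
		info.data = ops->get_rx_ring_count(dev);
		return 0;
	}
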
static int fun_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
@@ -1169,8 +1161,8 @@ static const struct ethtool_ops fun_ethtool_ops = {
.get_sset_count = fun_get_sset_count,
.get_strings = fun_get_strings,
.get_ethtool_stats = fun_get_ethtool_stats,
- .get_rxnfc = fun_get_rxnfc,
.set_rxnfc = fun_set_rxnfc,
+ .get_rx_ring_count = fun_get_rx_ring_count,
.get_rxfh_indir_size = fun_get_rxfh_indir_size,
.get_rxfh_key_size = fun_get_rxfh_key_size,
.get_rxfh = fun_get_rxfh,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 66ddc4413f8d..42a0a6f7b296 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -840,15 +840,19 @@ static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return err;
}
+static u32 gve_get_rx_ring_count(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ return priv->rx_cfg.num_queues;
+}
+
static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct gve_priv *priv = netdev_priv(netdev);
int err = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->rx_cfg.num_queues;
- break;
case ETHTOOL_GRXCLSRLCNT:
if (!priv->max_flow_rules)
return -EOPNOTSUPP;
@@ -991,6 +995,7 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_channels = gve_get_channels,
.set_rxnfc = gve_set_rxnfc,
.get_rxnfc = gve_get_rxnfc,
+ .get_rx_ring_count = gve_get_rx_ring_count,
.get_rxfh_indir_size = gve_get_rxfh_indir_size,
.get_rxfh_key_size = gve_get_rxfh_key_size,
.get_rxfh = gve_get_rxfh,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index dbc84de39b70..0ee864b0afe0 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -2618,8 +2618,9 @@ static void gve_rx_queue_mem_free(struct net_device *dev, void *per_q_mem)
gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg);
}
-static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem,
- int idx)
+static int gve_rx_queue_mem_alloc(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *per_q_mem, int idx)
{
struct gve_priv *priv = netdev_priv(dev);
struct gve_rx_alloc_rings_cfg cfg = {0};
@@ -2640,7 +2641,9 @@ static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem,
return err;
}
-static int gve_rx_queue_start(struct net_device *dev, void *per_q_mem, int idx)
+static int gve_rx_queue_start(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *per_q_mem, int idx)
{
struct gve_priv *priv = netdev_priv(dev);
struct gve_rx_ring *gve_per_q_mem;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 40b89b3e5a31..28e85730f785 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -963,9 +963,6 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
int num_buffer_descs;
int total_num_descs;
- if (skb_is_gso(skb) && unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto drop;
-
if (tx->dqo.qpl) {
/* We do not need to verify the number of buffers used per
* packet or per segment in case of TSO as with 2K size buffers
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 60a586a951a0..23b295dedaef 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1230,21 +1230,11 @@ hns_set_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
rxfh->indir, rxfh->key, rxfh->hfunc);
}
-static int hns_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static u32 hns_get_rx_ring_count(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->ae_handle->q_num;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
+ return priv->ae_handle->q_num;
}
static const struct ethtool_ops hns_ethtool_ops = {
@@ -1273,7 +1263,7 @@ static const struct ethtool_ops hns_ethtool_ops = {
.get_rxfh_indir_size = hns_get_rss_indir_size,
.get_rxfh = hns_get_rss,
.set_rxfh = hns_set_rss,
- .get_rxnfc = hns_get_rxnfc,
+ .get_rx_ring_count = hns_get_rx_ring_count,
.get_link_ksettings = hns_nic_get_link_ksettings,
.set_link_ksettings = hns_nic_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 7a9573dcab74..a3206c97923e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -25,6 +25,7 @@
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/geneve.h>
+#include <net/netdev_queues.h>
#include "hnae3.h"
#include "hns3_enet.h"
@@ -1048,13 +1049,13 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
int order;
if (!alloc_size)
- return;
+ goto not_init;
order = get_order(alloc_size);
if (order > MAX_PAGE_ORDER) {
if (net_ratelimit())
dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
- return;
+ goto not_init;
}
tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
@@ -1092,6 +1093,13 @@ alloc_pages_error:
devm_kfree(ring_to_dev(ring), tx_spare);
devm_kzalloc_error:
ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
+not_init:
+ /* During driver init or reset init, ring->tx_spare is always NULL;
+ * when called from hns3_set_ringparam it is usually non-NULL and will
+ * be restored if hns3_init_all_ring() fails, so it is safe to set
+ * ring->tx_spare to NULL here.
+ */
+ ring->tx_spare = NULL;
}
/* Use hns3_tx_spare_space() to make sure there is enough buffer
@@ -2810,14 +2818,12 @@ static int hns3_get_timeout_queue(struct net_device *ndev)
/* Find the stopped queue the same way the stack does */
for (i = 0; i < ndev->num_tx_queues; i++) {
+ unsigned int timedout_ms;
struct netdev_queue *q;
- unsigned long trans_start;
q = netdev_get_tx_queue(ndev, i);
- trans_start = READ_ONCE(q->trans_start);
- if (netif_xmit_stopped(q) &&
- time_after(jiffies,
- (trans_start + ndev->watchdog_timeo))) {
+ timedout_ms = netif_xmit_timeout_ms(q);
+ if (timedout_ms) {
#ifdef CONFIG_BQL
struct dql *dql = &q->dql;
@@ -2826,8 +2832,7 @@ static int hns3_get_timeout_queue(struct net_device *ndev)
dql->adj_limit, dql->num_completed);
#endif
netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
- q->state,
- jiffies_to_msecs(jiffies - trans_start));
+ q->state, timedout_ms);
break;
}
}
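
[Editorial note, not part of the patch] netif_xmit_timeout_ms() folds the old
open-coded check into one helper. Its behavior, as inferred from the code it
replaces (illustrative; not the actual header definition, which takes only the
queue):

	/* returns stall time in ms for a stopped, timed-out queue, else 0 */
	static unsigned int my_xmit_timeout_ms(struct net_device *dev,
					       struct netdev_queue *q)
	{
		unsigned long trans_start = READ_ONCE(q->trans_start);

		if (netif_xmit_stopped(q) &&
		    time_after(jiffies, trans_start + dev->watchdog_timeo))
			return jiffies_to_msecs(jiffies - trans_start);

		return 0;
	}
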
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a5eefa28454c..6d746a9fb687 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -988,6 +988,13 @@ static int hns3_get_rxfh_fields(struct net_device *netdev,
return -EOPNOTSUPP;
}
+static u32 hns3_get_rx_ring_count(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ return h->kinfo.num_tqps;
+}
+
static int hns3_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -995,9 +1002,6 @@ static int hns3_get_rxnfc(struct net_device *netdev,
struct hnae3_handle *h = hns3_get_handle(netdev);
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = h->kinfo.num_tqps;
- return 0;
case ETHTOOL_GRXCLSRLCNT:
if (h->ae_algo->ops->get_fd_rule_cnt)
return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
@@ -2148,6 +2152,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_sset_count = hns3_get_sset_count,
.get_rxnfc = hns3_get_rxnfc,
.set_rxnfc = hns3_set_rxnfc,
+ .get_rx_ring_count = hns3_get_rx_ring_count,
.get_rxfh_key_size = hns3_get_rss_key_size,
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
@@ -2187,6 +2192,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_sset_count = hns3_get_sset_count,
.get_rxnfc = hns3_get_rxnfc,
.set_rxnfc = hns3_set_rxnfc,
+ .get_rx_ring_count = hns3_get_rx_ring_count,
.get_rxfh_key_size = hns3_get_rss_key_size,
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 416e02e7b995..4ce92ddefcde 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -26,6 +26,7 @@ struct hclge_misc_vector {
#define HCLGE_TQP_REG_OFFSET 0x80000
#define HCLGE_TQP_REG_SIZE 0x200
+#define HCLGE_FD_COUNTER_MAX_SIZE_DEV_V2 128
#define HCLGE_TQP_MAX_SIZE_DEV_V2 1024
#define HCLGE_TQP_EXT_REG_OFFSET 0x100
@@ -727,11 +728,11 @@ struct hclge_fd_tcam_config_3_cmd {
#define HCLGE_FD_AD_DROP_B 0
#define HCLGE_FD_AD_DIRECT_QID_B 1
-#define HCLGE_FD_AD_QID_S 2
-#define HCLGE_FD_AD_QID_M GENMASK(11, 2)
+#define HCLGE_FD_AD_QID_L_S 2
+#define HCLGE_FD_AD_QID_L_M GENMASK(11, 2)
#define HCLGE_FD_AD_USE_COUNTER_B 12
-#define HCLGE_FD_AD_COUNTER_NUM_S 13
-#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(19, 13)
+#define HCLGE_FD_AD_COUNTER_NUM_L_S 13
+#define HCLGE_FD_AD_COUNTER_NUM_L_M GENMASK(19, 13)
#define HCLGE_FD_AD_NXT_STEP_B 20
#define HCLGE_FD_AD_NXT_KEY_S 21
#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21)
@@ -741,6 +742,8 @@ struct hclge_fd_tcam_config_3_cmd {
#define HCLGE_FD_AD_TC_OVRD_B 16
#define HCLGE_FD_AD_TC_SIZE_S 17
#define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17)
+#define HCLGE_FD_AD_QID_H_B 21
+#define HCLGE_FD_AD_COUNTER_NUM_H_B 26
struct hclge_fd_ad_config_cmd {
u8 stage;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index b8e2aa19f9e6..edec994981c7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -5679,15 +5679,20 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
}
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_QID_H_B,
+ action->queue_id >= HCLGE_TQP_MAX_SIZE_DEV_V2 ? 1 : 0);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_COUNTER_NUM_H_B,
+ action->counter_id >= HCLGE_FD_COUNTER_MAX_SIZE_DEV_V2 ?
+ 1 : 0);
ad_data <<= 32;
hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
action->forward_to_direct_queue);
- hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
+ hnae3_set_field(ad_data, HCLGE_FD_AD_QID_L_M, HCLGE_FD_AD_QID_L_S,
action->queue_id);
hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
- hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
- HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_L_M,
+ HCLGE_FD_AD_COUNTER_NUM_L_S, action->counter_id);
hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
action->next_input_key);
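
[Editorial note, not part of the patch] Worked example of the widened queue id
encoding (values illustrative): queue 1200 no longer fits the 10-bit low field
(0..1023), so it splits as 1200 = 1 * 1024 + 176, with HCLGE_FD_AD_QID_H_B
carrying the high bit in the upper word and HCLGE_FD_AD_QID_L_M the low 10
bits; the counter id gets the same treatment via HCLGE_FD_AD_COUNTER_NUM_H_B.

	u64 ad_data = 0;
	u16 qid = 1200;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_QID_H_B, qid >= 1024 ? 1 : 0);
	ad_data <<= 32;
	/* the field macro masks qid down to its low 10 bits (176) */
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_L_M, HCLGE_FD_AD_QID_L_S, qid);
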
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index e9f338e9dbe7..f28528df5aac 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -1101,22 +1101,11 @@ static int __set_rss_rxfh(struct net_device *netdev,
return 0;
}
-static int hinic_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *cmd, u32 *rule_locs)
+static u32 hinic_get_rx_ring_count(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err = 0;
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = nic_dev->num_qps;
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
+ return nic_dev->num_qps;
}
static int hinic_get_rxfh(struct net_device *netdev,
@@ -1779,7 +1768,7 @@ static const struct ethtool_ops hinic_ethtool_ops = {
.set_pauseparam = hinic_set_pauseparam,
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
- .get_rxnfc = hinic_get_rxnfc,
+ .get_rx_ring_count = hinic_get_rx_ring_count,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
@@ -1812,7 +1801,7 @@ static const struct ethtool_ops hinicvf_ethtool_ops = {
.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
- .get_rxnfc = hinic_get_rxnfc,
+ .get_rx_ring_count = hinic_get_rx_ring_count,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
diff --git a/drivers/net/ethernet/huawei/hinic3/Kconfig b/drivers/net/ethernet/huawei/hinic3/Kconfig
index ce4331d1387b..02d6f91a7f4a 100644
--- a/drivers/net/ethernet/huawei/hinic3/Kconfig
+++ b/drivers/net/ethernet/huawei/hinic3/Kconfig
@@ -11,6 +11,7 @@ config HINIC3
depends on X86 || ARM64 || COMPILE_TEST
depends on PCI_MSI && 64BIT
select AUXILIARY_BUS
+ select DIMLIB
select PAGE_POOL
help
This driver supports HiNIC 3rd gen Network Adapter (HINIC3).
diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile
index c3efa45a6a42..26c05ecf31c9 100644
--- a/drivers/net/ethernet/huawei/hinic3/Makefile
+++ b/drivers/net/ethernet/huawei/hinic3/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_HINIC3) += hinic3.o
hinic3-objs := hinic3_cmdq.o \
hinic3_common.o \
hinic3_eqs.o \
+ hinic3_filter.o \
hinic3_hw_cfg.o \
hinic3_hw_comm.o \
hinic3_hwdev.o \
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
index ef539d1b69a3..86720bb119e9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
@@ -878,14 +878,11 @@ err_free_cmd_infos:
}
hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
-
err_destroy_cmdq_wq:
destroy_cmdq_wq(hwdev, cmdqs);
-
err_free_cmdqs:
dma_pool_destroy(cmdqs->cmd_buf_pool);
kfree(cmdqs);
-
err_out:
return err;
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
index e7417e8efa99..f7083a6e7df9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
@@ -5,6 +5,7 @@
#define _HINIC3_CSR_H_
#define HINIC3_CFG_REGS_FLAG 0x40000000
+#define HINIC3_MGMT_REGS_FLAG 0xC0000000
#define HINIC3_REGS_FLAG_MASK 0x3FFFFFFF
#define HINIC3_VF_CFG_REG_OFFSET 0x2000
@@ -24,6 +25,11 @@
#define HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF (HINIC3_CFG_REGS_FLAG + 0x0108)
#define HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF (HINIC3_CFG_REGS_FLAG + 0x010C)
+#define HINIC3_HOST_CSR_BASE_ADDR (HINIC3_MGMT_REGS_FLAG + 0x6000)
+#define HINIC3_PPF_ELECTION_OFFSET 0x0
+#define HINIC3_CSR_PPF_ELECTION_ADDR \
+ (HINIC3_HOST_CSR_BASE_ADDR + HINIC3_PPF_ELECTION_OFFSET)
+
#define HINIC3_CSR_DMA_ATTR_TBL_ADDR (HINIC3_CFG_REGS_FLAG + 0x380)
#define HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x390)
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
index 01686472985b..a2c3962116d5 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
@@ -655,7 +655,7 @@ int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs,
hwdev->aeqs = aeqs;
aeqs->hwdev = hwdev;
aeqs->num_aeqs = num_aeqs;
- aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM,
+ aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM | WQ_PERCPU,
HINIC3_MAX_AEQS);
if (!aeqs->workq) {
dev_err(hwdev->dev, "Failed to initialize aeq workqueue\n");
@@ -686,7 +686,6 @@ err_remove_eqs:
}
destroy_workqueue(aeqs->workq);
-
err_free_aeqs:
kfree(aeqs);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c b/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c
new file mode 100644
index 000000000000..6349d71f574b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include "hinic3_hwif.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_cfg.h"
+
+static int hinic3_filter_addr_sync(struct net_device *netdev, u8 *addr)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ return hinic3_set_mac(nic_dev->hwdev, addr, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+}
+
+static int hinic3_filter_addr_unsync(struct net_device *netdev, u8 *addr)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ /* The addr is in use */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
+ return hinic3_del_mac(nic_dev->hwdev, addr, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+}
+
+void hinic3_clean_mac_list_filter(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *f;
+
+ list_for_each_entry_safe(f, ftmp, &nic_dev->uc_filter_list, list) {
+ if (f->state == HINIC3_MAC_HW_SYNCED)
+ hinic3_filter_addr_unsync(netdev, f->addr);
+ list_del(&f->list);
+ kfree(f);
+ }
+
+ list_for_each_entry_safe(f, ftmp, &nic_dev->mc_filter_list, list) {
+ if (f->state == HINIC3_MAC_HW_SYNCED)
+ hinic3_filter_addr_unsync(netdev, f->addr);
+ list_del(&f->list);
+ kfree(f);
+ }
+}
+
+static struct hinic3_mac_filter *
+hinic3_find_mac(const struct list_head *filter_list, u8 *addr)
+{
+ struct hinic3_mac_filter *f;
+
+ list_for_each_entry(f, filter_list, list) {
+ if (ether_addr_equal(addr, f->addr))
+ return f;
+ }
+ return NULL;
+}
+
+static void hinic3_add_filter(struct net_device *netdev,
+ struct list_head *mac_filter_list,
+ u8 *addr)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_mac_filter *f;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return;
+
+ ether_addr_copy(f->addr, addr);
+
+ INIT_LIST_HEAD(&f->list);
+ list_add_tail(&f->list, mac_filter_list);
+
+ f->state = HINIC3_MAC_WAIT_HW_SYNC;
+ set_bit(HINIC3_MAC_FILTER_CHANGED, &nic_dev->flags);
+}
+
+static void hinic3_del_filter(struct net_device *netdev,
+ struct hinic3_mac_filter *f)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ set_bit(HINIC3_MAC_FILTER_CHANGED, &nic_dev->flags);
+
+ if (f->state == HINIC3_MAC_WAIT_HW_SYNC) {
+ /* not yet added to hw, delete it directly */
+ list_del(&f->list);
+ kfree(f);
+ return;
+ }
+
+ f->state = HINIC3_MAC_WAIT_HW_UNSYNC;
+}
+
+static struct hinic3_mac_filter *
+hinic3_mac_filter_entry_clone(const struct hinic3_mac_filter *src)
+{
+ struct hinic3_mac_filter *f;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return NULL;
+
+ *f = *src;
+ INIT_LIST_HEAD(&f->list);
+
+ return f;
+}
+
+static void hinic3_undo_del_filter_entries(struct list_head *filter_list,
+ const struct list_head *from)
+{
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *f;
+
+ list_for_each_entry_safe(f, ftmp, from, list) {
+ if (hinic3_find_mac(filter_list, f->addr))
+ continue;
+
+ if (f->state == HINIC3_MAC_HW_UNSYNCED)
+ f->state = HINIC3_MAC_WAIT_HW_UNSYNC;
+
+ list_move_tail(&f->list, filter_list);
+ }
+}
+
+static void hinic3_undo_add_filter_entries(struct list_head *filter_list,
+ const struct list_head *from)
+{
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *tmp;
+ struct hinic3_mac_filter *f;
+
+ list_for_each_entry_safe(f, ftmp, from, list) {
+ tmp = hinic3_find_mac(filter_list, f->addr);
+ if (tmp && tmp->state == HINIC3_MAC_HW_SYNCING)
+ tmp->state = HINIC3_MAC_WAIT_HW_SYNC;
+ }
+}
+
+static void hinic3_cleanup_filter_list(const struct list_head *head)
+{
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *f;
+
+ list_for_each_entry_safe(f, ftmp, head, list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+}
+
+static int hinic3_mac_filter_sync_hw(struct net_device *netdev,
+ struct list_head *del_list,
+ struct list_head *add_list,
+ int *add_count)
+{
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *f;
+ int err;
+
+ if (!list_empty(del_list)) {
+ list_for_each_entry_safe(f, ftmp, del_list, list) {
+ /* ignore errors when deleting mac */
+ hinic3_filter_addr_unsync(netdev, f->addr);
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+
+ if (!list_empty(add_list)) {
+ list_for_each_entry_safe(f, ftmp, add_list, list) {
+ if (f->state != HINIC3_MAC_HW_SYNCING)
+ continue;
+
+ err = hinic3_filter_addr_sync(netdev, f->addr);
+ if (err) {
+ netdev_err(netdev, "Failed to add mac\n");
+ return err;
+ }
+
+ f->state = HINIC3_MAC_HW_SYNCED;
+ (*add_count)++;
+ }
+ }
+
+ return 0;
+}
+
+static int hinic3_mac_filter_sync(struct net_device *netdev,
+ struct list_head *mac_filter_list, bool uc)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct list_head tmp_del_list, tmp_add_list;
+ struct hinic3_mac_filter *fclone;
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *f;
+ int err = 0, add_count = 0;
+
+ INIT_LIST_HEAD(&tmp_del_list);
+ INIT_LIST_HEAD(&tmp_add_list);
+
+ list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+ if (f->state != HINIC3_MAC_WAIT_HW_UNSYNC)
+ continue;
+
+ f->state = HINIC3_MAC_HW_UNSYNCED;
+ list_move_tail(&f->list, &tmp_del_list);
+ }
+
+ list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+ if (f->state != HINIC3_MAC_WAIT_HW_SYNC)
+ continue;
+
+ fclone = hinic3_mac_filter_entry_clone(f);
+ if (!fclone) {
+ hinic3_undo_del_filter_entries(mac_filter_list,
+ &tmp_del_list);
+ hinic3_undo_add_filter_entries(mac_filter_list,
+ &tmp_add_list);
+
+ netdev_err(netdev,
+ "Failed to clone mac_filter_entry\n");
+ err = -ENOMEM;
+ goto cleanup_tmp_filter_list;
+ }
+
+ f->state = HINIC3_MAC_HW_SYNCING;
+ list_add_tail(&fclone->list, &tmp_add_list);
+ }
+
+ err = hinic3_mac_filter_sync_hw(netdev, &tmp_del_list,
+ &tmp_add_list, &add_count);
+ if (err) {
+ /* there were errors, remove all macs that are in hw */
+ hinic3_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
+ add_count = 0;
+ /* VF does not support promiscuous mode, so don't
+ * delete any other uc macs.
+ */
+ if (!HINIC3_IS_VF(nic_dev->hwdev) || !uc) {
+ list_for_each_entry_safe(f, ftmp, mac_filter_list,
+ list) {
+ if (f->state != HINIC3_MAC_HW_SYNCED)
+ continue;
+
+ fclone = hinic3_mac_filter_entry_clone(f);
+ if (!fclone)
+ break;
+
+ f->state = HINIC3_MAC_WAIT_HW_SYNC;
+ list_add_tail(&fclone->list, &tmp_del_list);
+ }
+ }
+
+ hinic3_mac_filter_sync_hw(netdev, &tmp_del_list,
+ &tmp_add_list, &add_count);
+ }
+
+cleanup_tmp_filter_list:
+ hinic3_cleanup_filter_list(&tmp_del_list);
+ hinic3_cleanup_filter_list(&tmp_add_list);
+
+ return err ? err : add_count;
+}
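
[Editorial note, not part of the patch] The filter list above is a small
per-address state machine; a summary of the transitions it drives
(illustrative):

	/*
	 * WAIT_HW_SYNC  --sync work-->  HW_SYNCING  --mbox ok-->  HW_SYNCED
	 * HW_SYNCED     --addr gone-->  WAIT_HW_UNSYNC --sync work--> freed
	 *
	 * On a clone or mailbox failure the temporary lists are unwound:
	 * HW_SYNCING entries fall back to WAIT_HW_SYNC and HW_UNSYNCED
	 * entries to WAIT_HW_UNSYNC, so the next pass retries them.
	 */
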
+
+static void hinic3_mac_filter_sync_all(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int add_count;
+
+ if (test_bit(HINIC3_MAC_FILTER_CHANGED, &nic_dev->flags)) {
+ clear_bit(HINIC3_MAC_FILTER_CHANGED, &nic_dev->flags);
+ add_count = hinic3_mac_filter_sync(netdev,
+ &nic_dev->uc_filter_list,
+ true);
+ if (add_count < 0 &&
+ hinic3_test_support(nic_dev, HINIC3_NIC_F_PROMISC))
+ set_bit(HINIC3_PROMISC_FORCE_ON,
+ &nic_dev->rx_mod_state);
+ else if (add_count)
+ clear_bit(HINIC3_PROMISC_FORCE_ON,
+ &nic_dev->rx_mod_state);
+
+ add_count = hinic3_mac_filter_sync(netdev,
+ &nic_dev->mc_filter_list,
+ false);
+ if (add_count < 0 &&
+ hinic3_test_support(nic_dev, HINIC3_NIC_F_ALLMULTI))
+ set_bit(HINIC3_ALLMULTI_FORCE_ON,
+ &nic_dev->rx_mod_state);
+ else if (add_count)
+ clear_bit(HINIC3_ALLMULTI_FORCE_ON,
+ &nic_dev->rx_mod_state);
+ }
+}
+
+#define HINIC3_DEFAULT_RX_MODE \
+ (L2NIC_RX_MODE_UC | L2NIC_RX_MODE_MC | L2NIC_RX_MODE_BC)
+
+static void hinic3_update_mac_filter(struct net_device *netdev,
+ const struct netdev_hw_addr_list *src_list,
+ struct list_head *filter_list)
+{
+ struct hinic3_mac_filter *filter;
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *f;
+ struct netdev_hw_addr *ha;
+
+ /* add addr if not already in the filter list */
+ netif_addr_lock_bh(netdev);
+ netdev_hw_addr_list_for_each(ha, src_list) {
+ filter = hinic3_find_mac(filter_list, ha->addr);
+ if (!filter)
+ hinic3_add_filter(netdev, filter_list, ha->addr);
+ else if (filter->state == HINIC3_MAC_WAIT_HW_UNSYNC)
+ filter->state = HINIC3_MAC_HW_SYNCED;
+ }
+ netif_addr_unlock_bh(netdev);
+
+ /* delete addr if not in netdev list */
+ list_for_each_entry_safe(f, ftmp, filter_list, list) {
+ bool found = false;
+
+ netif_addr_lock_bh(netdev);
+ netdev_hw_addr_list_for_each(ha, src_list)
+ if (ether_addr_equal(ha->addr, f->addr)) {
+ found = true;
+ break;
+ }
+ netif_addr_unlock_bh(netdev);
+
+ if (found)
+ continue;
+
+ hinic3_del_filter(netdev, f);
+ }
+}
+
+static void hinic3_sync_rx_mode_to_hw(struct net_device *netdev, int promisc_en,
+ int allmulti_en)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u32 rx_mode = HINIC3_DEFAULT_RX_MODE;
+ int err;
+
+ rx_mode |= (promisc_en ? L2NIC_RX_MODE_PROMISC : 0);
+ rx_mode |= (allmulti_en ? L2NIC_RX_MODE_MC_ALL : 0);
+
+ if (promisc_en != test_bit(HINIC3_HW_PROMISC_ON,
+ &nic_dev->rx_mod_state))
+ netdev_dbg(netdev, "%s promisc mode\n",
+ promisc_en ? "Enter" : "Exit");
+ if (allmulti_en !=
+ test_bit(HINIC3_HW_ALLMULTI_ON, &nic_dev->rx_mod_state))
+ netdev_dbg(netdev, "%s all_multi mode\n",
+ allmulti_en ? "Enter" : "Exit");
+
+ err = hinic3_set_rx_mode(nic_dev->hwdev, rx_mode);
+ if (err) {
+ netdev_err(netdev, "Failed to set rx_mode\n");
+ return;
+ }
+
+ promisc_en ? set_bit(HINIC3_HW_PROMISC_ON, &nic_dev->rx_mod_state) :
+ clear_bit(HINIC3_HW_PROMISC_ON, &nic_dev->rx_mod_state);
+
+ allmulti_en ? set_bit(HINIC3_HW_ALLMULTI_ON, &nic_dev->rx_mod_state) :
+ clear_bit(HINIC3_HW_ALLMULTI_ON, &nic_dev->rx_mod_state);
+}
+
+void hinic3_set_rx_mode_work(struct work_struct *work)
+{
+ int promisc_en = 0, allmulti_en = 0;
+ struct hinic3_nic_dev *nic_dev;
+ struct net_device *netdev;
+
+ nic_dev = container_of(work, struct hinic3_nic_dev, rx_mode_work);
+ netdev = nic_dev->netdev;
+
+ if (test_and_clear_bit(HINIC3_UPDATE_MAC_FILTER, &nic_dev->flags)) {
+ hinic3_update_mac_filter(netdev, &netdev->uc,
+ &nic_dev->uc_filter_list);
+ hinic3_update_mac_filter(netdev, &netdev->mc,
+ &nic_dev->mc_filter_list);
+ }
+
+ hinic3_mac_filter_sync_all(netdev);
+
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_PROMISC))
+ promisc_en = !!(netdev->flags & IFF_PROMISC) ||
+ test_bit(HINIC3_PROMISC_FORCE_ON,
+ &nic_dev->rx_mod_state);
+
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_ALLMULTI))
+ allmulti_en = !!(netdev->flags & IFF_ALLMULTI) ||
+ test_bit(HINIC3_ALLMULTI_FORCE_ON,
+ &nic_dev->rx_mod_state);
+
+ if (promisc_en != test_bit(HINIC3_HW_PROMISC_ON,
+ &nic_dev->rx_mod_state) ||
+ allmulti_en != test_bit(HINIC3_HW_ALLMULTI_ON,
+ &nic_dev->rx_mod_state))
+ hinic3_sync_rx_mode_to_hw(netdev, promisc_en, allmulti_en);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
index 89638813df40..ecfe6265954e 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
@@ -9,6 +9,36 @@
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
+static int hinic3_get_interrupt_cfg(struct hinic3_hwdev *hwdev,
+ struct hinic3_interrupt_info *info)
+{
+ struct comm_cmd_cfg_msix_ctrl_reg msix_cfg = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ msix_cfg.func_id = hinic3_global_func_id(hwdev);
+ msix_cfg.msix_index = info->msix_index;
+ msix_cfg.opcode = MGMT_MSG_CMD_OP_GET;
+
+ mgmt_msg_params_init_default(&msg_params, &msix_cfg, sizeof(msix_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_CFG_MSIX_CTRL_REG, &msg_params);
+ if (err || msix_cfg.head.status) {
+ dev_err(hwdev->dev, "Failed to get interrupt config, err: %d, status: 0x%x\n",
+ err, msix_cfg.head.status);
+ return -EFAULT;
+ }
+
+ info->lli_credit_limit = msix_cfg.lli_credit_cnt;
+ info->lli_timer_cfg = msix_cfg.lli_timer_cnt;
+ info->pending_limit = msix_cfg.pending_cnt;
+ info->coalesc_timer_cfg = msix_cfg.coalesce_timer_cnt;
+ info->resend_timer_cfg = msix_cfg.resend_timer_cnt;
+
+ return 0;
+}
+
int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
const struct hinic3_interrupt_info *info)
{
@@ -40,6 +70,30 @@ int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
return 0;
}
+int hinic3_set_interrupt_cfg(struct hinic3_hwdev *hwdev,
+ struct hinic3_interrupt_info info)
+{
+ struct hinic3_interrupt_info temp_info;
+ int err;
+
+ temp_info.msix_index = info.msix_index;
+
+ err = hinic3_get_interrupt_cfg(hwdev, &temp_info);
+ if (err)
+ return err;
+
+ info.lli_credit_limit = temp_info.lli_credit_limit;
+ info.lli_timer_cfg = temp_info.lli_timer_cfg;
+
+ if (!info.interrupt_coalesc_set) {
+ info.pending_limit = temp_info.pending_limit;
+ info.coalesc_timer_cfg = temp_info.coalesc_timer_cfg;
+ info.resend_timer_cfg = temp_info.resend_timer_cfg;
+ }
+
+ return hinic3_set_interrupt_cfg_direct(hwdev, &info);
+}
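
[Editorial note, not part of the patch] A caller-side sketch (the pending/timer
values are made up): set only the coalescing fields for one MSI-X entry and let
the read-modify-write above preserve the device's LLI settings.

	struct hinic3_interrupt_info info = {
		.msix_index            = eq->msix_entry_idx,
		.interrupt_coalesc_set = 1,
		.pending_limit         = 2,
		.coalesc_timer_cfg     = 32,
	};
	int err;

	err = hinic3_set_interrupt_cfg(hwdev, info);
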
+
int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag)
{
struct comm_cmd_func_reset func_reset = {};
@@ -314,6 +368,8 @@ int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev)
ret = -EFAULT;
}
+ hinic3_set_pf_status(hwif, HINIC3_PF_STATUS_FLR_START_FLAG);
+
clr_res.func_id = hwif->attr.func_global_idx;
msg_params.buf_in = &clr_res;
msg_params.in_size = sizeof(clr_res);
@@ -337,6 +393,65 @@ int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev)
return ret;
}
+int hinic3_set_bdf_ctxt(struct hinic3_hwdev *hwdev,
+ struct comm_cmd_bdf_info *bdf_info)
+{
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ mgmt_msg_params_init_default(&msg_params, bdf_info, sizeof(*bdf_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SEND_BDF_INFO, &msg_params);
+ if (err || bdf_info->head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set bdf info to fw, err: %d, status: 0x%x\n",
+ err, bdf_info->head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic3_sync_time(struct hinic3_hwdev *hwdev, u64 time)
+{
+ struct comm_cmd_sync_time time_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ time_info.mstime = time;
+
+ mgmt_msg_params_init_default(&msg_params, &time_info,
+ sizeof(time_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SYNC_TIME, &msg_params);
+ if (err || time_info.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to sync time to mgmt, err: %d, status: 0x%x\n",
+ err, time_info.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void hinic3_sync_time_to_fw(struct hinic3_hwdev *hwdev)
+{
+ struct timespec64 ts = {};
+ u64 time;
+ int err;
+
+ ktime_get_real_ts64(&ts);
+ time = (u64)(ts.tv_sec * MSEC_PER_SEC + ts.tv_nsec / NSEC_PER_MSEC);
+
+ err = hinic3_sync_time(hwdev, time);
+ if (err)
+ dev_err(hwdev->dev,
+ "Synchronize UTC time to firmware failed, err=%d\n",
+ err);
+}
+
static int get_hw_rx_buf_size_idx(int rx_buf_sz, u16 *buf_sz_idx)
{
/* Supported RX buffer sizes in bytes. Configured by array index. */
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
index 304f5691f0c2..8e4737c486b7 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
@@ -23,6 +23,8 @@ struct hinic3_interrupt_info {
int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
const struct hinic3_interrupt_info *info);
+int hinic3_set_interrupt_cfg(struct hinic3_hwdev *hwdev,
+ struct hinic3_interrupt_info info);
int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag);
int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
@@ -40,6 +42,10 @@ int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx,
u32 page_size);
int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth);
int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev);
+int hinic3_set_bdf_ctxt(struct hinic3_hwdev *hwdev,
+ struct comm_cmd_bdf_info *bdf_info);
+void hinic3_sync_time_to_fw(struct hinic3_hwdev *hwdev);
+
int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
int rx_buf_sz);
int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
index 623cf2d14cbc..329a9c464ff9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
@@ -39,6 +39,8 @@ enum mgmt_mod_type {
/* Configuration module */
MGMT_MOD_CFGM = 7,
MGMT_MOD_HILINK = 14,
+ /* hardware max module id */
+ MGMT_MOD_HW_MAX = 20,
};
static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_params,
@@ -110,6 +112,10 @@ enum comm_cmd {
COMM_CMD_CFG_MSIX_CTRL_REG = 23,
COMM_CMD_SET_CEQ_CTRL_REG = 24,
COMM_CMD_SET_DMA_ATTR = 25,
+
+ /* Commands for syncing time and device info to firmware */
+ COMM_CMD_SYNC_TIME = 62,
+ COMM_CMD_SEND_BDF_INFO = 64,
};
struct comm_cmd_cfg_msix_ctrl_reg {
@@ -251,6 +257,24 @@ struct comm_cmd_clear_resource {
u16 rsvd1[3];
};
+struct comm_cmd_sync_time {
+ struct mgmt_msg_head head;
+
+ u64 mstime;
+ u64 rsvd1;
+};
+
+struct comm_cmd_bdf_info {
+ struct mgmt_msg_head head;
+
+ u16 function_idx;
+ u8 rsvd1[2];
+ u8 bus;
+ u8 device;
+ u8 function;
+ u8 rsvd2[5];
+};
+
/* Services supported by HW. HW uses these values when delivering events.
* HW supports multiple services that are not yet supported by driver
* (e.g. RoCE).
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
index 95a213133be9..7906d4057cf2 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
@@ -13,6 +13,8 @@
#define HINIC3_PCIE_SNOOP 0
#define HINIC3_PCIE_TPH_DISABLE 0
+#define HINIC3_SYNFW_TIME_PERIOD (60 * 60 * 1000)
+
#define HINIC3_DMA_ATTR_INDIR_IDX_MASK GENMASK(9, 0)
#define HINIC3_DMA_ATTR_INDIR_IDX_SET(val, member) \
FIELD_PREP(HINIC3_DMA_ATTR_INDIR_##member##_MASK, val)
@@ -38,6 +40,7 @@
#define HINIC3_WQ_MAX_REQ 10
enum hinic3_hwdev_init_state {
+ HINIC3_HWDEV_MGMT_INITED = 1,
HINIC3_HWDEV_MBOX_INITED = 2,
HINIC3_HWDEV_CMDQ_INITED = 3,
};
@@ -197,7 +200,7 @@ static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev)
for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
eq = &ceqs->ceq[q_id];
info.msix_index = eq->msix_entry_idx;
- err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
+ err = hinic3_set_interrupt_cfg(hwdev, info);
if (err) {
dev_err(hwdev->dev, "Set msix attr for ceq %u failed\n",
q_id);
@@ -208,6 +211,36 @@ static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev)
return 0;
}
+static int hinic3_comm_pf_to_mgmt_init(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ if (HINIC3_IS_VF(hwdev))
+ return 0;
+
+ err = hinic3_pf_to_mgmt_init(hwdev);
+ if (err)
+ return err;
+
+ set_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state);
+
+ return 0;
+}
+
+static void hinic3_comm_pf_to_mgmt_free(struct hinic3_hwdev *hwdev)
+{
+ if (HINIC3_IS_VF(hwdev))
+ return;
+
+ spin_lock_bh(&hwdev->channel_lock);
+ clear_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state);
+ spin_unlock_bh(&hwdev->channel_lock);
+
+ hinic3_aeq_unregister_cb(hwdev, HINIC3_MSG_FROM_FW);
+
+ hinic3_pf_to_mgmt_free(hwdev);
+}
+
static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev)
{
int err;
@@ -409,20 +442,28 @@ static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev)
if (err)
return err;
- err = init_basic_attributes(hwdev);
+ err = hinic3_comm_pf_to_mgmt_init(hwdev);
if (err)
goto err_free_basic_mgmt_ch;
+ err = init_basic_attributes(hwdev);
+ if (err)
+ goto err_free_comm_pf_to_mgmt;
+
err = init_cmdqs_channel(hwdev);
if (err) {
dev_err(hwdev->dev, "Failed to init cmdq channel\n");
goto err_clear_func_svc_used_state;
}
+ hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_ACTIVE_FLAG);
+
return 0;
err_clear_func_svc_used_state:
hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+err_free_comm_pf_to_mgmt:
+ hinic3_comm_pf_to_mgmt_free(hwdev);
err_free_basic_mgmt_ch:
free_base_mgmt_channel(hwdev);
@@ -431,11 +472,44 @@ err_free_basic_mgmt_ch:
static void hinic3_uninit_comm_ch(struct hinic3_hwdev *hwdev)
{
+ hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_INIT);
hinic3_free_cmdqs_channel(hwdev);
hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+ hinic3_comm_pf_to_mgmt_free(hwdev);
free_base_mgmt_channel(hwdev);
}
+static void hinic3_auto_sync_time_work(struct work_struct *work)
+{
+ struct delayed_work *delay = to_delayed_work(work);
+ struct hinic3_hwdev *hwdev;
+
+ hwdev = container_of(delay, struct hinic3_hwdev, sync_time_task);
+
+ hinic3_sync_time_to_fw(hwdev);
+
+ queue_delayed_work(hwdev->workq, &hwdev->sync_time_task,
+ msecs_to_jiffies(HINIC3_SYNFW_TIME_PERIOD));
+}
+
+static void hinic3_init_ppf_work(struct hinic3_hwdev *hwdev)
+{
+ if (hinic3_ppf_idx(hwdev) != hinic3_global_func_id(hwdev))
+ return;
+
+ INIT_DELAYED_WORK(&hwdev->sync_time_task, hinic3_auto_sync_time_work);
+ queue_delayed_work(hwdev->workq, &hwdev->sync_time_task,
+ msecs_to_jiffies(HINIC3_SYNFW_TIME_PERIOD));
+}
+
+static void hinic3_free_ppf_work(struct hinic3_hwdev *hwdev)
+{
+ if (hinic3_ppf_idx(hwdev) != hinic3_global_func_id(hwdev))
+ return;
+
+ disable_delayed_work_sync(&hwdev->sync_time_task);
+}
+
static DEFINE_IDA(hinic3_adev_ida);
static int hinic3_adev_idx_alloc(void)
@@ -472,7 +546,7 @@ int hinic3_init_hwdev(struct pci_dev *pdev)
goto err_free_hwdev;
}
- hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM,
+ hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM | WQ_PERCPU,
HINIC3_WQ_MAX_REQ);
if (!hwdev->workq) {
dev_err(hwdev->dev, "Failed to alloc hardware workq\n");
@@ -498,15 +572,19 @@ int hinic3_init_hwdev(struct pci_dev *pdev)
goto err_uninit_comm_ch;
}
+ hinic3_init_ppf_work(hwdev);
+
err = hinic3_set_comm_features(hwdev, hwdev->features,
COMM_MAX_FEATURE_QWORD);
if (err) {
dev_err(hwdev->dev, "Failed to set comm features\n");
- goto err_uninit_comm_ch;
+ goto err_free_ppf_work;
}
return 0;
+err_free_ppf_work:
+ hinic3_free_ppf_work(hwdev);
err_uninit_comm_ch:
hinic3_uninit_comm_ch(hwdev);
err_free_cfg_mgmt:
@@ -528,6 +606,7 @@ void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
u64 drv_features[COMM_MAX_FEATURE_QWORD] = {};
hinic3_set_comm_features(hwdev, drv_features, COMM_MAX_FEATURE_QWORD);
+ hinic3_free_ppf_work(hwdev);
hinic3_func_rx_tx_flush(hwdev);
hinic3_uninit_comm_ch(hwdev);
hinic3_free_cfg_mgmt(hwdev);
@@ -539,9 +618,21 @@ void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
void hinic3_set_api_stop(struct hinic3_hwdev *hwdev)
{
+ struct hinic3_recv_msg *recv_resp_msg;
struct hinic3_mbox *mbox;
spin_lock_bh(&hwdev->channel_lock);
+ if (HINIC3_IS_PF(hwdev) &&
+ test_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state)) {
+ recv_resp_msg = &hwdev->pf_to_mgmt->recv_resp_msg_from_mgmt;
+ spin_lock_bh(&hwdev->pf_to_mgmt->sync_event_lock);
+ if (hwdev->pf_to_mgmt->event_flag == COMM_SEND_EVENT_START) {
+ complete(&recv_resp_msg->recv_done);
+ hwdev->pf_to_mgmt->event_flag = COMM_SEND_EVENT_TIMEOUT;
+ }
+ spin_unlock_bh(&hwdev->pf_to_mgmt->sync_event_lock);
+ }
+
if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) {
mbox = hwdev->mbox;
spin_lock(&mbox->mbox_lock);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
index 62e2745e9316..9686c2600b46 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
@@ -17,6 +17,24 @@ enum hinic3_event_service_type {
HINIC3_EVENT_SRV_NIC = 1
};
+enum hinic3_comm_event_type {
+ HINIC3_COMM_EVENT_PCIE_LINK_DOWN = 0,
+ HINIC3_COMM_EVENT_HEART_LOST = 1,
+ HINIC3_COMM_EVENT_FAULT = 2,
+ HINIC3_COMM_EVENT_SRIOV_STATE_CHANGE = 3,
+ HINIC3_COMM_EVENT_CARD_REMOVE = 4,
+ HINIC3_COMM_EVENT_MGMT_WATCHDOG = 5,
+};
+
+enum hinic3_fault_err_level {
+ HINIC3_FAULT_LEVEL_SERIOUS_FLR = 3,
+};
+
+enum hinic3_fault_source_type {
+ HINIC3_FAULT_SRC_HW_PHY_FAULT = 9,
+ HINIC3_FAULT_SRC_TX_TIMEOUT = 22,
+};
+
#define HINIC3_SRV_EVENT_TYPE(svc, type) (((svc) << 16) | (type))
/* driver-specific data of pci_dev */
@@ -28,6 +46,7 @@ struct hinic3_pcidev {
void __iomem *cfg_reg_base;
void __iomem *intr_reg_base;
+ void __iomem *mgmt_reg_base;
void __iomem *db_base;
u64 db_dwqe_len;
u64 db_base_phy;
@@ -48,7 +67,9 @@ struct hinic3_hwdev {
struct hinic3_ceqs *ceqs;
struct hinic3_mbox *mbox;
struct hinic3_cmdqs *cmdqs;
+ struct delayed_work sync_time_task;
struct workqueue_struct *workq;
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
/* protect channel init and uninit */
spinlock_t channel_lock;
u64 features[COMM_MAX_FEATURE_QWORD];
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
index f76f140fb6f7..801f48e241f8 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
@@ -31,6 +31,7 @@
#define HINIC3_AF0_GET(val, member) \
FIELD_GET(HINIC3_AF0_##member##_MASK, val)
+#define HINIC3_AF1_PPF_IDX_MASK GENMASK(5, 0)
#define HINIC3_AF1_AEQS_PER_FUNC_MASK GENMASK(9, 8)
#define HINIC3_AF1_MGMT_INIT_STATUS_MASK BIT(30)
#define HINIC3_AF1_GET(val, member) \
@@ -41,6 +42,10 @@
#define HINIC3_AF2_GET(val, member) \
FIELD_GET(HINIC3_AF2_##member##_MASK, val)
+#define HINIC3_AF3_GLOBAL_VF_ID_OF_PF_MASK GENMASK(27, 16)
+#define HINIC3_AF3_GET(val, member) \
+ FIELD_GET(HINIC3_AF3_##member##_MASK, val)
+
#define HINIC3_AF4_DOORBELL_CTRL_MASK BIT(0)
#define HINIC3_AF4_GET(val, member) \
FIELD_GET(HINIC3_AF4_##member##_MASK, val)
@@ -54,9 +59,17 @@
#define HINIC3_AF6_PF_STATUS_MASK GENMASK(15, 0)
#define HINIC3_AF6_FUNC_MAX_SQ_MASK GENMASK(31, 23)
#define HINIC3_AF6_MSIX_FLEX_EN_MASK BIT(22)
+#define HINIC3_AF6_SET(val, member) \
+ FIELD_PREP(HINIC3_AF6_##member##_MASK, val)
#define HINIC3_AF6_GET(val, member) \
FIELD_GET(HINIC3_AF6_##member##_MASK, val)
+#define HINIC3_PPF_ELECTION_IDX_MASK GENMASK(5, 0)
+#define HINIC3_PPF_ELECTION_SET(val, member) \
+ FIELD_PREP(HINIC3_PPF_ELECTION_##member##_MASK, val)
+#define HINIC3_PPF_ELECTION_GET(val, member) \
+ FIELD_GET(HINIC3_PPF_ELECTION_##member##_MASK, val)
+
#define HINIC3_GET_REG_ADDR(reg) ((reg) & (HINIC3_REGS_FLAG_MASK))
static void __iomem *hinic3_reg_addr(struct hinic3_hwif *hwif, u32 reg)
@@ -105,12 +118,15 @@ static void set_hwif_attr(struct hinic3_func_attr *attr, u32 attr0, u32 attr1,
attr->pci_intf_idx = HINIC3_AF0_GET(attr0, PCI_INTF_IDX);
attr->func_type = HINIC3_AF0_GET(attr0, FUNC_TYPE);
+ attr->ppf_idx = HINIC3_AF1_GET(attr1, PPF_IDX);
attr->num_aeqs = BIT(HINIC3_AF1_GET(attr1, AEQS_PER_FUNC));
attr->num_ceqs = HINIC3_AF2_GET(attr2, CEQS_PER_FUNC);
attr->num_irqs = HINIC3_AF2_GET(attr2, IRQS_PER_FUNC);
if (attr->num_irqs > HINIC3_MAX_MSIX_ENTRY)
attr->num_irqs = HINIC3_MAX_MSIX_ENTRY;
+ attr->global_vf_id_of_pf = HINIC3_AF3_GET(attr3, GLOBAL_VF_ID_OF_PF);
+
attr->num_sq = HINIC3_AF6_GET(attr6, FUNC_MAX_SQ);
attr->msix_flex_en = HINIC3_AF6_GET(attr6, MSIX_FLEX_EN);
}
@@ -187,6 +203,28 @@ void hinic3_toggle_doorbell(struct hinic3_hwif *hwif,
hinic3_hwif_write_reg(hwif, addr, attr4);
}
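+
+/* PPF election: a PF publishes its global function index in the election
+ * register, then reads the register back; the index reported by hardware
+ * is the function that currently holds the PPF role.
+ */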
+static void hinic3_set_ppf(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_hwif *hwif = hwdev->hwif;
+ struct hinic3_func_attr *attr;
+ u32 addr, val;
+
+ if (HINIC3_IS_VF(hwdev))
+ return;
+
+	/* Read-modify-write of the PPF election register */
+ attr = &hwif->attr;
+ addr = HINIC3_CSR_PPF_ELECTION_ADDR;
+ val = hinic3_hwif_read_reg(hwif, addr);
+ val &= ~HINIC3_PPF_ELECTION_IDX_MASK;
+ val |= HINIC3_PPF_ELECTION_SET(attr->func_global_idx, IDX);
+ hinic3_hwif_write_reg(hwif, addr, val);
+
+ /* Check PPF index */
+ val = hinic3_hwif_read_reg(hwif, addr);
+ attr->ppf_idx = HINIC3_PPF_ELECTION_GET(val, IDX);
+}
+
static int db_area_idx_init(struct hinic3_hwif *hwif, u64 db_base_phy,
u8 __iomem *db_base, u64 db_dwqe_len)
{
@@ -366,6 +404,27 @@ static int wait_until_doorbell_and_outbound_enabled(struct hinic3_hwif *hwif)
USEC_PER_MSEC);
}
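+
+/* Report driver state to the management CPU through the PF_STATUS field of
+ * attribute register 6; HINIC3_PF_STATUS_INIT mutes event reporting,
+ * HINIC3_PF_STATUS_ACTIVE_FLAG enables it. No-op on VFs.
+ */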
+void hinic3_set_pf_status(struct hinic3_hwif *hwif,
+			  enum hinic3_pf_status status)
+{
+	u32 attr6;
+
+	if (hwif->attr.func_type == HINIC3_FUNC_TYPE_VF)
+		return;
+
+	attr6 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR);
+	attr6 &= ~HINIC3_AF6_PF_STATUS_MASK;
+	attr6 |= HINIC3_AF6_SET(status, PF_STATUS);
+	hinic3_hwif_write_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR, attr6);
+}
+
+enum hinic3_pf_status hinic3_get_pf_status(struct hinic3_hwif *hwif)
+{
+ u32 attr6 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR);
+
+ return HINIC3_AF6_GET(attr6, PF_STATUS);
+}
+
int hinic3_init_hwif(struct hinic3_hwdev *hwdev)
{
struct hinic3_pcidev *pci_adapter = hwdev->adapter;
@@ -378,9 +437,15 @@ int hinic3_init_hwif(struct hinic3_hwdev *hwdev)
return -ENOMEM;
hwdev->hwif = hwif;
- hwif->cfg_regs_base = (u8 __iomem *)pci_adapter->cfg_reg_base +
+	/* A VF has no mgmt BAR, so mgmt_reg_base is NULL and its config
+	 * registers live at an offset inside the shared BAR.
+	 */
+ hwif->cfg_regs_base = pci_adapter->mgmt_reg_base ?
+ pci_adapter->cfg_reg_base :
+ (u8 __iomem *)pci_adapter->cfg_reg_base +
HINIC3_VF_CFG_REG_OFFSET;
+ hwif->intr_regs_base = pci_adapter->intr_reg_base;
+ hwif->mgmt_regs_base = pci_adapter->mgmt_reg_base;
+
err = db_area_idx_init(hwif, pci_adapter->db_base_phy,
pci_adapter->db_base,
pci_adapter->db_dwqe_len);
@@ -412,7 +477,15 @@ int hinic3_init_hwif(struct hinic3_hwdev *hwdev)
goto err_free_db_area_idx;
}
+ hinic3_set_ppf(hwdev);
+
disable_all_msix(hwdev);
+	/* Prevent the mgmt CPU from reporting events until init completes */
+ hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_INIT);
+
+ dev_dbg(hwdev->dev, "global_func_idx: %u, func_type: %d, host_id: %u, ppf: %u\n",
+ hwif->attr.func_global_idx, hwif->attr.func_type,
+ hwif->attr.pci_intf_idx, hwif->attr.ppf_idx);
return 0;
@@ -434,3 +507,18 @@ u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev)
{
return hwdev->hwif->attr.func_global_idx;
}
+
+u8 hinic3_pf_id_of_vf(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->hwif->attr.port_to_port_idx;
+}
+
+u16 hinic3_glb_pf_vf_offset(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->hwif->attr.global_vf_id_of_pf;
+}
+
+u8 hinic3_ppf_idx(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->hwif->attr.ppf_idx;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
index c02904e861cc..445bf7fa79b4 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
@@ -10,6 +10,7 @@
struct hinic3_hwdev;
enum hinic3_func_type {
+ HINIC3_FUNC_TYPE_PF = 0,
HINIC3_FUNC_TYPE_VF = 1,
};
@@ -38,6 +39,8 @@ static_assert(sizeof(struct hinic3_func_attr) == 20);
struct hinic3_hwif {
u8 __iomem *cfg_regs_base;
+ u8 __iomem *intr_regs_base;
+ u8 __iomem *mgmt_regs_base;
u64 db_base_phy;
u64 db_dwqe_len;
u8 __iomem *db_base;
@@ -50,6 +53,13 @@ enum hinic3_outbound_ctrl {
DISABLE_OUTBOUND = 0x1,
};
+enum hinic3_pf_status {
+ HINIC3_PF_STATUS_INIT = 0x0,
+ HINIC3_PF_STATUS_ACTIVE_FLAG = 0x11,
+ HINIC3_PF_STATUS_FLR_START_FLAG = 0x12,
+ HINIC3_PF_STATUS_FLR_FINISH_FLAG = 0x13,
+};
+
enum hinic3_doorbell_ctrl {
ENABLE_DOORBELL = 0,
DISABLE_DOORBELL = 1,
@@ -65,6 +75,12 @@ enum hinic3_msix_auto_mask {
HINIC3_SET_MSIX_AUTO_MASK,
};
+#define HINIC3_FUNC_TYPE(hwdev) ((hwdev)->hwif->attr.func_type)
+#define HINIC3_IS_PF(hwdev) \
+ (HINIC3_FUNC_TYPE(hwdev) == HINIC3_FUNC_TYPE_PF)
+#define HINIC3_IS_VF(hwdev) \
+ (HINIC3_FUNC_TYPE(hwdev) == HINIC3_FUNC_TYPE_VF)
+
u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg);
void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val);
@@ -75,6 +91,10 @@ int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base,
void __iomem **dwqe_base);
void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base);
+void hinic3_set_pf_status(struct hinic3_hwif *hwif,
+ enum hinic3_pf_status status);
+enum hinic3_pf_status hinic3_get_pf_status(struct hinic3_hwif *hwif);
+
int hinic3_init_hwif(struct hinic3_hwdev *hwdev);
void hinic3_free_hwif(struct hinic3_hwdev *hwdev);
@@ -86,5 +106,8 @@ void hinic3_set_msix_auto_mask_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
enum hinic3_msix_auto_mask flag);
u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev);
+u8 hinic3_pf_id_of_vf(struct hinic3_hwdev *hwdev);
+u16 hinic3_glb_pf_vf_offset(struct hinic3_hwdev *hwdev);
+u8 hinic3_ppf_idx(struct hinic3_hwdev *hwdev);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
index 84bee5d6e638..e7d6c2033b45 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+#include <linux/dim.h>
#include <linux/netdevice.h>
#include "hinic3_hw_comm.h"
@@ -10,6 +11,23 @@
#include "hinic3_rx.h"
#include "hinic3_tx.h"
+#define HINIC3_COAL_PKT_SHIFT 5
+
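+/* Feed an interrupt/packet/byte sample to the net_dim library so that it
+ * can adapt this queue's RX interrupt moderation. Skipped while the
+ * interface is down or adaptive coalescing is off.
+ */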
+static void hinic3_net_dim(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_irq_cfg *irq_cfg)
+{
+ struct hinic3_rxq *rxq = irq_cfg->rxq;
+ struct dim_sample sample = {};
+
+ if (!test_bit(HINIC3_INTF_UP, &nic_dev->flags) ||
+ !nic_dev->adaptive_rx_coal)
+ return;
+
+ dim_update_sample(irq_cfg->total_events, rxq->rxq_stats.packets,
+ rxq->rxq_stats.bytes, &sample);
+ net_dim(&rxq->dim, &sample);
+}
+
static int hinic3_poll(struct napi_struct *napi, int budget)
{
struct hinic3_irq_cfg *irq_cfg =
@@ -31,9 +49,11 @@ static int hinic3_poll(struct napi_struct *napi, int budget)
if (busy)
return budget;
- if (likely(napi_complete_done(napi, work_done)))
+ if (likely(napi_complete_done(napi, work_done))) {
+ hinic3_net_dim(nic_dev, irq_cfg);
hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
HINIC3_MSIX_ENABLE);
+ }
return work_done;
}
@@ -61,6 +81,8 @@ static irqreturn_t qp_irq(int irq, void *data)
hinic3_msix_intr_clear_resend_bit(nic_dev->hwdev,
irq_cfg->msix_entry_idx, 1);
+ irq_cfg->total_events++;
+
napi_schedule(&irq_cfg->napi);
return IRQ_HANDLED;
@@ -83,7 +105,7 @@ static int hinic3_request_irq(struct hinic3_irq_cfg *irq_cfg, u16 q_id)
info.coalesc_timer_cfg =
nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg;
- err = hinic3_set_interrupt_cfg_direct(nic_dev->hwdev, &info);
+ err = hinic3_set_interrupt_cfg(nic_dev->hwdev, info);
if (err) {
netdev_err(netdev, "Failed to set RX interrupt coalescing attribute.\n");
qp_del_napi(irq_cfg);
@@ -108,6 +130,71 @@ static void hinic3_release_irq(struct hinic3_irq_cfg *irq_cfg)
free_irq(irq_cfg->irq_id, irq_cfg);
}
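+
+/* Program one RX queue's MSI-X coalescing timer and pending limit, and
+ * remember the values last written to the hardware.
+ */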
+static int hinic3_set_interrupt_moder(struct net_device *netdev, u16 q_id,
+ u8 coalesc_timer_cfg, u8 pending_limit)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_interrupt_info info = {};
+ int err;
+
+ if (q_id >= nic_dev->q_params.num_qps)
+ return 0;
+
+ info.interrupt_coalesc_set = 1;
+ info.coalesc_timer_cfg = coalesc_timer_cfg;
+ info.pending_limit = pending_limit;
+ info.msix_index = nic_dev->q_params.irq_cfg[q_id].msix_entry_idx;
+ info.resend_timer_cfg =
+ nic_dev->intr_coalesce[q_id].resend_timer_cfg;
+
+ err = hinic3_set_interrupt_cfg(nic_dev->hwdev, info);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to modify moderation for Queue: %u\n", q_id);
+ } else {
+ nic_dev->rxqs[q_id].last_coalesc_timer_cfg = coalesc_timer_cfg;
+ nic_dev->rxqs[q_id].last_pending_limit = pending_limit;
+ }
+
+ return err;
+}
+
+static void hinic3_update_queue_coal(struct net_device *netdev, u16 q_id,
+ u16 coal_timer, u16 coal_pkts)
+{
+ struct hinic3_intr_coal_info *q_coal;
+ u8 coalesc_timer_cfg, pending_limit;
+ struct hinic3_nic_dev *nic_dev;
+
+ nic_dev = netdev_priv(netdev);
+
+ q_coal = &nic_dev->intr_coalesce[q_id];
+ coalesc_timer_cfg = (u8)coal_timer;
+ pending_limit = clamp_t(u8, coal_pkts >> HINIC3_COAL_PKT_SHIFT,
+ q_coal->rx_pending_limit_low,
+ q_coal->rx_pending_limit_high);
+
+ hinic3_set_interrupt_moder(nic_dev->netdev, q_id,
+ coalesc_timer_cfg, pending_limit);
+}
+
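+/* Worker kicked by net_dim() once it settles on a new moderation profile:
+ * translate the profile into device coalescing parameters, then re-arm DIM
+ * for the next measurement round.
+ */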
+static void hinic3_rx_dim_work(struct work_struct *work)
+{
+ struct dim_cq_moder cur_moder;
+ struct hinic3_rxq *rxq;
+ struct dim *dim;
+
+ dim = container_of(work, struct dim, work);
+ rxq = container_of(dim, struct hinic3_rxq, dim);
+
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+
+ hinic3_update_queue_coal(rxq->netdev, rxq->q_id,
+ cur_moder.usec, cur_moder.pkts);
+
+ dim->state = DIM_START_MEASURE;
+}
+
int hinic3_qps_irq_init(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -141,6 +228,9 @@ int hinic3_qps_irq_init(struct net_device *netdev)
goto err_release_irqs;
}
+ INIT_WORK(&irq_cfg->rxq->dim.work, hinic3_rx_dim_work);
+ irq_cfg->rxq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+
netif_queue_set_napi(irq_cfg->netdev, q_id,
NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
netif_queue_set_napi(irq_cfg->netdev, q_id,
@@ -164,12 +254,14 @@ err_release_irqs:
NETDEV_QUEUE_TYPE_RX, NULL);
netif_queue_set_napi(irq_cfg->netdev, q_id,
NETDEV_QUEUE_TYPE_TX, NULL);
+
hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
HINIC3_MSIX_DISABLE);
hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
irq_cfg->msix_entry_idx,
HINIC3_CLR_MSIX_AUTO_MASK);
hinic3_release_irq(irq_cfg);
+ disable_work_sync(&irq_cfg->rxq->dim.work);
}
return err;
@@ -194,5 +286,6 @@ void hinic3_qps_irq_uninit(struct net_device *netdev)
irq_cfg->msix_entry_idx,
HINIC3_CLR_MSIX_AUTO_MASK);
hinic3_release_irq(irq_cfg);
+ disable_work_sync(&irq_cfg->rxq->dim.work);
}
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
index 3db8241a3b0c..87413e192f10 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
@@ -5,15 +5,22 @@
#include <linux/iopoll.h>
#include "hinic3_hw_cfg.h"
+#include "hinic3_hw_comm.h"
#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
#include "hinic3_lld.h"
#include "hinic3_mgmt.h"
#include "hinic3_pci_id_tbl.h"
#define HINIC3_VF_PCI_CFG_REG_BAR 0
+#define HINIC3_PF_PCI_CFG_REG_BAR 1
#define HINIC3_PCI_INTR_REG_BAR 2
+/* Only the PF has a mgmt BAR */
+#define HINIC3_PCI_MGMT_REG_BAR 3
#define HINIC3_PCI_DB_BAR 4
+#define HINIC3_IS_VF_DEV(pdev) ((pdev)->device == PCI_DEV_ID_HINIC3_VF)
+
#define HINIC3_EVENT_POLL_SLEEP_US 1000
#define HINIC3_EVENT_POLL_TIMEOUT_US 10000000
@@ -181,8 +188,12 @@ void hinic3_adev_event_unregister(struct auxiliary_device *adev)
static int hinic3_mapping_bar(struct pci_dev *pdev,
struct hinic3_pcidev *pci_adapter)
{
- pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev,
- HINIC3_VF_PCI_CFG_REG_BAR);
+ int cfg_bar;
+
+ cfg_bar = HINIC3_IS_VF_DEV(pdev) ?
+ HINIC3_VF_PCI_CFG_REG_BAR : HINIC3_PF_PCI_CFG_REG_BAR;
+
+ pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, cfg_bar);
if (!pci_adapter->cfg_reg_base) {
dev_err(&pdev->dev, "Failed to map configuration regs\n");
return -ENOMEM;
@@ -195,19 +206,30 @@ static int hinic3_mapping_bar(struct pci_dev *pdev,
goto err_unmap_cfg_reg_base;
}
+ if (!HINIC3_IS_VF_DEV(pdev)) {
+ pci_adapter->mgmt_reg_base =
+ pci_ioremap_bar(pdev, HINIC3_PCI_MGMT_REG_BAR);
+ if (!pci_adapter->mgmt_reg_base) {
+ dev_err(&pdev->dev, "Failed to map mgmt regs\n");
+ goto err_unmap_intr_reg_base;
+ }
+ }
+
pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC3_PCI_DB_BAR);
pci_adapter->db_dwqe_len = pci_resource_len(pdev, HINIC3_PCI_DB_BAR);
pci_adapter->db_base = pci_ioremap_bar(pdev, HINIC3_PCI_DB_BAR);
if (!pci_adapter->db_base) {
dev_err(&pdev->dev, "Failed to map doorbell regs\n");
- goto err_unmap_intr_reg_base;
+ goto err_unmap_mgmt_reg_base;
}
return 0;
+err_unmap_mgmt_reg_base:
+ if (!HINIC3_IS_VF_DEV(pdev))
+ iounmap(pci_adapter->mgmt_reg_base);
err_unmap_intr_reg_base:
iounmap(pci_adapter->intr_reg_base);
-
err_unmap_cfg_reg_base:
iounmap(pci_adapter->cfg_reg_base);
@@ -217,6 +239,8 @@ err_unmap_cfg_reg_base:
static void hinic3_unmapping_bar(struct hinic3_pcidev *pci_adapter)
{
iounmap(pci_adapter->db_base);
+ if (!HINIC3_IS_VF_DEV(pci_adapter->pdev))
+ iounmap(pci_adapter->mgmt_reg_base);
iounmap(pci_adapter->intr_reg_base);
iounmap(pci_adapter->cfg_reg_base);
}
@@ -260,10 +284,8 @@ static int hinic3_pci_init(struct pci_dev *pdev)
err_release_regions:
pci_clear_master(pdev);
pci_release_regions(pdev);
-
err_disable_device:
pci_disable_device(pdev);
-
err_free_pci_adapter:
pci_set_drvdata(pdev, NULL);
mutex_destroy(&pci_adapter->pdev_mutex);
@@ -295,6 +317,9 @@ static int hinic3_func_init(struct pci_dev *pdev,
return err;
}
+ if (HINIC3_IS_PF(pci_adapter->hwdev))
+ hinic3_sync_time_to_fw(pci_adapter->hwdev);
+
err = hinic3_attach_aux_devices(pci_adapter->hwdev);
if (err)
goto err_free_hwdev;
@@ -311,6 +336,8 @@ static void hinic3_func_uninit(struct pci_dev *pdev)
{
struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+	/* Disable mgmt event reporting before flushing the mgmt workqueue */
+ hinic3_set_pf_status(pci_adapter->hwdev->hwif, HINIC3_PF_STATUS_INIT);
hinic3_flush_mgmt_workq(pci_adapter->hwdev);
hinic3_detach_aux_devices(pci_adapter->hwdev);
hinic3_free_hwdev(pci_adapter->hwdev);
@@ -319,6 +346,7 @@ static void hinic3_func_uninit(struct pci_dev *pdev)
static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter)
{
struct pci_dev *pdev = pci_adapter->pdev;
+ struct comm_cmd_bdf_info bdf_info = {};
int err;
err = hinic3_mapping_bar(pdev, pci_adapter);
@@ -331,11 +359,26 @@ static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter)
if (err)
goto err_unmap_bar;
+ if (HINIC3_IS_PF(pci_adapter->hwdev)) {
+ bdf_info.function_idx =
+ hinic3_global_func_id(pci_adapter->hwdev);
+ bdf_info.bus = pdev->bus->number;
+ bdf_info.device = PCI_SLOT(pdev->devfn);
+ bdf_info.function = PCI_FUNC(pdev->devfn);
+
+ err = hinic3_set_bdf_ctxt(pci_adapter->hwdev, &bdf_info);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set BDF info to fw\n");
+ goto err_uninit_func;
+ }
+ }
+
return 0;
+err_uninit_func:
+ hinic3_func_uninit(pdev);
err_unmap_bar:
hinic3_unmapping_bar(pci_adapter);
-
err_out:
dev_err(&pdev->dev, "PCIe device probe function failed\n");
@@ -368,7 +411,6 @@ static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_uninit_pci:
hinic3_pci_uninit(pdev);
-
err_out:
dev_err(&pdev->dev, "PCIe device probe failed\n");
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
index 6d87d4d895ba..6275d94dfefd 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -29,6 +29,9 @@
#define HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG 25
#define HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG 7
+#define HINIC3_RX_PENDING_LIMIT_LOW 2
+#define HINIC3_RX_PENDING_LIMIT_HIGH 8
+
static void init_intr_coal_param(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -38,9 +41,16 @@ static void init_intr_coal_param(struct net_device *netdev)
for (i = 0; i < nic_dev->max_qps; i++) {
info = &nic_dev->intr_coalesce[i];
info->pending_limit = HINIC3_DEFAULT_TXRX_MSIX_PENDING_LIMIT;
- info->coalesce_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG;
- info->resend_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+ info->coalesce_timer_cfg =
+ HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+ info->resend_timer_cfg =
+ HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+
+ info->rx_pending_limit_high = HINIC3_RX_PENDING_LIMIT_HIGH;
+ info->rx_pending_limit_low = HINIC3_RX_PENDING_LIMIT_LOW;
}
+
+ nic_dev->adaptive_rx_coal = 1;
}
static int hinic3_init_intr_coalesce(struct net_device *netdev)
@@ -94,7 +104,6 @@ static int hinic3_alloc_txrxqs(struct net_device *netdev)
err_free_rxqs:
hinic3_free_rxqs(netdev);
-
err_free_txqs:
hinic3_free_txqs(netdev);
@@ -108,6 +117,22 @@ static void hinic3_free_txrxqs(struct net_device *netdev)
hinic3_free_txqs(netdev);
}
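+
+/* Once-a-second housekeeping: if the TX-timeout handler flagged an event,
+ * report the fault, then re-arm the delayed work.
+ */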
+static void hinic3_periodic_work_handler(struct work_struct *work)
+{
+ struct delayed_work *delay = to_delayed_work(work);
+ struct hinic3_nic_dev *nic_dev;
+
+ nic_dev = container_of(delay, struct hinic3_nic_dev, periodic_work);
+ if (test_and_clear_bit(HINIC3_EVENT_WORK_TX_TIMEOUT,
+ &nic_dev->event_flag))
+ dev_info(nic_dev->hwdev->dev,
+ "Fault event report, src: %u, level: %u\n",
+ HINIC3_FAULT_SRC_TX_TIMEOUT,
+ HINIC3_FAULT_LEVEL_SERIOUS_FLR);
+
+ queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ);
+}
+
static int hinic3_init_nic_dev(struct net_device *netdev,
struct hinic3_hwdev *hwdev)
{
@@ -121,8 +146,27 @@ static int hinic3_init_nic_dev(struct net_device *netdev,
nic_dev->rx_buf_len = HINIC3_RX_BUF_LEN;
nic_dev->lro_replenish_thld = HINIC3_LRO_REPLENISH_THLD;
+ nic_dev->vlan_bitmap = kzalloc(HINIC3_VLAN_BITMAP_SIZE(nic_dev),
+ GFP_KERNEL);
+ if (!nic_dev->vlan_bitmap)
+ return -ENOMEM;
+
nic_dev->nic_svc_cap = hwdev->cfg_mgmt->cap.nic_svc_cap;
+ nic_dev->workq = create_singlethread_workqueue(HINIC3_NIC_DEV_WQ_NAME);
+ if (!nic_dev->workq) {
+ dev_err(hwdev->dev, "Failed to initialize nic workqueue\n");
+ kfree(nic_dev->vlan_bitmap);
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&nic_dev->periodic_work,
+ hinic3_periodic_work_handler);
+
+ INIT_LIST_HEAD(&nic_dev->uc_filter_list);
+ INIT_LIST_HEAD(&nic_dev->mc_filter_list);
+ INIT_WORK(&nic_dev->rx_mode_work, hinic3_set_rx_mode_work);
+
return 0;
}
@@ -130,23 +174,39 @@ static int hinic3_sw_init(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ u8 mac_addr[ETH_ALEN];
int err;
+ mutex_init(&nic_dev->port_state_mutex);
+
nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH;
nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
hinic3_try_to_enable_rss(netdev);
- /* VF driver always uses random MAC address. During VM migration to a
- * new device, the new device should learn the VMs old MAC rather than
- * provide its own MAC. The product design assumes that every VF is
- * suspectable to migration so the device avoids offering MAC address
- * to VFs.
- */
- eth_hw_addr_random(netdev);
+ if (HINIC3_IS_VF(hwdev)) {
+ /* VF driver always uses random MAC address. During VM migration
+ * to a new device, the new device should learn the VMs old MAC
+ * rather than provide its own MAC. The product design assumes
+ * that every VF is susceptible to migration so the device
+ * avoids offering MAC address to VFs.
+ */
+ eth_hw_addr_random(netdev);
+ } else {
+ err = hinic3_get_default_mac(hwdev, mac_addr);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to get MAC address\n");
+ goto err_clear_rss_config;
+ }
+ eth_hw_addr_set(netdev, mac_addr);
+ }
+
err = hinic3_set_mac(hwdev, netdev->dev_addr, 0,
hinic3_global_func_id(hwdev));
- if (err) {
+ /* Failure to set MAC is not a fatal error for VF since its MAC may have
+ * already been set by PF
+ */
+ if (err && err != -EADDRINUSE) {
dev_err(hwdev->dev, "Failed to set default MAC\n");
goto err_clear_rss_config;
}
@@ -173,6 +233,7 @@ static void hinic3_sw_uninit(struct net_device *netdev)
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
hinic3_free_txrxqs(netdev);
+ hinic3_clean_mac_list_filter(netdev);
hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
hinic3_global_func_id(nic_dev->hwdev));
hinic3_clear_rss_config(netdev);
@@ -186,6 +247,8 @@ static void hinic3_assign_netdev_ops(struct net_device *netdev)
static void netdev_feature_init(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ netdev_features_t hw_features = 0;
+ netdev_features_t vlan_fts = 0;
netdev_features_t cso_fts = 0;
netdev_features_t tso_fts = 0;
netdev_features_t dft_fts;
@@ -198,7 +261,29 @@ static void netdev_feature_init(struct net_device *netdev)
if (hinic3_test_support(nic_dev, HINIC3_NIC_F_TSO))
tso_fts |= NETIF_F_TSO | NETIF_F_TSO6;
- netdev->features |= dft_fts | cso_fts | tso_fts;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_RX_VLAN_STRIP |
+ HINIC3_NIC_F_TX_VLAN_INSERT))
+ vlan_fts |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_RX_VLAN_FILTER))
+ vlan_fts |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_VXLAN_OFFLOAD))
+ tso_fts |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+	/* LRO is disabled by default; only advertise it via hw_features */
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_LRO))
+ hw_features |= NETIF_F_LRO;
+
+ netdev->features |= dft_fts | cso_fts | tso_fts | vlan_fts;
+ netdev->vlan_features |= dft_fts | cso_fts | tso_fts;
+ hw_features |= netdev->hw_features | netdev->features;
+ netdev->hw_features = hw_features;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->hw_enc_features |= dft_fts;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_VXLAN_OFFLOAD))
+ netdev->hw_enc_features |= cso_fts | tso_fts | NETIF_F_TSO_ECN;
}
static int hinic3_set_default_hw_feature(struct net_device *netdev)
@@ -213,6 +298,13 @@ static int hinic3_set_default_hw_feature(struct net_device *netdev)
return err;
}
+ err = hinic3_set_hw_features(netdev);
+ if (err) {
+ hinic3_update_nic_feature(nic_dev, 0);
+ hinic3_set_nic_feature_to_hw(nic_dev);
+ return err;
+ }
+
return 0;
}
@@ -238,6 +330,44 @@ static void hinic3_link_status_change(struct net_device *netdev,
}
}
+static void hinic3_port_module_event_handler(struct net_device *netdev,
+ struct hinic3_event_info *event)
+{
+	static const char * const module_link_err[LINK_ERR_NUM] = {
+		"Unrecognized module"
+	};
+ struct hinic3_port_module_event *module_event;
+ enum port_module_event_type type;
+ enum link_err_type err_type;
+
+ module_event = (struct hinic3_port_module_event *)event->event_data;
+ type = module_event->type;
+ err_type = module_event->err_type;
+
+ switch (type) {
+ case HINIC3_PORT_MODULE_CABLE_PLUGGED:
+ case HINIC3_PORT_MODULE_CABLE_UNPLUGGED:
+ netdev_info(netdev, "Port module event: Cable %s\n",
+ type == HINIC3_PORT_MODULE_CABLE_PLUGGED ?
+ "plugged" : "unplugged");
+ break;
+ case HINIC3_PORT_MODULE_LINK_ERR:
+ if (err_type >= LINK_ERR_NUM) {
+ netdev_info(netdev, "Link failed, Unknown error type: 0x%x\n",
+ err_type);
+ } else {
+ netdev_info(netdev,
+ "Link failed, error type: 0x%x: %s\n",
+ err_type,
+				    module_link_err[err_type]);
+ }
+ break;
+ default:
+ netdev_err(netdev, "Unknown port module type %d\n", type);
+ break;
+ }
+}
+
static void hinic3_nic_event(struct auxiliary_device *adev,
struct hinic3_event_info *event)
{
@@ -252,7 +382,19 @@ static void hinic3_nic_event(struct auxiliary_device *adev,
hinic3_link_status_change(netdev, true);
break;
case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
+ HINIC3_NIC_EVENT_PORT_MODULE_EVENT):
+ hinic3_port_module_event_handler(netdev, event);
+ break;
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
HINIC3_NIC_EVENT_LINK_DOWN):
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_COMM,
+ HINIC3_COMM_EVENT_FAULT):
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_COMM,
+ HINIC3_COMM_EVENT_PCIE_LINK_DOWN):
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_COMM,
+ HINIC3_COMM_EVENT_HEART_LOST):
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_COMM,
+ HINIC3_COMM_EVENT_MGMT_WATCHDOG):
hinic3_link_status_change(netdev, false);
break;
default:
@@ -260,6 +402,12 @@ static void hinic3_nic_event(struct auxiliary_device *adev,
}
}
+static void hinic3_free_nic_dev(struct hinic3_nic_dev *nic_dev)
+{
+ destroy_workqueue(nic_dev->workq);
+ kfree(nic_dev->vlan_bitmap);
+}
+
static int hinic3_nic_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
@@ -300,7 +448,7 @@ static int hinic3_nic_probe(struct auxiliary_device *adev,
err = hinic3_init_nic_io(nic_dev);
if (err)
- goto err_free_netdev;
+ goto err_free_nic_dev;
err = hinic3_sw_init(netdev);
if (err)
@@ -313,6 +461,7 @@ static int hinic3_nic_probe(struct auxiliary_device *adev,
if (err)
goto err_uninit_sw;
+ queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ);
netif_carrier_off(netdev);
err = register_netdev(netdev);
@@ -322,18 +471,17 @@ static int hinic3_nic_probe(struct auxiliary_device *adev,
return 0;
err_uninit_nic_feature:
+ disable_delayed_work_sync(&nic_dev->periodic_work);
hinic3_update_nic_feature(nic_dev, 0);
hinic3_set_nic_feature_to_hw(nic_dev);
-
err_uninit_sw:
hinic3_sw_uninit(netdev);
-
err_free_nic_io:
hinic3_free_nic_io(nic_dev);
-
+err_free_nic_dev:
+ hinic3_free_nic_dev(nic_dev);
err_free_netdev:
free_netdev(netdev);
-
err_unregister_adev_event:
hinic3_adev_event_unregister(adev);
dev_err(&pdev->dev, "NIC service probe failed\n");
@@ -352,6 +500,10 @@ static void hinic3_nic_remove(struct auxiliary_device *adev)
netdev = nic_dev->netdev;
unregister_netdev(netdev);
+ disable_delayed_work_sync(&nic_dev->periodic_work);
+ cancel_work_sync(&nic_dev->rx_mode_work);
+ hinic3_free_nic_dev(nic_dev);
+
hinic3_update_nic_feature(nic_dev, 0);
hinic3_set_nic_feature_to_hw(nic_dev);
hinic3_sw_uninit(netdev);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
index cf67e26acece..c871fd0fb109 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
@@ -82,10 +82,19 @@ static struct hinic3_msg_desc *get_mbox_msg_desc(struct hinic3_mbox *mbox,
enum mbox_msg_direction_type dir,
u16 src_func_id)
{
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
struct hinic3_msg_channel *msg_ch;
- msg_ch = (src_func_id == MBOX_MGMT_FUNC_ID) ?
- &mbox->mgmt_msg : mbox->func_msg;
+ if (src_func_id == MBOX_MGMT_FUNC_ID) {
+ msg_ch = &mbox->mgmt_msg;
+ } else if (HINIC3_IS_VF(hwdev)) {
+		/* Message must come from this VF's parent PF */
+ msg_ch = mbox->func_msg;
+ if (src_func_id != hinic3_pf_id_of_vf(hwdev) || !msg_ch)
+ return NULL;
+ } else {
+ return NULL;
+ }
return (dir == MBOX_MSG_SEND) ?
&msg_ch->recv_msg : &msg_ch->resp_msg;
@@ -191,6 +200,12 @@ void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
dir = MBOX_MSG_HEADER_GET(mbox_header, DIRECTION);
src_func_id = MBOX_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
msg_desc = get_mbox_msg_desc(mbox, dir, src_func_id);
+ if (!msg_desc) {
+ dev_err(mbox->hwdev->dev,
+ "Mailbox source function id: %u is invalid for current function\n",
+ src_func_id);
+ return;
+ }
recv_mbox_handler(mbox, header, msg_desc);
}
@@ -409,9 +424,12 @@ int hinic3_init_mbox(struct hinic3_hwdev *hwdev)
if (err)
goto err_destroy_workqueue;
- err = hinic3_init_func_mbox_msg_channel(hwdev);
- if (err)
- goto err_uninit_mgmt_msg_ch;
+ if (HINIC3_IS_VF(hwdev)) {
+ /* VF to PF mbox message channel */
+ err = hinic3_init_func_mbox_msg_channel(hwdev);
+ if (err)
+ goto err_uninit_mgmt_msg_ch;
+ }
err = alloc_mbox_wb_status(mbox);
if (err) {
@@ -424,14 +442,12 @@ int hinic3_init_mbox(struct hinic3_hwdev *hwdev)
return 0;
err_uninit_func_mbox_msg_ch:
- hinic3_uninit_func_mbox_msg_channel(hwdev);
-
+ if (HINIC3_IS_VF(hwdev))
+ hinic3_uninit_func_mbox_msg_channel(hwdev);
err_uninit_mgmt_msg_ch:
uninit_mgmt_msg_channel(mbox);
-
err_destroy_workqueue:
destroy_workqueue(mbox->workq);
-
err_free_mbox:
kfree(mbox);
@@ -576,7 +592,13 @@ static void write_mbox_msg_attr(struct hinic3_mbox *mbox,
{
struct hinic3_hwif *hwif = mbox->hwdev->hwif;
u32 mbox_int, mbox_ctrl, tx_size;
+ u16 func = dst_func;
+ /* VF can send non-management messages only to PF. We set DST_FUNC field
+ * to 0 since HW will ignore it anyway.
+ */
+ if (HINIC3_IS_VF(mbox->hwdev) && dst_func != MBOX_MGMT_FUNC_ID)
+ func = 0;
tx_size = ALIGN(seg_len + MBOX_HEADER_SZ, MBOX_SEG_LEN_ALIGN) >> 2;
mbox_int = MBOX_INT_SET(dst_aeqn, DST_AEQN) |
@@ -587,7 +609,7 @@ static void write_mbox_msg_attr(struct hinic3_mbox *mbox,
mbox_ctrl = MBOX_CTRL_SET(1, TX_STATUS) |
MBOX_CTRL_SET(0, TRIGGER_AEQE) |
- MBOX_CTRL_SET(dst_func, DST_FUNC);
+ MBOX_CTRL_SET(func, DST_FUNC);
hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_INT_OFF, mbox_int);
hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF,
@@ -840,6 +862,19 @@ err_send:
return err;
}
+void hinic3_response_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const void *buf_in, u32 in_size, u16 msg_id)
+{
+ struct mbox_msg_info msg_info;
+
+ msg_info.msg_id = (u8)msg_id;
+ msg_info.status = 0;
+
+ send_mbox_msg(hwdev->mbox, mod, cmd, buf_in, in_size,
+ MBOX_MGMT_FUNC_ID, MBOX_MSG_RESP,
+ MBOX_MSG_NO_ACK, &msg_info);
+}
+
int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params)
{
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
index e71629e95086..e26f22d1d564 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
@@ -135,6 +135,8 @@ void hinic3_free_mbox(struct hinic3_hwdev *hwdev);
int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params);
+void hinic3_response_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const void *buf_in, u32 in_size, u16 msg_id);
int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
index c38d10cd7fac..be2a2ae75fc0 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
@@ -3,19 +3,330 @@
#include "hinic3_eqs.h"
#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
#include "hinic3_mgmt.h"
+#define HINIC3_MSG_TO_MGMT_MAX_LEN 2016
+
+#define MGMT_MAX_PF_BUF_SIZE 2048UL
+#define MGMT_SEG_LEN_MAX 48
+#define MGMT_ASYNC_MSG_FLAG 0x8
+
+#define HINIC3_MGMT_WQ_NAME "hinic3_mgmt"
+
+/* Bogus sequence ID to prevent accidental match following partial message */
+#define MGMT_BOGUS_SEQ_ID \
+ (MGMT_MAX_PF_BUF_SIZE / MGMT_SEG_LEN_MAX + 1)
+
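+/* Handle a response from the mgmt CPU: the waiter is completed only when
+ * the message id matches the one recorded at send time and the send is
+ * still in COMM_SEND_EVENT_START state; anything else is logged as a
+ * mismatch or a late (already timed out) response.
+ */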
+static void
+hinic3_mgmt_resp_msg_handler(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic3_recv_msg *recv_msg)
+{
+ struct device *dev = pf_to_mgmt->hwdev->dev;
+
+ /* Ignore async msg */
+ if (recv_msg->msg_id & MGMT_ASYNC_MSG_FLAG)
+ return;
+
+ spin_lock(&pf_to_mgmt->sync_event_lock);
+ if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) {
+ dev_err(dev, "msg id mismatch, send msg id: 0x%x, recv msg id: 0x%x, event state: %d\n",
+ pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+ pf_to_mgmt->event_flag);
+ } else if (pf_to_mgmt->event_flag == COMM_SEND_EVENT_START) {
+ pf_to_mgmt->event_flag = COMM_SEND_EVENT_SUCCESS;
+ complete(&recv_msg->recv_done);
+ } else {
+ dev_err(dev, "Wait timeout, send msg id: 0x%x, recv msg id: 0x%x, event state: %d\n",
+ pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+ pf_to_mgmt->event_flag);
+ }
+ spin_unlock(&pf_to_mgmt->sync_event_lock);
+}
+
+static void hinic3_recv_mgmt_msg_work_handler(struct work_struct *work)
+{
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ struct mgmt_msg_handle_work *mgmt_work;
+ struct mgmt_msg_head *ack_cmd;
+
+ mgmt_work = container_of(work, struct mgmt_msg_handle_work, work);
+
+ /* At the moment, we do not expect any meaningful messages but if the
+ * sender expects an ACK we still need to provide one with "unsupported"
+ * status.
+ */
+ if (mgmt_work->async_mgmt_to_pf)
+ goto out;
+
+ pf_to_mgmt = mgmt_work->pf_to_mgmt;
+ ack_cmd = pf_to_mgmt->mgmt_ack_buf;
+ memset(ack_cmd, 0, sizeof(*ack_cmd));
+ ack_cmd->status = MGMT_STATUS_CMD_UNSUPPORTED;
+
+ hinic3_response_mbox_to_mgmt(pf_to_mgmt->hwdev, mgmt_work->mod,
+ mgmt_work->cmd, ack_cmd, sizeof(*ack_cmd),
+ mgmt_work->msg_id);
+
+out:
+ kfree(mgmt_work->msg);
+ kfree(mgmt_work);
+}
+
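+/* Append one segment to the reassembly buffer. Segments must arrive in
+ * order (seq_id increments, msg_id constant), every segment but the last
+ * must be exactly MGMT_SEG_LEN_MAX bytes, and the total must fit in
+ * MGMT_MAX_PF_BUF_SIZE. *is_complete is set once the LAST flag is seen.
+ */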
+static int hinic3_recv_msg_add_seg(struct hinic3_recv_msg *recv_msg,
+ __le64 msg_header, const void *seg_data,
+ bool *is_complete)
+{
+ u8 seq_id, msg_id, seg_len, is_last;
+ char *msg_buff;
+ u32 offset;
+
+ seg_len = MBOX_MSG_HEADER_GET(msg_header, SEG_LEN);
+ is_last = MBOX_MSG_HEADER_GET(msg_header, LAST);
+ seq_id = MBOX_MSG_HEADER_GET(msg_header, SEQID);
+ msg_id = MBOX_MSG_HEADER_GET(msg_header, MSG_ID);
+
+ if (seg_len > MGMT_SEG_LEN_MAX)
+ return -EINVAL;
+
+ /* All segments but last must be of maximal size */
+ if (seg_len != MGMT_SEG_LEN_MAX && !is_last)
+ return -EINVAL;
+
+ if (seq_id == 0) {
+ recv_msg->seq_id = seq_id;
+ recv_msg->msg_id = msg_id;
+ } else if (seq_id != recv_msg->seq_id + 1 ||
+ msg_id != recv_msg->msg_id) {
+ return -EINVAL;
+ }
+
+ offset = seq_id * MGMT_SEG_LEN_MAX;
+ if (offset + seg_len > MGMT_MAX_PF_BUF_SIZE)
+ return -EINVAL;
+
+ msg_buff = recv_msg->msg;
+ memcpy(msg_buff + offset, seg_data, seg_len);
+ recv_msg->msg_len = offset + seg_len;
+ recv_msg->seq_id = seq_id;
+ *is_complete = !!is_last;
+
+ return 0;
+}
+
+static void hinic3_init_mgmt_msg_work(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic3_recv_msg *recv_msg)
+{
+ struct mgmt_msg_handle_work *mgmt_work;
+
+ mgmt_work = kmalloc(sizeof(*mgmt_work), GFP_KERNEL);
+ if (!mgmt_work)
+ return;
+
+ if (recv_msg->msg_len) {
+ mgmt_work->msg = kmemdup(recv_msg->msg, recv_msg->msg_len,
+ GFP_KERNEL);
+ if (!mgmt_work->msg) {
+ kfree(mgmt_work);
+ return;
+ }
+ } else {
+ mgmt_work->msg = NULL;
+ }
+
+ mgmt_work->pf_to_mgmt = pf_to_mgmt;
+ mgmt_work->msg_len = recv_msg->msg_len;
+ mgmt_work->msg_id = recv_msg->msg_id;
+ mgmt_work->mod = recv_msg->mod;
+ mgmt_work->cmd = recv_msg->cmd;
+ mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+ INIT_WORK(&mgmt_work->work, hinic3_recv_mgmt_msg_work_handler);
+ queue_work(pf_to_mgmt->workq, &mgmt_work->work);
+}
+
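+/* Process one received AEQ segment: add it to the reassembly buffer and,
+ * once the message is complete, dispatch it to the synchronous-response
+ * path or hand it to a worker as an incoming request.
+ */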
+static void
+hinic3_recv_mgmt_msg_handler(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
+ const u8 *header,
+ struct hinic3_recv_msg *recv_msg)
+{
+ struct hinic3_hwdev *hwdev = pf_to_mgmt->hwdev;
+ const void *seg_data;
+ __le64 msg_header;
+ bool is_complete;
+ u8 dir, msg_id;
+ int err;
+
+ msg_header = *(__force __le64 *)header;
+ dir = MBOX_MSG_HEADER_GET(msg_header, DIRECTION);
+ msg_id = MBOX_MSG_HEADER_GET(msg_header, MSG_ID);
+	/* Async responses carry no waiter; drop them without reassembly */
+ if (dir == MBOX_MSG_RESP && (msg_id & MGMT_ASYNC_MSG_FLAG))
+ return;
+
+ seg_data = header + sizeof(msg_header);
+ err = hinic3_recv_msg_add_seg(recv_msg, msg_header,
+ seg_data, &is_complete);
+ if (err) {
+		dev_err(hwdev->dev, "Invalid receive segment\n");
+		/* Poison seq_id so no later segment can resume this message */
+		recv_msg->seq_id = MGMT_BOGUS_SEQ_ID;
+
+ return;
+ }
+
+ if (!is_complete)
+ return;
+
+ recv_msg->cmd = MBOX_MSG_HEADER_GET(msg_header, CMD);
+ recv_msg->mod = MBOX_MSG_HEADER_GET(msg_header, MODULE);
+ recv_msg->async_mgmt_to_pf = MBOX_MSG_HEADER_GET(msg_header, NO_ACK);
+ recv_msg->seq_id = MGMT_BOGUS_SEQ_ID;
+
+ if (dir == MBOX_MSG_RESP)
+ hinic3_mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
+ else
+ hinic3_init_mgmt_msg_work(pf_to_mgmt, recv_msg);
+}
+
+static int alloc_recv_msg(struct hinic3_recv_msg *recv_msg)
+{
+ recv_msg->seq_id = MGMT_BOGUS_SEQ_ID;
+
+ recv_msg->msg = kzalloc(MGMT_MAX_PF_BUF_SIZE, GFP_KERNEL);
+ if (!recv_msg->msg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_recv_msg(struct hinic3_recv_msg *recv_msg)
+{
+ kfree(recv_msg->msg);
+}
+
+static int alloc_msg_buf(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ struct device *dev = pf_to_mgmt->hwdev->dev;
+ int err;
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+ if (err) {
+ dev_err(dev, "Failed to allocate recv msg\n");
+ return err;
+ }
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ if (err) {
+ dev_err(dev, "Failed to allocate resp recv msg\n");
+ goto err_free_msg_from_mgmt;
+ }
+
+ pf_to_mgmt->mgmt_ack_buf = kzalloc(MGMT_MAX_PF_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->mgmt_ack_buf) {
+ err = -ENOMEM;
+ goto err_free_resp_msg_from_mgmt;
+ }
+
+ return 0;
+
+err_free_resp_msg_from_mgmt:
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+err_free_msg_from_mgmt:
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+
+ return err;
+}
+
+static void free_msg_buf(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ kfree(pf_to_mgmt->mgmt_ack_buf);
+
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+}
+
+int hinic3_pf_to_mgmt_init(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ int err;
+
+ pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
+ if (!pf_to_mgmt)
+ return -ENOMEM;
+
+ hwdev->pf_to_mgmt = pf_to_mgmt;
+ pf_to_mgmt->hwdev = hwdev;
+ spin_lock_init(&pf_to_mgmt->sync_event_lock);
+ pf_to_mgmt->workq = create_singlethread_workqueue(HINIC3_MGMT_WQ_NAME);
+ if (!pf_to_mgmt->workq) {
+ dev_err(hwdev->dev, "Failed to initialize MGMT workqueue\n");
+ err = -ENOMEM;
+ goto err_free_pf_to_mgmt;
+ }
+
+ err = alloc_msg_buf(pf_to_mgmt);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate msg buffers\n");
+ goto err_destroy_workqueue;
+ }
+
+ return 0;
+
+err_destroy_workqueue:
+ destroy_workqueue(pf_to_mgmt->workq);
+err_free_pf_to_mgmt:
+ kfree(pf_to_mgmt);
+
+ return err;
+}
+
+void hinic3_pf_to_mgmt_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
+
+	/* Destroy the workqueue before freeing the pf_to_mgmt resources so
+	 * that queued work cannot touch freed memory.
+	 */
+ destroy_workqueue(pf_to_mgmt->workq);
+
+ free_msg_buf(pf_to_mgmt);
+ kfree(pf_to_mgmt);
+}
+
void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev)
{
if (hwdev->aeqs)
flush_workqueue(hwdev->aeqs->workq);
+
+ if (HINIC3_IS_PF(hwdev) && hwdev->pf_to_mgmt)
+ flush_workqueue(hwdev->pf_to_mgmt->workq);
}
void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
u8 size)
{
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hinic3_recv_msg *recv_msg;
+ __le64 msg_header;
+ bool is_send_dir;
+
if (MBOX_MSG_HEADER_GET(*(__force __le64 *)header, SOURCE) ==
- MBOX_MSG_FROM_MBOX)
+ MBOX_MSG_FROM_MBOX) {
hinic3_mbox_func_aeqe_handler(hwdev, header, size);
+		return;
+	}
+
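+	/* The AEQ entry carries a mgmt message: SEND direction is a new
+	 * request from the mgmt CPU, RESP completes one of our sends.
+	 */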
+ pf_to_mgmt = hwdev->pf_to_mgmt;
+ msg_header = *(__force __le64 *)header;
+
+	is_send_dir = MBOX_MSG_HEADER_GET(msg_header, DIRECTION) ==
+		      MBOX_MSG_SEND;
+
+ recv_msg = is_send_dir ? &pf_to_mgmt->recv_msg_from_mgmt :
+ &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+ hinic3_recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
index bbef3b32a6ec..56f48d5442bc 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
@@ -6,8 +6,61 @@
#include <linux/types.h>
+#include "hinic3_mbox.h"
+#include "hinic3_hw_intf.h"
+
struct hinic3_hwdev;
+struct hinic3_recv_msg {
+	/* Preallocated buffer of size MGMT_MAX_PF_BUF_SIZE that accumulates
+	 * the received message, segment by segment.
+	 */
+ void *msg;
+ /* Message id for which segments are accumulated. */
+ u8 msg_id;
+ /* Sequence id of last received segment of current message. */
+ u8 seq_id;
+ u16 msg_len;
+ int async_mgmt_to_pf;
+ enum mgmt_mod_type mod;
+ u16 cmd;
+ struct completion recv_done;
+};
+
+enum comm_pf_to_mgmt_event_state {
+ COMM_SEND_EVENT_UNINIT,
+ COMM_SEND_EVENT_START,
+ COMM_SEND_EVENT_SUCCESS,
+ COMM_SEND_EVENT_TIMEOUT,
+};
+
+struct hinic3_msg_pf_to_mgmt {
+ struct hinic3_hwdev *hwdev;
+ struct workqueue_struct *workq;
+ void *mgmt_ack_buf;
+ struct hinic3_recv_msg recv_msg_from_mgmt;
+ struct hinic3_recv_msg recv_resp_msg_from_mgmt;
+ u16 async_msg_id;
+ u16 sync_msg_id;
+ void *async_msg_cb_data[MGMT_MOD_HW_MAX];
+ /* synchronizes message send with message receives via event queue */
+ spinlock_t sync_event_lock;
+ enum comm_pf_to_mgmt_event_state event_flag;
+};
+
+struct mgmt_msg_handle_work {
+ struct work_struct work;
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ void *msg;
+ u16 msg_len;
+ enum mgmt_mod_type mod;
+ u16 cmd;
+ u16 msg_id;
+ int async_mgmt_to_pf;
+};
+
+int hinic3_pf_to_mgmt_init(struct hinic3_hwdev *hwdev);
+void hinic3_pf_to_mgmt_free(struct hinic3_hwdev *hwdev);
void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev);
void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev,
u8 *header, u8 size);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
index 6cc0345c39e4..c0c87a8c2198 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
@@ -56,6 +56,31 @@ struct l2nic_cmd_update_mac {
u8 new_mac[ETH_ALEN];
};
+struct l2nic_cmd_vlan_config {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd1;
+ u16 vlan_id;
+ u16 rsvd2;
+};
+
+struct l2nic_cmd_vlan_offload {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 vlan_offload;
+ u8 rsvd1[5];
+};
+
+/* set vlan filter */
+struct l2nic_cmd_set_vlan_filter {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 rsvd[2];
+	/* bit0: vlan_filter_en; bit1: broadcast_filter_en */
+ u32 vlan_filter_ctrl;
+};
+
struct l2nic_cmd_set_ci_attr {
struct mgmt_msg_head msg_head;
u16 func_idx;
@@ -90,6 +115,22 @@ struct l2nic_cmd_set_vport_state {
u8 rsvd2[3];
};
+/* Definition of the NIC receiving mode */
+#define L2NIC_RX_MODE_UC 0x01
+#define L2NIC_RX_MODE_MC 0x02
+#define L2NIC_RX_MODE_BC 0x04
+#define L2NIC_RX_MODE_MC_ALL 0x08
+#define L2NIC_RX_MODE_PROMISC 0x10
+
+struct l2nic_rx_mode_config {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_mode;
+};
+
struct l2nic_cmd_set_dcb_state {
struct mgmt_msg_head head;
u16 func_id;
@@ -102,6 +143,26 @@ struct l2nic_cmd_set_dcb_state {
u8 rsvd[7];
};
+struct l2nic_cmd_lro_config {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd1;
+ u8 lro_ipv4_en;
+ u8 lro_ipv6_en;
+ /* unit is 1K */
+ u8 lro_max_pkt_len;
+ u8 resv2[13];
+};
+
+struct l2nic_cmd_lro_timer {
+ struct mgmt_msg_head msg_head;
+ /* 1: set timer value, 0: get timer value */
+ u8 opcode;
+ u8 rsvd[3];
+ u32 timer;
+};
+
#define L2NIC_RSS_TYPE_VALID_MASK BIT(23)
#define L2NIC_RSS_TYPE_TCP_IPV6_EXT_MASK BIT(24)
#define L2NIC_RSS_TYPE_IPV6_EXT_MASK BIT(25)
@@ -160,12 +221,19 @@ enum l2nic_cmd {
/* FUNC CFG */
L2NIC_CMD_SET_FUNC_TBL = 5,
L2NIC_CMD_SET_VPORT_ENABLE = 6,
+ L2NIC_CMD_SET_RX_MODE = 7,
L2NIC_CMD_SET_SQ_CI_ATTR = 8,
L2NIC_CMD_CLEAR_QP_RESOURCE = 11,
+ L2NIC_CMD_CFG_RX_LRO = 13,
+ L2NIC_CMD_CFG_LRO_TIMER = 14,
L2NIC_CMD_FEATURE_NEGO = 15,
+ L2NIC_CMD_GET_MAC = 20,
L2NIC_CMD_SET_MAC = 21,
L2NIC_CMD_DEL_MAC = 22,
L2NIC_CMD_UPDATE_MAC = 23,
+ L2NIC_CMD_CFG_FUNC_VLAN = 25,
+ L2NIC_CMD_SET_VLAN_FILTER_EN = 26,
+ L2NIC_CMD_SET_RX_VLAN_OFFLOAD = 27,
L2NIC_CMD_CFG_RSS = 60,
L2NIC_CMD_CFG_RSS_HASH_KEY = 63,
L2NIC_CMD_CFG_RSS_HASH_ENGINE = 64,
@@ -189,6 +257,7 @@ enum l2nic_ucode_cmd {
/* hilink mac group command */
enum mag_cmd {
+ MAG_CMD_SET_PORT_ENABLE = 6,
MAG_CMD_GET_LINK_STATUS = 7,
};
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
index bbf22811a029..75adfe897e81 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
@@ -2,7 +2,9 @@
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/netdevice.h>
+#include <net/vxlan.h>
#include "hinic3_hwif.h"
#include "hinic3_nic_cfg.h"
@@ -12,6 +14,15 @@
#include "hinic3_rx.h"
#include "hinic3_tx.h"
+#define HINIC3_LRO_DEFAULT_COAL_PKT_SIZE 32
+#define HINIC3_LRO_DEFAULT_TIME_LIMIT 16
+
+#define VLAN_BITMAP_BITS_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap) * 8)
+#define VID_LINE(nic_dev, vid) \
+ ((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev))
+#define VID_COL(nic_dev, vid) \
+ ((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1))
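+
+/* The VLAN bitmap is an array of unsigned longs: VID_LINE() selects the
+ * array element for a given VLAN id, VID_COL() the bit within it.
+ */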
+
/* try to modify the number of irq to the target number,
* and return the actual number of irq.
*/
@@ -327,6 +338,31 @@ static void hinic3_close_channel(struct net_device *netdev)
hinic3_free_qp_ctxts(nic_dev);
}
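+
+/* Serialize port enable/disable commands; port_state_mutex keeps
+ * concurrent up/down paths from interleaving the hardware state.
+ */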
+static int hinic3_maybe_set_port_state(struct net_device *netdev, bool enable)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&nic_dev->port_state_mutex);
+ err = hinic3_set_port_enable(nic_dev->hwdev, enable);
+ mutex_unlock(&nic_dev->port_state_mutex);
+
+ return err;
+}
+
+static void hinic3_print_link_message(struct net_device *netdev,
+ bool link_status_up)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (nic_dev->link_status_up == link_status_up)
+ return;
+
+ nic_dev->link_status_up = link_status_up;
+
+ netdev_dbg(netdev, "Link is %s\n", str_up_down(link_status_up));
+}
+
static int hinic3_vport_up(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -341,11 +377,17 @@ static int hinic3_vport_up(struct net_device *netdev)
goto err_flush_qps_res;
}
+ err = hinic3_maybe_set_port_state(netdev, true);
+ if (err) {
+ netdev_err(netdev, "Failed to enable port\n");
+ goto err_disable_vport;
+ }
+
err = netif_set_real_num_queues(netdev, nic_dev->q_params.num_qps,
nic_dev->q_params.num_qps);
if (err) {
netdev_err(netdev, "Failed to set real number of queues\n");
- goto err_flush_qps_res;
+ goto err_disable_vport;
}
netif_tx_start_all_queues(netdev);
@@ -353,8 +395,12 @@ static int hinic3_vport_up(struct net_device *netdev)
if (!err && link_status_up)
netif_carrier_on(netdev);
+ hinic3_print_link_message(netdev, link_status_up);
+
return 0;
+err_disable_vport:
+ hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false);
err_flush_qps_res:
hinic3_flush_qps_res(nic_dev->hwdev);
/* wait to guarantee that no packets will be sent to host */
@@ -386,6 +432,11 @@ static int hinic3_open(struct net_device *netdev)
struct hinic3_dyna_qp_params qp_params;
int err;
+ if (test_bit(HINIC3_INTF_UP, &nic_dev->flags)) {
+ netdev_dbg(netdev, "Netdev already open, do nothing\n");
+ return 0;
+ }
+
err = hinic3_init_nicio_res(nic_dev);
if (err) {
netdev_err(netdev, "Failed to init nicio resources\n");
@@ -413,6 +464,8 @@ static int hinic3_open(struct net_device *netdev)
if (err)
goto err_close_channel;
+ set_bit(HINIC3_INTF_UP, &nic_dev->flags);
+
return 0;
err_close_channel:
@@ -433,6 +486,11 @@ static int hinic3_close(struct net_device *netdev)
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic3_dyna_qp_params qp_params;
+ if (!test_and_clear_bit(HINIC3_INTF_UP, &nic_dev->flags)) {
+		netdev_dbg(netdev, "Netdev already closed, do nothing\n");
+ return 0;
+ }
+
hinic3_vport_down(netdev);
hinic3_close_channel(netdev);
hinic3_uninit_qps(nic_dev, &qp_params);
@@ -441,6 +499,172 @@ static int hinic3_close(struct net_device *netdev)
return 0;
}
+#define SET_FEATURES_OP_STR(op) ((op) ? "Enable" : "Disable")
+
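+/* RX checksum and TSO toggles need no device reconfiguration; the two
+ * handlers below only log the transition and always succeed.
+ */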
+static int hinic3_set_feature_rx_csum(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t features,
+ netdev_features_t *failed_features)
+{
+ netdev_features_t changed = wanted_features ^ features;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ if (changed & NETIF_F_RXCSUM)
+ dev_dbg(hwdev->dev, "%s rx csum success\n",
+ SET_FEATURES_OP_STR(wanted_features & NETIF_F_RXCSUM));
+
+ return 0;
+}
+
+static int hinic3_set_feature_tso(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t features,
+ netdev_features_t *failed_features)
+{
+ netdev_features_t changed = wanted_features ^ features;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ if (changed & NETIF_F_TSO)
+ dev_dbg(hwdev->dev, "%s tso success\n",
+ SET_FEATURES_OP_STR(wanted_features & NETIF_F_TSO));
+
+ return 0;
+}
+
+static int hinic3_set_feature_lro(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t features,
+ netdev_features_t *failed_features)
+{
+ netdev_features_t changed = wanted_features ^ features;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ bool en = !!(wanted_features & NETIF_F_LRO);
+ int err;
+
+ if (!(changed & NETIF_F_LRO))
+ return 0;
+
+ err = hinic3_set_rx_lro_state(hwdev, en,
+ HINIC3_LRO_DEFAULT_TIME_LIMIT,
+ HINIC3_LRO_DEFAULT_COAL_PKT_SIZE);
+ if (err) {
+ dev_err(hwdev->dev, "%s lro failed\n", SET_FEATURES_OP_STR(en));
+ *failed_features |= NETIF_F_LRO;
+ }
+
+ return err;
+}
+
+static int hinic3_set_feature_rx_cvlan(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t features,
+ netdev_features_t *failed_features)
+{
+ bool en = !!(wanted_features & NETIF_F_HW_VLAN_CTAG_RX);
+ netdev_features_t changed = wanted_features ^ features;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
+ return 0;
+
+ err = hinic3_set_rx_vlan_offload(hwdev, en);
+ if (err) {
+ dev_err(hwdev->dev, "%s rx vlan offload failed\n",
+ SET_FEATURES_OP_STR(en));
+ *failed_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ return err;
+}
+
+static int hinic3_set_feature_vlan_filter(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t features,
+ netdev_features_t *failed_features)
+{
+ bool en = !!(wanted_features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ netdev_features_t changed = wanted_features ^ features;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ if (!(changed & NETIF_F_HW_VLAN_CTAG_FILTER))
+ return 0;
+
+ err = hinic3_set_vlan_filter(hwdev, en);
+ if (err) {
+ dev_err(hwdev->dev, "%s rx vlan filter failed\n",
+ SET_FEATURES_OP_STR(en));
+ *failed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
+ return err;
+}
+
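+/* Run every feature handler (bitwise '|' rather than '||' so none is
+ * short-circuited), then strip the features that failed from the result.
+ */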
+static int hinic3_set_features(struct net_device *netdev,
+ netdev_features_t curr,
+ netdev_features_t wanted)
+{
+ netdev_features_t failed = 0;
+ int err;
+
+ err = hinic3_set_feature_rx_csum(netdev, wanted, curr, &failed) |
+ hinic3_set_feature_tso(netdev, wanted, curr, &failed) |
+ hinic3_set_feature_lro(netdev, wanted, curr, &failed) |
+ hinic3_set_feature_rx_cvlan(netdev, wanted, curr, &failed) |
+ hinic3_set_feature_vlan_filter(netdev, wanted, curr, &failed);
+ if (err) {
+ netdev->features = wanted ^ failed;
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_ndo_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ return hinic3_set_features(netdev, netdev->features, features);
+}
+
+static netdev_features_t hinic3_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t features_tmp = features;
+
+ /* If Rx checksum is disabled, then LRO should also be disabled */
+ if (!(features_tmp & NETIF_F_RXCSUM))
+ features_tmp &= ~NETIF_F_LRO;
+
+ return features_tmp;
+}
+
+static netdev_features_t hinic3_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ features = vlan_features_check(skb, features);
+ features = vxlan_features_check(skb, features);
+
+ return features;
+}
+
+int hinic3_set_hw_features(struct net_device *netdev)
+{
+ netdev_features_t wanted, curr;
+
+ wanted = netdev->features;
+ /* fake current features so all wanted are enabled */
+ curr = ~wanted;
+
+ return hinic3_set_features(netdev, curr, wanted);
+}
+
static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
{
int err;
@@ -482,11 +706,162 @@ static int hinic3_set_mac_addr(struct net_device *netdev, void *addr)
return 0;
}
+static int hinic3_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
+ u32 column, row;
+ u16 func_id;
+ int err;
+
+ column = VID_COL(nic_dev, vid);
+ row = VID_LINE(nic_dev, vid);
+
+ func_id = hinic3_global_func_id(nic_dev->hwdev);
+
+ err = hinic3_add_vlan(nic_dev->hwdev, vid, func_id);
+ if (err) {
+ netdev_err(netdev, "Failed to add vlan %u\n", vid);
+ goto out;
+ }
+
+ set_bit(column, &vlan_bitmap[row]);
+ netdev_dbg(netdev, "Add vlan %u\n", vid);
+
+out:
+ return err;
+}
+
+static int hinic3_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
+ u32 column, row;
+ u16 func_id;
+ int err;
+
+ column = VID_COL(nic_dev, vid);
+ row = VID_LINE(nic_dev, vid);
+
+ func_id = hinic3_global_func_id(nic_dev->hwdev);
+ err = hinic3_del_vlan(nic_dev->hwdev, vid, func_id);
+ if (err) {
+ netdev_err(netdev, "Failed to delete vlan %u\n", vid);
+ goto out;
+ }
+
+ clear_bit(column, &vlan_bitmap[row]);
+ netdev_dbg(netdev, "Remove vlan %u\n", vid);
+
+out:
+ return err;
+}
+
+static void hinic3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_io_queue *sq;
+ u16 sw_pi, hw_ci;
+
+ sq = nic_dev->txqs[txqueue].sq;
+ sw_pi = hinic3_get_sq_local_pi(sq);
+ hw_ci = hinic3_get_sq_hw_ci(sq);
+ netdev_dbg(netdev,
+ "txq%u: sw_pi: %u, hw_ci: %u, sw_ci: %u, napi->state: 0x%lx.\n",
+ txqueue, sw_pi, hw_ci, hinic3_get_sq_local_ci(sq),
+ nic_dev->q_params.irq_cfg[txqueue].napi.state);
+
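+	/* A producer index ahead of the HW consumer index means descriptors
+	 * are stuck; flag the event worker (handled elsewhere) to recover.
+	 */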
+ if (sw_pi != hw_ci)
+ set_bit(HINIC3_EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag);
+}
+
+static void hinic3_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u64 bytes, packets, dropped, errors;
+ struct hinic3_txq_stats *txq_stats;
+ struct hinic3_rxq_stats *rxq_stats;
+ struct hinic3_txq *txq;
+ struct hinic3_rxq *rxq;
+ unsigned int start;
+ int i;
+
+ bytes = 0;
+ packets = 0;
+ dropped = 0;
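+	/* Snapshot per-queue counters under the u64_stats seqcount and
+	 * retry if a writer updated the stats mid-read.
+	 */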
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ if (!nic_dev->txqs)
+ break;
+
+ txq = &nic_dev->txqs[i];
+ txq_stats = &txq->txq_stats;
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ bytes += txq_stats->bytes;
+ packets += txq_stats->packets;
+ dropped += txq_stats->dropped;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ }
+ stats->tx_packets = packets;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = dropped;
+
+ bytes = 0;
+ packets = 0;
+ errors = 0;
+ dropped = 0;
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ if (!nic_dev->rxqs)
+ break;
+
+ rxq = &nic_dev->rxqs[i];
+ rxq_stats = &rxq->rxq_stats;
+ do {
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ bytes += rxq_stats->bytes;
+ packets += rxq_stats->packets;
+ errors += rxq_stats->csum_errors +
+ rxq_stats->other_errors;
+ dropped += rxq_stats->dropped;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ }
+ stats->rx_packets = packets;
+ stats->rx_bytes = bytes;
+ stats->rx_errors = errors;
+ stats->rx_dropped = dropped;
+}
+
+static void hinic3_nic_set_rx_mode(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt ||
+ netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) {
+ set_bit(HINIC3_UPDATE_MAC_FILTER, &nic_dev->flags);
+ nic_dev->netdev_uc_cnt = netdev_uc_count(netdev);
+ nic_dev->netdev_mc_cnt = netdev_mc_count(netdev);
+ }
+
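+	/* Synchronizing filters with HW goes over the management mailbox
+	 * and may sleep, while ndo_set_rx_mode runs with the address list
+	 * lock held, so defer the actual sync to the workqueue.
+	 */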
+ queue_work(nic_dev->workq, &nic_dev->rx_mode_work);
+}
+
static const struct net_device_ops hinic3_netdev_ops = {
.ndo_open = hinic3_open,
.ndo_stop = hinic3_close,
+ .ndo_set_features = hinic3_ndo_set_features,
+ .ndo_fix_features = hinic3_fix_features,
+ .ndo_features_check = hinic3_features_check,
.ndo_change_mtu = hinic3_change_mtu,
.ndo_set_mac_address = hinic3_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = hinic3_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = hinic3_vlan_rx_kill_vid,
+ .ndo_tx_timeout = hinic3_tx_timeout,
+ .ndo_get_stats64 = hinic3_get_stats64,
+ .ndo_set_rx_mode = hinic3_nic_set_rx_mode,
.ndo_start_xmit = hinic3_xmit_frame,
};
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
index 979f47ca77f9..44abccf9cb29 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
@@ -10,6 +10,9 @@
#include "hinic3_nic_dev.h"
#include "hinic3_nic_io.h"
+#define MGMT_MSG_CMD_OP_ADD 1
+#define MGMT_MSG_CMD_OP_DEL 0
+
static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
u64 *s_feature, u16 size)
{
@@ -20,7 +23,8 @@ static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
feature_nego.func_id = hinic3_global_func_id(hwdev);
feature_nego.opcode = opcode;
if (opcode == MGMT_MSG_CMD_OP_SET)
- memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64));
+ memcpy(feature_nego.s_feature, s_feature,
+ array_size(size, sizeof(u64)));
mgmt_msg_params_init_default(&msg_params, &feature_nego,
sizeof(feature_nego));
@@ -34,7 +38,8 @@ static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
}
if (opcode == MGMT_MSG_CMD_OP_GET)
- memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64));
+ memcpy(s_feature, feature_nego.s_feature,
+ array_size(size, sizeof(u64)));
return 0;
}
@@ -57,6 +62,136 @@ bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
return (nic_dev->nic_io->feature_cap & feature_bits) == feature_bits;
}
+static int hinic3_set_rx_lro(struct hinic3_hwdev *hwdev, u8 ipv4_en, u8 ipv6_en,
+ u8 lro_max_pkt_len)
+{
+ struct l2nic_cmd_lro_config lro_cfg = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ lro_cfg.func_id = hinic3_global_func_id(hwdev);
+ lro_cfg.opcode = MGMT_MSG_CMD_OP_SET;
+ lro_cfg.lro_ipv4_en = ipv4_en;
+ lro_cfg.lro_ipv6_en = ipv6_en;
+ lro_cfg.lro_max_pkt_len = lro_max_pkt_len;
+
+ mgmt_msg_params_init_default(&msg_params, &lro_cfg,
+ sizeof(lro_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RX_LRO,
+ &msg_params);
+
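+	/* A mailbox command can fail in transport (err) or be rejected by
+	 * the FW (msg_head.status); both count as failure.
+	 */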
+ if (err || lro_cfg.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set lro offload, err: %d, status: 0x%x\n",
+ err, lro_cfg.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic3_set_rx_lro_timer(struct hinic3_hwdev *hwdev, u32 timer_value)
+{
+ struct l2nic_cmd_lro_timer lro_timer = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ lro_timer.opcode = MGMT_MSG_CMD_OP_SET;
+ lro_timer.timer = timer_value;
+
+ mgmt_msg_params_init_default(&msg_params, &lro_timer,
+ sizeof(lro_timer));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_LRO_TIMER,
+ &msg_params);
+
+ if (err || lro_timer.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set lro timer, err: %d, status: 0x%x\n",
+ err, lro_timer.msg_head.status);
+
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_set_rx_lro_state(struct hinic3_hwdev *hwdev, u8 lro_en,
+ u32 lro_timer, u8 lro_max_pkt_len)
+{
+ u8 ipv4_en, ipv6_en;
+ int err;
+
+ ipv4_en = lro_en ? 1 : 0;
+ ipv6_en = lro_en ? 1 : 0;
+
+	dev_dbg(hwdev->dev, "Set LRO max coalesced packet size to %uK\n",
+ lro_max_pkt_len);
+
+ err = hinic3_set_rx_lro(hwdev, ipv4_en, ipv6_en, lro_max_pkt_len);
+ if (err)
+ return err;
+
+	/* the LRO timer is not configured for a VF */
+ if (HINIC3_IS_VF(hwdev))
+ return 0;
+
+ dev_dbg(hwdev->dev, "Set LRO timer to %u\n", lro_timer);
+
+ return hinic3_set_rx_lro_timer(hwdev, lro_timer);
+}
+
+int hinic3_set_rx_vlan_offload(struct hinic3_hwdev *hwdev, u8 en)
+{
+ struct l2nic_cmd_vlan_offload vlan_cfg = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ vlan_cfg.func_id = hinic3_global_func_id(hwdev);
+ vlan_cfg.vlan_offload = en;
+
+ mgmt_msg_params_init_default(&msg_params, &vlan_cfg,
+ sizeof(vlan_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_RX_VLAN_OFFLOAD,
+ &msg_params);
+
+ if (err || vlan_cfg.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set rx vlan offload, err: %d, status: 0x%x\n",
+ err, vlan_cfg.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_set_vlan_filter(struct hinic3_hwdev *hwdev, u32 vlan_filter_ctrl)
+{
+ struct l2nic_cmd_set_vlan_filter vlan_filter = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ vlan_filter.func_id = hinic3_global_func_id(hwdev);
+ vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl;
+
+ mgmt_msg_params_init_default(&msg_params, &vlan_filter,
+ sizeof(vlan_filter));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_VLAN_FILTER_EN,
+ &msg_params);
+
+ if (err || vlan_filter.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set vlan filter, err: %d, status: 0x%x\n",
+ err, vlan_filter.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap)
{
nic_dev->nic_io->feature_cap = feature_cap;
@@ -117,17 +252,52 @@ int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu)
&func_tbl_cfg);
}
+static bool hinic3_check_vf_set_by_pf(struct hinic3_hwdev *hwdev,
+ u8 status)
+{
+ return HINIC3_IS_VF(hwdev) && status == HINIC3_PF_SET_VF_ALREADY;
+}
+
static int hinic3_check_mac_info(struct hinic3_hwdev *hwdev, u8 status,
u16 vlan_id)
{
if ((status && status != MGMT_STATUS_EXIST) ||
((vlan_id & BIT(15)) && status == MGMT_STATUS_EXIST)) {
+ if (hinic3_check_vf_set_by_pf(hwdev, status))
+ return 0;
+
return -EINVAL;
}
return 0;
}
+int hinic3_get_default_mac(struct hinic3_hwdev *hwdev, u8 *mac_addr)
+{
+ struct l2nic_cmd_set_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ mac_info.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_GET_MAC,
+ &msg_params);
+
+ if (err || mac_info.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to get mac, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return -EFAULT;
+ }
+
+ ether_addr_copy(mac_addr, mac_info.mac);
+
+ return 0;
+}
+
int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
u16 func_id)
{
@@ -157,9 +327,9 @@ int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
return -EIO;
}
- if (mac_info.msg_head.status == MGMT_STATUS_PF_SET_VF_ALREADY) {
+ if (hinic3_check_vf_set_by_pf(hwdev, mac_info.msg_head.status)) {
dev_warn(hwdev->dev, "PF has already set VF mac, Ignore set operation\n");
- return 0;
+ return -EADDRINUSE;
}
if (mac_info.msg_head.status == MGMT_STATUS_EXIST) {
@@ -191,11 +361,18 @@ int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
L2NIC_CMD_DEL_MAC, &msg_params);
- if (err) {
+ if (err ||
+ (mac_info.msg_head.status &&
+ !hinic3_check_vf_set_by_pf(hwdev, mac_info.msg_head.status))) {
dev_err(hwdev->dev,
"Failed to delete MAC, err: %d, status: 0x%x\n",
err, mac_info.msg_head.status);
- return err;
+ return -EFAULT;
+ }
+
+ if (hinic3_check_vf_set_by_pf(hwdev, mac_info.msg_head.status)) {
+ dev_warn(hwdev->dev, "PF has already set VF mac, Ignore delete operation.\n");
+ return -EADDRINUSE;
}
return 0;
@@ -231,6 +408,17 @@ int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
return -EIO;
}
+ if (hinic3_check_vf_set_by_pf(hwdev, mac_info.msg_head.status)) {
+ dev_warn(hwdev->dev, "PF has already set VF MAC. Ignore update operation\n");
+ return -EADDRINUSE;
+ }
+
+ if (mac_info.msg_head.status == HINIC3_MGMT_STATUS_EXIST) {
+		dev_warn(hwdev->dev,
+			 "MAC address already exists, ignore update operation\n");
+ return 0;
+ }
+
return 0;
}
@@ -313,6 +501,96 @@ int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev)
return pkt_drop.msg_head.status;
}
+int hinic3_set_rx_mode(struct hinic3_hwdev *hwdev, u32 rx_mode)
+{
+ struct l2nic_rx_mode_config rx_mode_cfg = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ rx_mode_cfg.func_id = hinic3_global_func_id(hwdev);
+ rx_mode_cfg.rx_mode = rx_mode;
+
+ mgmt_msg_params_init_default(&msg_params, &rx_mode_cfg,
+ sizeof(rx_mode_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_RX_MODE, &msg_params);
+
+ if (err || rx_mode_cfg.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set rx mode, err: %d, status: 0x%x\n",
+ err, rx_mode_cfg.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic3_config_vlan(struct hinic3_hwdev *hwdev,
+ u8 opcode, u16 vlan_id, u16 func_id)
+{
+ struct l2nic_cmd_vlan_config vlan_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ vlan_info.opcode = opcode;
+ vlan_info.func_id = func_id;
+ vlan_info.vlan_id = vlan_id;
+
+ mgmt_msg_params_init_default(&msg_params, &vlan_info,
+ sizeof(vlan_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_FUNC_VLAN, &msg_params);
+
+ if (err || vlan_info.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to %s vlan, err: %d, status: 0x%x\n",
+ opcode == MGMT_MSG_CMD_OP_ADD ? "add" : "delete",
+ err, vlan_info.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_add_vlan(struct hinic3_hwdev *hwdev, u16 vlan_id, u16 func_id)
+{
+ return hinic3_config_vlan(hwdev, MGMT_MSG_CMD_OP_ADD, vlan_id, func_id);
+}
+
+int hinic3_del_vlan(struct hinic3_hwdev *hwdev, u16 vlan_id, u16 func_id)
+{
+ return hinic3_config_vlan(hwdev, MGMT_MSG_CMD_OP_DEL, vlan_id, func_id);
+}
+
+int hinic3_set_port_enable(struct hinic3_hwdev *hwdev, bool enable)
+{
+ struct mag_cmd_set_port_enable en_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if (HINIC3_IS_VF(hwdev))
+ return 0;
+
+ en_state.function_id = hinic3_global_func_id(hwdev);
+ en_state.state = enable ? MAG_CMD_TX_ENABLE | MAG_CMD_RX_ENABLE :
+ MAG_CMD_PORT_DISABLE;
+
+ mgmt_msg_params_init_default(&msg_params, &en_state,
+ sizeof(en_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_HILINK,
+ MAG_CMD_SET_PORT_ENABLE, &msg_params);
+
+ if (err || en_state.head.status) {
+ dev_err(hwdev->dev, "Failed to set port state, err: %d, status: 0x%x\n",
+ err, en_state.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state)
{
struct l2nic_cmd_set_dcb_state dcb_state = {};
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
index b83b567fa542..c32eaa886e17 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
@@ -16,10 +16,13 @@ struct hinic3_nic_dev;
#define HINIC3_MAX_JUMBO_FRAME_SIZE 9600
#define HINIC3_VLAN_ID_MASK 0x7FFF
+#define HINIC3_PF_SET_VF_ALREADY 0x4
+#define HINIC3_MGMT_STATUS_EXIST 0x6
enum hinic3_nic_event_type {
HINIC3_NIC_EVENT_LINK_DOWN = 0,
HINIC3_NIC_EVENT_LINK_UP = 1,
+ HINIC3_NIC_EVENT_PORT_MODULE_EVENT = 2,
};
struct hinic3_sq_attr {
@@ -32,15 +35,55 @@ struct hinic3_sq_attr {
u64 ci_dma_base;
};
+#define MAG_CMD_PORT_DISABLE 0x0
+#define MAG_CMD_TX_ENABLE 0x1
+#define MAG_CMD_RX_ENABLE 0x2
+/* The physical port is disabled only when all PFs on the port are set to
+ * down; if any PF is enabled, the port is enabled.
+ */
+struct mag_cmd_set_port_enable {
+ struct mgmt_msg_head head;
+
+ u16 function_id;
+ u16 rsvd0;
+
+ /* bitmap bit0:tx_en bit1:rx_en */
+ u8 state;
+ u8 rsvd1[3];
+};
+
+enum link_err_type {
+	LINK_ERR_MODULE_UNRECOGNIZED,
+ LINK_ERR_NUM,
+};
+
+enum port_module_event_type {
+ HINIC3_PORT_MODULE_CABLE_PLUGGED,
+ HINIC3_PORT_MODULE_CABLE_UNPLUGGED,
+ HINIC3_PORT_MODULE_LINK_ERR,
+ HINIC3_PORT_MODULE_MAX_EVENT,
+};
+
+struct hinic3_port_module_event {
+ enum port_module_event_type type;
+ enum link_err_type err_type;
+};
+
int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev);
int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev);
bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
enum hinic3_nic_feature_cap feature_bits);
void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap);
+int hinic3_set_rx_lro_state(struct hinic3_hwdev *hwdev, u8 lro_en,
+ u32 lro_timer, u8 lro_max_pkt_len);
+int hinic3_set_rx_vlan_offload(struct hinic3_hwdev *hwdev, u8 en);
+int hinic3_set_vlan_filter(struct hinic3_hwdev *hwdev, u32 vlan_filter_ctrl);
+
int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev);
int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu);
+int hinic3_get_default_mac(struct hinic3_hwdev *hwdev, u8 *mac_addr);
int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
u16 func_id);
int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
@@ -52,10 +95,14 @@ int hinic3_set_ci_table(struct hinic3_hwdev *hwdev,
struct hinic3_sq_attr *attr);
int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev);
int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev);
+int hinic3_set_rx_mode(struct hinic3_hwdev *hwdev, u32 rx_mode);
int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state);
+int hinic3_set_port_enable(struct hinic3_hwdev *hwdev, bool enable);
int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up);
int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id,
bool enable);
+int hinic3_add_vlan(struct hinic3_hwdev *hwdev, u16 vlan_id, u16 func_id);
+int hinic3_del_vlan(struct hinic3_hwdev *hwdev, u16 vlan_id, u16 func_id);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
index 5ba83261616c..29189241f446 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -4,13 +4,47 @@
#ifndef _HINIC3_NIC_DEV_H_
#define _HINIC3_NIC_DEV_H_
+#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include "hinic3_hw_cfg.h"
+#include "hinic3_hwdev.h"
#include "hinic3_mgmt_interface.h"
+#define HINIC3_VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap))
+#define HINIC3_VLAN_BITMAP_SIZE(nic_dev) \
+ (VLAN_N_VID / HINIC3_VLAN_BITMAP_BYTE_SIZE(nic_dev))
+
enum hinic3_flags {
+ HINIC3_INTF_UP,
+ HINIC3_MAC_FILTER_CHANGED,
HINIC3_RSS_ENABLE,
+ HINIC3_UPDATE_MAC_FILTER,
+};
+
+enum hinic3_event_work_flags {
+ HINIC3_EVENT_WORK_TX_TIMEOUT,
+};
+
+enum hinic3_rx_mode_state {
+ HINIC3_HW_PROMISC_ON,
+ HINIC3_HW_ALLMULTI_ON,
+ HINIC3_PROMISC_FORCE_ON,
+ HINIC3_ALLMULTI_FORCE_ON,
+};
+
+enum hinic3_mac_filter_state {
+ HINIC3_MAC_WAIT_HW_SYNC,
+ HINIC3_MAC_HW_SYNCING,
+ HINIC3_MAC_HW_SYNCED,
+ HINIC3_MAC_WAIT_HW_UNSYNC,
+ HINIC3_MAC_HW_UNSYNCED,
+};
+
+struct hinic3_mac_filter {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ unsigned long state;
};
enum hinic3_rss_hash_type {
@@ -39,6 +73,7 @@ struct hinic3_irq_cfg {
cpumask_t affinity_mask;
struct hinic3_txq *txq;
struct hinic3_rxq *rxq;
+ u16 total_events;
};
struct hinic3_dyna_txrxq_params {
@@ -55,6 +90,9 @@ struct hinic3_intr_coal_info {
u8 pending_limit;
u8 coalesce_timer_cfg;
u8 resend_timer_cfg;
+
+ u8 rx_pending_limit_low;
+ u8 rx_pending_limit_high;
};
struct hinic3_nic_dev {
@@ -66,6 +104,7 @@ struct hinic3_nic_dev {
u16 max_qps;
u16 rx_buf_len;
u32 lro_replenish_thld;
+ unsigned long *vlan_bitmap;
unsigned long flags;
struct hinic3_nic_service_cap nic_svc_cap;
@@ -82,12 +121,31 @@ struct hinic3_nic_dev {
struct msix_entry *qps_msix_entries;
struct hinic3_intr_coal_info *intr_coalesce;
-
+ u32 adaptive_rx_coal;
+
+ struct workqueue_struct *workq;
+ struct delayed_work periodic_work;
+ struct work_struct rx_mode_work;
+ /* lock for enable/disable port */
+ struct mutex port_state_mutex;
+
+ struct list_head uc_filter_list;
+ struct list_head mc_filter_list;
+ unsigned long rx_mod_state;
+ int netdev_uc_cnt;
+ int netdev_mc_cnt;
+
+ /* flag bits defined by hinic3_event_work_flags */
+ unsigned long event_flag;
bool link_status_up;
};
void hinic3_set_netdev_ops(struct net_device *netdev);
+int hinic3_set_hw_features(struct net_device *netdev);
int hinic3_qps_irq_init(struct net_device *netdev);
void hinic3_qps_irq_uninit(struct net_device *netdev);
+void hinic3_set_rx_mode_work(struct work_struct *work);
+void hinic3_clean_mac_list_filter(struct net_device *netdev);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
index d86cd1ba4605..90887d2bb127 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
@@ -162,6 +162,9 @@ struct hinic3_clean_queue_ctxt {
#define SQ_CTXT_WQ_BLOCK_SET(val, member) \
FIELD_PREP(SQ_CTXT_WQ_BLOCK_##member##_MASK, val)
+/* reuse SQ macro for RQ because the hardware format is identical */
+#define RQ_CTXT_PREF_CI_HI(val) SQ_CTXT_PREF_CI_HI(val)
+
#define RQ_CTXT_PI_IDX_MASK GENMASK(15, 0)
#define RQ_CTXT_CI_IDX_MASK GENMASK(31, 16)
#define RQ_CTXT_CI_PI_SET(val, member) \
@@ -629,7 +632,8 @@ static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq,
RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));
rq_ctxt->pref_ci_owner =
- cpu_to_le32(RQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
+ cpu_to_le32(RQ_CTXT_PREF_SET(RQ_CTXT_PREF_CI_HI(ci_start),
+ CI_HI) |
RQ_CTXT_PREF_SET(1, OWNER));
rq_ctxt->pref_wq_pfn_hi_ci =
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
index 16c00c3bb1ed..159c291fa293 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
@@ -33,6 +33,31 @@
HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \
HINIC3_LRO_PKT_HDR_LEN_IPV4)
+static void hinic3_rxq_clean_stats(struct hinic3_rxq_stats *rxq_stats)
+{
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->bytes = 0;
+ rxq_stats->packets = 0;
+ rxq_stats->errors = 0;
+ rxq_stats->csum_errors = 0;
+ rxq_stats->other_errors = 0;
+ rxq_stats->dropped = 0;
+ rxq_stats->rx_buf_empty = 0;
+
+ rxq_stats->alloc_skb_err = 0;
+ rxq_stats->alloc_rx_buf_err = 0;
+ rxq_stats->restore_drop_sge = 0;
+ u64_stats_update_end(&rxq_stats->syncp);
+}
+
+static void hinic3_rxq_stats_init(struct hinic3_rxq *rxq)
+{
+ struct hinic3_rxq_stats *rxq_stats = &rxq->rxq_stats;
+
+ u64_stats_init(&rxq_stats->syncp);
+ hinic3_rxq_clean_stats(rxq_stats);
+}
+
int hinic3_alloc_rxqs(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -54,6 +79,8 @@ int hinic3_alloc_rxqs(struct net_device *netdev)
rxq->buf_len_shift = ilog2(nic_dev->rx_buf_len);
rxq->q_depth = nic_dev->q_params.rq_depth;
rxq->q_mask = nic_dev->q_params.rq_depth - 1;
+
+ hinic3_rxq_stats_init(rxq);
}
return 0;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
index 44ae841a3648..31622e0a63d0 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
@@ -5,6 +5,7 @@
#define _HINIC3_RX_H_
#include <linux/bitfield.h>
+#include <linux/dim.h>
#include <linux/netdevice.h>
#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK GENMASK(4, 0)
@@ -25,6 +26,20 @@
#define RQ_CQE_STATUS_GET(val, member) \
FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val)
+struct hinic3_rxq_stats {
+ u64 packets;
+ u64 bytes;
+ u64 errors;
+ u64 csum_errors;
+ u64 other_errors;
+ u64 dropped;
+ u64 rx_buf_empty;
+ u64 alloc_skb_err;
+ u64 alloc_rx_buf_err;
+ u64 restore_drop_sge;
+ struct u64_stats_sync syncp;
+};
+
/* RX Completion information that is provided by HW for a specific RX WQE */
struct hinic3_rq_cqe {
__le32 status;
@@ -59,6 +74,7 @@ struct hinic3_rxq {
u16 buf_len;
u32 buf_len_shift;
+ struct hinic3_rxq_stats rxq_stats;
u32 cons_idx;
u32 delta;
@@ -80,6 +96,11 @@ struct hinic3_rxq {
struct device *dev; /* device for DMA mapping */
dma_addr_t cqe_start_paddr;
+
+ struct dim dim;
+
+ u8 last_coalesc_timer_cfg;
+ u8 last_pending_limit;
} ____cacheline_aligned;
struct hinic3_dyna_rxq_res {
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
index 92c43c05e3f2..6d3dc930ca97 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -16,19 +16,38 @@
#define MIN_SKB_LEN 32
+static void hinic3_txq_clean_stats(struct hinic3_txq_stats *txq_stats)
+{
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->bytes = 0;
+ txq_stats->packets = 0;
+ txq_stats->busy = 0;
+ txq_stats->dropped = 0;
+
+ txq_stats->skb_pad_err = 0;
+ txq_stats->frag_len_overflow = 0;
+ txq_stats->offload_cow_skb_err = 0;
+ txq_stats->map_frag_err = 0;
+ txq_stats->unknown_tunnel_pkt = 0;
+ txq_stats->frag_size_err = 0;
+ u64_stats_update_end(&txq_stats->syncp);
+}
+
+static void hinic3_txq_stats_init(struct hinic3_txq *txq)
+{
+ struct hinic3_txq_stats *txq_stats = &txq->txq_stats;
+
+ u64_stats_init(&txq_stats->syncp);
+ hinic3_txq_clean_stats(txq_stats);
+}
+
int hinic3_alloc_txqs(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
- struct hinic3_hwdev *hwdev = nic_dev->hwdev;
u16 q_id, num_txqs = nic_dev->max_qps;
struct pci_dev *pdev = nic_dev->pdev;
struct hinic3_txq *txq;
- if (!num_txqs) {
- dev_err(hwdev->dev, "Cannot allocate zero size txqs\n");
- return -EINVAL;
- }
-
nic_dev->txqs = kcalloc(num_txqs, sizeof(*nic_dev->txqs), GFP_KERNEL);
if (!nic_dev->txqs)
return -ENOMEM;
@@ -40,6 +59,8 @@ int hinic3_alloc_txqs(struct net_device *netdev)
txq->q_depth = nic_dev->q_params.sq_depth;
txq->q_mask = nic_dev->q_params.sq_depth - 1;
txq->dev = &pdev->dev;
+
+ hinic3_txq_stats_init(txq);
}
return 0;
@@ -582,7 +603,6 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
err_drop_pkt:
dev_kfree_skb_any(skb);
-
err_out:
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
index 7e1b872ba752..00194f2a1bcc 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
@@ -100,6 +100,20 @@ struct hinic3_sq_wqe_combo {
u32 task_type;
};
+struct hinic3_txq_stats {
+ u64 packets;
+ u64 bytes;
+ u64 busy;
+ u64 dropped;
+ u64 skb_pad_err;
+ u64 frag_len_overflow;
+ u64 offload_cow_skb_err;
+ u64 map_frag_err;
+ u64 unknown_tunnel_pkt;
+ u64 frag_size_err;
+ struct u64_stats_sync syncp;
+};
+
struct hinic3_dma_info {
dma_addr_t dma;
u32 len;
@@ -123,6 +137,8 @@ struct hinic3_txq {
struct hinic3_tx_info *tx_info;
struct hinic3_io_queue *sq;
+
+ struct hinic3_txq_stats txq_stats;
} ____cacheline_aligned;
struct hinic3_dyna_txq_res {
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 00f75d87c73f..def7efa15447 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -957,9 +957,6 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
-void
-ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
- struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index eadb1e3d12b3..afbff8aa9ceb 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -1414,8 +1414,8 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
if (!vsi_stat)
return;
- memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
- sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
+ memset(&vsi_stat->rx_ring_stats[q_idx]->stats, 0,
+ sizeof(vsi_stat->rx_ring_stats[q_idx]->stats));
memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
if (vsi->xdp_rings)
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 785bf5cc1b25..64e798b8f18f 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -204,42 +204,6 @@ bool ice_is_generic_mac(struct ice_hw *hw)
}
/**
- * ice_is_pf_c827 - check if pf contains c827 phy
- * @hw: pointer to the hw struct
- *
- * Return: true if the device has c827 phy.
- */
-static bool ice_is_pf_c827(struct ice_hw *hw)
-{
- struct ice_aqc_get_link_topo cmd = {};
- u8 node_part_number;
- u16 node_handle;
- int status;
-
- if (hw->mac_type != ICE_MAC_E810)
- return false;
-
- if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
- return true;
-
- cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
- cmd.addr.topo_params.index = 0;
-
- status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
- &node_handle);
-
- if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
- return false;
-
- if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
- return true;
-
- return false;
-}
-
-/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -958,30 +922,31 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
- * ice_wait_for_fw - wait for full FW readiness
+ * ice_wait_fw_load - wait for PHY firmware loading to complete
* @hw: pointer to the hardware structure
- * @timeout: milliseconds that can elapse before timing out
+ * @timeout: milliseconds that can elapse before timing out, 0 to bypass waiting
*
- * Return: 0 on success, -ETIMEDOUT on timeout.
+ * Return:
+ * * 0 on success
+ * * negative on timeout
*/
-static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+static int ice_wait_fw_load(struct ice_hw *hw, u32 timeout)
{
- int fw_loading;
- u32 elapsed = 0;
+ int fw_loading_reg;
- while (elapsed <= timeout) {
- fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+ if (!timeout)
+ return 0;
- /* firmware was not yet loaded, we have to wait more */
- if (fw_loading) {
- elapsed += 100;
- msleep(100);
- continue;
- }
+ fw_loading_reg = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+ /* notify the user only once if PHY FW is still loading */
+ if (fw_loading_reg)
+ dev_info(ice_hw_to_dev(hw), "Link initialization is blocked by PHY FW initialization. Link initialization will continue after PHY FW initialization completes.\n");
+ else
return 0;
- }
- return -ETIMEDOUT;
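+	/* Poll GL_MNG_FWSM every 10 ms until the FW_LOADING bit clears or
+	 * the caller-supplied timeout (ms, converted to us) expires.
+	 */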
+ return rd32_poll_timeout(hw, GL_MNG_FWSM, fw_loading_reg,
+ !(fw_loading_reg & GL_MNG_FWSM_FW_LOADING_M),
+ 10000, timeout * 1000);
}
static int __fwlog_send_cmd(void *priv, struct libie_aq_desc *desc, void *buf,
@@ -1171,12 +1136,10 @@ int ice_init_hw(struct ice_hw *hw)
* due to necessity of loading FW from an external source.
* This can take even half a minute.
*/
- if (ice_is_pf_c827(hw)) {
- status = ice_wait_for_fw(hw, 30000);
- if (status) {
- dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out");
- goto err_unroll_fltr_mgmt_struct;
- }
+ status = ice_wait_fw_load(hw, 30000);
+ if (status) {
+ dev_err(ice_hw_to_dev(hw), "ice_wait_fw_load timed out");
+ goto err_unroll_fltr_mgmt_struct;
}
hw->lane_num = ice_get_phy_lane_number(hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 53b54e395a2e..baf02512d041 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -5,6 +5,7 @@
#include "ice_lib.h"
#include "ice_trace.h"
#include <linux/dpll.h>
+#include <linux/property.h>
#define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50
#define ICE_DPLL_PIN_IDX_INVALID 0xff
@@ -529,6 +530,94 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
/**
+ * ice_dpll_pin_store_state - updates the state of pin in SW bookkeeping
+ * @pin: pointer to a pin
+ * @parent: parent pin index
+ * @state: pin state (connected or disconnected)
+ */
+static void
+ice_dpll_pin_store_state(struct ice_dpll_pin *pin, int parent, bool state)
+{
+ pin->state[parent] = state ? DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+}
+
+/**
+ * ice_dpll_rclk_update_e825c - updates the state of rclk pin on e825c device
+ * @pf: private board struct
+ * @pin: pointer to a pin
+ *
+ * Update the struct holding pin state info; states are kept separately for
+ * each parent.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int ice_dpll_rclk_update_e825c(struct ice_pf *pf,
+ struct ice_dpll_pin *pin)
+{
+ u8 rclk_bits;
+ int err;
+ u32 reg;
+
+ if (pf->dplls.rclk.num_parents > ICE_SYNCE_CLK_NUM)
+ return -EINVAL;
+
+ err = ice_read_cgu_reg(&pf->hw, ICE_CGU_R10, &reg);
+ if (err)
+ return err;
+
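+	/* The CGU mux registers hold the number of the port driving each
+	 * recovered clock (offset by the bypass-mux base); the pin is
+	 * connected iff that value selects this port.
+	 */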
+ rclk_bits = FIELD_GET(ICE_CGU_R10_SYNCE_S_REF_CLK, reg);
+ ice_dpll_pin_store_state(pin, ICE_SYNCE_CLK0, rclk_bits ==
+ (pf->ptp.port.port_num + ICE_CGU_BYPASS_MUX_OFFSET_E825C));
+
+ err = ice_read_cgu_reg(&pf->hw, ICE_CGU_R11, &reg);
+ if (err)
+ return err;
+
+ rclk_bits = FIELD_GET(ICE_CGU_R11_SYNCE_S_BYP_CLK, reg);
+ ice_dpll_pin_store_state(pin, ICE_SYNCE_CLK1, rclk_bits ==
+ (pf->ptp.port.port_num + ICE_CGU_BYPASS_MUX_OFFSET_E825C));
+
+ return 0;
+}
+
+/**
+ * ice_dpll_rclk_update - updates the state of rclk pin on a device
+ * @pf: private board struct
+ * @pin: pointer to a pin
+ * @port_num: port number
+ *
+ * Update the struct holding pin state info; states are kept separately for
+ * each parent.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int ice_dpll_rclk_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ u8 port_num)
+{
+ int ret;
+
+ for (u8 parent = 0; parent < pf->dplls.rclk.num_parents; parent++) {
+ u8 p = parent;
+
+ ret = ice_aq_get_phy_rec_clk_out(&pf->hw, &p, &port_num,
+ &pin->flags[parent], NULL);
+ if (ret)
+ return ret;
+
+ ice_dpll_pin_store_state(pin, parent,
+ ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN &
+ pin->flags[parent]);
+ }
+
+ return 0;
+}
+
+/**
* ice_dpll_sw_pins_update - update status of all SW pins
* @pf: private board struct
*
@@ -668,22 +757,14 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
break;
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
- for (parent = 0; parent < pf->dplls.rclk.num_parents;
- parent++) {
- u8 p = parent;
-
- ret = ice_aq_get_phy_rec_clk_out(&pf->hw, &p,
- &port_num,
- &pin->flags[parent],
- NULL);
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
+ ret = ice_dpll_rclk_update_e825c(pf, pin);
+ if (ret)
+ goto err;
+ } else {
+ ret = ice_dpll_rclk_update(pf, pin, port_num);
if (ret)
goto err;
- if (ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN &
- pin->flags[parent])
- pin->state[parent] = DPLL_PIN_STATE_CONNECTED;
- else
- pin->state[parent] =
- DPLL_PIN_STATE_DISCONNECTED;
}
break;
case ICE_DPLL_PIN_TYPE_SOFTWARE:
@@ -1843,6 +1924,40 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_synce_update_e825c - set PHY recovered clock pins on e825c
+ * @hw: Pointer to the HW struct
+ * @ena: true to enable, false to disable
+ * @port_num: port number
+ * @output: output pin (E825C has two)
+ *
+ * DPLL subsystem callback. Set the signals required to recover the clock
+ * from the given port.
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int ice_dpll_synce_update_e825c(struct ice_hw *hw, bool ena,
+ u32 port_num, enum ice_synce_clk output)
+{
+ int err;
+
+	/* configure the bypass mux to deliver the port's signal to the DPLL */
+ err = ice_tspll_cfg_bypass_mux_e825c(hw, ena, port_num, output);
+ if (err)
+ return err;
+
+ err = ice_tspll_cfg_synce_ethdiv_e825c(hw, output);
+ if (err)
+ return err;
+
+ dev_dbg(ice_hw_to_dev(hw), "CLK_SYNCE%u recovered clock: pin %s\n",
+ output, str_enabled_disabled(ena));
+
+ return 0;
+}
+
+/**
* ice_dpll_output_esync_set - callback for setting embedded sync
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -2263,6 +2378,28 @@ ice_dpll_sw_input_ref_sync_get(const struct dpll_pin *pin, void *pin_priv,
state, extack);
}
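+/* Helpers translating a parent dpll_pin pointer back to this driver's
+ * bookkeeping: _num returns the position within the pin's parent list,
+ * _idx the index of that parent in pf->dplls.inputs.
+ */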
+static int
+ice_dpll_pin_get_parent_num(struct ice_dpll_pin *pin,
+ const struct dpll_pin *parent)
+{
+ int i;
+
+ for (i = 0; i < pin->num_parents; i++)
+ if (pin->pf->dplls.inputs[pin->parent_idx[i]].pin == parent)
+ return i;
+
+ return -ENOENT;
+}
+
+static int
+ice_dpll_pin_get_parent_idx(struct ice_dpll_pin *pin,
+ const struct dpll_pin *parent)
+{
+ int num = ice_dpll_pin_get_parent_num(pin, parent);
+
+ return num < 0 ? num : pin->parent_idx[num];
+}
+
/**
* ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
* @pin: pointer to a pin
@@ -2286,35 +2423,45 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
enum dpll_pin_state state,
struct netlink_ext_ack *extack)
{
- struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv;
bool enable = state == DPLL_PIN_STATE_CONNECTED;
+ struct ice_dpll_pin *p = pin_priv;
struct ice_pf *pf = p->pf;
+ struct ice_hw *hw;
int ret = -EINVAL;
- u32 hw_idx;
+ int hw_idx;
+
+ hw = &pf->hw;
if (ice_dpll_is_reset(pf, extack))
return -EBUSY;
mutex_lock(&pf->dplls.lock);
- hw_idx = parent->idx - pf->dplls.base_rclk_idx;
- if (hw_idx >= pf->dplls.num_inputs)
+ hw_idx = ice_dpll_pin_get_parent_idx(p, parent_pin);
+ if (hw_idx < 0)
goto unlock;
+ hw_idx -= pf->dplls.base_rclk_idx;
if ((enable && p->state[hw_idx] == DPLL_PIN_STATE_CONNECTED) ||
(!enable && p->state[hw_idx] == DPLL_PIN_STATE_DISCONNECTED)) {
NL_SET_ERR_MSG_FMT(extack,
"pin:%u state:%u on parent:%u already set",
- p->idx, state, parent->idx);
+ p->idx, state,
+ ice_dpll_pin_get_parent_num(p, parent_pin));
goto unlock;
}
- ret = ice_aq_set_phy_rec_clk_out(&pf->hw, hw_idx, enable,
- &p->freq);
+
+ ret = hw->mac_type == ICE_MAC_GENERIC_3K_E825 ?
+ ice_dpll_synce_update_e825c(hw, enable,
+ pf->ptp.port.port_num,
+ (enum ice_synce_clk)hw_idx) :
+ ice_aq_set_phy_rec_clk_out(hw, hw_idx, enable, &p->freq);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
"err:%d %s failed to set pin state:%u for pin:%u on parent:%u",
ret,
- libie_aq_str(pf->hw.adminq.sq_last_status),
- state, p->idx, parent->idx);
+ libie_aq_str(hw->adminq.sq_last_status),
+ state, p->idx,
+ ice_dpll_pin_get_parent_num(p, parent_pin));
unlock:
mutex_unlock(&pf->dplls.lock);
@@ -2344,17 +2491,17 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
enum dpll_pin_state *state,
struct netlink_ext_ack *extack)
{
- struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv;
+ struct ice_dpll_pin *p = pin_priv;
struct ice_pf *pf = p->pf;
int ret = -EINVAL;
- u32 hw_idx;
+ int hw_idx;
if (ice_dpll_is_reset(pf, extack))
return -EBUSY;
mutex_lock(&pf->dplls.lock);
- hw_idx = parent->idx - pf->dplls.base_rclk_idx;
- if (hw_idx >= pf->dplls.num_inputs)
+ hw_idx = ice_dpll_pin_get_parent_idx(p, parent_pin);
+ if (hw_idx < 0)
goto unlock;
ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_RCLK_INPUT,
@@ -2814,7 +2961,8 @@ static void ice_dpll_release_pins(struct ice_dpll_pin *pins, int count)
int i;
for (i = 0; i < count; i++)
- dpll_pin_put(pins[i].pin);
+ if (!IS_ERR_OR_NULL(pins[i].pin))
+ dpll_pin_put(pins[i].pin, &pins[i].tracker);
}
/**
@@ -2836,11 +2984,15 @@ static int
ice_dpll_get_pins(struct ice_pf *pf, struct ice_dpll_pin *pins,
int start_idx, int count, u64 clock_id)
{
+ u32 pin_index;
int i, ret;
for (i = 0; i < count; i++) {
- pins[i].pin = dpll_pin_get(clock_id, i + start_idx, THIS_MODULE,
- &pins[i].prop);
+ pin_index = start_idx;
+ if (start_idx != DPLL_PIN_IDX_UNSPEC)
+ pin_index += i;
+ pins[i].pin = dpll_pin_get(clock_id, pin_index, THIS_MODULE,
+ &pins[i].prop, &pins[i].tracker);
if (IS_ERR(pins[i].pin)) {
ret = PTR_ERR(pins[i].pin);
goto release_pins;
@@ -2851,7 +3003,7 @@ ice_dpll_get_pins(struct ice_pf *pf, struct ice_dpll_pin *pins,
release_pins:
while (--i >= 0)
- dpll_pin_put(pins[i].pin);
+ dpll_pin_put(pins[i].pin, &pins[i].tracker);
return ret;
}
@@ -2944,6 +3096,7 @@ unregister_pins:
/**
* ice_dpll_deinit_direct_pins - deinitialize direct pins
+ * @pf: board private structure
* @cgu: if cgu is present and controlled by this NIC
* @pins: pointer to pins array
* @count: number of pins
@@ -2955,7 +3108,8 @@ unregister_pins:
* Release pins resources to the dpll subsystem.
*/
static void
-ice_dpll_deinit_direct_pins(bool cgu, struct ice_dpll_pin *pins, int count,
+ice_dpll_deinit_direct_pins(struct ice_pf *pf, bool cgu,
+ struct ice_dpll_pin *pins, int count,
const struct dpll_pin_ops *ops,
struct dpll_device *first,
struct dpll_device *second)
@@ -3024,77 +3178,230 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
{
struct ice_dpll_pin *rclk = &pf->dplls.rclk;
struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct dpll_pin *parent;
+ struct ice_dpll_pin *parent;
int i;
for (i = 0; i < rclk->num_parents; i++) {
- parent = pf->dplls.inputs[rclk->parent_idx[i]].pin;
- if (!parent)
+ parent = &pf->dplls.inputs[rclk->parent_idx[i]];
+ if (IS_ERR_OR_NULL(parent->pin))
continue;
- dpll_pin_on_pin_unregister(parent, rclk->pin,
+ dpll_pin_on_pin_unregister(parent->pin, rclk->pin,
&ice_dpll_rclk_ops, rclk);
}
if (WARN_ON_ONCE(!vsi || !vsi->netdev))
return;
dpll_netdev_pin_clear(vsi->netdev);
- dpll_pin_put(rclk->pin);
+ dpll_pin_put(rclk->pin, &rclk->tracker);
+}
+
+static bool ice_dpll_is_fwnode_pin(struct ice_dpll_pin *pin)
+{
+ return !IS_ERR_OR_NULL(pin->fwnode);
+}
+
+static void ice_dpll_pin_notify_work(struct work_struct *work)
+{
+ struct ice_dpll_pin_work *w = container_of(work,
+ struct ice_dpll_pin_work,
+ work);
+ struct ice_dpll_pin *pin, *parent = w->pin;
+ struct ice_pf *pf = parent->pf;
+ int ret;
+
+ wait_for_completion(&pf->dplls.dpll_init);
+ if (!test_bit(ICE_FLAG_DPLL, pf->flags))
+ goto out; /* DPLL initialization failed */
+
+ switch (w->action) {
+ case DPLL_PIN_CREATED:
+ if (!IS_ERR_OR_NULL(parent->pin)) {
+			/* our pin is already registered */
+ goto out;
+ }
+
+ /* Grab reference on fwnode pin */
+ parent->pin = fwnode_dpll_pin_find(parent->fwnode,
+ &parent->tracker);
+ if (IS_ERR_OR_NULL(parent->pin)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Cannot get fwnode pin reference\n");
+ goto out;
+ }
+
+ /* Register rclk pin */
+ pin = &pf->dplls.rclk;
+ ret = dpll_pin_on_pin_register(parent->pin, pin->pin,
+ &ice_dpll_rclk_ops, pin);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to register pin: %pe\n", ERR_PTR(ret));
+ dpll_pin_put(parent->pin, &parent->tracker);
+ parent->pin = NULL;
+ goto out;
+ }
+ break;
+ case DPLL_PIN_DELETED:
+ if (IS_ERR_OR_NULL(parent->pin)) {
+			/* our pin is already unregistered */
+ goto out;
+ }
+
+ /* Unregister rclk pin */
+ pin = &pf->dplls.rclk;
+ dpll_pin_on_pin_unregister(parent->pin, pin->pin,
+ &ice_dpll_rclk_ops, pin);
+
+ /* Drop fwnode pin reference */
+ dpll_pin_put(parent->pin, &parent->tracker);
+ parent->pin = NULL;
+ break;
+ default:
+ break;
+ }
+out:
+ kfree(w);
+}
+
+static int ice_dpll_pin_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct ice_dpll_pin *pin = container_of(nb, struct ice_dpll_pin, nb);
+ struct dpll_pin_notifier_info *info = data;
+ struct ice_dpll_pin_work *work;
+
+ if (action != DPLL_PIN_CREATED && action != DPLL_PIN_DELETED)
+ return NOTIFY_DONE;
+
+ /* Check if the reported pin is this one */
+ if (pin->fwnode != info->fwnode)
+ return NOTIFY_DONE; /* Not this pin */
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return NOTIFY_DONE;
+
+ INIT_WORK(&work->work, ice_dpll_pin_notify_work);
+ work->action = action;
+ work->pin = pin;
+
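+	/* The handler must wait for this PF's DPLL init to complete and
+	 * takes registration paths that may sleep, so run it from the
+	 * workqueue instead of notifier context.
+	 */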
+ queue_work(pin->pf->dplls.wq, &work->work);
+
+ return NOTIFY_OK;
}
/**
- * ice_dpll_init_rclk_pins - initialize recovered clock pin
+ * ice_dpll_init_pin_common - initialize pin
* @pf: board private structure
* @pin: pin to register
* @start_idx: on which index shall allocation start in dpll subsystem
* @ops: callback ops registered with the pins
*
- * Allocate resource for recovered clock pin in dpll subsystem. Register the
- * pin with the parents it has in the info. Register pin with the pf's main vsi
- * netdev.
+ * Allocate resource for given pin in dpll subsystem. Register the pin with
+ * the parents it has in the info.
*
* Return:
* * 0 - success
* * negative - registration failure reason
*/
static int
-ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
- int start_idx, const struct dpll_pin_ops *ops)
+ice_dpll_init_pin_common(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ int start_idx, const struct dpll_pin_ops *ops)
{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct dpll_pin *parent;
+ struct ice_dpll_pin *parent;
int ret, i;
- if (WARN_ON((!vsi || !vsi->netdev)))
- return -EINVAL;
- ret = ice_dpll_get_pins(pf, pin, start_idx, ICE_DPLL_RCLK_NUM_PER_PF,
- pf->dplls.clock_id);
+ ret = ice_dpll_get_pins(pf, pin, start_idx, 1, pf->dplls.clock_id);
if (ret)
return ret;
- for (i = 0; i < pf->dplls.rclk.num_parents; i++) {
- parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[i]].pin;
- if (!parent) {
- ret = -ENODEV;
- goto unregister_pins;
+
+ for (i = 0; i < pin->num_parents; i++) {
+ parent = &pf->dplls.inputs[pin->parent_idx[i]];
+ if (IS_ERR_OR_NULL(parent->pin)) {
+ if (!ice_dpll_is_fwnode_pin(parent)) {
+ ret = -ENODEV;
+ goto unregister_pins;
+ }
+ parent->pin = fwnode_dpll_pin_find(parent->fwnode,
+ &parent->tracker);
+ if (IS_ERR_OR_NULL(parent->pin)) {
+ dev_info(ice_pf_to_dev(pf),
+ "Mux pin not registered yet\n");
+ continue;
+ }
}
- ret = dpll_pin_on_pin_register(parent, pf->dplls.rclk.pin,
- ops, &pf->dplls.rclk);
+ ret = dpll_pin_on_pin_register(parent->pin, pin->pin, ops, pin);
if (ret)
goto unregister_pins;
}
- dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
return 0;
unregister_pins:
while (i) {
- parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[--i]].pin;
- dpll_pin_on_pin_unregister(parent, pf->dplls.rclk.pin,
- &ice_dpll_rclk_ops, &pf->dplls.rclk);
+ parent = &pf->dplls.inputs[pin->parent_idx[--i]];
+ if (IS_ERR_OR_NULL(parent->pin))
+ continue;
+ dpll_pin_on_pin_unregister(parent->pin, pin->pin, ops, pin);
}
- ice_dpll_release_pins(pin, ICE_DPLL_RCLK_NUM_PER_PF);
+ ice_dpll_release_pins(pin, 1);
+
return ret;
}
/**
+ * ice_dpll_init_rclk_pin - initialize recovered clock pin
+ * @pf: board private structure
+ * @start_idx: on which index shall allocation start in dpll subsystem
+ * @ops: callback ops registered with the pins
+ *
+ * Allocate resource for recovered clock pin in dpll subsystem. Register the
+ * pin with the parents it has in the info.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_init_rclk_pin(struct ice_pf *pf, int start_idx,
+ const struct dpll_pin_ops *ops)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ int ret;
+
+ ret = ice_dpll_init_pin_common(pf, &pf->dplls.rclk, start_idx, ops);
+ if (ret)
+ return ret;
+
+ dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+
+ return 0;
+}
+
+static void
+ice_dpll_deinit_fwnode_pin(struct ice_dpll_pin *pin)
+{
+ unregister_dpll_notifier(&pin->nb);
+ flush_workqueue(pin->pf->dplls.wq);
+ if (!IS_ERR_OR_NULL(pin->pin)) {
+ dpll_pin_put(pin->pin, &pin->tracker);
+ pin->pin = NULL;
+ }
+ fwnode_handle_put(pin->fwnode);
+ pin->fwnode = NULL;
+}
+
+static void
+ice_dpll_deinit_fwnode_pins(struct ice_pf *pf, struct ice_dpll_pin *pins,
+ int start_idx)
+{
+ int i;
+
+ for (i = 0; i < pf->dplls.rclk.num_parents; i++)
+ ice_dpll_deinit_fwnode_pin(&pins[start_idx + i]);
+ destroy_workqueue(pf->dplls.wq);
+}
+
+/**
* ice_dpll_deinit_pins - deinitialize direct pins
* @pf: board private structure
* @cgu: if cgu is controlled by this pf
@@ -3113,6 +3420,8 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
struct ice_dpll *dp = &d->pps;
ice_dpll_deinit_rclk_pin(pf);
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825)
+ ice_dpll_deinit_fwnode_pins(pf, pf->dplls.inputs, 0);
if (cgu) {
ice_dpll_unregister_pins(dp->dpll, inputs, &ice_dpll_input_ops,
num_inputs);
@@ -3127,12 +3436,12 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
&ice_dpll_output_ops, num_outputs);
ice_dpll_release_pins(outputs, num_outputs);
if (!pf->dplls.generic) {
- ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
+ ice_dpll_deinit_direct_pins(pf, cgu, pf->dplls.ufl,
ICE_DPLL_PIN_SW_NUM,
&ice_dpll_pin_ufl_ops,
pf->dplls.pps.dpll,
pf->dplls.eec.dpll);
- ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
+ ice_dpll_deinit_direct_pins(pf, cgu, pf->dplls.sma,
ICE_DPLL_PIN_SW_NUM,
&ice_dpll_pin_sma_ops,
pf->dplls.pps.dpll,
@@ -3141,6 +3450,141 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
}
}
+static struct fwnode_handle *
+ice_dpll_pin_node_get(struct ice_pf *pf, const char *name)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(ice_pf_to_dev(pf));
+ int index;
+
+ index = fwnode_property_match_string(fwnode, "dpll-pin-names", name);
+ if (index < 0)
+ return ERR_PTR(-ENOENT);
+
+ return fwnode_find_reference(fwnode, "dpll-pins", index);
+}
+
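+/* A hypothetical firmware description consumed by the lookup above:
+ *
+ *   dpll-pin-names = "rclk0", "rclk1";
+ *   dpll-pins = <&cgu_pin0>, <&cgu_pin1>;
+ *
+ * fwnode_property_match_string() returns the index of @name within the
+ * first array and fwnode_find_reference() resolves the handle at the
+ * same index in the second.
+ */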
+static int
+ice_dpll_init_fwnode_pin(struct ice_dpll_pin *pin, const char *name)
+{
+ struct ice_pf *pf = pin->pf;
+ int ret;
+
+ pin->fwnode = ice_dpll_pin_node_get(pf, name);
+ if (IS_ERR(pin->fwnode)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to find %s firmware node: %pe\n", name,
+ pin->fwnode);
+ pin->fwnode = NULL;
+ return -ENODEV;
+ }
+
+ dev_dbg(ice_pf_to_dev(pf), "Found fwnode node for %s\n", name);
+
+ pin->pin = fwnode_dpll_pin_find(pin->fwnode, &pin->tracker);
+ if (IS_ERR_OR_NULL(pin->pin)) {
+ dev_info(ice_pf_to_dev(pf),
+ "DPLL pin for %pfwp not registered yet\n",
+ pin->fwnode);
+ pin->pin = NULL;
+ }
+
+ pin->nb.notifier_call = ice_dpll_pin_notify;
+ ret = register_dpll_notifier(&pin->nb);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to subscribe for DPLL notifications\n");
+
+ if (!IS_ERR_OR_NULL(pin->pin)) {
+ dpll_pin_put(pin->pin, &pin->tracker);
+ pin->pin = NULL;
+ }
+ fwnode_handle_put(pin->fwnode);
+ pin->fwnode = NULL;
+
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * ice_dpll_init_fwnode_pins - initialize pins described by firmware nodes
+ * @pf: board private structure
+ * @pins: pointer to pins array
+ * @start_idx: index of the first pin to initialize within @pins
+ *
+ * Initialize input pins for E825 RCLK support. The parent pins (rclk0, rclk1)
+ * are expected to be defined by the system firmware (ACPI). This function
+ * looks them up in the dpll subsystem (or subscribes for their creation) and
+ * keeps references for later registration with the rclk pin.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - initialization failure reason
+ */
+static int
+ice_dpll_init_fwnode_pins(struct ice_pf *pf, struct ice_dpll_pin *pins,
+ int start_idx)
+{
+ char pin_name[8];
+ int i, ret;
+
+ pf->dplls.wq = create_singlethread_workqueue("ice_dpll_wq");
+ if (!pf->dplls.wq)
+ return -ENOMEM;
+
+ for (i = 0; i < pf->dplls.rclk.num_parents; i++) {
+ pins[start_idx + i].pf = pf;
+ snprintf(pin_name, sizeof(pin_name), "rclk%u", i);
+ ret = ice_dpll_init_fwnode_pin(&pins[start_idx + i], pin_name);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+error:
+ while (i--)
+ ice_dpll_deinit_fwnode_pin(&pins[start_idx + i]);
+
+ destroy_workqueue(pf->dplls.wq);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_init_pins_e825 - init pins and register pins with a dplls
+ * @pf: board private structure
+ * @cgu: if cgu is present and controlled by this NIC
+ *
+ * Initialize directly connected pf's pins within pf's dplls in a Linux dpll
+ * subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - initialization failure reason
+ */
+static int ice_dpll_init_pins_e825(struct ice_pf *pf)
+{
+ int ret;
+
+ ret = ice_dpll_init_fwnode_pins(pf, pf->dplls.inputs, 0);
+ if (ret)
+ return ret;
+
+ ret = ice_dpll_init_rclk_pin(pf, DPLL_PIN_IDX_UNSPEC,
+ &ice_dpll_rclk_ops);
+ if (ret) {
+		/* Inform the DPLL notifier work items that DPLL init
+		 * finished unsuccessfully (ICE_FLAG_DPLL not set).
+		 */
+ complete_all(&pf->dplls.dpll_init);
+ ice_dpll_deinit_fwnode_pins(pf, pf->dplls.inputs, 0);
+ }
+
+ return ret;
+}
+
/**
* ice_dpll_init_pins - init pins and register pins with a dplls
* @pf: board private structure
@@ -3155,21 +3599,24 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
*/
static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
{
+ const struct dpll_pin_ops *output_ops;
+ const struct dpll_pin_ops *input_ops;
int ret, count;
+ input_ops = &ice_dpll_input_ops;
+ output_ops = &ice_dpll_output_ops;
+
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0,
- pf->dplls.num_inputs,
- &ice_dpll_input_ops,
- pf->dplls.eec.dpll, pf->dplls.pps.dpll);
+ pf->dplls.num_inputs, input_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
if (ret)
return ret;
count = pf->dplls.num_inputs;
if (cgu) {
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs,
- count,
- pf->dplls.num_outputs,
- &ice_dpll_output_ops,
- pf->dplls.eec.dpll,
+ count, pf->dplls.num_outputs,
+ output_ops, pf->dplls.eec.dpll,
pf->dplls.pps.dpll);
if (ret)
goto deinit_inputs;
@@ -3205,30 +3652,30 @@ static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
} else {
count += pf->dplls.num_outputs + 2 * ICE_DPLL_PIN_SW_NUM;
}
- ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, count + pf->hw.pf_id,
- &ice_dpll_rclk_ops);
+
+ ret = ice_dpll_init_rclk_pin(pf, count + pf->ptp.port.port_num,
+ &ice_dpll_rclk_ops);
if (ret)
goto deinit_ufl;
return 0;
deinit_ufl:
- ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
- ICE_DPLL_PIN_SW_NUM,
- &ice_dpll_pin_ufl_ops,
- pf->dplls.pps.dpll, pf->dplls.eec.dpll);
+ ice_dpll_deinit_direct_pins(pf, cgu, pf->dplls.ufl, ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops, pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
deinit_sma:
- ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
- ICE_DPLL_PIN_SW_NUM,
- &ice_dpll_pin_sma_ops,
- pf->dplls.pps.dpll, pf->dplls.eec.dpll);
+ ice_dpll_deinit_direct_pins(pf, cgu, pf->dplls.sma, ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops, pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
deinit_outputs:
- ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs,
+ ice_dpll_deinit_direct_pins(pf, cgu, pf->dplls.outputs,
pf->dplls.num_outputs,
- &ice_dpll_output_ops, pf->dplls.pps.dpll,
+ output_ops, pf->dplls.pps.dpll,
pf->dplls.eec.dpll);
deinit_inputs:
- ice_dpll_deinit_direct_pins(cgu, pf->dplls.inputs, pf->dplls.num_inputs,
- &ice_dpll_input_ops, pf->dplls.pps.dpll,
+ ice_dpll_deinit_direct_pins(pf, cgu, pf->dplls.inputs,
+ pf->dplls.num_inputs,
+ input_ops, pf->dplls.pps.dpll,
pf->dplls.eec.dpll);
return ret;
}
@@ -3239,15 +3686,15 @@ deinit_inputs:
* @d: pointer to ice_dpll
* @cgu: if cgu is present and controlled by this NIC
*
- * If cgu is owned unregister the dpll from dpll subsystem.
- * Release resources of dpll device from dpll subsystem.
+ * If cgu is owned, unregister the DPLL from DPLL subsystem.
+ * Release resources of DPLL device from DPLL subsystem.
*/
static void
ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu)
{
if (cgu)
dpll_device_unregister(d->dpll, d->ops, d);
- dpll_device_put(d->dpll);
+ dpll_device_put(d->dpll, &d->tracker);
}
/**
@@ -3257,8 +3704,8 @@ ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu)
* @cgu: if cgu is present and controlled by this NIC
* @type: type of dpll being initialized
*
- * Allocate dpll instance for this board in dpll subsystem, if cgu is controlled
- * by this NIC, register dpll with the callback ops.
+ * Allocate DPLL instance for this board in dpll subsystem, if cgu is controlled
+ * by this NIC, register DPLL with the callback ops.
*
* Return:
* * 0 - success
@@ -3271,7 +3718,8 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
u64 clock_id = pf->dplls.clock_id;
int ret;
- d->dpll = dpll_device_get(clock_id, d->dpll_idx, THIS_MODULE);
+ d->dpll = dpll_device_get(clock_id, d->dpll_idx, THIS_MODULE,
+ &d->tracker);
if (IS_ERR(d->dpll)) {
ret = PTR_ERR(d->dpll);
dev_err(ice_pf_to_dev(pf),
@@ -3287,7 +3735,8 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
ice_dpll_update_state(pf, d, true);
ret = dpll_device_register(d->dpll, type, ops, d);
if (ret) {
- dpll_device_put(d->dpll);
+ dpll_device_put(d->dpll, &d->tracker);
+ d->dpll = NULL;
return ret;
}
d->ops = ops;
@@ -3506,6 +3955,26 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
}
/**
+ * ice_dpll_init_info_pin_on_pin_e825c - initializes rclk pin information
+ * @pf: board private structure
+ *
+ * Init information for the rclk pin and cache it in pf->dplls.rclk.
+ *
+ * Return:
+ * * 0 - success
+ */
+static int ice_dpll_init_info_pin_on_pin_e825c(struct ice_pf *pf)
+{
+ struct ice_dpll_pin *rclk_pin = &pf->dplls.rclk;
+
+ rclk_pin->prop.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT;
+ rclk_pin->prop.capabilities |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ rclk_pin->pf = pf;
+
+ return 0;
+}
+
+/**
* ice_dpll_init_info_rclk_pin - initializes rclk pin information
* @pf: board private structure
*
@@ -3631,7 +4100,10 @@ ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type)
case ICE_DPLL_PIN_TYPE_OUTPUT:
return ice_dpll_init_info_direct_pins(pf, pin_type);
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
- return ice_dpll_init_info_rclk_pin(pf);
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825)
+ return ice_dpll_init_info_pin_on_pin_e825c(pf);
+ else
+ return ice_dpll_init_info_rclk_pin(pf);
case ICE_DPLL_PIN_TYPE_SOFTWARE:
return ice_dpll_init_info_sw_pins(pf);
default:
@@ -3654,6 +4126,50 @@ static void ice_dpll_deinit_info(struct ice_pf *pf)
}
/**
+ * ice_dpll_init_info_e825c - prepare pf's dpll information structure for e825c
+ * device
+ * @pf: board private structure
+ *
+ * Acquire (from HW) and set basic DPLL information (on pf->dplls struct).
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info_e825c(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+ int ret = 0;
+ int i;
+
+ d->clock_id = ice_generate_clock_id(pf);
+ d->num_inputs = ICE_SYNCE_CLK_NUM;
+
+ d->inputs = kcalloc(d->num_inputs, sizeof(*d->inputs), GFP_KERNEL);
+ if (!d->inputs)
+ return -ENOMEM;
+
+ ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx,
+ &pf->dplls.rclk.num_parents);
+ if (ret)
+ goto deinit_info;
+
+ for (i = 0; i < pf->dplls.rclk.num_parents; i++)
+ pf->dplls.rclk.parent_idx[i] = d->base_rclk_idx + i;
+
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_RCLK_INPUT);
+ if (ret)
+ goto deinit_info;
+ dev_dbg(ice_pf_to_dev(pf),
+ "%s - success, inputs: %u, outputs: %u, rclk-parents: %u\n",
+ __func__, d->num_inputs, d->num_outputs, d->rclk.num_parents);
+ return 0;
+deinit_info:
+ ice_dpll_deinit_info(pf);
+ return ret;
+}
+
+/**
* ice_dpll_init_info - prepare pf's dpll information structure
* @pf: board private structure
* @cgu: if cgu is present and controlled by this NIC
@@ -3772,14 +4288,16 @@ void ice_dpll_deinit(struct ice_pf *pf)
ice_dpll_deinit_worker(pf);
ice_dpll_deinit_pins(pf, cgu);
- ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu);
- ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu);
+ if (!IS_ERR_OR_NULL(pf->dplls.pps.dpll))
+ ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu);
+ if (!IS_ERR_OR_NULL(pf->dplls.eec.dpll))
+ ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu);
ice_dpll_deinit_info(pf);
mutex_destroy(&pf->dplls.lock);
}
/**
- * ice_dpll_init - initialize support for dpll subsystem
+ * ice_dpll_init_e825 - initialize support for dpll subsystem
* @pf: board private structure
*
* Set up the device dplls, register them and pins connected within Linux dpll
@@ -3788,7 +4306,43 @@ void ice_dpll_deinit(struct ice_pf *pf)
*
* Context: Initializes pf->dplls.lock mutex.
*/
-void ice_dpll_init(struct ice_pf *pf)
+static void ice_dpll_init_e825(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+ int err;
+
+ mutex_init(&d->lock);
+ init_completion(&d->dpll_init);
+
+ err = ice_dpll_init_info_e825c(pf);
+ if (err)
+ goto err_exit;
+ err = ice_dpll_init_pins_e825(pf);
+ if (err)
+ goto deinit_info;
+ set_bit(ICE_FLAG_DPLL, pf->flags);
+ complete_all(&d->dpll_init);
+
+ return;
+
+deinit_info:
+ ice_dpll_deinit_info(pf);
+err_exit:
+ mutex_destroy(&d->lock);
+ dev_warn(ice_pf_to_dev(pf), "DPLLs init failure err:%d\n", err);
+}
+
+/**
+ * ice_dpll_init_e810 - initialize support for dpll subsystem
+ * @pf: board private structure
+ *
+ * Set up the device DPLLs, then register them and their connected pins with
+ * the Linux dpll subsystem. Allow userspace to obtain the DPLL state and to
+ * submit DPLL configuration requests.
+ *
+ * Context: Initializes pf->dplls.lock mutex.
+ */
+static void ice_dpll_init_e810(struct ice_pf *pf)
{
bool cgu = ice_is_feature_supported(pf, ICE_F_CGU);
struct ice_dplls *d = &pf->dplls;
@@ -3828,3 +4382,15 @@ err_exit:
mutex_destroy(&d->lock);
dev_warn(ice_pf_to_dev(pf), "DPLLs init failure err:%d\n", err);
}
+
+void ice_dpll_init(struct ice_pf *pf)
+{
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_dpll_init_e825(pf);
+ break;
+ default:
+ ice_dpll_init_e810(pf);
+ break;
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
index c0da03384ce9..ae42cdea0ee1 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.h
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -20,9 +20,16 @@ enum ice_dpll_pin_sw {
ICE_DPLL_PIN_SW_NUM
};
+struct ice_dpll_pin_work {
+ struct work_struct work;
+ unsigned long action;
+ struct ice_dpll_pin *pin;
+};
+
/** ice_dpll_pin - store info about pins
* @pin: dpll pin structure
* @pf: pointer to pf, which has registered the dpll_pin
+ * @tracker: reference count tracker
* @idx: ice pin private idx
* @num_parents: holds number of parent pins
* @parent_idx: holds indexes of parent pins
@@ -37,6 +44,9 @@ enum ice_dpll_pin_sw {
struct ice_dpll_pin {
struct dpll_pin *pin;
struct ice_pf *pf;
+ dpll_tracker tracker;
+ struct fwnode_handle *fwnode;
+ struct notifier_block nb;
u8 idx;
u8 num_parents;
u8 parent_idx[ICE_DPLL_RCLK_NUM_MAX];
@@ -58,6 +68,7 @@ struct ice_dpll_pin {
/** ice_dpll - store info required for DPLL control
* @dpll: pointer to dpll dev
* @pf: pointer to pf, which has registered the dpll_device
+ * @tracker: reference count tracker
* @dpll_idx: index of dpll on the NIC
* @input_idx: currently selected input index
* @prev_input_idx: previously selected input index
@@ -76,6 +87,7 @@ struct ice_dpll_pin {
struct ice_dpll {
struct dpll_device *dpll;
struct ice_pf *pf;
+ dpll_tracker tracker;
u8 dpll_idx;
u8 input_idx;
u8 prev_input_idx;
@@ -114,7 +126,9 @@ struct ice_dpll {
struct ice_dplls {
struct kthread_worker *kworker;
struct kthread_delayed_work work;
+ struct workqueue_struct *wq;
struct mutex lock;
+ struct completion dpll_init;
struct ice_dpll eec;
struct ice_dpll pps;
struct ice_dpll_pin *inputs;
@@ -143,3 +157,19 @@ static inline void ice_dpll_deinit(struct ice_pf *pf) { }
#endif
#endif
+
+#define ICE_CGU_R10 0x28
+#define ICE_CGU_R10_SYNCE_CLKO_SEL GENMASK(8, 5)
+#define ICE_CGU_R10_SYNCE_CLKODIV_M1 GENMASK(13, 9)
+#define ICE_CGU_R10_SYNCE_CLKODIV_LOAD BIT(14)
+#define ICE_CGU_R10_SYNCE_DCK_RST BIT(15)
+#define ICE_CGU_R10_SYNCE_ETHCLKO_SEL GENMASK(18, 16)
+#define ICE_CGU_R10_SYNCE_ETHDIV_M1 GENMASK(23, 19)
+#define ICE_CGU_R10_SYNCE_ETHDIV_LOAD BIT(24)
+#define ICE_CGU_R10_SYNCE_DCK2_RST BIT(25)
+#define ICE_CGU_R10_SYNCE_S_REF_CLK GENMASK(31, 27)
+
+#define ICE_CGU_R11 0x2C
+#define ICE_CGU_R11_SYNCE_S_BYP_CLK GENMASK(6, 1)
+
+#define ICE_CGU_BYPASS_MUX_OFFSET_E825C 3
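These masks pair with the FIELD_GET()/FIELD_PREP() helpers used by the ice_tspll.c hunks below. A minimal sketch of the read-modify-write pattern (the mux input value 3 is illustrative, not a real configuration):

	u32 r10;
	int err;

	err = ice_read_cgu_reg(hw, ICE_CGU_R10, &r10);
	if (err)
		return err;

	/* select bypass mux input 3 as the SyncE reference clock */
	r10 &= ~ICE_CGU_R10_SYNCE_S_REF_CLK;
	r10 |= FIELD_PREP(ICE_CGU_R10_SYNCE_S_REF_CLK, 3);

	err = ice_write_cgu_reg(hw, ICE_CGU_R10, r10);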
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 3565a5d96c6d..c6bc29cfb8e6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -33,8 +33,8 @@ static int ice_q_stats_len(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
- (sizeof(struct ice_q_stats) / sizeof(u64)));
+ /* One packet count and one byte count per queue */
+ return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) * 2);
}
#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
@@ -1942,25 +1942,35 @@ __ice_get_ethtool_stats(struct net_device *netdev,
rcu_read_lock();
ice_for_each_alloc_txq(vsi, j) {
+ u64 pkts, bytes;
+
tx_ring = READ_ONCE(vsi->tx_rings[j]);
- if (tx_ring && tx_ring->ring_stats) {
- data[i++] = tx_ring->ring_stats->stats.pkts;
- data[i++] = tx_ring->ring_stats->stats.bytes;
- } else {
+ if (!tx_ring || !tx_ring->ring_stats) {
data[i++] = 0;
data[i++] = 0;
+ continue;
}
+
+ ice_fetch_tx_ring_stats(tx_ring, &pkts, &bytes);
+
+ data[i++] = pkts;
+ data[i++] = bytes;
}
ice_for_each_alloc_rxq(vsi, j) {
+ u64 pkts, bytes;
+
rx_ring = READ_ONCE(vsi->rx_rings[j]);
- if (rx_ring && rx_ring->ring_stats) {
- data[i++] = rx_ring->ring_stats->stats.pkts;
- data[i++] = rx_ring->ring_stats->stats.bytes;
- } else {
+ if (!rx_ring || !rx_ring->ring_stats) {
data[i++] = 0;
data[i++] = 0;
+ continue;
}
+
+ ice_fetch_rx_ring_stats(rx_ring, &pkts, &bytes);
+
+ data[i++] = pkts;
+ data[i++] = bytes;
}
rcu_read_unlock();
@@ -3378,7 +3388,6 @@ process_link:
*/
rx_rings[i].next_to_use = 0;
rx_rings[i].next_to_clean = 0;
- rx_rings[i].next_to_alloc = 0;
*vsi->rx_rings[i] = rx_rings[i];
}
kfree(rx_rings);
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index 30801fd375f0..1d9b2d646474 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -106,9 +106,10 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
#define ICE_RDMA_AEQ_MSIX 1
static int ice_get_default_msix_amount(struct ice_pf *pf)
{
- return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() +
+ return ICE_MIN_LAN_OICR_MSIX + netif_get_num_default_rss_queues() +
(test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) +
- (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0);
+ (ice_is_rdma_ena(pf) ? netif_get_num_default_rss_queues() +
+ ICE_RDMA_AEQ_MSIX : 0);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d47af94f31a9..d921269e1fe7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -159,12 +159,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
static u16 ice_get_rxq_count(struct ice_pf *pf)
{
- return min(ice_get_avail_rxq_count(pf), num_online_cpus());
+ return min(ice_get_avail_rxq_count(pf),
+ netif_get_num_default_rss_queues());
}
static u16 ice_get_txq_count(struct ice_pf *pf)
{
- return min(ice_get_avail_txq_count(pf), num_online_cpus());
+ return min(ice_get_avail_txq_count(pf),
+ netif_get_num_default_rss_queues());
}
/**
@@ -911,13 +913,15 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_CHNL)
vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
else
- vsi->rss_size = min_t(u16, num_online_cpus(),
+ vsi->rss_size = min_t(u16,
+ netif_get_num_default_rss_queues(),
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF;
break;
case ICE_VSI_SF:
vsi->rss_table_size = ICE_LUT_VSI_SIZE;
- vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+ vsi->rss_size = min_t(u16, netif_get_num_default_rss_queues(),
+ max_rss_size);
vsi->rss_lut_type = ICE_LUT_VSI;
break;
case ICE_VSI_VF:
@@ -3431,20 +3435,6 @@ out:
}
/**
- * ice_update_ring_stats - Update ring statistics
- * @stats: stats to be updated
- * @pkts: number of processed packets
- * @bytes: number of processed bytes
- *
- * This function assumes that caller has acquired a u64_stats_sync lock.
- */
-static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
-{
- stats->bytes += bytes;
- stats->pkts += pkts;
-}
-
-/**
* ice_update_tx_ring_stats - Update Tx ring specific counters
* @tx_ring: ring to update
* @pkts: number of processed packets
@@ -3453,7 +3443,8 @@ static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes
void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&tx_ring->ring_stats->syncp);
- ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
+ u64_stats_add(&tx_ring->ring_stats->pkts, pkts);
+ u64_stats_add(&tx_ring->ring_stats->bytes, bytes);
u64_stats_update_end(&tx_ring->ring_stats->syncp);
}
@@ -3466,11 +3457,48 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&rx_ring->ring_stats->syncp);
- ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
+ u64_stats_add(&rx_ring->ring_stats->pkts, pkts);
+ u64_stats_add(&rx_ring->ring_stats->bytes, bytes);
u64_stats_update_end(&rx_ring->ring_stats->syncp);
}
/**
+ * ice_fetch_tx_ring_stats - Fetch Tx ring packet and byte counters
+ * @ring: ring to read
+ * @pkts: output for the packet count
+ * @bytes: output for the byte count
+ */
+void ice_fetch_tx_ring_stats(const struct ice_tx_ring *ring,
+ u64 *pkts, u64 *bytes)
+{
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&ring->ring_stats->syncp);
+ *pkts = u64_stats_read(&ring->ring_stats->pkts);
+ *bytes = u64_stats_read(&ring->ring_stats->bytes);
+ } while (u64_stats_fetch_retry(&ring->ring_stats->syncp, start));
+}
+
+/**
+ * ice_fetch_rx_ring_stats - Fetch Rx ring packet and byte counters
+ * @ring: ring to read
+ * @pkts: output for the packet count
+ * @bytes: output for the byte count
+ */
+void ice_fetch_rx_ring_stats(const struct ice_rx_ring *ring,
+ u64 *pkts, u64 *bytes)
+{
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&ring->ring_stats->syncp);
+ *pkts = u64_stats_read(&ring->ring_stats->pkts);
+ *bytes = u64_stats_read(&ring->ring_stats->bytes);
+ } while (u64_stats_fetch_retry(&ring->ring_stats->syncp, start));
+}
+
+/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
* @pi: port info of the switch with default VSI
*
@@ -3961,6 +3989,9 @@ void ice_init_feature_support(struct ice_pf *pf)
break;
}
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825)
+ ice_set_feature_support(pf, ICE_F_PHY_RCLK);
+
if (pf->hw.mac_type == ICE_MAC_E830) {
ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
ice_set_feature_support(pf, ICE_F_GCS);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 2cb1eb98b9da..49454d98dcfe 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -92,6 +92,12 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
+void ice_fetch_tx_ring_stats(const struct ice_tx_ring *ring,
+ u64 *pkts, u64 *bytes);
+
+void ice_fetch_rx_ring_stats(const struct ice_rx_ring *ring,
+ u64 *pkts, u64 *bytes);
+
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index d04605d3e61a..4da37caa3ec9 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -159,8 +159,8 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
* prev_pkt would be negative if there was no
* pending work.
*/
- packets = ring_stats->stats.pkts & INT_MAX;
- if (ring_stats->tx_stats.prev_pkt == packets) {
+ packets = ice_stats_read(ring_stats, pkts) & INT_MAX;
+ if (ring_stats->tx.prev_pkt == packets) {
/* Trigger sw interrupt to revive the queue */
ice_trigger_sw_intr(hw, tx_ring->q_vector);
continue;
@@ -170,7 +170,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
* to ice_get_tx_pending()
*/
smp_rmb();
- ring_stats->tx_stats.prev_pkt =
+ ring_stats->tx.prev_pkt =
ice_get_tx_pending(tx_ring) ? packets : -1;
}
}
@@ -6824,58 +6824,132 @@ int ice_up(struct ice_vsi *vsi)
return err;
}
+struct ice_vsi_tx_stats {
+ u64 pkts;
+ u64 bytes;
+ u64 tx_restart_q;
+ u64 tx_busy;
+ u64 tx_linearize;
+};
+
+struct ice_vsi_rx_stats {
+ u64 pkts;
+ u64 bytes;
+ u64 rx_non_eop_descs;
+ u64 rx_page_failed;
+ u64 rx_buf_failed;
+};
+
/**
- * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
- * @syncp: pointer to u64_stats_sync
- * @stats: stats that pkts and bytes count will be taken from
- * @pkts: packets stats counter
- * @bytes: bytes stats counter
+ * ice_fetch_u64_tx_stats - get Tx stats from a ring
+ * @ring: the Tx ring to copy stats from
+ * @copy: temporary storage for the ring statistics
*
- * This function fetches stats from the ring considering the atomic operations
- * that needs to be performed to read u64 values in 32 bit machine.
+ * Fetch the u64 stats from the ring under the u64_stats_fetch_begin()/retry()
+ * loop. This ensures each stat value is self-consistent, though not
+ * necessarily consistent w.r.t. other stats.
*/
-void
-ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
- struct ice_q_stats stats, u64 *pkts, u64 *bytes)
+static void ice_fetch_u64_tx_stats(struct ice_tx_ring *ring,
+ struct ice_vsi_tx_stats *copy)
{
+ struct ice_ring_stats *stats = ring->ring_stats;
unsigned int start;
do {
- start = u64_stats_fetch_begin(syncp);
- *pkts = stats.pkts;
- *bytes = stats.bytes;
- } while (u64_stats_fetch_retry(syncp, start));
+ start = u64_stats_fetch_begin(&stats->syncp);
+ copy->pkts = u64_stats_read(&stats->pkts);
+ copy->bytes = u64_stats_read(&stats->bytes);
+ copy->tx_restart_q = u64_stats_read(&stats->tx_restart_q);
+ copy->tx_busy = u64_stats_read(&stats->tx_busy);
+ copy->tx_linearize = u64_stats_read(&stats->tx_linearize);
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+}
+
+/**
+ * ice_fetch_u64_rx_stats - get Rx stats from a ring
+ * @ring: the Rx ring to copy stats from
+ * @copy: temporary storage for the ring statistics
+ *
+ * Fetch the u64 stats from the ring under the u64_stats_fetch_begin()/retry()
+ * loop. This ensures each stat value is self-consistent, though not
+ * necessarily consistent w.r.t. other stats.
+ */
+static void ice_fetch_u64_rx_stats(struct ice_rx_ring *ring,
+ struct ice_vsi_rx_stats *copy)
+{
+ struct ice_ring_stats *stats = ring->ring_stats;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ copy->pkts = u64_stats_read(&stats->pkts);
+ copy->bytes = u64_stats_read(&stats->bytes);
+ copy->rx_non_eop_descs =
+ u64_stats_read(&stats->rx_non_eop_descs);
+ copy->rx_page_failed = u64_stats_read(&stats->rx_page_failed);
+ copy->rx_buf_failed = u64_stats_read(&stats->rx_buf_failed);
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
}
/**
* ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
* @vsi: the VSI to be updated
- * @vsi_stats: the stats struct to be updated
+ * @vsi_stats: accumulated stats for this VSI
* @rings: rings to work on
* @count: number of rings
*/
-static void
-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
- struct rtnl_link_stats64 *vsi_stats,
- struct ice_tx_ring **rings, u16 count)
+static void ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
+ struct ice_vsi_tx_stats *vsi_stats,
+ struct ice_tx_ring **rings, u16 count)
{
+ struct ice_vsi_tx_stats copy = {};
u16 i;
for (i = 0; i < count; i++) {
struct ice_tx_ring *ring;
- u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
if (!ring || !ring->ring_stats)
continue;
- ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
- ring->ring_stats->stats, &pkts,
- &bytes);
- vsi_stats->tx_packets += pkts;
- vsi_stats->tx_bytes += bytes;
- vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
- vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
- vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
+
+ ice_fetch_u64_tx_stats(ring, &copy);
+
+ vsi_stats->pkts += copy.pkts;
+ vsi_stats->bytes += copy.bytes;
+ vsi_stats->tx_restart_q += copy.tx_restart_q;
+ vsi_stats->tx_busy += copy.tx_busy;
+ vsi_stats->tx_linearize += copy.tx_linearize;
+ }
+}
+
+/**
+ * ice_update_vsi_rx_ring_stats - Update VSI Rx ring stats counters
+ * @vsi: the VSI to be updated
+ * @vsi_stats: accumulated stats for this VSI
+ * @rings: rings to work on
+ * @count: number of rings
+ */
+static void ice_update_vsi_rx_ring_stats(struct ice_vsi *vsi,
+ struct ice_vsi_rx_stats *vsi_stats,
+ struct ice_rx_ring **rings, u16 count)
+{
+ struct ice_vsi_rx_stats copy = {};
+ u16 i;
+
+ for (i = 0; i < count; i++) {
+ struct ice_rx_ring *ring;
+
+ ring = READ_ONCE(rings[i]);
+ if (!ring || !ring->ring_stats)
+ continue;
+
+ ice_fetch_u64_rx_stats(ring, &copy);
+
+ vsi_stats->pkts += copy.pkts;
+ vsi_stats->bytes += copy.bytes;
+ vsi_stats->rx_non_eop_descs += copy.rx_non_eop_descs;
+ vsi_stats->rx_page_failed += copy.rx_page_failed;
+ vsi_stats->rx_buf_failed += copy.rx_buf_failed;
}
}
@@ -6886,50 +6960,34 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *net_stats, *stats_prev;
- struct rtnl_link_stats64 *vsi_stats;
+ struct ice_vsi_tx_stats tx_stats = {};
+ struct ice_vsi_rx_stats rx_stats = {};
struct ice_pf *pf = vsi->back;
- u64 pkts, bytes;
- int i;
-
- vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
- if (!vsi_stats)
- return;
-
- /* reset non-netdev (extended) stats */
- vsi->tx_restart = 0;
- vsi->tx_busy = 0;
- vsi->tx_linearize = 0;
- vsi->rx_buf_failed = 0;
- vsi->rx_page_failed = 0;
rcu_read_lock();
/* update Tx rings counters */
- ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
+ ice_update_vsi_tx_ring_stats(vsi, &tx_stats, vsi->tx_rings,
vsi->num_txq);
/* update Rx rings counters */
- ice_for_each_rxq(vsi, i) {
- struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
- struct ice_ring_stats *ring_stats;
-
- ring_stats = ring->ring_stats;
- ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
- ring_stats->stats, &pkts,
- &bytes);
- vsi_stats->rx_packets += pkts;
- vsi_stats->rx_bytes += bytes;
- vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
- vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
- }
+ ice_update_vsi_rx_ring_stats(vsi, &rx_stats, vsi->rx_rings,
+ vsi->num_rxq);
/* update XDP Tx rings counters */
if (ice_is_xdp_ena_vsi(vsi))
- ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
+ ice_update_vsi_tx_ring_stats(vsi, &tx_stats, vsi->xdp_rings,
vsi->num_xdp_txq);
rcu_read_unlock();
+ /* Save non-netdev (extended) stats */
+ vsi->tx_restart = tx_stats.tx_restart_q;
+ vsi->tx_busy = tx_stats.tx_busy;
+ vsi->tx_linearize = tx_stats.tx_linearize;
+ vsi->rx_buf_failed = rx_stats.rx_buf_failed;
+ vsi->rx_page_failed = rx_stats.rx_page_failed;
+
net_stats = &vsi->net_stats;
stats_prev = &vsi->net_stats_prev;
@@ -6939,18 +6997,16 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
* let's skip this round.
*/
if (likely(pf->stat_prev_loaded)) {
- net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
- net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
- net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
- net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
+ net_stats->tx_packets += tx_stats.pkts - stats_prev->tx_packets;
+ net_stats->tx_bytes += tx_stats.bytes - stats_prev->tx_bytes;
+ net_stats->rx_packets += rx_stats.pkts - stats_prev->rx_packets;
+ net_stats->rx_bytes += rx_stats.bytes - stats_prev->rx_bytes;
}
- stats_prev->tx_packets = vsi_stats->tx_packets;
- stats_prev->tx_bytes = vsi_stats->tx_bytes;
- stats_prev->rx_packets = vsi_stats->rx_packets;
- stats_prev->rx_bytes = vsi_stats->rx_bytes;
-
- kfree(vsi_stats);
+ stats_prev->tx_packets = tx_stats.pkts;
+ stats_prev->tx_bytes = tx_stats.bytes;
+ stats_prev->rx_packets = rx_stats.pkts;
+ stats_prev->rx_bytes = rx_stats.bytes;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 272683001476..22c3986b910a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1296,6 +1296,38 @@ void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
if (pf->hw.reset_ongoing)
return;
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
+ int pin, err;
+
+ if (!test_bit(ICE_FLAG_DPLL, pf->flags))
+ return;
+
+ mutex_lock(&pf->dplls.lock);
+ for (pin = 0; pin < ICE_SYNCE_CLK_NUM; pin++) {
+ enum ice_synce_clk clk_pin;
+ bool active;
+ u8 port_num;
+
+ port_num = ptp_port->port_num;
+ clk_pin = (enum ice_synce_clk)pin;
+ err = ice_tspll_bypass_mux_active_e825c(hw,
+ port_num,
+ &active,
+ clk_pin);
+ if (WARN_ON_ONCE(err)) {
+ mutex_unlock(&pf->dplls.lock);
+ return;
+ }
+
+ err = ice_tspll_cfg_synce_ethdiv_e825c(hw, clk_pin);
+ if (active && WARN_ON_ONCE(err)) {
+ mutex_unlock(&pf->dplls.lock);
+ return;
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+ }
+
switch (hw->mac_type) {
case ICE_MAC_E810:
case ICE_MAC_E830:
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 35680dbe4a7f..61c0a0d93ea8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -5903,7 +5903,14 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
*base_idx = SI_REF1P;
else
ret = -ENODEV;
-
+ break;
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ *pin_num = ICE_SYNCE_CLK_NUM;
+ *base_idx = 0;
+ ret = 0;
break;
default:
ret = -ENODEV;
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.c b/drivers/net/ethernet/intel/ice/ice_tspll.c
index 66320a4ab86f..fd4b58eb9bc0 100644
--- a/drivers/net/ethernet/intel/ice/ice_tspll.c
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.c
@@ -624,3 +624,220 @@ int ice_tspll_init(struct ice_hw *hw)
return err;
}
+
+/**
+ * ice_tspll_bypass_mux_active_e825c - check if the given port is set active
+ * @hw: Pointer to the HW struct
+ * @port: Number of the port
+ * @active: Output flag showing if port is active
+ * @output: Output pin; E825C has two
+ *
+ * Check if the given port is selected as the recovered clock source for the
+ * given output.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+int ice_tspll_bypass_mux_active_e825c(struct ice_hw *hw, u8 port, bool *active,
+ enum ice_synce_clk output)
+{
+ u8 active_clk;
+ u32 val;
+ int err;
+
+ switch (output) {
+ case ICE_SYNCE_CLK0:
+ err = ice_read_cgu_reg(hw, ICE_CGU_R10, &val);
+ if (err)
+ return err;
+ active_clk = FIELD_GET(ICE_CGU_R10_SYNCE_S_REF_CLK, val);
+ break;
+ case ICE_SYNCE_CLK1:
+ err = ice_read_cgu_reg(hw, ICE_CGU_R11, &val);
+ if (err)
+ return err;
+ active_clk = FIELD_GET(ICE_CGU_R11_SYNCE_S_BYP_CLK, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (active_clk == port % hw->ptp.ports_per_phy +
+ ICE_CGU_BYPASS_MUX_OFFSET_E825C)
+ *active = true;
+ else
+ *active = false;
+
+ return 0;
+}
+
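For example, assuming four ports per PHY (hw->ptp.ports_per_phy == 4), port 5 maps to 5 % 4 + ICE_CGU_BYPASS_MUX_OFFSET_E825C = 4, so *active is set only when the selected-clock field reads 4.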
+/**
+ * ice_tspll_cfg_bypass_mux_e825c - configure reference clock mux
+ * @hw: Pointer to the HW struct
+ * @ena: true to enable the reference, false to disable it
+ * @port_num: Number of the port
+ * @output: Output pin; E825C has two
+ *
+ * Set reference clock source and output clock selection.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+int ice_tspll_cfg_bypass_mux_e825c(struct ice_hw *hw, bool ena, u32 port_num,
+ enum ice_synce_clk output)
+{
+ u8 first_mux;
+ int err;
+ u32 r10;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R10, &r10);
+ if (err)
+ return err;
+
+ if (!ena)
+ first_mux = ICE_CGU_NET_REF_CLK0;
+ else
+ first_mux = port_num + ICE_CGU_BYPASS_MUX_OFFSET_E825C;
+
+ r10 &= ~(ICE_CGU_R10_SYNCE_DCK_RST | ICE_CGU_R10_SYNCE_DCK2_RST);
+
+ switch (output) {
+ case ICE_SYNCE_CLK0:
+ r10 &= ~(ICE_CGU_R10_SYNCE_ETHCLKO_SEL |
+ ICE_CGU_R10_SYNCE_ETHDIV_LOAD |
+ ICE_CGU_R10_SYNCE_S_REF_CLK);
+ r10 |= FIELD_PREP(ICE_CGU_R10_SYNCE_S_REF_CLK, first_mux);
+ r10 |= FIELD_PREP(ICE_CGU_R10_SYNCE_ETHCLKO_SEL,
+ ICE_CGU_REF_CLK_BYP0_DIV);
+ break;
+ case ICE_SYNCE_CLK1:
+ {
+ u32 val;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R11, &val);
+ if (err)
+ return err;
+ val &= ~ICE_CGU_R11_SYNCE_S_BYP_CLK;
+ val |= FIELD_PREP(ICE_CGU_R11_SYNCE_S_BYP_CLK, first_mux);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R11, val);
+ if (err)
+ return err;
+ r10 &= ~(ICE_CGU_R10_SYNCE_CLKODIV_LOAD |
+ ICE_CGU_R10_SYNCE_CLKO_SEL);
+ r10 |= FIELD_PREP(ICE_CGU_R10_SYNCE_CLKO_SEL,
+ ICE_CGU_REF_CLK_BYP1_DIV);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R10, r10);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_tspll_get_div_e825c - get the divider for the given speed
+ * @link_speed: link speed of the port
+ * @divider: output value, calculated divider
+ *
+ * Get CGU divider value based on the link speed.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int ice_tspll_get_div_e825c(u16 link_speed, unsigned int *divider)
+{
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ case ICE_AQ_LINK_SPEED_50GB:
+ case ICE_AQ_LINK_SPEED_25GB:
+ *divider = 10;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ case ICE_AQ_LINK_SPEED_10GB:
+ *divider = 4;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ case ICE_AQ_LINK_SPEED_2500MB:
+ case ICE_AQ_LINK_SPEED_1000MB:
+ *divider = 2;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ *divider = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
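For example, a 10G link maps to divider 4, which ice_tspll_cfg_synce_ethdiv_e825c() below programs as a *_DIV_M1 field value of 3, since the hardware field holds the divider minus one.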
+/**
+ * ice_tspll_cfg_synce_ethdiv_e825c - set the divider on the mux
+ * @hw: Pointer to the HW struct
+ * @output: Output pin; E825C has two
+ *
+ * Set the correct CGU divider for RCLKA or RCLKB.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+int ice_tspll_cfg_synce_ethdiv_e825c(struct ice_hw *hw,
+ enum ice_synce_clk output)
+{
+ unsigned int divider;
+ u16 link_speed;
+ u32 val;
+ int err;
+
+ link_speed = hw->port_info->phy.link_info.link_speed;
+ if (!link_speed)
+ return 0;
+
+ err = ice_tspll_get_div_e825c(link_speed, &divider);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R10, &val);
+ if (err)
+ return err;
+
+ /* programmable divider value (from 2 to 16) minus 1 for ETHCLKOUT */
+ switch (output) {
+ case ICE_SYNCE_CLK0:
+ val &= ~(ICE_CGU_R10_SYNCE_ETHDIV_M1 |
+ ICE_CGU_R10_SYNCE_ETHDIV_LOAD);
+ val |= FIELD_PREP(ICE_CGU_R10_SYNCE_ETHDIV_M1, divider - 1);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R10, val);
+ if (err)
+ return err;
+ val |= ICE_CGU_R10_SYNCE_ETHDIV_LOAD;
+ break;
+ case ICE_SYNCE_CLK1:
+ val &= ~(ICE_CGU_R10_SYNCE_CLKODIV_M1 |
+ ICE_CGU_R10_SYNCE_CLKODIV_LOAD);
+ val |= FIELD_PREP(ICE_CGU_R10_SYNCE_CLKODIV_M1, divider - 1);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R10, val);
+ if (err)
+ return err;
+ val |= ICE_CGU_R10_SYNCE_CLKODIV_LOAD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R10, val);
+ if (err)
+ return err;
+
+ return 0;
+}
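Note the two-step write in both branches: the divider value is committed first, then the corresponding *_LOAD bit is set in a second write so the CGU latches the new value.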
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.h b/drivers/net/ethernet/intel/ice/ice_tspll.h
index c0b1232cc07c..d650867004d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_tspll.h
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.h
@@ -21,11 +21,22 @@ struct ice_tspll_params_e82x {
u32 frac_n_div;
};
+#define ICE_CGU_NET_REF_CLK0 0x0
+#define ICE_CGU_REF_CLK_BYP0 0x5
+#define ICE_CGU_REF_CLK_BYP0_DIV 0x0
+#define ICE_CGU_REF_CLK_BYP1 0x4
+#define ICE_CGU_REF_CLK_BYP1_DIV 0x1
+
#define ICE_TSPLL_CK_REFCLKFREQ_E825 0x1F
#define ICE_TSPLL_NDIVRATIO_E825 5
#define ICE_TSPLL_FBDIV_INTGR_E825 256
int ice_tspll_cfg_pps_out_e825c(struct ice_hw *hw, bool enable);
int ice_tspll_init(struct ice_hw *hw);
-
+int ice_tspll_bypass_mux_active_e825c(struct ice_hw *hw, u8 port, bool *active,
+ enum ice_synce_clk output);
+int ice_tspll_cfg_bypass_mux_e825c(struct ice_hw *hw, bool ena, u32 port_num,
+ enum ice_synce_clk output);
+int ice_tspll_cfg_synce_ethdiv_e825c(struct ice_hw *hw,
+ enum ice_synce_clk output);
#endif /* _ICE_TSPLL_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index ad76768a4232..6fa201a14f51 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -379,7 +379,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
!test_bit(ICE_VSI_DOWN, vsi->state)) {
netif_tx_wake_queue(txring_txq(tx_ring));
- ++tx_ring->ring_stats->tx_stats.restart_q;
+ ice_stats_inc(tx_ring->ring_stats, tx_restart_q);
}
}
@@ -499,7 +499,7 @@ int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->ring_stats->tx_stats.prev_pkt = -1;
+ tx_ring->ring_stats->tx.prev_pkt = -1;
return 0;
err:
@@ -574,7 +574,6 @@ rx_skip_free:
PAGE_SIZE);
memset(rx_ring->desc, 0, size);
- rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -849,7 +848,7 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
addr = libeth_rx_alloc(&fq, ntu);
if (addr == DMA_MAPPING_ERROR) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ ice_stats_inc(rx_ring->ring_stats, rx_page_failed);
break;
}
@@ -863,7 +862,7 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
addr = libeth_rx_alloc(&hdr_fq, ntu);
if (addr == DMA_MAPPING_ERROR) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ ice_stats_inc(rx_ring->ring_stats, rx_page_failed);
libeth_rx_recycle_slow(fq.fqes[ntu].netmem);
break;
@@ -1045,7 +1044,7 @@ construct_skb:
/* exit if we failed to retrieve a buffer */
if (!skb) {
libeth_xdp_return_buff_slow(xdp);
- rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
+ ice_stats_inc(rx_ring->ring_stats, rx_buf_failed);
continue;
}
@@ -1087,35 +1086,36 @@ static void __ice_update_sample(struct ice_q_vector *q_vector,
struct dim_sample *sample,
bool is_tx)
{
- u64 packets = 0, bytes = 0;
+ u64 total_packets = 0, total_bytes = 0, pkts, bytes;
if (is_tx) {
struct ice_tx_ring *tx_ring;
ice_for_each_tx_ring(tx_ring, *rc) {
- struct ice_ring_stats *ring_stats;
-
- ring_stats = tx_ring->ring_stats;
- if (!ring_stats)
+ if (!tx_ring->ring_stats)
continue;
- packets += ring_stats->stats.pkts;
- bytes += ring_stats->stats.bytes;
+
+ ice_fetch_tx_ring_stats(tx_ring, &pkts, &bytes);
+
+ total_packets += pkts;
+ total_bytes += bytes;
}
} else {
struct ice_rx_ring *rx_ring;
ice_for_each_rx_ring(rx_ring, *rc) {
- struct ice_ring_stats *ring_stats;
-
- ring_stats = rx_ring->ring_stats;
- if (!ring_stats)
+ if (!rx_ring->ring_stats)
continue;
- packets += ring_stats->stats.pkts;
- bytes += ring_stats->stats.bytes;
+
+ ice_fetch_rx_ring_stats(rx_ring, &pkts, &bytes);
+
+ total_packets += pkts;
+ total_bytes += bytes;
}
}
- dim_update_sample(q_vector->total_events, packets, bytes, sample);
+ dim_update_sample(q_vector->total_events,
+ total_packets, total_bytes, sample);
sample->comp_ctr = 0;
/* if dim settings get stale, like when not updated for 1
@@ -1362,7 +1362,7 @@ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_tx_start_queue(txring_txq(tx_ring));
- ++tx_ring->ring_stats->tx_stats.restart_q;
+ ice_stats_inc(tx_ring->ring_stats, tx_restart_q);
return 0;
}
@@ -2156,15 +2156,12 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ice_trace(xmit_frame_ring, tx_ring, skb);
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto out_drop;
-
count = ice_xmit_desc_count(skb);
if (ice_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
count = ice_txd_use_count(skb->len);
- tx_ring->ring_stats->tx_stats.tx_linearize++;
+ ice_stats_inc(tx_ring->ring_stats, tx_linearize);
}
/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
@@ -2175,7 +2172,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
*/
if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
ICE_DESCS_FOR_CTX_DESC)) {
- tx_ring->ring_stats->tx_stats.tx_busy++;
+ ice_stats_inc(tx_ring->ring_stats, tx_busy);
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index e440c55d9e9f..b6547e1b7c42 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -129,34 +129,65 @@ struct ice_tx_offload_params {
u8 header_len;
};
-struct ice_q_stats {
- u64 pkts;
- u64 bytes;
-};
-
-struct ice_txq_stats {
- u64 restart_q;
- u64 tx_busy;
- u64 tx_linearize;
- int prev_pkt; /* negative if no pending Tx descriptors */
-};
-
-struct ice_rxq_stats {
- u64 non_eop_descs;
- u64 alloc_page_failed;
- u64 alloc_buf_failed;
-};
-
struct ice_ring_stats {
struct rcu_head rcu; /* to avoid race on free */
- struct ice_q_stats stats;
struct u64_stats_sync syncp;
- union {
- struct ice_txq_stats tx_stats;
- struct ice_rxq_stats rx_stats;
- };
+ struct_group(stats,
+ u64_stats_t pkts;
+ u64_stats_t bytes;
+ union {
+ struct_group(tx,
+ u64_stats_t tx_restart_q;
+ u64_stats_t tx_busy;
+ u64_stats_t tx_linearize;
+ /* negative if no pending Tx descriptors */
+ int prev_pkt;
+ );
+ struct_group(rx,
+ u64_stats_t rx_non_eop_descs;
+ u64_stats_t rx_page_failed;
+ u64_stats_t rx_buf_failed;
+ );
+ };
+ );
};
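struct_group() emits the members both directly and under a named mirror, so ring_stats->pkts and the aggregate ring_stats->stats alias the same storage. A sketch (not part of this patch) of what the grouping enables:

	/* zero every counter in the group with one call; a real caller
	 * would do this only while the ring is quiesced */
	memset(&ring_stats->stats, 0, sizeof(ring_stats->stats));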
+/**
+ * ice_stats_read - Read a single ring stat value
+ * @stats: pointer to the ring_stats structure for a queue
+ * @member: the ice_ring_stats member to read
+ *
+ * Shorthand for reading a single 64-bit stat value from struct
+ * ice_ring_stats.
+ *
+ * Return: the value of the requested stat.
+ */
+#define ice_stats_read(stats, member) ({ \
+ struct ice_ring_stats *__stats = (stats); \
+ unsigned int start; \
+ u64 val; \
+ do { \
+ start = u64_stats_fetch_begin(&__stats->syncp); \
+ val = u64_stats_read(&__stats->member); \
+ } while (u64_stats_fetch_retry(&__stats->syncp, start)); \
+ val; \
+})
+
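For instance, the hang-check path in ice_main.c above now reads the Tx packet counter as:

	packets = ice_stats_read(ring_stats, pkts) & INT_MAX;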
+/**
+ * ice_stats_inc - Increment a single ring stat value
+ * @stats: pointer to the ring_stats structure for a queue
+ * @member: the ice_ring_stats member to increment
+ *
+ * Shorthand for incrementing a single 64-bit stat value in struct
+ * ice_ring_stats.
+ */
+#define ice_stats_inc(stats, member) do { \
+ struct ice_ring_stats *__stats = (stats); \
+ u64_stats_update_begin(&__stats->syncp); \
+ u64_stats_inc(&__stats->member); \
+ u64_stats_update_end(&__stats->syncp); \
+} while (0)
+
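ice_stats_inc() likewise replaces the open-coded per-queue increments; e.g. the Tx busy path changes from

	tx_ring->ring_stats->tx_stats.tx_busy++;

to an increment performed inside the syncp write section:

	ice_stats_inc(tx_ring->ring_stats, tx_busy);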
enum ice_ring_state_t {
ICE_TX_XPS_INIT_DONE,
ICE_TX_NBITS,
@@ -236,34 +267,49 @@ struct ice_tstamp_ring {
} ____cacheline_internodealigned_in_smp;
struct ice_rx_ring {
- /* CL1 - 1st cacheline starts here */
+ __cacheline_group_begin_aligned(read_mostly);
void *desc; /* Descriptor ring memory */
struct page_pool *pp;
struct net_device *netdev; /* netdev ring maps to */
- struct ice_vsi *vsi; /* Backreference to associated VSI */
struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail;
- u16 q_index; /* Queue number of ring */
-
- u16 count; /* Number of descriptors */
- u16 reg_idx; /* HW register index of the ring */
- u16 next_to_alloc;
union {
struct libeth_fqe *rx_fqes;
struct xdp_buff **xdp_buf;
};
- /* CL2 - 2nd cacheline starts here */
- struct libeth_fqe *hdr_fqes;
+ u16 count; /* Number of descriptors */
+ u8 ptp_rx;
+
+ u8 flags;
+#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
+#define ICE_RX_FLAGS_MULTIDEV BIT(3)
+#define ICE_RX_FLAGS_RING_GCS BIT(4)
+
+ u32 truesize;
+
struct page_pool *hdr_pp;
+ struct libeth_fqe *hdr_fqes;
+
+ struct bpf_prog *xdp_prog;
+ struct ice_tx_ring *xdp_ring;
+ struct xsk_buff_pool *xsk_pool;
+
+ /* stats structs */
+ struct ice_ring_stats *ring_stats;
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
+
+ u32 hdr_truesize;
+
+ struct xdp_rxq_info xdp_rxq;
+ __cacheline_group_end_aligned(read_mostly);
+ __cacheline_group_begin_aligned(read_write);
union {
struct libeth_xdp_buff_stash xdp;
struct libeth_xdp_buff *xsk;
};
-
- /* CL3 - 3rd cacheline starts here */
union {
struct ice_pkt_ctx pkt_ctx;
struct {
@@ -271,75 +317,78 @@ struct ice_rx_ring {
__be16 vlan_proto;
};
};
- struct bpf_prog *xdp_prog;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
+ __cacheline_group_end_aligned(read_write);
- u32 hdr_truesize;
- u32 truesize;
-
- /* stats structs */
- struct ice_ring_stats *ring_stats;
-
+ __cacheline_group_begin_aligned(cold);
struct rcu_head rcu; /* to avoid race on free */
- /* CL4 - 4th cacheline starts here */
+ struct ice_vsi *vsi; /* Backreference to associated VSI */
struct ice_channel *ch;
- struct ice_tx_ring *xdp_ring;
- struct ice_rx_ring *next; /* pointer to next ring in q_vector */
- struct xsk_buff_pool *xsk_pool;
- u16 rx_hdr_len;
- u16 rx_buf_len;
+
dma_addr_t dma; /* physical address of ring */
+ u16 q_index; /* Queue number of ring */
+ u16 reg_idx; /* HW register index of the ring */
u8 dcb_tc; /* Traffic class of ring */
- u8 ptp_rx;
-#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
-#define ICE_RX_FLAGS_MULTIDEV BIT(3)
-#define ICE_RX_FLAGS_RING_GCS BIT(4)
- u8 flags;
- /* CL5 - 5th cacheline starts here */
- struct xdp_rxq_info xdp_rxq;
+
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ __cacheline_group_end_aligned(cold);
} ____cacheline_internodealigned_in_smp;
struct ice_tx_ring {
- /* CL1 - 1st cacheline starts here */
- struct ice_tx_ring *next; /* pointer to next ring in q_vector */
+ __cacheline_group_begin_aligned(read_mostly);
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
u8 __iomem *tail;
struct ice_tx_buf *tx_buf;
+
struct ice_q_vector *q_vector; /* Backreference to associated vector */
struct net_device *netdev; /* netdev ring maps to */
struct ice_vsi *vsi; /* Backreference to associated VSI */
- /* CL2 - 2nd cacheline starts here */
- dma_addr_t dma; /* physical address of ring */
- struct xsk_buff_pool *xsk_pool;
- u16 next_to_use;
- u16 next_to_clean;
- u16 q_handle; /* Queue handle per TC */
- u16 reg_idx; /* HW register index of the ring */
+
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
- u16 xdp_tx_active;
+
+ u8 flags;
+#define ICE_TX_FLAGS_RING_XDP BIT(0)
+#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
+#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
+#define ICE_TX_FLAGS_TXTIME BIT(3)
+
+ struct xsk_buff_pool *xsk_pool;
+
/* stats structs */
struct ice_ring_stats *ring_stats;
- /* CL3 - 3rd cacheline starts here */
+ struct ice_tx_ring *next; /* pointer to next ring in q_vector */
+
+ struct ice_tstamp_ring *tstamp_ring;
+ struct ice_ptp_tx *tx_tstamps;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u16 xdp_tx_active;
+ spinlock_t tx_lock;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
struct rcu_head rcu; /* to avoid race on free */
DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
struct ice_channel *ch;
- struct ice_ptp_tx *tx_tstamps;
- spinlock_t tx_lock;
- u32 txq_teid; /* Added Tx queue TEID */
- /* CL4 - 4th cacheline starts here */
- struct ice_tstamp_ring *tstamp_ring;
-#define ICE_TX_FLAGS_RING_XDP BIT(0)
-#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
-#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
-#define ICE_TX_FLAGS_TXTIME BIT(3)
- u8 flags;
+
+ dma_addr_t dma; /* physical address of ring */
+ u16 q_handle; /* Queue handle per TC */
+ u16 reg_idx; /* HW register index of the ring */
u8 dcb_tc; /* Traffic class of ring */
+
u16 quanta_prof_id;
+ u32 txq_teid; /* Added Tx queue TEID */
+ __cacheline_group_end_aligned(cold);
} ____cacheline_internodealigned_in_smp;
static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 956da38d63b0..e695a664e53d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -20,9 +20,6 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
rx_ring->next_to_use = val;
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = val;
-
/* QRX_TAIL will be updated with any tail value, but hardware ignores
* the lower 3 bits. This makes it so we only bump tail on meaningful
* boundaries. Also, this allows us to bump tail on intervals of 8 up to
@@ -480,7 +477,7 @@ dma_unmap:
return ICE_XDP_CONSUMED;
busy:
- xdp_ring->ring_stats->tx_stats.tx_busy++;
+ ice_stats_inc(xdp_ring->ring_stats, tx_busy);
return ICE_XDP_CONSUMED;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 6a3f10f7a53f..f17990b68b62 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -38,7 +38,7 @@ ice_is_non_eop(const struct ice_rx_ring *rx_ring,
if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
return false;
- rx_ring->ring_stats->rx_stats.non_eop_descs++;
+ ice_stats_inc(rx_ring->ring_stats, rx_non_eop_descs);
return true;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 6a2ec8389a8f..1e82f4c40b32 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -349,6 +349,12 @@ enum ice_clk_src {
NUM_ICE_CLK_SRC
};
+enum ice_synce_clk {
+ ICE_SYNCE_CLK0,
+ ICE_SYNCE_CLK1,
+ ICE_SYNCE_CLK_NUM
+};
+
struct ice_ts_func_info {
/* Function specific info */
enum ice_tspll_freq time_ref;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 989ff1fd9110..953e68ed0f9a 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -497,7 +497,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
return ICE_XDP_TX;
busy:
- xdp_ring->ring_stats->tx_stats.tx_busy++;
+ ice_stats_inc(xdp_ring->ring_stats, tx_busy);
return ICE_XDP_CONSUMED;
}
@@ -659,7 +659,7 @@ construct_skb:
xsk_buff_free(first);
first = NULL;
- rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
+ ice_stats_inc(rx_ring->ring_stats, rx_buf_failed);
continue;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 1bf7934d4e28..b206fba092c8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -8,6 +8,8 @@
struct idpf_adapter;
struct idpf_vport;
struct idpf_vport_max_q;
+struct idpf_q_vec_rsrc;
+struct idpf_rss_data;
#include <net/pkt_sched.h>
#include <linux/aer.h>
@@ -201,7 +203,8 @@ struct idpf_vport_max_q {
struct idpf_reg_ops {
void (*ctlq_reg_init)(struct idpf_adapter *adapter,
struct idpf_ctlq_create_info *cq);
- int (*intr_reg_init)(struct idpf_vport *vport);
+ int (*intr_reg_init)(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
void (*reset_reg_init)(struct idpf_adapter *adapter);
void (*trigger_reset)(struct idpf_adapter *adapter,
@@ -288,54 +291,88 @@ struct idpf_fsteer_fltr {
};
/**
- * struct idpf_vport - Handle for netdevices and queue resources
- * @num_txq: Number of allocated TX queues
- * @num_complq: Number of allocated completion queues
+ * struct idpf_q_vec_rsrc - handle for queue and vector resources
+ * @dev: device pointer for DMA mapping
+ * @q_vectors: array of queue vectors
+ * @q_vector_idxs: starting index of queue vectors
+ * @num_q_vectors: number of IRQ vectors allocated
+ * @noirq_v_idx: ID of the NOIRQ vector
+ * @noirq_dyn_ctl_ena: value to write to noirq_dyn_ctl to enable it
+ * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
+ * @txq_grps: array of TX queue groups
* @txq_desc_count: TX queue descriptor count
- * @complq_desc_count: Completion queue descriptor count
- * @compln_clean_budget: Work budget for completion clean
- * @num_txq_grp: Number of TX queue groups
- * @txq_grps: Array of TX queue groups
- * @txq_model: Split queue or single queue queuing model
- * @txqs: Used only in hotpath to get to the right queue very fast
- * @crc_enable: Enable CRC insertion offload
- * @xdpsq_share: whether XDPSQ sharing is enabled
- * @num_xdp_txq: number of XDPSQs
+ * @complq_desc_count: completion queue descriptor count
+ * @txq_model: split queue or single queue queuing model
+ * @num_txq: number of allocated TX queues
+ * @num_complq: number of allocated completion queues
+ * @num_txq_grp: number of TX queue groups
* @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
- * @xdp_prog: installed XDP program
- * @num_rxq: Number of allocated RX queues
- * @num_bufq: Number of allocated buffer queues
+ * @num_rxq_grp: number of RX queue groups
+ * @rxq_model: splitq queue or single queue queuing model
+ * @rxq_grps: array of RX queue groups; the group count times the RX queues
+ * per group yields the total number of RX queues
+ * @num_rxq: number of allocated RX queues
+ * @num_bufq: number of allocated buffer queues
* @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
* to complete all buffer descriptors for all buffer queues in
* the worst case.
- * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
- * @bufq_desc_count: Buffer queue descriptor count
- * @num_rxq_grp: Number of RX queues in a group
- * @rxq_grps: Total number of RX groups. Number of groups * number of RX per
- * group will yield total number of RX queues.
- * @rxq_model: Splitq queue or single queue queuing model
- * @rx_ptype_lkup: Lookup table for ptypes on RX
+ * @bufq_desc_count: buffer queue descriptor count
+ * @num_bufqs_per_qgrp: buffer queues per RX queue in a given grouping
+ * @base_rxd: true if the driver should use base descriptors instead of flex
+ */
+struct idpf_q_vec_rsrc {
+ struct device *dev;
+ struct idpf_q_vector *q_vectors;
+ u16 *q_vector_idxs;
+ u16 num_q_vectors;
+ u16 noirq_v_idx;
+ u32 noirq_dyn_ctl_ena;
+ void __iomem *noirq_dyn_ctl;
+
+ struct idpf_txq_group *txq_grps;
+ u32 txq_desc_count;
+ u32 complq_desc_count;
+ u32 txq_model;
+ u16 num_txq;
+ u16 num_complq;
+ u16 num_txq_grp;
+ u16 xdp_txq_offset;
+
+ u16 num_rxq_grp;
+ u32 rxq_model;
+ struct idpf_rxq_group *rxq_grps;
+ u16 num_rxq;
+ u16 num_bufq;
+ u32 rxq_desc_count;
+ u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+ u8 num_bufqs_per_qgrp;
+ bool base_rxd;
+};
+
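Call sites now reach queue and vector state through the vport's default resource group; e.g. idpf_ethtool.c below does:

	num_rxq = vport->dflt_qv_rsrc.num_rxq;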
+/**
+ * struct idpf_vport - Handle for netdevices and queue resources
+ * @dflt_qv_rsrc: contains default queue and vector resources
+ * @txqs: Used only in hotpath to get to the right queue very fast
+ * @num_txq: Number of allocated TX queues
+ * @num_xdp_txq: number of XDPSQs
+ * @xdpsq_share: whether XDPSQ sharing is enabled
+ * @xdp_prog: installed XDP program
* @vdev_info: IDC vport device info pointer
* @adapter: back pointer to associated adapter
* @netdev: Associated net_device. Each vport should have one and only one
* associated netdev.
* @flags: See enum idpf_vport_flags
- * @vport_type: Default SRIOV, SIOV, etc.
+ * @compln_clean_budget: Work budget for completion clean
* @vport_id: Device given vport identifier
+ * @vport_type: Default SRIOV, SIOV, etc.
* @idx: Software index in adapter vports struct
- * @default_vport: Use this vport if one isn't specified
- * @base_rxd: True if the driver should use base descriptors instead of flex
- * @num_q_vectors: Number of IRQ vectors allocated
- * @q_vectors: Array of queue vectors
- * @q_vector_idxs: Starting index of queue vectors
- * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
- * @noirq_dyn_ctl_ena: value to write to the above to enable it
- * @noirq_v_idx: ID of the NOIRQ vector
* @max_mtu: device given max possible MTU
* @default_mac_addr: device will give a default MAC to use
* @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
* @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
* @port_stats: per port csum, header split, and other offload stats
+ * @default_vport: Use this vport if one isn't specified
+ * @crc_enable: Enable CRC insertion offload
* @link_up: True if link is up
* @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
* @tstamp_config: The Tx tstamp config
@@ -343,57 +380,31 @@ struct idpf_fsteer_fltr {
* @tstamp_stats: Tx timestamping statistics
*/
struct idpf_vport {
- u16 num_txq;
- u16 num_complq;
- u32 txq_desc_count;
- u32 complq_desc_count;
- u32 compln_clean_budget;
- u16 num_txq_grp;
- struct idpf_txq_group *txq_grps;
- u32 txq_model;
+ struct idpf_q_vec_rsrc dflt_qv_rsrc;
struct idpf_tx_queue **txqs;
- bool crc_enable;
-
- bool xdpsq_share;
+ u16 num_txq;
u16 num_xdp_txq;
- u16 xdp_txq_offset;
+ bool xdpsq_share;
struct bpf_prog *xdp_prog;
- u16 num_rxq;
- u16 num_bufq;
- u32 rxq_desc_count;
- u8 num_bufqs_per_qgrp;
- u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
- u16 num_rxq_grp;
- struct idpf_rxq_group *rxq_grps;
- u32 rxq_model;
- struct libeth_rx_pt *rx_ptype_lkup;
-
struct iidc_rdma_vport_dev_info *vdev_info;
struct idpf_adapter *adapter;
struct net_device *netdev;
DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
- u16 vport_type;
+ u32 compln_clean_budget;
u32 vport_id;
+ u16 vport_type;
u16 idx;
- bool default_vport;
- bool base_rxd;
-
- u16 num_q_vectors;
- struct idpf_q_vector *q_vectors;
- u16 *q_vector_idxs;
-
- void __iomem *noirq_dyn_ctl;
- u32 noirq_dyn_ctl_ena;
- u16 noirq_v_idx;
u16 max_mtu;
u8 default_mac_addr[ETH_ALEN];
u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
- struct idpf_port_stats port_stats;
+ struct idpf_port_stats port_stats;
+ bool default_vport;
+ bool crc_enable;
bool link_up;
struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
@@ -550,10 +561,37 @@ struct idpf_vector_lifo {
};
/**
+ * struct idpf_queue_id_reg_chunk - individual queue ID and register chunk
+ * @qtail_reg_start: queue tail register offset
+ * @qtail_reg_spacing: queue tail register spacing
+ * @type: queue type of the queues in the chunk
+ * @start_queue_id: starting queue ID in the chunk
+ * @num_queues: number of queues in the chunk
+ */
+struct idpf_queue_id_reg_chunk {
+ u64 qtail_reg_start;
+ u32 qtail_reg_spacing;
+ u32 type;
+ u32 start_queue_id;
+ u32 num_queues;
+};
+
+/**
+ * struct idpf_queue_id_reg_info - queue ID and register chunk info received
+ * over the mailbox
+ * @num_chunks: number of chunks
+ * @queue_chunks: array of chunks
+ */
+struct idpf_queue_id_reg_info {
+ u16 num_chunks;
+ struct idpf_queue_id_reg_chunk *queue_chunks;
+};
+
+/**
* struct idpf_vport_config - Vport configuration data
* @user_config: see struct idpf_vport_user_config_data
* @max_q: Maximum possible queues
- * @req_qs_chunks: Queue chunk data for requested queues
+ * @qid_reg_info: Struct to store the queue ID and register info
* @mac_filter_list_lock: Lock to protect mac filters
* @flow_steer_list_lock: Lock to protect fsteer filters
* @flags: See enum idpf_vport_config_flags
@@ -561,7 +599,7 @@ struct idpf_vector_lifo {
struct idpf_vport_config {
struct idpf_vport_user_config_data user_config;
struct idpf_vport_max_q max_q;
- struct virtchnl2_add_queues *req_qs_chunks;
+ struct idpf_queue_id_reg_info qid_reg_info;
spinlock_t mac_filter_list_lock;
spinlock_t flow_steer_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
@@ -603,6 +641,8 @@ struct idpf_vc_xn_manager;
* @vport_params_reqd: Vport params requested
* @vport_params_recvd: Vport params received
* @vport_ids: Array of device given vport identifiers
+ * @singleq_pt_lkup: Lookup table for singleq RX ptypes
+ * @splitq_pt_lkup: Lookup table for splitq RX ptypes
* @vport_config: Vport config parameters
* @max_vports: Maximum vports that can be allocated
* @num_alloc_vports: Current number of vports allocated
@@ -661,6 +701,9 @@ struct idpf_adapter {
struct virtchnl2_create_vport **vport_params_recvd;
u32 *vport_ids;
+ struct libeth_rx_pt *singleq_pt_lkup;
+ struct libeth_rx_pt *splitq_pt_lkup;
+
struct idpf_vport_config **vport_config;
u16 max_vports;
u16 num_alloc_vports;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 3a04a6bd0d7c..a4625638cf3f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -70,11 +70,13 @@ static void idpf_mb_intr_reg_init(struct idpf_adapter *adapter)
/**
* idpf_intr_reg_init - Initialize interrupt registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*/
-static int idpf_intr_reg_init(struct idpf_vport *vport)
+static int idpf_intr_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
- int num_vecs = vport->num_q_vectors;
+ u16 num_vecs = rsrc->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
u32 rx_itr, tx_itr, val;
@@ -86,15 +88,15 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
if (!reg_vals)
return -ENOMEM;
- num_regs = idpf_get_reg_intr_vecs(vport, reg_vals);
+ num_regs = idpf_get_reg_intr_vecs(adapter, reg_vals);
if (num_regs < num_vecs) {
err = -EINVAL;
goto free_reg_vals;
}
for (i = 0; i < num_vecs; i++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[i];
- u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[i];
+ u16 vec_id = rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
struct idpf_intr_reg *intr = &q_vector->intr_reg;
u32 spacing;
@@ -123,12 +125,12 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
/* Data vector for NOIRQ queues */
- val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
- vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+ val = reg_vals[rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ rsrc->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
val = PF_GLINT_DYN_CTL_WB_ON_ITR_M | PF_GLINT_DYN_CTL_INTENA_MSK_M |
FIELD_PREP(PF_GLINT_DYN_CTL_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
- vport->noirq_dyn_ctl_ena = val;
+ rsrc->noirq_dyn_ctl_ena = val;
free_reg_vals:
kfree(reg_vals);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 2efa3c08aba5..1d78a621d65b 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -18,7 +18,7 @@ static u32 idpf_get_rx_ring_count(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- num_rxq = vport->num_rxq;
+ num_rxq = vport->dflt_qv_rsrc.num_rxq;
idpf_vport_ctrl_unlock(netdev);
return num_rxq;
@@ -503,7 +503,7 @@ static int idpf_set_rxfh(struct net_device *netdev,
}
if (test_bit(IDPF_VPORT_UP, np->state))
- err = idpf_config_rss(vport);
+ err = idpf_config_rss(vport, rss_data);
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
@@ -644,8 +644,8 @@ static void idpf_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
- ring->rx_pending = vport->rxq_desc_count;
- ring->tx_pending = vport->txq_desc_count;
+ ring->rx_pending = vport->dflt_qv_rsrc.rxq_desc_count;
+ ring->tx_pending = vport->dflt_qv_rsrc.txq_desc_count;
kring->tcp_data_split = idpf_vport_get_hsplit(vport);
@@ -669,8 +669,9 @@ static int idpf_set_ringparam(struct net_device *netdev,
{
struct idpf_vport_user_config_data *config_data;
u32 new_rx_count, new_tx_count;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
- int i, err = 0;
+ int err = 0;
u16 idx;
idpf_vport_ctrl_lock(netdev);
@@ -704,8 +705,9 @@ static int idpf_set_ringparam(struct net_device *netdev,
netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
new_tx_count);
- if (new_tx_count == vport->txq_desc_count &&
- new_rx_count == vport->rxq_desc_count &&
+ rsrc = &vport->dflt_qv_rsrc;
+ if (new_tx_count == rsrc->txq_desc_count &&
+ new_rx_count == rsrc->rxq_desc_count &&
kring->tcp_data_split == idpf_vport_get_hsplit(vport))
goto unlock_mutex;
@@ -724,10 +726,10 @@ static int idpf_set_ringparam(struct net_device *netdev,
/* Since we adjusted the RX completion queue count, the RX buffer queue
* descriptor count needs to be adjusted as well
*/
- for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
- vport->bufq_desc_count[i] =
+ for (unsigned int i = 0; i < rsrc->num_bufqs_per_qgrp; i++)
+ rsrc->bufq_desc_count[i] =
IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
- vport->num_bufqs_per_qgrp);
+ rsrc->num_bufqs_per_qgrp);
err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);
@@ -1104,7 +1106,7 @@ static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
static void idpf_collect_queue_stats(struct idpf_vport *vport)
{
struct idpf_port_stats *pstats = &vport->port_stats;
- int i, j;
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
/* zero out port stats since they're actually tracked in per
* queue stats; this is only for reporting
@@ -1120,22 +1122,22 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
u64_stats_set(&pstats->tx_dma_map_errs, 0);
u64_stats_update_end(&pstats->stats_sync);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rxq_grp = &rsrc->rxq_grps[i];
u16 num_rxq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rxq_grp->splitq.num_rxq_sets;
else
num_rxq = rxq_grp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
struct idpf_rx_queue_stats *stats;
struct idpf_rx_queue *rxq;
unsigned int start;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
else
rxq = rxq_grp->singleq.rxqs[j];
@@ -1162,10 +1164,10 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
}
}
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
- for (j = 0; j < txq_grp->num_txq; j++) {
+ for (unsigned int j = 0; j < txq_grp->num_txq; j++) {
u64 linearize, qbusy, skb_drops, dma_map_errs;
struct idpf_tx_queue *txq = txq_grp->txqs[j];
struct idpf_tx_queue_stats *stats;
@@ -1208,9 +1210,9 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
unsigned int total = 0;
- unsigned int i, j;
bool is_splitq;
u16 qtype;
@@ -1228,12 +1230,13 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
idpf_collect_queue_stats(vport);
idpf_add_port_stats(vport, &data);
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ rsrc = &vport->dflt_qv_rsrc;
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
qtype = VIRTCHNL2_QUEUE_TYPE_TX;
- for (j = 0; j < txq_grp->num_txq; j++, total++) {
+ for (unsigned int j = 0; j < txq_grp->num_txq; j++, total++) {
struct idpf_tx_queue *txq = txq_grp->txqs[j];
if (!txq)
@@ -1253,10 +1256,10 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
total = 0;
- is_splitq = idpf_is_queue_model_split(vport->rxq_model);
+ is_splitq = idpf_is_queue_model_split(rsrc->rxq_model);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rxq_grp = &rsrc->rxq_grps[i];
u16 num_rxq;
qtype = VIRTCHNL2_QUEUE_TYPE_RX;
@@ -1266,7 +1269,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
else
num_rxq = rxq_grp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++, total++) {
+ for (unsigned int j = 0; j < num_rxq; j++, total++) {
struct idpf_rx_queue *rxq;
if (is_splitq)
@@ -1298,15 +1301,16 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
u32 q_num)
{
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
int q_grp, q_idx;
- if (!idpf_is_queue_model_split(vport->rxq_model))
- return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
+ return rsrc->rxq_grps->singleq.rxqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
- return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
+ return rsrc->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
}
/**
@@ -1319,14 +1323,15 @@ struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
u32 q_num)
{
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
int q_grp;
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
return vport->txqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
- return vport->txq_grps[q_grp].complq->q_vector;
+ return rsrc->txq_grps[q_grp].complq->q_vector;
}
/**
@@ -1363,7 +1368,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
u32 q_num)
{
const struct idpf_netdev_priv *np = netdev_priv(netdev);
- const struct idpf_vport *vport;
+ struct idpf_q_vec_rsrc *rsrc;
+ struct idpf_vport *vport;
int err = 0;
idpf_vport_ctrl_lock(netdev);
@@ -1372,16 +1378,17 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
- if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
+ rsrc = &vport->dflt_qv_rsrc;
+ if (q_num >= rsrc->num_rxq && q_num >= rsrc->num_txq) {
err = -EINVAL;
goto unlock_mutex;
}
- if (q_num < vport->num_rxq)
+ if (q_num < rsrc->num_rxq)
__idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
VIRTCHNL2_QUEUE_TYPE_RX);
- if (q_num < vport->num_txq)
+ if (q_num < rsrc->num_txq)
__idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
VIRTCHNL2_QUEUE_TYPE_TX);
@@ -1549,8 +1556,9 @@ static int idpf_set_coalesce(struct net_device *netdev,
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_user_config_data *user_config;
struct idpf_q_coalesce *q_coal;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
- int i, err = 0;
+ int err = 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
@@ -1560,14 +1568,15 @@ static int idpf_set_coalesce(struct net_device *netdev,
if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
- for (i = 0; i < vport->num_txq; i++) {
+ rsrc = &vport->dflt_qv_rsrc;
+ for (unsigned int i = 0; i < rsrc->num_txq; i++) {
q_coal = &user_config->q_coalesce[i];
err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
if (err)
goto unlock_mutex;
}
- for (i = 0; i < vport->num_rxq; i++) {
+ for (unsigned int i = 0; i < rsrc->num_rxq; i++) {
q_coal = &user_config->q_coalesce[i];
err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
if (err)
@@ -1748,6 +1757,7 @@ static void idpf_get_ts_stats(struct net_device *netdev,
struct ethtool_ts_stats *ts_stats)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
unsigned int start;
@@ -1763,8 +1773,9 @@ static void idpf_get_ts_stats(struct net_device *netdev,
if (!test_bit(IDPF_VPORT_UP, np->state))
goto exit;
- for (u16 i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ rsrc = &vport->dflt_qv_rsrc;
+ for (u16 i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
for (u16 j = 0; j < txq_grp->num_txq; j++) {
struct idpf_tx_queue *txq = txq_grp->txqs[j];
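Every stats walk in this file now follows the same two-step shape: resolve the default resource group once, then iterate its queue groups. Reduced to a skeleton using only names from the hunks above:

	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;

	for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];

		for (unsigned int j = 0; j < txq_grp->num_txq; j++) {
			struct idpf_tx_queue *txq = txq_grp->txqs[j];

			/* aggregate per-queue counters from txq here */
		}
	}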
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 131a8121839b..94da5fbd56f1 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -545,7 +545,9 @@ static int idpf_del_mac_filter(struct idpf_vport *vport,
if (test_bit(IDPF_VPORT_UP, np->state)) {
int err;
- err = idpf_add_del_mac_filters(vport, np, false, async);
+ err = idpf_add_del_mac_filters(np->adapter, vport_config,
+ vport->default_mac_addr,
+ np->vport_id, false, async);
if (err)
return err;
}
@@ -614,7 +616,9 @@ static int idpf_add_mac_filter(struct idpf_vport *vport,
return err;
if (test_bit(IDPF_VPORT_UP, np->state))
- err = idpf_add_del_mac_filters(vport, np, true, async);
+ err = idpf_add_del_mac_filters(np->adapter, vport_config,
+ vport->default_mac_addr,
+ np->vport_id, true, async);
return err;
}
@@ -662,7 +666,8 @@ static void idpf_restore_mac_filters(struct idpf_vport *vport)
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
+ idpf_add_del_mac_filters(vport->adapter, vport_config,
+ vport->default_mac_addr, vport->vport_id,
true, false);
}
@@ -686,7 +691,8 @@ static void idpf_remove_mac_filters(struct idpf_vport *vport)
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
+ idpf_add_del_mac_filters(vport->adapter, vport_config,
+ vport->default_mac_addr, vport->vport_id,
false, false);
}
@@ -975,6 +981,10 @@ static void idpf_remove_features(struct idpf_vport *vport)
static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_queue_id_reg_info *chunks;
+ u32 vport_id = vport->vport_id;
if (!test_bit(IDPF_VPORT_UP, np->state))
return;
@@ -985,24 +995,26 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
netif_carrier_off(vport->netdev);
netif_tx_disable(vport->netdev);
- idpf_send_disable_vport_msg(vport);
+ chunks = &adapter->vport_config[vport->idx]->qid_reg_info;
+
+ idpf_send_disable_vport_msg(adapter, vport_id);
idpf_send_disable_queues_msg(vport);
- idpf_send_map_unmap_queue_vector_msg(vport, false);
+ idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id, false);
/* Normally we ask for queues in create_vport, but if the number of
	 * initially requested queues has changed, for example via ethtool
* set channels, we do delete queues and then add the queues back
* instead of deleting and reallocating the vport.
*/
if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
- idpf_send_delete_queues_msg(vport);
+ idpf_send_delete_queues_msg(adapter, chunks, vport_id);
idpf_remove_features(vport);
vport->link_up = false;
- idpf_vport_intr_deinit(vport);
- idpf_xdp_rxq_info_deinit_all(vport);
- idpf_vport_queues_rel(vport);
- idpf_vport_intr_rel(vport);
+ idpf_vport_intr_deinit(vport, rsrc);
+ idpf_xdp_rxq_info_deinit_all(rsrc);
+ idpf_vport_queues_rel(vport, rsrc);
+ idpf_vport_intr_rel(rsrc);
clear_bit(IDPF_VPORT_UP, np->state);
if (rtnl)
@@ -1046,9 +1058,6 @@ static void idpf_decfg_netdev(struct idpf_vport *vport)
struct idpf_adapter *adapter = vport->adapter;
u16 idx = vport->idx;
- kfree(vport->rx_ptype_lkup);
- vport->rx_ptype_lkup = NULL;
-
if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
adapter->vport_config[idx]->flags)) {
unregister_netdev(vport->netdev);
@@ -1065,6 +1074,7 @@ static void idpf_decfg_netdev(struct idpf_vport *vport)
*/
static void idpf_vport_rel(struct idpf_vport *vport)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
struct idpf_vector_info vec_info;
@@ -1073,12 +1083,12 @@ static void idpf_vport_rel(struct idpf_vport *vport)
u16 idx = vport->idx;
vport_config = adapter->vport_config[vport->idx];
- idpf_deinit_rss_lut(vport);
rss_data = &vport_config->user_config.rss_data;
+ idpf_deinit_rss_lut(rss_data);
kfree(rss_data->rss_key);
rss_data->rss_key = NULL;
- idpf_send_destroy_vport_msg(vport);
+ idpf_send_destroy_vport_msg(adapter, vport->vport_id);
/* Release all max queues allocated to the adapter's pool */
max_q.max_rxq = vport_config->max_q.max_rxq;
@@ -1089,24 +1099,21 @@ static void idpf_vport_rel(struct idpf_vport *vport)
/* Release all the allocated vectors on the stack */
vec_info.num_req_vecs = 0;
- vec_info.num_curr_vecs = vport->num_q_vectors;
+ vec_info.num_curr_vecs = rsrc->num_q_vectors;
vec_info.default_vport = vport->default_vport;
- idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info);
+ idpf_req_rel_vector_indexes(adapter, rsrc->q_vector_idxs, &vec_info);
- kfree(vport->q_vector_idxs);
- vport->q_vector_idxs = NULL;
+ kfree(rsrc->q_vector_idxs);
+ rsrc->q_vector_idxs = NULL;
+
+ idpf_vport_deinit_queue_reg_chunks(vport_config);
kfree(adapter->vport_params_recvd[idx]);
adapter->vport_params_recvd[idx] = NULL;
kfree(adapter->vport_params_reqd[idx]);
adapter->vport_params_reqd[idx] = NULL;
- if (adapter->vport_config[idx]) {
- kfree(adapter->vport_config[idx]->req_qs_chunks);
- adapter->vport_config[idx]->req_qs_chunks = NULL;
- }
- kfree(vport->rx_ptype_lkup);
- vport->rx_ptype_lkup = NULL;
+
kfree(vport);
adapter->num_alloc_vports--;
}
@@ -1155,7 +1162,7 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
*/
static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
{
- return idpf_is_queue_model_split(vport->rxq_model) &&
+ return idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model) &&
idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
IDPF_CAP_HSPLIT);
}
@@ -1224,6 +1231,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
{
struct idpf_rss_data *rss_data;
u16 idx = adapter->next_vport;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
u16 num_max_q;
int err;
@@ -1271,11 +1279,15 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
vport->default_vport = adapter->num_alloc_vports <
idpf_get_default_vports(adapter);
- vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
- if (!vport->q_vector_idxs)
+ rsrc = &vport->dflt_qv_rsrc;
+ rsrc->dev = &adapter->pdev->dev;
+ rsrc->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
+ if (!rsrc->q_vector_idxs)
goto free_vport;
- idpf_vport_init(vport, max_q);
+ err = idpf_vport_init(vport, max_q);
+ if (err)
+ goto free_vector_idxs;
/* LUT and key are both initialized here. Key is not strictly dependent
* on how many queues we have. If we change number of queues and soft
@@ -1286,13 +1298,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
if (!rss_data->rss_key)
- goto free_vector_idxs;
+ goto free_qreg_chunks;
- /* Initialize default rss key */
+ /* Initialize default RSS key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
- /* Initialize default rss LUT */
- err = idpf_init_rss_lut(vport);
+ /* Initialize default RSS LUT */
+ err = idpf_init_rss_lut(vport, rss_data);
if (err)
goto free_rss_key;
@@ -1308,8 +1320,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
free_rss_key:
kfree(rss_data->rss_key);
+free_qreg_chunks:
+ idpf_vport_deinit_queue_reg_chunks(adapter->vport_config[idx]);
free_vector_idxs:
- kfree(vport->q_vector_idxs);
+ kfree(rsrc->q_vector_idxs);
free_vport:
kfree(vport);
@@ -1346,7 +1360,8 @@ void idpf_statistics_task(struct work_struct *work)
struct idpf_vport *vport = adapter->vports[i];
if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
- idpf_send_get_stats_msg(vport);
+ idpf_send_get_stats_msg(netdev_priv(vport->netdev),
+ &vport->port_stats);
}
queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
@@ -1369,7 +1384,7 @@ void idpf_mbx_task(struct work_struct *work)
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
usecs_to_jiffies(300));
- idpf_recv_mb_msg(adapter);
+ idpf_recv_mb_msg(adapter, adapter->hw.arq);
}
/**
@@ -1417,9 +1432,10 @@ static void idpf_restore_features(struct idpf_vport *vport)
*/
static int idpf_set_real_num_queues(struct idpf_vport *vport)
{
- int err, txq = vport->num_txq - vport->num_xdp_txq;
+ int err, txq = vport->dflt_qv_rsrc.num_txq - vport->num_xdp_txq;
- err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
+ err = netif_set_real_num_rx_queues(vport->netdev,
+ vport->dflt_qv_rsrc.num_rxq);
if (err)
return err;
@@ -1429,10 +1445,8 @@ static int idpf_set_real_num_queues(struct idpf_vport *vport)
/**
* idpf_up_complete - Complete interface up sequence
* @vport: virtual port structure
- *
- * Returns 0 on success, negative on failure.
*/
-static int idpf_up_complete(struct idpf_vport *vport)
+static void idpf_up_complete(struct idpf_vport *vport)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
@@ -1442,30 +1456,26 @@ static int idpf_up_complete(struct idpf_vport *vport)
}
set_bit(IDPF_VPORT_UP, np->state);
-
- return 0;
}
/**
* idpf_rx_init_buf_tail - Write initial buffer ring tail value
- * @vport: virtual port struct
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
+static void idpf_rx_init_buf_tail(struct idpf_q_vec_rsrc *rsrc)
{
- int i, j;
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *grp = &vport->rxq_grps[i];
-
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
const struct idpf_buf_queue *q =
&grp->splitq.bufq_sets[j].bufq;
writel(q->next_to_alloc, q->tail);
}
} else {
- for (j = 0; j < grp->singleq.num_rxq; j++) {
+ for (unsigned int j = 0; j < grp->singleq.num_rxq; j++) {
const struct idpf_rx_queue *q =
grp->singleq.rxqs[j];
@@ -1483,7 +1493,12 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ struct idpf_queue_id_reg_info *chunks;
+ struct idpf_rss_data *rss_data;
+ u32 vport_id = vport->vport_id;
int err;
if (test_bit(IDPF_VPORT_UP, np->state))
@@ -1495,48 +1510,51 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
/* we do not allow interface up just yet */
netif_carrier_off(vport->netdev);
- err = idpf_vport_intr_alloc(vport);
+ err = idpf_vport_intr_alloc(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
vport->vport_id, err);
goto err_rtnl_unlock;
}
- err = idpf_vport_queues_alloc(vport);
+ err = idpf_vport_queues_alloc(vport, rsrc);
if (err)
goto intr_rel;
- err = idpf_vport_queue_ids_init(vport);
+ vport_config = adapter->vport_config[vport->idx];
+ chunks = &vport_config->qid_reg_info;
+
+ err = idpf_vport_queue_ids_init(vport, rsrc, chunks);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- err = idpf_vport_intr_init(vport);
+ err = idpf_vport_intr_init(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- err = idpf_queue_reg_init(vport);
+ err = idpf_queue_reg_init(vport, rsrc, chunks);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
goto intr_deinit;
}
- err = idpf_rx_bufs_init_all(vport);
+ err = idpf_rx_bufs_init_all(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
goto intr_deinit;
}
- idpf_rx_init_buf_tail(vport);
+ idpf_rx_init_buf_tail(rsrc);
- err = idpf_xdp_rxq_info_init_all(vport);
+ err = idpf_xdp_rxq_info_init_all(rsrc);
if (err) {
netdev_err(vport->netdev,
"Failed to initialize XDP RxQ info for vport %u: %pe\n",
@@ -1544,16 +1562,17 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
goto intr_deinit;
}
- idpf_vport_intr_ena(vport);
+ idpf_vport_intr_ena(vport, rsrc);
- err = idpf_send_config_queues_msg(vport);
+ err = idpf_send_config_queues_msg(adapter, rsrc, vport_id);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
vport->vport_id, err);
goto rxq_deinit;
}
- err = idpf_send_map_unmap_queue_vector_msg(vport, true);
+ err = idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id,
+ true);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
vport->vport_id, err);
@@ -1567,7 +1586,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
goto unmap_queue_vectors;
}
- err = idpf_send_enable_vport_msg(vport);
+ err = idpf_send_enable_vport_msg(adapter, vport_id);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n",
vport->vport_id, err);
@@ -1577,19 +1596,15 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
idpf_restore_features(vport);
- err = idpf_config_rss(vport);
+ rss_data = &vport_config->user_config.rss_data;
+ err = idpf_config_rss(vport, rss_data);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure RSS for vport %u: %d\n",
vport->vport_id, err);
goto disable_vport;
}
- err = idpf_up_complete(vport);
- if (err) {
- dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
- vport->vport_id, err);
- goto disable_vport;
- }
+ idpf_up_complete(vport);
if (rtnl)
rtnl_unlock();
@@ -1597,19 +1612,19 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
return 0;
disable_vport:
- idpf_send_disable_vport_msg(vport);
+ idpf_send_disable_vport_msg(adapter, vport_id);
disable_queues:
idpf_send_disable_queues_msg(vport);
unmap_queue_vectors:
- idpf_send_map_unmap_queue_vector_msg(vport, false);
+ idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id, false);
rxq_deinit:
- idpf_xdp_rxq_info_deinit_all(vport);
+ idpf_xdp_rxq_info_deinit_all(rsrc);
intr_deinit:
- idpf_vport_intr_deinit(vport);
+ idpf_vport_intr_deinit(vport, rsrc);
queues_rel:
- idpf_vport_queues_rel(vport);
+ idpf_vport_queues_rel(vport, rsrc);
intr_rel:
- idpf_vport_intr_rel(vport);
+ idpf_vport_intr_rel(rsrc);
err_rtnl_unlock:
if (rtnl)
@@ -1667,10 +1682,6 @@ void idpf_init_task(struct work_struct *work)
goto unwind_vports;
}
- err = idpf_send_get_rx_ptype_msg(vport);
- if (err)
- goto unwind_vports;
-
index = vport->idx;
vport_config = adapter->vport_config[index];
@@ -1996,9 +2007,13 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state);
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ struct idpf_q_vec_rsrc *new_rsrc;
+ u32 vport_id = vport->vport_id;
struct idpf_vport *new_vport;
- int err;
+ int err, tmp_err = 0;
/* If the system is low on memory, we can end up in bad state if we
* free all the memory for queue resources and try to allocate them
@@ -2023,16 +2038,18 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
*/
memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));
+ new_rsrc = &new_vport->dflt_qv_rsrc;
+
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
case IDPF_SR_Q_CHANGE:
- err = idpf_vport_adjust_qs(new_vport);
+ err = idpf_vport_adjust_qs(new_vport, new_rsrc);
if (err)
goto free_vport;
break;
case IDPF_SR_Q_DESC_CHANGE:
/* Update queue parameters before allocating resources */
- idpf_vport_calc_num_q_desc(new_vport);
+ idpf_vport_calc_num_q_desc(new_vport, new_rsrc);
break;
case IDPF_SR_MTU_CHANGE:
idpf_idc_vdev_mtu_event(vport->vdev_info,
@@ -2046,41 +2063,40 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto free_vport;
}
+ vport_config = adapter->vport_config[vport->idx];
+
if (!vport_is_up) {
- idpf_send_delete_queues_msg(vport);
+ idpf_send_delete_queues_msg(adapter, &vport_config->qid_reg_info,
+ vport_id);
} else {
set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
idpf_vport_stop(vport, false);
}
- /* We're passing in vport here because we need its wait_queue
- * to send a message and it should be getting all the vport
- * config data out of the adapter but we need to be careful not
- * to add code to add_queues to change the vport config within
- * vport itself as it will be wiped with a memcpy later.
- */
- err = idpf_send_add_queues_msg(vport, new_vport->num_txq,
- new_vport->num_complq,
- new_vport->num_rxq,
- new_vport->num_bufq);
+ err = idpf_send_add_queues_msg(adapter, vport_config, new_rsrc,
+ vport_id);
if (err)
goto err_reset;
- /* Same comment as above regarding avoiding copying the wait_queues and
- * mutexes applies here. We do not want to mess with those if possible.
+ /* Avoid copying the wait_queues and mutexes. We do not want to mess
+ * with those if possible.
*/
memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));
if (reset_cause == IDPF_SR_Q_CHANGE)
- idpf_vport_alloc_vec_indexes(vport);
+ idpf_vport_alloc_vec_indexes(vport, &vport->dflt_qv_rsrc);
err = idpf_set_real_num_queues(vport);
if (err)
goto err_open;
if (reset_cause == IDPF_SR_Q_CHANGE &&
- !netif_is_rxfh_configured(vport->netdev))
- idpf_fill_dflt_rss_lut(vport);
+ !netif_is_rxfh_configured(vport->netdev)) {
+ struct idpf_rss_data *rss_data;
+
+ rss_data = &vport_config->user_config.rss_data;
+ idpf_fill_dflt_rss_lut(vport, rss_data);
+ }
if (vport_is_up)
err = idpf_vport_open(vport, false);
@@ -2088,11 +2104,11 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto free_vport;
err_reset:
- idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
- vport->num_rxq, vport->num_bufq);
+ tmp_err = idpf_send_add_queues_msg(adapter, vport_config, rsrc,
+ vport_id);
err_open:
- if (vport_is_up)
+ if (!tmp_err && vport_is_up)
idpf_vport_open(vport, false);
free_vport:
@@ -2258,7 +2274,12 @@ static int idpf_set_features(struct net_device *netdev,
* the HW when the interface is brought up.
*/
if (test_bit(IDPF_VPORT_UP, np->state)) {
- err = idpf_config_rss(vport);
+ struct idpf_vport_config *vport_config;
+ struct idpf_rss_data *rss_data;
+
+ vport_config = adapter->vport_config[vport->idx];
+ rss_data = &vport_config->user_config.rss_data;
+ err = idpf_config_rss(vport, rss_data);
if (err)
goto unlock_mutex;
}
@@ -2272,8 +2293,13 @@ static int idpf_set_features(struct net_device *netdev,
}
if (changed & NETIF_F_LOOPBACK) {
+ bool loopback_ena;
+
netdev->features ^= NETIF_F_LOOPBACK;
- err = idpf_send_ena_dis_loopback_msg(vport);
+ loopback_ena = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
+
+ err = idpf_send_ena_dis_loopback_msg(adapter, vport->vport_id,
+ loopback_ena);
}
unlock_mutex:
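The MAC filter helpers above lose their dependence on netdev_priv(): callers pass the adapter, vport config, default MAC address and vport ID explicitly. A hedged sketch of the convention implied by the call sites in this hunk (the last two arguments select add-vs-delete and async-vs-sync):

	err = idpf_add_del_mac_filters(adapter, vport_config,
				       vport->default_mac_addr,
				       vport->vport_id,
				       true,	/* add the filters */
				       false);	/* wait for completion */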
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index 0a8b50350b86..4a805a9541f0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -384,15 +384,17 @@ static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter)
WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies);
idpf_for_each_vport(adapter, vport) {
+ struct idpf_q_vec_rsrc *rsrc;
bool split;
- if (!vport || !vport->rxq_grps)
+ if (!vport || !vport->dflt_qv_rsrc.rxq_grps)
continue;
- split = idpf_is_queue_model_split(vport->rxq_model);
+ rsrc = &vport->dflt_qv_rsrc;
+ split = idpf_is_queue_model_split(rsrc->rxq_model);
- for (u16 i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
idpf_ptp_update_phctime_rxq_grp(grp, split, systime);
}
@@ -681,9 +683,10 @@ int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
*/
static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
bool enable = true, splitq;
- splitq = idpf_is_queue_model_split(vport->rxq_model);
+ splitq = idpf_is_queue_model_split(rsrc->rxq_model);
if (rx_filter == HWTSTAMP_FILTER_NONE) {
enable = false;
@@ -692,8 +695,8 @@ static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
}
- for (u16 i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
struct idpf_rx_queue *rx_queue;
u16 j, num_rxq;
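In the PTP paths the pattern repeats: skip vports whose default resource group has no RX queue groups yet, then walk the groups through the rsrc pointer. As a standalone sketch built only from the lines above:

	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	bool split;

	if (!rsrc->rxq_grps)
		return;	/* queue groups not allocated yet */

	split = idpf_is_queue_model_split(rsrc->rxq_model);
	for (u16 i = 0; i < rsrc->num_rxq_grp; i++)
		idpf_ptp_update_phctime_rxq_grp(&rsrc->rxq_grps[i], split,
						systime);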
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index f58f616d87fc..376050308b06 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -19,6 +19,8 @@ LIBETH_SQE_CHECK_PRIV(u32);
* Make sure we don't exceed maximum scatter gather buffers for a single
* packet.
 * The TSO case has been handled earlier by idpf_features_check().
+ *
+ * Return: %true if skb exceeds max descriptors per packet, %false otherwise.
*/
static bool idpf_chk_linearize(const struct sk_buff *skb,
unsigned int max_bufs,
@@ -146,24 +148,22 @@ static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
/**
* idpf_tx_desc_rel_all - Free Tx Resources for All Queues
- * @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*
* Free all transmit software resources
*/
-static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
+static void idpf_tx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
- int i, j;
-
- if (!vport->txq_grps)
+ if (!rsrc->txq_grps)
return;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
- for (j = 0; j < txq_grp->num_txq; j++)
+ for (unsigned int j = 0; j < txq_grp->num_txq; j++)
idpf_tx_desc_rel(txq_grp->txqs[j]);
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(rsrc->txq_model))
idpf_compl_desc_rel(txq_grp->complq);
}
}
@@ -172,7 +172,7 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
* idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
* @tx_q: queue for which the buffers are allocated
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
{
@@ -196,7 +196,7 @@ static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
* @vport: vport to allocate resources for
* @tx_q: the tx ring to set up
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
struct idpf_tx_queue *tx_q)
@@ -263,7 +263,7 @@ err_alloc:
/**
* idpf_compl_desc_alloc - allocate completion descriptors
- * @vport: vport to allocate resources for
+ * @vport: virtual port private structure
* @complq: completion queue to set up
*
* Return: 0 on success, -errno on failure.
@@ -296,20 +296,21 @@ static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
/**
* idpf_tx_desc_alloc_all - allocate all queues Tx resources
* @vport: virtual port private structure
+ * @rsrc: pointer to queue and vector resources
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
+static int idpf_tx_desc_alloc_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int err = 0;
- int i, j;
/* Setup buffer queues. In single queue model buffer queues and
	 * completion queues will be the same
*/
- for (i = 0; i < vport->num_txq_grp; i++) {
- for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
- struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ for (unsigned int j = 0; j < rsrc->txq_grps[i].num_txq; j++) {
+ struct idpf_tx_queue *txq = rsrc->txq_grps[i].txqs[j];
err = idpf_tx_desc_alloc(vport, txq);
if (err) {
@@ -320,11 +321,11 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
}
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
continue;
/* Setup completion queues */
- err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
+ err = idpf_compl_desc_alloc(vport, rsrc->txq_grps[i].complq);
if (err) {
pci_err(vport->adapter->pdev,
"Allocation for Tx Completion Queue %u failed\n",
@@ -335,7 +336,7 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
err_out:
if (err)
- idpf_tx_desc_rel_all(vport);
+ idpf_tx_desc_rel_all(rsrc);
return err;
}
@@ -488,38 +489,38 @@ static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
/**
* idpf_rx_desc_rel_all - Free Rx Resources for All Queues
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*
 * Free all RX queue resources
*/
-static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
+static void idpf_rx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
- struct device *dev = &vport->adapter->pdev->dev;
+ struct device *dev = rsrc->dev;
struct idpf_rxq_group *rx_qgrp;
u16 num_rxq;
- int i, j;
- if (!vport->rxq_grps)
+ if (!rsrc->rxq_grps)
return;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- rx_qgrp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ rx_qgrp = &rsrc->rxq_grps[i];
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
- for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
+ for (unsigned int j = 0; j < rx_qgrp->singleq.num_rxq; j++)
idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
VIRTCHNL2_QUEUE_MODEL_SINGLE);
continue;
}
num_rxq = rx_qgrp->splitq.num_rxq_sets;
- for (j = 0; j < num_rxq; j++)
+ for (unsigned int j = 0; j < num_rxq; j++)
idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
if (!rx_qgrp->splitq.bufq_sets)
continue;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
@@ -548,7 +549,7 @@ static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
* idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
* @bufq: ring to use
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
{
@@ -600,7 +601,7 @@ static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
* @bufq: buffer queue to post to
* @buf_id: buffer id to post
*
- * Returns false if buffer could not be allocated, true otherwise.
+ * Return: %false if buffer could not be allocated, %true otherwise.
*/
static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
{
@@ -649,7 +650,7 @@ static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
* @bufq: buffer queue to post working set to
* @working_set: number of buffers to put in working set
*
- * Returns true if @working_set bufs were posted successfully, false otherwise.
+ * Return: %true if @working_set bufs were posted successfully, %false otherwise.
*/
static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
u16 working_set)
@@ -718,7 +719,7 @@ static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
* idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
* @rxbufq: queue for which the buffers are allocated
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
{
@@ -746,7 +747,7 @@ rx_buf_alloc_all_out:
* @bufq: buffer queue to create page pool for
* @type: type of Rx buffers to allocate
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
enum libeth_fqe_type type)
@@ -779,26 +780,28 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
/**
* idpf_rx_bufs_init_all - Initialize all RX bufs
- * @vport: virtual port struct
+ * @vport: pointer to vport struct
+ * @rsrc: pointer to queue and vector resources
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_rx_bufs_init_all(struct idpf_vport *vport)
+int idpf_rx_bufs_init_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- bool split = idpf_is_queue_model_split(vport->rxq_model);
- int i, j, err;
+ bool split = idpf_is_queue_model_split(rsrc->rxq_model);
+ int err;
- idpf_xdp_copy_prog_to_rqs(vport, vport->xdp_prog);
+ idpf_xdp_copy_prog_to_rqs(rsrc, vport->xdp_prog);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 truesize = 0;
/* Allocate bufs for the rxq itself in singleq */
if (!split) {
int num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
struct idpf_rx_queue *q;
q = rx_qgrp->singleq.rxqs[j];
@@ -811,7 +814,7 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
}
/* Otherwise, allocate bufs for the buffer queues */
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
enum libeth_fqe_type type;
struct idpf_buf_queue *q;
@@ -836,7 +839,7 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
* @vport: vport to allocate resources for
* @rxq: Rx queue for which the resources are setup
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
struct idpf_rx_queue *rxq)
@@ -897,26 +900,28 @@ static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
/**
* idpf_rx_desc_alloc_all - allocate all RX queues resources
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
+static int idpf_rx_desc_alloc_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_rxq_group *rx_qgrp;
- int i, j, err;
u16 num_rxq;
+ int err;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- rx_qgrp = &vport->rxq_grps[i];
- if (idpf_is_queue_model_split(vport->rxq_model))
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ rx_qgrp = &rsrc->rxq_grps[i];
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
struct idpf_rx_queue *q;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
@@ -930,10 +935,10 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
}
}
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
continue;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
@@ -951,18 +956,18 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
return 0;
err_out:
- idpf_rx_desc_rel_all(vport);
+ idpf_rx_desc_rel_all(rsrc);
return err;
}
-static int idpf_init_queue_set(const struct idpf_queue_set *qs)
+static int idpf_init_queue_set(const struct idpf_vport *vport,
+ const struct idpf_queue_set *qs)
{
- const struct idpf_vport *vport = qs->vport;
bool splitq;
int err;
- splitq = idpf_is_queue_model_split(vport->rxq_model);
+ splitq = idpf_is_queue_model_split(qs->qv_rsrc->rxq_model);
for (u32 i = 0; i < qs->num; i++) {
const struct idpf_queue_ptr *q = &qs->qs[i];
@@ -1032,19 +1037,18 @@ static int idpf_init_queue_set(const struct idpf_queue_set *qs)
static void idpf_clean_queue_set(const struct idpf_queue_set *qs)
{
- const struct idpf_vport *vport = qs->vport;
- struct device *dev = vport->netdev->dev.parent;
+ const struct idpf_q_vec_rsrc *rsrc = qs->qv_rsrc;
for (u32 i = 0; i < qs->num; i++) {
const struct idpf_queue_ptr *q = &qs->qs[i];
switch (q->type) {
case VIRTCHNL2_QUEUE_TYPE_RX:
- idpf_xdp_rxq_info_deinit(q->rxq, vport->rxq_model);
- idpf_rx_desc_rel(q->rxq, dev, vport->rxq_model);
+ idpf_xdp_rxq_info_deinit(q->rxq, rsrc->rxq_model);
+ idpf_rx_desc_rel(q->rxq, rsrc->dev, rsrc->rxq_model);
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
- idpf_rx_desc_rel_bufq(q->bufq, dev);
+ idpf_rx_desc_rel_bufq(q->bufq, rsrc->dev);
break;
case VIRTCHNL2_QUEUE_TYPE_TX:
idpf_tx_desc_rel(q->txq);
@@ -1111,7 +1115,8 @@ static void idpf_qvec_ena_irq(struct idpf_q_vector *qv)
static struct idpf_queue_set *
idpf_vector_to_queue_set(struct idpf_q_vector *qv)
{
- bool xdp = qv->vport->xdp_txq_offset && !qv->num_xsksq;
+ u32 xdp_txq_offset = qv->vport->dflt_qv_rsrc.xdp_txq_offset;
+ bool xdp = xdp_txq_offset && !qv->num_xsksq;
struct idpf_vport *vport = qv->vport;
struct idpf_queue_set *qs;
u32 num;
@@ -1121,7 +1126,8 @@ idpf_vector_to_queue_set(struct idpf_q_vector *qv)
if (!num)
return NULL;
- qs = idpf_alloc_queue_set(vport, num);
+ qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
+ vport->vport_id, num);
if (!qs)
return NULL;
@@ -1147,12 +1153,12 @@ idpf_vector_to_queue_set(struct idpf_q_vector *qv)
qs->qs[num++].complq = qv->complq[i];
}
- if (!vport->xdp_txq_offset)
+ if (!xdp_txq_offset)
goto finalize;
if (xdp) {
for (u32 i = 0; i < qv->num_rxq; i++) {
- u32 idx = vport->xdp_txq_offset + qv->rx[i]->idx;
+ u32 idx = xdp_txq_offset + qv->rx[i]->idx;
qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
qs->qs[num++].txq = vport->txqs[idx];
@@ -1179,26 +1185,27 @@ finalize:
return qs;
}
-static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid)
+static int idpf_qp_enable(const struct idpf_vport *vport,
+ const struct idpf_queue_set *qs, u32 qid)
{
- struct idpf_vport *vport = qs->vport;
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_q_vector *q_vector;
int err;
q_vector = idpf_find_rxq_vec(vport, qid);
- err = idpf_init_queue_set(qs);
+ err = idpf_init_queue_set(vport, qs);
if (err) {
netdev_err(vport->netdev, "Could not initialize queues in pair %u: %pe\n",
qid, ERR_PTR(err));
return err;
}
- if (!vport->xdp_txq_offset)
+ if (!rsrc->xdp_txq_offset)
goto config;
- q_vector->xsksq = kcalloc(DIV_ROUND_UP(vport->num_rxq_grp,
- vport->num_q_vectors),
+ q_vector->xsksq = kcalloc(DIV_ROUND_UP(rsrc->num_rxq_grp,
+ rsrc->num_q_vectors),
sizeof(*q_vector->xsksq), GFP_KERNEL);
if (!q_vector->xsksq)
return -ENOMEM;
@@ -1241,9 +1248,9 @@ config:
return 0;
}
-static int idpf_qp_disable(const struct idpf_queue_set *qs, u32 qid)
+static int idpf_qp_disable(const struct idpf_vport *vport,
+ const struct idpf_queue_set *qs, u32 qid)
{
- struct idpf_vport *vport = qs->vport;
struct idpf_q_vector *q_vector;
int err;
@@ -1288,30 +1295,28 @@ int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en)
if (!qs)
return -ENOMEM;
- return en ? idpf_qp_enable(qs, qid) : idpf_qp_disable(qs, qid);
+ return en ? idpf_qp_enable(vport, qs, qid) :
+ idpf_qp_disable(vport, qs, qid);
}
/**
* idpf_txq_group_rel - Release all resources for txq groups
- * @vport: vport to release txq groups on
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_txq_group_rel(struct idpf_vport *vport)
+static void idpf_txq_group_rel(struct idpf_q_vec_rsrc *rsrc)
{
- bool split, flow_sch_en;
- int i, j;
+ bool split;
- if (!vport->txq_grps)
+ if (!rsrc->txq_grps)
return;
- split = idpf_is_queue_model_split(vport->txq_model);
- flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
- VIRTCHNL2_CAP_SPLITQ_QSCHED);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
- for (j = 0; j < txq_grp->num_txq; j++) {
- if (flow_sch_en) {
+ for (unsigned int j = 0; j < txq_grp->num_txq; j++) {
+ if (idpf_queue_has(FLOW_SCH_EN, txq_grp->txqs[j])) {
kfree(txq_grp->txqs[j]->refillq);
txq_grp->txqs[j]->refillq = NULL;
}
@@ -1326,8 +1331,8 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
kfree(txq_grp->complq);
txq_grp->complq = NULL;
}
- kfree(vport->txq_grps);
- vport->txq_grps = NULL;
+ kfree(rsrc->txq_grps);
+ rsrc->txq_grps = NULL;
}
/**
@@ -1336,12 +1341,10 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
*/
static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
{
- int i, j;
-
- for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
+ for (unsigned int i = 0; i < rx_qgrp->splitq.num_bufq_sets; i++) {
struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
- for (j = 0; j < bufq_set->num_refillqs; j++) {
+ for (unsigned int j = 0; j < bufq_set->num_refillqs; j++) {
kfree(bufq_set->refillqs[j].ring);
bufq_set->refillqs[j].ring = NULL;
}
@@ -1352,23 +1355,20 @@ static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
/**
* idpf_rxq_group_rel - Release all resources for rxq groups
- * @vport: vport to release rxq groups on
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_rxq_group_rel(struct idpf_vport *vport)
+static void idpf_rxq_group_rel(struct idpf_q_vec_rsrc *rsrc)
{
- int i;
-
- if (!vport->rxq_grps)
+ if (!rsrc->rxq_grps)
return;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u16 num_rxq;
- int j;
- if (idpf_is_queue_model_split(vport->rxq_model)) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
num_rxq = rx_qgrp->splitq.num_rxq_sets;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
kfree(rx_qgrp->splitq.rxq_sets[j]);
rx_qgrp->splitq.rxq_sets[j] = NULL;
}
@@ -1378,41 +1378,44 @@ static void idpf_rxq_group_rel(struct idpf_vport *vport)
rx_qgrp->splitq.bufq_sets = NULL;
} else {
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
kfree(rx_qgrp->singleq.rxqs[j]);
rx_qgrp->singleq.rxqs[j] = NULL;
}
}
}
- kfree(vport->rxq_grps);
- vport->rxq_grps = NULL;
+ kfree(rsrc->rxq_grps);
+ rsrc->rxq_grps = NULL;
}
/**
* idpf_vport_queue_grp_rel_all - Release all queue groups
* @vport: vport to release queue groups for
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
+static void idpf_vport_queue_grp_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
- idpf_txq_group_rel(vport);
- idpf_rxq_group_rel(vport);
+ idpf_txq_group_rel(rsrc);
+ idpf_rxq_group_rel(rsrc);
}
/**
* idpf_vport_queues_rel - Free memory for all queues
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Free the memory allocated for queues associated to a vport
*/
-void idpf_vport_queues_rel(struct idpf_vport *vport)
+void idpf_vport_queues_rel(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- idpf_xdp_copy_prog_to_rqs(vport, NULL);
+ idpf_xdp_copy_prog_to_rqs(rsrc, NULL);
- idpf_tx_desc_rel_all(vport);
- idpf_rx_desc_rel_all(vport);
+ idpf_tx_desc_rel_all(rsrc);
+ idpf_rx_desc_rel_all(rsrc);
idpf_xdpsqs_put(vport);
- idpf_vport_queue_grp_rel_all(vport);
+ idpf_vport_queue_grp_rel_all(rsrc);
kfree(vport->txqs);
vport->txqs = NULL;
@@ -1421,29 +1424,31 @@ void idpf_vport_queues_rel(struct idpf_vport *vport)
/**
* idpf_vport_init_fast_path_txqs - Initialize fast path txq array
* @vport: vport to init txqs on
+ * @rsrc: pointer to queue and vector resources
*
* We get a queue index from skb->queue_mapping and we need a fast way to
* dereference the queue from queue groups. This allows us to quickly pull a
* txq based on a queue index.
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
+static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
struct work_struct *tstamp_task = &vport->tstamp_task;
- int i, j, k = 0;
+ int k = 0;
- vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
+ vport->txqs = kcalloc(rsrc->num_txq, sizeof(*vport->txqs),
GFP_KERNEL);
-
if (!vport->txqs)
return -ENOMEM;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
+ vport->num_txq = rsrc->num_txq;
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_grp = &rsrc->txq_grps[i];
- for (j = 0; j < tx_grp->num_txq; j++, k++) {
+ for (unsigned int j = 0; j < tx_grp->num_txq; j++, k++) {
vport->txqs[k] = tx_grp->txqs[j];
vport->txqs[k]->idx = k;
@@ -1462,16 +1467,18 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
* idpf_vport_init_num_qs - Initialize number of queues
* @vport: vport to initialize queues
* @vport_msg: data to be filled into vport
+ * @rsrc: pointer to queue and vector resources
*/
void idpf_vport_init_num_qs(struct idpf_vport *vport,
- struct virtchnl2_create_vport *vport_msg)
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_vport_user_config_data *config_data;
u16 idx = vport->idx;
config_data = &vport->adapter->vport_config[idx]->user_config;
- vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
- vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
+ rsrc->num_txq = le16_to_cpu(vport_msg->num_tx_q);
+ rsrc->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
/* number of txqs and rxqs in config data will be zeros only in the
	 * driver load path and we don't update them thereafter
*/
@@ -1480,74 +1487,75 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
}
- if (idpf_is_queue_model_split(vport->txq_model))
- vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
- if (idpf_is_queue_model_split(vport->rxq_model))
- vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
+ if (idpf_is_queue_model_split(rsrc->txq_model))
+ rsrc->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
+ rsrc->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
vport->xdp_prog = config_data->xdp_prog;
if (idpf_xdp_enabled(vport)) {
- vport->xdp_txq_offset = config_data->num_req_tx_qs;
+ rsrc->xdp_txq_offset = config_data->num_req_tx_qs;
vport->num_xdp_txq = le16_to_cpu(vport_msg->num_tx_q) -
- vport->xdp_txq_offset;
+ rsrc->xdp_txq_offset;
vport->xdpsq_share = libeth_xdpsq_shared(vport->num_xdp_txq);
} else {
- vport->xdp_txq_offset = 0;
+ rsrc->xdp_txq_offset = 0;
vport->num_xdp_txq = 0;
vport->xdpsq_share = false;
}
/* Adjust number of buffer queues per Rx queue group. */
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
- vport->num_bufqs_per_qgrp = 0;
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
+ rsrc->num_bufqs_per_qgrp = 0;
return;
}
- vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ rsrc->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
}
/**
* idpf_vport_calc_num_q_desc - Calculate number of queue groups
* @vport: vport to calculate q groups for
+ * @rsrc: pointer to queue and vector resources
*/
-void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_vport_user_config_data *config_data;
- int num_bufqs = vport->num_bufqs_per_qgrp;
+ u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
u32 num_req_txq_desc, num_req_rxq_desc;
u16 idx = vport->idx;
- int i;
config_data = &vport->adapter->vport_config[idx]->user_config;
num_req_txq_desc = config_data->num_req_txq_desc;
num_req_rxq_desc = config_data->num_req_rxq_desc;
- vport->complq_desc_count = 0;
+ rsrc->complq_desc_count = 0;
if (num_req_txq_desc) {
- vport->txq_desc_count = num_req_txq_desc;
- if (idpf_is_queue_model_split(vport->txq_model)) {
- vport->complq_desc_count = num_req_txq_desc;
- if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
- vport->complq_desc_count =
+ rsrc->txq_desc_count = num_req_txq_desc;
+ if (idpf_is_queue_model_split(rsrc->txq_model)) {
+ rsrc->complq_desc_count = num_req_txq_desc;
+ if (rsrc->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
+ rsrc->complq_desc_count =
IDPF_MIN_TXQ_COMPLQ_DESC;
}
} else {
- vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
- if (idpf_is_queue_model_split(vport->txq_model))
- vport->complq_desc_count =
+ rsrc->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
+ if (idpf_is_queue_model_split(rsrc->txq_model))
+ rsrc->complq_desc_count =
IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
}
if (num_req_rxq_desc)
- vport->rxq_desc_count = num_req_rxq_desc;
+ rsrc->rxq_desc_count = num_req_rxq_desc;
else
- vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
+ rsrc->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
- for (i = 0; i < num_bufqs; i++) {
- if (!vport->bufq_desc_count[i])
- vport->bufq_desc_count[i] =
- IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
+ for (unsigned int i = 0; i < num_bufqs; i++) {
+ if (!rsrc->bufq_desc_count[i])
+ rsrc->bufq_desc_count[i] =
+ IDPF_RX_BUFQ_DESC_COUNT(rsrc->rxq_desc_count,
num_bufqs);
}
}
@@ -1559,7 +1567,7 @@ void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
* @vport_msg: message to fill with data
* @max_q: vport max queue info
*
- * Return 0 on success, error value on failure.
+ * Return: 0 on success, error value on failure.
*/
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
struct virtchnl2_create_vport *vport_msg,
@@ -1636,54 +1644,54 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
/**
* idpf_vport_calc_num_q_groups - Calculate number of queue groups
- * @vport: vport to calculate q groups for
+ * @rsrc: pointer to queue and vector resources
*/
-void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
+void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc)
{
- if (idpf_is_queue_model_split(vport->txq_model))
- vport->num_txq_grp = vport->num_txq;
+ if (idpf_is_queue_model_split(rsrc->txq_model))
+ rsrc->num_txq_grp = rsrc->num_txq;
else
- vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
+ rsrc->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
- if (idpf_is_queue_model_split(vport->rxq_model))
- vport->num_rxq_grp = vport->num_rxq;
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
+ rsrc->num_rxq_grp = rsrc->num_rxq;
else
- vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
+ rsrc->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
}
/**
* idpf_vport_calc_numq_per_grp - Calculate number of queues per group
- * @vport: vport to calculate queues for
+ * @rsrc: pointer to queue and vector resources
* @num_txq: return parameter for number of TX queues
* @num_rxq: return parameter for number of RX queues
*/
-static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
+static void idpf_vport_calc_numq_per_grp(struct idpf_q_vec_rsrc *rsrc,
u16 *num_txq, u16 *num_rxq)
{
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(rsrc->txq_model))
*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
else
- *num_txq = vport->num_txq;
+ *num_txq = rsrc->num_txq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
else
- *num_rxq = vport->num_rxq;
+ *num_rxq = rsrc->num_rxq;
}
/**
* idpf_rxq_set_descids - set the descids supported by this queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: rx queue for which descids are set
*
*/
-static void idpf_rxq_set_descids(const struct idpf_vport *vport,
+static void idpf_rxq_set_descids(struct idpf_q_vec_rsrc *rsrc,
struct idpf_rx_queue *q)
{
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
return;
- if (vport->base_rxd)
+ if (rsrc->base_rxd)
q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
else
q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
@@ -1692,44 +1700,45 @@ static void idpf_rxq_set_descids(const struct idpf_vport *vport,
/**
* idpf_txq_group_alloc - Allocate all txq group resources
* @vport: vport to allocate txq groups for
+ * @rsrc: pointer to queue and vector resources
* @num_txq: number of txqs to allocate for each group
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
+static int idpf_txq_group_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ u16 num_txq)
{
bool split, flow_sch_en;
- int i;
- vport->txq_grps = kcalloc(vport->num_txq_grp,
- sizeof(*vport->txq_grps), GFP_KERNEL);
- if (!vport->txq_grps)
+ rsrc->txq_grps = kcalloc(rsrc->num_txq_grp,
+ sizeof(*rsrc->txq_grps), GFP_KERNEL);
+ if (!rsrc->txq_grps)
return -ENOMEM;
- split = idpf_is_queue_model_split(vport->txq_model);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_SPLITQ_QSCHED);
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
struct idpf_adapter *adapter = vport->adapter;
- int j;
tx_qgrp->vport = vport;
tx_qgrp->num_txq = num_txq;
- for (j = 0; j < tx_qgrp->num_txq; j++) {
+ for (unsigned int j = 0; j < tx_qgrp->num_txq; j++) {
tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
GFP_KERNEL);
if (!tx_qgrp->txqs[j])
goto err_alloc;
}
- for (j = 0; j < tx_qgrp->num_txq; j++) {
+ for (unsigned int j = 0; j < tx_qgrp->num_txq; j++) {
struct idpf_tx_queue *q = tx_qgrp->txqs[j];
q->dev = &adapter->pdev->dev;
- q->desc_count = vport->txq_desc_count;
+ q->desc_count = rsrc->txq_desc_count;
q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
q->netdev = vport->netdev;
@@ -1764,7 +1773,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
if (!tx_qgrp->complq)
goto err_alloc;
- tx_qgrp->complq->desc_count = vport->complq_desc_count;
+ tx_qgrp->complq->desc_count = rsrc->complq_desc_count;
tx_qgrp->complq->txq_grp = tx_qgrp;
tx_qgrp->complq->netdev = vport->netdev;
tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
@@ -1776,7 +1785,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
return 0;
err_alloc:
- idpf_txq_group_rel(vport);
+ idpf_txq_group_rel(rsrc);
return -ENOMEM;
}
@@ -1784,30 +1793,34 @@ err_alloc:
/**
* idpf_rxq_group_alloc - Allocate all rxq group resources
* @vport: vport to allocate rxq groups for
+ * @rsrc: pointer to queue and vector resources
* @num_rxq: number of rxqs to allocate for each group
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
+static int idpf_rxq_group_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ u16 num_rxq)
{
- int i, k, err = 0;
- bool hs;
+ struct idpf_adapter *adapter = vport->adapter;
+ bool hs, rsc;
+ int err = 0;
- vport->rxq_grps = kcalloc(vport->num_rxq_grp,
- sizeof(struct idpf_rxq_group), GFP_KERNEL);
- if (!vport->rxq_grps)
+ rsrc->rxq_grps = kcalloc(rsrc->num_rxq_grp,
+ sizeof(struct idpf_rxq_group), GFP_KERNEL);
+ if (!rsrc->rxq_grps)
return -ENOMEM;
hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ rsc = idpf_is_feature_ena(vport, NETIF_F_GRO_HW);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- int j;
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
rx_qgrp->vport = vport;
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
rx_qgrp->singleq.num_rxq = num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
rx_qgrp->singleq.rxqs[j] =
kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
GFP_KERNEL);
@@ -1820,7 +1833,7 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
}
rx_qgrp->splitq.num_rxq_sets = num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
rx_qgrp->splitq.rxq_sets[j] =
kzalloc(sizeof(struct idpf_rxq_set),
GFP_KERNEL);
@@ -1830,25 +1843,27 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
}
}
- rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
+ rx_qgrp->splitq.bufq_sets = kcalloc(rsrc->num_bufqs_per_qgrp,
sizeof(struct idpf_bufq_set),
GFP_KERNEL);
if (!rx_qgrp->splitq.bufq_sets) {
err = -ENOMEM;
goto err_alloc;
}
+ rx_qgrp->splitq.num_bufq_sets = rsrc->num_bufqs_per_qgrp;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
int swq_size = sizeof(struct idpf_sw_queue);
struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- q->desc_count = vport->bufq_desc_count[j];
+ q->desc_count = rsrc->bufq_desc_count[j];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
idpf_queue_assign(HSPLIT_EN, q, hs);
+ idpf_queue_assign(RSC_EN, q, rsc);
bufq_set->num_refillqs = num_rxq;
bufq_set->refillqs = kcalloc(num_rxq, swq_size,
@@ -1857,12 +1872,12 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
err = -ENOMEM;
goto err_alloc;
}
- for (k = 0; k < bufq_set->num_refillqs; k++) {
+ for (unsigned int k = 0; k < bufq_set->num_refillqs; k++) {
struct idpf_sw_queue *refillq =
&bufq_set->refillqs[k];
refillq->desc_count =
- vport->bufq_desc_count[j];
+ rsrc->bufq_desc_count[j];
idpf_queue_set(GEN_CHK, refillq);
idpf_queue_set(RFL_GEN_CHK, refillq);
refillq->ring = kcalloc(refillq->desc_count,
@@ -1876,37 +1891,39 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
}
skip_splitq_rx_init:
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
struct idpf_rx_queue *q;
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
q = rx_qgrp->singleq.rxqs[j];
+ q->rx_ptype_lkup = adapter->singleq_pt_lkup;
goto setup_rxq;
}
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
&rx_qgrp->splitq.bufq_sets[0].refillqs[j];
- if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
+ if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
&rx_qgrp->splitq.bufq_sets[1].refillqs[j];
idpf_queue_assign(HSPLIT_EN, q, hs);
+ idpf_queue_assign(RSC_EN, q, rsc);
+ q->rx_ptype_lkup = adapter->splitq_pt_lkup;
setup_rxq:
- q->desc_count = vport->rxq_desc_count;
- q->rx_ptype_lkup = vport->rx_ptype_lkup;
+ q->desc_count = rsrc->rxq_desc_count;
q->bufq_sets = rx_qgrp->splitq.bufq_sets;
q->idx = (i * num_rxq) + j;
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
q->rx_max_pkt_size = vport->netdev->mtu +
LIBETH_RX_LL_LEN;
- idpf_rxq_set_descids(vport, q);
+ idpf_rxq_set_descids(rsrc, q);
}
}
err_alloc:
if (err)
- idpf_rxq_group_rel(vport);
+ idpf_rxq_group_rel(rsrc);
return err;
}
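The HSPLIT_EN/RSC_EN propagation above relies on the idpf_queue_*() flag helpers; a minimal sketch of the pattern, with semantics inferred from the call sites in this function:

	bool rsc = idpf_is_feature_ena(vport, NETIF_F_GRO_HW);

	idpf_queue_assign(RSC_EN, q, rsc);	/* set or clear from a bool */
	idpf_queue_set(GEN_CHK, refillq);	/* unconditionally set */
	if (idpf_queue_has(RSC_EN, q))
		/* flag is observed later, e.g. when configuring queues */;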
@@ -1914,28 +1931,30 @@ err_alloc:
/**
* idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
* @vport: vport with qgrps to allocate
+ * @rsrc: pointer to queue and vector resources
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
+static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
u16 num_txq, num_rxq;
int err;
- idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
+ idpf_vport_calc_numq_per_grp(rsrc, &num_txq, &num_rxq);
- err = idpf_txq_group_alloc(vport, num_txq);
+ err = idpf_txq_group_alloc(vport, rsrc, num_txq);
if (err)
goto err_out;
- err = idpf_rxq_group_alloc(vport, num_rxq);
+ err = idpf_rxq_group_alloc(vport, rsrc, num_rxq);
if (err)
goto err_out;
return 0;
err_out:
- idpf_vport_queue_grp_rel_all(vport);
+ idpf_vport_queue_grp_rel_all(rsrc);
return err;
}
@@ -1943,19 +1962,22 @@ err_out:
/**
* idpf_vport_queues_alloc - Allocate memory for all queues
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
+ *
+ * Allocate memory for queues associated with a vport.
*
- * Allocate memory for queues associated with a vport. Returns 0 on success,
- * negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_vport_queues_alloc(struct idpf_vport *vport)
+int idpf_vport_queues_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int err;
- err = idpf_vport_queue_grp_alloc_all(vport);
+ err = idpf_vport_queue_grp_alloc_all(vport, rsrc);
if (err)
goto err_out;
- err = idpf_vport_init_fast_path_txqs(vport);
+ err = idpf_vport_init_fast_path_txqs(vport, rsrc);
if (err)
goto err_out;
@@ -1963,18 +1985,18 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
if (err)
goto err_out;
- err = idpf_tx_desc_alloc_all(vport);
+ err = idpf_tx_desc_alloc_all(vport, rsrc);
if (err)
goto err_out;
- err = idpf_rx_desc_alloc_all(vport);
+ err = idpf_rx_desc_alloc_all(vport, rsrc);
if (err)
goto err_out;
return 0;
err_out:
- idpf_vport_queues_rel(vport);
+ idpf_vport_queues_rel(vport, rsrc);
return err;
}
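A hedged call-site sketch showing how allocation pairs with release under the reworked API; the error unwinding mirrors the internal err_out path above:

	err = idpf_vport_queues_alloc(vport, rsrc);
	if (err)
		return err;

	/* ... use the queues ... */

	idpf_vport_queues_rel(vport, rsrc);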
@@ -2172,7 +2194,7 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
* @budget: Used to determine if we are in netpoll
* @cleaned: returns number of packets cleaned
*
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * Return: %true if there's any budget left (e.g. the clean is finished)
*/
static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
int *cleaned)
@@ -2398,7 +2420,7 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
}
/**
- * idpf_tx_splitq_has_room - check if enough Tx splitq resources are available
+ * idpf_txq_has_room - check if enough Tx splitq resources are available
* @tx_q: the queue to be checked
* @descs_needed: number of descriptors required for this packet
* @bufs_needed: number of Tx buffers required for this packet
@@ -2529,6 +2551,8 @@ unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
* idpf_tx_splitq_bump_ntu - adjust NTU and generation
* @txq: the tx ring to wrap
* @ntu: ring index to bump
+ *
+ * Return: the next ring index, wrapping to 0 when the end of the ring is reached
*/
static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
{
@@ -2797,7 +2821,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
* @skb: pointer to skb
* @off: pointer to struct that holds offload parameters
*
- * Returns error (negative) if TSO was requested but cannot be applied to the
+ * Return: error (negative) if TSO was requested but cannot be applied to the
* given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
*/
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
@@ -2875,6 +2899,8 @@ int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
*
* Since the TX buffer ring mimics the descriptor ring, update the tx buffer
* ring entry to reflect that this index is a context descriptor
+ *
+ * Return: pointer to the next descriptor
*/
static union idpf_flex_tx_ctx_desc *
idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
@@ -2893,6 +2919,8 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
* idpf_tx_drop_skb - free the SKB and bump tail if necessary
* @tx_q: queue to send buffer on
* @skb: pointer to skb
+ *
+ * Return: always NETDEV_TX_OK
*/
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
{
@@ -2994,7 +3022,7 @@ static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
* @skb: send buffer
* @tx_q: queue to send buffer on
*
- * Returns NETDEV_TX_OK if sent, else an error code
+ * Return: NETDEV_TX_OK if sent, else an error code
*/
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q)
@@ -3120,7 +3148,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
* @skb: send buffer
* @netdev: network interface device structure
*
- * Returns NETDEV_TX_OK if sent, else an error code
+ * Return: NETDEV_TX_OK if sent, else an error code
*/
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
{
@@ -3145,7 +3173,7 @@ netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model))
return idpf_tx_splitq_frame(skb, tx_q);
else
return idpf_tx_singleq_frame(skb, tx_q);
@@ -3270,10 +3298,10 @@ idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *
* @rx_desc: Receive descriptor
* @decoded: Decoded Rx packet type related fields
*
- * Return 0 on success and error code on failure
- *
* Populate the skb fields with the total number of RSC segments, RSC payload
* length and packet type.
+ *
+ * Return: 0 on success and error code on failure
*/
static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
@@ -3371,6 +3399,8 @@ idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, protocol, and
* other fields within the skb.
+ *
+ * Return: 0 on success and error code on failure
*/
static int
__idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
@@ -3465,6 +3495,7 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
* @stat_err_field: field from descriptor to test bits in
* @stat_err_bits: value to mask
*
+ * Return: %true if any of the given @stat_err_bits are set, %false otherwise.
*/
static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
const u8 stat_err_bits)
@@ -3476,8 +3507,8 @@ static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
* idpf_rx_splitq_is_eop - process handling of EOP buffers
* @rx_desc: Rx descriptor for current buffer
*
- * If the buffer is an EOP buffer, this function exits returning true,
- * otherwise return false indicating that this is in fact a non-EOP buffer.
+ * Return: %true if the buffer is an EOP buffer, %false otherwise.
*/
static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
@@ -3496,7 +3527,7 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de
* expensive overhead for IOMMU access this provides a means of avoiding
* it by maintaining the mapping of the page to the system.
*
- * Returns amount of work completed
+ * Return: amount of work completed
*/
static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
{
@@ -3626,7 +3657,7 @@ payload:
* @buf_id: buffer ID
* @buf_desc: Buffer queue descriptor
*
- * Return 0 on success and negative on failure.
+ * Return: 0 on success and negative on failure.
*/
static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
struct virtchnl2_splitq_rx_buf_desc *buf_desc)
@@ -3753,6 +3784,7 @@ static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
* @irq: interrupt number
* @data: pointer to a q_vector
*
+ * Return: always IRQ_HANDLED
*/
static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
void *data)
@@ -3767,39 +3799,34 @@ static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
/**
* idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
- * @vport: virtual port structure
- *
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_del_all(struct idpf_q_vec_rsrc *rsrc)
{
- u16 v_idx;
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
- netif_napi_del(&vport->q_vectors[v_idx].napi);
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++)
+ netif_napi_del(&rsrc->q_vectors[v_idx].napi);
}
/**
* idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
- * @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_dis_all(struct idpf_q_vec_rsrc *rsrc)
{
- int v_idx;
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
- napi_disable(&vport->q_vectors[v_idx].napi);
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++)
+ napi_disable(&rsrc->q_vectors[v_idx].napi);
}
/**
* idpf_vport_intr_rel - Free memory allocated for interrupt vectors
- * @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Free the memory allocated for interrupt vectors associated with a vport
*/
-void idpf_vport_intr_rel(struct idpf_vport *vport)
+void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc)
{
- for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[v_idx];
kfree(q_vector->xsksq);
q_vector->xsksq = NULL;
@@ -3813,8 +3840,8 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
q_vector->rx = NULL;
}
- kfree(vport->q_vectors);
- vport->q_vectors = NULL;
+ kfree(rsrc->q_vectors);
+ rsrc->q_vectors = NULL;
}
static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
@@ -3834,21 +3861,22 @@ static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
/**
* idpf_vport_intr_rel_irq - Free the IRQ association with the OS
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
+static void idpf_vport_intr_rel_irq(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
- int vector;
- for (vector = 0; vector < vport->num_q_vectors; vector++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ for (u16 vector = 0; vector < rsrc->num_q_vectors; vector++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[vector];
int irq_num, vidx;
/* free only the irqs that were actually requested */
if (!q_vector)
continue;
- vidx = vport->q_vector_idxs[vector];
+ vidx = rsrc->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
idpf_q_vector_set_napi(q_vector, false);
@@ -3858,22 +3886,23 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
/**
* idpf_vport_intr_dis_irq_all - Disable all interrupts
- * @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
+static void idpf_vport_intr_dis_irq_all(struct idpf_q_vec_rsrc *rsrc)
{
- struct idpf_q_vector *q_vector = vport->q_vectors;
- int q_idx;
+ struct idpf_q_vector *q_vector = rsrc->q_vectors;
- writel(0, vport->noirq_dyn_ctl);
+ writel(0, rsrc->noirq_dyn_ctl);
- for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
+ for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++)
writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
}
/**
* idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
* @q_vector: pointer to q_vector
+ *
+ * Return: value to be written back to HW to enable interrupt generation
*/
static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
{
@@ -4011,8 +4040,12 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/**
* idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
+ *
+ * Return: 0 on success, negative on failure
*/
-static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
+static int idpf_vport_intr_req_irq(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
const char *drv_name, *if_name, *vec_name;
@@ -4021,11 +4054,11 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
drv_name = dev_driver_string(&adapter->pdev->dev);
if_name = netdev_name(vport->netdev);
- for (vector = 0; vector < vport->num_q_vectors; vector++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ for (vector = 0; vector < rsrc->num_q_vectors; vector++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[vector];
char *name;
- vidx = vport->q_vector_idxs[vector];
+ vidx = rsrc->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
if (q_vector->num_rxq && q_vector->num_txq)
@@ -4055,9 +4088,9 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
free_q_irqs:
while (--vector >= 0) {
- vidx = vport->q_vector_idxs[vector];
+ vidx = rsrc->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- kfree(free_irq(irq_num, &vport->q_vectors[vector]));
+ kfree(free_irq(irq_num, &rsrc->q_vectors[vector]));
}
return err;
@@ -4086,15 +4119,16 @@ void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
/**
* idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
+static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
bool dynamic;
- int q_idx;
u16 itr;
- for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
- struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
+ for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++) {
+ struct idpf_q_vector *qv = &rsrc->q_vectors[q_idx];
/* Set the initial ITR values */
if (qv->num_txq) {
@@ -4117,19 +4151,21 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
idpf_vport_intr_update_itr_ena_irq(qv);
}
- writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl);
+ writel(rsrc->noirq_dyn_ctl_ena, rsrc->noirq_dyn_ctl);
}
/**
* idpf_vport_intr_deinit - Release all vector associations for the vport
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-void idpf_vport_intr_deinit(struct idpf_vport *vport)
+void idpf_vport_intr_deinit(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- idpf_vport_intr_dis_irq_all(vport);
- idpf_vport_intr_napi_dis_all(vport);
- idpf_vport_intr_napi_del_all(vport);
- idpf_vport_intr_rel_irq(vport);
+ idpf_vport_intr_dis_irq_all(rsrc);
+ idpf_vport_intr_napi_dis_all(rsrc);
+ idpf_vport_intr_napi_del_all(rsrc);
+ idpf_vport_intr_rel_irq(vport, rsrc);
}
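For symmetry, a sketch of the bring-up/tear-down ordering implied by the new API (call sites assumed; function names from this patch):

	err = idpf_vport_intr_alloc(vport, rsrc);
	if (err)
		return err;

	err = idpf_vport_intr_init(vport, rsrc);
	if (err)
		goto free_vectors;

	idpf_vport_intr_ena(vport, rsrc);
	/* ... */
	idpf_vport_intr_deinit(vport, rsrc);

free_vectors:
	idpf_vport_intr_rel(rsrc);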
/**
@@ -4201,14 +4237,12 @@ static void idpf_init_dim(struct idpf_q_vector *qv)
/**
* idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
- * @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_ena_all(struct idpf_q_vec_rsrc *rsrc)
{
- int q_idx;
-
- for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
+ for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[q_idx];
idpf_init_dim(q_vector);
napi_enable(&q_vector->napi);
@@ -4221,7 +4255,7 @@ static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
* @budget: Used to determine if we are in netpoll
* @cleaned: returns number of packets cleaned
*
- * Returns false if clean is not complete else returns true
+ * Return: %false if the clean is not complete, %true otherwise
*/
static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
int budget, int *cleaned)
@@ -4248,7 +4282,7 @@ static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
* @budget: Used to determine if we are in netpoll
* @cleaned: returns number of packets cleaned
*
- * Returns false if clean is not complete else returns true
+ * Return: %false if the clean is not complete, %true otherwise
*/
static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
int *cleaned)
@@ -4291,6 +4325,8 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
* idpf_vport_splitq_napi_poll - NAPI handler
* @napi: struct from which you get q_vector
* @budget: budget provided by stack
+ *
+ * Return: how many packets were cleaned
*/
static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
{
@@ -4336,24 +4372,26 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
/**
* idpf_vport_intr_map_vector_to_qs - Map vectors to queues
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Mapping of vectors to queues.
*/
-static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
+static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- u16 num_txq_grp = vport->num_txq_grp - vport->num_xdp_txq;
- bool split = idpf_is_queue_model_split(vport->rxq_model);
+ u16 num_txq_grp = rsrc->num_txq_grp - vport->num_xdp_txq;
+ bool split = idpf_is_queue_model_split(rsrc->rxq_model);
struct idpf_rxq_group *rx_qgrp;
struct idpf_txq_group *tx_qgrp;
- u32 i, qv_idx, q_index;
+ u32 q_index;
- for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
+ for (unsigned int i = 0, qv_idx = 0; i < rsrc->num_rxq_grp; i++) {
u16 num_rxq;
- if (qv_idx >= vport->num_q_vectors)
+ if (qv_idx >= rsrc->num_q_vectors)
qv_idx = 0;
- rx_qgrp = &vport->rxq_grps[i];
+ rx_qgrp = &rsrc->rxq_grps[i];
if (split)
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
@@ -4366,7 +4404,7 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
- q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector = &rsrc->q_vectors[qv_idx];
q_index = q->q_vector->num_rxq;
q->q_vector->rx[q_index] = q;
q->q_vector->num_rxq++;
@@ -4376,11 +4414,11 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
}
if (split) {
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_buf_queue *bufq;
bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
- bufq->q_vector = &vport->q_vectors[qv_idx];
+ bufq->q_vector = &rsrc->q_vectors[qv_idx];
q_index = bufq->q_vector->num_bufq;
bufq->q_vector->bufq[q_index] = bufq;
bufq->q_vector->num_bufq++;
@@ -4390,40 +4428,40 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
qv_idx++;
}
- split = idpf_is_queue_model_split(vport->txq_model);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
- for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
+ for (unsigned int i = 0, qv_idx = 0; i < num_txq_grp; i++) {
u16 num_txq;
- if (qv_idx >= vport->num_q_vectors)
+ if (qv_idx >= rsrc->num_q_vectors)
qv_idx = 0;
- tx_qgrp = &vport->txq_grps[i];
+ tx_qgrp = &rsrc->txq_grps[i];
num_txq = tx_qgrp->num_txq;
for (u32 j = 0; j < num_txq; j++) {
struct idpf_tx_queue *q;
q = tx_qgrp->txqs[j];
- q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector = &rsrc->q_vectors[qv_idx];
q->q_vector->tx[q->q_vector->num_txq++] = q;
}
if (split) {
struct idpf_compl_queue *q = tx_qgrp->complq;
- q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector = &rsrc->q_vectors[qv_idx];
q->q_vector->complq[q->q_vector->num_complq++] = q;
}
qv_idx++;
}
- for (i = 0; i < vport->num_xdp_txq; i++) {
+ for (unsigned int i = 0; i < vport->num_xdp_txq; i++) {
struct idpf_tx_queue *xdpsq;
struct idpf_q_vector *qv;
- xdpsq = vport->txqs[vport->xdp_txq_offset + i];
+ xdpsq = vport->txqs[rsrc->xdp_txq_offset + i];
if (!idpf_queue_has(XSK, xdpsq))
continue;
@@ -4438,10 +4476,14 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
/**
* idpf_vport_intr_init_vec_idx - Initialize the vector indexes
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
- * Initialize vector indexes with values returened over mailbox
+ * Initialize vector indexes with values returned over mailbox.
+ *
+ * Return: 0 on success, negative on failure
*/
-static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
+static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_alloc_vectors *ac;
@@ -4450,10 +4492,10 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
ac = adapter->req_vec_chunks;
if (!ac) {
- for (i = 0; i < vport->num_q_vectors; i++)
- vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
+ for (i = 0; i < rsrc->num_q_vectors; i++)
+ rsrc->q_vectors[i].v_idx = rsrc->q_vector_idxs[i];
- vport->noirq_v_idx = vport->q_vector_idxs[i];
+ rsrc->noirq_v_idx = rsrc->q_vector_idxs[i];
return 0;
}
@@ -4465,10 +4507,10 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
- for (i = 0; i < vport->num_q_vectors; i++)
- vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
+ for (i = 0; i < rsrc->num_q_vectors; i++)
+ rsrc->q_vectors[i].v_idx = vecids[rsrc->q_vector_idxs[i]];
- vport->noirq_v_idx = vecids[vport->q_vector_idxs[i]];
+ rsrc->noirq_v_idx = vecids[rsrc->q_vector_idxs[i]];
kfree(vecids);
@@ -4478,21 +4520,24 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
/**
* idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int (*napi_poll)(struct napi_struct *napi, int budget);
- u16 v_idx, qv_idx;
int irq_num;
+ u16 qv_idx;
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(rsrc->txq_model))
napi_poll = idpf_vport_splitq_napi_poll;
else
napi_poll = idpf_vport_singleq_napi_poll;
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
- qv_idx = vport->q_vector_idxs[v_idx];
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[v_idx];
+
+ qv_idx = rsrc->q_vector_idxs[v_idx];
irq_num = vport->adapter->msix_entries[qv_idx].vector;
netif_napi_add_config(vport->netdev, &q_vector->napi,
@@ -4504,37 +4549,41 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
/**
* idpf_vport_intr_alloc - Allocate memory for interrupt vectors
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
+ *
+ * Allocate one q_vector per queue interrupt.
*
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
+ * Return: 0 on success, -ENOMEM if allocation fails.
*/
-int idpf_vport_intr_alloc(struct idpf_vport *vport)
+int idpf_vport_intr_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
struct idpf_vport_user_config_data *user_config;
struct idpf_q_vector *q_vector;
struct idpf_q_coalesce *q_coal;
- u32 complqs_per_vector, v_idx;
+ u32 complqs_per_vector;
u16 idx = vport->idx;
user_config = &vport->adapter->vport_config[idx]->user_config;
- vport->q_vectors = kcalloc(vport->num_q_vectors,
- sizeof(struct idpf_q_vector), GFP_KERNEL);
- if (!vport->q_vectors)
+
+ rsrc->q_vectors = kcalloc(rsrc->num_q_vectors,
+ sizeof(struct idpf_q_vector), GFP_KERNEL);
+ if (!rsrc->q_vectors)
return -ENOMEM;
- txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
- vport->num_q_vectors);
- rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
- vport->num_q_vectors);
- bufqs_per_vector = vport->num_bufqs_per_qgrp *
- DIV_ROUND_UP(vport->num_rxq_grp,
- vport->num_q_vectors);
- complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
- vport->num_q_vectors);
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
- q_vector = &vport->q_vectors[v_idx];
+ txqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
+ rsrc->num_q_vectors);
+ rxqs_per_vector = DIV_ROUND_UP(rsrc->num_rxq_grp,
+ rsrc->num_q_vectors);
+ bufqs_per_vector = rsrc->num_bufqs_per_qgrp *
+ DIV_ROUND_UP(rsrc->num_rxq_grp,
+ rsrc->num_q_vectors);
+ complqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
+ rsrc->num_q_vectors);
+
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
+ q_vector = &rsrc->q_vectors[v_idx];
q_coal = &user_config->q_coalesce[v_idx];
q_vector->vport = vport;
@@ -4556,7 +4605,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
if (!q_vector->rx)
goto error;
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
continue;
q_vector->bufq = kcalloc(bufqs_per_vector,
@@ -4571,7 +4620,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
if (!q_vector->complq)
goto error;
- if (!vport->xdp_txq_offset)
+ if (!rsrc->xdp_txq_offset)
continue;
q_vector->xsksq = kcalloc(rxqs_per_vector,
@@ -4584,7 +4633,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
return 0;
error:
- idpf_vport_intr_rel(vport);
+ idpf_vport_intr_rel(rsrc);
return -ENOMEM;
}
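To make the per-vector spreading concrete, a worked example of the DIV_ROUND_UP() sizing above (illustrative numbers only):

	/* rsrc->num_txq_grp == 16, rsrc->num_q_vectors == 5:
	 *   txqs_per_vector  == DIV_ROUND_UP(16, 5) == 4
	 * with rsrc->num_bufqs_per_qgrp == 2 and num_rxq_grp == 16:
	 *   bufqs_per_vector == 2 * DIV_ROUND_UP(16, 5) == 8
	 */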
@@ -4592,72 +4641,74 @@ error:
/**
* idpf_vport_intr_init - Setup all vectors for the given vport
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
- * Returns 0 on success or negative on failure
+ * Return: 0 on success or negative on failure
*/
-int idpf_vport_intr_init(struct idpf_vport *vport)
+int idpf_vport_intr_init(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
int err;
- err = idpf_vport_intr_init_vec_idx(vport);
+ err = idpf_vport_intr_init_vec_idx(vport, rsrc);
if (err)
return err;
- idpf_vport_intr_map_vector_to_qs(vport);
- idpf_vport_intr_napi_add_all(vport);
+ idpf_vport_intr_map_vector_to_qs(vport, rsrc);
+ idpf_vport_intr_napi_add_all(vport, rsrc);
- err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
+ err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport, rsrc);
if (err)
goto unroll_vectors_alloc;
- err = idpf_vport_intr_req_irq(vport);
+ err = idpf_vport_intr_req_irq(vport, rsrc);
if (err)
goto unroll_vectors_alloc;
return 0;
unroll_vectors_alloc:
- idpf_vport_intr_napi_del_all(vport);
+ idpf_vport_intr_napi_del_all(rsrc);
return err;
}
-void idpf_vport_intr_ena(struct idpf_vport *vport)
+void idpf_vport_intr_ena(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
- idpf_vport_intr_napi_ena_all(vport);
- idpf_vport_intr_ena_irq_all(vport);
+ idpf_vport_intr_napi_ena_all(rsrc);
+ idpf_vport_intr_ena_irq_all(vport, rsrc);
}
/**
* idpf_config_rss - Send virtchnl messages to configure RSS
* @vport: virtual port
+ * @rss_data: pointer to RSS key and lut info
*
- * Return 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_config_rss(struct idpf_vport *vport)
+int idpf_config_rss(struct idpf_vport *vport, struct idpf_rss_data *rss_data)
{
+ struct idpf_adapter *adapter = vport->adapter;
+ u32 vport_id = vport->vport_id;
int err;
- err = idpf_send_get_set_rss_key_msg(vport, false);
+ err = idpf_send_get_set_rss_key_msg(adapter, rss_data, vport_id, false);
if (err)
return err;
- return idpf_send_get_set_rss_lut_msg(vport, false);
+ return idpf_send_get_set_rss_lut_msg(adapter, rss_data, vport_id, false);
}
/**
* idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
* @vport: virtual port structure
+ * @rss_data: pointer to RSS key and lut info
*/
-void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport,
+ struct idpf_rss_data *rss_data)
{
- struct idpf_adapter *adapter = vport->adapter;
- u16 num_active_rxq = vport->num_rxq;
- struct idpf_rss_data *rss_data;
+ u16 num_active_rxq = vport->dflt_qv_rsrc.num_rxq;
int i;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
-
for (i = 0; i < rss_data->rss_lut_size; i++)
rss_data->rss_lut[i] = i % num_active_rxq;
}
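A worked example of the round-robin fill: with rss_data->rss_lut_size == 8 and num_active_rxq == 3, rss_lut[i] = i % 3 yields { 0, 1, 2, 0, 1, 2, 0, 1 }, spreading flows evenly across the active RX queues.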
@@ -4665,15 +4716,12 @@ void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
/**
* idpf_init_rss_lut - Allocate and initialize RSS LUT
* @vport: virtual port
+ * @rss_data: pointer to RSS key and lut info
*
* Return: 0 on success, negative on failure
*/
-int idpf_init_rss_lut(struct idpf_vport *vport)
+int idpf_init_rss_lut(struct idpf_vport *vport, struct idpf_rss_data *rss_data)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct idpf_rss_data *rss_data;
-
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
if (!rss_data->rss_lut) {
u32 lut_size;
@@ -4684,21 +4732,17 @@ int idpf_init_rss_lut(struct idpf_vport *vport)
}
/* Fill the default RSS lut values */
- idpf_fill_dflt_rss_lut(vport);
+ idpf_fill_dflt_rss_lut(vport, rss_data);
return 0;
}
/**
* idpf_deinit_rss_lut - Release RSS LUT
- * @vport: virtual port
+ * @rss_data: pointer to RSS key and lut info
*/
-void idpf_deinit_rss_lut(struct idpf_vport *vport)
+void idpf_deinit_rss_lut(struct idpf_rss_data *rss_data)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct idpf_rss_data *rss_data;
-
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
kfree(rss_data->rss_lut);
rss_data->rss_lut = NULL;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 423cc9486dce..4be5b3b6d3ed 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -283,6 +283,7 @@ struct idpf_ptype_state {
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
* @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
+ * @__IDPF_Q_RSC_EN: enable Receive Side Coalescing on Rx (splitq)
* @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
* @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
* queue
@@ -297,6 +298,7 @@ enum idpf_queue_flags_t {
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER,
__IDPF_Q_CRC_EN,
+ __IDPF_Q_RSC_EN,
__IDPF_Q_HSPLIT_EN,
__IDPF_Q_PTP,
__IDPF_Q_NOIRQ,
@@ -925,6 +927,7 @@ struct idpf_bufq_set {
* @singleq.rxqs: Array of RX queue pointers
* @splitq: Struct with split queue related members
* @splitq.num_rxq_sets: Number of RX queue sets
+ * @splitq.num_bufq_sets: Number of buffer queue sets
* @splitq.rxq_sets: Array of RX queue sets
* @splitq.bufq_sets: Buffer queue set pointer
*
@@ -942,6 +945,7 @@ struct idpf_rxq_group {
} singleq;
struct {
u16 num_rxq_sets;
+ u16 num_bufq_sets;
struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
struct idpf_bufq_set *bufq_sets;
} splitq;
@@ -1072,25 +1076,35 @@ static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
- struct virtchnl2_create_vport *vport_msg);
-void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
struct virtchnl2_create_vport *vport_msg,
struct idpf_vport_max_q *max_q);
-void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
-int idpf_vport_queues_alloc(struct idpf_vport *vport);
-void idpf_vport_queues_rel(struct idpf_vport *vport);
-void idpf_vport_intr_rel(struct idpf_vport *vport);
-int idpf_vport_intr_alloc(struct idpf_vport *vport);
+void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_queues_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_queues_rel(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_intr_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
-void idpf_vport_intr_deinit(struct idpf_vport *vport);
-int idpf_vport_intr_init(struct idpf_vport *vport);
-void idpf_vport_intr_ena(struct idpf_vport *vport);
-void idpf_fill_dflt_rss_lut(struct idpf_vport *vport);
-int idpf_config_rss(struct idpf_vport *vport);
-int idpf_init_rss_lut(struct idpf_vport *vport);
-void idpf_deinit_rss_lut(struct idpf_vport *vport);
-int idpf_rx_bufs_init_all(struct idpf_vport *vport);
+void idpf_vport_intr_deinit(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_intr_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_intr_ena(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport,
+ struct idpf_rss_data *rss_data);
+int idpf_config_rss(struct idpf_vport *vport, struct idpf_rss_data *rss_data);
+int idpf_init_rss_lut(struct idpf_vport *vport, struct idpf_rss_data *rss_data);
+void idpf_deinit_rss_lut(struct idpf_rss_data *rss_data);
+int idpf_rx_bufs_init_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
u32 q_num);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 4cc58c83688c..7527b967e2e7 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -69,11 +69,13 @@ static void idpf_vf_mb_intr_reg_init(struct idpf_adapter *adapter)
/**
* idpf_vf_intr_reg_init - Initialize interrupt registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*/
-static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
+static int idpf_vf_intr_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
- int num_vecs = vport->num_q_vectors;
+ u16 num_vecs = rsrc->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
u32 rx_itr, tx_itr, val;
@@ -85,15 +87,15 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
if (!reg_vals)
return -ENOMEM;
- num_regs = idpf_get_reg_intr_vecs(vport, reg_vals);
+ num_regs = idpf_get_reg_intr_vecs(adapter, reg_vals);
if (num_regs < num_vecs) {
err = -EINVAL;
goto free_reg_vals;
}
for (i = 0; i < num_vecs; i++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[i];
- u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[i];
+ u16 vec_id = rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
struct idpf_intr_reg *intr = &q_vector->intr_reg;
u32 spacing;
@@ -122,12 +124,12 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
/* Data vector for NOIRQ queues */
- val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
- vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+ val = reg_vals[rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ rsrc->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
val = VF_INT_DYN_CTLN_WB_ON_ITR_M | VF_INT_DYN_CTLN_INTENA_MSK_M |
FIELD_PREP(VF_INT_DYN_CTLN_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
- vport->noirq_dyn_ctl_ena = val;
+ rsrc->noirq_dyn_ctl_ena = val;
free_reg_vals:
kfree(reg_vals);
@@ -156,7 +158,8 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
/* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
if (trig_cause == IDPF_HR_FUNC_RESET &&
!test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
- idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
+ idpf_send_mb_msg(adapter, adapter->hw.asq,
+ VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index cb702eac86c8..d46affaf7185 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -117,13 +117,15 @@ static void idpf_recv_event_msg(struct idpf_adapter *adapter,
/**
* idpf_mb_clean - Reclaim the send mailbox queue entries
- * @adapter: Driver specific private structure
+ * @adapter: driver specific private structure
+ * @asq: send control queue info
*
* Reclaim the send mailbox queue entries to be used to send further messages
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_mb_clean(struct idpf_adapter *adapter)
+static int idpf_mb_clean(struct idpf_adapter *adapter,
+ struct idpf_ctlq_info *asq)
{
u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
struct idpf_ctlq_msg **q_msg;
@@ -134,7 +136,7 @@ static int idpf_mb_clean(struct idpf_adapter *adapter)
if (!q_msg)
return -ENOMEM;
- err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
+ err = idpf_ctlq_clean_sq(asq, &num_q_msg, q_msg);
if (err)
goto err_kfree;
@@ -206,7 +208,8 @@ static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
/**
* idpf_send_mb_msg - Send message over mailbox
- * @adapter: Driver specific private structure
+ * @adapter: driver specific private structure
+ * @asq: control queue to send message to
* @op: virtchnl opcode
* @msg_size: size of the payload
* @msg: pointer to buffer holding the payload
@@ -214,10 +217,10 @@ static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
*
* Prepares the control queue message and initiates the send API
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg, u16 cookie)
+int idpf_send_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *asq,
+ u32 op, u16 msg_size, u8 *msg, u16 cookie)
{
struct idpf_ctlq_msg *ctlq_msg;
struct idpf_dma_mem *dma_mem;
@@ -231,7 +234,7 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
if (idpf_is_reset_detected(adapter))
return 0;
- err = idpf_mb_clean(adapter);
+ err = idpf_mb_clean(adapter, asq);
if (err)
return err;
@@ -267,7 +270,7 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
ctlq_msg->ctx.indirect.payload = dma_mem;
ctlq_msg->ctx.sw_cookie.data = cookie;
- err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
+ err = idpf_ctlq_send(&adapter->hw, asq, 1, ctlq_msg);
if (err)
goto send_error;
@@ -463,7 +466,7 @@ ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
- retval = idpf_send_mb_msg(adapter, params->vc_op,
+ retval = idpf_send_mb_msg(adapter, adapter->hw.asq, params->vc_op,
send_buf->iov_len, send_buf->iov_base,
cookie);
if (retval) {
@@ -662,12 +665,14 @@ out_unlock:
/**
* idpf_recv_mb_msg - Receive message over mailbox
- * @adapter: Driver specific private structure
+ * @adapter: driver specific private structure
+ * @arq: control queue to receive message from
+ *
+ * Receives the control queue message and posts the receive buffer.
*
- * Will receive control queue message and posts the receive buffer. Returns 0
- * on success and negative on failure.
+ * Return: 0 on success and negative on failure.
*/
-int idpf_recv_mb_msg(struct idpf_adapter *adapter)
+int idpf_recv_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *arq)
{
struct idpf_ctlq_msg ctlq_msg;
struct idpf_dma_mem *dma_mem;
@@ -679,7 +684,7 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
* actually received on num_recv.
*/
num_recv = 1;
- err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
+ err = idpf_ctlq_recv(arq, &num_recv, &ctlq_msg);
if (err || !num_recv)
break;
@@ -695,8 +700,7 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
else
err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
- post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
- adapter->hw.arq,
+ post_err = idpf_ctlq_post_rx_buffs(&adapter->hw, arq,
&num_recv, &dma_mem);
/* If post failed clear the only buffer we supplied */
@@ -717,9 +721,8 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
}
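A hedged call-site sketch for the parameterized receive path; the default mailbox queues hw.asq/hw.arq appear elsewhere in this patch:

	err = idpf_recv_mb_msg(adapter, adapter->hw.arq);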
struct idpf_chunked_msg_params {
- u32 (*prepare_msg)(const struct idpf_vport *vport,
- void *buf, const void *pos,
- u32 num);
+ u32 (*prepare_msg)(u32 vport_id, void *buf,
+ const void *pos, u32 num);
const void *chunks;
u32 num_chunks;
@@ -728,9 +731,12 @@ struct idpf_chunked_msg_params {
u32 config_sz;
u32 vc_op;
+ u32 vport_id;
};
-struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *qv_rsrc,
+ u32 vport_id, u32 num)
{
struct idpf_queue_set *qp;
@@ -738,7 +744,9 @@ struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
if (!qp)
return NULL;
- qp->vport = vport;
+ qp->adapter = adapter;
+ qp->qv_rsrc = qv_rsrc;
+ qp->vport_id = vport_id;
qp->num = num;
return qp;
@@ -746,7 +754,7 @@ struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
/**
* idpf_send_chunked_msg - send VC message consisting of chunks
- * @vport: virtual port data structure
+ * @adapter: driver specific private structure
* @params: message params
*
* Helper function for preparing a message describing queues to be enabled
@@ -754,7 +762,7 @@ struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
*
* Return: the total size of the prepared message.
*/
-static int idpf_send_chunked_msg(struct idpf_vport *vport,
+static int idpf_send_chunked_msg(struct idpf_adapter *adapter,
const struct idpf_chunked_msg_params *params)
{
struct idpf_vc_xn_params xn_params = {
@@ -765,6 +773,7 @@ static int idpf_send_chunked_msg(struct idpf_vport *vport,
u32 num_chunks, num_msgs, buf_sz;
void *buf __free(kfree) = NULL;
u32 totqs = params->num_chunks;
+ u32 vid = params->vport_id;
num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,
params->chunk_sz), totqs);
@@ -783,10 +792,10 @@ static int idpf_send_chunked_msg(struct idpf_vport *vport,
memset(buf, 0, buf_sz);
xn_params.send_buf.iov_len = buf_sz;
- if (params->prepare_msg(vport, buf, pos, num_chunks) != buf_sz)
+ if (params->prepare_msg(vid, buf, pos, num_chunks) != buf_sz)
return -EINVAL;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
@@ -809,6 +818,7 @@ static int idpf_send_chunked_msg(struct idpf_vport *vport,
*/
static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
{
+ struct net_device *netdev;
struct idpf_tx_queue *txq;
bool markers_rcvd = true;
@@ -817,6 +827,8 @@ static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
case VIRTCHNL2_QUEUE_TYPE_TX:
txq = qs->qs[i].txq;
+ netdev = txq->netdev;
+
idpf_queue_set(SW_MARKER, txq);
idpf_wait_for_sw_marker_completion(txq);
markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
@@ -827,7 +839,7 @@ static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
}
if (!markers_rcvd) {
- netdev_warn(qs->vport->netdev,
+ netdev_warn(netdev,
"Failed to receive marker packets\n");
return -ETIMEDOUT;
}
@@ -845,7 +857,8 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- qs = idpf_alloc_queue_set(vport, vport->num_txq);
+ qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
+ vport->vport_id, vport->num_txq);
if (!qs)
return -ENOMEM;
@@ -1263,13 +1276,52 @@ static void idpf_init_avail_queues(struct idpf_adapter *adapter)
}
/**
+ * idpf_vport_init_queue_reg_chunks - initialize queue register chunks
+ * @vport_config: persistent vport structure to store the queue register info
+ * @schunks: source chunks to copy data from
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int
+idpf_vport_init_queue_reg_chunks(struct idpf_vport_config *vport_config,
+ struct virtchnl2_queue_reg_chunks *schunks)
+{
+ struct idpf_queue_id_reg_info *q_info = &vport_config->qid_reg_info;
+ u16 num_chunks = le16_to_cpu(schunks->num_chunks);
+
+ kfree(q_info->queue_chunks);
+
+ q_info->queue_chunks = kcalloc(num_chunks, sizeof(*q_info->queue_chunks),
+ GFP_KERNEL);
+ if (!q_info->queue_chunks) {
+ q_info->num_chunks = 0;
+ return -ENOMEM;
+ }
+
+ q_info->num_chunks = num_chunks;
+
+ for (u16 i = 0; i < num_chunks; i++) {
+ struct idpf_queue_id_reg_chunk *dchunk = &q_info->queue_chunks[i];
+ struct virtchnl2_queue_reg_chunk *schunk = &schunks->chunks[i];
+
+ dchunk->qtail_reg_start = le64_to_cpu(schunk->qtail_reg_start);
+ dchunk->qtail_reg_spacing = le32_to_cpu(schunk->qtail_reg_spacing);
+ dchunk->type = le32_to_cpu(schunk->type);
+ dchunk->start_queue_id = le32_to_cpu(schunk->start_queue_id);
+ dchunk->num_queues = le32_to_cpu(schunk->num_queues);
+ }
+
+ return 0;
+}
+
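A usage sketch, assuming the chunks come from the CREATE_VPORT response as in the lookup removed from idpf_queue_reg_init below:

	vport_params = vport->adapter->vport_params_recvd[vport->idx];
	err = idpf_vport_init_queue_reg_chunks(vport_config,
					       &vport_params->chunks);
	if (err)
		return err;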
+/**
* idpf_get_reg_intr_vecs - Get vector queue register offset
- * @vport: virtual port structure
+ * @adapter: adapter structure to get the vector chunks
* @reg_vals: Register offsets to store in
*
- * Returns number of registers that got populated
+ * Return: number of registers that got populated
*/
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+int idpf_get_reg_intr_vecs(struct idpf_adapter *adapter,
struct idpf_vec_regs *reg_vals)
{
struct virtchnl2_vector_chunks *chunks;
@@ -1277,7 +1329,7 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
u16 num_vchunks, num_vec;
int num_regs = 0, i, j;
- chunks = &vport->adapter->req_vec_chunks->vchunks;
+ chunks = &adapter->req_vec_chunks->vchunks;
num_vchunks = le16_to_cpu(chunks->num_vchunks);
for (j = 0; j < num_vchunks; j++) {
@@ -1322,25 +1374,25 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
* are filled.
*/
static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
- struct virtchnl2_queue_reg_chunks *chunks)
+ struct idpf_queue_id_reg_info *chunks)
{
- u16 num_chunks = le16_to_cpu(chunks->num_chunks);
+ u16 num_chunks = chunks->num_chunks;
int reg_filled = 0, i;
u32 reg_val;
while (num_chunks--) {
- struct virtchnl2_queue_reg_chunk *chunk;
+ struct idpf_queue_id_reg_chunk *chunk;
u16 num_q;
- chunk = &chunks->chunks[num_chunks];
- if (le32_to_cpu(chunk->type) != q_type)
+ chunk = &chunks->queue_chunks[num_chunks];
+ if (chunk->type != q_type)
continue;
- num_q = le32_to_cpu(chunk->num_queues);
- reg_val = le64_to_cpu(chunk->qtail_reg_start);
+ num_q = chunk->num_queues;
+ reg_val = chunk->qtail_reg_start;
for (i = 0; i < num_q && reg_filled < num_regs ; i++) {
reg_vals[reg_filled++] = reg_val;
- reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
+ reg_val += chunk->qtail_reg_spacing;
}
}
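A worked example of how one chunk expands into tail register offsets (illustrative values):

	/* chunk->qtail_reg_start == 0x1000, qtail_reg_spacing == 0x4,
	 * num_queues == 3
	 *   -> reg_vals == { 0x1000, 0x1004, 0x1008 }
	 */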
@@ -1350,13 +1402,15 @@ static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
/**
* __idpf_queue_reg_init - initialize queue registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
* @reg_vals: registers we are initializing
* @num_regs: how many registers there are in total
* @q_type: queue model
*
* Return: number of queues that are initialized
*/
-static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
+static int __idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc, u32 *reg_vals,
int num_regs, u32 q_type)
{
struct idpf_adapter *adapter = vport->adapter;
@@ -1364,8 +1418,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
switch (q_type) {
case VIRTCHNL2_QUEUE_TYPE_TX:
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
tx_qgrp->txqs[j]->tail =
@@ -1373,8 +1427,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u16 num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
@@ -1387,9 +1441,9 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u8 num_bufqs = vport->num_bufqs_per_qgrp;
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
+ u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
struct idpf_buf_queue *q;
@@ -1410,15 +1464,15 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
/**
* idpf_queue_reg_init - initialize queue registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
+ * @chunks: queue register chunks received over mailbox
*
- * Return 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_queue_reg_init(struct idpf_vport *vport)
+int idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ struct idpf_queue_id_reg_info *chunks)
{
- struct virtchnl2_create_vport *vport_params;
- struct virtchnl2_queue_reg_chunks *chunks;
- struct idpf_vport_config *vport_config;
- u16 vport_idx = vport->idx;
int num_regs, ret = 0;
u32 *reg_vals;
@@ -1427,28 +1481,18 @@ int idpf_queue_reg_init(struct idpf_vport *vport)
if (!reg_vals)
return -ENOMEM;
- vport_config = vport->adapter->vport_config[vport_idx];
- if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
- } else {
- vport_params = vport->adapter->vport_params_recvd[vport_idx];
- chunks = &vport_params->chunks;
- }
-
/* Initialize Tx queue tail register address */
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_TX,
chunks);
- if (num_regs < vport->num_txq) {
+ if (num_regs < rsrc->num_txq) {
ret = -EINVAL;
goto free_reg_vals;
}
- num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
VIRTCHNL2_QUEUE_TYPE_TX);
- if (num_regs < vport->num_txq) {
+ if (num_regs < rsrc->num_txq) {
ret = -EINVAL;
goto free_reg_vals;
}
@@ -1456,18 +1500,18 @@ int idpf_queue_reg_init(struct idpf_vport *vport)
/* Initialize Rx/buffer queue tail register address based on Rx queue
* model
*/
- if (idpf_is_queue_model_split(vport->rxq_model)) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
chunks);
- if (num_regs < vport->num_bufq) {
+ if (num_regs < rsrc->num_bufq) {
ret = -EINVAL;
goto free_reg_vals;
}
- num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
- if (num_regs < vport->num_bufq) {
+ if (num_regs < rsrc->num_bufq) {
ret = -EINVAL;
goto free_reg_vals;
}
@@ -1475,14 +1519,14 @@ int idpf_queue_reg_init(struct idpf_vport *vport)
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_RX,
chunks);
- if (num_regs < vport->num_rxq) {
+ if (num_regs < rsrc->num_rxq) {
ret = -EINVAL;
goto free_reg_vals;
}
- num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
VIRTCHNL2_QUEUE_TYPE_RX);
- if (num_regs < vport->num_rxq) {
+ if (num_regs < rsrc->num_rxq) {
ret = -EINVAL;
goto free_reg_vals;
}
@@ -1581,6 +1625,7 @@ free_vport_params:
*/
int idpf_check_supported_desc_ids(struct idpf_vport *vport)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_create_vport *vport_msg;
u64 rx_desc_ids, tx_desc_ids;
@@ -1597,17 +1642,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
- if (idpf_is_queue_model_split(vport->rxq_model)) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
}
} else {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
- vport->base_rxd = true;
+ rsrc->base_rxd = true;
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
return 0;
if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
@@ -1620,96 +1665,96 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
/**
* idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Send virtchnl destroy vport message. Returns 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
+int idpf_send_destroy_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
{
struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
ssize_t reply_sz;
- v_id.vport_id = cpu_to_le32(vport->vport_id);
+ v_id.vport_id = cpu_to_le32(vport_id);
xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
* idpf_send_enable_vport_msg - Send virtchnl enable vport message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Send enable vport virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_enable_vport_msg(struct idpf_vport *vport)
+int idpf_send_enable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
{
struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
ssize_t reply_sz;
- v_id.vport_id = cpu_to_le32(vport->vport_id);
+ v_id.vport_id = cpu_to_le32(vport_id);
xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
* idpf_send_disable_vport_msg - Send virtchnl disable vport message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Send disable vport virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_disable_vport_msg(struct idpf_vport *vport)
+int idpf_send_disable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
{
struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
ssize_t reply_sz;
- v_id.vport_id = cpu_to_le32(vport->vport_id);
+ v_id.vport_id = cpu_to_le32(vport_id);
xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
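
After the conversion, destroy, enable and disable differ only in the opcode; the three bodies are otherwise identical. If further deduplication were wanted, a hedged sketch of a shared helper (hypothetical name, not part of this patch) could be:

static int idpf_send_vport_id_msg(struct idpf_adapter *adapter, u32 vport_id,
				  u32 vc_op)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport_id);
	xn_params.vc_op = vc_op;	/* e.g. VIRTCHNL2_OP_ENABLE_VPORT */
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}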
/**
* idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: Tx queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_txq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
const struct idpf_tx_queue *q,
struct virtchnl2_txq_info *qi)
{
u32 val;
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->txq_model);
+ qi->model = cpu_to_le16(rsrc->txq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
qi->relative_queue_id = cpu_to_le16(q->rel_q_id);
- if (!idpf_is_queue_model_split(vport->txq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->txq_model)) {
qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
return;
}
@@ -1731,18 +1776,18 @@ static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_fill_complq_config_chunk - fill chunk describing the completion queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: completion queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_complq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
const struct idpf_compl_queue *q,
struct virtchnl2_txq_info *qi)
{
u32 val;
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->txq_model);
+ qi->model = cpu_to_le16(rsrc->txq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
@@ -1757,7 +1802,7 @@ static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
- * @vport: virtual port data structure
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the tx queue
* @num_chunks: number of chunks in the message
@@ -1767,13 +1812,12 @@ static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
*
* Return: the total size of the prepared message.
*/
-static u32 idpf_prepare_cfg_txqs_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
+static u32 idpf_prepare_cfg_txqs_msg(u32 vport_id, void *buf, const void *pos,
u32 num_chunks)
{
struct virtchnl2_config_tx_queues *ctq = buf;
- ctq->vport_id = cpu_to_le32(vport->vport_id);
+ ctq->vport_id = cpu_to_le32(vport_id);
ctq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo));
@@ -1794,6 +1838,7 @@ static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
{
struct virtchnl2_txq_info *qi __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES,
.prepare_msg = idpf_prepare_cfg_txqs_msg,
.config_sz = sizeof(struct virtchnl2_config_tx_queues),
@@ -1808,43 +1853,47 @@ static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
for (u32 i = 0; i < qs->num; i++) {
if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX)
- idpf_fill_txq_config_chunk(qs->vport, qs->qs[i].txq,
+ idpf_fill_txq_config_chunk(qs->qv_rsrc, qs->qs[i].txq,
&qi[params.num_chunks++]);
else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
- idpf_fill_complq_config_chunk(qs->vport,
+ idpf_fill_complq_config_chunk(qs->qv_rsrc,
qs->qs[i].complq,
&qi[params.num_chunks++]);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
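
idpf_send_chunked_msg() itself is outside this hunk, but the params block above implies the batching contract: pack as many chunk_sz records as fit under the control-queue buffer ceiling, then call prepare_msg once per batch. A hedged user-space sketch of only the batching arithmetic (the ceiling value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_BUF_LEN 4096u	/* illustrative stand-in for IDPF_CTLQ_MAX_BUF_LEN */

static void plan_batches(uint32_t num_chunks, uint32_t config_sz,
			 uint32_t chunk_sz)
{
	uint32_t per_msg = (MAX_BUF_LEN - config_sz) / chunk_sz;

	while (num_chunks) {
		uint32_t n = num_chunks < per_msg ? num_chunks : per_msg;

		printf("one message carrying %u chunks\n", n);
		num_chunks -= n;
	}
}

int main(void)
{
	plan_batches(100, 64, 88);	/* (4096 - 64) / 88 = 45 -> 45 + 45 + 10 */
	return 0;
}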
/**
* idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
* Return: 0 on success, -errno on failure.
*/
-static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+static int idpf_send_config_tx_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- u32 totqs = vport->num_txq + vport->num_complq;
+ u32 totqs = rsrc->num_txq + rsrc->num_complq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, totqs);
+ qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, totqs);
if (!qs)
return -ENOMEM;
/* Populate the queue info buffer with all queue context info */
- for (u32 i = 0; i < vport->num_txq_grp; i++) {
- const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
qs->qs[k++].txq = tx_qgrp->txqs[j];
}
- if (idpf_is_queue_model_split(vport->txq_model)) {
+ if (idpf_is_queue_model_split(rsrc->txq_model)) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
qs->qs[k++].complq = tx_qgrp->complq;
}
@@ -1859,28 +1908,28 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
/**
* idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: Rx queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_rxq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
struct idpf_rx_queue *q,
struct virtchnl2_rxq_info *qi)
{
const struct idpf_bufq_set *sets;
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->rxq_model);
+ qi->model = cpu_to_le16(rsrc->rxq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size);
qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ if (idpf_queue_has(RSC_EN, q))
qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
qi->desc_ids = cpu_to_le64(q->rxdids);
@@ -1897,7 +1946,7 @@ static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
- if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
+ if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
qi->bufq2_ena = IDPF_BUFQ2_ENA;
qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id);
}
@@ -1914,16 +1963,16 @@ static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: buffer queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_bufq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
const struct idpf_buf_queue *q,
struct virtchnl2_rxq_info *qi)
{
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->rxq_model);
+ qi->model = cpu_to_le16(rsrc->rxq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
@@ -1931,7 +1980,7 @@ static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ if (idpf_queue_has(RSC_EN, q))
qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC);
if (idpf_queue_has(HSPLIT_EN, q)) {
@@ -1942,7 +1991,7 @@ static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues
- * @vport: virtual port data structure
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the rx queue
* @num_chunks: number of chunks in the message
@@ -1952,13 +2001,12 @@ static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
*
* Return: the total size of the prepared message.
*/
-static u32 idpf_prepare_cfg_rxqs_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
+static u32 idpf_prepare_cfg_rxqs_msg(u32 vport_id, void *buf, const void *pos,
u32 num_chunks)
{
struct virtchnl2_config_rx_queues *crq = buf;
- crq->vport_id = cpu_to_le32(vport->vport_id);
+ crq->vport_id = cpu_to_le32(vport_id);
crq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo));
@@ -1979,6 +2027,7 @@ static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
{
struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES,
.prepare_msg = idpf_prepare_cfg_rxqs_msg,
.config_sz = sizeof(struct virtchnl2_config_rx_queues),
@@ -1993,36 +2042,40 @@ static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
for (u32 i = 0; i < qs->num; i++) {
if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX)
- idpf_fill_rxq_config_chunk(qs->vport, qs->qs[i].rxq,
+ idpf_fill_rxq_config_chunk(qs->qv_rsrc, qs->qs[i].rxq,
&qi[params.num_chunks++]);
else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER)
- idpf_fill_bufq_config_chunk(qs->vport, qs->qs[i].bufq,
+ idpf_fill_bufq_config_chunk(qs->qv_rsrc, qs->qs[i].bufq,
&qi[params.num_chunks++]);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
* idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
* Return: 0 on success, -errno on failure.
*/
-static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+static int idpf_send_config_rx_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id)
{
- bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+ bool splitq = idpf_is_queue_model_split(rsrc->rxq_model);
struct idpf_queue_set *qs __free(kfree) = NULL;
- u32 totqs = vport->num_rxq + vport->num_bufq;
+ u32 totqs = rsrc->num_rxq + rsrc->num_bufq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, totqs);
+ qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, totqs);
if (!qs)
return -ENOMEM;
/* Populate the queue info buffer with all queue context info */
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
if (!splitq) {
@@ -2030,7 +2083,7 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
goto rxq;
}
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
}
@@ -2059,7 +2112,7 @@ rxq:
/**
* idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected
* queues
- * @vport: virtual port data structure
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the queue
* @num_chunks: number of chunks in the message
@@ -2069,13 +2122,12 @@ rxq:
*
* Return: the total size of the prepared message.
*/
-static u32 idpf_prepare_ena_dis_qs_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
+static u32 idpf_prepare_ena_dis_qs_msg(u32 vport_id, void *buf, const void *pos,
u32 num_chunks)
{
struct virtchnl2_del_ena_dis_queues *eq = buf;
- eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->vport_id = cpu_to_le32(vport_id);
eq->chunks.num_chunks = cpu_to_le16(num_chunks);
memcpy(eq->chunks.chunks, pos,
num_chunks * sizeof(*eq->chunks.chunks));
@@ -2100,6 +2152,7 @@ static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
{
struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = en ? VIRTCHNL2_OP_ENABLE_QUEUES :
VIRTCHNL2_OP_DISABLE_QUEUES,
.prepare_msg = idpf_prepare_ena_dis_qs_msg,
@@ -2141,34 +2194,38 @@ static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
qc[i].start_queue_id = cpu_to_le32(qid);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
* idpf_send_ena_dis_queues_msg - send virtchnl enable or disable queues
* message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
* @en: whether to enable or disable queues
*
* Return: 0 on success, -errno on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
+static int idpf_send_ena_dis_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id, bool en)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
u32 num_txq, num_q, k = 0;
bool split;
- num_txq = vport->num_txq + vport->num_complq;
- num_q = num_txq + vport->num_rxq + vport->num_bufq;
+ num_txq = rsrc->num_txq + rsrc->num_complq;
+ num_q = num_txq + rsrc->num_rxq + rsrc->num_bufq;
- qs = idpf_alloc_queue_set(vport, num_q);
+ qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, num_q);
if (!qs)
return -ENOMEM;
- split = idpf_is_queue_model_split(vport->txq_model);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
- for (u32 i = 0; i < vport->num_txq_grp; i++) {
- const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
@@ -2185,10 +2242,10 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
if (k != num_txq)
return -EINVAL;
- split = idpf_is_queue_model_split(vport->rxq_model);
+ split = idpf_is_queue_model_split(rsrc->rxq_model);
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
if (split)
@@ -2209,7 +2266,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
if (!split)
continue;
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
}
@@ -2224,7 +2281,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
/**
* idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap
* queue set to the interrupt vector
- * @vport: virtual port data structure
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the vector mapping
* @num_chunks: number of chunks in the message
@@ -2235,13 +2292,12 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
* Return: the total size of the prepared message.
*/
static u32
-idpf_prep_map_unmap_queue_set_vector_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
- u32 num_chunks)
+idpf_prep_map_unmap_queue_set_vector_msg(u32 vport_id, void *buf,
+ const void *pos, u32 num_chunks)
{
struct virtchnl2_queue_vector_maps *vqvm = buf;
- vqvm->vport_id = cpu_to_le32(vport->vport_id);
+ vqvm->vport_id = cpu_to_le32(vport_id);
vqvm->num_qv_maps = cpu_to_le16(num_chunks);
memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps));
@@ -2262,6 +2318,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
{
struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
.prepare_msg = idpf_prep_map_unmap_queue_set_vector_msg,
@@ -2277,7 +2334,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
params.chunks = vqv;
- split = idpf_is_queue_model_split(qs->vport->txq_model);
+ split = idpf_is_queue_model_split(qs->qv_rsrc->txq_model);
for (u32 i = 0; i < qs->num; i++) {
const struct idpf_queue_ptr *q = &qs->qs[i];
@@ -2299,7 +2356,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
v_idx = vec->v_idx;
itr_idx = vec->rx_itr_idx;
} else {
- v_idx = qs->vport->noirq_v_idx;
+ v_idx = qs->qv_rsrc->noirq_v_idx;
itr_idx = VIRTCHNL2_ITR_IDX_0;
}
break;
@@ -2319,7 +2376,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
v_idx = vec->v_idx;
itr_idx = vec->tx_itr_idx;
} else {
- v_idx = qs->vport->noirq_v_idx;
+ v_idx = qs->qv_rsrc->noirq_v_idx;
itr_idx = VIRTCHNL2_ITR_IDX_1;
}
break;
@@ -2332,29 +2389,33 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
vqv[i].itr_idx = cpu_to_le32(itr_idx);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
* idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue
* vector message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
* @map: true for map and false for unmap
*
* Return: 0 on success, -errno on failure.
*/
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id, bool map)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- u32 num_q = vport->num_txq + vport->num_rxq;
+ u32 num_q = rsrc->num_txq + rsrc->num_rxq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, num_q);
+ qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, num_q);
if (!qs)
return -ENOMEM;
- for (u32 i = 0; i < vport->num_txq_grp; i++) {
- const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
@@ -2362,14 +2423,14 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
}
- if (k != vport->num_txq)
+ if (k != rsrc->num_txq)
return -EINVAL;
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
@@ -2377,7 +2438,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
for (u32 j = 0; j < num_rxq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
qs->qs[k++].rxq =
&rx_qgrp->splitq.rxq_sets[j]->rxq;
else
@@ -2453,7 +2514,9 @@ int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs)
*/
int idpf_send_enable_queues_msg(struct idpf_vport *vport)
{
- return idpf_send_ena_dis_queues_msg(vport, true);
+ return idpf_send_ena_dis_queues_msg(vport->adapter,
+ &vport->dflt_qv_rsrc,
+ vport->vport_id, true);
}
/**
@@ -2467,7 +2530,9 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
int err;
- err = idpf_send_ena_dis_queues_msg(vport, false);
+ err = idpf_send_ena_dis_queues_msg(vport->adapter,
+ &vport->dflt_qv_rsrc,
+ vport->vport_id, false);
if (err)
return err;
@@ -2482,104 +2547,96 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
* @num_chunks: number of chunks to copy
*/
static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
- struct virtchnl2_queue_reg_chunk *schunks,
+ struct idpf_queue_id_reg_chunk *schunks,
u16 num_chunks)
{
u16 i;
for (i = 0; i < num_chunks; i++) {
- dchunks[i].type = schunks[i].type;
- dchunks[i].start_queue_id = schunks[i].start_queue_id;
- dchunks[i].num_queues = schunks[i].num_queues;
+ dchunks[i].type = cpu_to_le32(schunks[i].type);
+ dchunks[i].start_queue_id = cpu_to_le32(schunks[i].start_queue_id);
+ dchunks[i].num_queues = cpu_to_le32(schunks[i].num_queues);
}
}
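
Note the direction flip: the cached chunks are CPU-order now, so building the wire message requires cpu_to_le32() where the old code could copy little-endian fields verbatim. A portable illustration of what that store guarantees on any host:

#include <stdint.h>
#include <stdio.h>

/* Portable cpu-to-little-endian store, independent of host endianness. */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

int main(void)
{
	uint8_t wire[4];

	put_le32(wire, 0x00000005);	/* e.g. num_queues = 5 */
	printf("%02x %02x %02x %02x\n", wire[0], wire[1], wire[2], wire[3]);
	return 0;	/* prints "05 00 00 00" regardless of host endianness */
}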
/**
* idpf_send_delete_queues_msg - send delete queues virtchnl message
- * @vport: Virtual port private data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @chunks: queue ids received over mailbox
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Will send delete queues virtchnl message. Return 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_delete_queues_msg(struct idpf_vport *vport)
+int idpf_send_delete_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_queue_id_reg_info *chunks,
+ u32 vport_id)
{
struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
- struct virtchnl2_create_vport *vport_params;
- struct virtchnl2_queue_reg_chunks *chunks;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_vport_config *vport_config;
- u16 vport_idx = vport->idx;
ssize_t reply_sz;
u16 num_chunks;
int buf_size;
- vport_config = vport->adapter->vport_config[vport_idx];
- if (vport_config->req_qs_chunks) {
- chunks = &vport_config->req_qs_chunks->chunks;
- } else {
- vport_params = vport->adapter->vport_params_recvd[vport_idx];
- chunks = &vport_params->chunks;
- }
-
- num_chunks = le16_to_cpu(chunks->num_chunks);
+ num_chunks = chunks->num_chunks;
buf_size = struct_size(eq, chunks.chunks, num_chunks);
eq = kzalloc(buf_size, GFP_KERNEL);
if (!eq)
return -ENOMEM;
- eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->vport_id = cpu_to_le32(vport_id);
eq->chunks.num_chunks = cpu_to_le16(num_chunks);
- idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
+ idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->queue_chunks,
num_chunks);
xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = eq;
xn_params.send_buf.iov_len = buf_size;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
* idpf_send_config_queues_msg - Send config queues virtchnl message
- * @vport: Virtual port private data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Will send config queues virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_config_queues_msg(struct idpf_vport *vport)
+int idpf_send_config_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id)
{
int err;
- err = idpf_send_config_tx_queues_msg(vport);
+ err = idpf_send_config_tx_queues_msg(adapter, rsrc, vport_id);
if (err)
return err;
- return idpf_send_config_rx_queues_msg(vport);
+ return idpf_send_config_rx_queues_msg(adapter, rsrc, vport_id);
}
/**
* idpf_send_add_queues_msg - Send virtchnl add queues message
- * @vport: Virtual port private data structure
- * @num_tx_q: number of transmit queues
- * @num_complq: number of transmit completion queues
- * @num_rx_q: number of receive queues
- * @num_rx_bufq: number of receive buffer queues
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_config: persistent vport structure to store the queue chunk info
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Returns 0 on success, negative on failure. vport _MUST_ be const here as
- * we should not change any fields within vport itself in this function.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
- u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
+int idpf_send_add_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_config *vport_config,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id)
{
struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_vport_config *vport_config;
struct virtchnl2_add_queues aq = {};
- u16 vport_idx = vport->idx;
ssize_t reply_sz;
int size;
@@ -2587,15 +2644,11 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
if (!vc_msg)
return -ENOMEM;
- vport_config = vport->adapter->vport_config[vport_idx];
- kfree(vport_config->req_qs_chunks);
- vport_config->req_qs_chunks = NULL;
-
- aq.vport_id = cpu_to_le32(vport->vport_id);
- aq.num_tx_q = cpu_to_le16(num_tx_q);
- aq.num_tx_complq = cpu_to_le16(num_complq);
- aq.num_rx_q = cpu_to_le16(num_rx_q);
- aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
+ aq.vport_id = cpu_to_le32(vport_id);
+ aq.num_tx_q = cpu_to_le16(rsrc->num_txq);
+ aq.num_tx_complq = cpu_to_le16(rsrc->num_complq);
+ aq.num_rx_q = cpu_to_le16(rsrc->num_rxq);
+ aq.num_rx_bufq = cpu_to_le16(rsrc->num_bufq);
xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
@@ -2603,15 +2656,15 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
xn_params.send_buf.iov_len = sizeof(aq);
xn_params.recv_buf.iov_base = vc_msg;
xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
/* compare vc_msg num queues with vport num queues */
- if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
- le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
- le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
- le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
+ if (le16_to_cpu(vc_msg->num_tx_q) != rsrc->num_txq ||
+ le16_to_cpu(vc_msg->num_rx_q) != rsrc->num_rxq ||
+ le16_to_cpu(vc_msg->num_tx_complq) != rsrc->num_complq ||
+ le16_to_cpu(vc_msg->num_rx_bufq) != rsrc->num_bufq)
return -EINVAL;
size = struct_size(vc_msg, chunks.chunks,
@@ -2619,11 +2672,7 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
if (reply_sz < size)
return -EIO;
- vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
- if (!vport_config->req_qs_chunks)
- return -ENOMEM;
-
- return 0;
+ return idpf_vport_init_queue_reg_chunks(vport_config, &vc_msg->chunks);
}
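
Both the delete and add paths size their variable-length virtchnl buffers with struct_size(), which performs the header-plus-flexible-array arithmetic with overflow checking. A plain user-space equivalent of that sizing (names illustrative):

#include <stdint.h>
#include <stdlib.h>

struct msg_hdr {
	uint32_t vport_id;
	uint16_t num_chunks;
	uint32_t chunks[];	/* flexible array member */
};

int main(void)
{
	uint16_t n = 8;
	/* struct_size(msg, chunks, n) equivalent: header + n elements */
	size_t sz = sizeof(struct msg_hdr) + (size_t)n * sizeof(uint32_t);
	struct msg_hdr *msg = calloc(1, sz);

	if (!msg)
		return 1;
	msg->num_chunks = n;
	free(msg);
	return 0;
}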
/**
@@ -2746,13 +2795,14 @@ int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
/**
* idpf_send_get_stats_msg - Send virtchnl get statistics message
- * @vport: vport to get stats for
+ * @np: netdev private structure
+ * @port_stats: structure to store the vport statistics
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_get_stats_msg(struct idpf_vport *vport)
+int idpf_send_get_stats_msg(struct idpf_netdev_priv *np,
+ struct idpf_port_stats *port_stats)
{
- struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct rtnl_link_stats64 *netstats = &np->netstats;
struct virtchnl2_vport_stats stats_msg = {};
struct idpf_vc_xn_params xn_params = {};
@@ -2763,7 +2813,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
if (!test_bit(IDPF_VPORT_UP, np->state))
return 0;
- stats_msg.vport_id = cpu_to_le32(vport->vport_id);
+ stats_msg.vport_id = cpu_to_le32(np->vport_id);
xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
xn_params.send_buf.iov_base = &stats_msg;
@@ -2771,7 +2821,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
xn_params.recv_buf = xn_params.send_buf;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(np->adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
if (reply_sz < sizeof(stats_msg))
@@ -2792,7 +2842,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
- vport->port_stats.vport_stats = stats_msg;
+ port_stats->vport_stats = stats_msg;
spin_unlock_bh(&np->stats_lock);
@@ -2800,36 +2850,43 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
}
/**
- * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
- * @vport: virtual port data structure
- * @get: flag to set or get rss look up table
+ * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set RSS lut message
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rss_data: pointer to RSS key and lut info
+ * @vport_id: vport identifier used while preparing the virtchnl message
+ * @get: flag to set or get RSS lookup table
*
* When rxhash is disabled, RSS LUT will be configured with zeros. If rxhash
* is enabled, the LUT values stored in driver's soft copy will be used to setup
* the HW.
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
+int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,
+ struct idpf_rss_data *rss_data,
+ u32 vport_id, bool get)
{
struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_rss_data *rss_data;
int buf_size, lut_buf_size;
+ struct idpf_vport *vport;
ssize_t reply_sz;
bool rxhash_ena;
int i;
- rss_data =
- &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
+ vport = idpf_vid_to_vport(adapter, vport_id);
+ if (!vport)
+ return -EINVAL;
+
rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
+
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
rl = kzalloc(buf_size, GFP_KERNEL);
if (!rl)
return -ENOMEM;
- rl->vport_id = cpu_to_le32(vport->vport_id);
+ rl->vport_id = cpu_to_le32(vport_id);
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = rl;
@@ -2850,7 +2907,7 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
}
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
if (!get)
@@ -2882,30 +2939,31 @@ do_memcpy:
}
/**
- * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message
- * @vport: virtual port data structure
- * @get: flag to set or get rss look up table
+ * idpf_send_get_set_rss_key_msg - Send virtchnl get or set RSS key message
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rss_data: pointer to RSS key and lut info
+ * @vport_id: vport identifier used while preparing the virtchnl message
+ * @get: flag to set or get RSS key
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
+int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,
+ struct idpf_rss_data *rss_data,
+ u32 vport_id, bool get)
{
struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
struct virtchnl2_rss_key *rk __free(kfree) = NULL;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_rss_data *rss_data;
ssize_t reply_sz;
int i, buf_size;
u16 key_size;
- rss_data =
- &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
rk = kzalloc(buf_size, GFP_KERNEL);
if (!rk)
return -ENOMEM;
- rk->vport_id = cpu_to_le32(vport->vport_id);
+ rk->vport_id = cpu_to_le32(vport_id);
xn_params.send_buf.iov_base = rk;
xn_params.send_buf.iov_len = buf_size;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
@@ -2925,7 +2983,7 @@ int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
}
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
if (!get)
@@ -3011,33 +3069,142 @@ static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
}
/**
+ * idpf_parse_protocol_ids - parse protocol IDs for a given packet type
+ * @ptype: packet type to parse
+ * @rx_pt: store the parsed packet type info into
+ */
+static void idpf_parse_protocol_ids(struct virtchnl2_ptype *ptype,
+ struct libeth_rx_pt *rx_pt)
+{
+ struct idpf_ptype_state pstate = {};
+
+ for (u32 j = 0; j < ptype->proto_id_count; j++) {
+ u16 id = le16_to_cpu(ptype->proto_id[j]);
+
+ switch (id) {
+ case VIRTCHNL2_PROTO_HDR_GRE:
+ if (pstate.tunnel_state == IDPF_PTYPE_TUNNEL_IP) {
+ rx_pt->tunnel_type =
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT;
+ pstate.tunnel_state |=
+ IDPF_PTYPE_TUNNEL_IP_GRENAT;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_MAC:
+ rx_pt->outer_ip = LIBETH_RX_PT_OUTER_L2;
+ if (pstate.tunnel_state == IDPF_TUN_IP_GRE) {
+ rx_pt->tunnel_type =
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
+ pstate.tunnel_state |=
+ IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4:
+ idpf_fill_ptype_lookup(rx_pt, &pstate, true, false);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV6:
+ idpf_fill_ptype_lookup(rx_pt, &pstate, false, false);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
+ idpf_fill_ptype_lookup(rx_pt, &pstate, true, true);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
+ idpf_fill_ptype_lookup(rx_pt, &pstate, false, true);
+ break;
+ case VIRTCHNL2_PROTO_HDR_UDP:
+ rx_pt->inner_prot = LIBETH_RX_PT_INNER_UDP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_TCP:
+ rx_pt->inner_prot = LIBETH_RX_PT_INNER_TCP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_SCTP:
+ rx_pt->inner_prot = LIBETH_RX_PT_INNER_SCTP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMP:
+ rx_pt->inner_prot = LIBETH_RX_PT_INNER_ICMP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_PAY:
+ rx_pt->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMPV6:
+ case VIRTCHNL2_PROTO_HDR_IPV6_EH:
+ case VIRTCHNL2_PROTO_HDR_PRE_MAC:
+ case VIRTCHNL2_PROTO_HDR_POST_MAC:
+ case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
+ case VIRTCHNL2_PROTO_HDR_SVLAN:
+ case VIRTCHNL2_PROTO_HDR_CVLAN:
+ case VIRTCHNL2_PROTO_HDR_MPLS:
+ case VIRTCHNL2_PROTO_HDR_MMPLS:
+ case VIRTCHNL2_PROTO_HDR_PTP:
+ case VIRTCHNL2_PROTO_HDR_CTRL:
+ case VIRTCHNL2_PROTO_HDR_LLDP:
+ case VIRTCHNL2_PROTO_HDR_ARP:
+ case VIRTCHNL2_PROTO_HDR_ECP:
+ case VIRTCHNL2_PROTO_HDR_EAPOL:
+ case VIRTCHNL2_PROTO_HDR_PPPOD:
+ case VIRTCHNL2_PROTO_HDR_PPPOE:
+ case VIRTCHNL2_PROTO_HDR_IGMP:
+ case VIRTCHNL2_PROTO_HDR_AH:
+ case VIRTCHNL2_PROTO_HDR_ESP:
+ case VIRTCHNL2_PROTO_HDR_IKE:
+ case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
+ case VIRTCHNL2_PROTO_HDR_L2TPV2:
+ case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
+ case VIRTCHNL2_PROTO_HDR_L2TPV3:
+ case VIRTCHNL2_PROTO_HDR_GTP:
+ case VIRTCHNL2_PROTO_HDR_GTP_EH:
+ case VIRTCHNL2_PROTO_HDR_GTPCV2:
+ case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
+ case VIRTCHNL2_PROTO_HDR_GTPU:
+ case VIRTCHNL2_PROTO_HDR_GTPU_UL:
+ case VIRTCHNL2_PROTO_HDR_GTPU_DL:
+ case VIRTCHNL2_PROTO_HDR_ECPRI:
+ case VIRTCHNL2_PROTO_HDR_VRRP:
+ case VIRTCHNL2_PROTO_HDR_OSPF:
+ case VIRTCHNL2_PROTO_HDR_TUN:
+ case VIRTCHNL2_PROTO_HDR_NVGRE:
+ case VIRTCHNL2_PROTO_HDR_VXLAN:
+ case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
+ case VIRTCHNL2_PROTO_HDR_GENEVE:
+ case VIRTCHNL2_PROTO_HDR_NSH:
+ case VIRTCHNL2_PROTO_HDR_QUIC:
+ case VIRTCHNL2_PROTO_HDR_PFCP:
+ case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
+ case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
+ case VIRTCHNL2_PROTO_HDR_RTP:
+ case VIRTCHNL2_PROTO_HDR_NO_PROTO:
+ break;
+ default:
+ break;
+ }
+ }
+}
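
The parser walks one protocol-ID stack from the outside in and latches each recognized layer into rx_pt; the long case list only documents IDs that are intentionally ignored. A toy host-side run of the same latching idea, with illustrative enum values:

#include <stdio.h>

enum proto { HDR_MAC, HDR_IPV4, HDR_TCP, HDR_PAY };

struct rx_pt { int outer_l2, ipv4, tcp; };

static void parse_stack(const enum proto *ids, int n, struct rx_pt *pt)
{
	for (int i = 0; i < n; i++) {
		switch (ids[i]) {
		case HDR_MAC:  pt->outer_l2 = 1; break;
		case HDR_IPV4: pt->ipv4 = 1; break;
		case HDR_TCP:  pt->tcp = 1; break;
		default: break;	/* unknown/irrelevant IDs are skipped */
		}
	}
}

int main(void)
{
	enum proto stack[] = { HDR_MAC, HDR_IPV4, HDR_TCP };
	struct rx_pt pt = {0};

	parse_stack(stack, 3, &pt);
	printf("l2=%d ipv4=%d tcp=%d\n", pt.outer_l2, pt.ipv4, pt.tcp);
	return 0;
}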
+
+/**
* idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
- * @vport: virtual port data structure
+ * @adapter: driver specific private structure
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
+static int idpf_send_get_rx_ptype_msg(struct idpf_adapter *adapter)
{
struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
- struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
- int max_ptype, ptypes_recvd = 0, ptype_offset;
- struct idpf_adapter *adapter = vport->adapter;
+ struct libeth_rx_pt *singleq_pt_lkup __free(kfree) = NULL;
+ struct libeth_rx_pt *splitq_pt_lkup __free(kfree) = NULL;
struct idpf_vc_xn_params xn_params = {};
+ int ptypes_recvd = 0, ptype_offset;
+ u32 max_ptype = IDPF_RX_MAX_PTYPE;
u16 next_ptype_id = 0;
ssize_t reply_sz;
- int i, j, k;
- if (vport->rx_ptype_lkup)
- return 0;
-
- if (idpf_is_queue_model_split(vport->rxq_model))
- max_ptype = IDPF_RX_MAX_PTYPE;
- else
- max_ptype = IDPF_RX_MAX_BASE_PTYPE;
+ singleq_pt_lkup = kcalloc(IDPF_RX_MAX_BASE_PTYPE,
+ sizeof(*singleq_pt_lkup), GFP_KERNEL);
+ if (!singleq_pt_lkup)
+ return -ENOMEM;
- ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
- if (!ptype_lkup)
+ splitq_pt_lkup = kcalloc(max_ptype, sizeof(*splitq_pt_lkup), GFP_KERNEL);
+ if (!splitq_pt_lkup)
return -ENOMEM;
get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
@@ -3078,175 +3245,85 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
- for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
- struct idpf_ptype_state pstate = { };
+ for (u16 i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
+ struct libeth_rx_pt rx_pt = {};
struct virtchnl2_ptype *ptype;
- u16 id;
+ u16 pt_10, pt_8;
ptype = (struct virtchnl2_ptype *)
((u8 *)ptype_info + ptype_offset);
+ pt_10 = le16_to_cpu(ptype->ptype_id_10);
+ pt_8 = ptype->ptype_id_8;
+
ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
return -EINVAL;
/* 0xFFFF indicates end of ptypes */
- if (le16_to_cpu(ptype->ptype_id_10) ==
- IDPF_INVALID_PTYPE_ID)
+ if (pt_10 == IDPF_INVALID_PTYPE_ID)
goto out;
+ if (pt_10 >= max_ptype)
+ return -EINVAL;
- if (idpf_is_queue_model_split(vport->rxq_model))
- k = le16_to_cpu(ptype->ptype_id_10);
- else
- k = ptype->ptype_id_8;
-
- for (j = 0; j < ptype->proto_id_count; j++) {
- id = le16_to_cpu(ptype->proto_id[j]);
- switch (id) {
- case VIRTCHNL2_PROTO_HDR_GRE:
- if (pstate.tunnel_state ==
- IDPF_PTYPE_TUNNEL_IP) {
- ptype_lkup[k].tunnel_type =
- LIBETH_RX_PT_TUNNEL_IP_GRENAT;
- pstate.tunnel_state |=
- IDPF_PTYPE_TUNNEL_IP_GRENAT;
- }
- break;
- case VIRTCHNL2_PROTO_HDR_MAC:
- ptype_lkup[k].outer_ip =
- LIBETH_RX_PT_OUTER_L2;
- if (pstate.tunnel_state ==
- IDPF_TUN_IP_GRE) {
- ptype_lkup[k].tunnel_type =
- LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
- pstate.tunnel_state |=
- IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
- }
- break;
- case VIRTCHNL2_PROTO_HDR_IPV4:
- idpf_fill_ptype_lookup(&ptype_lkup[k],
- &pstate, true,
- false);
- break;
- case VIRTCHNL2_PROTO_HDR_IPV6:
- idpf_fill_ptype_lookup(&ptype_lkup[k],
- &pstate, false,
- false);
- break;
- case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
- idpf_fill_ptype_lookup(&ptype_lkup[k],
- &pstate, true,
- true);
- break;
- case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
- idpf_fill_ptype_lookup(&ptype_lkup[k],
- &pstate, false,
- true);
- break;
- case VIRTCHNL2_PROTO_HDR_UDP:
- ptype_lkup[k].inner_prot =
- LIBETH_RX_PT_INNER_UDP;
- break;
- case VIRTCHNL2_PROTO_HDR_TCP:
- ptype_lkup[k].inner_prot =
- LIBETH_RX_PT_INNER_TCP;
- break;
- case VIRTCHNL2_PROTO_HDR_SCTP:
- ptype_lkup[k].inner_prot =
- LIBETH_RX_PT_INNER_SCTP;
- break;
- case VIRTCHNL2_PROTO_HDR_ICMP:
- ptype_lkup[k].inner_prot =
- LIBETH_RX_PT_INNER_ICMP;
- break;
- case VIRTCHNL2_PROTO_HDR_PAY:
- ptype_lkup[k].payload_layer =
- LIBETH_RX_PT_PAYLOAD_L2;
- break;
- case VIRTCHNL2_PROTO_HDR_ICMPV6:
- case VIRTCHNL2_PROTO_HDR_IPV6_EH:
- case VIRTCHNL2_PROTO_HDR_PRE_MAC:
- case VIRTCHNL2_PROTO_HDR_POST_MAC:
- case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
- case VIRTCHNL2_PROTO_HDR_SVLAN:
- case VIRTCHNL2_PROTO_HDR_CVLAN:
- case VIRTCHNL2_PROTO_HDR_MPLS:
- case VIRTCHNL2_PROTO_HDR_MMPLS:
- case VIRTCHNL2_PROTO_HDR_PTP:
- case VIRTCHNL2_PROTO_HDR_CTRL:
- case VIRTCHNL2_PROTO_HDR_LLDP:
- case VIRTCHNL2_PROTO_HDR_ARP:
- case VIRTCHNL2_PROTO_HDR_ECP:
- case VIRTCHNL2_PROTO_HDR_EAPOL:
- case VIRTCHNL2_PROTO_HDR_PPPOD:
- case VIRTCHNL2_PROTO_HDR_PPPOE:
- case VIRTCHNL2_PROTO_HDR_IGMP:
- case VIRTCHNL2_PROTO_HDR_AH:
- case VIRTCHNL2_PROTO_HDR_ESP:
- case VIRTCHNL2_PROTO_HDR_IKE:
- case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
- case VIRTCHNL2_PROTO_HDR_L2TPV2:
- case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
- case VIRTCHNL2_PROTO_HDR_L2TPV3:
- case VIRTCHNL2_PROTO_HDR_GTP:
- case VIRTCHNL2_PROTO_HDR_GTP_EH:
- case VIRTCHNL2_PROTO_HDR_GTPCV2:
- case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
- case VIRTCHNL2_PROTO_HDR_GTPU:
- case VIRTCHNL2_PROTO_HDR_GTPU_UL:
- case VIRTCHNL2_PROTO_HDR_GTPU_DL:
- case VIRTCHNL2_PROTO_HDR_ECPRI:
- case VIRTCHNL2_PROTO_HDR_VRRP:
- case VIRTCHNL2_PROTO_HDR_OSPF:
- case VIRTCHNL2_PROTO_HDR_TUN:
- case VIRTCHNL2_PROTO_HDR_NVGRE:
- case VIRTCHNL2_PROTO_HDR_VXLAN:
- case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
- case VIRTCHNL2_PROTO_HDR_GENEVE:
- case VIRTCHNL2_PROTO_HDR_NSH:
- case VIRTCHNL2_PROTO_HDR_QUIC:
- case VIRTCHNL2_PROTO_HDR_PFCP:
- case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
- case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
- case VIRTCHNL2_PROTO_HDR_RTP:
- case VIRTCHNL2_PROTO_HDR_NO_PROTO:
- break;
- default:
- break;
- }
- }
-
- idpf_finalize_ptype_lookup(&ptype_lkup[k]);
+ idpf_parse_protocol_ids(ptype, &rx_pt);
+ idpf_finalize_ptype_lookup(&rx_pt);
+
+ /* For a given protocol ID stack, the ptype value can differ
+ * between ptype_id_10 and ptype_id_8, so keep separate lookup
+ * tables for splitq and singleq. For singleq, keep only the
+ * first entry seen for an 8-bit ID, since several 10-bit
+ * ptypes collapse onto the same one.
+ */
+ splitq_pt_lkup[pt_10] = rx_pt;
+ if (!singleq_pt_lkup[pt_8].outer_ip)
+ singleq_pt_lkup[pt_8] = rx_pt;
}
}
out:
- vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
+ adapter->splitq_pt_lkup = no_free_ptr(splitq_pt_lkup);
+ adapter->singleq_pt_lkup = no_free_ptr(singleq_pt_lkup);
return 0;
}
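
With the tables now adapter-global, an Rx path can index whichever matches its descriptor format: the 10-bit ID for splitq, the 8-bit ID for singleq. A hedged sketch of that selection (a helper this patch does not add):

/* Sketch: select the parsed ptype for one Rx descriptor. */
static struct libeth_rx_pt lookup_rx_pt(const struct idpf_adapter *adapter,
					bool splitq, u16 pt_10, u8 pt_8)
{
	return splitq ? adapter->splitq_pt_lkup[pt_10] :
			adapter->singleq_pt_lkup[pt_8];
}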
/**
+ * idpf_rel_rx_pt_lkup - release RX ptype lookup tables
+ * @adapter: adapter pointer holding the lookup tables
+ */
+static void idpf_rel_rx_pt_lkup(struct idpf_adapter *adapter)
+{
+ kfree(adapter->splitq_pt_lkup);
+ adapter->splitq_pt_lkup = NULL;
+
+ kfree(adapter->singleq_pt_lkup);
+ adapter->singleq_pt_lkup = NULL;
+}
+
+/**
* idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
* message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_id: vport identifier used while preparing the virtchnl message
+ * @loopback_ena: flag to enable or disable loopback
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
+int idpf_send_ena_dis_loopback_msg(struct idpf_adapter *adapter, u32 vport_id,
+ bool loopback_ena)
{
struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_loopback loopback;
ssize_t reply_sz;
- loopback.vport_id = cpu_to_le32(vport->vport_id);
- loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
+ loopback.vport_id = cpu_to_le32(vport_id);
+ loopback.enable = loopback_ena;
xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = &loopback;
xn_params.send_buf.iov_len = sizeof(loopback);
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
@@ -3325,7 +3402,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
{
if (adapter->hw.arq && adapter->hw.asq) {
- idpf_mb_clean(adapter);
+ idpf_mb_clean(adapter, adapter->hw.asq);
idpf_ctlq_deinit(&adapter->hw);
}
adapter->hw.arq = NULL;
@@ -3520,6 +3597,13 @@ restart:
goto err_intr_req;
}
+ err = idpf_send_get_rx_ptype_msg(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "failed to get RX ptypes: %d\n",
+ err);
+ goto intr_rel;
+ }
+
err = idpf_ptp_init(adapter);
if (err)
pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
@@ -3537,6 +3621,8 @@ restart:
return 0;
+intr_rel:
+ idpf_intr_rel(adapter);
err_intr_req:
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
@@ -3591,6 +3677,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
idpf_ptp_release(adapter);
idpf_deinit_task(adapter);
idpf_idc_deinit_core_aux_device(adapter->cdev_info);
+ idpf_rel_rx_pt_lkup(adapter);
idpf_intr_rel(adapter);
if (remove_in_prog)
@@ -3613,25 +3700,27 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
/**
* idpf_vport_alloc_vec_indexes - Get relative vector indexes
* @vport: virtual port data struct
+ * @rsrc: pointer to queue and vector resources
*
* This function requests the vector information required for the vport and
* stores the vector indexes received from the 'global vector distribution'
* in the vport's queue vectors array.
*
- * Return 0 on success, error on failure
+ * Return: 0 on success, error on failure
*/
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_vector_info vec_info;
int num_alloc_vecs;
u32 req;
- vec_info.num_curr_vecs = vport->num_q_vectors;
+ vec_info.num_curr_vecs = rsrc->num_q_vectors;
if (vec_info.num_curr_vecs)
vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
/* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */
- req = max(vport->num_txq - vport->num_xdp_txq, vport->num_rxq) +
+ req = max(rsrc->num_txq - vport->num_xdp_txq, rsrc->num_rxq) +
IDPF_RESERVED_VECS;
vec_info.num_req_vecs = req;
@@ -3639,7 +3728,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
vec_info.index = vport->idx;
num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
- vport->q_vector_idxs,
+ rsrc->q_vector_idxs,
&vec_info);
if (num_alloc_vecs <= 0) {
dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
@@ -3647,7 +3736,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
return -EINVAL;
}
- vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
+ rsrc->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
return 0;
}
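
The request size comes from the larger of the Tx count (minus XDP queues, which share the reserved no-IRQ vector) and the Rx count, plus IDPF_RESERVED_VECS. A quick numeric check of that formula (the reserved count of 1 is an assumption here):

#include <stdio.h>

#define RESERVED_VECS 1	/* illustrative; mirrors IDPF_RESERVED_VECS */

static unsigned int req_vecs(unsigned int num_txq, unsigned int num_xdp_txq,
			     unsigned int num_rxq)
{
	unsigned int tx = num_txq - num_xdp_txq;

	return (tx > num_rxq ? tx : num_rxq) + RESERVED_VECS;
}

int main(void)
{
	/* 16 Tx (4 of them XDP) and 8 Rx queues -> max(12, 8) + 1 = 13 */
	printf("%u\n", req_vecs(16, 4, 8));
	return 0;
}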
@@ -3658,9 +3747,12 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
* @max_q: vport max queue info
*
* Will initialize vport with the info received through MB earlier
+ *
+ * Return: 0 on success, negative on failure.
*/
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
+int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_create_vport *vport_msg;
struct idpf_vport_config *vport_config;
@@ -3674,13 +3766,18 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
rss_data = &vport_config->user_config.rss_data;
vport_msg = adapter->vport_params_recvd[idx];
+ err = idpf_vport_init_queue_reg_chunks(vport_config,
+ &vport_msg->chunks);
+ if (err)
+ return err;
+
vport_config->max_q.max_txq = max_q->max_txq;
vport_config->max_q.max_rxq = max_q->max_rxq;
vport_config->max_q.max_complq = max_q->max_complq;
vport_config->max_q.max_bufq = max_q->max_bufq;
- vport->txq_model = le16_to_cpu(vport_msg->txq_model);
- vport->rxq_model = le16_to_cpu(vport_msg->rxq_model);
+ rsrc->txq_model = le16_to_cpu(vport_msg->txq_model);
+ rsrc->rxq_model = le16_to_cpu(vport_msg->rxq_model);
vport->vport_type = le16_to_cpu(vport_msg->vport_type);
vport->vport_id = le32_to_cpu(vport_msg->vport_id);
@@ -3697,24 +3794,27 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
- idpf_vport_init_num_qs(vport, vport_msg);
- idpf_vport_calc_num_q_desc(vport);
- idpf_vport_calc_num_q_groups(vport);
- idpf_vport_alloc_vec_indexes(vport);
+ idpf_vport_init_num_qs(vport, vport_msg, rsrc);
+ idpf_vport_calc_num_q_desc(vport, rsrc);
+ idpf_vport_calc_num_q_groups(rsrc);
+ idpf_vport_alloc_vec_indexes(vport, rsrc);
vport->crc_enable = adapter->crc_enable;
if (!(vport_msg->vport_flags &
cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
- return;
+ return 0;
err = idpf_ptp_get_vport_tstamps_caps(vport);
if (err) {
+ /* Do not error on timestamp failure */
pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
- return;
+ return 0;
}
INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
+
+ return 0;
}
/**
@@ -3773,21 +3873,21 @@ int idpf_get_vec_ids(struct idpf_adapter *adapter,
* Returns number of ids filled
*/
static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
- struct virtchnl2_queue_reg_chunks *chunks)
+ struct idpf_queue_id_reg_info *chunks)
{
- u16 num_chunks = le16_to_cpu(chunks->num_chunks);
+ u16 num_chunks = chunks->num_chunks;
u32 num_q_id_filled = 0, i;
u32 start_q_id, num_q;
while (num_chunks--) {
- struct virtchnl2_queue_reg_chunk *chunk;
+ struct idpf_queue_id_reg_chunk *chunk;
- chunk = &chunks->chunks[num_chunks];
- if (le32_to_cpu(chunk->type) != q_type)
+ chunk = &chunks->queue_chunks[num_chunks];
+ if (chunk->type != q_type)
continue;
- num_q = le32_to_cpu(chunk->num_queues);
- start_q_id = le32_to_cpu(chunk->start_queue_id);
+ num_q = chunk->num_queues;
+ start_q_id = chunk->start_queue_id;
for (i = 0; i < num_q; i++) {
if ((num_q_id_filled + i) < num_qids) {
@@ -3806,6 +3906,7 @@ static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
/**
* __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
* @vport: virtual port for which the queues ids are initialized
+ * @rsrc: pointer to queue and vector resources
* @qids: queue ids
* @num_qids: number of queue ids
* @q_type: type of queue
@@ -3814,6 +3915,7 @@ static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
* parameters. Returns number of queue ids initialized.
*/
static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
const u32 *qids,
int num_qids,
u32 q_type)
@@ -3822,19 +3924,19 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
switch (q_type) {
case VIRTCHNL2_QUEUE_TYPE_TX:
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
tx_qgrp->txqs[j]->q_id = qids[k];
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u16 num_rxq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
@@ -3842,7 +3944,7 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
struct idpf_rx_queue *q;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
@@ -3851,16 +3953,16 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
}
break;
case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
- for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (i = 0; i < rsrc->num_txq_grp && k < num_qids; i++, k++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
tx_qgrp->complq->q_id = qids[k];
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u8 num_bufqs = vport->num_bufqs_per_qgrp;
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
+ u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
struct idpf_buf_queue *q;
@@ -3880,30 +3982,21 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
/**
* idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
* @vport: virtual port for which the queues ids are initialized
+ * @rsrc: pointer to queue and vector resources
+ * @chunks: queue ids received over mailbox
*
* Will initialize all queue ids with ids received as mailbox parameters.
- * Returns 0 on success, negative if all the queues are not initialized.
+ *
+ * Return: 0 on success, negative if all the queues are not initialized.
*/
-int idpf_vport_queue_ids_init(struct idpf_vport *vport)
+int idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ struct idpf_queue_id_reg_info *chunks)
{
- struct virtchnl2_create_vport *vport_params;
- struct virtchnl2_queue_reg_chunks *chunks;
- struct idpf_vport_config *vport_config;
- u16 vport_idx = vport->idx;
int num_ids, err = 0;
u16 q_type;
u32 *qids;
- vport_config = vport->adapter->vport_config[vport_idx];
- if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
- } else {
- vport_params = vport->adapter->vport_params_recvd[vport_idx];
- chunks = &vport_params->chunks;
- }
-
qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
if (!qids)
return -ENOMEM;
@@ -3911,13 +4004,13 @@ int idpf_vport_queue_ids_init(struct idpf_vport *vport)
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
VIRTCHNL2_QUEUE_TYPE_TX,
chunks);
- if (num_ids < vport->num_txq) {
+ if (num_ids < rsrc->num_txq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
VIRTCHNL2_QUEUE_TYPE_TX);
- if (num_ids < vport->num_txq) {
+ if (num_ids < rsrc->num_txq) {
err = -EINVAL;
goto mem_rel;
}
@@ -3925,44 +4018,46 @@ int idpf_vport_queue_ids_init(struct idpf_vport *vport)
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
VIRTCHNL2_QUEUE_TYPE_RX,
chunks);
- if (num_ids < vport->num_rxq) {
+ if (num_ids < rsrc->num_rxq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
VIRTCHNL2_QUEUE_TYPE_RX);
- if (num_ids < vport->num_rxq) {
+ if (num_ids < rsrc->num_rxq) {
err = -EINVAL;
goto mem_rel;
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
goto check_rxq;
q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
- if (num_ids < vport->num_complq) {
+ if (num_ids < rsrc->num_complq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
- if (num_ids < vport->num_complq) {
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
+ num_ids, q_type);
+ if (num_ids < rsrc->num_complq) {
err = -EINVAL;
goto mem_rel;
}
check_rxq:
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
goto mem_rel;
q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
- if (num_ids < vport->num_bufq) {
+ if (num_ids < rsrc->num_bufq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
- if (num_ids < vport->num_bufq)
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
+ num_ids, q_type);
+ if (num_ids < rsrc->num_bufq)
err = -EINVAL;
mem_rel:
@@ -3974,23 +4069,24 @@ mem_rel:
/**
* idpf_vport_adjust_qs - Adjust to new requested queues
* @vport: virtual port data struct
+ * @rsrc: pointer to queue and vector resources
*
* Renegotiate queues. Returns 0 on success, negative on failure.
*/
-int idpf_vport_adjust_qs(struct idpf_vport *vport)
+int idpf_vport_adjust_qs(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
struct virtchnl2_create_vport vport_msg;
int err;
- vport_msg.txq_model = cpu_to_le16(vport->txq_model);
- vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
+ vport_msg.txq_model = cpu_to_le16(rsrc->txq_model);
+ vport_msg.rxq_model = cpu_to_le16(rsrc->rxq_model);
err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
NULL);
if (err)
return err;
- idpf_vport_init_num_qs(vport, &vport_msg);
- idpf_vport_calc_num_q_groups(vport);
+ idpf_vport_init_num_qs(vport, &vport_msg, rsrc);
+ idpf_vport_calc_num_q_groups(rsrc);
return 0;
}
@@ -4112,12 +4208,12 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
return le32_to_cpu(vport_msg->vport_id);
}
-static void idpf_set_mac_type(struct idpf_vport *vport,
+static void idpf_set_mac_type(const u8 *default_mac_addr,
struct virtchnl2_mac_addr *mac_addr)
{
bool is_primary;
- is_primary = ether_addr_equal(vport->default_mac_addr, mac_addr->addr);
+ is_primary = ether_addr_equal(default_mac_addr, mac_addr->addr);
mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY :
VIRTCHNL2_MAC_ADDR_EXTRA;
}
@@ -4193,22 +4289,23 @@ invalid_payload:
/**
* idpf_add_del_mac_filters - Add/del mac filters
- * @vport: Virtual port data structure
- * @np: Netdev private structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_config: persistent vport structure to get the MAC filter list
+ * @default_mac_addr: default MAC address to compare with
+ * @vport_id: vport identifier used while preparing the virtchnl message
* @add: Add or delete flag
* @async: Don't wait for return message
*
- * Returns 0 on success, error on failure.
+ * Return: 0 on success, error on failure.
**/
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
- struct idpf_netdev_priv *np,
+int idpf_add_del_mac_filters(struct idpf_adapter *adapter,
+ struct idpf_vport_config *vport_config,
+ const u8 *default_mac_addr, u32 vport_id,
bool add, bool async)
{
struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
- struct idpf_adapter *adapter = np->adapter;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_vport_config *vport_config;
u32 num_msgs, total_filters = 0;
struct idpf_mac_filter *f;
ssize_t reply_sz;
@@ -4220,7 +4317,6 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
xn_params.async = async;
xn_params.async_handler = idpf_mac_filter_async_handler;
- vport_config = adapter->vport_config[np->vport_idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
/* Find the number of newly added filters */
@@ -4251,7 +4347,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
list) {
if (add && f->add) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
- idpf_set_mac_type(vport, &mac_addr[i]);
+ idpf_set_mac_type(default_mac_addr, &mac_addr[i]);
i++;
f->add = false;
if (i == total_filters)
@@ -4259,7 +4355,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
}
if (!add && f->remove) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
- idpf_set_mac_type(vport, &mac_addr[i]);
+ idpf_set_mac_type(default_mac_addr, &mac_addr[i]);
i++;
f->remove = false;
if (i == total_filters)
@@ -4291,7 +4387,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
memset(ma_list, 0, buf_size);
}
- ma_list->vport_id = cpu_to_le32(np->vport_id);
+ ma_list->vport_id = cpu_to_le32(vport_id);
ma_list->num_mac_addr = cpu_to_le16(num_entries);
memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index eac3d15daa42..fe065911ad5a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -92,6 +92,7 @@ struct idpf_netdev_priv;
struct idpf_vec_regs;
struct idpf_vport;
struct idpf_vport_max_q;
+struct idpf_vport_config;
struct idpf_vport_user_config_data;
ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
@@ -101,10 +102,20 @@ void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
int idpf_vc_core_init(struct idpf_adapter *adapter);
void idpf_vc_core_deinit(struct idpf_adapter *adapter);
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+int idpf_get_reg_intr_vecs(struct idpf_adapter *adapter,
struct idpf_vec_regs *reg_vals);
-int idpf_queue_reg_init(struct idpf_vport *vport);
-int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+int idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ struct idpf_queue_id_reg_info *chunks);
+int idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ struct idpf_queue_id_reg_info *chunks);
+static inline void
+idpf_vport_deinit_queue_reg_chunks(struct idpf_vport_config *vport_cfg)
+{
+ kfree(vport_cfg->qid_reg_info.queue_chunks);
+ vport_cfg->qid_reg_info.queue_chunks = NULL;
+}
bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag);
bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type);
@@ -112,9 +123,9 @@ bool idpf_sideband_action_ena(struct idpf_vport *vport,
struct ethtool_rx_flow_spec *fsp);
unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport);
-int idpf_recv_mb_msg(struct idpf_adapter *adapter);
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg, u16 cookie);
+int idpf_recv_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *arq);
+int idpf_send_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *asq,
+ u32 op, u16 msg_size, u8 *msg, u16 cookie);
struct idpf_queue_ptr {
enum virtchnl2_queue_type type;
@@ -127,60 +138,81 @@ struct idpf_queue_ptr {
};
struct idpf_queue_set {
- struct idpf_vport *vport;
+ struct idpf_adapter *adapter;
+ struct idpf_q_vec_rsrc *qv_rsrc;
+ u32 vport_id;
u32 num;
struct idpf_queue_ptr qs[] __counted_by(num);
};
-struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num);
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id, u32 num);
int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id);
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
+int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
u32 idpf_get_vport_id(struct idpf_vport *vport);
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
-int idpf_send_enable_vport_msg(struct idpf_vport *vport);
-int idpf_send_disable_vport_msg(struct idpf_vport *vport);
+int idpf_send_destroy_vport_msg(struct idpf_adapter *adapter, u32 vport_id);
+int idpf_send_enable_vport_msg(struct idpf_adapter *adapter, u32 vport_id);
+int idpf_send_disable_vport_msg(struct idpf_adapter *adapter, u32 vport_id);
-int idpf_vport_adjust_qs(struct idpf_vport *vport);
+int idpf_vport_adjust_qs(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
- u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
-int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
+int idpf_send_add_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_config *vport_config,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id);
+int idpf_send_delete_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_queue_id_reg_info *chunks,
+ u32 vport_id);
+
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_get_vec_ids(struct idpf_adapter *adapter,
u16 *vecids, int num_vecids,
struct virtchnl2_vector_chunks *chunks);
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
-
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
- struct idpf_netdev_priv *np,
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id,
+ bool map);
+
+int idpf_add_del_mac_filters(struct idpf_adapter *adapter,
+ struct idpf_vport_config *vport_config,
+ const u8 *default_mac_addr, u32 vport_id,
bool add, bool async);
int idpf_set_promiscuous(struct idpf_adapter *adapter,
struct idpf_vport_user_config_data *config_data,
u32 vport_id);
int idpf_check_supported_desc_ids(struct idpf_vport *vport);
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
-int idpf_send_get_stats_msg(struct idpf_vport *vport);
+int idpf_send_ena_dis_loopback_msg(struct idpf_adapter *adapter, u32 vport_id,
+ bool loopback_ena);
+int idpf_send_get_stats_msg(struct idpf_netdev_priv *np,
+ struct idpf_port_stats *port_stats);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,
+ struct idpf_rss_data *rss_data,
+ u32 vport_id, bool get);
+int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,
+ struct idpf_rss_data *rss_data,
+ u32 vport_id, bool get);
void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
u8 *send_msg, u16 msg_size,
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index 958d16f87424..2b60f2a78684 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -2,21 +2,22 @@
/* Copyright (C) 2025 Intel Corporation */
#include "idpf.h"
+#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
#include "xdp.h"
#include "xsk.h"
-static int idpf_rxq_for_each(const struct idpf_vport *vport,
+static int idpf_rxq_for_each(const struct idpf_q_vec_rsrc *rsrc,
int (*fn)(struct idpf_rx_queue *rxq, void *arg),
void *arg)
{
- bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+ bool splitq = idpf_is_queue_model_split(rsrc->rxq_model);
- if (!vport->rxq_grps)
+ if (!rsrc->rxq_grps)
return -ENETDOWN;
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
if (splitq)
@@ -45,7 +46,8 @@ static int idpf_rxq_for_each(const struct idpf_vport *vport,
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
const struct idpf_vport *vport = rxq->q_vector->vport;
- bool split = idpf_is_queue_model_split(vport->rxq_model);
+ const struct idpf_q_vec_rsrc *rsrc;
+ bool split;
int err;
err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
@@ -54,6 +56,9 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
if (err)
return err;
+ rsrc = &vport->dflt_qv_rsrc;
+ split = idpf_is_queue_model_split(rsrc->rxq_model);
+
if (idpf_queue_has(XSK, rxq)) {
err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
@@ -70,7 +75,7 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
if (!split)
return 0;
- rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
+ rxq->xdpsqs = &vport->txqs[rsrc->xdp_txq_offset];
rxq->num_xdp_txq = vport->num_xdp_txq;
return 0;
@@ -86,9 +91,9 @@ int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
return __idpf_xdp_rxq_info_init(rxq, NULL);
}
-int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
+int idpf_xdp_rxq_info_init_all(const struct idpf_q_vec_rsrc *rsrc)
{
- return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
+ return idpf_rxq_for_each(rsrc, __idpf_xdp_rxq_info_init, NULL);
}
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
@@ -111,10 +116,10 @@ void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
__idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
}
-void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_q_vec_rsrc *rsrc)
{
- idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
- (void *)(size_t)vport->rxq_model);
+ idpf_rxq_for_each(rsrc, __idpf_xdp_rxq_info_deinit,
+ (void *)(size_t)rsrc->rxq_model);
}
static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
@@ -132,10 +137,10 @@ static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
return 0;
}
-void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_q_vec_rsrc *rsrc,
struct bpf_prog *xdp_prog)
{
- idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
+ idpf_rxq_for_each(rsrc, idpf_xdp_rxq_assign_prog, xdp_prog);
}
static void idpf_xdp_tx_timer(struct work_struct *work);
@@ -165,7 +170,7 @@ int idpf_xdpsqs_get(const struct idpf_vport *vport)
}
dev = vport->netdev;
- sqs = vport->xdp_txq_offset;
+ sqs = vport->dflt_qv_rsrc.xdp_txq_offset;
for (u32 i = sqs; i < vport->num_txq; i++) {
struct idpf_tx_queue *xdpsq = vport->txqs[i];
@@ -202,7 +207,7 @@ void idpf_xdpsqs_put(const struct idpf_vport *vport)
return;
dev = vport->netdev;
- sqs = vport->xdp_txq_offset;
+ sqs = vport->dflt_qv_rsrc.xdp_txq_offset;
for (u32 i = sqs; i < vport->num_txq; i++) {
struct idpf_tx_queue *xdpsq = vport->txqs[i];
@@ -358,12 +363,15 @@ int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
{
const struct idpf_netdev_priv *np = netdev_priv(dev);
const struct idpf_vport *vport = np->vport;
+ u32 xdp_txq_offset;
if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
return -ENETDOWN;
+ xdp_txq_offset = vport->dflt_qv_rsrc.xdp_txq_offset;
+
return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
- &vport->txqs[vport->xdp_txq_offset],
+ &vport->txqs[xdp_txq_offset],
vport->num_xdp_txq,
idpf_xdp_xmit_flush_bulk,
idpf_xdp_tx_finalize);
@@ -391,13 +399,43 @@ static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
pt);
}
+static int idpf_xdpmo_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
+ struct idpf_xdp_rx_desc desc __uninitialized;
+ const struct idpf_rx_queue *rxq;
+ u64 cached_time, ts_ns;
+ u32 ts_high;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+ if (!idpf_queue_has(PTP, rxq))
+ return -ENODATA;
+
+ idpf_xdp_get_qw1(&desc, xdp->desc);
+
+ if (!(idpf_xdp_rx_ts_low(&desc) & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
+ return -ENODATA;
+
+ cached_time = READ_ONCE(rxq->cached_phc_time);
+
+ idpf_xdp_get_qw3(&desc, xdp->desc);
+
+ ts_high = idpf_xdp_rx_ts_high(&desc);
+ ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
+
+ *timestamp = ts_ns;
+ return 0;
+}
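
idpf_ptp_tstamp_extend_32b_to_64b() itself is outside this diff; a minimal sketch of the usual Intel-driver extension scheme — assuming the cached PHC time is refreshed often enough that the true timestamp lies within 2^31 ns of it — looks like this (illustration only, not the idpf implementation):

#include <linux/types.h>
#include <linux/limits.h>

/* Anchor a 32-bit hardware timestamp to a cached 64-bit PHC read.
 * Unsigned 32-bit subtraction absorbs rollover; deltas past the
 * halfway point are interpreted as slightly before the cached time
 * rather than far after it.
 */
static u64 example_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 phc_lo = (u32)cached_phc_time;
	u32 delta = in_tstamp - phc_lo;

	if (delta > U32_MAX / 2)
		return cached_phc_time - (phc_lo - in_tstamp);

	return cached_phc_time + delta;
}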
+
static const struct xdp_metadata_ops idpf_xdpmo = {
.xmo_rx_hash = idpf_xdpmo_rx_hash,
+ .xmo_rx_timestamp = idpf_xdpmo_rx_timestamp,
};
void idpf_xdp_set_features(const struct idpf_vport *vport)
{
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model))
return;
libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
@@ -409,6 +447,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport,
const struct netdev_bpf *xdp)
{
const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct bpf_prog *old, *prog = xdp->prog;
struct idpf_vport_config *cfg;
int ret;
@@ -419,7 +458,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport,
!test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
!!vport->xdp_prog == !!prog) {
if (test_bit(IDPF_VPORT_UP, np->state))
- idpf_xdp_copy_prog_to_rqs(vport, prog);
+ idpf_xdp_copy_prog_to_rqs(rsrc, prog);
old = xchg(&vport->xdp_prog, prog);
if (old)
@@ -464,7 +503,7 @@ int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
idpf_vport_ctrl_lock(dev);
vport = idpf_netdev_to_vport(dev);
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model))
goto notsupp;
switch (xdp->command) {
diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
index 479f5ef3c604..63e56f7d43e0 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.h
+++ b/drivers/net/ethernet/intel/idpf/xdp.h
@@ -9,10 +9,10 @@
#include "idpf_txrx.h"
int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
-int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
+int idpf_xdp_rxq_info_init_all(const struct idpf_q_vec_rsrc *rsrc);
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
-void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
-void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_q_vec_rsrc *rsrc);
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_q_vec_rsrc *rsrc,
struct bpf_prog *xdp_prog);
int idpf_xdpsqs_get(const struct idpf_vport *vport);
@@ -112,11 +112,13 @@ struct idpf_xdp_rx_desc {
aligned_u64 qw1;
#define IDPF_XDP_RX_BUF GENMASK_ULL(47, 32)
#define IDPF_XDP_RX_EOP BIT_ULL(1)
+#define IDPF_XDP_RX_TS_LOW GENMASK_ULL(31, 24)
aligned_u64 qw2;
#define IDPF_XDP_RX_HASH GENMASK_ULL(31, 0)
aligned_u64 qw3;
+#define IDPF_XDP_RX_TS_HIGH GENMASK_ULL(63, 32)
} __aligned(4 * sizeof(u64));
static_assert(sizeof(struct idpf_xdp_rx_desc) ==
sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
@@ -128,6 +130,8 @@ static_assert(sizeof(struct idpf_xdp_rx_desc) ==
#define idpf_xdp_rx_buf(desc) FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
#define idpf_xdp_rx_eop(desc) !!((desc)->qw1 & IDPF_XDP_RX_EOP)
#define idpf_xdp_rx_hash(desc) FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
+#define idpf_xdp_rx_ts_low(desc) FIELD_GET(IDPF_XDP_RX_TS_LOW, (desc)->qw1)
+#define idpf_xdp_rx_ts_high(desc) FIELD_GET(IDPF_XDP_RX_TS_HIGH, (desc)->qw3)
static inline void
idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
@@ -149,6 +153,9 @@ idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
desc->qw1 = ((const typeof(desc))rxd)->qw1;
#else
desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
+ ((u64)rxd->ts_low << 24) |
+ ((u64)rxd->fflags1 << 16) |
+ ((u64)rxd->status_err1 << 8) |
rxd->status_err0_qw1;
#endif
}
@@ -166,6 +173,19 @@ idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
#endif
}
+static inline void
+idpf_xdp_get_qw3(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw3 = ((const typeof(desc))rxd)->qw3;
+#else
+ desc->qw3 = ((u64)le32_to_cpu(rxd->ts_high) << 32) |
+ ((u64)le16_to_cpu(rxd->fmd6) << 16) |
+ le16_to_cpu(rxd->l2tag1);
+#endif
+}
+
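
To make the packing concrete, a throwaway example of pulling the two timestamp halves back out through the accessors above; the descriptor values are invented:

/* ts_low lives in qw1[31:24], ts_high in qw3[63:32]. */
static void example_ts_fields(void)
{
	struct idpf_xdp_rx_desc d = {
		.qw1 = 0x00000000ab000000ULL,	/* ts_low  = 0xab */
		.qw3 = 0x4d2c81f500000000ULL,	/* ts_high = 0x4d2c81f5 */
	};

	u8 ts_low = idpf_xdp_rx_ts_low(&d);
	u32 ts_high = idpf_xdp_rx_ts_high(&d);

	(void)ts_low;
	(void)ts_high;
}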
void idpf_xdp_set_features(const struct idpf_vport *vport);
int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c
index fd2cc43ab43c..676cbd80774d 100644
--- a/drivers/net/ethernet/intel/idpf/xsk.c
+++ b/drivers/net/ethernet/intel/idpf/xsk.c
@@ -26,13 +26,14 @@ static void idpf_xsk_setup_rxq(const struct idpf_vport *vport,
static void idpf_xsk_setup_bufq(const struct idpf_vport *vport,
struct idpf_buf_queue *bufq)
{
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct xsk_buff_pool *pool;
u32 qid = U32_MAX;
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
if (&grp->splitq.bufq_sets[j].bufq == bufq) {
qid = grp->splitq.rxq_sets[0]->rxq.idx;
goto setup;
@@ -61,7 +62,7 @@ static void idpf_xsk_setup_txq(const struct idpf_vport *vport,
if (!idpf_queue_has(XDP, txq))
return;
- qid = txq->idx - vport->xdp_txq_offset;
+ qid = txq->idx - vport->dflt_qv_rsrc.xdp_txq_offset;
pool = xsk_get_pool_from_qid(vport->netdev, qid);
if (!pool || !pool->dev)
@@ -86,7 +87,8 @@ static void idpf_xsk_setup_complq(const struct idpf_vport *vport,
if (!idpf_queue_has(XDP, complq))
return;
- qid = complq->txq_grp->txqs[0]->idx - vport->xdp_txq_offset;
+ qid = complq->txq_grp->txqs[0]->idx -
+ vport->dflt_qv_rsrc.xdp_txq_offset;
pool = xsk_get_pool_from_qid(vport->netdev, qid);
if (!pool || !pool->dev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 3069b583fd81..89c7fed7b8fc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -342,6 +342,13 @@ static int ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
return 0;
}
+ if (hw->phy.sfp_type == ixgbe_sfp_type_10g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_10g_bx_core1) {
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = false;
+ return 0;
+ }
+
/*
* Determine link capabilities based on the stored value of AUTOC,
* which represents EEPROM defaults. If AUTOC value has not been
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 2ad81f687a84..bb4b53fee234 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -351,6 +351,8 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
case ixgbe_sfp_type_1g_lx_core1:
case ixgbe_sfp_type_1g_bx_core0:
case ixgbe_sfp_type_1g_bx_core1:
+ case ixgbe_sfp_type_10g_bx_core0:
+ case ixgbe_sfp_type_10g_bx_core1:
ethtool_link_ksettings_add_link_mode(cmd, supported,
FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 2449e4cf2679..ab733e73927d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1534,8 +1534,10 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
struct ixgbe_adapter *adapter = hw->back;
u8 oui_bytes[3] = {0, 0, 0};
u8 bitrate_nominal = 0;
+ u8 sm_length_100m = 0;
u8 comp_codes_10g = 0;
u8 comp_codes_1g = 0;
+ u8 sm_length_km = 0;
u16 enforce_sfp = 0;
u32 vendor_oui = 0;
u8 identifier = 0;
@@ -1678,6 +1680,33 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_bx_core1;
+ /* Support Ethernet 10G-BX: check that the SFF-8472 Bit Rate
+ * Nominal value is 10.3 Gb/s (67h, in units of 100 Mb/s) and
+ * that the module is Single Mode fibre with at least 1km
+ * link length
+ */
+ } else if ((!comp_codes_10g) && (bitrate_nominal == 0x67) &&
+ (!(cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)) &&
+ (!(cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE))) {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SM_LENGTH_KM,
+ &sm_length_km);
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SM_LENGTH_100M,
+ &sm_length_100m);
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+ if (sm_length_km > 0 || sm_length_100m >= 10) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_10g_bx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_10g_bx_core1;
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ }
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
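
The length test above comes straight from the SFF-8472 address map: byte 0xE gives the single-mode reach in km and byte 0xF gives it in 100 m units, so "at least 1 km" is km > 0 or ten or more 100 m units. Restated as a standalone predicate, purely for illustration:

#include <linux/types.h>

/* SFF-8472 A0h byte 0xE: SM length (km); byte 0xF: SM length (100 m). */
static bool sfp_sm_reach_at_least_1km(u8 sm_length_km, u8 sm_length_100m)
{
	return sm_length_km > 0 || sm_length_100m >= 10;
}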
@@ -1768,7 +1797,9 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_10g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_10g_bx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
return -EOPNOTSUPP;
}
@@ -1786,7 +1817,9 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_10g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_10g_bx_core1)) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel)
return 0;
@@ -2016,20 +2049,22 @@ int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
return -EOPNOTSUPP;
/*
- * Limiting active cables and 1G Phys must be initialized as
+ * Limiting active cables, 10G BX and 1G Phys must be initialized as
* SR modules
*/
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- sfp_type == ixgbe_sfp_type_1g_bx_core0)
+ sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ sfp_type == ixgbe_sfp_type_10g_bx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
- sfp_type == ixgbe_sfp_type_1g_bx_core1)
+ sfp_type == ixgbe_sfp_type_1g_bx_core1 ||
+ sfp_type == ixgbe_sfp_type_10g_bx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 81179c60af4e..039ba4b6c120 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -32,6 +32,8 @@
#define IXGBE_SFF_QSFP_1GBE_COMP 0x86
#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92
#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
+#define IXGBE_SFF_SM_LENGTH_KM 0xE
+#define IXGBE_SFF_SM_LENGTH_100M 0xF
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index b1bfeb21537a..61f2ef67defd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3286,6 +3286,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_1g_lx_core1 = 14,
ixgbe_sfp_type_1g_bx_core0 = 15,
ixgbe_sfp_type_1g_bx_core1 = 16,
+ ixgbe_sfp_type_10g_bx_core0 = 17,
+ ixgbe_sfp_type_10g_bx_core1 = 18,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
index b5805969404f..0fbbcb5400c7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -307,7 +307,7 @@ static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no)
}
/* Setup registers for a hardware Rx Queue */
-static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
+static int octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
{
u64 reg_val;
u64 oq_ctl = 0ULL;
@@ -355,6 +355,7 @@ static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
reg_val = ((u64)time_threshold << 32) |
CFG_GET_OQ_INTR_PKT(oct->conf);
octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+ return 0;
}
/* Setup registers for a PF mailbox */
@@ -637,6 +638,19 @@ static int octep_soft_reset_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF);
+ /* Firmware status CSR is supposed to be cleared by
+ * core domain reset, but due to a hw bug, it is not.
+ * Set it to DOWNING right before reset so that it is not
+ * left in READY (1) state after a reset. This is required
+ * in addition to the early setting to handle the case where
+ * the OcteonTX is unexpectedly reset, reboots, and then
+ * the module is removed.
+ */
+ OCTEP_PCI_WIN_WRITE(oct,
+ CN9K_PEMX_PFX_CSX_PFCFGX(0,
+ 0, CN9K_PCIEEP_VSECST_CTL),
+ FW_STATUS_DOWNING);
+
/* Set core domain reset bit */
OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1);
/* Wait for 100ms as Octeon resets. */
@@ -696,14 +710,26 @@ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
/* Disable all interrupts */
static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
{
- u64 intr_mask = 0ULL;
+ u64 reg_val, intr_mask = 0ULL;
int srn, num_rings, i;
srn = CFG_GET_PORTS_PF_SRN(oct->conf);
num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
- for (i = 0; i < num_rings; i++)
- intr_mask |= (0x1ULL << (srn + i));
+ for (i = 0; i < num_rings; i++) {
+ intr_mask |= BIT_ULL(srn + i);
+ reg_val = octep_read_csr64(oct,
+ CN93_SDP_R_IN_INT_LEVELS(srn + i));
+ reg_val &= ~CN93_INT_ENA_BIT;
+ octep_write_csr64(oct,
+ CN93_SDP_R_IN_INT_LEVELS(srn + i), reg_val);
+
+ reg_val = octep_read_csr64(oct,
+ CN93_SDP_R_OUT_INT_LEVELS(srn + i));
+ reg_val &= ~CN93_INT_ENA_BIT;
+ octep_write_csr64(oct,
+ CN93_SDP_R_OUT_INT_LEVELS(srn + i), reg_val);
+ }
octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
@@ -894,4 +920,17 @@ void octep_device_setup_cn93_pf(struct octep_device *oct)
octep_init_config_cn93_pf(oct);
octep_configure_ring_mapping_cn93_pf(oct);
+
+ if (oct->chip_id == OCTEP_PCI_DEVICE_ID_CN98_PF)
+ return;
+
+ /* Firmware status CSR is supposed to be cleared by
+ * core domain reset, but due to IPBUPEM-38842, it is not.
+ * Set it to RUNNING early in boot, so that unexpected resets
+ * leave it in a state that is not READY (1).
+ */
+ OCTEP_PCI_WIN_WRITE(oct,
+ CN9K_PEMX_PFX_CSX_PFCFGX(0,
+ 0, CN9K_PCIEEP_VSECST_CTL),
+ FW_STATUS_RUNNING);
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
index 5de0b5ecbc5f..ad2f4984e40a 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
@@ -8,6 +8,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
#include "octep_config.h"
#include "octep_main.h"
@@ -327,12 +328,14 @@ static void octep_setup_iq_regs_cnxk_pf(struct octep_device *oct, int iq_no)
}
/* Setup registers for a hardware Rx Queue */
-static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
+static int octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
{
- u64 reg_val;
- u64 oq_ctl = 0ULL;
- u32 time_threshold = 0;
struct octep_oq *oq = oct->oq[oq_no];
+ unsigned long t_out_jiffies;
+ u32 time_threshold = 0;
+ u64 oq_ctl = 0ULL;
+ u64 reg_ba_val;
+ u64 reg_val;
oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
@@ -343,6 +346,36 @@ static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
} while (!(reg_val & CNXK_R_OUT_CTL_IDLE));
}
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no), oq->max_count);
+ /* Wait for WMARK to get applied */
+ usleep_range(10, 15);
+
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+ reg_ba_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no));
+
+ if (reg_ba_val != oq->desc_ring_dma) {
+ t_out_jiffies = jiffies + 10 * HZ;
+ do {
+ if (reg_ba_val == ULLONG_MAX)
+ return -EFAULT;
+ octep_write_csr64(oct,
+ CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct,
+ CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+ reg_ba_val =
+ octep_read_csr64(oct,
+ CNXK_SDP_R_OUT_SLIST_BADDR(oq_no));
+ } while ((reg_ba_val != oq->desc_ring_dma) &&
+ time_before(jiffies, t_out_jiffies));
+
+ if (reg_ba_val != oq->desc_ring_dma)
+ return -EAGAIN;
+ }
reg_val &= ~(CNXK_R_OUT_CTL_IMODE);
reg_val &= ~(CNXK_R_OUT_CTL_ROR_P);
@@ -356,10 +389,6 @@ static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
reg_val |= (CNXK_R_OUT_CTL_ES_P);
octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), reg_val);
- octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
- oq->desc_ring_dma);
- octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
- oq->max_count);
oq_ctl = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
@@ -385,6 +414,7 @@ static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
reg_val &= ~0xFFFFFFFFULL;
reg_val |= CFG_GET_OQ_WMARK(oct->conf);
octep_write_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no), reg_val);
+ return 0;
}
/* Setup registers for a PF mailbox */
@@ -660,7 +690,7 @@ static int octep_soft_reset_cnxk_pf(struct octep_device *oct)
* the module is removed.
*/
OCTEP_PCI_WIN_WRITE(oct, CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL),
- FW_STATUS_RUNNING);
+ FW_STATUS_DOWNING);
/* Set chip domain reset bit */
OCTEP_PCI_WIN_WRITE(oct, CNXK_RST_CHIP_DOMAIN_W1S, 1);
@@ -720,14 +750,26 @@ static void octep_enable_interrupts_cnxk_pf(struct octep_device *oct)
/* Disable all interrupts */
static void octep_disable_interrupts_cnxk_pf(struct octep_device *oct)
{
- u64 intr_mask = 0ULL;
+ u64 reg_val, intr_mask = 0ULL;
int srn, num_rings, i;
srn = CFG_GET_PORTS_PF_SRN(oct->conf);
num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
- for (i = 0; i < num_rings; i++)
- intr_mask |= (0x1ULL << (srn + i));
+ for (i = 0; i < num_rings; i++) {
+ intr_mask |= BIT_ULL(srn + i);
+ reg_val = octep_read_csr64(oct,
+ CNXK_SDP_R_IN_INT_LEVELS(srn + i));
+ reg_val &= ~CNXK_INT_ENA_BIT;
+ octep_write_csr64(oct,
+ CNXK_SDP_R_IN_INT_LEVELS(srn + i), reg_val);
+
+ reg_val = octep_read_csr64(oct,
+ CNXK_SDP_R_OUT_INT_LEVELS(srn + i));
+ reg_val &= ~CNXK_INT_ENA_BIT;
+ octep_write_csr64(oct,
+ CNXK_SDP_R_OUT_INT_LEVELS(srn + i), reg_val);
+ }
octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index 81ac4267811c..35d0ff289a70 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -77,7 +77,7 @@ struct octep_pci_win_regs {
struct octep_hw_ops {
void (*setup_iq_regs)(struct octep_device *oct, int q);
- void (*setup_oq_regs)(struct octep_device *oct, int q);
+ int (*setup_oq_regs)(struct octep_device *oct, int q);
void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
irqreturn_t (*mbox_intr_handler)(void *ioq_vector);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
index ca473502d7a0..06eff23521fa 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -5,6 +5,8 @@
*
*/
+#include <linux/bitfield.h>
+
#ifndef _OCTEP_REGS_CN9K_PF_H_
#define _OCTEP_REGS_CN9K_PF_H_
@@ -383,8 +385,37 @@
/* bit 1 for firmware heartbeat interrupt */
#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+#define FW_STATUS_DOWNING 0ULL
+#define FW_STATUS_RUNNING 2ULL
+
+#define CN9K_PEM_GENMASK BIT_ULL(36)
+#define CN9K_PF_GENMASK GENMASK_ULL(21, 18)
+#define CN9K_PFX_CSX_PFCFGX_SHADOW_BIT BIT_ULL(16)
+#define CN9K_PFX_CSX_PFCFGX_BASE_ADDR (0x8e0000008000ULL)
+#define CN9K_4BYTE_ALIGNED_ADDRESS_OFFSET(offset) ((offset) & BIT_ULL(2))
+#define CN9K_PEMX_PFX_CSX_PFCFGX cn9k_pemx_pfx_csx_pfcfgx
+
+static inline u64 cn9k_pemx_pfx_csx_pfcfgx(u64 pem, u32 pf, u32 offset)
+{
+ u32 shadow_addr_bit, pf_addr_bits, aligned_offset;
+ u64 pem_addr_bits;
+
+ pem_addr_bits = FIELD_PREP(CN9K_PEM_GENMASK, pem);
+ pf_addr_bits = FIELD_PREP(CN9K_PF_GENMASK, pf);
+ shadow_addr_bit = CN9K_PFX_CSX_PFCFGX_SHADOW_BIT & (offset);
+ aligned_offset = rounddown((offset), 8);
+
+ return (CN9K_PFX_CSX_PFCFGX_BASE_ADDR | pem_addr_bits
+ | pf_addr_bits | shadow_addr_bit | aligned_offset)
+ + CN9K_4BYTE_ALIGNED_ADDRESS_OFFSET(offset);
+}
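
Worked through for the one call site in this patch (pem = 0, pf = 0, offset = CN9K_PCIEEP_VSECST_CTL = 0x4D0) all the field terms vanish; a second offset with bit 2 set — hypothetical, no caller in this patch — shows what the 4-byte correction is for:

/*
 *   pem = 0, pf = 0, offset = 0x4D0:
 *     pem/pf/shadow terms = 0, rounddown(0x4D0, 8) = 0x4D0,
 *     0x4D0 & BIT(2) = 0
 *     => 0x8e0000008000 | 0x4D0 = 0x8e00000084d0
 *
 *   pem = 0, pf = 0, offset = 0x4D4 (upper 4 bytes of the same
 *   8-byte window):
 *     rounddown(0x4D4, 8) = 0x4D0, 0x4D4 & BIT(2) = 4
 *     => 0x8e00000084d0 + 4 = 0x8e00000084d4
 */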
+
+/* Register defines for use with CN9K_PEMX_PFX_CSX_PFCFGX */
+#define CN9K_PCIEEP_VSECST_CTL 0x4D0
+
#define CN93_PEM_BAR4_INDEX 7
#define CN93_PEM_BAR4_INDEX_SIZE 0x400000ULL
#define CN93_PEM_BAR4_INDEX_OFFSET (CN93_PEM_BAR4_INDEX * CN93_PEM_BAR4_INDEX_SIZE)
+#define CN93_INT_ENA_BIT BIT_ULL(62)
#endif /* _OCTEP_REGS_CN9K_PF_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
index e637d7c8224d..006e23882ee9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
@@ -396,6 +396,7 @@
#define CNXK_SDP_EPF_OEI_RINT_DATA_BIT_MBOX BIT_ULL(0)
/* bit 1 for firmware heartbeat interrupt */
#define CNXK_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+#define FW_STATUS_DOWNING 0ULL
#define FW_STATUS_RUNNING 2ULL
#define CNXK_PEMX_PFX_CSX_PFCFGX(pem, pf, offset) ({ typeof(offset) _off = (offset); \
((0x8e0000008000 | \
@@ -412,5 +413,6 @@
#define CNXK_PEM_BAR4_INDEX 7
#define CNXK_PEM_BAR4_INDEX_SIZE 0x400000ULL
#define CNXK_PEM_BAR4_INDEX_OFFSET (CNXK_PEM_BAR4_INDEX * CNXK_PEM_BAR4_INDEX_SIZE)
+#define CNXK_INT_ENA_BIT BIT_ULL(62)
#endif /* _OCTEP_REGS_CNXK_PF_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 82b6b19e76b4..f2a7c6a76c74 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -12,6 +12,8 @@
#include "octep_config.h"
#include "octep_main.h"
+static void octep_oq_free_ring_buffers(struct octep_oq *oq);
+
static void octep_oq_reset_indices(struct octep_oq *oq)
{
oq->host_read_idx = 0;
@@ -170,11 +172,15 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
goto oq_fill_buff_err;
octep_oq_reset_indices(oq);
- oct->hw_ops.setup_oq_regs(oct, q_no);
+ if (oct->hw_ops.setup_oq_regs(oct, q_no))
+ goto oq_setup_err;
+
oct->num_oqs++;
return 0;
+oq_setup_err:
+ octep_oq_free_ring_buffers(oq);
oq_fill_buff_err:
vfree(oq->buff_info);
oq->buff_info = NULL;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
index 88937fce75f1..4c769b27c278 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
@@ -196,7 +196,7 @@ static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no)
}
/* Setup registers for a hardware Rx Queue */
-static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
+static int octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
{
struct octep_vf_oq *oq = oct->oq[oq_no];
u32 time_threshold = 0;
@@ -239,6 +239,7 @@ static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+ return 0;
}
/* Setup registers for a VF mailbox */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
index 1f79dfad42c6..a968b93a6794 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
@@ -199,11 +199,13 @@ static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no)
}
/* Setup registers for a hardware Rx Queue */
-static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
+static int octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
{
struct octep_vf_oq *oq = oct->oq[oq_no];
+ unsigned long t_out_jiffies;
u32 time_threshold = 0;
u64 oq_ctl = ULL(0);
+ u64 reg_ba_val;
u64 reg_val;
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
@@ -214,6 +216,38 @@ static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
} while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE));
}
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no),
+ oq->max_count);
+ /* Wait for WMARK to get applied */
+ usleep_range(10, 15);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+ reg_ba_val = octep_vf_read_csr64(oct,
+ CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no));
+ if (reg_ba_val != oq->desc_ring_dma) {
+ t_out_jiffies = jiffies + 10 * HZ;
+ do {
+ if (reg_ba_val == ULLONG_MAX)
+ return -EFAULT;
+ octep_vf_write_csr64(oct,
+ CNXK_VF_SDP_R_OUT_SLIST_BADDR
+ (oq_no), oq->desc_ring_dma);
+ octep_vf_write_csr64(oct,
+ CNXK_VF_SDP_R_OUT_SLIST_RSIZE
+ (oq_no), oq->max_count);
+ reg_ba_val =
+ octep_vf_read_csr64(oct,
+ CNXK_VF_SDP_R_OUT_SLIST_BADDR
+ (oq_no));
+ } while ((reg_ba_val != oq->desc_ring_dma) &&
+ time_before(jiffies, t_out_jiffies));
+
+ if (reg_ba_val != oq->desc_ring_dma)
+ return -EAGAIN;
+ }
reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE);
reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P);
@@ -227,8 +261,6 @@ static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
reg_val |= (CNXK_VF_R_OUT_CTL_ES_P);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
- octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
- octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
/* Clear the ISIZE and BSIZE (22-0) */
@@ -250,6 +282,7 @@ static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
reg_val &= ~GENMASK_ULL(31, 0);
reg_val |= CFG_GET_OQ_WMARK(oct->conf);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val);
+ return 0;
}
/* Setup registers for a VF mailbox */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
index b9f13506f462..c74cd2369e90 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -55,7 +55,7 @@ struct octep_vf_mmio {
struct octep_vf_hw_ops {
void (*setup_iq_regs)(struct octep_vf_device *oct, int q);
- void (*setup_oq_regs)(struct octep_vf_device *oct, int q);
+ int (*setup_oq_regs)(struct octep_vf_device *oct, int q);
void (*setup_mbox_regs)(struct octep_vf_device *oct, int mbox);
irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
index d70c8be3cfc4..6f865dbbba6c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -12,6 +12,8 @@
#include "octep_vf_config.h"
#include "octep_vf_main.h"
+static void octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq);
+
static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq)
{
oq->host_read_idx = 0;
@@ -171,11 +173,15 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
goto oq_fill_buff_err;
octep_vf_oq_reset_indices(oq);
- oct->hw_ops.setup_oq_regs(oct, q_no);
+ if (oct->hw_ops.setup_oq_regs(oct, q_no))
+ goto oq_setup_err;
+
oct->num_oqs++;
return 0;
+oq_setup_err:
+ octep_vf_oq_free_ring_buffers(oq);
oq_fill_buff_err:
vfree(oq->buff_info);
oq->buff_info = NULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 42044cd810b1..fd4792e432bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1823,6 +1823,8 @@ static int cgx_lmac_exit(struct cgx *cgx)
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
kfree(lmac->mac_to_index_bmap.bmap);
+ rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
+ rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
kfree(lmac->name);
kfree(lmac);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 747fbdf2a908..8530df8b3fda 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -3632,11 +3632,22 @@ static void rvu_remove(struct pci_dev *pdev)
devm_kfree(&pdev->dev, rvu);
}
+static void rvu_shutdown(struct pci_dev *pdev)
+{
+ struct rvu *rvu = pci_get_drvdata(pdev);
+
+ if (!rvu)
+ return;
+
+ rvu_clear_rvum_blk_revid(rvu);
+}
+
static struct pci_driver rvu_driver = {
.name = DRV_NAME,
.id_table = rvu_id_table,
.probe = rvu_probe,
.remove = rvu_remove,
+ .shutdown = rvu_shutdown,
};
static int __init rvu_init_module(void)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 2f485a930edd..49f7ff5eddfc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4938,12 +4938,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
/* Set chan/link to backpressure TL3 instead of TL2 */
rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
- /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0, TM11 = 0)
* This sticky mode is known to cause SQ stalls when multiple
- * SQs are mapped to same SMQ and transmitting pkts at a time.
+ * SQs are mapped to same SMQ and transmitting pkts simultaneously.
+ * NIX PSE may deadlock when there is any sticky to non-sticky
+ * transmission. Hence disable it (TM5 = 0).
*/
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
- cfg &= ~BIT_ULL(15);
+ cfg &= ~(BIT_ULL(15) | BIT_ULL(14) | BIT_ULL(23));
+ /* NIX may drop credits when condition clocks are turned off.
+ * Hence enable control flow clk (set TM9 = 1).
+ */
+ cfg |= BIT_ULL(21);
rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
ltdefs = rvu->kpu.lt_def;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 8cdfc36d79d2..255c7e2633bb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -789,8 +789,15 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
/* LMTID is same as AURA Id */
val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
- /* Set if [127:64] of last 128bit word has a valid pointer */
- count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
+ /* Meaning of count_eot
+ * CN10K: count_eot = 0 if the number of pointers to free is even,
+ * count_eot = 1 if the number of pointers to free is odd.
+ *
+ * CN20K: count_eot represents the least significant 2 bits of the
+ * total number of valid pointers to free.
+ * Example: if 7 pointers are freed (0b111), count_eot = 0b11.
+ */
+ count_eot = (num_ptrs - 1) & 0x3ULL;
/* Set AURA ID to free pointer */
ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
/* Target address for LMTST flush tells HW how many 128bit
@@ -800,7 +807,7 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
*/
if (num_ptrs > 2) {
size = (sizeof(u64) * num_ptrs) / 16;
- if (!count_eot)
+ if (!(count_eot & 1))
size++;
tar_addr |= ((size - 1) & 0x7) << 4;
}
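
Putting the two pieces together with the comment's own case — freeing 7 pointers, so num_ptrs = 8 once ptrs[0]'s count/aura word is included:

/*
 *   count_eot = (8 - 1) & 0x3 = 0b11
 *   size      = (8 * sizeof(u64)) / 16 = 4 128-bit words
 *               (count_eot bit 0 is set, so no +1 rounding)
 *   tar_addr |= ((4 - 1) & 0x7) << 4
 *
 * With 6 pointers (num_ptrs = 7): count_eot = 0b10 and 56 / 16 = 3,
 * and the clear bit 0 bumps size to 4 to cover the half-filled word.
 */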
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
index c7bd4f3c6c6b..069e39b48847 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
@@ -17,4 +17,4 @@ struct otx2_devlink {
int otx2_register_dl(struct otx2_nic *pfvf);
void otx2_unregister_dl(struct otx2_nic *pfvf);
-#endif /* RVU_DEVLINK_H */
+#endif /* OTX2_DEVLINK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index b6449f0a9e7d..a0340f3422bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -66,6 +66,8 @@ static const struct otx2_stat otx2_queue_stats[] = {
{ "frames", 1 },
};
+#define OTX2_FEC_MAX_INDEX 4
+
static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
@@ -568,6 +570,13 @@ static int otx2_set_coalesce(struct net_device *netdev,
return 0;
}
+static u32 otx2_get_rx_ring_count(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ return pfvf->hw.rx_queues;
+}
+
static int otx2_get_rss_hash_opts(struct net_device *dev,
struct ethtool_rxfh_fields *nfc)
{
@@ -742,10 +751,6 @@ static int otx2_get_rxnfc(struct net_device *dev,
int ret = -EOPNOTSUPP;
switch (nfc->cmd) {
- case ETHTOOL_GRXRINGS:
- nfc->data = pfvf->hw.rx_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
if (netif_running(dev) && ntuple) {
nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
@@ -1028,15 +1033,14 @@ static int otx2_get_fecparam(struct net_device *netdev,
ETHTOOL_FEC_BASER,
ETHTOOL_FEC_RS,
ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
-#define FEC_MAX_INDEX 4
- if (pfvf->linfo.fec < FEC_MAX_INDEX)
- fecparam->active_fec = fec[pfvf->linfo.fec];
+
+ fecparam->active_fec = fec[pfvf->linfo.fec];
rsp = otx2_get_fwdata(pfvf);
if (IS_ERR(rsp))
return PTR_ERR(rsp);
- if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
+ if (rsp->fwdata.supported_fec < OTX2_FEC_MAX_INDEX) {
if (!rsp->fwdata.supported_fec)
fecparam->fec = ETHTOOL_FEC_NONE;
else
@@ -1344,6 +1348,7 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_coalesce = otx2_set_coalesce,
.get_rxnfc = otx2_get_rxnfc,
.set_rxnfc = otx2_set_rxnfc,
+ .get_rx_ring_count = otx2_get_rx_ring_count,
.get_rxfh_key_size = otx2_get_rxfh_key_size,
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
@@ -1462,6 +1467,7 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_channels = otx2_get_channels,
.get_rxnfc = otx2_get_rxnfc,
.set_rxnfc = otx2_set_rxnfc,
+ .get_rx_ring_count = otx2_get_rx_ring_count,
.get_rxfh_key_size = otx2_get_rxfh_key_size,
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 6b2d8559f0eb..444bb67494ab 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -3315,6 +3315,7 @@ err_free_zc_bmap:
err_sriov_cleannup:
otx2_sriov_vfcfg_cleanup(pf);
err_pf_sriov_init:
+ otx2_unregister_dl(pf);
otx2_shutdown_tc(pf);
err_mcam_flow_del:
otx2_mcam_flow_del(pf);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
index 2f52daba58e6..750c72f628e5 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -717,11 +717,6 @@ static int prestera_ethtool_set_fecparam(struct net_device *dev,
return -EINVAL;
}
- if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
- netdev_err(dev, "FEC set is not allowed on non-SFP ports\n");
- return -EINVAL;
- }
-
fec = PRESTERA_PORT_FEC_MAX;
for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
if ((port_fec_caps[mode].eth_fec & fecparam->fec) &&
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 3e13322470da..2989a77e3b42 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -542,7 +542,7 @@ static int prestera_ldr_wait_reg32(struct prestera_fw *fw,
10 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
}
-static u32 prestera_ldr_wait_buf(struct prestera_fw *fw, size_t len)
+static int prestera_ldr_wait_buf(struct prestera_fw *fw, size_t len)
{
u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_BUF_RD_REG);
u32 buf_len = fw->ldr_buf_len;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 05349a0b2db1..cf4e26d337bb 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -78,7 +78,6 @@ static const struct pci_device_id skge_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
{ PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index e68997a29191..35fef28ee2f9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4625,18 +4625,20 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
} while (u64_stats_fetch_retry(&hwstats->syncp, start));
}
+static u32 mtk_get_rx_ring_count(struct net_device *dev)
+{
+ if (dev->hw_features & NETIF_F_LRO)
+ return MTK_MAX_RX_RING_NUM;
+
+ return 0;
+}
+
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- if (dev->hw_features & NETIF_F_LRO) {
- cmd->data = MTK_MAX_RX_RING_NUM;
- ret = 0;
- }
- break;
case ETHTOOL_GRXCLSRLCNT:
if (dev->hw_features & NETIF_F_LRO) {
struct mtk_mac *mac = netdev_priv(dev);
@@ -4741,6 +4743,7 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.set_pauseparam = mtk_set_pauseparam,
.get_rxnfc = mtk_get_rxnfc,
.set_rxnfc = mtk_set_rxnfc,
+ .get_rx_ring_count = mtk_get_rx_ring_count,
.get_eee = mtk_get_eee,
.set_eee = mtk_set_eee,
};
@@ -4991,7 +4994,6 @@ static int mtk_sgmii_init(struct mtk_eth *eth)
{
struct device_node *np;
struct regmap *regmap;
- u32 flags;
int i;
for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -5000,18 +5002,16 @@ static int mtk_sgmii_init(struct mtk_eth *eth)
break;
regmap = syscon_node_to_regmap(np);
- flags = 0;
- if (of_property_read_bool(np, "mediatek,pnswap"))
- flags |= MTK_SGMII_FLAG_PN_SWAP;
-
- of_node_put(np);
-
- if (IS_ERR(regmap))
+ if (IS_ERR(regmap)) {
+ of_node_put(np);
return PTR_ERR(regmap);
+ }
- eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
- eth->soc->ana_rgc3,
- flags);
+ eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev,
+ of_fwnode_handle(np),
+ regmap,
+ eth->soc->ana_rgc3);
+ of_node_put(np);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 87f35bcbeff8..c5d564e5a581 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -636,28 +636,20 @@ static int get_real_size(const struct sk_buff *skb,
struct net_device *dev,
int *lso_header_size,
bool *inline_ok,
- void **pfrag,
- int *hopbyhop)
+ void **pfrag)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int real_size;
if (shinfo->gso_size) {
*inline_ok = false;
- *hopbyhop = 0;
if (skb->encapsulation) {
*lso_header_size = skb_inner_tcp_all_headers(skb);
} else {
- /* Detects large IPV6 TCP packets and prepares for removal of
- * HBH header that has been pushed by ip6_xmit(),
- * mainly so that tcpdump can dissect them.
- */
- if (ipv6_has_hopopt_jumbo(skb))
- *hopbyhop = sizeof(struct hop_jumbo_hdr);
*lso_header_size = skb_tcp_all_headers(skb);
}
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
- ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
+ ALIGN(*lso_header_size + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
/* We add a segment for the skb linear buffer only if
* it contains data */
@@ -884,7 +876,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int desc_size;
int real_size;
u32 index, bf_index;
- struct ipv6hdr *h6;
__be32 op_own;
int lso_header_size;
void *fragptr = NULL;
@@ -893,7 +884,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool stop_queue;
bool inline_ok;
u8 data_offset;
- int hopbyhop;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
@@ -903,7 +893,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
- &inline_ok, &fragptr, &hopbyhop);
+ &inline_ok, &fragptr);
if (unlikely(!real_size))
goto tx_drop_count;
@@ -956,7 +946,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
data = &tx_desc->data;
data_offset = offsetof(struct mlx4_en_tx_desc, data);
} else {
- int lso_align = ALIGN(lso_header_size - hopbyhop + 4, DS_SIZE);
+ int lso_align = ALIGN(lso_header_size + 4, DS_SIZE);
data = (void *)&tx_desc->lso + lso_align;
data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
@@ -1021,31 +1011,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
- lso_header_size -= hopbyhop;
/* Fill in the LSO prefix */
tx_desc->lso.mss_hdr_size = cpu_to_be32(
shinfo->gso_size << 16 | lso_header_size);
+ /* Copy headers;
+ * note that we already verified that it is linear
+ */
+ memcpy(tx_desc->lso.header, skb->data, lso_header_size);
- if (unlikely(hopbyhop)) {
- /* remove the HBH header.
- * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
- */
- memcpy(tx_desc->lso.header, skb->data, ETH_HLEN + sizeof(*h6));
- h6 = (struct ipv6hdr *)((char *)tx_desc->lso.header + ETH_HLEN);
- h6->nexthdr = IPPROTO_TCP;
- /* Copy the TCP header after the IPv6 one */
- memcpy(h6 + 1,
- skb->data + ETH_HLEN + sizeof(*h6) +
- sizeof(struct hop_jumbo_hdr),
- tcp_hdrlen(skb));
- /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
- } else {
- /* Copy headers;
- * note that we already verified that it is linear
- */
- memcpy(tx_desc->lso.header, skb->data, lso_header_size);
- }
ring->tso_packets++;
i = shinfo->gso_segs;
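
A quick worked example of the simplified LSO prefix alignment, assuming DS_SIZE is the 16-byte mlx4 data-segment size (an assumption for illustration; CTRL_SIZE is omitted since only the alignment term changed in this patch).

#include <stdio.h>

#define DS_SIZE		16
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Ethernet + IPv4 + TCP without options: 14 + 20 + 20 bytes. */
	int lso_header_size = 54;

	/* 54 + 4 = 58, rounded up to a 16-byte multiple -> 64. */
	printf("lso align = %d\n", ALIGN(lso_header_size + 4, DS_SIZE));
	return 0;
}
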
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 1e5522a19483..3981dd81d4c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -9,7 +9,9 @@
*/
struct mlx5_dpll {
struct dpll_device *dpll;
+ dpll_tracker dpll_tracker;
struct dpll_pin *dpll_pin;
+ dpll_tracker pin_tracker;
struct mlx5_core_dev *mdev;
struct workqueue_struct *wq;
struct delayed_work work;
@@ -136,7 +138,7 @@ mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status,
{
if (!synce_status->oper_freq_measure)
return -ENODATA;
- *ffo = synce_status->frequency_diff;
+ *ffo = 1000000LL * synce_status->frequency_diff;
return 0;
}
@@ -438,7 +440,8 @@ static int mlx5_dpll_probe(struct auxiliary_device *adev,
auxiliary_set_drvdata(adev, mdpll);
/* Multiple mdev instances might share one DPLL device. */
- mdpll->dpll = dpll_device_get(clock_id, 0, THIS_MODULE);
+ mdpll->dpll = dpll_device_get(clock_id, 0, THIS_MODULE,
+ &mdpll->dpll_tracker);
if (IS_ERR(mdpll->dpll)) {
err = PTR_ERR(mdpll->dpll);
goto err_free_mdpll;
@@ -451,7 +454,8 @@ static int mlx5_dpll_probe(struct auxiliary_device *adev,
/* Multiple mdev instances might share one DPLL pin. */
mdpll->dpll_pin = dpll_pin_get(clock_id, mlx5_get_dev_index(mdev),
- THIS_MODULE, &mlx5_dpll_pin_properties);
+ THIS_MODULE, &mlx5_dpll_pin_properties,
+ &mdpll->pin_tracker);
if (IS_ERR(mdpll->dpll_pin)) {
err = PTR_ERR(mdpll->dpll_pin);
goto err_unregister_dpll_device;
@@ -479,11 +483,11 @@ err_unregister_dpll_pin:
dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
&mlx5_dpll_pins_ops, mdpll);
err_put_dpll_pin:
- dpll_pin_put(mdpll->dpll_pin);
+ dpll_pin_put(mdpll->dpll_pin, &mdpll->pin_tracker);
err_unregister_dpll_device:
dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
err_put_dpll_device:
- dpll_device_put(mdpll->dpll);
+ dpll_device_put(mdpll->dpll, &mdpll->dpll_tracker);
err_free_mdpll:
kfree(mdpll);
return err;
@@ -499,9 +503,9 @@ static void mlx5_dpll_remove(struct auxiliary_device *adev)
destroy_workqueue(mdpll->wq);
dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
&mlx5_dpll_pins_ops, mdpll);
- dpll_pin_put(mdpll->dpll_pin);
+ dpll_pin_put(mdpll->dpll_pin, &mdpll->pin_tracker);
dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
- dpll_device_put(mdpll->dpll);
+ dpll_device_put(mdpll->dpll, &mdpll->dpll_tracker);
kfree(mdpll);
mlx5_dpll_synce_status_set(mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index d000236ddbac..15cb27aea2c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "ecpf.h"
+#include "eswitch.h"
bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
{
@@ -49,7 +50,7 @@ static int mlx5_host_pf_init(struct mlx5_core_dev *dev)
/* ECPF shall enable HCA for host PF in the same way a PF
* does this for its VFs when ECPF is not an eswitch manager.
*/
- err = mlx5_cmd_host_pf_enable_hca(dev);
+ err = mlx5_esw_host_pf_enable_hca(dev);
if (err)
mlx5_core_err(dev, "Failed to enable external host PF HCA err(%d)\n", err);
@@ -63,7 +64,7 @@ static void mlx5_host_pf_cleanup(struct mlx5_core_dev *dev)
if (mlx5_ecpf_esw_admins_host_pf(dev))
return;
- err = mlx5_cmd_host_pf_disable_hca(dev);
+ err = mlx5_esw_host_pf_disable_hca(dev);
if (err) {
mlx5_core_err(dev, "Failed to disable external host PF HCA err(%d)\n", err);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index ff4ab4691baf..a7de3a3efc49 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -82,9 +82,10 @@ struct page_pool;
#define MLX5E_RX_MAX_HEAD (256)
#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8)
-#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
-#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
-#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
+#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE \
+ (PAGE_SIZE >> MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE)
+#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE \
+ (PAGE_SHIFT - MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE)
#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT (6)
#define MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT (12)
#define MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE (16)
@@ -388,6 +389,7 @@ enum {
MLX5E_SQ_STATE_DIM,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
+ MLX5E_SQ_STATE_LOCK_NEEDED,
MLX5E_NUM_SQ_STATES, /* Must be kept last */
};
@@ -545,6 +547,11 @@ struct mlx5e_icosq {
u32 sqn;
u16 reserved_room;
unsigned long state;
+ /* icosq can be accessed from any CPU and from different contexts
+ * (NAPI softirq or process/workqueue). Always use spin_lock_bh for
+ * simplicity and correctness across all contexts.
+ */
+ spinlock_t lock;
struct mlx5e_ktls_resync_resp *ktls_resync;
/* control path */
@@ -632,16 +639,11 @@ struct mlx5e_dma_info {
};
struct mlx5e_shampo_hd {
- struct mlx5e_frag_page *pages;
u32 hd_per_wq;
- u32 hd_per_page;
- u16 hd_per_wqe;
- u8 log_hd_per_page;
- u8 log_hd_entry_size;
- unsigned long *bitmap;
- u16 pi;
- u16 ci;
- __be32 mkey_be;
+ u32 hd_buf_size;
+ u32 mkey;
+ u32 nentries;
+ DECLARE_FLEX_ARRAY(struct mlx5e_dma_info, hd_buf_pages);
};
struct mlx5e_hw_gro_data {
@@ -776,9 +778,7 @@ struct mlx5e_channel {
struct mlx5e_xdpsq xsksq;
/* Async ICOSQ */
- struct mlx5e_icosq async_icosq;
- /* async_icosq can be accessed from any CPU - the spinlock protects it. */
- spinlock_t async_icosq_lock;
+ struct mlx5e_icosq *async_icosq;
/* data path - accessed per napi poll */
const struct cpumask *aff_mask;
@@ -801,6 +801,21 @@ struct mlx5e_channel {
struct dim_cq_moder tx_cq_moder;
};
+static inline bool mlx5e_icosq_sync_lock(struct mlx5e_icosq *sq)
+{
+ if (likely(!test_bit(MLX5E_SQ_STATE_LOCK_NEEDED, &sq->state)))
+ return false;
+
+ spin_lock_bh(&sq->lock);
+ return true;
+}
+
+static inline void mlx5e_icosq_sync_unlock(struct mlx5e_icosq *sq, bool locked)
+{
+ if (unlikely(locked))
+ spin_unlock_bh(&sq->lock);
+}
+
struct mlx5e_ptp;
struct mlx5e_channels {
@@ -920,6 +935,7 @@ struct mlx5e_priv {
u8 max_opened_tc;
bool tx_ptp_opened;
bool rx_ptp_opened;
+ bool ktls_rx_was_enabled;
struct kernel_hwtstamp_config hwtstamp_config;
u16 q_counter[MLX5_SD_MAX_GROUP_SZ];
u16 drop_rq_q_counter;
@@ -1018,8 +1034,6 @@ void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode);
-void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len);
-void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
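
A userspace analogue of the mlx5e_icosq_sync_lock()/_unlock() pattern introduced above: the lock is taken only while a LOCK_NEEDED flag is set, keeping the common single-context path lock-free; the driver pairs setting the flag with synchronize_net() so lock-free users already in flight drain first. Names below are invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct icosq_model {
	atomic_bool lock_needed;
	pthread_mutex_t lock;
	unsigned int pc;	/* producer counter */
};

static bool sync_lock(struct icosq_model *sq)
{
	if (!atomic_load(&sq->lock_needed))	/* fast path */
		return false;
	pthread_mutex_lock(&sq->lock);
	return true;
}

static void sync_unlock(struct icosq_model *sq, bool locked)
{
	if (locked)
		pthread_mutex_unlock(&sq->lock);
}

int main(void)
{
	struct icosq_model sq = { .lock = PTHREAD_MUTEX_INITIALIZER };
	bool locked;

	atomic_store(&sq.lock_needed, true);	/* another context is active */
	locked = sync_lock(&sq);
	sq.pc++;				/* post a WQE */
	sync_unlock(&sq, locked);
	printf("pc=%u locked=%d\n", sq.pc, locked);
	return 0;
}
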
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
index 2c98a5299df3..6bd959f9083d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
@@ -29,6 +29,10 @@ struct mlx5e_dcbx {
u32 cable_len;
u32 xoff;
u16 port_buff_cell_sz;
+
+ /* Upper limits for the 100 Mbps and 1 Gbps units, in Kbps */
+ u64 upper_limit_100mbps;
+ u64 upper_limit_gbps;
};
#define MLX5E_MAX_DSCP (64)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index c9bdee9a8b30..8e99d07586fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -1068,26 +1068,6 @@ u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
return hd_per_wq;
}
-static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params,
- struct mlx5e_rq_param *rq_param)
-{
- int max_num_of_umr_per_wqe, max_hd_per_wqe, max_ksm_per_umr, rest;
- void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
- int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
- u32 wqebbs;
-
- max_ksm_per_umr = MLX5E_MAX_KSM_PER_WQE(mdev);
- max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
- max_num_of_umr_per_wqe = max_hd_per_wqe / max_ksm_per_umr;
- rest = max_hd_per_wqe % max_ksm_per_umr;
- wqebbs = MLX5E_KSM_UMR_WQEBBS(max_ksm_per_umr) * max_num_of_umr_per_wqe;
- if (rest)
- wqebbs += MLX5E_KSM_UMR_WQEBBS(rest);
- wqebbs *= wq_size;
- return wqebbs;
-}
-
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
@@ -1173,9 +1153,6 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
wqebbs += max_xsk_wqebbs;
}
- if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
- wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
-
/* UMR WQEs don't cross the page boundary, they are padded with NOPs.
* This padding is always smaller than the max WQE size. That gives us
* at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 9e2cf191ed30..4adc1adf9897 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -15,6 +15,7 @@ static const char * const sq_sw_state_type_name[] = {
[MLX5E_SQ_STATE_DIM] = "dim",
[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
+ [MLX5E_SQ_STATE_LOCK_NEEDED] = "lock_needed",
};
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 7e191e1569e8..f2a8453d8dce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -65,7 +65,6 @@ ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_
enum mlx5e_icosq_wqe_type {
MLX5E_ICOSQ_WQE_NOP,
MLX5E_ICOSQ_WQE_UMR_RX,
- MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
#ifdef CONFIG_MLX5_EN_TLS
MLX5E_ICOSQ_WQE_UMR_TLS,
MLX5E_ICOSQ_WQE_SET_PSV_TLS,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 2b05536d564a..4f984f6a2cb9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -23,6 +23,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &icosq->wq;
struct mlx5e_umr_wqe *umr_wqe;
struct xdp_buff **xsk_buffs;
+ bool sync_locked;
int batch, i;
u32 offset; /* 17-bit value with MTT. */
u16 pi;
@@ -47,6 +48,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
goto err_reuse_batch;
}
+ sync_locked = mlx5e_icosq_sync_lock(icosq);
pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
@@ -143,6 +145,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
};
icosq->pc += rq->mpwqe.umr_wqebbs;
+ mlx5e_icosq_sync_unlock(icosq, sync_locked);
icosq->doorbell_cseg = &umr_wqe->hdr.ctrl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index a59199ed590d..9e33156fac8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -26,10 +26,12 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
* active and not polled by NAPI. Return 0, because the upcoming
* activate will trigger the IRQ for us.
*/
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->async_icosq.state)))
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED,
+ &c->async_icosq->state)))
return 0;
- if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
+ if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX,
+ &c->async_icosq->state))
return 0;
mlx5e_trigger_napi_icosq(c);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 8bef99e8367e..b526b3898c22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -100,20 +100,6 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
#endif /* CONFIG_GENEVE */
-static inline void
-mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
-{
- int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
- struct udphdr *udphdr;
-
- if (skb->encapsulation)
- udphdr = (struct udphdr *)skb_inner_transport_header(skb);
- else
- udphdr = udp_hdr(skb);
-
- udphdr->len = htons(payload_len);
-}
-
struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_tx_tls_state tls;
@@ -131,9 +117,6 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
struct sk_buff *skb,
struct mlx5e_accel_tx_state *state)
{
- if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
- mlx5e_udp_gso_handle_tx_skb(skb);
-
#ifdef CONFIG_MLX5_EN_TLS
/* May send WQEs. */
if (tls_is_skb_tx_device_offloaded(skb))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index e3e57c849436..1c2cc2aad2b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -135,10 +135,15 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
int err = 0;
mutex_lock(&priv->state_lock);
- if (enable)
+ if (enable) {
err = mlx5e_accel_fs_tcp_create(priv->fs);
- else
+ if (!err && !priv->ktls_rx_was_enabled) {
+ priv->ktls_rx_was_enabled = true;
+ mlx5e_safe_reopen_channels(priv);
+ }
+ } else {
mlx5e_accel_fs_tcp_destroy(priv->fs);
+ }
mutex_unlock(&priv->state_lock);
return err;
@@ -161,6 +166,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
destroy_workqueue(priv->tls->rx_wq);
return err;
}
+ priv->ktls_rx_was_enabled = true;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index da2d1eb52c13..5d8fe252799e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -202,8 +202,8 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
int err;
err = 0;
- sq = &c->async_icosq;
- spin_lock_bh(&c->async_icosq_lock);
+ sq = c->async_icosq;
+ spin_lock_bh(&sq->lock);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg))
@@ -214,7 +214,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
- spin_unlock_bh(&c->async_icosq_lock);
+ spin_unlock_bh(&sq->lock);
return err;
@@ -277,10 +277,10 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
buf->priv_rx = priv_rx;
- spin_lock_bh(&sq->channel->async_icosq_lock);
+ spin_lock_bh(&sq->lock);
if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
- spin_unlock_bh(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->lock);
err = -ENOSPC;
goto err_dma_unmap;
}
@@ -311,7 +311,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
icosq_fill_wi(sq, pi, &wi);
sq->pc++;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
- spin_unlock_bh(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->lock);
return 0;
@@ -344,7 +344,7 @@ static void resync_handle_work(struct work_struct *work)
}
c = resync->priv->channels.c[priv_rx->rxq];
- sq = &c->async_icosq;
+ sq = c->async_icosq;
if (resync_post_get_progress_params(sq, priv_rx)) {
priv_rx->rq_stats->tls_resync_req_skip++;
@@ -371,7 +371,7 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
struct mlx5e_icosq *sq;
bool trigger_poll;
- sq = &c->async_icosq;
+ sq = c->async_icosq;
ktls_resync = sq->ktls_resync;
trigger_poll = false;
@@ -413,9 +413,9 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
return;
if (!napi_if_scheduled_mark_missed(&c->napi)) {
- spin_lock_bh(&c->async_icosq_lock);
+ spin_lock_bh(&sq->lock);
mlx5e_trigger_irq(sq);
- spin_unlock_bh(&c->async_icosq_lock);
+ spin_unlock_bh(&sq->lock);
}
}
@@ -753,7 +753,7 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
LIST_HEAD(local_list);
int i, j;
- sq = &c->async_icosq;
+ sq = c->async_icosq;
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
@@ -772,7 +772,7 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
spin_unlock(&ktls_resync->lock);
- spin_lock(&c->async_icosq_lock);
+ spin_lock(&sq->lock);
for (j = 0; j < i; j++) {
struct mlx5_wqe_ctrl_seg *cseg;
@@ -791,7 +791,7 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
}
if (db_cseg)
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock(&sq->lock);
priv_rx->rq_stats->tls_resync_res_ok += j;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index cb08799769ee..4022c7e78a2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -50,7 +50,8 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget);
static inline bool
mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
{
- return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
+ return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
+ &c->async_icosq->state);
}
static inline void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index fddf7c207f8e..4b86df6d5b9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -58,6 +58,20 @@ enum {
MLX5_DCB_CHG_NO_RESET,
};
+static const struct {
+ int scale;
+ const char *units_str;
+} mlx5e_bw_units[] = {
+ [MLX5_100_MBPS_UNIT] = {
+ .scale = 100,
+ .units_str = "Mbps",
+ },
+ [MLX5_GBPS_UNIT] = {
+ .scale = 1,
+ .units_str = "Gbps",
+ },
+};
+
#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
MLX5_CAP_QCAM_REG(mdev, qpts) && \
MLX5_CAP_QCAM_REG(mdev, qpdpm))
@@ -559,7 +573,7 @@ static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
+ u16 max_bw_value[IEEE_8021QAZ_MAX_TCS];
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
int err;
int i;
@@ -594,57 +608,41 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
+ u16 max_bw_value[IEEE_8021QAZ_MAX_TCS];
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
- u64 upper_limit_100mbps;
- u64 upper_limit_gbps;
int i;
- struct {
- int scale;
- const char *units_str;
- } units[] = {
- [MLX5_100_MBPS_UNIT] = {
- .scale = 100,
- .units_str = "Mbps",
- },
- [MLX5_GBPS_UNIT] = {
- .scale = 1,
- .units_str = "Gbps",
- },
- };
memset(max_bw_value, 0, sizeof(max_bw_value));
memset(max_bw_unit, 0, sizeof(max_bw_unit));
- upper_limit_100mbps = U8_MAX * MLX5E_100MB_TO_KB;
- upper_limit_gbps = U8_MAX * MLX5E_1GB_TO_KB;
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
- if (!maxrate->tc_maxrate[i]) {
+ u64 rate = maxrate->tc_maxrate[i];
+
+ if (!rate) {
max_bw_unit[i] = MLX5_BW_NO_LIMIT;
continue;
}
- if (maxrate->tc_maxrate[i] <= upper_limit_100mbps) {
- max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
- MLX5E_100MB_TO_KB);
+ if (rate <= priv->dcbx.upper_limit_100mbps) {
+ max_bw_value[i] = div_u64(rate, MLX5E_100MB_TO_KB);
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
- } else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) {
- max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
- MLX5E_1GB_TO_KB);
+ } else if (rate <= priv->dcbx.upper_limit_gbps) {
+ max_bw_value[i] = div_u64(rate, MLX5E_1GB_TO_KB);
max_bw_unit[i] = MLX5_GBPS_UNIT;
} else {
netdev_err(netdev,
"tc_%d maxrate %llu Kbps exceeds limit %llu\n",
- i, maxrate->tc_maxrate[i],
- upper_limit_gbps);
+ i, rate, priv->dcbx.upper_limit_gbps);
return -EINVAL;
}
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ u8 unit = max_bw_unit[i];
+
netdev_dbg(netdev, "%s: tc_%d <=> max_bw %u %s\n", __func__, i,
- max_bw_value[i] * units[max_bw_unit[i]].scale,
- units[max_bw_unit[i]].units_str);
+ max_bw_value[i] * mlx5e_bw_units[unit].scale,
+ mlx5e_bw_units[unit].units_str);
}
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
@@ -1268,6 +1266,8 @@ static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ bool max_bw_msb_supported;
+ u16 type_max;
mlx5e_trust_initialize(priv);
@@ -1285,5 +1285,11 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
+ max_bw_msb_supported = MLX5_CAP_QCAM_FEATURE(priv->mdev,
+ qetcr_qshr_max_bw_val_msb);
+ type_max = max_bw_msb_supported ? U16_MAX : U8_MAX;
+ priv->dcbx.upper_limit_100mbps = type_max * MLX5E_100MB_TO_KB;
+ priv->dcbx.upper_limit_gbps = type_max * MLX5E_1GB_TO_KB;
+
mlx5e_ets_init(priv);
}
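
A worked model of the unit selection above, assuming MLX5E_100MB_TO_KB == 100000 and MLX5E_1GB_TO_KB == 1000000 (plain Kbps conversions) and mirroring the U8_MAX/U16_MAX choice driven by the qetcr_qshr_max_bw_val_msb capability.

#include <stdint.h>
#include <stdio.h>

#define MLX5E_100MB_TO_KB	100000ULL
#define MLX5E_1GB_TO_KB		1000000ULL

static void pick_unit(uint64_t rate_kbps, uint64_t type_max)
{
	uint64_t limit_100mbps = type_max * MLX5E_100MB_TO_KB;
	uint64_t limit_gbps = type_max * MLX5E_1GB_TO_KB;

	if (rate_kbps <= limit_100mbps)
		printf("%llu Kbps -> %llu x 100 Mbps\n",
		       (unsigned long long)rate_kbps,
		       (unsigned long long)(rate_kbps / MLX5E_100MB_TO_KB));
	else if (rate_kbps <= limit_gbps)
		printf("%llu Kbps -> %llu x 1 Gbps\n",
		       (unsigned long long)rate_kbps,
		       (unsigned long long)(rate_kbps / MLX5E_1GB_TO_KB));
	else
		printf("%llu Kbps -> rejected\n",
		       (unsigned long long)rate_kbps);
}

int main(void)
{
	pick_unit(5000000, UINT8_MAX);	  /* 5 Gbps: 50 x 100 Mbps */
	pick_unit(400000000, UINT8_MAX);  /* 400 Gbps > 255 Gbps: rejected */
	pick_unit(400000000, UINT16_MAX); /* 16-bit field: 4000 x 100 Mbps */
	return 0;
}
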
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d3fef1e7e2f7..4a8dc85d5924 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -261,7 +261,7 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT,
ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT,
ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1600TAUI_8_1600TBASE_CR8_KR8, ext,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1600GAUI_8_1600GBASE_CR8_KR8, ext,
ETHTOOL_LINK_MODE_1600000baseCR8_Full_BIT,
ETHTOOL_LINK_MODE_1600000baseKR8_Full_BIT,
ETHTOOL_LINK_MODE_1600000baseDR8_Full_BIT,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 4b2963bbe7ff..4b8084420816 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -492,40 +492,6 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
return err;
}
-static int mlx5e_create_umr_ksm_mkey(struct mlx5_core_dev *mdev,
- u64 nentries, u8 log_entry_size,
- u32 *umr_mkey)
-{
- int inlen;
- void *mkc;
- u32 *in;
- int err;
-
- inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
-
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
-
- MLX5_SET(mkc, mkc, free, 1);
- MLX5_SET(mkc, mkc, umr_en, 1);
- MLX5_SET(mkc, mkc, lw, 1);
- MLX5_SET(mkc, mkc, lr, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KSM);
- mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
- MLX5_SET(mkc, mkc, translations_octword_size, nentries);
- MLX5_SET(mkc, mkc, log_page_size, log_entry_size);
- MLX5_SET64(mkc, mkc, len, nentries << log_entry_size);
- err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
-
- kvfree(in);
- return err;
-}
-
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;
@@ -551,29 +517,6 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
return err;
}
-static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
- u16 hd_per_wq, __be32 *umr_mkey)
-{
- u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
- u32 mkey;
- int err;
-
- if (max_ksm_size < hd_per_wq) {
- mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
- max_ksm_size, hd_per_wq);
- return -EINVAL;
- }
-
- err = mlx5e_create_umr_ksm_mkey(mdev, hd_per_wq,
- MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
- &mkey);
- if (err)
- return err;
-
- *umr_mkey = cpu_to_be32(mkey);
- return 0;
-}
-
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
struct mlx5e_wqe_frag_info next_frag = {};
@@ -754,145 +697,169 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
xdp_frag_size);
}
-static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, u16 hd_per_wq,
- int node)
+static void mlx5e_release_rq_hd_pages(struct mlx5e_rq *rq,
+ struct mlx5e_shampo_hd *shampo)
{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ for (int i = 0; i < shampo->nentries; i++) {
+ struct mlx5e_dma_info *info = &shampo->hd_buf_pages[i];
- shampo->hd_per_wq = hd_per_wq;
+ if (!info->page)
+ continue;
- shampo->bitmap = bitmap_zalloc_node(hd_per_wq, GFP_KERNEL, node);
- shampo->pages = kvzalloc_node(array_size(hd_per_wq,
- sizeof(*shampo->pages)),
- GFP_KERNEL, node);
- if (!shampo->bitmap || !shampo->pages)
- goto err_nomem;
+ dma_unmap_page(rq->pdev, info->addr, PAGE_SIZE,
+ rq->buff.map_dir);
+ __free_page(info->page);
+ }
+}
+
+static int mlx5e_alloc_rq_hd_pages(struct mlx5e_rq *rq, int node,
+ struct mlx5e_shampo_hd *shampo)
+{
+ int err, i;
+
+ for (i = 0; i < shampo->nentries; i++) {
+ struct page *page = alloc_pages_node(node, GFP_KERNEL, 0);
+ dma_addr_t addr;
+
+ if (!page) {
+ err = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
+ rq->buff.map_dir);
+ err = dma_mapping_error(rq->pdev, addr);
+ if (err) {
+ __free_page(page);
+ goto err_free_pages;
+ }
+
+ shampo->hd_buf_pages[i].page = page;
+ shampo->hd_buf_pages[i].addr = addr;
+ }
return 0;
-err_nomem:
- kvfree(shampo->pages);
- bitmap_free(shampo->bitmap);
+err_free_pages:
+ mlx5e_release_rq_hd_pages(rq, shampo);
- return -ENOMEM;
+ return err;
}
-static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
+static int mlx5e_create_rq_hd_mkey(struct mlx5_core_dev *mdev,
+ struct mlx5e_shampo_hd *shampo)
{
- kvfree(rq->mpwqe.shampo->pages);
- bitmap_free(rq->mpwqe.shampo->bitmap);
+ enum mlx5e_mpwrq_umr_mode umr_mode = MLX5E_MPWRQ_UMR_MODE_ALIGNED;
+ struct mlx5_mtt *mtt;
+ void *mkc, *in;
+ int inlen, err;
+ u32 octwords;
+
+ octwords = mlx5e_mpwrq_umr_octowords(shampo->nentries, umr_mode);
+ inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
+ MLX5_OCTWORD, octwords);
+ if (inlen < 0)
+ return inlen;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET64(mkc, mkc, len, shampo->hd_buf_size);
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
+ MLX5_SET(mkc, mkc, translations_octword_size, octwords);
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ octwords);
+
+ mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (int i = 0; i < shampo->nentries; i++)
+ mtt[i].ptag = cpu_to_be64(shampo->hd_buf_pages[i].addr);
+
+ err = mlx5_core_create_mkey(mdev, &shampo->mkey, in, inlen);
+
+ kvfree(in);
+ return err;
}
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
struct mlx5e_rq *rq,
- u32 *pool_size,
int node)
{
- void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
- u8 log_hd_per_page, log_hd_entry_size;
- u16 hd_per_wq, hd_per_wqe;
- u32 hd_pool_size;
- int wq_size;
- int err;
+ struct mlx5e_shampo_hd *shampo;
+ int nentries, err, shampo_sz;
+ u32 hd_per_wq, hd_buf_size;
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
return 0;
- rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
- GFP_KERNEL, node);
- if (!rq->mpwqe.shampo)
- return -ENOMEM;
-
- /* split headers data structures */
hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rqp);
- err = mlx5e_rq_shampo_hd_info_alloc(rq, hd_per_wq, node);
- if (err)
- goto err_shampo_hd_info_alloc;
-
- err = mlx5e_create_rq_hd_umr_mkey(mdev, hd_per_wq,
- &rq->mpwqe.shampo->mkey_be);
- if (err)
- goto err_umr_mkey;
-
- hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
- wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
-
- BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
- if (hd_per_wqe >= MLX5E_SHAMPO_WQ_HEADER_PER_PAGE) {
- log_hd_per_page = MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE;
- log_hd_entry_size = MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
- } else {
- log_hd_per_page = order_base_2(hd_per_wqe);
- log_hd_entry_size = order_base_2(PAGE_SIZE / hd_per_wqe);
+ hd_buf_size = hd_per_wq * BIT(MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE);
+ nentries = hd_buf_size / PAGE_SIZE;
+ if (!nentries) {
+ mlx5_core_err(mdev, "SHAMPO header buffer size %u < %lu\n",
+ hd_buf_size, PAGE_SIZE);
+ return -EINVAL;
}
- rq->mpwqe.shampo->hd_per_wqe = hd_per_wqe;
- rq->mpwqe.shampo->hd_per_page = BIT(log_hd_per_page);
- rq->mpwqe.shampo->log_hd_per_page = log_hd_per_page;
- rq->mpwqe.shampo->log_hd_entry_size = log_hd_entry_size;
-
- hd_pool_size = (hd_per_wqe * wq_size) >> log_hd_per_page;
-
- if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) {
- /* Separate page pool for shampo headers */
- struct page_pool_params pp_params = { };
+ shampo_sz = struct_size(shampo, hd_buf_pages, nentries);
+ shampo = kvzalloc_node(shampo_sz, GFP_KERNEL, node);
+ if (!shampo)
+ return -ENOMEM;
- pp_params.order = 0;
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- pp_params.pool_size = hd_pool_size;
- pp_params.nid = node;
- pp_params.dev = rq->pdev;
- pp_params.napi = rq->cq.napi;
- pp_params.netdev = rq->netdev;
- pp_params.dma_dir = rq->buff.map_dir;
- pp_params.max_len = PAGE_SIZE;
+ shampo->hd_per_wq = hd_per_wq;
+ shampo->hd_buf_size = hd_buf_size;
+ shampo->nentries = nentries;
+ err = mlx5e_alloc_rq_hd_pages(rq, node, shampo);
+ if (err)
+ goto err_free;
- rq->hd_page_pool = page_pool_create(&pp_params);
- if (IS_ERR(rq->hd_page_pool)) {
- err = PTR_ERR(rq->hd_page_pool);
- rq->hd_page_pool = NULL;
- goto err_hds_page_pool;
- }
- } else {
- /* Common page pool, reserve space for headers. */
- *pool_size += hd_pool_size;
- rq->hd_page_pool = NULL;
- }
+ err = mlx5e_create_rq_hd_mkey(mdev, shampo);
+ if (err)
+ goto err_release_pages;
/* gro only data structures */
rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
if (!rq->hw_gro_data) {
err = -ENOMEM;
- goto err_hw_gro_data;
+ goto err_destroy_mkey;
}
+ rq->mpwqe.shampo = shampo;
+
return 0;
-err_hw_gro_data:
- page_pool_destroy(rq->hd_page_pool);
-err_hds_page_pool:
- mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.shampo->mkey_be));
-err_umr_mkey:
- mlx5e_rq_shampo_hd_info_free(rq);
-err_shampo_hd_info_alloc:
- kvfree(rq->mpwqe.shampo);
+err_destroy_mkey:
+ mlx5_core_destroy_mkey(mdev, shampo->mkey);
+err_release_pages:
+ mlx5e_release_rq_hd_pages(rq, shampo);
+err_free:
+ kvfree(shampo);
+
return err;
}
static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
{
- if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+ if (!shampo)
return;
kvfree(rq->hw_gro_data);
- if (rq->hd_page_pool != rq->page_pool)
- page_pool_destroy(rq->hd_page_pool);
- mlx5e_rq_shampo_hd_info_free(rq);
- mlx5_core_destroy_mkey(rq->mdev,
- be32_to_cpu(rq->mpwqe.shampo->mkey_be));
- kvfree(rq->mpwqe.shampo);
+ mlx5_core_destroy_mkey(rq->mdev, shampo->mkey);
+ mlx5e_release_rq_hd_pages(rq, shampo);
+ kvfree(shampo);
}
static int mlx5e_alloc_rq(struct mlx5e_params *params,
@@ -970,7 +937,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (err)
goto err_rq_mkey;
- err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
+ err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, node);
if (err)
goto err_free_mpwqe_info;
@@ -1165,8 +1132,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_cou
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
MLX5_SET(wq, wq, log_headers_buffer_entry_num,
order_base_2(rq->mpwqe.shampo->hd_per_wq));
- MLX5_SET(wq, wq, headers_mkey,
- be32_to_cpu(rq->mpwqe.shampo->mkey_be));
+ MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
}
mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
@@ -1326,14 +1292,6 @@ void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
rq->mpwqe.actual_wq_head = wq->head;
rq->mpwqe.umr_in_progress = 0;
rq->mpwqe.umr_completed = 0;
-
- if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 len;
-
- len = (shampo->pi - shampo->ci) & shampo->hd_per_wq;
- mlx5e_shampo_fill_umr(rq, len);
- }
}
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
@@ -1356,9 +1314,6 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
mlx5_wq_ll_pop(wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
-
- if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
- mlx5e_shampo_dealloc_hd(rq);
} else {
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
u16 missing = mlx5_wq_cyc_missing(wq);
@@ -2075,6 +2030,8 @@ static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params
if (err)
goto err_free_icosq;
+ spin_lock_init(&sq->lock);
+
if (param->is_tls) {
sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list();
if (IS_ERR(sq->ktls_resync)) {
@@ -2587,9 +2544,51 @@ static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq);
}
+static struct mlx5e_icosq *
+mlx5e_open_async_icosq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_channel_param *cparam,
+ struct mlx5e_create_cq_param *ccp)
+{
+ struct dim_cq_moder icocq_moder = {0, 0};
+ struct mlx5e_icosq *async_icosq;
+ int err;
+
+ async_icosq = kvzalloc_node(sizeof(*async_icosq), GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!async_icosq)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, ccp,
+ &async_icosq->cq);
+ if (err)
+ goto err_free_async_icosq;
+
+ err = mlx5e_open_icosq(c, params, &cparam->async_icosq, async_icosq,
+ mlx5e_async_icosq_err_cqe_work);
+ if (err)
+ goto err_close_async_icosq_cq;
+
+ return async_icosq;
+
+err_close_async_icosq_cq:
+ mlx5e_close_cq(&async_icosq->cq);
+err_free_async_icosq:
+ kvfree(async_icosq);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_close_async_icosq(struct mlx5e_icosq *async_icosq)
+{
+ mlx5e_close_icosq(async_icosq);
+ mlx5e_close_cq(&async_icosq->cq);
+ kvfree(async_icosq);
+}
+
static int mlx5e_open_queues(struct mlx5e_channel *c,
struct mlx5e_params *params,
- struct mlx5e_channel_param *cparam)
+ struct mlx5e_channel_param *cparam,
+ bool async_icosq_needed)
{
const struct net_device_ops *netdev_ops = c->netdev->netdev_ops;
struct dim_cq_moder icocq_moder = {0, 0};
@@ -2598,15 +2597,10 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
mlx5e_build_create_cq_param(&ccp, c);
- err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, &ccp,
- &c->async_icosq.cq);
- if (err)
- return err;
-
err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->icosq.cqp, &ccp,
&c->icosq.cq);
if (err)
- goto err_close_async_icosq_cq;
+ return err;
err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
if (err)
@@ -2630,12 +2624,14 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_rx_cq;
- spin_lock_init(&c->async_icosq_lock);
-
- err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
- mlx5e_async_icosq_err_cqe_work);
- if (err)
- goto err_close_rq_xdpsq_cq;
+ if (async_icosq_needed) {
+ c->async_icosq = mlx5e_open_async_icosq(c, params, cparam,
+ &ccp);
+ if (IS_ERR(c->async_icosq)) {
+ err = PTR_ERR(c->async_icosq);
+ goto err_close_rq_xdpsq_cq;
+ }
+ }
mutex_init(&c->icosq_recovery_lock);
@@ -2671,7 +2667,8 @@ err_close_icosq:
mlx5e_close_icosq(&c->icosq);
err_close_async_icosq:
- mlx5e_close_icosq(&c->async_icosq);
+ if (c->async_icosq)
+ mlx5e_close_async_icosq(c->async_icosq);
err_close_rq_xdpsq_cq:
if (c->xdp)
@@ -2690,9 +2687,6 @@ err_close_tx_cqs:
err_close_icosq_cq:
mlx5e_close_cq(&c->icosq.cq);
-err_close_async_icosq_cq:
- mlx5e_close_cq(&c->async_icosq.cq);
-
return err;
}
@@ -2706,7 +2700,8 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
mutex_destroy(&c->icosq_recovery_lock);
- mlx5e_close_icosq(&c->async_icosq);
+ if (c->async_icosq)
+ mlx5e_close_async_icosq(c->async_icosq);
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
@@ -2714,7 +2709,6 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
mlx5e_close_xdpredirect_sq(c->xdpsq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
- mlx5e_close_cq(&c->async_icosq.cq);
}
static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
@@ -2750,9 +2744,16 @@ static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
{
- spin_lock_bh(&c->async_icosq_lock);
- mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock_bh(&c->async_icosq_lock);
+ bool locked;
+
+ if (!test_and_set_bit(MLX5E_SQ_STATE_LOCK_NEEDED, &c->icosq.state))
+ synchronize_net();
+
+ locked = mlx5e_icosq_sync_lock(&c->icosq);
+ mlx5e_trigger_irq(&c->icosq);
+ mlx5e_icosq_sync_unlock(&c->icosq, locked);
+
+ clear_bit(MLX5E_SQ_STATE_LOCK_NEEDED, &c->icosq.state);
}
void mlx5e_trigger_napi_sched(struct napi_struct *napi)
@@ -2785,6 +2786,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam;
struct mlx5_core_dev *mdev;
struct mlx5e_xsk_param xsk;
+ bool async_icosq_needed;
struct mlx5e_channel *c;
unsigned int irq;
int vec_ix;
@@ -2834,7 +2836,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix);
netif_napi_set_irq_locked(&c->napi, irq);
- err = mlx5e_open_queues(c, params, cparam);
+ async_icosq_needed = !!xsk_pool || priv->ktls_rx_was_enabled;
+ err = mlx5e_open_queues(c, params, cparam, async_icosq_needed);
if (unlikely(err))
goto err_napi_del;
@@ -2872,7 +2875,8 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_icosq(&c->icosq);
- mlx5e_activate_icosq(&c->async_icosq);
+ if (c->async_icosq)
+ mlx5e_activate_icosq(c->async_icosq);
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_activate_xsk(c);
@@ -2893,7 +2897,8 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
else
mlx5e_deactivate_rq(&c->rq);
- mlx5e_deactivate_icosq(&c->async_icosq);
+ if (c->async_icosq)
+ mlx5e_deactivate_icosq(c->async_icosq);
mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->sq[tc]);
@@ -4666,7 +4671,6 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_params new_params;
struct mlx5e_params *params;
- bool reset = true;
int err = 0;
mutex_lock(&priv->state_lock);
@@ -4692,28 +4696,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
goto out;
}
- if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
- reset = false;
-
- if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
- params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO) {
- bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
- bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
- &new_params, NULL);
- u8 sz_old = mlx5e_mpwqe_get_log_rq_size(priv->mdev, params, NULL);
- u8 sz_new = mlx5e_mpwqe_get_log_rq_size(priv->mdev, &new_params, NULL);
-
- /* Always reset in linear mode - hw_mtu is used in data path.
- * Check that the mode was non-linear and didn't change.
- * If XSK is active, XSK RQs are linear.
- * Reset if the RQ size changed, even if it's non-linear.
- */
- if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt &&
- sz_old == sz_new)
- reset = false;
- }
-
- err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset);
+ err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL,
+ true);
out:
WRITE_ONCE(netdev->mtu, params->sw_mtu);
@@ -5139,7 +5123,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
netdev_get_tx_queue(netdev, i);
struct mlx5e_txqsq *sq = priv->txq2sq[i];
- if (!netif_xmit_stopped(dev_queue))
+ if (!netif_xmit_timeout_ms(dev_queue))
continue;
if (mlx5e_reporter_tx_timeout(sq))
@@ -5598,8 +5582,9 @@ struct mlx5_qmgmt_data {
struct mlx5e_channel_param cparam;
};
-static int mlx5e_queue_mem_alloc(struct net_device *dev, void *newq,
- int queue_index)
+static int mlx5e_queue_mem_alloc(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *newq, int queue_index)
{
struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -5660,8 +5645,9 @@ static int mlx5e_queue_stop(struct net_device *dev, void *oldq, int queue_index)
return 0;
}
-static int mlx5e_queue_start(struct net_device *dev, void *newq,
- int queue_index)
+static int mlx5e_queue_start(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *newq, int queue_index)
{
struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -5800,8 +5786,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM;
- netdev->gso_partial_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ netdev->vlan_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
@@ -5815,6 +5801,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
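
A minimal userspace model of the new SHAMPO header-buffer sizing and flexible-array layout, assuming a 4096-byte PAGE_SIZE; the log entry size of 8 (256-byte headers) matches the MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE define kept in en.h above.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LOG_HEADER_ENTRY_SIZE	8
#define PAGE_SIZE		4096

struct dma_info_model {
	void *page;
	uint64_t addr;
};

struct shampo_model {
	uint32_t hd_per_wq;
	uint32_t hd_buf_size;
	uint32_t nentries;
	struct dma_info_model hd_buf_pages[];	/* flex array, as in the patch */
};

int main(void)
{
	uint32_t hd_per_wq = 1024;
	uint32_t hd_buf_size = hd_per_wq * (1U << LOG_HEADER_ENTRY_SIZE);
	uint32_t nentries = hd_buf_size / PAGE_SIZE;
	struct shampo_model *shampo;

	/* Equivalent of struct_size(shampo, hd_buf_pages, nentries). */
	shampo = calloc(1, sizeof(*shampo) +
			   nentries * sizeof(shampo->hd_buf_pages[0]));
	if (!shampo)
		return 1;
	shampo->hd_per_wq = hd_per_wq;
	shampo->hd_buf_size = hd_buf_size;
	shampo->nentries = nentries;
	/* 1024 entries x 256 bytes = 262144 bytes = 64 pages. */
	printf("buf=%u bytes, pages=%u\n",
	       (unsigned)shampo->hd_buf_size, (unsigned)shampo->nentries);
	free(shampo);
	return 0;
}
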
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1f6930c77437..efcfcddab376 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -611,165 +611,6 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
mlx5_wq_ll_update_db_record(wq);
}
-/* This function returns the size of the continuous free space inside a bitmap
- * that starts from first and no longer than len including circular ones.
- */
-static int bitmap_find_window(unsigned long *bitmap, int len,
- int bitmap_size, int first)
-{
- int next_one, count;
-
- next_one = find_next_bit(bitmap, bitmap_size, first);
- if (next_one == bitmap_size) {
- if (bitmap_size - first >= len)
- return len;
- next_one = find_next_bit(bitmap, bitmap_size, 0);
- count = next_one + bitmap_size - first;
- } else {
- count = next_one - first;
- }
-
- return min(len, count);
-}
-
-static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
- __be32 key, u16 offset, u16 ksm_len)
-{
- memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
- umr_wqe->hdr.ctrl.opmod_idx_opcode =
- cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
- MLX5_OPCODE_UMR);
- umr_wqe->hdr.ctrl.umr_mkey = key;
- umr_wqe->hdr.ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
- | MLX5E_KSM_UMR_DS_CNT(ksm_len));
- umr_wqe->hdr.uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
- umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
- umr_wqe->hdr.uctrl.xlt_octowords = cpu_to_be16(ksm_len);
- umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-}
-
-static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq,
- int header_index)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-
- return &shampo->pages[header_index >> shampo->log_hd_per_page];
-}
-
-static u64 mlx5e_shampo_hd_offset(struct mlx5e_rq *rq, int header_index)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u32 hd_per_page = shampo->hd_per_page;
-
- return (header_index & (hd_per_page - 1)) << shampo->log_hd_entry_size;
-}
-
-static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index);
-
-static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
- struct mlx5e_icosq *sq,
- u16 ksm_entries, u16 index)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 pi, header_offset, err, wqe_bbs;
- u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
- struct mlx5e_umr_wqe *umr_wqe;
- int headroom, i;
-
- headroom = rq->buff.headroom;
- wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
- pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
- umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
- build_ksm_umr(sq, umr_wqe, shampo->mkey_be, index, ksm_entries);
-
- for (i = 0; i < ksm_entries; i++, index++) {
- struct mlx5e_frag_page *frag_page;
- u64 addr;
-
- frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
- header_offset = mlx5e_shampo_hd_offset(rq, index);
- if (!header_offset) {
- err = mlx5e_page_alloc_fragmented(rq->hd_page_pool,
- frag_page);
- if (err)
- goto err_unmap;
- }
-
- addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
- umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
- .key = cpu_to_be32(lkey),
- .va = cpu_to_be64(addr + header_offset + headroom),
- };
- }
-
- sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
- .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
- .num_wqebbs = wqe_bbs,
- .shampo.len = ksm_entries,
- };
-
- shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1);
- sq->pc += wqe_bbs;
- sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
-
- return 0;
-
-err_unmap:
- while (--i >= 0) {
- --index;
- header_offset = mlx5e_shampo_hd_offset(rq, index);
- if (!header_offset) {
- struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
-
- mlx5e_page_release_fragmented(rq->hd_page_pool,
- frag_page);
- }
- }
-
- rq->stats->buff_alloc_err++;
- return err;
-}
-
-static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 ksm_entries, num_wqe, index, entries_before;
- struct mlx5e_icosq *sq = rq->icosq;
- int i, err, max_ksm_entries, len;
-
- max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev);
- ksm_entries = bitmap_find_window(shampo->bitmap,
- shampo->hd_per_wqe,
- shampo->hd_per_wq, shampo->pi);
- ksm_entries = ALIGN_DOWN(ksm_entries, shampo->hd_per_page);
- if (!ksm_entries)
- return 0;
-
- /* pi is aligned to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE */
- index = shampo->pi;
- entries_before = shampo->hd_per_wq - index;
-
- if (unlikely(entries_before < ksm_entries))
- num_wqe = DIV_ROUND_UP(entries_before, max_ksm_entries) +
- DIV_ROUND_UP(ksm_entries - entries_before, max_ksm_entries);
- else
- num_wqe = DIV_ROUND_UP(ksm_entries, max_ksm_entries);
-
- for (i = 0; i < num_wqe; i++) {
- len = (ksm_entries > max_ksm_entries) ? max_ksm_entries :
- ksm_entries;
- if (unlikely(index + len > shampo->hd_per_wq))
- len = shampo->hd_per_wq - index;
- err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
- if (unlikely(err))
- return err;
- index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
- ksm_entries -= len;
- }
-
- return 0;
-}
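
The WQE-count split in the removed mlx5e_alloc_rx_hd_mpwqe() rounds each side of a wrap-around separately; a standalone arithmetic check with illustrative numbers:

#include <stdio.h>

/* Arithmetic from the removed function: when the KSM range wraps the
 * circular header WQ, the UMR posts are split at the wrap point, so
 * each side is rounded up separately.
 */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int hd_per_wq = 1024, pi = 1000;	/* 24 entries before the wrap */
	int ksm_entries = 64, max_per_wqe = 32;
	int before = hd_per_wq - pi;
	int num_wqe;

	if (before < ksm_entries)
		num_wqe = DIV_ROUND_UP(before, max_per_wqe) +
			  DIV_ROUND_UP(ksm_entries - before, max_per_wqe);
	else
		num_wqe = DIV_ROUND_UP(ksm_entries, max_per_wqe);

	printf("num_wqe = %d\n", num_wqe); /* 1 + 2 = 3 */
	return 0;
}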
-
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
@@ -778,16 +619,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u32 offset; /* 17-bit value with MTT. */
+ bool sync_locked;
u16 pi;
int err;
int i;
- if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
- err = mlx5e_alloc_rx_hd_mpwqe(rq);
- if (unlikely(err))
- goto err;
- }
-
+ sync_locked = mlx5e_icosq_sync_lock(sq);
pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
@@ -835,12 +672,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
};
sq->pc += rq->mpwqe.umr_wqebbs;
+ mlx5e_icosq_sync_unlock(sq, sync_locked);
sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
err_unmap:
+ mlx5e_icosq_sync_unlock(sq, sync_locked);
while (--i >= 0) {
frag_page--;
mlx5e_page_release_fragmented(rq->page_pool, frag_page);
@@ -848,34 +687,11 @@ err_unmap:
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
-err:
rq->stats->buff_alloc_err++;
return err;
}
-static void
-mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-
- if (((header_index + 1) & (shampo->hd_per_page - 1)) == 0) {
- struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
-
- mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page);
- }
- clear_bit(header_index, shampo->bitmap);
-}
-
-void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- int i;
-
- for_each_set_bit(i, shampo->bitmap, rq->mpwqe.shampo->hd_per_wq)
- mlx5e_free_rx_shampo_hd_entry(rq, i);
-}
-
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
@@ -968,33 +784,6 @@ void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
sq->cc = sqcc;
}
-void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- int end, from, full_len = len;
-
- end = shampo->hd_per_wq;
- from = shampo->ci;
- if (from + len > end) {
- len -= end - from;
- bitmap_set(shampo->bitmap, from, end - from);
- from = 0;
- }
-
- bitmap_set(shampo->bitmap, from, len);
- shampo->ci = (shampo->ci + full_len) & (shampo->hd_per_wq - 1);
-}
-
-static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
- struct mlx5e_icosq *sq)
-{
- struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
- /* assume 1:1 relationship between RQ and icosq */
- struct mlx5e_rq *rq = &c->rq;
-
- mlx5e_shampo_fill_umr(rq, umr.len);
-}
-
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
@@ -1055,9 +844,6 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
break;
case MLX5E_ICOSQ_WQE_NOP:
break;
- case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR:
- mlx5e_handle_shampo_hd_umr(wi->shampo, sq);
- break;
#ifdef CONFIG_MLX5_EN_TLS
case MLX5E_ICOSQ_WQE_UMR_TLS:
break;
@@ -1083,11 +869,24 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
return i;
}
+static void mlx5e_reclaim_mpwqe_pages(struct mlx5e_rq *rq, int head,
+ int reclaim)
+{
+ struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
+
+ for (int i = 0; i < reclaim; i++) {
+ head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
+
+ mlx5e_dealloc_rx_mpwqe(rq, head);
+ }
+}
+
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
u8 umr_completed = rq->mpwqe.umr_completed;
struct mlx5e_icosq *sq = rq->icosq;
+ bool reclaimed = false;
int alloc_err = 0;
u8 missing, i;
u16 head;
@@ -1122,11 +921,20 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
/* Deferred free for better page pool cache usage. */
mlx5e_free_rx_mpwqe(rq, wi);
+retry:
alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
mlx5e_alloc_rx_mpwqe(rq, head);
+ if (unlikely(alloc_err)) {
+ int reclaim = i - 1;
- if (unlikely(alloc_err))
- break;
+ if (reclaimed || !reclaim)
+ break;
+
+ mlx5e_reclaim_mpwqe_pages(rq, head, reclaim);
+ reclaimed = true;
+
+ goto retry;
+ }
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
} while (--i);
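
The retry flow added here can be modeled in isolation; the sketch below reproduces only the control flow (fail, reclaim the still-missing entries, retry exactly once), with the allocation and page accounting stubbed out:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Control-flow model of the retry added to mlx5e_post_rx_mpwqes():
 * on an allocation failure, reclaim pages from the WQEs still awaiting
 * refill and retry once, instead of giving up immediately.
 */
static bool alloc_wqe(int attempt)
{
	/* Pretend the first attempt fails under memory pressure. */
	return attempt > 0;
}

static int post_wqes(int missing)
{
	bool reclaimed = false;
	int attempt = 0;

	for (int i = missing; i; i--) {
retry:
		if (!alloc_wqe(attempt++)) {
			int reclaim = i - 1; /* entries still awaiting refill */

			if (reclaimed || !reclaim)
				return -1; /* already retried, or nothing to free */

			printf("reclaiming %d WQEs, retrying\n", reclaim);
			reclaimed = true;
			goto retry;
		}
	}
	return 0;
}

int main(void)
{
	return post_wqes(4) ? EXIT_FAILURE : EXIT_SUCCESS;
}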
@@ -1223,15 +1031,6 @@ static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
}
-static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
-{
- struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
- void *addr = netmem_address(frag_page->netmem);
-
- return addr + head_offset + rq->buff.headroom;
-}
-
static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
{
int udp_off = rq->hw_gro_data->fk.control.thoff;
@@ -1270,15 +1069,46 @@ static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr
skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
}
+static void mlx5e_shampo_get_hd_buf_info(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_dma_info **di,
+ u32 *head_offset)
+{
+ u32 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u32 di_index;
+
+ di_index = header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE;
+ *di = &shampo->hd_buf_pages[di_index];
+ *head_offset = (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) *
+ BIT(MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE);
+}
+
+static void *mlx5e_shampo_get_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ int len)
+{
+ struct mlx5e_dma_info *di;
+ u32 head_offset;
+
+ mlx5e_shampo_get_hd_buf_info(rq, cqe, &di, &head_offset);
+
+ dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
+ len, rq->buff.map_dir);
+
+ return page_address(di->page) + head_offset;
+}
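
The index arithmetic in mlx5e_shampo_get_hd_buf_info() maps a header index to a page and a byte offset; a worked example, with assumed values for the two log constants (their real definitions are not part of this hunk):

#include <stdio.h>

/* Worked example of the header-buffer indexing. The two log constants
 * below are assumed values for illustration; the real MLX5E_SHAMPO_*
 * definitions are not shown in this diff.
 */
#define LOG_WQ_HEADER_PER_PAGE	6	/* 64 headers per page (assumed) */
#define WQ_HEADER_PER_PAGE	(1 << LOG_WQ_HEADER_PER_PAGE)
#define LOG_HEADER_ENTRY_SIZE	6	/* 64-byte entries (assumed) */

int main(void)
{
	unsigned int header_index = 197;
	unsigned int di_index = header_index >> LOG_WQ_HEADER_PER_PAGE;
	unsigned int head_offset =
		(header_index & (WQ_HEADER_PER_PAGE - 1)) << LOG_HEADER_ENTRY_SIZE;

	/* 197 = 3 * 64 + 5, so page 3 at byte offset 5 * 64 = 320 */
	printf("page %u, offset %u\n", di_index, head_offset);
	return 0;
}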
+
static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct tcphdr *skb_tcp_hd)
{
- u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
+ int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
+ int len = nhoff + sizeof(struct tcphdr);
struct tcphdr *last_tcp_hd;
void *last_hd_addr;
- last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
- last_tcp_hd = last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
+ last_hd_addr = mlx5e_shampo_get_hdr(rq, cqe, len);
+ last_tcp_hd = (struct tcphdr *)(last_hd_addr + nhoff);
+
tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
}
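
The flag merge above copies only FIN and PSH from the last coalesced segment into the aggregate header; the same masking in isolation (flag values are stand-ins, not the kernel's TCP_FLAG_* word encoding):

#include <stdint.h>
#include <stdio.h>

/* Model of the FIN/PSH merge: only those two flags are propagated
 * from the last coalesced segment into the aggregated header.
 */
#define TCP_FLAG_FIN 0x01u
#define TCP_FLAG_PSH 0x08u
#define TCP_FLAG_ACK 0x10u

int main(void)
{
	uint32_t agg_flags = TCP_FLAG_ACK;
	uint32_t last_flags = TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_FIN;

	agg_flags |= last_flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
	printf("flags: %#x\n", agg_flags); /* 0x19: ACK|PSH|FIN */
	return 0;
}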
@@ -1570,7 +1400,7 @@ static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
struct sk_buff *skb)
{
- u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+ u8 lro_num_seg = get_cqe_lro_num_seg(cqe);
struct mlx5e_rq_stats *stats = rq->stats;
struct net_device *netdev = rq->netdev;
@@ -2054,6 +1884,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
u16 linear_hr;
void *va;
+ if (unlikely(cqe_bcnt > rq->hw_mtu)) {
+ u8 lro_num_seg = get_cqe_lro_num_seg(cqe);
+
+ if (lro_num_seg <= 1) {
+ rq->stats->oversize_pkts_sw_drop++;
+ return NULL;
+ }
+ }
+
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
@@ -2268,52 +2107,25 @@ static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
- struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 head_size = cqe->shampo.header_size;
- u16 rx_headroom = rq->buff.headroom;
- struct sk_buff *skb = NULL;
- dma_addr_t page_dma_addr;
- dma_addr_t dma_addr;
- void *hdr, *data;
- u32 frag_size;
-
- page_dma_addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
- dma_addr = page_dma_addr + head_offset;
-
- hdr = netmem_address(frag_page->netmem) + head_offset;
- data = hdr + rx_headroom;
- frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
+ struct mlx5e_dma_info *di;
+ struct sk_buff *skb;
+ u32 head_offset;
+ int len;
- if (likely(frag_size <= BIT(shampo->log_hd_entry_size))) {
- /* build SKB around header */
- dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir);
- net_prefetchw(hdr);
- net_prefetch(data);
- skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
- if (unlikely(!skb))
- return NULL;
+ len = ALIGN(head_size, sizeof(long));
+ skb = napi_alloc_skb(rq->cq.napi, len);
+ if (unlikely(!skb)) {
+ rq->stats->buff_alloc_err++;
+ return NULL;
+ }
- frag_page->frags++;
- } else {
- /* allocate SKB and copy header for large header */
- rq->stats->gro_large_hds++;
- skb = napi_alloc_skb(rq->cq.napi,
- ALIGN(head_size, sizeof(long)));
- if (unlikely(!skb)) {
- rq->stats->buff_alloc_err++;
- return NULL;
- }
+ net_prefetchw(skb->data);
- net_prefetchw(skb->data);
- mlx5e_copy_skb_header(rq, skb, frag_page->netmem, dma_addr,
- head_offset + rx_headroom,
- rx_headroom, head_size);
- /* skb linear part was allocated with headlen and aligned to long */
- skb->tail += head_size;
- skb->len += head_size;
- }
+ mlx5e_shampo_get_hd_buf_info(rq, cqe, &di, &head_offset);
+ mlx5e_copy_skb_header(rq, skb, page_to_netmem(di->page), di->addr,
+ head_offset, head_offset, len);
+ __skb_put(skb, head_size);
/* queue up for recycling/reuse */
skb_mark_for_recycle(skb);
@@ -2414,7 +2226,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
* prevent the kernel from touching it.
*/
if (unlikely(netmem_is_net_iov(frag_page->netmem)))
- goto free_hd_entry;
+ goto mpwrq_cqe_out;
*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe,
cqe_bcnt,
data_offset,
@@ -2422,19 +2234,22 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (unlikely(!*skb))
- goto free_hd_entry;
+ goto mpwrq_cqe_out;
NAPI_GRO_CB(*skb)->count = 1;
skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
} else {
NAPI_GRO_CB(*skb)->count++;
+
if (NAPI_GRO_CB(*skb)->count == 2 &&
rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
- void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
- int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
- sizeof(struct iphdr);
- struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);
+ int len = ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
+ int nhoff = len - sizeof(struct iphdr);
+ void *last_hd_addr;
+ struct iphdr *iph;
+ last_hd_addr = mlx5e_shampo_get_hdr(rq, cqe, len);
+ iph = (struct iphdr *)(last_hd_addr + nhoff);
rq->hw_gro_data->second_ip_id = ntohs(iph->id);
}
}
@@ -2456,13 +2271,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
*skb = NULL;
- goto free_hd_entry;
+ goto mpwrq_cqe_out;
}
if (flush && rq->hw_gro_data->skb)
mlx5e_shampo_flush_skb(rq, cqe, match);
-free_hd_entry:
- if (likely(head_size))
- mlx5e_free_rx_shampo_hd_entry(rq, header_index);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index a01ee656a1e7..9f0272649fa1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -152,12 +152,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
* to inline later in the transmit descriptor
*/
static inline u16
-mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
+mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
struct mlx5e_sq_stats *stats = sq->stats;
u16 ihs;
- *hopbyhop = 0;
if (skb->encapsulation) {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
ihs = skb_inner_transport_offset(skb) +
@@ -167,17 +166,12 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
- } else {
+ else
ihs = skb_tcp_all_headers(skb);
- if (ipv6_has_hopopt_jumbo(skb)) {
- *hopbyhop = sizeof(struct hop_jumbo_hdr);
- ihs -= sizeof(struct hop_jumbo_hdr);
- }
- }
stats->tso_packets++;
- stats->tso_bytes += skb->len - ihs - *hopbyhop;
+ stats->tso_bytes += skb->len - ihs;
}
return ihs;
@@ -239,7 +233,6 @@ struct mlx5e_tx_attr {
__be16 mss;
u16 insz;
u8 opcode;
- u8 hopbyhop;
};
struct mlx5e_tx_wqe_attr {
@@ -275,16 +268,14 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_sq_stats *stats = sq->stats;
if (skb_is_gso(skb)) {
- int hopbyhop;
- u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);
+ u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);
*attr = (struct mlx5e_tx_attr) {
.opcode = MLX5_OPCODE_LSO,
.mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
.ihs = ihs,
.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
- .headlen = skb_headlen(skb) - ihs - hopbyhop,
- .hopbyhop = hopbyhop,
+ .headlen = skb_headlen(skb) - ihs,
};
stats->packets += skb_shinfo(skb)->gso_segs;
@@ -439,7 +430,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_data_seg *dseg;
struct mlx5e_tx_wqe_info *wi;
u16 ihs = attr->ihs;
- struct ipv6hdr *h6;
struct mlx5e_sq_stats *stats = sq->stats;
int num_dma;
@@ -456,28 +446,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (ihs) {
u8 *start = eseg->inline_hdr.start;
- if (unlikely(attr->hopbyhop)) {
- /* remove the HBH header.
- * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
- */
- if (skb_vlan_tag_present(skb)) {
- mlx5e_insert_vlan(start, skb, ETH_HLEN + sizeof(*h6));
- ihs += VLAN_HLEN;
- h6 = (struct ipv6hdr *)(start + sizeof(struct vlan_ethhdr));
- } else {
- unsafe_memcpy(start, skb->data,
- ETH_HLEN + sizeof(*h6),
- MLX5_UNSAFE_MEMCPY_DISCLAIMER);
- h6 = (struct ipv6hdr *)(start + ETH_HLEN);
- }
- h6->nexthdr = IPPROTO_TCP;
- /* Copy the TCP header after the IPv6 one */
- memcpy(h6 + 1,
- skb->data + ETH_HLEN + sizeof(*h6) +
- sizeof(struct hop_jumbo_hdr),
- tcp_hdrlen(skb));
- /* Leave the IPv6 payload_len set to 0, as the LSO v2 spec requires. */
- } else if (skb_vlan_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
mlx5e_insert_vlan(start, skb, ihs);
ihs += VLAN_HLEN;
stats->added_vlan_packets++;
@@ -491,7 +460,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
dseg += wqe_attr->ds_cnt_ids;
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop,
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
attr->headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
@@ -1019,34 +988,14 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->mss = attr.mss;
if (attr.ihs) {
- if (unlikely(attr.hopbyhop)) {
- struct ipv6hdr *h6;
-
- /* remove the HBH header.
- * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
- */
- unsafe_memcpy(eseg->inline_hdr.start, skb->data,
- ETH_HLEN + sizeof(*h6),
- MLX5_UNSAFE_MEMCPY_DISCLAIMER);
- h6 = (struct ipv6hdr *)((char *)eseg->inline_hdr.start + ETH_HLEN);
- h6->nexthdr = IPPROTO_TCP;
- /* Copy the TCP header after the IPv6 one */
- unsafe_memcpy(h6 + 1,
- skb->data + ETH_HLEN + sizeof(*h6) +
- sizeof(struct hop_jumbo_hdr),
- tcp_hdrlen(skb),
- MLX5_UNSAFE_MEMCPY_DISCLAIMER);
- /* Leave the IPv6 payload_len set to 0, as the LSO v2 spec requires. */
- } else {
- unsafe_memcpy(eseg->inline_hdr.start, skb->data,
- attr.ihs,
- MLX5_UNSAFE_MEMCPY_DISCLAIMER);
- }
+ unsafe_memcpy(eseg->inline_hdr.start, skb->data,
+ attr.ihs,
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
dseg += wqe_attr.ds_cnt_inl;
}
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop,
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
attr.headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 76108299ea57..b31f689fe271 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -125,6 +125,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
+ struct mlx5e_icosq *aicosq = c->async_icosq;
struct mlx5e_ch_stats *ch_stats = c->stats;
struct mlx5e_xdpsq *xsksq = &c->xsksq;
struct mlx5e_txqsq __rcu **qos_sqs;
@@ -180,15 +181,18 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
busy |= work_done == budget;
mlx5e_poll_ico_cq(&c->icosq.cq);
- if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
- /* Don't clear the flag if nothing was polled to prevent
- * queueing more WQEs and overflowing the async ICOSQ.
- */
- clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);
-
- /* Keep after async ICOSQ CQ poll */
- if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
- busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);
+ if (aicosq) {
+ if (mlx5e_poll_ico_cq(&aicosq->cq))
+ /* Don't clear the flag if nothing was polled to prevent
+ * queueing more WQEs and overflowing the async ICOSQ.
+ */
+ clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX,
+ &aicosq->state);
+
+ /* Keep after async ICOSQ CQ poll */
+ if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
+ busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);
+ }
busy |= INDIRECT_CALL_2(rq->post_wqes,
mlx5e_post_rx_mpwqes,
@@ -236,16 +240,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_cq_arm(&rq->cq);
mlx5e_cq_arm(&c->icosq.cq);
- mlx5e_cq_arm(&c->async_icosq.cq);
+ if (aicosq) {
+ mlx5e_cq_arm(&aicosq->cq);
+ if (xsk_open) {
+ mlx5e_handle_rx_dim(xskrq);
+ mlx5e_cq_arm(&xsksq->cq);
+ mlx5e_cq_arm(&xskrq->cq);
+ }
+ }
if (c->xdpsq)
mlx5e_cq_arm(&c->xdpsq->cq);
- if (xsk_open) {
- mlx5e_handle_rx_dim(xskrq);
- mlx5e_cq_arm(&xsksq->cq);
- mlx5e_cq_arm(&xskrq->cq);
- }
-
if (unlikely(aff_change && busy_xsk)) {
mlx5e_trigger_irq(&c->icosq);
ch_stats->force_irq++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 89a58dee50b3..cd60bc500ec5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -99,6 +99,8 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
.port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
.port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
.port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
+ .port_fn_state_get = mlx5_devlink_pf_port_fn_state_get,
+ .port_fn_state_set = mlx5_devlink_pf_port_fn_state_set,
#ifdef CONFIG_XFRM_OFFLOAD
.port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
.port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 4b7a1ce7f406..5fbfabe28bdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1304,24 +1304,52 @@ vf_err:
return err;
}
-static int host_pf_enable_hca(struct mlx5_core_dev *dev)
+int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev)
{
- if (!mlx5_core_is_ecpf(dev))
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_vport *vport;
+ int err;
+
+ if (!mlx5_core_is_ecpf(dev) || !mlx5_esw_allowed(esw))
return 0;
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
/* Once the vport and representor are ready, take the external host PF
* out of its initializing state. Enabling the HCA clears the
* iser->initializing bit so that host PF driver loading can progress.
*/
- return mlx5_cmd_host_pf_enable_hca(dev);
+ err = mlx5_cmd_host_pf_enable_hca(dev);
+ if (err)
+ return err;
+
+ vport->pf_activated = true;
+
+ return 0;
}
-static void host_pf_disable_hca(struct mlx5_core_dev *dev)
+int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev)
{
- if (!mlx5_core_is_ecpf(dev))
- return;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_vport *vport;
+ int err;
- mlx5_cmd_host_pf_disable_hca(dev);
+ if (!mlx5_core_is_ecpf(dev) || !mlx5_esw_allowed(esw))
+ return 0;
+
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ err = mlx5_cmd_host_pf_disable_hca(dev);
+ if (err)
+ return err;
+
+ vport->pf_activated = false;
+
+ return 0;
}
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
@@ -1347,7 +1375,7 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
if (mlx5_esw_host_functions_enabled(esw->dev)) {
/* Enable external host PF HCA */
- ret = host_pf_enable_hca(esw->dev);
+ ret = mlx5_esw_host_pf_enable_hca(esw->dev);
if (ret)
goto pf_hca_err;
}
@@ -1391,7 +1419,7 @@ ec_vf_err:
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
if (mlx5_esw_host_functions_enabled(esw->dev))
- host_pf_disable_hca(esw->dev);
+ mlx5_esw_host_pf_disable_hca(esw->dev);
pf_hca_err:
if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
@@ -1416,7 +1444,7 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
}
if (mlx5_esw_host_functions_enabled(esw->dev))
- host_pf_disable_hca(esw->dev);
+ mlx5_esw_host_pf_disable_hca(esw->dev);
if ((mlx5_core_is_ecpf_esw_manager(esw->dev) ||
esw->mode == MLX5_ESWITCH_LEGACY) &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 714ad28e8445..6841caef02d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -243,6 +243,7 @@ struct mlx5_vport {
u16 vport;
bool enabled;
bool max_eqs_set;
+ bool pf_activated;
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct mlx5_devlink_port *dl_port;
@@ -587,6 +588,13 @@ int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enab
struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
+int mlx5_devlink_pf_port_fn_state_get(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_pf_port_fn_state_set(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack);
#ifdef CONFIG_XFRM_OFFLOAD
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
struct netlink_ext_ack *extack);
@@ -634,6 +642,8 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1);
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
+int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev);
+int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev);
void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw);
void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 02b7e474586d..1b439cef3719 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -4696,6 +4696,61 @@ out:
return err;
}
+int mlx5_devlink_pf_port_fn_state_get(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ const u32 *query_out;
+ bool pf_disabled;
+
+ if (vport->vport != MLX5_VPORT_PF) {
+ NL_SET_ERR_MSG_MOD(extack, "State get is not supported for VF");
+ return -EOPNOTSUPP;
+ }
+
+ *state = vport->pf_activated ?
+ DEVLINK_PORT_FN_STATE_ACTIVE : DEVLINK_PORT_FN_STATE_INACTIVE;
+
+ query_out = mlx5_esw_query_functions(vport->dev);
+ if (IS_ERR(query_out))
+ return PTR_ERR(query_out);
+
+ pf_disabled = MLX5_GET(query_esw_functions_out, query_out,
+ host_params_context.host_pf_disabled);
+
+ *opstate = pf_disabled ? DEVLINK_PORT_FN_OPSTATE_DETACHED :
+ DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+
+ kvfree(query_out);
+ return 0;
+}
+
+int mlx5_devlink_pf_port_fn_state_set(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ struct mlx5_core_dev *dev;
+
+ if (vport->vport != MLX5_VPORT_PF) {
+ NL_SET_ERR_MSG_MOD(extack, "State set is not supported for VF");
+ return -EOPNOTSUPP;
+ }
+
+ dev = vport->dev;
+
+ switch (state) {
+ case DEVLINK_PORT_FN_STATE_ACTIVE:
+ return mlx5_esw_host_pf_enable_hca(dev);
+ case DEVLINK_PORT_FN_STATE_INACTIVE:
+ return mlx5_esw_host_pf_disable_hca(dev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
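
With these ops wired up, the PF function state should be controllable through the standard devlink interface, presumably with the same iproute2 syntax already used for SF ports, e.g. "devlink port function set pci/0000:08:00.0/1 state active" (the device and port index here are purely illustrative).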
+
int
mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *esw_attr, int attr_idx)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 1c6591425260..dbaf33b537f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -308,7 +308,8 @@ struct mlx5_flow_root_namespace {
};
enum mlx5_fc_type {
- MLX5_FC_TYPE_ACQUIRED = 0,
+ MLX5_FC_TYPE_POOL_ACQUIRED = 0,
+ MLX5_FC_TYPE_SINGLE,
MLX5_FC_TYPE_LOCAL,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 83001eda3884..fe7caa910219 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -153,6 +153,7 @@ static void mlx5_fc_stats_query_all_counters(struct mlx5_core_dev *dev)
static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
mlx5_cmd_fc_free(dev, counter->id);
+ kfree(counter->bulk);
kfree(counter);
}
@@ -163,7 +164,7 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
return;
- if (counter->bulk)
+ if (counter->type == MLX5_FC_TYPE_POOL_ACQUIRED)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
else
mlx5_fc_free(dev, counter);
@@ -220,8 +221,16 @@ static void mlx5_fc_stats_work(struct work_struct *work)
mlx5_fc_stats_query_all_counters(dev);
}
+static void mlx5_fc_bulk_init(struct mlx5_fc_bulk *fc_bulk, u32 base_id)
+{
+ fc_bulk->base_id = base_id;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
+}
+
static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
+ struct mlx5_fc_bulk *fc_bulk;
struct mlx5_fc *counter;
int err;
@@ -229,13 +238,26 @@ static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
if (!counter)
return ERR_PTR(-ENOMEM);
- err = mlx5_cmd_fc_alloc(dev, &counter->id);
- if (err) {
- kfree(counter);
- return ERR_PTR(err);
+ fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
+ if (!fc_bulk) {
+ err = -ENOMEM;
+ goto free_counter;
}
+ err = mlx5_cmd_fc_alloc(dev, &counter->id);
+ if (err)
+ goto free_bulk;
+ counter->type = MLX5_FC_TYPE_SINGLE;
+ mlx5_fs_bulk_init(&fc_bulk->fs_bulk, 1);
+ mlx5_fc_bulk_init(fc_bulk, counter->id);
+ counter->bulk = fc_bulk;
return counter;
+
+free_bulk:
+ kfree(fc_bulk);
+free_counter:
+ kfree(counter);
+ return ERR_PTR(err);
}
static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
@@ -442,17 +464,18 @@ static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
if (!fc_bulk)
return NULL;
- if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len))
+ mlx5_fs_bulk_init(&fc_bulk->fs_bulk, bulk_len);
+
+ if (mlx5_fs_bulk_bitmap_alloc(dev, &fc_bulk->fs_bulk))
goto fc_bulk_free;
if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id))
goto fs_bulk_cleanup;
- fc_bulk->base_id = base_id;
+
+ mlx5_fc_bulk_init(fc_bulk, base_id);
for (i = 0; i < bulk_len; i++)
mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);
- refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
- mutex_init(&fc_bulk->hws_data.lock);
return &fc_bulk->fs_bulk;
fs_bulk_cleanup:
@@ -560,10 +583,8 @@ mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
counter->type = MLX5_FC_TYPE_LOCAL;
counter->id = counter_id;
- fc_bulk->base_id = counter_id - offset;
- fc_bulk->fs_bulk.bulk_len = bulk_size;
- refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
- mutex_init(&fc_bulk->hws_data.lock);
+ mlx5_fs_bulk_init(&fc_bulk->fs_bulk, bulk_size);
+ mlx5_fc_bulk_init(fc_bulk, counter_id - offset);
counter->bulk = fc_bulk;
refcount_set(&counter->fc_local_refcount, 1);
return counter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
index f6c226664602..faa519254316 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
@@ -4,23 +4,27 @@
#include <mlx5_core.h>
#include "fs_pool.h"
-int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
- int bulk_len)
+int mlx5_fs_bulk_bitmap_alloc(struct mlx5_core_dev *dev,
+ struct mlx5_fs_bulk *fs_bulk)
{
int i;
- fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
- GFP_KERNEL);
+ fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(fs_bulk->bulk_len),
+ sizeof(unsigned long), GFP_KERNEL);
if (!fs_bulk->bitmask)
return -ENOMEM;
- fs_bulk->bulk_len = bulk_len;
- for (i = 0; i < bulk_len; i++)
+ for (i = 0; i < fs_bulk->bulk_len; i++)
set_bit(i, fs_bulk->bitmask);
return 0;
}
+void mlx5_fs_bulk_init(struct mlx5_fs_bulk *fs_bulk, int bulk_len)
+{
+ fs_bulk->bulk_len = bulk_len;
+}
+
void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk)
{
kvfree(fs_bulk->bitmask);
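
The split leaves callers with a two-step pattern: record the length first, then allocate the bitmap from the stored length. A simplified userspace model of the new contract (kernel types and kvcalloc replaced; not the driver's code):

#include <stdlib.h>
#include <string.h>

/* Model of the split init: fs_bulk_init() only records bulk_len, and
 * the bitmap allocation reads the stored length instead of taking it
 * as a parameter, so init must run first.
 */
struct fs_bulk {
	int bulk_len;
	unsigned char *bitmask; /* one byte per entry for simplicity */
};

static void fs_bulk_init(struct fs_bulk *b, int bulk_len)
{
	b->bulk_len = bulk_len;
}

static int fs_bulk_bitmap_alloc(struct fs_bulk *b)
{
	b->bitmask = malloc(b->bulk_len);
	if (!b->bitmask)
		return -1;
	memset(b->bitmask, 1, b->bulk_len); /* all entries start free */
	return 0;
}

int main(void)
{
	struct fs_bulk b;

	/* Init first so the length is in place before allocating. */
	fs_bulk_init(&b, 128);
	if (fs_bulk_bitmap_alloc(&b))
		return 1;
	free(b.bitmask);
	return 0;
}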
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
index f04ec3107498..4deb66479d16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
@@ -39,8 +39,9 @@ struct mlx5_fs_pool {
int threshold;
};
-int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
- int bulk_len);
+void mlx5_fs_bulk_init(struct mlx5_fs_bulk *fs_bulk, int bulk_len);
+int mlx5_fs_bulk_bitmap_alloc(struct mlx5_core_dev *dev,
+ struct mlx5_fs_bulk *fs_bulk);
void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk);
int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index a459a30f36ca..9fe47c836ebd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -233,14 +233,25 @@ static void mlx5_ldev_free(struct kref *ref)
{
struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);
struct net *net;
+ int i;
if (ldev->nb.notifier_call) {
net = read_pnet(&ldev->net);
unregister_netdevice_notifier_net(net, &ldev->nb);
}
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].dev &&
+ ldev->pf[i].port_change_nb.nb.notifier_call) {
+ struct mlx5_nb *nb = &ldev->pf[i].port_change_nb;
+
+ mlx5_eq_notifier_unregister(ldev->pf[i].dev, nb);
+ }
+ }
+
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
+ cancel_work_sync(&ldev->speed_update_work);
destroy_workqueue(ldev->wq);
mutex_destroy(&ldev->lock);
kfree(ldev);
@@ -274,6 +285,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
kref_init(&ldev->ref);
mutex_init(&ldev->lock);
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+ INIT_WORK(&ldev->speed_update_work, mlx5_mpesw_speed_update_work);
ldev->nb.notifier_call = mlx5_lag_netdev_event;
write_pnet(&ldev->net, mlx5_core_net(dev));
@@ -996,6 +1008,137 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
ldev->mode != MLX5_LAG_MODE_MPESW;
}
+#ifdef CONFIG_MLX5_ESWITCH
+static int
+mlx5_lag_sum_devices_speed(struct mlx5_lag *ldev, u32 *sum_speed,
+ int (*get_speed)(struct mlx5_core_dev *, u32 *))
+{
+ struct mlx5_core_dev *pf_mdev;
+ int pf_idx;
+ u32 speed;
+ int ret;
+
+ *sum_speed = 0;
+ mlx5_ldev_for_each(pf_idx, 0, ldev) {
+ pf_mdev = ldev->pf[pf_idx].dev;
+ if (!pf_mdev)
+ continue;
+
+ ret = get_speed(pf_mdev, &speed);
+ if (ret) {
+ mlx5_core_dbg(pf_mdev,
+ "Failed to get device speed using %ps. Device %s speed is not available (err=%d)\n",
+ get_speed, dev_name(pf_mdev->device),
+ ret);
+ return ret;
+ }
+
+ *sum_speed += speed;
+ }
+
+ return 0;
+}
+
+static int mlx5_lag_sum_devices_max_speed(struct mlx5_lag *ldev, u32 *max_speed)
+{
+ return mlx5_lag_sum_devices_speed(ldev, max_speed,
+ mlx5_port_max_linkspeed);
+}
+
+static int mlx5_lag_sum_devices_oper_speed(struct mlx5_lag *ldev,
+ u32 *oper_speed)
+{
+ return mlx5_lag_sum_devices_speed(ldev, oper_speed,
+ mlx5_port_oper_linkspeed);
+}
+
+static void mlx5_lag_modify_device_vports_speed(struct mlx5_core_dev *mdev,
+ u32 speed)
+{
+ u16 op_mod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_vport *vport;
+ unsigned long i;
+ int ret;
+
+ if (!esw)
+ return;
+
+ if (!MLX5_CAP_ESW(mdev, esw_vport_state_max_tx_speed))
+ return;
+
+ mlx5_esw_for_each_vport(esw, i, vport) {
+ if (!vport)
+ continue;
+
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ continue;
+
+ ret = mlx5_modify_vport_max_tx_speed(mdev, op_mod,
+ vport->vport, true, speed);
+ if (ret)
+ mlx5_core_dbg(mdev,
+ "Failed to set vport %d speed %d, err=%d\n",
+ vport->vport, speed, ret);
+ }
+}
+
+void mlx5_lag_set_vports_agg_speed(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *mdev;
+ u32 speed;
+ int pf_idx;
+
+ if (ldev->mode == MLX5_LAG_MODE_MPESW) {
+ if (mlx5_lag_sum_devices_oper_speed(ldev, &speed))
+ return;
+ } else {
+ speed = ldev->tracker.bond_speed_mbps;
+ if (speed == SPEED_UNKNOWN)
+ return;
+ }
+
+ /* If speed is not set, use the sum of max speeds of all PFs */
+ if (!speed && mlx5_lag_sum_devices_max_speed(ldev, &speed))
+ return;
+
+ speed = speed / MLX5_MAX_TX_SPEED_UNIT;
+
+ mlx5_ldev_for_each(pf_idx, 0, ldev) {
+ mdev = ldev->pf[pf_idx].dev;
+ if (!mdev)
+ continue;
+
+ mlx5_lag_modify_device_vports_speed(mdev, speed);
+ }
+}
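
The conversion above scales an aggregate Mbps figure into the device's max_tx_speed units. MLX5_MAX_TX_SPEED_UNIT's definition is not visible in this hunk; 100 Mbps is assumed below purely for illustration:

#include <stdio.h>

/* Mbps -> device-unit conversion, with an assumed unit size. */
#define MLX5_MAX_TX_SPEED_UNIT 100 /* assumed, not the real definition */

int main(void)
{
	unsigned int mbps = 2 * 100000; /* two bonded 100G ports */
	unsigned int units = mbps / MLX5_MAX_TX_SPEED_UNIT;

	printf("%u Mbps -> %u units\n", mbps, units); /* 200000 -> 2000 */
	return 0;
}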
+
+void mlx5_lag_reset_vports_speed(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *mdev;
+ u32 speed;
+ int pf_idx;
+ int ret;
+
+ mlx5_ldev_for_each(pf_idx, 0, ldev) {
+ mdev = ldev->pf[pf_idx].dev;
+ if (!mdev)
+ continue;
+
+ ret = mlx5_port_oper_linkspeed(mdev, &speed);
+ if (ret) {
+ mlx5_core_dbg(mdev,
+ "Failed to reset vports speed for device %s. Oper speed is not available (err=%d)\n",
+ dev_name(mdev->device), ret);
+ continue;
+ }
+
+ speed = speed / MLX5_MAX_TX_SPEED_UNIT;
+ mlx5_lag_modify_device_vports_speed(mdev, speed);
+ }
+}
+#endif
+
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
@@ -1083,9 +1226,12 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
ndev);
dev_put(ndev);
}
+ mlx5_lag_set_vports_agg_speed(ldev);
} else if (mlx5_lag_should_modify_lag(ldev, do_bond)) {
mlx5_modify_lag(ldev, &tracker);
+ mlx5_lag_set_vports_agg_speed(ldev);
} else if (mlx5_lag_should_disable_lag(ldev, do_bond)) {
+ mlx5_lag_reset_vports_speed(ldev);
mlx5_disable_lag(ldev);
}
}
@@ -1286,6 +1432,65 @@ static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
return 1;
}
+static void mlx5_lag_update_tracker_speed(struct lag_tracker *tracker,
+ struct net_device *ndev)
+{
+ struct ethtool_link_ksettings lksettings;
+ struct net_device *bond_dev;
+ int err;
+
+ if (netif_is_lag_master(ndev))
+ bond_dev = ndev;
+ else
+ bond_dev = netdev_master_upper_dev_get(ndev);
+
+ if (!bond_dev) {
+ tracker->bond_speed_mbps = SPEED_UNKNOWN;
+ return;
+ }
+
+ err = __ethtool_get_link_ksettings(bond_dev, &lksettings);
+ if (err) {
+ netdev_dbg(bond_dev,
+ "Failed to get speed for bond dev %s, err=%d\n",
+ bond_dev->name, err);
+ tracker->bond_speed_mbps = SPEED_UNKNOWN;
+ return;
+ }
+
+ if (lksettings.base.speed == SPEED_UNKNOWN)
+ tracker->bond_speed_mbps = 0;
+ else
+ tracker->bond_speed_mbps = lksettings.base.speed;
+}
+
+/* Returns speed in Mbps. */
+int mlx5_lag_query_bond_speed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+ struct mlx5_lag *ldev;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(mdev);
+ if (!ldev) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ *speed = ldev->tracker.bond_speed_mbps;
+
+ if (*speed == SPEED_UNKNOWN) {
+ mlx5_core_dbg(mdev, "Bond speed is unknown\n");
+ ret = -EINVAL;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&lag_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx5_lag_query_bond_speed);
+
/* this handler is always registered to netdev events */
static int mlx5_lag_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
@@ -1317,6 +1522,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
break;
}
+ if (changed)
+ mlx5_lag_update_tracker_speed(&tracker, ndev);
+
ldev->tracker = tracker;
if (changed)
@@ -1362,6 +1570,10 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
ldev->pf[fn].dev = dev;
dev->priv.lag = ldev;
+
+ MLX5_NB_INIT(&ldev->pf[fn].port_change_nb,
+ mlx5_lag_mpesw_port_change_event, PORT_CHANGE);
+ mlx5_eq_notifier_register(dev, &ldev->pf[fn].port_change_nb);
}
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
@@ -1373,6 +1585,9 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
if (ldev->pf[fn].dev != dev)
return;
+ if (ldev->pf[fn].port_change_nb.nb.notifier_call)
+ mlx5_eq_notifier_unregister(dev, &ldev->pf[fn].port_change_nb);
+
ldev->pf[fn].dev = NULL;
dev->priv.lag = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 4918eee2b3da..be1afece5fdc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -39,6 +39,7 @@ struct lag_func {
struct mlx5_core_dev *dev;
struct net_device *netdev;
bool has_drop;
+ struct mlx5_nb port_change_nb;
};
/* Used for collection of netdev event info. */
@@ -48,6 +49,7 @@ struct lag_tracker {
unsigned int is_bonded:1;
unsigned int has_inactive:1;
enum netdev_lag_hash hash_type;
+ u32 bond_speed_mbps;
};
/* LAG data of a ConnectX card.
@@ -66,6 +68,7 @@ struct mlx5_lag {
struct lag_tracker tracker;
struct workqueue_struct *wq;
struct delayed_work bond_work;
+ struct work_struct speed_update_work;
struct notifier_block nb;
possible_net_t net;
struct lag_mp lag_mp;
@@ -116,6 +119,14 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev);
void mlx5_lag_add_devices(struct mlx5_lag *ldev);
struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev);
+#ifdef CONFIG_MLX5_ESWITCH
+void mlx5_lag_set_vports_agg_speed(struct mlx5_lag *ldev);
+void mlx5_lag_reset_vports_speed(struct mlx5_lag *ldev);
+#else
+static inline void mlx5_lag_set_vports_agg_speed(struct mlx5_lag *ldev) {}
+static inline void mlx5_lag_reset_vports_speed(struct mlx5_lag *ldev) {}
+#endif
+
static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
{
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 2d86af8f0d9b..04762562d7d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -110,6 +110,8 @@ static int enable_mpesw(struct mlx5_lag *ldev)
goto err_rescan_drivers;
}
+ mlx5_lag_set_vports_agg_speed(ldev);
+
return 0;
err_rescan_drivers:
@@ -223,3 +225,40 @@ bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev)
return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
}
EXPORT_SYMBOL(mlx5_lag_is_mpesw);
+
+void mlx5_mpesw_speed_update_work(struct work_struct *work)
+{
+ struct mlx5_lag *ldev = container_of(work, struct mlx5_lag,
+ speed_update_work);
+
+ mutex_lock(&ldev->lock);
+ if (ldev->mode == MLX5_LAG_MODE_MPESW) {
+ if (ldev->mode_changes_in_progress)
+ queue_work(ldev->wq, &ldev->speed_update_work);
+ else
+ mlx5_lag_set_vports_agg_speed(ldev);
+ }
+
+ mutex_unlock(&ldev->lock);
+}
+
+int mlx5_lag_mpesw_port_change_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct mlx5_nb *mlx5_nb = container_of(nb, struct mlx5_nb, nb);
+ struct lag_func *lag_func = container_of(mlx5_nb,
+ struct lag_func,
+ port_change_nb);
+ struct mlx5_core_dev *dev = lag_func->dev;
+ struct mlx5_lag *ldev = dev->priv.lag;
+ struct mlx5_eqe *eqe = data;
+
+ if (!ldev)
+ return NOTIFY_DONE;
+
+ if (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_DOWN ||
+ eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE)
+ queue_work(ldev->wq, &ldev->speed_update_work);
+
+ return NOTIFY_OK;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
index 02520f27a033..f5d9b5c97b0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
@@ -32,4 +32,18 @@ bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
void mlx5_lag_mpesw_disable(struct mlx5_core_dev *dev);
int mlx5_lag_mpesw_enable(struct mlx5_core_dev *dev);
+#ifdef CONFIG_MLX5_ESWITCH
+void mlx5_mpesw_speed_update_work(struct work_struct *work);
+int mlx5_lag_mpesw_port_change_event(struct notifier_block *nb,
+ unsigned long event, void *data);
+#else
+static inline void mlx5_mpesw_speed_update_work(struct work_struct *work) {}
+static inline int mlx5_lag_mpesw_port_change_event(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ return NOTIFY_DONE;
+}
+#endif /* CONFIG_MLX5_ESWITCH */
+
#endif /* __MLX5_LAG_MPESW_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index f2d74382fb85..b635b423d972 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -345,10 +345,10 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
u8 tc, u8 *bw_pct);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
+ u16 *max_bw_value,
u8 *max_bw_unit);
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
+ u16 *max_bw_value,
u8 *max_bw_unit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
@@ -383,6 +383,7 @@ const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
struct mlx5_link_info *info,
bool force_legacy);
+int mlx5_port_oper_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 7f8bed353e67..ee8b9765c5ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -773,7 +773,7 @@ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
}
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
+ u16 *max_bw_value,
u8 *max_bw_units)
{
u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
@@ -796,7 +796,7 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
}
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
+ u16 *max_bw_value,
u8 *max_bw_units)
{
u32 out[MLX5_ST_SZ_DW(qetc_reg)];
@@ -1111,7 +1111,7 @@ mlx5e_ext_link_info[MLX5E_EXT_LINK_MODES_NUMBER] = {
[MLX5E_200GAUI_1_200GBASE_CR1_KR1] = {.speed = 200000, .lanes = 1},
[MLX5E_400GAUI_2_400GBASE_CR2_KR2] = {.speed = 400000, .lanes = 2},
[MLX5E_800GAUI_4_800GBASE_CR4_KR4] = {.speed = 800000, .lanes = 4},
- [MLX5E_1600TAUI_8_1600TBASE_CR8_KR8] = {.speed = 1600000, .lanes = 8},
+ [MLX5E_1600GAUI_8_1600GBASE_CR8_KR8] = {.speed = 1600000, .lanes = 8},
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
@@ -1203,6 +1203,30 @@ u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
return link_modes;
}
+int mlx5_port_oper_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+ const struct mlx5_link_info *table;
+ struct mlx5_port_eth_proto eproto;
+ u32 oper_speed = 0;
+ u32 max_size;
+ bool ext;
+ int err;
+ int i;
+
+ ext = mlx5_ptys_ext_supported(mdev);
+ err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
+ if (err)
+ return err;
+
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, false);
+ for (i = 0; i < max_size; ++i)
+ if (eproto.oper & MLX5E_PROT_MASK(i))
+ oper_speed = max(oper_speed, table[i].speed);
+
+ *speed = oper_speed;
+ return 0;
+}
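
The reduction in mlx5_port_oper_linkspeed() picks the fastest table entry whose bit is set in the PTYS oper mask; a self-contained model with illustrative table values:

#include <stdio.h>

/* Model of the oper-speed reduction. Table values are illustrative,
 * not the driver's link-mode table.
 */
static const unsigned int speed_tbl[] = { 1000, 10000, 25000, 100000 };

int main(void)
{
	unsigned int oper_mask = (1u << 1) | (1u << 3); /* 10G and 100G set */
	unsigned int speed = 0;

	for (unsigned int i = 0; i < 4; i++)
		if ((oper_mask & (1u << i)) && speed_tbl[i] > speed)
			speed = speed_tbl[i];

	printf("oper speed: %u Mbps\n", speed); /* 100000 */
	return 0;
}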
+
int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
const struct mlx5_link_info *table;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
index 839d71bd4216..5bc8e97ecf1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -121,7 +121,9 @@ mlx5_fs_hws_pr_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
if (!pr_bulk)
return NULL;
- if (mlx5_fs_bulk_init(dev, &pr_bulk->fs_bulk, bulk_len))
+ mlx5_fs_bulk_init(&pr_bulk->fs_bulk, bulk_len);
+
+ if (mlx5_fs_bulk_bitmap_alloc(dev, &pr_bulk->fs_bulk))
goto free_pr_bulk;
for (i = 0; i < bulk_len; i++) {
@@ -275,7 +277,9 @@ mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
if (!mh_bulk)
return NULL;
- if (mlx5_fs_bulk_init(dev, &mh_bulk->fs_bulk, bulk_len))
+ mlx5_fs_bulk_init(&mh_bulk->fs_bulk, bulk_len);
+
+ if (mlx5_fs_bulk_bitmap_alloc(dev, &mh_bulk->fs_bulk))
goto free_mh_bulk;
for (int i = 0; i < bulk_len; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 306affbcfd3b..cb098d3eb2fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -62,6 +62,28 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
return MLX5_GET(query_vport_state_out, out, state);
}
+static int mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u8 other_vport,
+ u8 *admin_state)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
+ int err;
+
+ MLX5_SET(query_vport_state_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_STATE);
+ MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+ MLX5_SET(query_vport_state_in, in, vport_number, vport);
+ MLX5_SET(query_vport_state_in, in, other_vport, other_vport);
+
+ err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
+ if (err)
+ return err;
+
+ *admin_state = MLX5_GET(query_vport_state_out, out, admin_state);
+ return 0;
+}
+
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 other_vport, u8 state)
{
@@ -77,6 +99,58 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
}
+int mlx5_modify_vport_max_tx_speed(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u8 other_vport, u16 max_tx_speed)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
+ u8 admin_state;
+ int err;
+
+ err = mlx5_query_vport_admin_state(mdev, opmod, vport, other_vport,
+ &admin_state);
+ if (err)
+ return err;
+
+ MLX5_SET(modify_vport_state_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_VPORT_STATE);
+ MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
+ MLX5_SET(modify_vport_state_in, in, vport_number, vport);
+ MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
+ MLX5_SET(modify_vport_state_in, in, admin_state, admin_state);
+ MLX5_SET(modify_vport_state_in, in, max_tx_speed, max_tx_speed);
+
+ return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
+}
+
+int mlx5_query_vport_max_tx_speed(struct mlx5_core_dev *mdev, u8 op_mod,
+ u16 vport, u8 other_vport, u32 *max_tx_speed)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
+ u32 state;
+ int err;
+
+ MLX5_SET(query_vport_state_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_STATE);
+ MLX5_SET(query_vport_state_in, in, op_mod, op_mod);
+ MLX5_SET(query_vport_state_in, in, vport_number, vport);
+ MLX5_SET(query_vport_state_in, in, other_vport, other_vport);
+
+ err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
+ if (err)
+ return err;
+
+ state = MLX5_GET(query_vport_state_out, out, state);
+ if (state == VPORT_STATE_DOWN) {
+ *max_tx_speed = 0;
+ return 0;
+ }
+
+ *max_tx_speed = MLX5_GET(query_vport_state_out, out, max_tx_speed);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_max_tx_speed);
+
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
bool other_vport, u32 *out)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 8769cba2c746..7da9ef254b72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -2542,18 +2542,6 @@ void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
-static int __init mlxsw_pci_module_init(void)
-{
- return 0;
-}
-
-static void __exit mlxsw_pci_module_exit(void)
-{
-}
-
-module_init(mlxsw_pci_module_init);
-module_exit(mlxsw_pci_module_exit);
-
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 422265dc7abd..b717db879cd3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -1019,6 +1019,9 @@ enum {
#define FBNIC_QUEUE_TWQ_CTL_ENABLE CSR_BIT(1)
#define FBNIC_QUEUE_TWQ0_TAIL 0x002 /* 0x008 */
#define FBNIC_QUEUE_TWQ1_TAIL 0x003 /* 0x00c */
+#define FBNIC_QUEUE_TWQ0_PTRS 0x004 /* 0x010 */
+#define FBNIC_QUEUE_TWQ1_PTRS 0x005 /* 0x014 */
+#define FBNIC_QUEUE_TWQ_PTRS_HEAD_MASK CSR_GENMASK(31, 16)
#define FBNIC_QUEUE_TWQ0_SIZE 0x00a /* 0x028 */
#define FBNIC_QUEUE_TWQ1_SIZE 0x00b /* 0x02c */
@@ -1042,6 +1045,8 @@ enum {
#define FBNIC_QUEUE_TCQ_CTL_ENABLE CSR_BIT(1)
#define FBNIC_QUEUE_TCQ_HEAD 0x081 /* 0x204 */
+#define FBNIC_QUEUE_TCQ_PTRS 0x082 /* 0x208 */
+#define FBNIC_QUEUE_TCQ_PTRS_TAIL_MASK CSR_GENMASK(31, 16)
#define FBNIC_QUEUE_TCQ_SIZE 0x084 /* 0x210 */
#define FBNIC_QUEUE_TCQ_SIZE_MASK CSR_GENMASK(3, 0)
@@ -1075,6 +1080,9 @@ enum {
#define FBNIC_QUEUE_RCQ_CTL_ENABLE CSR_BIT(1)
#define FBNIC_QUEUE_RCQ_HEAD 0x201 /* 0x804 */
+#define FBNIC_QUEUE_RCQ_PTRS 0x202 /* 0x808 */
+#define FBNIC_QUEUE_RCQ_PTRS_TAIL_MASK CSR_GENMASK(31, 16)
+#define FBNIC_QUEUE_RCQ_PTRS_HEAD_MASK CSR_GENMASK(15, 0)
#define FBNIC_QUEUE_RCQ_SIZE 0x204 /* 0x810 */
#define FBNIC_QUEUE_RCQ_SIZE_MASK CSR_GENMASK(3, 0)
@@ -1090,6 +1098,10 @@ enum {
#define FBNIC_QUEUE_BDQ_HPQ_TAIL 0x241 /* 0x904 */
#define FBNIC_QUEUE_BDQ_PPQ_TAIL 0x242 /* 0x908 */
+#define FBNIC_QUEUE_BDQ_HPQ_PTRS 0x243 /* 0x90c */
+#define FBNIC_QUEUE_BDQ_PPQ_PTRS 0x244 /* 0x910 */
+#define FBNIC_QUEUE_BDQ_PTRS_HEAD_MASK CSR_GENMASK(31, 16)
+#define FBNIC_QUEUE_BDQ_PTRS_TAIL_MASK CSR_GENMASK(15, 0)
#define FBNIC_QUEUE_BDQ_HPQ_SIZE 0x247 /* 0x91c */
#define FBNIC_QUEUE_BDQ_PPQ_SIZE 0x248 /* 0x920 */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
index b7238dd967fe..08270db2dee8 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
@@ -7,9 +7,12 @@
#include <linux/seq_file.h>
#include "fbnic.h"
+#include "fbnic_txrx.h"
static struct dentry *fbnic_dbg_root;
+/* Descriptor Seq Functions */
+
static void fbnic_dbg_desc_break(struct seq_file *s, int i)
{
while (i--)
@@ -18,6 +21,362 @@ static void fbnic_dbg_desc_break(struct seq_file *s, int i)
seq_putc(s, '\n');
}
+static void fbnic_dbg_ring_show(struct seq_file *s)
+{
+ struct fbnic_ring *ring = s->private;
+ unsigned long doorbell_offset;
+ u32 head = 0, tail = 0;
+ u32 __iomem *csr_base;
+
+ csr_base = fbnic_ring_csr_base(ring);
+ doorbell_offset = ring->doorbell - csr_base;
+
+ seq_printf(s, "doorbell CSR: %#05lx q_idx: %d\n",
+ doorbell_offset, ring->q_idx);
+ seq_printf(s, "size_mask: %#06x size: %zu flags: 0x%02x\n",
+ ring->size_mask, ring->size, ring->flags);
+ seq_printf(s, "SW: head: %#06x tail: %#06x\n",
+ ring->head, ring->tail);
+
+ switch (doorbell_offset) {
+ case FBNIC_QUEUE_TWQ0_TAIL:
+ tail = readl(csr_base + FBNIC_QUEUE_TWQ0_PTRS);
+ head = FIELD_GET(FBNIC_QUEUE_TWQ_PTRS_HEAD_MASK, tail);
+ break;
+ case FBNIC_QUEUE_TWQ1_TAIL:
+ tail = readl(csr_base + FBNIC_QUEUE_TWQ1_PTRS);
+ head = FIELD_GET(FBNIC_QUEUE_TWQ_PTRS_HEAD_MASK, tail);
+ break;
+ case FBNIC_QUEUE_TCQ_HEAD:
+ head = readl(csr_base + FBNIC_QUEUE_TCQ_PTRS);
+ tail = FIELD_GET(FBNIC_QUEUE_TCQ_PTRS_TAIL_MASK, head);
+ break;
+ case FBNIC_QUEUE_BDQ_HPQ_TAIL:
+ tail = readl(csr_base + FBNIC_QUEUE_BDQ_HPQ_PTRS);
+ head = FIELD_GET(FBNIC_QUEUE_BDQ_PTRS_HEAD_MASK, tail);
+ break;
+ case FBNIC_QUEUE_BDQ_PPQ_TAIL:
+ tail = readl(csr_base + FBNIC_QUEUE_BDQ_PPQ_PTRS);
+ head = FIELD_GET(FBNIC_QUEUE_BDQ_PTRS_HEAD_MASK, tail);
+ break;
+ case FBNIC_QUEUE_RCQ_HEAD:
+ head = readl(csr_base + FBNIC_QUEUE_RCQ_PTRS);
+ tail = FIELD_GET(FBNIC_QUEUE_RCQ_PTRS_TAIL_MASK, head);
+ break;
+ }
+
+ tail &= FBNIC_QUEUE_BDQ_PTRS_TAIL_MASK;
+ head &= FBNIC_QUEUE_RCQ_PTRS_HEAD_MASK;
+
+ seq_printf(s, "HW: head: %#06x tail: %#06x\n", head, tail);
+
+ seq_puts(s, "\n");
+}
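
Each PTRS register packs both queue pointers into one 32-bit word, so a single read yields a consistent head/tail pair; decoding it outside the kernel (FIELD_GET open-coded, mask placement per the definitions above):

#include <stdint.h>
#include <stdio.h>

/* Decode of a packed PTRS word: head in bits 31:16 and tail in bits
 * 15:0 for the TWQ/BDQ layout (the TCQ/RCQ registers swap the two).
 */
#define PTRS_HI_MASK 0xffff0000u
#define PTRS_LO_MASK 0x0000ffffu

int main(void)
{
	uint32_t ptrs = 0x00420017; /* head = 0x42, tail = 0x17 */
	uint32_t head = (ptrs & PTRS_HI_MASK) >> 16;
	uint32_t tail = ptrs & PTRS_LO_MASK;

	printf("head %#x tail %#x\n", head, tail);
	return 0;
}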
+
+static void fbnic_dbg_twd_desc_seq_show(struct seq_file *s, int i)
+{
+ struct fbnic_ring *ring = s->private;
+ u64 twd = le64_to_cpu(ring->desc[i]);
+
+ switch (FIELD_GET(FBNIC_TWD_TYPE_MASK, twd)) {
+ case FBNIC_TWD_TYPE_META:
+ seq_printf(s, "%04x %#06llx %llx %llx %llx %llx %llx %#llx %#llx %llx %#04llx %#04llx %llx %#04llx\n",
+ i, FIELD_GET(FBNIC_TWD_LEN_MASK, twd),
+ FIELD_GET(FBNIC_TWD_TYPE_MASK, twd),
+ FIELD_GET(FBNIC_TWD_FLAG_REQ_COMPLETION, twd),
+ FIELD_GET(FBNIC_TWD_FLAG_REQ_CSO, twd),
+ FIELD_GET(FBNIC_TWD_FLAG_REQ_LSO, twd),
+ FIELD_GET(FBNIC_TWD_FLAG_REQ_TS, twd),
+ FIELD_GET(FBNIC_TWD_L4_HLEN_MASK, twd),
+ FIELD_GET(FBNIC_TWD_CSUM_OFFSET_MASK, twd),
+ FIELD_GET(FBNIC_TWD_L4_TYPE_MASK, twd),
+ FIELD_GET(FBNIC_TWD_L3_IHLEN_MASK, twd),
+ FIELD_GET(FBNIC_TWD_L3_OHLEN_MASK, twd),
+ FIELD_GET(FBNIC_TWD_L3_TYPE_MASK, twd),
+ FIELD_GET(FBNIC_TWD_L2_HLEN_MASK, twd));
+ break;
+ default:
+ seq_printf(s, "%04x %#06llx %llx %#014llx\n", i,
+ FIELD_GET(FBNIC_TWD_LEN_MASK, twd),
+ FIELD_GET(FBNIC_TWD_TYPE_MASK, twd),
+ FIELD_GET(FBNIC_TWD_ADDR_MASK, twd));
+ break;
+ }
+}
+
+static int fbnic_dbg_twq_desc_seq_show(struct seq_file *s, void *v)
+{
+ struct fbnic_ring *ring = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate header on first entry */
+ fbnic_dbg_ring_show(s);
+ snprintf(hdr, sizeof(hdr), "%4s %5s %s %s\n",
+ "DESC", "LEN/MSS", "T", "METADATA/TIMESTAMP/BUFFER_ADDR");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ /* Display descriptors */
+ if (!ring->desc) {
+ seq_puts(s, "Descriptor ring not allocated.\n");
+ return 0;
+ }
+
+ for (i = 0; i <= ring->size_mask; i++)
+ fbnic_dbg_twd_desc_seq_show(s, i);
+
+ return 0;
+}
+
+static int fbnic_dbg_tcq_desc_seq_show(struct seq_file *s, void *v)
+{
+ struct fbnic_ring *ring = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate header on first entry */
+ fbnic_dbg_ring_show(s);
+ snprintf(hdr, sizeof(hdr), "%4s %s %s %s %5s %-16s %-6s %-6s\n",
+ "DESC", "D", "T", "Q", "STATUS", "TIMESTAMP", "HEAD1", "HEAD0");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ /* Display descriptors */
+ if (!ring->desc) {
+ seq_puts(s, "Descriptor ring not allocated.\n");
+ return 0;
+ }
+
+ for (i = 0; i <= ring->size_mask; i++) {
+ u64 tcd = le64_to_cpu(ring->desc[i]);
+
+ switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) {
+ case FBNIC_TCD_TYPE_0:
+ seq_printf(s, "%04x %llx %llx %llx %#05llx %-17s %#06llx %#06llx\n",
+ i, FIELD_GET(FBNIC_TCD_DONE, tcd),
+ FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd),
+ FIELD_GET(FBNIC_TCD_TWQ1, tcd),
+ FIELD_GET(FBNIC_TCD_STATUS_MASK, tcd),
+ "",
+ FIELD_GET(FBNIC_TCD_TYPE0_HEAD1_MASK, tcd),
+ FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK, tcd));
+ break;
+ case FBNIC_TCD_TYPE_1:
+ seq_printf(s, "%04x %llx %llx %llx %#05llx %#012llx\n",
+ i, FIELD_GET(FBNIC_TCD_DONE, tcd),
+ FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd),
+ FIELD_GET(FBNIC_TCD_TWQ1, tcd),
+ FIELD_GET(FBNIC_TCD_STATUS_MASK, tcd),
+ FIELD_GET(FBNIC_TCD_TYPE1_TS_MASK, tcd));
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int fbnic_dbg_bdq_desc_seq_show(struct seq_file *s, void *v)
+{
+ struct fbnic_ring *ring = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate header on first entry */
+ fbnic_dbg_ring_show(s);
+ snprintf(hdr, sizeof(hdr), "%4s %-4s %s\n",
+ "DESC", "ID", "BUFFER_ADDR");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ /* Display descriptors */
+ if (!ring->desc) {
+ seq_puts(s, "Descriptor ring not allocated.\n");
+ return 0;
+ }
+
+ for (i = 0; i <= ring->size_mask; i++) {
+ u64 bd = le64_to_cpu(ring->desc[i]);
+
+ seq_printf(s, "%04x %#04llx %#014llx\n", i,
+ FIELD_GET(FBNIC_BD_DESC_ID_MASK, bd),
+ FIELD_GET(FBNIC_BD_DESC_ADDR_MASK, bd));
+ }
+
+ return 0;
+}
+
+static void fbnic_dbg_rcd_desc_seq_show(struct seq_file *s, int i)
+{
+ struct fbnic_ring *ring = s->private;
+ u64 rcd = le64_to_cpu(ring->desc[i]);
+
+ switch (FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd)) {
+ case FBNIC_RCD_TYPE_HDR_AL:
+ case FBNIC_RCD_TYPE_PAY_AL:
+ seq_printf(s, "%04x %llx %llx %llx %#06llx %#06llx %#06llx\n",
+ i, FIELD_GET(FBNIC_RCD_DONE, rcd),
+ FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd),
+ FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_AL_BUFF_ID_MASK, rcd));
+ break;
+ case FBNIC_RCD_TYPE_OPT_META:
+ seq_printf(s, "%04x %llx %llx %llx %llx %llx %#06llx %#012llx\n",
+ i, FIELD_GET(FBNIC_RCD_DONE, rcd),
+ FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_OPT_META_TYPE_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_OPT_META_TS, rcd),
+ FIELD_GET(FBNIC_RCD_OPT_META_ACTION, rcd),
+ FIELD_GET(FBNIC_RCD_OPT_META_ACTION_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_OPT_META_TS_MASK, rcd));
+ break;
+ case FBNIC_RCD_TYPE_META:
+ seq_printf(s, "%04x %llx %llx %llx %llx %llx %llx %llx %llx %llx %#06llx %#010llx\n",
+ i, FIELD_GET(FBNIC_RCD_DONE, rcd),
+ FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_META_ECN, rcd),
+ FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd),
+ FIELD_GET(FBNIC_RCD_META_ERR_MAC_EOP, rcd),
+ FIELD_GET(FBNIC_RCD_META_ERR_TRUNCATED_FRAME, rcd),
+ FIELD_GET(FBNIC_RCD_META_ERR_PARSER, rcd),
+ FIELD_GET(FBNIC_RCD_META_L4_TYPE_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_META_L3_TYPE_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_META_L2_CSUM_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_META_RSS_HASH_MASK, rcd));
+ break;
+ }
+}
+
+static int fbnic_dbg_rcq_desc_seq_show(struct seq_file *s, void *v)
+{
+ struct fbnic_ring *ring = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate header on first entry */
+ fbnic_dbg_ring_show(s);
+ snprintf(hdr, sizeof(hdr),
+ "%18s %s %s\n", "OFFSET/", "L", "L");
+ seq_puts(s, hdr);
+ snprintf(hdr, sizeof(hdr),
+ "%4s %s %s %s %s %s %s %s %s %s %-8s %s\n",
+ "DESC", "D", "T", "F", "C", "M", "T", "P", "4", "3", "LEN/CSUM", "ID/TS/RSS");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ /* Display descriptors */
+ if (!ring->desc) {
+ seq_puts(s, "Descriptor ring not allocated.\n");
+ return 0;
+ }
+
+ for (i = 0; i <= ring->size_mask; i++)
+ fbnic_dbg_rcd_desc_seq_show(s, i);
+
+ return 0;
+}
+
+static int fbnic_dbg_desc_open(struct inode *inode, struct file *file)
+{
+ struct fbnic_ring *ring = inode->i_private;
+ int (*show)(struct seq_file *s, void *v);
+
+ switch (ring->doorbell - fbnic_ring_csr_base(ring)) {
+ case FBNIC_QUEUE_TWQ0_TAIL:
+ case FBNIC_QUEUE_TWQ1_TAIL:
+ show = fbnic_dbg_twq_desc_seq_show;
+ break;
+ case FBNIC_QUEUE_TCQ_HEAD:
+ show = fbnic_dbg_tcq_desc_seq_show;
+ break;
+ case FBNIC_QUEUE_BDQ_HPQ_TAIL:
+ case FBNIC_QUEUE_BDQ_PPQ_TAIL:
+ show = fbnic_dbg_bdq_desc_seq_show;
+ break;
+ case FBNIC_QUEUE_RCQ_HEAD:
+ show = fbnic_dbg_rcq_desc_seq_show;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return single_open(file, show, ring);
+}
+
+static const struct file_operations fbnic_dbg_desc_fops = {
+ .owner = THIS_MODULE,
+ .open = fbnic_dbg_desc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void fbnic_dbg_nv_init(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ char name[16];
+ int i, j;
+
+ /* Generate a folder for each napi vector */
+ snprintf(name, sizeof(name), "nv.%03d", nv->v_idx);
+
+ nv->dbg_nv = debugfs_create_dir(name, fbd->dbg_fbd);
+
+ /* Generate a file for each Tx ring in the napi vector */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+ unsigned int hw_idx;
+
+ hw_idx = fbnic_ring_csr_base(&qt->cmpl) -
+ &fbd->uc_addr0[FBNIC_QUEUE(0)];
+ hw_idx /= FBNIC_QUEUE_STRIDE;
+
+ snprintf(name, sizeof(name), "twq0.%03d", hw_idx);
+ debugfs_create_file(name, 0400, nv->dbg_nv, &qt->sub0,
+ &fbnic_dbg_desc_fops);
+
+ snprintf(name, sizeof(name), "twq1.%03d", hw_idx);
+ debugfs_create_file(name, 0400, nv->dbg_nv, &qt->sub1,
+ &fbnic_dbg_desc_fops);
+
+ snprintf(name, sizeof(name), "tcq.%03d", hw_idx);
+ debugfs_create_file(name, 0400, nv->dbg_nv, &qt->cmpl,
+ &fbnic_dbg_desc_fops);
+ }
+
+ /* Generate a file for each Rx ring in the napi vector */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+ unsigned int hw_idx;
+
+ hw_idx = fbnic_ring_csr_base(&qt->cmpl) -
+ &fbd->uc_addr0[FBNIC_QUEUE(0)];
+ hw_idx /= FBNIC_QUEUE_STRIDE;
+
+ snprintf(name, sizeof(name), "hpq.%03d", hw_idx);
+ debugfs_create_file(name, 0400, nv->dbg_nv, &qt->sub0,
+ &fbnic_dbg_desc_fops);
+
+ snprintf(name, sizeof(name), "ppq.%03d", hw_idx);
+ debugfs_create_file(name, 0400, nv->dbg_nv, &qt->sub1,
+ &fbnic_dbg_desc_fops);
+
+ snprintf(name, sizeof(name), "rcq.%03d", hw_idx);
+ debugfs_create_file(name, 0400, nv->dbg_nv, &qt->cmpl,
+ &fbnic_dbg_desc_fops);
+ }
+}
+
+void fbnic_dbg_nv_exit(struct fbnic_napi_vector *nv)
+{
+ debugfs_remove_recursive(nv->dbg_nv);
+ nv->dbg_nv = NULL;
+}
+
static int fbnic_dbg_mac_addr_show(struct seq_file *s, void *v)
{
struct fbnic_dev *fbd = s->private;
@@ -170,6 +529,52 @@ static int fbnic_dbg_ipo_dst_show(struct seq_file *s, void *v)
}
DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_dst);
+static void fbnic_dbg_fw_mbx_display(struct seq_file *s,
+ struct fbnic_dev *fbd, int mbx_idx)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+ char hdr[80];
+ int i;
+
+ /* Generate header */
+ seq_puts(s, mbx_idx == FBNIC_IPC_MBX_RX_IDX ? "Rx\n" : "Tx\n");
+
+ seq_printf(s, "Rdy: %d Head: %d Tail: %d\n",
+ mbx->ready, mbx->head, mbx->tail);
+
+ snprintf(hdr, sizeof(hdr), "%3s %-4s %s %-12s %s %-3s %-16s\n",
+ "Idx", "Len", "E", "Addr", "F", "H", "Raw");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_IPC_MBX_DESC_LEN; i++) {
+ u64 desc = __fbnic_mbx_rd_desc(fbd, mbx_idx, i);
+
+ seq_printf(s, "%-3.2d %04lld %d %012llx %d %-3d %016llx\n",
+ i, FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc),
+ !!(desc & FBNIC_IPC_MBX_DESC_EOM),
+ desc & FBNIC_IPC_MBX_DESC_ADDR_MASK,
+ !!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL),
+ !!(desc & FBNIC_IPC_MBX_DESC_HOST_CMPL),
+ desc);
+ }
+}
+
+static int fbnic_dbg_fw_mbx_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ fbnic_dbg_fw_mbx_display(s, fbd, FBNIC_IPC_MBX_RX_IDX);
+
+ /* Add blank line between Rx and Tx */
+ seq_puts(s, "\n");
+
+ fbnic_dbg_fw_mbx_display(s, fbd, FBNIC_IPC_MBX_TX_IDX);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_fw_mbx);
+
static int fbnic_dbg_fw_log_show(struct seq_file *s, void *v)
{
struct fbnic_dev *fbd = s->private;
@@ -249,6 +654,8 @@ void fbnic_dbg_fbd_init(struct fbnic_dev *fbd)
&fbnic_dbg_ipo_src_fops);
debugfs_create_file("ipo_dst", 0400, fbd->dbg_fbd, fbd,
&fbnic_dbg_ipo_dst_fops);
+ debugfs_create_file("fw_mbx", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_fw_mbx_fops);
debugfs_create_file("fw_log", 0400, fbd->dbg_fbd, fbd,
&fbnic_dbg_fw_log_fops);
}
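
As a side note, the per-ring files above follow the standard debugfs pattern of stashing the object in inode->i_private at debugfs_create_file() time and recovering it in open(); a stripped-down sketch with illustrative names (not part of the driver):

	static int example_show(struct seq_file *s, void *v)
	{
		struct fbnic_ring *ring = s->private;

		seq_printf(s, "SW head: %#06x tail: %#06x\n",
			   ring->head, ring->tail);
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		/* single_open() stores its third argument in seq_file->private */
		return single_open(file, example_show, inode->i_private);
	}
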
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index b62b1d5b1453..f1c992f5fe94 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -178,7 +178,7 @@ fbnic_flash_start(struct fbnic_dev *fbd, struct pldmfw_component *component)
goto cmpl_free;
/* Wait for firmware to ack firmware upgrade start */
- if (wait_for_completion_timeout(&cmpl->done, 10 * HZ))
+ if (fbnic_mbx_wait_for_cmpl(cmpl))
err = cmpl->result;
else
err = -ETIMEDOUT;
@@ -252,7 +252,7 @@ fbnic_flash_component(struct pldmfw *context,
goto err_no_msg;
while (offset < size) {
- if (!wait_for_completion_timeout(&cmpl->done, 15 * HZ)) {
+ if (!fbnic_mbx_wait_for_cmpl(cmpl)) {
err = -ETIMEDOUT;
break;
}
@@ -390,7 +390,7 @@ static int fbnic_fw_reporter_dump(struct devlink_health_reporter *reporter,
"Failed to transmit core dump info msg");
goto cmpl_free;
}
- if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ if (!fbnic_mbx_wait_for_cmpl(fw_cmpl)) {
NL_SET_ERR_MSG_MOD(extack,
"Timed out waiting on core dump info");
err = -ETIMEDOUT;
@@ -447,7 +447,7 @@ static int fbnic_fw_reporter_dump(struct devlink_health_reporter *reporter,
goto cmpl_cleanup;
}
- if (wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ if (fbnic_mbx_wait_for_cmpl(fw_cmpl)) {
reinit_completion(&fw_cmpl->done);
} else {
NL_SET_ERR_MSG_FMT_MOD(extack,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 693ebdf38705..11745a2d8a44 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -825,6 +825,13 @@ static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
return 0;
}
+static u32 fbnic_get_rx_ring_count(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return fbn->num_rx_queues;
+}
+
static int fbnic_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
@@ -833,10 +840,6 @@ static int fbnic_get_rxnfc(struct net_device *netdev,
u32 special = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = fbn->num_rx_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRULE:
ret = fbnic_get_cls_rule(fbn, cmd);
break;
@@ -1671,7 +1674,7 @@ fbnic_get_module_eeprom_by_page(struct net_device *netdev,
goto exit_free;
}
- if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ if (!fbnic_mbx_wait_for_cmpl(fw_cmpl)) {
err = -ETIMEDOUT;
NL_SET_ERR_MSG_MOD(extack,
"Timed out waiting for firmware response");
@@ -1895,6 +1898,7 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.get_sset_count = fbnic_get_sset_count,
.get_rxnfc = fbnic_get_rxnfc,
.set_rxnfc = fbnic_set_rxnfc,
+ .get_rx_ring_count = fbnic_get_rx_ring_count,
.get_rxfh_key_size = fbnic_get_rxfh_key_size,
.get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
.get_rxfh = fbnic_get_rxfh,
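
The ETHTOOL_GRXRINGS case moves out of get_rxnfc() into the dedicated get_rx_ring_count() ethtool callback; for any driver the conversion reduces to a sketch like the following (driver names illustrative):

	static u32 example_get_rx_ring_count(struct net_device *netdev)
	{
		struct example_priv *priv = netdev_priv(netdev); /* hypothetical */

		return priv->num_rx_queues;
	}

wired up via .get_rx_ring_count in the driver's ethtool_ops, as done for fbnic above.
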
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index d8d9b6cfde82..1f0b6350bef4 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -40,7 +40,7 @@ static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
fw_wr32(fbd, desc_offset + 1, 0);
}
-static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
+u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
{
u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
u64 desc;
@@ -205,8 +205,7 @@ static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
while (!err && count--) {
struct fbnic_tlv_msg *msg;
- msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
- __GFP_NOWARN);
+ msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_KERNEL);
if (!msg) {
err = -ENOMEM;
break;
@@ -416,7 +415,7 @@ static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
return err;
}
-static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+static int fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
@@ -429,14 +428,15 @@ static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
/* Make sure we have a page for the FW to write to */
- fbnic_mbx_alloc_rx_msgs(fbd);
- break;
+ return fbnic_mbx_alloc_rx_msgs(fbd);
case FBNIC_IPC_MBX_TX_IDX:
/* Enable DMA reads from the device */
wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
break;
}
+
+ return 0;
}
static bool fbnic_mbx_event(struct fbnic_dev *fbd)
@@ -1592,7 +1592,7 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
{
struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
- u8 head = rx_mbx->head;
+ u8 head = rx_mbx->head, tail = rx_mbx->tail;
u64 desc, length;
while (head != rx_mbx->tail) {
@@ -1603,8 +1603,8 @@ static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
break;
- dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(fbd->dev, rx_mbx->buf_info[head].addr,
+ FBNIC_RX_PAGE_SIZE, DMA_FROM_DEVICE);
msg = rx_mbx->buf_info[head].msg;
@@ -1637,19 +1637,26 @@ static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
next_page:
+ fw_wr32(fbd, FBNIC_IPC_MBX(FBNIC_IPC_MBX_RX_IDX, head), 0);
- free_page((unsigned long)rx_mbx->buf_info[head].msg);
+ rx_mbx->buf_info[tail] = rx_mbx->buf_info[head];
rx_mbx->buf_info[head].msg = NULL;
+ rx_mbx->buf_info[head].addr = 0;
- head++;
- head %= FBNIC_IPC_MBX_DESC_LEN;
+ __fbnic_mbx_wr_desc(fbd, FBNIC_IPC_MBX_RX_IDX, tail,
+ FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK,
+ FBNIC_RX_PAGE_SIZE) |
+ (rx_mbx->buf_info[tail].addr &
+ FBNIC_IPC_MBX_DESC_ADDR_MASK) |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
+
+ head = (head + 1) & (FBNIC_IPC_MBX_DESC_LEN - 1);
+ tail = (tail + 1) & (FBNIC_IPC_MBX_DESC_LEN - 1);
}
/* Record head for next interrupt */
rx_mbx->head = head;
-
- /* Make sure we have at least one page for the FW to write to */
- fbnic_mbx_alloc_rx_msgs(fbd);
+ rx_mbx->tail = tail;
}
void fbnic_mbx_poll(struct fbnic_dev *fbd)
@@ -1684,8 +1691,11 @@ int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
} while (!fbnic_mbx_event(fbd));
/* FW has shown signs of life. Enable DMA and start Tx/Rx */
- for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
- fbnic_mbx_init_desc_ring(fbd, i);
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) {
+ err = fbnic_mbx_init_desc_ring(fbd, i);
+ if (err)
+ goto clean_mbx;
+ }
/* Request an update from the firmware. This should overwrite
* mgmt.version once we get the actual version from the firmware
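
Note the rewritten Rx loop advances head and tail with (x + 1) & (FBNIC_IPC_MBX_DESC_LEN - 1), which is equivalent to the old modulo only when the ring length is a power of two; a minimal sketch of the invariant (hypothetical length):

	#define EXAMPLE_RING_LEN 8	/* must be a power of two */

	static inline u8 example_ring_next(u8 idx)
	{
		/* (idx + 1) % EXAMPLE_RING_LEN, without a division */
		return (idx + 1) & (EXAMPLE_RING_LEN - 1);
	}
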
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index 1ecd777aaada..8f7218900562 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -4,6 +4,7 @@
#ifndef _FBNIC_FW_H_
#define _FBNIC_FW_H_
+#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
@@ -36,6 +37,7 @@ struct fbnic_fw_mbx {
* + INDEX_SZ))
*/
#define FBNIC_FW_MAX_LOG_HISTORY 14
+#define FBNIC_MBX_RX_TO_SEC 10
struct fbnic_fw_ver {
u32 version;
@@ -92,6 +94,7 @@ struct fbnic_fw_completion {
} u;
};
+u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx);
void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
@@ -129,6 +132,13 @@ struct fbnic_fw_completion *__fbnic_fw_alloc_cmpl(u32 msg_type,
struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type);
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
+static inline unsigned long
+fbnic_mbx_wait_for_cmpl(struct fbnic_fw_completion *cmpl)
+{
+ return wait_for_completion_timeout(&cmpl->done,
+ FBNIC_MBX_RX_TO_SEC * HZ);
+}
+
#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
do { \
const u32 __rev_id = _rev_id; \
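
Callers of the new wrapper rely on wait_for_completion_timeout() semantics: it returns 0 on timeout and the remaining jiffies otherwise, so the calling pattern used throughout this series is (sketch, cmpl setup and teardown elided):

	if (fbnic_mbx_wait_for_cmpl(cmpl))
		err = cmpl->result;	/* firmware replied in time */
	else
		err = -ETIMEDOUT;	/* FBNIC_MBX_RX_TO_SEC seconds elapsed */
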
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index fc7abea4ef5b..9d0e4b2cc9ac 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -835,7 +835,7 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
long *val)
{
struct fbnic_fw_completion *fw_cmpl;
- int err = 0, retries = 5;
+ int err = 0;
s32 *sensor;
fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
@@ -862,24 +862,10 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
goto exit_free;
}
- /* Allow 2 seconds for reply, resend and try up to 5 times */
- while (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
- retries--;
-
- if (retries == 0) {
- dev_err(fbd->dev,
- "Timed out waiting for TSENE read\n");
- err = -ETIMEDOUT;
- goto exit_cleanup;
- }
-
- err = fbnic_fw_xmit_tsene_read_msg(fbd, NULL);
- if (err) {
- dev_err(fbd->dev,
- "Failed to transmit TSENE read msg, err %d\n",
- err);
- goto exit_cleanup;
- }
+ if (!wait_for_completion_timeout(&fw_cmpl->done, 10 * HZ)) {
+ dev_err(fbd->dev, "Timed out waiting for TSENE read\n");
+ err = -ETIMEDOUT;
+ goto exit_cleanup;
}
/* Handle error returned by firmware */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 9240673c7533..6f9389748a7d 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -142,10 +142,14 @@ void fbnic_up(struct fbnic_net *fbn)
netif_tx_start_all_queues(fbn->netdev);
fbnic_service_task_start(fbn);
+
+ fbnic_dbg_up(fbn);
}
void fbnic_down_noidle(struct fbnic_net *fbn)
{
+ fbnic_dbg_down(fbn);
+
fbnic_service_task_stop(fbn);
/* Disable Tx/Rx Processing */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 13d508ce637f..e29959241ff3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -39,7 +39,7 @@ struct fbnic_xmit_cb {
#define FBNIC_XMIT_NOUNMAP ((void *)1)
-static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
+u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
{
unsigned long csr_base = (unsigned long)ring->doorbell;
@@ -2255,6 +2255,22 @@ fbnic_nv_disable(struct fbnic_net *fbn, struct fbnic_napi_vector *nv)
fbnic_wrfl(fbn->fbd);
}
+void fbnic_dbg_down(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_dbg_nv_exit(fbn->napi[i]);
+}
+
+void fbnic_dbg_up(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_dbg_nv_init(fbn->napi[i]);
+}
+
void fbnic_disable(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
@@ -2809,7 +2825,9 @@ void fbnic_napi_depletion_check(struct net_device *netdev)
fbnic_wrfl(fbd);
}
-static int fbnic_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+static int fbnic_queue_mem_alloc(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *qmem, int idx)
{
struct fbnic_net *fbn = netdev_priv(dev);
const struct fbnic_q_triad *real;
@@ -2859,9 +2877,12 @@ static void __fbnic_nv_restart(struct fbnic_net *fbn,
for (i = 0; i < nv->txt_count; i++)
netif_wake_subqueue(fbn->netdev, nv->qt[i].sub0.q_idx);
+ fbnic_dbg_nv_init(nv);
}
-static int fbnic_queue_start(struct net_device *dev, void *qmem, int idx)
+static int fbnic_queue_start(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *qmem, int idx)
{
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_napi_vector *nv;
@@ -2891,6 +2912,7 @@ static int fbnic_queue_stop(struct net_device *dev, void *qmem, int idx)
real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
nv = fbn->napi[idx % fbn->num_napi];
+ fbnic_dbg_nv_exit(nv);
napi_disable_locked(&nv->napi);
fbnic_nv_irq_disable(nv);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 27776e844e29..b9560103ab86 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -151,6 +151,7 @@ struct fbnic_napi_vector {
struct napi_struct napi;
struct device *dev; /* Device for DMA unmapping */
struct fbnic_dev *fbd;
+ struct dentry *dbg_nv;
u16 v_idx;
u8 txt_count;
@@ -187,9 +188,12 @@ void fbnic_napi_disable(struct fbnic_net *fbn);
void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause);
void fbnic_enable(struct fbnic_net *fbn);
void fbnic_disable(struct fbnic_net *fbn);
+void fbnic_dbg_up(struct fbnic_net *fbn);
+void fbnic_dbg_down(struct fbnic_net *fbn);
void fbnic_flush(struct fbnic_net *fbn);
void fbnic_fill(struct fbnic_net *fbn);
+u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring);
void fbnic_napi_depletion_check(struct net_device *netdev);
int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail);
@@ -198,4 +202,6 @@ static inline int fbnic_napi_idx(const struct fbnic_napi_vector *nv)
return nv->v_idx - FBNIC_NON_NAPI_VECTORS;
}
+void fbnic_dbg_nv_init(struct fbnic_napi_vector *nv);
+void fbnic_dbg_nv_exit(struct fbnic_napi_vector *nv);
#endif /* _FBNIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 541c41a9077a..b7cf2ee9115f 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -242,7 +242,7 @@ static void ks8842_reset(struct ks8842_adapter *adapter)
msleep(10);
iowrite16(0, adapter->hw_addr + REG_GRR);
} else {
- /* The KS8842 goes haywire when doing softare reset
+ /* The KS8842 goes haywire when doing software reset
* a work around in the timberdale IP is implemented to
* do a hardware reset instead
ks8842_write16(adapter, 3, 1, REG_GRR);
@@ -312,7 +312,7 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
/* aggressive back off in half duplex */
ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1);
- /* enable no excessive collison drop */
+ /* enable no excessive collision drop */
ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);
/* Enable port 1 force flow control / back pressure / transmit / recv */
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index bb5138806c3f..8048770958d6 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -480,7 +480,7 @@ static int ks8851_net_open(struct net_device *dev)
* ks8851_net_stop - close network device
* @dev: The device being closed.
*
- * Called to close down a network device which has been active. Cancell any
+ * Called to close down a network device which has been active. Cancel any
* work, shutdown the RX and TX process and then place the chip into a low
* power state whilst it is not being used.
*/
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index c862b13b447a..a161ae45743a 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -39,7 +39,7 @@ static int msg_enable;
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
- * of the chip registers are not ccessible until the transfer is finished and
+ * of the chip registers are not accessible until the transfer is finished and
* the DMA has been de-asserted.
*/
struct ks8851_net_spi {
@@ -298,7 +298,7 @@ static unsigned int calc_txlen(unsigned int len)
/**
* ks8851_tx_work - process tx packet(s)
- * @work: The work strucutre what was scheduled.
+ * @work: The work structure that was scheduled.
*
* This is called when a number of packets have been scheduled for
* transmission and need to be sent to the device.
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index cdde19b8edc4..60223f03482d 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -1166,7 +1166,7 @@ struct ksz_port_info {
* @tx_cfg: Cached transmit control settings.
* @rx_cfg: Cached receive control settings.
* @intr_mask: Current interrupt mask.
- * @intr_set: Current interrup set.
+ * @intr_set: Current interrupt set.
* @intr_blocked: Interrupt blocked.
* @rx_desc_info: Receive descriptor information.
* @tx_desc_info: Transmit descriptor information.
@@ -2096,7 +2096,7 @@ static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
}
/**
- * sw_init_prio_rate - initialize switch prioirty rate
+ * sw_init_prio_rate - initialize switch priority rate
* @hw: The hardware instance.
*
* This routine initializes the priority rate function of the switch.
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 40002d9fe274..8a3c1ecc7866 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -931,16 +931,9 @@ static int lan743x_ethtool_get_rxfh_fields(struct net_device *netdev,
return 0;
}
-static int lan743x_ethtool_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *rxnfc,
- u32 *rule_locs)
+static u32 lan743x_ethtool_get_rx_ring_count(struct net_device *netdev)
{
- switch (rxnfc->cmd) {
- case ETHTOOL_GRXRINGS:
- rxnfc->data = LAN743X_USED_RX_CHANNELS;
- return 0;
- }
- return -EOPNOTSUPP;
+ return LAN743X_USED_RX_CHANNELS;
}
static u32 lan743x_ethtool_get_rxfh_key_size(struct net_device *netdev)
@@ -1369,7 +1362,7 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.get_priv_flags = lan743x_ethtool_get_priv_flags,
.set_priv_flags = lan743x_ethtool_set_priv_flags,
.get_sset_count = lan743x_ethtool_get_sset_count,
- .get_rxnfc = lan743x_ethtool_get_rxnfc,
+ .get_rx_ring_count = lan743x_ethtool_get_rx_ring_count,
.get_rxfh_key_size = lan743x_ethtool_get_rxfh_key_size,
.get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size,
.get_rxfh = lan743x_ethtool_get_rxfh,
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 1ad154f9db1a..9b5a72ada5c4 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -299,6 +299,39 @@ static int mana_get_gso_hs(struct sk_buff *skb)
return gso_hs;
}
+static void mana_per_port_queue_reset_work_handler(struct work_struct *work)
+{
+ struct mana_port_context *apc = container_of(work,
+ struct mana_port_context,
+ queue_reset_work);
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ rtnl_lock();
+
+ /* Pre-allocate buffers to prevent failure in mana_attach later */
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
+ if (err) {
+ netdev_err(ndev, "Insufficient memory for reset post tx stall detection\n");
+ goto out;
+ }
+
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev, "mana_detach failed: %d\n", err);
+ goto dealloc_pre_rxbufs;
+ }
+
+ err = mana_attach(ndev);
+ if (err)
+ netdev_err(ndev, "mana_attach failed: %d\n", err);
+
+dealloc_pre_rxbufs:
+ mana_pre_dealloc_rxbufs(apc);
+out:
+ rtnl_unlock();
+}
+
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
@@ -322,9 +355,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb_cow_head(skb, MANA_HEADROOM))
goto tx_drop_count;
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto tx_drop_count;
-
txq = &apc->tx_qp[txq_idx].txq;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq_idx].tx_cq;
@@ -839,6 +869,23 @@ out:
return err;
}
+static void mana_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct mana_port_context *apc = netdev_priv(netdev);
+ struct mana_context *ac = apc->ac;
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
+
+ /* Already in service, so a tx queue reset is not required. */
+ if (gc->in_service)
+ return;
+
+ /* Note: if queue reset work is already pending for this port (apc),
+ * subsequent requests queued from here are ignored, because we use
+ * a single work instance per port (apc).
+ */
+ queue_work(ac->per_port_queue_reset_wq, &apc->queue_reset_work);
+}
+
static int mana_shaper_set(struct net_shaper_binding *binding,
const struct net_shaper *shaper,
struct netlink_ext_ack *extack)
@@ -924,6 +971,7 @@ static const struct net_device_ops mana_devops = {
.ndo_bpf = mana_bpf,
.ndo_xdp_xmit = mana_xdp_xmit,
.ndo_change_mtu = mana_change_mtu,
+ .ndo_tx_timeout = mana_tx_timeout,
.net_shaper_ops = &mana_shaper_ops,
};
@@ -3287,6 +3335,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->min_mtu = ETH_MIN_MTU;
ndev->needed_headroom = MANA_HEADROOM;
ndev->dev_port = port_idx;
+ /* Recommended timeout based on HW FPGA re-config scenario. */
+ ndev->watchdog_timeo = 15 * HZ;
SET_NETDEV_DEV(ndev, gc->dev);
netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
@@ -3303,6 +3353,10 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
if (err)
goto reset_apc;
+ /* Initialize the per-port queue reset work. */
+ INIT_WORK(&apc->queue_reset_work,
+ mana_per_port_queue_reset_work_handler);
+
netdev_lockdep_set_classes(ndev);
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -3492,6 +3546,7 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
+ struct mana_port_context *apc = NULL;
struct device *dev = gc->dev;
u8 bm_hostmode = 0;
u16 num_ports = 0;
@@ -3549,6 +3604,14 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
ac->num_ports = MAX_PORTS_IN_MANA_DEV;
+ ac->per_port_queue_reset_wq =
+ create_singlethread_workqueue("mana_per_port_queue_reset_wq");
+ if (!ac->per_port_queue_reset_wq) {
+ dev_err(dev, "Failed to allocate per port queue reset workqueue\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
if (!resuming) {
for (i = 0; i < ac->num_ports; i++) {
err = mana_probe_port(ac, i, &ac->ports[i]);
@@ -3565,6 +3628,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
} else {
for (i = 0; i < ac->num_ports; i++) {
rtnl_lock();
+ apc = netdev_priv(ac->ports[i]);
+ enable_work(&apc->queue_reset_work);
err = mana_attach(ac->ports[i]);
rtnl_unlock();
/* we log the port for which the attach failed and stop
@@ -3616,13 +3681,15 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
for (i = 0; i < ac->num_ports; i++) {
ndev = ac->ports[i];
- apc = netdev_priv(ndev);
if (!ndev) {
if (i == 0)
dev_err(dev, "No net device to remove\n");
goto out;
}
+ apc = netdev_priv(ndev);
+ disable_work_sync(&apc->queue_reset_work);
+
/* All cleanup actions should stay after rtnl_lock(), otherwise
* other functions may access partially cleaned up data.
*/
@@ -3649,6 +3716,11 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
mana_destroy_eq(ac);
out:
+ if (ac->per_port_queue_reset_wq) {
+ destroy_workqueue(ac->per_port_queue_reset_wq);
+ ac->per_port_queue_reset_wq = NULL;
+ }
+
mana_gd_deregister_device(gd);
if (suspending)
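
The recovery path added above follows the usual two-stage pattern: ndo_tx_timeout() runs from the netdev watchdog and only queues work, while the handler performs the heavyweight detach/attach under rtnl_lock(). A stripped-down sketch with illustrative names:

	static void example_tx_timeout(struct net_device *ndev, unsigned int txqueue)
	{
		struct example_priv *priv = netdev_priv(ndev);	/* hypothetical */

		/* may run in atomic context; defer the real reset to a workqueue */
		queue_work(priv->reset_wq, &priv->reset_work);
	}
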
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 0e2f4343ac67..f2d220b371b5 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -282,18 +282,11 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
}
}
-static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
- u32 *rules)
+static u32 mana_get_rx_ring_count(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = apc->num_queues;
- return 0;
- }
-
- return -EOPNOTSUPP;
+ return apc->num_queues;
}
static u32 mana_get_rxfh_key_size(struct net_device *ndev)
@@ -520,7 +513,7 @@ const struct ethtool_ops mana_ethtool_ops = {
.get_ethtool_stats = mana_get_ethtool_stats,
.get_sset_count = mana_get_sset_count,
.get_strings = mana_get_strings,
- .get_rxnfc = mana_get_rxnfc,
+ .get_rx_ring_count = mana_get_rx_ring_count,
.get_rxfh_key_size = mana_get_rxfh_key_size,
.get_rxfh_indir_size = mana_rss_indir_size,
.get_rxfh = mana_get_rxfh,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 7be30a8df268..2f0cdbd4e2ac 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -688,6 +688,9 @@ static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
/* probe for IPv6 TSO support */
mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
+ cmd.data0 = 0;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
&cmd, 0);
if (status == 0) {
@@ -806,6 +809,7 @@ static int myri10ge_update_mac_address(struct myri10ge_priv *mgp,
| (addr[2] << 8) | addr[3]);
cmd.data1 = ((addr[4] << 8) | (addr[5]));
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
return status;
@@ -817,6 +821,9 @@ static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
int status, ctl;
ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
+ cmd.data0 = 0;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
if (status) {
@@ -834,6 +841,9 @@ myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
int status, ctl;
ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
+ cmd.data0 = 0;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
if (status)
netdev_err(mgp->dev, "Failed to set promisc mode\n");
@@ -1946,6 +1956,8 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
/* get ring sizes */
slice = ss - mgp->ss;
cmd.data0 = slice;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
tx_ring_size = cmd.data0;
cmd.data0 = slice;
@@ -2238,12 +2250,16 @@ static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
status = 0;
if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
cmd.data0 = slice;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
&cmd, 0);
ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
(mgp->sram + cmd.data0);
}
cmd.data0 = slice;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
&cmd, 0);
ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
@@ -2312,6 +2328,7 @@ static int myri10ge_open(struct net_device *dev)
if (mgp->num_slices > 1) {
cmd.data0 = mgp->num_slices;
cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
+ cmd.data2 = 0;
if (mgp->dev->real_num_tx_queues > 1)
cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
@@ -2414,6 +2431,8 @@ static int myri10ge_open(struct net_device *dev)
/* now give firmware buffers sizes, and MTU */
cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
cmd.data0 = mgp->small_bytes;
status |=
@@ -2472,7 +2491,6 @@ abort_with_nothing:
static int myri10ge_close(struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
- struct myri10ge_cmd cmd;
int status, old_down_cnt;
int i;
@@ -2491,8 +2509,13 @@ static int myri10ge_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
if (mgp->rebooted == 0) {
+ struct myri10ge_cmd cmd;
+
old_down_cnt = mgp->down_cnt;
mb();
+ cmd.data0 = 0;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status =
myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
if (status)
@@ -2956,6 +2979,9 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
/* Disable multicast filtering */
+ cmd.data0 = 0;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
deleted file mode 100644
index 09a89e72f904..000000000000
--- a/drivers/net/ethernet/neterion/Kconfig
+++ /dev/null
@@ -1,35 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Exar device configuration
-#
-
-config NET_VENDOR_NETERION
- bool "Neterion (Exar) devices"
- default y
- depends on PCI
- help
- If you have a network (Ethernet) card belonging to this class, say Y.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about Neterion/Exar cards. If you say Y, you will be
- asked for your specific card in the following questions.
-
-if NET_VENDOR_NETERION
-
-config S2IO
- tristate "Neterion (Exar) Xframe 10Gb Ethernet Adapter"
- depends on PCI
- help
- This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
- These were originally released from S2IO, which renamed itself
- Neterion. So, the adapters might be labeled as either one, depending
- on its age.
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/ethernet/neterion/s2io.rst>.
-
- To compile this driver as a module, choose M here. The module
- will be called s2io.
-
-endif # NET_VENDOR_NETERION
diff --git a/drivers/net/ethernet/neterion/Makefile b/drivers/net/ethernet/neterion/Makefile
deleted file mode 100644
index de98b4e6eff9..000000000000
--- a/drivers/net/ethernet/neterion/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Exar network device drivers.
-#
-
-obj-$(CONFIG_S2IO) += s2io.o
diff --git a/drivers/net/ethernet/neterion/s2io-regs.h b/drivers/net/ethernet/neterion/s2io-regs.h
deleted file mode 100644
index 3688325c11f5..000000000000
--- a/drivers/net/ethernet/neterion/s2io-regs.h
+++ /dev/null
@@ -1,958 +0,0 @@
-/************************************************************************
- * regs.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
- * Copyright(c) 2002-2010 Exar Corp.
-
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- ************************************************************************/
-#ifndef _REGS_H
-#define _REGS_H
-
-#define TBD 0
-
-struct XENA_dev_config {
-/* Convention: mHAL_XXX is mask, vHAL_XXX is value */
-
-/* General Control-Status Registers */
- u64 general_int_status;
-#define GEN_INTR_TXPIC s2BIT(0)
-#define GEN_INTR_TXDMA s2BIT(1)
-#define GEN_INTR_TXMAC s2BIT(2)
-#define GEN_INTR_TXXGXS s2BIT(3)
-#define GEN_INTR_TXTRAFFIC s2BIT(8)
-#define GEN_INTR_RXPIC s2BIT(32)
-#define GEN_INTR_RXDMA s2BIT(33)
-#define GEN_INTR_RXMAC s2BIT(34)
-#define GEN_INTR_MC s2BIT(35)
-#define GEN_INTR_RXXGXS s2BIT(36)
-#define GEN_INTR_RXTRAFFIC s2BIT(40)
-#define GEN_ERROR_INTR GEN_INTR_TXPIC | GEN_INTR_RXPIC | \
- GEN_INTR_TXDMA | GEN_INTR_RXDMA | \
- GEN_INTR_TXMAC | GEN_INTR_RXMAC | \
- GEN_INTR_TXXGXS| GEN_INTR_RXXGXS| \
- GEN_INTR_MC
-
- u64 general_int_mask;
-
- u8 unused0[0x100 - 0x10];
-
- u64 sw_reset;
-/* XGXS must be removed from reset only once. */
-#define SW_RESET_XENA vBIT(0xA5,0,8)
-#define SW_RESET_FLASH vBIT(0xA5,8,8)
-#define SW_RESET_EOI vBIT(0xA5,16,8)
-#define SW_RESET_ALL (SW_RESET_XENA | \
- SW_RESET_FLASH | \
- SW_RESET_EOI)
-/* The SW_RESET register must read this value after a successful reset. */
-#define SW_RESET_RAW_VAL 0xA5000000
-
-
- u64 adapter_status;
-#define ADAPTER_STATUS_TDMA_READY s2BIT(0)
-#define ADAPTER_STATUS_RDMA_READY s2BIT(1)
-#define ADAPTER_STATUS_PFC_READY s2BIT(2)
-#define ADAPTER_STATUS_TMAC_BUF_EMPTY s2BIT(3)
-#define ADAPTER_STATUS_PIC_QUIESCENT s2BIT(5)
-#define ADAPTER_STATUS_RMAC_REMOTE_FAULT s2BIT(6)
-#define ADAPTER_STATUS_RMAC_LOCAL_FAULT s2BIT(7)
-#define ADAPTER_STATUS_RMAC_PCC_IDLE vBIT(0xFF,8,8)
-#define ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE vBIT(0x0F,8,8)
-#define ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8)
-#define ADAPTER_STATUS_MC_DRAM_READY s2BIT(24)
-#define ADAPTER_STATUS_MC_QUEUES_READY s2BIT(25)
-#define ADAPTER_STATUS_RIC_RUNNING s2BIT(26)
-#define ADAPTER_STATUS_M_PLL_LOCK s2BIT(30)
-#define ADAPTER_STATUS_P_PLL_LOCK s2BIT(31)
-
- u64 adapter_control;
-#define ADAPTER_CNTL_EN s2BIT(7)
-#define ADAPTER_EOI_TX_ON s2BIT(15)
-#define ADAPTER_LED_ON s2BIT(23)
-#define ADAPTER_UDPI(val) vBIT(val,36,4)
-#define ADAPTER_WAIT_INT s2BIT(48)
-#define ADAPTER_ECC_EN s2BIT(55)
-
- u64 serr_source;
-#define SERR_SOURCE_PIC s2BIT(0)
-#define SERR_SOURCE_TXDMA s2BIT(1)
-#define SERR_SOURCE_RXDMA s2BIT(2)
-#define SERR_SOURCE_MAC s2BIT(3)
-#define SERR_SOURCE_MC s2BIT(4)
-#define SERR_SOURCE_XGXS s2BIT(5)
-#define SERR_SOURCE_ANY (SERR_SOURCE_PIC | \
- SERR_SOURCE_TXDMA | \
- SERR_SOURCE_RXDMA | \
- SERR_SOURCE_MAC | \
- SERR_SOURCE_MC | \
- SERR_SOURCE_XGXS)
-
- u64 pci_mode;
-#define GET_PCI_MODE(val) ((val & vBIT(0xF, 0, 4)) >> 60)
-#define PCI_MODE_PCI_33 0
-#define PCI_MODE_PCI_66 0x1
-#define PCI_MODE_PCIX_M1_66 0x2
-#define PCI_MODE_PCIX_M1_100 0x3
-#define PCI_MODE_PCIX_M1_133 0x4
-#define PCI_MODE_PCIX_M2_66 0x5
-#define PCI_MODE_PCIX_M2_100 0x6
-#define PCI_MODE_PCIX_M2_133 0x7
-#define PCI_MODE_UNSUPPORTED s2BIT(0)
-#define PCI_MODE_32_BITS s2BIT(8)
-#define PCI_MODE_UNKNOWN_MODE s2BIT(9)
-
- u8 unused_0[0x800 - 0x128];
-
-/* PCI-X Controller registers */
- u64 pic_int_status;
- u64 pic_int_mask;
-#define PIC_INT_TX s2BIT(0)
-#define PIC_INT_FLSH s2BIT(1)
-#define PIC_INT_MDIO s2BIT(2)
-#define PIC_INT_IIC s2BIT(3)
-#define PIC_INT_GPIO s2BIT(4)
-#define PIC_INT_RX s2BIT(32)
-
- u64 txpic_int_reg;
- u64 txpic_int_mask;
-#define PCIX_INT_REG_ECC_SG_ERR s2BIT(0)
-#define PCIX_INT_REG_ECC_DB_ERR s2BIT(1)
-#define PCIX_INT_REG_FLASHR_R_FSM_ERR s2BIT(8)
-#define PCIX_INT_REG_FLASHR_W_FSM_ERR s2BIT(9)
-#define PCIX_INT_REG_INI_TX_FSM_SERR s2BIT(10)
-#define PCIX_INT_REG_INI_TXO_FSM_ERR s2BIT(11)
-#define PCIX_INT_REG_TRT_FSM_SERR s2BIT(13)
-#define PCIX_INT_REG_SRT_FSM_SERR s2BIT(14)
-#define PCIX_INT_REG_PIFR_FSM_SERR s2BIT(15)
-#define PCIX_INT_REG_WRC_TX_SEND_FSM_SERR s2BIT(21)
-#define PCIX_INT_REG_RRC_TX_REQ_FSM_SERR s2BIT(23)
-#define PCIX_INT_REG_INI_RX_FSM_SERR s2BIT(48)
-#define PCIX_INT_REG_RA_RX_FSM_SERR s2BIT(50)
-/*
-#define PCIX_INT_REG_WRC_RX_SEND_FSM_SERR s2BIT(52)
-#define PCIX_INT_REG_RRC_RX_REQ_FSM_SERR s2BIT(54)
-#define PCIX_INT_REG_RRC_RX_SPLIT_FSM_SERR s2BIT(58)
-*/
- u64 txpic_alarms;
- u64 rxpic_int_reg;
- u64 rxpic_int_mask;
- u64 rxpic_alarms;
-
- u64 flsh_int_reg;
- u64 flsh_int_mask;
-#define PIC_FLSH_INT_REG_CYCLE_FSM_ERR s2BIT(63)
-#define PIC_FLSH_INT_REG_ERR s2BIT(62)
- u64 flash_alarms;
-
- u64 mdio_int_reg;
- u64 mdio_int_mask;
-#define MDIO_INT_REG_MDIO_BUS_ERR s2BIT(0)
-#define MDIO_INT_REG_DTX_BUS_ERR s2BIT(8)
-#define MDIO_INT_REG_LASI s2BIT(39)
- u64 mdio_alarms;
-
- u64 iic_int_reg;
- u64 iic_int_mask;
-#define IIC_INT_REG_BUS_FSM_ERR s2BIT(4)
-#define IIC_INT_REG_BIT_FSM_ERR s2BIT(5)
-#define IIC_INT_REG_CYCLE_FSM_ERR s2BIT(6)
-#define IIC_INT_REG_REQ_FSM_ERR s2BIT(7)
-#define IIC_INT_REG_ACK_ERR s2BIT(8)
- u64 iic_alarms;
-
- u8 unused4[0x08];
-
- u64 gpio_int_reg;
-#define GPIO_INT_REG_DP_ERR_INT s2BIT(0)
-#define GPIO_INT_REG_LINK_DOWN s2BIT(1)
-#define GPIO_INT_REG_LINK_UP s2BIT(2)
- u64 gpio_int_mask;
-#define GPIO_INT_MASK_LINK_DOWN s2BIT(1)
-#define GPIO_INT_MASK_LINK_UP s2BIT(2)
- u64 gpio_alarms;
-
- u8 unused5[0x38];
-
- u64 tx_traffic_int;
-#define TX_TRAFFIC_INT_n(n) s2BIT(n)
- u64 tx_traffic_mask;
-
- u64 rx_traffic_int;
-#define RX_TRAFFIC_INT_n(n) s2BIT(n)
- u64 rx_traffic_mask;
-
-/* PIC Control registers */
- u64 pic_control;
-#define PIC_CNTL_RX_ALARM_MAP_1 s2BIT(0)
-#define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,5)
-
- u64 swapper_ctrl;
-#define SWAPPER_CTRL_PIF_R_FE s2BIT(0)
-#define SWAPPER_CTRL_PIF_R_SE s2BIT(1)
-#define SWAPPER_CTRL_PIF_W_FE s2BIT(8)
-#define SWAPPER_CTRL_PIF_W_SE s2BIT(9)
-#define SWAPPER_CTRL_TXP_FE s2BIT(16)
-#define SWAPPER_CTRL_TXP_SE s2BIT(17)
-#define SWAPPER_CTRL_TXD_R_FE s2BIT(18)
-#define SWAPPER_CTRL_TXD_R_SE s2BIT(19)
-#define SWAPPER_CTRL_TXD_W_FE s2BIT(20)
-#define SWAPPER_CTRL_TXD_W_SE s2BIT(21)
-#define SWAPPER_CTRL_TXF_R_FE s2BIT(22)
-#define SWAPPER_CTRL_TXF_R_SE s2BIT(23)
-#define SWAPPER_CTRL_RXD_R_FE s2BIT(32)
-#define SWAPPER_CTRL_RXD_R_SE s2BIT(33)
-#define SWAPPER_CTRL_RXD_W_FE s2BIT(34)
-#define SWAPPER_CTRL_RXD_W_SE s2BIT(35)
-#define SWAPPER_CTRL_RXF_W_FE s2BIT(36)
-#define SWAPPER_CTRL_RXF_W_SE s2BIT(37)
-#define SWAPPER_CTRL_XMSI_FE s2BIT(40)
-#define SWAPPER_CTRL_XMSI_SE s2BIT(41)
-#define SWAPPER_CTRL_STATS_FE s2BIT(48)
-#define SWAPPER_CTRL_STATS_SE s2BIT(49)
-
- u64 pif_rd_swapper_fb;
-#define IF_RD_SWAPPER_FB 0x0123456789ABCDEF
-
- u64 scheduled_int_ctrl;
-#define SCHED_INT_CTRL_TIMER_EN s2BIT(0)
-#define SCHED_INT_CTRL_ONE_SHOT s2BIT(1)
-#define SCHED_INT_CTRL_INT2MSI(val) vBIT(val,10,6)
-#define SCHED_INT_PERIOD TBD
-
- u64 txreqtimeout;
-#define TXREQTO_VAL(val) vBIT(val,0,32)
-#define TXREQTO_EN s2BIT(63)
-
- u64 statsreqtimeout;
-#define STATREQTO_VAL(n) TBD
-#define STATREQTO_EN s2BIT(63)
-
- u64 read_retry_delay;
- u64 read_retry_acceleration;
- u64 write_retry_delay;
- u64 write_retry_acceleration;
-
- u64 xmsi_control;
- u64 xmsi_access;
- u64 xmsi_address;
- u64 xmsi_data;
-
- u64 rx_mat;
-#define RX_MAT_SET(ring, msi) vBIT(msi, (8 * ring), 8)
-
- u8 unused6[0x8];
-
- u64 tx_mat0_n[0x8];
-#define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8)
-
- u64 xmsi_mask_reg;
- u64 stat_byte_cnt;
-#define STAT_BC(n) vBIT(n,4,12)
-
- /* Automated statistics collection */
- u64 stat_cfg;
-#define STAT_CFG_STAT_EN s2BIT(0)
-#define STAT_CFG_ONE_SHOT_EN s2BIT(1)
-#define STAT_CFG_STAT_NS_EN s2BIT(8)
-#define STAT_CFG_STAT_RO s2BIT(9)
-#define STAT_TRSF_PER(n) TBD
-#define PER_SEC 0x208d5
-#define SET_UPDT_PERIOD(n) vBIT((PER_SEC*n),32,32)
-#define SET_UPDT_CLICKS(val) vBIT(val, 32, 32)
-
- u64 stat_addr;
-
- /* General Configuration */
- u64 mdio_control;
-#define MDIO_MMD_INDX_ADDR(val) vBIT(val, 0, 16)
-#define MDIO_MMD_DEV_ADDR(val) vBIT(val, 19, 5)
-#define MDIO_MMS_PRT_ADDR(val) vBIT(val, 27, 5)
-#define MDIO_CTRL_START_TRANS(val) vBIT(val, 56, 4)
-#define MDIO_OP(val) vBIT(val, 60, 2)
-#define MDIO_OP_ADDR_TRANS 0x0
-#define MDIO_OP_WRITE_TRANS 0x1
-#define MDIO_OP_READ_POST_INC_TRANS 0x2
-#define MDIO_OP_READ_TRANS 0x3
-#define MDIO_MDIO_DATA(val) vBIT(val, 32, 16)
-
- u64 dtx_control;
-
- u64 i2c_control;
-#define I2C_CONTROL_DEV_ID(id) vBIT(id,1,3)
-#define I2C_CONTROL_ADDR(addr) vBIT(addr,5,11)
-#define I2C_CONTROL_BYTE_CNT(cnt) vBIT(cnt,22,2)
-#define I2C_CONTROL_READ s2BIT(24)
-#define I2C_CONTROL_NACK s2BIT(25)
-#define I2C_CONTROL_CNTL_START vBIT(0xE,28,4)
-#define I2C_CONTROL_CNTL_END(val) (val & vBIT(0x1,28,4))
-#define I2C_CONTROL_GET_DATA(val) (u32)(val & 0xFFFFFFFF)
-#define I2C_CONTROL_SET_DATA(val) vBIT(val,32,32)
-
- u64 gpio_control;
-#define GPIO_CTRL_GPIO_0 s2BIT(8)
- u64 misc_control;
-#define FAULT_BEHAVIOUR s2BIT(0)
-#define EXT_REQ_EN s2BIT(1)
-#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
-
- u8 unused7_1[0x230 - 0x208];
-
- u64 pic_control2;
- u64 ini_dperr_ctrl;
-
- u64 wreq_split_mask;
-#define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12)
-
- u8 unused7_2[0x800 - 0x248];
-
-/* TxDMA registers */
- u64 txdma_int_status;
- u64 txdma_int_mask;
-#define TXDMA_PFC_INT s2BIT(0)
-#define TXDMA_TDA_INT s2BIT(1)
-#define TXDMA_PCC_INT s2BIT(2)
-#define TXDMA_TTI_INT s2BIT(3)
-#define TXDMA_LSO_INT s2BIT(4)
-#define TXDMA_TPA_INT s2BIT(5)
-#define TXDMA_SM_INT s2BIT(6)
- u64 pfc_err_reg;
-#define PFC_ECC_SG_ERR s2BIT(7)
-#define PFC_ECC_DB_ERR s2BIT(15)
-#define PFC_SM_ERR_ALARM s2BIT(23)
-#define PFC_MISC_0_ERR s2BIT(31)
-#define PFC_MISC_1_ERR s2BIT(32)
-#define PFC_PCIX_ERR s2BIT(39)
- u64 pfc_err_mask;
- u64 pfc_err_alarm;
-
- u64 tda_err_reg;
-#define TDA_Fn_ECC_SG_ERR vBIT(0xff,0,8)
-#define TDA_Fn_ECC_DB_ERR vBIT(0xff,8,8)
-#define TDA_SM0_ERR_ALARM s2BIT(22)
-#define TDA_SM1_ERR_ALARM s2BIT(23)
-#define TDA_PCIX_ERR s2BIT(39)
- u64 tda_err_mask;
- u64 tda_err_alarm;
-
- u64 pcc_err_reg;
-#define PCC_FB_ECC_SG_ERR vBIT(0xFF,0,8)
-#define PCC_TXB_ECC_SG_ERR vBIT(0xFF,8,8)
-#define PCC_FB_ECC_DB_ERR vBIT(0xFF,16, 8)
-#define PCC_TXB_ECC_DB_ERR vBIT(0xff,24,8)
-#define PCC_SM_ERR_ALARM vBIT(0xff,32,8)
-#define PCC_WR_ERR_ALARM vBIT(0xff,40,8)
-#define PCC_N_SERR vBIT(0xff,48,8)
-#define PCC_6_COF_OV_ERR s2BIT(56)
-#define PCC_7_COF_OV_ERR s2BIT(57)
-#define PCC_6_LSO_OV_ERR s2BIT(58)
-#define PCC_7_LSO_OV_ERR s2BIT(59)
-#define PCC_ENABLE_FOUR vBIT(0x0F,0,8)
- u64 pcc_err_mask;
- u64 pcc_err_alarm;
-
- u64 tti_err_reg;
-#define TTI_ECC_SG_ERR s2BIT(7)
-#define TTI_ECC_DB_ERR s2BIT(15)
-#define TTI_SM_ERR_ALARM s2BIT(23)
- u64 tti_err_mask;
- u64 tti_err_alarm;
-
- u64 lso_err_reg;
-#define LSO6_SEND_OFLOW s2BIT(12)
-#define LSO7_SEND_OFLOW s2BIT(13)
-#define LSO6_ABORT s2BIT(14)
-#define LSO7_ABORT s2BIT(15)
-#define LSO6_SM_ERR_ALARM s2BIT(22)
-#define LSO7_SM_ERR_ALARM s2BIT(23)
- u64 lso_err_mask;
- u64 lso_err_alarm;
-
- u64 tpa_err_reg;
-#define TPA_TX_FRM_DROP s2BIT(7)
-#define TPA_SM_ERR_ALARM s2BIT(23)
-
- u64 tpa_err_mask;
- u64 tpa_err_alarm;
-
- u64 sm_err_reg;
-#define SM_SM_ERR_ALARM s2BIT(15)
- u64 sm_err_mask;
- u64 sm_err_alarm;
-
- u8 unused8[0x100 - 0xB8];
-
-/* TxDMA arbiter */
- u64 tx_dma_wrap_stat;
-
-/* Tx FIFO controller */
-#define X_MAX_FIFOS 8
-#define X_FIFO_MAX_LEN 0x1FFF /*8191 */
- u64 tx_fifo_partition_0;
-#define TX_FIFO_PARTITION_EN s2BIT(0)
-#define TX_FIFO_PARTITION_0_PRI(val) vBIT(val,5,3)
-#define TX_FIFO_PARTITION_0_LEN(val) vBIT(val,19,13)
-#define TX_FIFO_PARTITION_1_PRI(val) vBIT(val,37,3)
-#define TX_FIFO_PARTITION_1_LEN(val) vBIT(val,51,13 )
-
- u64 tx_fifo_partition_1;
-#define TX_FIFO_PARTITION_2_PRI(val) vBIT(val,5,3)
-#define TX_FIFO_PARTITION_2_LEN(val) vBIT(val,19,13)
-#define TX_FIFO_PARTITION_3_PRI(val) vBIT(val,37,3)
-#define TX_FIFO_PARTITION_3_LEN(val) vBIT(val,51,13)
-
- u64 tx_fifo_partition_2;
-#define TX_FIFO_PARTITION_4_PRI(val) vBIT(val,5,3)
-#define TX_FIFO_PARTITION_4_LEN(val) vBIT(val,19,13)
-#define TX_FIFO_PARTITION_5_PRI(val) vBIT(val,37,3)
-#define TX_FIFO_PARTITION_5_LEN(val) vBIT(val,51,13)
-
- u64 tx_fifo_partition_3;
-#define TX_FIFO_PARTITION_6_PRI(val) vBIT(val,5,3)
-#define TX_FIFO_PARTITION_6_LEN(val) vBIT(val,19,13)
-#define TX_FIFO_PARTITION_7_PRI(val) vBIT(val,37,3)
-#define TX_FIFO_PARTITION_7_LEN(val) vBIT(val,51,13)
-
-#define TX_FIFO_PARTITION_PRI_0 0 /* highest */
-#define TX_FIFO_PARTITION_PRI_1 1
-#define TX_FIFO_PARTITION_PRI_2 2
-#define TX_FIFO_PARTITION_PRI_3 3
-#define TX_FIFO_PARTITION_PRI_4 4
-#define TX_FIFO_PARTITION_PRI_5 5
-#define TX_FIFO_PARTITION_PRI_6 6
-#define TX_FIFO_PARTITION_PRI_7 7 /* lowest */
-
- u64 tx_w_round_robin_0;
- u64 tx_w_round_robin_1;
- u64 tx_w_round_robin_2;
- u64 tx_w_round_robin_3;
- u64 tx_w_round_robin_4;
-
- u64 tti_command_mem;
-#define TTI_CMD_MEM_WE s2BIT(7)
-#define TTI_CMD_MEM_STROBE_NEW_CMD s2BIT(15)
-#define TTI_CMD_MEM_STROBE_BEING_EXECUTED s2BIT(15)
-#define TTI_CMD_MEM_OFFSET(n) vBIT(n,26,6)
-
- u64 tti_data1_mem;
-#define TTI_DATA1_MEM_TX_TIMER_VAL(n) vBIT(n,6,26)
-#define TTI_DATA1_MEM_TX_TIMER_AC_CI(n) vBIT(n,38,2)
-#define TTI_DATA1_MEM_TX_TIMER_AC_EN s2BIT(38)
-#define TTI_DATA1_MEM_TX_TIMER_CI_EN s2BIT(39)
-#define TTI_DATA1_MEM_TX_URNG_A(n) vBIT(n,41,7)
-#define TTI_DATA1_MEM_TX_URNG_B(n) vBIT(n,49,7)
-#define TTI_DATA1_MEM_TX_URNG_C(n) vBIT(n,57,7)
-
- u64 tti_data2_mem;
-#define TTI_DATA2_MEM_TX_UFC_A(n) vBIT(n,0,16)
-#define TTI_DATA2_MEM_TX_UFC_B(n) vBIT(n,16,16)
-#define TTI_DATA2_MEM_TX_UFC_C(n) vBIT(n,32,16)
-#define TTI_DATA2_MEM_TX_UFC_D(n) vBIT(n,48,16)
-
-/* Tx Protocol assist */
- u64 tx_pa_cfg;
-#define TX_PA_CFG_IGNORE_FRM_ERR s2BIT(1)
-#define TX_PA_CFG_IGNORE_SNAP_OUI s2BIT(2)
-#define TX_PA_CFG_IGNORE_LLC_CTRL s2BIT(3)
-#define TX_PA_CFG_IGNORE_L2_ERR s2BIT(6)
-#define RX_PA_CFG_STRIP_VLAN_TAG s2BIT(15)
-
-/* Recent add, used only debug purposes. */
- u64 pcc_enable;
-
- u8 unused9[0x700 - 0x178];
-
- u64 txdma_debug_ctrl;
-
- u8 unused10[0x1800 - 0x1708];
-
-/* RxDMA Registers */
- u64 rxdma_int_status;
- u64 rxdma_int_mask;
-#define RXDMA_INT_RC_INT_M s2BIT(0)
-#define RXDMA_INT_RPA_INT_M s2BIT(1)
-#define RXDMA_INT_RDA_INT_M s2BIT(2)
-#define RXDMA_INT_RTI_INT_M s2BIT(3)
-
- u64 rda_err_reg;
-#define RDA_RXDn_ECC_SG_ERR vBIT(0xFF,0,8)
-#define RDA_RXDn_ECC_DB_ERR vBIT(0xFF,8,8)
-#define RDA_FRM_ECC_SG_ERR s2BIT(23)
-#define RDA_FRM_ECC_DB_N_AERR s2BIT(31)
-#define RDA_SM1_ERR_ALARM s2BIT(38)
-#define RDA_SM0_ERR_ALARM s2BIT(39)
-#define RDA_MISC_ERR s2BIT(47)
-#define RDA_PCIX_ERR s2BIT(55)
-#define RDA_RXD_ECC_DB_SERR s2BIT(63)
- u64 rda_err_mask;
- u64 rda_err_alarm;
-
- u64 rc_err_reg;
-#define RC_PRCn_ECC_SG_ERR vBIT(0xFF,0,8)
-#define RC_PRCn_ECC_DB_ERR vBIT(0xFF,8,8)
-#define RC_FTC_ECC_SG_ERR s2BIT(23)
-#define RC_FTC_ECC_DB_ERR s2BIT(31)
-#define RC_PRCn_SM_ERR_ALARM vBIT(0xFF,32,8)
-#define RC_FTC_SM_ERR_ALARM s2BIT(47)
-#define RC_RDA_FAIL_WR_Rn vBIT(0xFF,48,8)
- u64 rc_err_mask;
- u64 rc_err_alarm;
-
- u64 prc_pcix_err_reg;
-#define PRC_PCI_AB_RD_Rn vBIT(0xFF,0,8)
-#define PRC_PCI_DP_RD_Rn vBIT(0xFF,8,8)
-#define PRC_PCI_AB_WR_Rn vBIT(0xFF,16,8)
-#define PRC_PCI_DP_WR_Rn vBIT(0xFF,24,8)
-#define PRC_PCI_AB_F_WR_Rn vBIT(0xFF,32,8)
-#define PRC_PCI_DP_F_WR_Rn vBIT(0xFF,40,8)
- u64 prc_pcix_err_mask;
- u64 prc_pcix_err_alarm;
-
- u64 rpa_err_reg;
-#define RPA_ECC_SG_ERR s2BIT(7)
-#define RPA_ECC_DB_ERR s2BIT(15)
-#define RPA_FLUSH_REQUEST s2BIT(22)
-#define RPA_SM_ERR_ALARM s2BIT(23)
-#define RPA_CREDIT_ERR s2BIT(31)
- u64 rpa_err_mask;
- u64 rpa_err_alarm;
-
- u64 rti_err_reg;
-#define RTI_ECC_SG_ERR s2BIT(7)
-#define RTI_ECC_DB_ERR s2BIT(15)
-#define RTI_SM_ERR_ALARM s2BIT(23)
- u64 rti_err_mask;
- u64 rti_err_alarm;
-
- u8 unused11[0x100 - 0x88];
-
-/* DMA arbiter */
- u64 rx_queue_priority;
-#define RX_QUEUE_0_PRIORITY(val) vBIT(val,5,3)
-#define RX_QUEUE_1_PRIORITY(val) vBIT(val,13,3)
-#define RX_QUEUE_2_PRIORITY(val) vBIT(val,21,3)
-#define RX_QUEUE_3_PRIORITY(val) vBIT(val,29,3)
-#define RX_QUEUE_4_PRIORITY(val) vBIT(val,37,3)
-#define RX_QUEUE_5_PRIORITY(val) vBIT(val,45,3)
-#define RX_QUEUE_6_PRIORITY(val) vBIT(val,53,3)
-#define RX_QUEUE_7_PRIORITY(val) vBIT(val,61,3)
-
-#define RX_QUEUE_PRI_0 0 /* highest */
-#define RX_QUEUE_PRI_1 1
-#define RX_QUEUE_PRI_2 2
-#define RX_QUEUE_PRI_3 3
-#define RX_QUEUE_PRI_4 4
-#define RX_QUEUE_PRI_5 5
-#define RX_QUEUE_PRI_6 6
-#define RX_QUEUE_PRI_7 7 /* lowest */
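-/*
- * Illustration only (not from the original header): a strict-priority
- * setup could be composed from these macros as
- *   val64 = RX_QUEUE_0_PRIORITY(RX_QUEUE_PRI_0) |
- *           RX_QUEUE_7_PRIORITY(RX_QUEUE_PRI_7);
- */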
-
- u64 rx_w_round_robin_0;
- u64 rx_w_round_robin_1;
- u64 rx_w_round_robin_2;
- u64 rx_w_round_robin_3;
- u64 rx_w_round_robin_4;
-
- /* Per-ring controller regs */
-#define RX_MAX_RINGS 8
-#if 0
-#define RX_MAX_RINGS_SZ 0xFFFF /* 65535 */
-#define RX_MIN_RINGS_SZ 0x3F /* 63 */
-#endif
- u64 prc_rxd0_n[RX_MAX_RINGS];
- u64 prc_ctrl_n[RX_MAX_RINGS];
-#define PRC_CTRL_RC_ENABLED s2BIT(7)
-#define PRC_CTRL_RING_MODE (s2BIT(14)|s2BIT(15))
-#define PRC_CTRL_RING_MODE_1 vBIT(0,14,2)
-#define PRC_CTRL_RING_MODE_3 vBIT(1,14,2)
-#define PRC_CTRL_RING_MODE_5 vBIT(2,14,2)
-#define PRC_CTRL_RING_MODE_x vBIT(3,14,2)
-#define PRC_CTRL_NO_SNOOP (s2BIT(22)|s2BIT(23))
-#define PRC_CTRL_NO_SNOOP_DESC s2BIT(22)
-#define PRC_CTRL_NO_SNOOP_BUFF s2BIT(23)
-#define PRC_CTRL_BIMODAL_INTERRUPT s2BIT(37)
-#define PRC_CTRL_GROUP_READS s2BIT(38)
-#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
-
- u64 prc_alarm_action;
-#define PRC_ALARM_ACTION_RR_R0_STOP s2BIT(3)
-#define PRC_ALARM_ACTION_RW_R0_STOP s2BIT(7)
-#define PRC_ALARM_ACTION_RR_R1_STOP s2BIT(11)
-#define PRC_ALARM_ACTION_RW_R1_STOP s2BIT(15)
-#define PRC_ALARM_ACTION_RR_R2_STOP s2BIT(19)
-#define PRC_ALARM_ACTION_RW_R2_STOP s2BIT(23)
-#define PRC_ALARM_ACTION_RR_R3_STOP s2BIT(27)
-#define PRC_ALARM_ACTION_RW_R3_STOP s2BIT(31)
-#define PRC_ALARM_ACTION_RR_R4_STOP s2BIT(35)
-#define PRC_ALARM_ACTION_RW_R4_STOP s2BIT(39)
-#define PRC_ALARM_ACTION_RR_R5_STOP s2BIT(43)
-#define PRC_ALARM_ACTION_RW_R5_STOP s2BIT(47)
-#define PRC_ALARM_ACTION_RR_R6_STOP s2BIT(51)
-#define PRC_ALARM_ACTION_RW_R6_STOP s2BIT(55)
-#define PRC_ALARM_ACTION_RR_R7_STOP s2BIT(59)
-#define PRC_ALARM_ACTION_RW_R7_STOP s2BIT(63)
-
-/* Receive traffic interrupts */
- u64 rti_command_mem;
-#define RTI_CMD_MEM_WE s2BIT(7)
-#define RTI_CMD_MEM_STROBE s2BIT(15)
-#define RTI_CMD_MEM_STROBE_NEW_CMD s2BIT(15)
-#define RTI_CMD_MEM_STROBE_CMD_BEING_EXECUTED s2BIT(15)
-#define RTI_CMD_MEM_OFFSET(n) vBIT(n,29,3)
-
- u64 rti_data1_mem;
-#define RTI_DATA1_MEM_RX_TIMER_VAL(n) vBIT(n,3,29)
-#define RTI_DATA1_MEM_RX_TIMER_AC_EN s2BIT(38)
-#define RTI_DATA1_MEM_RX_TIMER_CI_EN s2BIT(39)
-#define RTI_DATA1_MEM_RX_URNG_A(n) vBIT(n,41,7)
-#define RTI_DATA1_MEM_RX_URNG_B(n) vBIT(n,49,7)
-#define RTI_DATA1_MEM_RX_URNG_C(n) vBIT(n,57,7)
-
- u64 rti_data2_mem;
-#define RTI_DATA2_MEM_RX_UFC_A(n) vBIT(n,0,16)
-#define RTI_DATA2_MEM_RX_UFC_B(n) vBIT(n,16,16)
-#define RTI_DATA2_MEM_RX_UFC_C(n) vBIT(n,32,16)
-#define RTI_DATA2_MEM_RX_UFC_D(n) vBIT(n,48,16)
-
- u64 rx_pa_cfg;
-#define RX_PA_CFG_IGNORE_FRM_ERR s2BIT(1)
-#define RX_PA_CFG_IGNORE_SNAP_OUI s2BIT(2)
-#define RX_PA_CFG_IGNORE_LLC_CTRL s2BIT(3)
-#define RX_PA_CFG_IGNORE_L2_ERR s2BIT(6)
-
- u64 unused_11_1;
-
- u64 ring_bump_counter1;
- u64 ring_bump_counter2;
-
- u8 unused12[0x700 - 0x1F0];
-
- u64 rxdma_debug_ctrl;
-
- u8 unused13[0x2000 - 0x1f08];
-
-/* Media Access Controller Registers */
- u64 mac_int_status;
- u64 mac_int_mask;
-#define MAC_INT_STATUS_TMAC_INT s2BIT(0)
-#define MAC_INT_STATUS_RMAC_INT s2BIT(1)
-
- u64 mac_tmac_err_reg;
-#define TMAC_ECC_SG_ERR s2BIT(7)
-#define TMAC_ECC_DB_ERR s2BIT(15)
-#define TMAC_TX_BUF_OVRN s2BIT(23)
-#define TMAC_TX_CRI_ERR s2BIT(31)
-#define TMAC_TX_SM_ERR s2BIT(39)
-#define TMAC_DESC_ECC_SG_ERR s2BIT(47)
-#define TMAC_DESC_ECC_DB_ERR s2BIT(55)
-
- u64 mac_tmac_err_mask;
- u64 mac_tmac_err_alarm;
-
- u64 mac_rmac_err_reg;
-#define RMAC_RX_BUFF_OVRN s2BIT(0)
-#define RMAC_FRM_RCVD_INT s2BIT(1)
-#define RMAC_UNUSED_INT s2BIT(2)
-#define RMAC_RTS_PNUM_ECC_SG_ERR s2BIT(5)
-#define RMAC_RTS_DS_ECC_SG_ERR s2BIT(6)
-#define RMAC_RD_BUF_ECC_SG_ERR s2BIT(7)
-#define RMAC_RTH_MAP_ECC_SG_ERR s2BIT(8)
-#define RMAC_RTH_SPDM_ECC_SG_ERR s2BIT(9)
-#define RMAC_RTS_VID_ECC_SG_ERR s2BIT(10)
-#define RMAC_DA_SHADOW_ECC_SG_ERR s2BIT(11)
-#define RMAC_RTS_PNUM_ECC_DB_ERR s2BIT(13)
-#define RMAC_RTS_DS_ECC_DB_ERR s2BIT(14)
-#define RMAC_RD_BUF_ECC_DB_ERR s2BIT(15)
-#define RMAC_RTH_MAP_ECC_DB_ERR s2BIT(16)
-#define RMAC_RTH_SPDM_ECC_DB_ERR s2BIT(17)
-#define RMAC_RTS_VID_ECC_DB_ERR s2BIT(18)
-#define RMAC_DA_SHADOW_ECC_DB_ERR s2BIT(19)
-#define RMAC_LINK_STATE_CHANGE_INT s2BIT(31)
-#define RMAC_RX_SM_ERR s2BIT(39)
-#define RMAC_SINGLE_ECC_ERR (s2BIT(5) | s2BIT(6) | s2BIT(7) |\
- s2BIT(8) | s2BIT(9) | s2BIT(10)|\
- s2BIT(11))
-#define RMAC_DOUBLE_ECC_ERR (s2BIT(13) | s2BIT(14) | s2BIT(15) |\
- s2BIT(16) | s2BIT(17) | s2BIT(18)|\
- s2BIT(19))
- u64 mac_rmac_err_mask;
- u64 mac_rmac_err_alarm;
-
- u8 unused14[0x100 - 0x40];
-
- u64 mac_cfg;
-#define MAC_CFG_TMAC_ENABLE s2BIT(0)
-#define MAC_CFG_RMAC_ENABLE s2BIT(1)
-#define MAC_CFG_LAN_NOT_WAN s2BIT(2)
-#define MAC_CFG_TMAC_LOOPBACK s2BIT(3)
-#define MAC_CFG_TMAC_APPEND_PAD s2BIT(4)
-#define MAC_CFG_RMAC_STRIP_FCS s2BIT(5)
-#define MAC_CFG_RMAC_STRIP_PAD s2BIT(6)
-#define MAC_CFG_RMAC_PROM_ENABLE s2BIT(7)
-#define MAC_RMAC_DISCARD_PFRM s2BIT(8)
-#define MAC_RMAC_BCAST_ENABLE s2BIT(9)
-#define MAC_RMAC_ALL_ADDR_ENABLE s2BIT(10)
-#define MAC_RMAC_INVLD_IPG_THR(val) vBIT(val,16,8)
-
- u64 tmac_avg_ipg;
-#define TMAC_AVG_IPG(val) vBIT(val,0,8)
-
- u64 rmac_max_pyld_len;
-#define RMAC_MAX_PYLD_LEN(val) vBIT(val,2,14)
-#define RMAC_MAX_PYLD_LEN_DEF vBIT(1500,2,14)
-#define RMAC_MAX_PYLD_LEN_JUMBO_DEF vBIT(9600,2,14)
-
- u64 rmac_err_cfg;
-#define RMAC_ERR_FCS s2BIT(0)
-#define RMAC_ERR_FCS_ACCEPT s2BIT(1)
-#define RMAC_ERR_TOO_LONG s2BIT(1)
-#define RMAC_ERR_TOO_LONG_ACCEPT s2BIT(1)
-#define RMAC_ERR_RUNT s2BIT(2)
-#define RMAC_ERR_RUNT_ACCEPT s2BIT(2)
-#define RMAC_ERR_LEN_MISMATCH s2BIT(3)
-#define RMAC_ERR_LEN_MISMATCH_ACCEPT s2BIT(3)
-
- u64 rmac_cfg_key;
-#define RMAC_CFG_KEY(val) vBIT(val,0,16)
-
-#define S2IO_MAC_ADDR_START_OFFSET 0
-
-#define S2IO_XENA_MAX_MC_ADDRESSES 64 /* multicast addresses */
-#define S2IO_HERC_MAX_MC_ADDRESSES 256
-
-#define S2IO_XENA_MAX_MAC_ADDRESSES 16
-#define S2IO_HERC_MAX_MAC_ADDRESSES 64
-
-#define S2IO_XENA_MC_ADDR_START_OFFSET 16
-#define S2IO_HERC_MC_ADDR_START_OFFSET 64
-
- u64 rmac_addr_cmd_mem;
-#define RMAC_ADDR_CMD_MEM_WE s2BIT(7)
-#define RMAC_ADDR_CMD_MEM_RD 0
-#define RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD s2BIT(15)
-#define RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING s2BIT(15)
-#define RMAC_ADDR_CMD_MEM_OFFSET(n) vBIT(n,26,6)
-
- u64 rmac_addr_data0_mem;
-#define RMAC_ADDR_DATA0_MEM_ADDR(n) vBIT(n,0,48)
-#define RMAC_ADDR_DATA0_MEM_USER s2BIT(48)
-
- u64 rmac_addr_data1_mem;
-#define RMAC_ADDR_DATA1_MEM_MASK(n) vBIT(n,0,48)
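-
-/*
- * Sketch of the access pattern (inferred from the TTI/RTI command
- * memories elsewhere in this driver, not spelled out here): write the
- * address into rmac_addr_data0_mem, then write RMAC_ADDR_CMD_MEM_WE |
- * RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | RMAC_ADDR_CMD_MEM_OFFSET(entry)
- * to rmac_addr_cmd_mem and poll until the strobe bit clears.
- */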
-
- u8 unused15[0x8];
-
-/*
- u64 rmac_addr_cfg;
-#define RMAC_ADDR_UCASTn_EN(n) mBIT(0)_n(n)
-#define RMAC_ADDR_MCASTn_EN(n) mBIT(0)_n(n)
-#define RMAC_ADDR_BCAST_EN vBIT(0)_48
-#define RMAC_ADDR_ALL_ADDR_EN vBIT(0)_49
-*/
- u64 tmac_ipg_cfg;
-
- u64 rmac_pause_cfg;
-#define RMAC_PAUSE_GEN s2BIT(0)
-#define RMAC_PAUSE_GEN_ENABLE s2BIT(0)
-#define RMAC_PAUSE_RX s2BIT(1)
-#define RMAC_PAUSE_RX_ENABLE s2BIT(1)
-#define RMAC_PAUSE_HG_PTIME_DEF vBIT(0xFFFF,16,16)
-#define RMAC_PAUSE_HG_PTIME(val) vBIT(val,16,16)
-
- u64 rmac_red_cfg;
-
- u64 rmac_red_rate_q0q3;
- u64 rmac_red_rate_q4q7;
-
- u64 mac_link_util;
-#define MAC_TX_LINK_UTIL vBIT(0xFE,1,7)
-#define MAC_TX_LINK_UTIL_DISABLE vBIT(0xF,8,4)
-#define MAC_TX_LINK_UTIL_VAL(n) vBIT(n,8,4)
-#define MAC_RX_LINK_UTIL vBIT(0xFE,33,7)
-#define MAC_RX_LINK_UTIL_DISABLE vBIT(0xF,40,4)
-#define MAC_RX_LINK_UTIL_VAL(n) vBIT(n,40,4)
-
-#define MAC_LINK_UTIL_DISABLE (MAC_TX_LINK_UTIL_DISABLE | \
- MAC_RX_LINK_UTIL_DISABLE)
-
- u64 rmac_invalid_ipg;
-
-/* rx traffic steering */
-#define MAC_RTS_FRM_LEN_SET(len) vBIT(len,2,14)
- u64 rts_frm_len_n[8];
-
- u64 rts_qos_steering;
-
-#define MAX_DIX_MAP 4
- u64 rts_dix_map_n[MAX_DIX_MAP];
-#define RTS_DIX_MAP_ETYPE(val) vBIT(val,0,16)
-#define RTS_DIX_MAP_SCW(val) s2BIT(val,21)
-
- u64 rts_q_alternates;
- u64 rts_default_q;
-
- u64 rts_ctrl;
-#define RTS_CTRL_IGNORE_SNAP_OUI s2BIT(2)
-#define RTS_CTRL_IGNORE_LLC_CTRL s2BIT(3)
-
- u64 rts_pn_cam_ctrl;
-#define RTS_PN_CAM_CTRL_WE s2BIT(7)
-#define RTS_PN_CAM_CTRL_STROBE_NEW_CMD s2BIT(15)
-#define RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED s2BIT(15)
-#define RTS_PN_CAM_CTRL_OFFSET(n) vBIT(n,24,8)
- u64 rts_pn_cam_data;
-#define RTS_PN_CAM_DATA_TCP_SELECT s2BIT(7)
-#define RTS_PN_CAM_DATA_PORT(val) vBIT(val,8,16)
-#define RTS_PN_CAM_DATA_SCW(val) vBIT(val,24,8)
-
- u64 rts_ds_mem_ctrl;
-#define RTS_DS_MEM_CTRL_WE s2BIT(7)
-#define RTS_DS_MEM_CTRL_STROBE_NEW_CMD s2BIT(15)
-#define RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED s2BIT(15)
-#define RTS_DS_MEM_CTRL_OFFSET(n) vBIT(n,26,6)
- u64 rts_ds_mem_data;
-#define RTS_DS_MEM_DATA(n) vBIT(n,0,8)
-
- u8 unused16[0x700 - 0x220];
-
- u64 mac_debug_ctrl;
-#define MAC_DBG_ACTIVITY_VALUE 0x411040400000000ULL
-
- u8 unused17[0x2800 - 0x2708];
-
-/* memory controller registers */
- u64 mc_int_status;
-#define MC_INT_STATUS_MC_INT s2BIT(0)
- u64 mc_int_mask;
-#define MC_INT_MASK_MC_INT s2BIT(0)
-
- u64 mc_err_reg;
-#define MC_ERR_REG_ECC_DB_ERR_L s2BIT(14)
-#define MC_ERR_REG_ECC_DB_ERR_U s2BIT(15)
-#define MC_ERR_REG_MIRI_ECC_DB_ERR_0 s2BIT(18)
-#define MC_ERR_REG_MIRI_ECC_DB_ERR_1 s2BIT(20)
-#define MC_ERR_REG_MIRI_CRI_ERR_0 s2BIT(22)
-#define MC_ERR_REG_MIRI_CRI_ERR_1 s2BIT(23)
-#define MC_ERR_REG_SM_ERR s2BIT(31)
-#define MC_ERR_REG_ECC_ALL_SNG (s2BIT(2) | s2BIT(3) | s2BIT(4) | s2BIT(5) |\
- s2BIT(17) | s2BIT(19))
-#define MC_ERR_REG_ECC_ALL_DBL (s2BIT(10) | s2BIT(11) | s2BIT(12) |\
- s2BIT(13) | s2BIT(18) | s2BIT(20))
-#define PLL_LOCK_N s2BIT(39)
- u64 mc_err_mask;
- u64 mc_err_alarm;
-
- u8 unused18[0x100 - 0x28];
-
-/* MC configuration */
- u64 rx_queue_cfg;
-#define RX_QUEUE_CFG_Q0_SZ(n) vBIT(n,0,8)
-#define RX_QUEUE_CFG_Q1_SZ(n) vBIT(n,8,8)
-#define RX_QUEUE_CFG_Q2_SZ(n) vBIT(n,16,8)
-#define RX_QUEUE_CFG_Q3_SZ(n) vBIT(n,24,8)
-#define RX_QUEUE_CFG_Q4_SZ(n) vBIT(n,32,8)
-#define RX_QUEUE_CFG_Q5_SZ(n) vBIT(n,40,8)
-#define RX_QUEUE_CFG_Q6_SZ(n) vBIT(n,48,8)
-#define RX_QUEUE_CFG_Q7_SZ(n) vBIT(n,56,8)
-
- u64 mc_rldram_mrs;
-#define MC_RLDRAM_QUEUE_SIZE_ENABLE s2BIT(39)
-#define MC_RLDRAM_MRS_ENABLE s2BIT(47)
-
- u64 mc_rldram_interleave;
-
- u64 mc_pause_thresh_q0q3;
- u64 mc_pause_thresh_q4q7;
-
- u64 mc_red_thresh_q[8];
-
- u8 unused19[0x200 - 0x168];
- u64 mc_rldram_ref_per;
- u8 unused20[0x220 - 0x208];
- u64 mc_rldram_test_ctrl;
-#define MC_RLDRAM_TEST_MODE s2BIT(47)
-#define MC_RLDRAM_TEST_WRITE s2BIT(7)
-#define MC_RLDRAM_TEST_GO s2BIT(15)
-#define MC_RLDRAM_TEST_DONE s2BIT(23)
-#define MC_RLDRAM_TEST_PASS s2BIT(31)
-
- u8 unused21[0x240 - 0x228];
- u64 mc_rldram_test_add;
- u8 unused22[0x260 - 0x248];
- u64 mc_rldram_test_d0;
- u8 unused23[0x280 - 0x268];
- u64 mc_rldram_test_d1;
- u8 unused24[0x300 - 0x288];
- u64 mc_rldram_test_d2;
-
- u8 unused24_1[0x360 - 0x308];
- u64 mc_rldram_ctrl;
-#define MC_RLDRAM_ENABLE_ODT s2BIT(7)
-
- u8 unused24_2[0x640 - 0x368];
- u64 mc_rldram_ref_per_herc;
-#define MC_RLDRAM_SET_REF_PERIOD(val) vBIT(val, 0, 16)
-
- u8 unused24_3[0x660 - 0x648];
- u64 mc_rldram_mrs_herc;
-
- u8 unused25[0x700 - 0x668];
- u64 mc_debug_ctrl;
-
- u8 unused26[0x3000 - 0x2f08];
-
-/* XGXS */
- /* XGXS control registers */
-
- u64 xgxs_int_status;
-#define XGXS_INT_STATUS_TXGXS s2BIT(0)
-#define XGXS_INT_STATUS_RXGXS s2BIT(1)
- u64 xgxs_int_mask;
-#define XGXS_INT_MASK_TXGXS s2BIT(0)
-#define XGXS_INT_MASK_RXGXS s2BIT(1)
-
- u64 xgxs_txgxs_err_reg;
-#define TXGXS_ECC_SG_ERR s2BIT(7)
-#define TXGXS_ECC_DB_ERR s2BIT(15)
-#define TXGXS_ESTORE_UFLOW s2BIT(31)
-#define TXGXS_TX_SM_ERR s2BIT(39)
-
- u64 xgxs_txgxs_err_mask;
- u64 xgxs_txgxs_err_alarm;
-
- u64 xgxs_rxgxs_err_reg;
-#define RXGXS_ESTORE_OFLOW s2BIT(7)
-#define RXGXS_RX_SM_ERR s2BIT(39)
- u64 xgxs_rxgxs_err_mask;
- u64 xgxs_rxgxs_err_alarm;
-
- u8 unused27[0x100 - 0x40];
-
- u64 xgxs_cfg;
- u64 xgxs_status;
-
- u64 xgxs_cfg_key;
- u64 xgxs_efifo_cfg; /* CHANGED */
- u64 rxgxs_ber_0; /* CHANGED */
- u64 rxgxs_ber_1; /* CHANGED */
-
- u64 spi_control;
-#define SPI_CONTROL_KEY(key) vBIT(key,0,4)
-#define SPI_CONTROL_BYTECNT(cnt) vBIT(cnt,29,3)
-#define SPI_CONTROL_CMD(cmd) vBIT(cmd,32,8)
-#define SPI_CONTROL_ADDR(addr) vBIT(addr,40,24)
-#define SPI_CONTROL_SEL1 s2BIT(4)
-#define SPI_CONTROL_REQ s2BIT(7)
-#define SPI_CONTROL_NACK s2BIT(5)
-#define SPI_CONTROL_DONE s2BIT(6)
- u64 spi_data;
-#define SPI_DATA_WRITE(data,len) vBIT(data,0,len)
-};
-
-#define XENA_REG_SPACE sizeof(struct XENA_dev_config)
-#define XENA_EEPROM_SPACE (0x01 << 11)
-
-#endif /* _REGS_H */
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
deleted file mode 100644
index 1e55ccb4822b..000000000000
--- a/drivers/net/ethernet/neterion/s2io.c
+++ /dev/null
@@ -1,8572 +0,0 @@
-/************************************************************************
- * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
- * Copyright(c) 2002-2010 Exar Corp.
- *
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * Credits:
- * Jeff Garzik : For pointing out the improper error condition
- * check in the s2io_xmit routine and also some
- * issues in the Tx watch dog function. Also for
- * patiently answering all those innumerable
- * questions regarding the 2.6 porting issues.
- * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
- * macros available only in 2.6 Kernel.
- * Francois Romieu : For pointing out all code part that were
- * deprecated and also styling related comments.
- * Grant Grundler : For helping me get rid of some Architecture
- * dependent code.
- * Christopher Hellwig : Some more 2.6 specific issues in the driver.
- *
- * The module loadable parameters that are supported by the driver and a brief
- * explanation of all the variables.
- *
- * rx_ring_num : This can be used to program the number of receive rings used
- * in the driver.
- * rx_ring_sz: This defines the number of receive blocks each ring can have.
- * This is also an array of size 8.
- * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
- * values are 1, 2.
- * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
- * tx_fifo_len: This too is an array of 8. Each element defines the number of
- * Tx descriptors that can be associated with each corresponding FIFO.
- * intr_type: This defines the type of interrupt. The values can be 0(INTA),
- * 2(MSI_X). Default value is '2(MSI_X)'
- * lro_max_pkts: This parameter defines the maximum number of packets that
- * can be aggregated into a single large packet
- * napi: This parameter is used to enable/disable NAPI (polling Rx)
- * Possible values '1' for enable and '0' for disable. Default is '1'
- * vlan_tag_strip: This can be used to enable or disable vlan stripping.
- * Possible values '1' for enable, '0' for disable.
- * Default is '2' - which means disable in promisc mode
- * and enable in non-promiscuous mode.
- * multiq: This parameter is used to enable/disable MULTIQUEUE support.
- * Possible values '1' for enable and '0' for disable. Default is '0'
- ************************************************************************/
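-
-/*
- * For illustration only (not part of the original comment above): a
- * typical load using these parameters might look like
- *
- *   modprobe s2io rx_ring_num=2 rx_ring_mode=1 intr_type=2 napi=1
- *
- * with the array parameters given as comma separated lists,
- * e.g. tx_fifo_len=4096,1024.
- */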
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/mdio.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/stddef.h>
-#include <linux/ioctl.h>
-#include <linux/timex.h>
-#include <linux/ethtool.h>
-#include <linux/workqueue.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/slab.h>
-#include <linux/prefetch.h>
-#include <net/tcp.h>
-#include <net/checksum.h>
-
-#include <asm/div64.h>
-#include <asm/irq.h>
-
-/* local include */
-#include "s2io.h"
-#include "s2io-regs.h"
-
-#define DRV_VERSION "2.0.26.28"
-
-/* S2io Driver name & version. */
-static const char s2io_driver_name[] = "Neterion";
-static const char s2io_driver_version[] = DRV_VERSION;
-
-static const int rxd_size[2] = {32, 48};
-static const int rxd_count[2] = {127, 85};
-
-static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
-{
- int ret;
-
- ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
- (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
-
- return ret;
-}
-
-/*
- * Cards with the following subsystem_ids have a link state indication
- * problem: 600B, 600C, 600D, 640B, 640C and 640D.
- * The macro below identifies these cards given the subsystem_id.
- */
-#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
- (dev_type == XFRAME_I_DEVICE) ? \
- ((((subid >= 0x600B) && (subid <= 0x600D)) || \
- ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
-
-#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
- ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
-
-static inline int is_s2io_card_up(const struct s2io_nic *sp)
-{
- return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
-}
-
-/* Ethtool related variables and Macros. */
-static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
- "Register test\t(offline)",
- "Eeprom test\t(offline)",
- "Link test\t(online)",
- "RLDRAM test\t(offline)",
- "BIST Test\t(offline)"
-};
-
-static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
- {"tmac_frms"},
- {"tmac_data_octets"},
- {"tmac_drop_frms"},
- {"tmac_mcst_frms"},
- {"tmac_bcst_frms"},
- {"tmac_pause_ctrl_frms"},
- {"tmac_ttl_octets"},
- {"tmac_ucst_frms"},
- {"tmac_nucst_frms"},
- {"tmac_any_err_frms"},
- {"tmac_ttl_less_fb_octets"},
- {"tmac_vld_ip_octets"},
- {"tmac_vld_ip"},
- {"tmac_drop_ip"},
- {"tmac_icmp"},
- {"tmac_rst_tcp"},
- {"tmac_tcp"},
- {"tmac_udp"},
- {"rmac_vld_frms"},
- {"rmac_data_octets"},
- {"rmac_fcs_err_frms"},
- {"rmac_drop_frms"},
- {"rmac_vld_mcst_frms"},
- {"rmac_vld_bcst_frms"},
- {"rmac_in_rng_len_err_frms"},
- {"rmac_out_rng_len_err_frms"},
- {"rmac_long_frms"},
- {"rmac_pause_ctrl_frms"},
- {"rmac_unsup_ctrl_frms"},
- {"rmac_ttl_octets"},
- {"rmac_accepted_ucst_frms"},
- {"rmac_accepted_nucst_frms"},
- {"rmac_discarded_frms"},
- {"rmac_drop_events"},
- {"rmac_ttl_less_fb_octets"},
- {"rmac_ttl_frms"},
- {"rmac_usized_frms"},
- {"rmac_osized_frms"},
- {"rmac_frag_frms"},
- {"rmac_jabber_frms"},
- {"rmac_ttl_64_frms"},
- {"rmac_ttl_65_127_frms"},
- {"rmac_ttl_128_255_frms"},
- {"rmac_ttl_256_511_frms"},
- {"rmac_ttl_512_1023_frms"},
- {"rmac_ttl_1024_1518_frms"},
- {"rmac_ip"},
- {"rmac_ip_octets"},
- {"rmac_hdr_err_ip"},
- {"rmac_drop_ip"},
- {"rmac_icmp"},
- {"rmac_tcp"},
- {"rmac_udp"},
- {"rmac_err_drp_udp"},
- {"rmac_xgmii_err_sym"},
- {"rmac_frms_q0"},
- {"rmac_frms_q1"},
- {"rmac_frms_q2"},
- {"rmac_frms_q3"},
- {"rmac_frms_q4"},
- {"rmac_frms_q5"},
- {"rmac_frms_q6"},
- {"rmac_frms_q7"},
- {"rmac_full_q0"},
- {"rmac_full_q1"},
- {"rmac_full_q2"},
- {"rmac_full_q3"},
- {"rmac_full_q4"},
- {"rmac_full_q5"},
- {"rmac_full_q6"},
- {"rmac_full_q7"},
- {"rmac_pause_cnt"},
- {"rmac_xgmii_data_err_cnt"},
- {"rmac_xgmii_ctrl_err_cnt"},
- {"rmac_accepted_ip"},
- {"rmac_err_tcp"},
- {"rd_req_cnt"},
- {"new_rd_req_cnt"},
- {"new_rd_req_rtry_cnt"},
- {"rd_rtry_cnt"},
- {"wr_rtry_rd_ack_cnt"},
- {"wr_req_cnt"},
- {"new_wr_req_cnt"},
- {"new_wr_req_rtry_cnt"},
- {"wr_rtry_cnt"},
- {"wr_disc_cnt"},
- {"rd_rtry_wr_ack_cnt"},
- {"txp_wr_cnt"},
- {"txd_rd_cnt"},
- {"txd_wr_cnt"},
- {"rxd_rd_cnt"},
- {"rxd_wr_cnt"},
- {"txf_rd_cnt"},
- {"rxf_wr_cnt"}
-};
-
-static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
- {"rmac_ttl_1519_4095_frms"},
- {"rmac_ttl_4096_8191_frms"},
- {"rmac_ttl_8192_max_frms"},
- {"rmac_ttl_gt_max_frms"},
- {"rmac_osized_alt_frms"},
- {"rmac_jabber_alt_frms"},
- {"rmac_gt_max_alt_frms"},
- {"rmac_vlan_frms"},
- {"rmac_len_discard"},
- {"rmac_fcs_discard"},
- {"rmac_pf_discard"},
- {"rmac_da_discard"},
- {"rmac_red_discard"},
- {"rmac_rts_discard"},
- {"rmac_ingm_full_discard"},
- {"link_fault_cnt"}
-};
-
-static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
- {"\n DRIVER STATISTICS"},
- {"single_bit_ecc_errs"},
- {"double_bit_ecc_errs"},
- {"parity_err_cnt"},
- {"serious_err_cnt"},
- {"soft_reset_cnt"},
- {"fifo_full_cnt"},
- {"ring_0_full_cnt"},
- {"ring_1_full_cnt"},
- {"ring_2_full_cnt"},
- {"ring_3_full_cnt"},
- {"ring_4_full_cnt"},
- {"ring_5_full_cnt"},
- {"ring_6_full_cnt"},
- {"ring_7_full_cnt"},
- {"alarm_transceiver_temp_high"},
- {"alarm_transceiver_temp_low"},
- {"alarm_laser_bias_current_high"},
- {"alarm_laser_bias_current_low"},
- {"alarm_laser_output_power_high"},
- {"alarm_laser_output_power_low"},
- {"warn_transceiver_temp_high"},
- {"warn_transceiver_temp_low"},
- {"warn_laser_bias_current_high"},
- {"warn_laser_bias_current_low"},
- {"warn_laser_output_power_high"},
- {"warn_laser_output_power_low"},
- {"lro_aggregated_pkts"},
- {"lro_flush_both_count"},
- {"lro_out_of_sequence_pkts"},
- {"lro_flush_due_to_max_pkts"},
- {"lro_avg_aggr_pkts"},
- {"mem_alloc_fail_cnt"},
- {"pci_map_fail_cnt"},
- {"watchdog_timer_cnt"},
- {"mem_allocated"},
- {"mem_freed"},
- {"link_up_cnt"},
- {"link_down_cnt"},
- {"link_up_time"},
- {"link_down_time"},
- {"tx_tcode_buf_abort_cnt"},
- {"tx_tcode_desc_abort_cnt"},
- {"tx_tcode_parity_err_cnt"},
- {"tx_tcode_link_loss_cnt"},
- {"tx_tcode_list_proc_err_cnt"},
- {"rx_tcode_parity_err_cnt"},
- {"rx_tcode_abort_cnt"},
- {"rx_tcode_parity_abort_cnt"},
- {"rx_tcode_rda_fail_cnt"},
- {"rx_tcode_unkn_prot_cnt"},
- {"rx_tcode_fcs_err_cnt"},
- {"rx_tcode_buf_size_err_cnt"},
- {"rx_tcode_rxd_corrupt_cnt"},
- {"rx_tcode_unkn_err_cnt"},
- {"tda_err_cnt"},
- {"pfc_err_cnt"},
- {"pcc_err_cnt"},
- {"tti_err_cnt"},
- {"tpa_err_cnt"},
- {"sm_err_cnt"},
- {"lso_err_cnt"},
- {"mac_tmac_err_cnt"},
- {"mac_rmac_err_cnt"},
- {"xgxs_txgxs_err_cnt"},
- {"xgxs_rxgxs_err_cnt"},
- {"rc_err_cnt"},
- {"prc_pcix_err_cnt"},
- {"rpa_err_cnt"},
- {"rda_err_cnt"},
- {"rti_err_cnt"},
- {"mc_err_cnt"}
-};
-
-#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
-#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
-#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
-
-#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
-#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
-
-#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
-#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
-
-#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
-#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
-
-/* copy mac addr to def_mac_addr array */
-static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
-{
- sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
- sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
- sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
- sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
- sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
- sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
-}
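-
-/*
- * Example with illustrative values: for mac_addr = 0x001122334455ULL
- * the bytes land as mac_addr[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
- * i.e. the most significant byte of the 48-bit value becomes byte 0.
- */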
-
-/*
- * Constants to be programmed into the Xena's registers, to configure
- * the XAUI.
- */
-
-#define END_SIGN 0x0
-static const u64 herc_act_dtx_cfg[] = {
- /* Set address */
- 0x8000051536750000ULL, 0x80000515367500E0ULL,
- /* Write data */
- 0x8000051536750004ULL, 0x80000515367500E4ULL,
- /* Set address */
- 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
- /* Write data */
- 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
- /* Set address */
- 0x801205150D440000ULL, 0x801205150D4400E0ULL,
- /* Write data */
- 0x801205150D440004ULL, 0x801205150D4400E4ULL,
- /* Set address */
- 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
- /* Write data */
- 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
- /* Done */
- END_SIGN
-};
-
-static const u64 xena_dtx_cfg[] = {
- /* Set address */
- 0x8000051500000000ULL, 0x80000515000000E0ULL,
- /* Write data */
- 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
- /* Set address */
- 0x8001051500000000ULL, 0x80010515000000E0ULL,
- /* Write data */
- 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
- /* Set address */
- 0x8002051500000000ULL, 0x80020515000000E0ULL,
- /* Write data */
- 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
- END_SIGN
-};
-
-/*
- * Constants for fixing the MAC address problem seen mostly on
- * Alpha machines.
- */
-static const u64 fix_mac[] = {
- 0x0060000000000000ULL, 0x0060600000000000ULL,
- 0x0040600000000000ULL, 0x0000600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0000600000000000ULL,
- 0x0040600000000000ULL, 0x0060600000000000ULL,
- END_SIGN
-};
-
-MODULE_DESCRIPTION("Neterion 10GbE driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-/* Module Loadable parameters. */
-S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
-S2IO_PARM_INT(rx_ring_num, 1);
-S2IO_PARM_INT(multiq, 0);
-S2IO_PARM_INT(rx_ring_mode, 1);
-S2IO_PARM_INT(use_continuous_tx_intrs, 1);
-S2IO_PARM_INT(rmac_pause_time, 0x100);
-S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
-S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
-S2IO_PARM_INT(shared_splits, 0);
-S2IO_PARM_INT(tmac_util_period, 5);
-S2IO_PARM_INT(rmac_util_period, 5);
-S2IO_PARM_INT(l3l4hdr_size, 128);
-/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
-S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
-/* Frequency of Rx desc syncs expressed as power of 2 */
-S2IO_PARM_INT(rxsync_frequency, 3);
-/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
-S2IO_PARM_INT(intr_type, 2);
-/* Large receive offload feature */
-
-/* Max pkts to be aggregated by LRO at one time. If not specified,
- * aggregation happens until we hit the max IP packet size (64K)
- */
-S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
-S2IO_PARM_INT(indicate_max_pkts, 0);
-
-S2IO_PARM_INT(napi, 1);
-S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
-
-static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
-{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
-static unsigned int rx_ring_sz[MAX_RX_RINGS] =
-{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
-static unsigned int rts_frm_len[MAX_RX_RINGS] =
-{[0 ...(MAX_RX_RINGS - 1)] = 0 };
-
-module_param_array(tx_fifo_len, uint, NULL, 0);
-module_param_array(rx_ring_sz, uint, NULL, 0);
-module_param_array(rts_frm_len, uint, NULL, 0);
-
-/*
- * S2IO device table.
- * This table lists all the devices that this driver supports.
- */
-static const struct pci_device_id s2io_tbl[] = {
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
- PCI_ANY_ID, PCI_ANY_ID},
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
- PCI_ANY_ID, PCI_ANY_ID},
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
- PCI_ANY_ID, PCI_ANY_ID},
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
- PCI_ANY_ID, PCI_ANY_ID},
- {0,}
-};
-
-MODULE_DEVICE_TABLE(pci, s2io_tbl);
-
-static const struct pci_error_handlers s2io_err_handler = {
- .error_detected = s2io_io_error_detected,
- .slot_reset = s2io_io_slot_reset,
- .resume = s2io_io_resume,
-};
-
-static struct pci_driver s2io_driver = {
- .name = "S2IO",
- .id_table = s2io_tbl,
- .probe = s2io_init_nic,
- .remove = s2io_rem_nic,
- .err_handler = &s2io_err_handler,
-};
-
-/* A helper macro used by both the init and free shared_mem functions. */
-#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
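-
-/*
- * Worked example (assumed figures, for illustration): with a 4 KB page
- * and an assumed TxD list size of 512 bytes, lst_per_page = 8, so a
- * FIFO of 100 descriptor lists needs TXD_MEM_PAGE_CNT(100, 8) = 13
- * pages.
- */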
-
-/* netqueue manipulation helper functions */
-static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
-{
- if (!sp->config.multiq) {
- int i;
-
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
- }
- netif_tx_stop_all_queues(sp->dev);
-}
-
-static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
-{
- if (!sp->config.multiq)
- sp->mac_control.fifos[fifo_no].queue_state =
- FIFO_QUEUE_STOP;
-
- netif_tx_stop_all_queues(sp->dev);
-}
-
-static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
-{
- if (!sp->config.multiq) {
- int i;
-
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
- }
- netif_tx_start_all_queues(sp->dev);
-}
-
-static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
-{
- if (!sp->config.multiq) {
- int i;
-
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
- }
- netif_tx_wake_all_queues(sp->dev);
-}
-
-static inline void s2io_wake_tx_queue(
- struct fifo_info *fifo, int cnt, u8 multiq)
-{
- if (multiq) {
- if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
- netif_wake_subqueue(fifo->dev, fifo->fifo_no);
- } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
- if (netif_queue_stopped(fifo->dev)) {
- fifo->queue_state = FIFO_QUEUE_START;
- netif_wake_queue(fifo->dev);
- }
- }
-}
-
-/**
- * init_shared_mem - Allocation and Initialization of Memory
- * @nic: Device private variable.
- * Description: The function allocates all the memory areas shared
- * between the NIC and the driver. This includes Tx descriptors,
- * Rx descriptors and the statistics block.
- */
-
-static int init_shared_mem(struct s2io_nic *nic)
-{
- u32 size;
- void *tmp_v_addr, *tmp_v_addr_next;
- dma_addr_t tmp_p_addr, tmp_p_addr_next;
- struct RxD_block *pre_rxd_blk = NULL;
- int i, j, blk_cnt;
- int lst_size, lst_per_page;
- struct net_device *dev = nic->dev;
- unsigned long tmp;
- struct buffAdd *ba;
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
- unsigned long long mem_allocated = 0;
-
- /* Allocation and initialization of TXDLs in FIFOs */
- size = 0;
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- size += tx_cfg->fifo_len;
- }
- if (size > MAX_AVAILABLE_TXDS) {
- DBG_PRINT(ERR_DBG,
- "Too many TxDs requested: %d, max supported: %d\n",
- size, MAX_AVAILABLE_TXDS);
- return -EINVAL;
- }
-
- size = 0;
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- size = tx_cfg->fifo_len;
- /*
- * Legal values are from 2 to 8192
- */
- if (size < 2) {
- DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
- "Valid lengths are 2 through 8192\n",
- i, size);
- return -EINVAL;
- }
- }
-
- lst_size = (sizeof(struct TxD) * config->max_txds);
- lst_per_page = PAGE_SIZE / lst_size;
-
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
- int fifo_len = tx_cfg->fifo_len;
- int list_holder_size = fifo_len * sizeof(struct list_info_hold);
-
- fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
- if (!fifo->list_info) {
- DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
- return -ENOMEM;
- }
- mem_allocated += list_holder_size;
- }
- for (i = 0; i < config->tx_fifo_num; i++) {
- int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
- lst_per_page);
- struct fifo_info *fifo = &mac_control->fifos[i];
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- fifo->tx_curr_put_info.offset = 0;
- fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
- fifo->tx_curr_get_info.offset = 0;
- fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
- fifo->fifo_no = i;
- fifo->nic = nic;
- fifo->max_txds = MAX_SKB_FRAGS + 2;
- fifo->dev = dev;
-
- for (j = 0; j < page_num; j++) {
- int k = 0;
- dma_addr_t tmp_p;
- void *tmp_v;
- tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
- &tmp_p, GFP_KERNEL);
- if (!tmp_v) {
- DBG_PRINT(INFO_DBG,
- "dma_alloc_coherent failed for TxDL\n");
- return -ENOMEM;
- }
- /* If we got a zero DMA address (can happen on
- * certain platforms like PPC), reallocate.
- * Store the virtual address of the page we don't want,
- * to be freed later.
- */
- if (!tmp_p) {
- mac_control->zerodma_virt_addr = tmp_v;
- DBG_PRINT(INIT_DBG,
- "%s: Zero DMA address for TxDL. "
- "Virtual address %p\n",
- dev->name, tmp_v);
- tmp_v = dma_alloc_coherent(&nic->pdev->dev,
- PAGE_SIZE, &tmp_p,
- GFP_KERNEL);
- if (!tmp_v) {
- DBG_PRINT(INFO_DBG,
- "dma_alloc_coherent failed for TxDL\n");
- return -ENOMEM;
- }
- mem_allocated += PAGE_SIZE;
- }
- while (k < lst_per_page) {
- int l = (j * lst_per_page) + k;
- if (l == tx_cfg->fifo_len)
- break;
- fifo->list_info[l].list_virt_addr =
- tmp_v + (k * lst_size);
- fifo->list_info[l].list_phy_addr =
- tmp_p + (k * lst_size);
- k++;
- }
- }
- }
-
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- size = tx_cfg->fifo_len;
- fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
- if (!fifo->ufo_in_band_v)
- return -ENOMEM;
- mem_allocated += (size * sizeof(u64));
- }
-
- /* Allocation and initialization of RXDs in Rings */
- size = 0;
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
- struct ring_info *ring = &mac_control->rings[i];
-
- if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
- DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
- "multiple of RxDs per Block\n",
- dev->name, i);
- return FAILURE;
- }
- size += rx_cfg->num_rxd;
- ring->block_count = rx_cfg->num_rxd /
- (rxd_count[nic->rxd_mode] + 1);
- ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
- }
- if (nic->rxd_mode == RXD_MODE_1)
- size = (size * (sizeof(struct RxD1)));
- else
- size = (size * (sizeof(struct RxD3)));
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
- struct ring_info *ring = &mac_control->rings[i];
-
- ring->rx_curr_get_info.block_index = 0;
- ring->rx_curr_get_info.offset = 0;
- ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
- ring->rx_curr_put_info.block_index = 0;
- ring->rx_curr_put_info.offset = 0;
- ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
- ring->nic = nic;
- ring->ring_no = i;
-
- blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
- /* Allocating all the Rx blocks */
- for (j = 0; j < blk_cnt; j++) {
- struct rx_block_info *rx_blocks;
- int l;
-
- rx_blocks = &ring->rx_blocks[j];
- size = SIZE_OF_BLOCK; /* size is always page size */
- tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
- &tmp_p_addr, GFP_KERNEL);
- if (tmp_v_addr == NULL) {
- /*
- * In case of failure, free_shared_mem()
- * is called, which should free any
- * memory that was allocated before the
- * failure happened.
- */
- rx_blocks->block_virt_addr = tmp_v_addr;
- return -ENOMEM;
- }
- mem_allocated += size;
-
- size = sizeof(struct rxd_info) *
- rxd_count[nic->rxd_mode];
- rx_blocks->block_virt_addr = tmp_v_addr;
- rx_blocks->block_dma_addr = tmp_p_addr;
- rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
- if (!rx_blocks->rxds)
- return -ENOMEM;
- mem_allocated += size;
- for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
- rx_blocks->rxds[l].virt_addr =
- rx_blocks->block_virt_addr +
- (rxd_size[nic->rxd_mode] * l);
- rx_blocks->rxds[l].dma_addr =
- rx_blocks->block_dma_addr +
- (rxd_size[nic->rxd_mode] * l);
- }
- }
- /* Interlinking all Rx Blocks */
- for (j = 0; j < blk_cnt; j++) {
- int next = (j + 1) % blk_cnt;
- tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
- tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
- tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
- tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
-
- pre_rxd_blk = tmp_v_addr;
- pre_rxd_blk->reserved_2_pNext_RxD_block =
- (unsigned long)tmp_v_addr_next;
- pre_rxd_blk->pNext_RxD_Blk_physical =
- (u64)tmp_p_addr_next;
- }
- }
- if (nic->rxd_mode == RXD_MODE_3B) {
- /*
- * Allocation of Storages for buffer addresses in 2BUFF mode
- * and the buffers as well.
- */
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
- struct ring_info *ring = &mac_control->rings[i];
-
- blk_cnt = rx_cfg->num_rxd /
- (rxd_count[nic->rxd_mode] + 1);
- size = sizeof(struct buffAdd *) * blk_cnt;
- ring->ba = kmalloc(size, GFP_KERNEL);
- if (!ring->ba)
- return -ENOMEM;
- mem_allocated += size;
- for (j = 0; j < blk_cnt; j++) {
- int k = 0;
-
- size = sizeof(struct buffAdd) *
- (rxd_count[nic->rxd_mode] + 1);
- ring->ba[j] = kmalloc(size, GFP_KERNEL);
- if (!ring->ba[j])
- return -ENOMEM;
- mem_allocated += size;
- while (k != rxd_count[nic->rxd_mode]) {
- ba = &ring->ba[j][k];
- size = BUF0_LEN + ALIGN_SIZE;
- ba->ba_0_org = kmalloc(size, GFP_KERNEL);
- if (!ba->ba_0_org)
- return -ENOMEM;
- mem_allocated += size;
- tmp = (unsigned long)ba->ba_0_org;
- tmp += ALIGN_SIZE;
- tmp &= ~((unsigned long)ALIGN_SIZE);
- ba->ba_0 = (void *)tmp;
-
- size = BUF1_LEN + ALIGN_SIZE;
- ba->ba_1_org = kmalloc(size, GFP_KERNEL);
- if (!ba->ba_1_org)
- return -ENOMEM;
- mem_allocated += size;
- tmp = (unsigned long)ba->ba_1_org;
- tmp += ALIGN_SIZE;
- tmp &= ~((unsigned long)ALIGN_SIZE);
- ba->ba_1 = (void *)tmp;
- k++;
- }
- }
- }
- }
-
- /* Allocation and initialization of Statistics block */
- size = sizeof(struct stat_block);
- mac_control->stats_mem =
- dma_alloc_coherent(&nic->pdev->dev, size,
- &mac_control->stats_mem_phy, GFP_KERNEL);
-
- if (!mac_control->stats_mem) {
- /*
- * In case of failure, free_shared_mem() is called, which
- * should free any memory that was allocated before the
- * failure happened.
- */
- return -ENOMEM;
- }
- mem_allocated += size;
- mac_control->stats_mem_sz = size;
-
- tmp_v_addr = mac_control->stats_mem;
- mac_control->stats_info = tmp_v_addr;
- memset(tmp_v_addr, 0, size);
- DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
- dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
- mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
- return SUCCESS;
-}
-
-/**
- * free_shared_mem - Free the allocated Memory
- * @nic: Device private variable.
- * Description: This function is to free all memory locations allocated by
- * the init_shared_mem() function and return it to the kernel.
- */
-
-static void free_shared_mem(struct s2io_nic *nic)
-{
- int i, j, blk_cnt, size;
- void *tmp_v_addr;
- dma_addr_t tmp_p_addr;
- int lst_size, lst_per_page;
- struct net_device *dev;
- int page_num = 0;
- struct config_param *config;
- struct mac_info *mac_control;
- struct stat_block *stats;
- struct swStat *swstats;
-
- if (!nic)
- return;
-
- dev = nic->dev;
-
- config = &nic->config;
- mac_control = &nic->mac_control;
- stats = mac_control->stats_info;
- swstats = &stats->sw_stat;
-
- lst_size = sizeof(struct TxD) * config->max_txds;
- lst_per_page = PAGE_SIZE / lst_size;
-
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
- for (j = 0; j < page_num; j++) {
- int mem_blks = (j * lst_per_page);
- struct list_info_hold *fli;
-
- if (!fifo->list_info)
- return;
-
- fli = &fifo->list_info[mem_blks];
- if (!fli->list_virt_addr)
- break;
- dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
- fli->list_virt_addr,
- fli->list_phy_addr);
- swstats->mem_freed += PAGE_SIZE;
- }
- /* If we got a zero DMA address during allocation,
- * free the page now
- */
- if (mac_control->zerodma_virt_addr) {
- dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
- mac_control->zerodma_virt_addr,
- (dma_addr_t)0);
- DBG_PRINT(INIT_DBG,
- "%s: Freeing TxDL with zero DMA address. "
- "Virtual address %p\n",
- dev->name, mac_control->zerodma_virt_addr);
- swstats->mem_freed += PAGE_SIZE;
- }
- kfree(fifo->list_info);
- swstats->mem_freed += tx_cfg->fifo_len *
- sizeof(struct list_info_hold);
- }
-
- size = SIZE_OF_BLOCK;
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- blk_cnt = ring->block_count;
- for (j = 0; j < blk_cnt; j++) {
- tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
- tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
- if (tmp_v_addr == NULL)
- break;
- dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
- tmp_p_addr);
- swstats->mem_freed += size;
- kfree(ring->rx_blocks[j].rxds);
- swstats->mem_freed += sizeof(struct rxd_info) *
- rxd_count[nic->rxd_mode];
- }
- }
-
- if (nic->rxd_mode == RXD_MODE_3B) {
- /* Freeing buffer storage addresses in 2BUFF mode. */
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
- struct ring_info *ring = &mac_control->rings[i];
-
- blk_cnt = rx_cfg->num_rxd /
- (rxd_count[nic->rxd_mode] + 1);
- for (j = 0; j < blk_cnt; j++) {
- int k = 0;
- if (!ring->ba[j])
- continue;
- while (k != rxd_count[nic->rxd_mode]) {
- struct buffAdd *ba = &ring->ba[j][k];
- kfree(ba->ba_0_org);
- swstats->mem_freed +=
- BUF0_LEN + ALIGN_SIZE;
- kfree(ba->ba_1_org);
- swstats->mem_freed +=
- BUF1_LEN + ALIGN_SIZE;
- k++;
- }
- kfree(ring->ba[j]);
- swstats->mem_freed += sizeof(struct buffAdd) *
- (rxd_count[nic->rxd_mode] + 1);
- }
- kfree(ring->ba);
- swstats->mem_freed += sizeof(struct buffAdd *) *
- blk_cnt;
- }
- }
-
- for (i = 0; i < nic->config.tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- if (fifo->ufo_in_band_v) {
- swstats->mem_freed += tx_cfg->fifo_len *
- sizeof(u64);
- kfree(fifo->ufo_in_band_v);
- }
- }
-
- if (mac_control->stats_mem) {
- swstats->mem_freed += mac_control->stats_mem_sz;
- dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
- mac_control->stats_mem,
- mac_control->stats_mem_phy);
- }
-}
-
-/*
- * s2io_verify_pci_mode - Verify and return the PCI operating mode of the card.
- */
-
-static int s2io_verify_pci_mode(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 val64 = 0;
- int mode;
-
- val64 = readq(&bar0->pci_mode);
- mode = (u8)GET_PCI_MODE(val64);
-
- if (val64 & PCI_MODE_UNKNOWN_MODE)
- return -1; /* Unknown PCI mode */
- return mode;
-}
-
-#define NEC_VENID 0x1033
-#define NEC_DEVID 0x0125
-static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
-{
- struct pci_dev *tdev = NULL;
- for_each_pci_dev(tdev) {
- if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
- if (tdev->bus == s2io_pdev->bus->parent) {
- pci_dev_put(tdev);
- return 1;
- }
- }
- }
- return 0;
-}
-
-static const int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
-/*
- * s2io_print_pci_mode - Print the PCI bus mode and record the bus speed.
- */
-static int s2io_print_pci_mode(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 val64 = 0;
- int mode;
- struct config_param *config = &nic->config;
- const char *pcimode;
-
- val64 = readq(&bar0->pci_mode);
- mode = (u8)GET_PCI_MODE(val64);
-
- if (val64 & PCI_MODE_UNKNOWN_MODE)
- return -1; /* Unknown PCI mode */
-
- config->bus_speed = bus_speed[mode];
-
- if (s2io_on_nec_bridge(nic->pdev)) {
- DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
- nic->dev->name);
- return mode;
- }
-
- switch (mode) {
- case PCI_MODE_PCI_33:
- pcimode = "33MHz PCI bus";
- break;
- case PCI_MODE_PCI_66:
- pcimode = "66MHz PCI bus";
- break;
- case PCI_MODE_PCIX_M1_66:
- pcimode = "66MHz PCIX(M1) bus";
- break;
- case PCI_MODE_PCIX_M1_100:
- pcimode = "100MHz PCIX(M1) bus";
- break;
- case PCI_MODE_PCIX_M1_133:
- pcimode = "133MHz PCIX(M1) bus";
- break;
- case PCI_MODE_PCIX_M2_66:
- pcimode = "133MHz PCIX(M2) bus";
- break;
- case PCI_MODE_PCIX_M2_100:
- pcimode = "200MHz PCIX(M2) bus";
- break;
- case PCI_MODE_PCIX_M2_133:
- pcimode = "266MHz PCIX(M2) bus";
- break;
- default:
- pcimode = "unsupported bus!";
- mode = -1;
- }
-
- DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
- nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
-
- return mode;
-}
-
-/**
- * init_tti - Initialize the transmit traffic interrupt scheme
- * @nic: device private variable
- * @link: link status (UP/DOWN) used to enable/disable continuous
- * transmit interrupts
- * @may_sleep: indicates whether we may sleep while waiting for the
- * command to complete
- * Description: The function configures the transmit traffic interrupts.
- * Return Value: SUCCESS on success and
- * '-1' on failure
- */
-
-static int init_tti(struct s2io_nic *nic, int link, bool may_sleep)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 val64 = 0;
- int i;
- struct config_param *config = &nic->config;
-
- for (i = 0; i < config->tx_fifo_num; i++) {
- /*
- * TTI Initialization. Default Tx timer gets us about
- * 250 interrupts per sec. Continuous interrupts are enabled
- * by default.
- */
- if (nic->device_type == XFRAME_II_DEVICE) {
- int count = (nic->config.bus_speed * 125)/2;
- val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
- } else
- val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
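- /*
- * Worked example (illustrative figures, not from the original source):
- * on a 133 MHz PCI-X bus, count = (133 * 125) / 2 = 8312 timer ticks
- * between interrupt batches.
- */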
-
- val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
- TTI_DATA1_MEM_TX_URNG_B(0x10) |
- TTI_DATA1_MEM_TX_URNG_C(0x30) |
- TTI_DATA1_MEM_TX_TIMER_AC_EN;
- if (i == 0)
- if (use_continuous_tx_intrs && (link == LINK_UP))
- val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
- writeq(val64, &bar0->tti_data1_mem);
-
- if (nic->config.intr_type == MSI_X) {
- val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
- TTI_DATA2_MEM_TX_UFC_B(0x100) |
- TTI_DATA2_MEM_TX_UFC_C(0x200) |
- TTI_DATA2_MEM_TX_UFC_D(0x300);
- } else {
- if ((nic->config.tx_steering_type ==
- TX_DEFAULT_STEERING) &&
- (config->tx_fifo_num > 1) &&
- (i >= nic->udp_fifo_idx) &&
- (i < (nic->udp_fifo_idx +
- nic->total_udp_fifos)))
- val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
- TTI_DATA2_MEM_TX_UFC_B(0x80) |
- TTI_DATA2_MEM_TX_UFC_C(0x100) |
- TTI_DATA2_MEM_TX_UFC_D(0x120);
- else
- val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
- TTI_DATA2_MEM_TX_UFC_B(0x20) |
- TTI_DATA2_MEM_TX_UFC_C(0x40) |
- TTI_DATA2_MEM_TX_UFC_D(0x80);
- }
-
- writeq(val64, &bar0->tti_data2_mem);
-
- val64 = TTI_CMD_MEM_WE |
- TTI_CMD_MEM_STROBE_NEW_CMD |
- TTI_CMD_MEM_OFFSET(i);
- writeq(val64, &bar0->tti_command_mem);
-
- if (wait_for_cmd_complete(&bar0->tti_command_mem,
- TTI_CMD_MEM_STROBE_NEW_CMD,
- S2IO_BIT_RESET, may_sleep) != SUCCESS)
- return FAILURE;
- }
-
- return SUCCESS;
-}
-
-/**
- * init_nic - Initialization of hardware
- * @nic: device private variable
- * Description: The function sequentially configures every block
- * of the H/W from its reset values.
- * Return Value: SUCCESS on success and
- * '-1' on failure (endian settings incorrect).
- */
-
-static int init_nic(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- struct net_device *dev = nic->dev;
- register u64 val64 = 0;
- void __iomem *add;
- u32 time;
- int i, j;
- int dtx_cnt = 0;
- unsigned long long mem_share;
- int mem_size;
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
-
- /* Set the swapper control on the card */
- if (s2io_set_swapper(nic)) {
- DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
- return -EIO;
- }
-
- /*
- * Herc requires EOI to be removed from reset before XGXS, so do that first.
- */
- if (nic->device_type & XFRAME_II_DEVICE) {
- val64 = 0xA500000000ULL;
- writeq(val64, &bar0->sw_reset);
- msleep(500);
- val64 = readq(&bar0->sw_reset);
- }
-
- /* Remove XGXS from reset state */
- val64 = 0;
- writeq(val64, &bar0->sw_reset);
- msleep(500);
- val64 = readq(&bar0->sw_reset);
-
- /* Ensure that it's safe to access registers by checking that
- * the RIC_RUNNING bit is clear. The check is valid only for Xframe II.
- */
- if (nic->device_type == XFRAME_II_DEVICE) {
- for (i = 0; i < 50; i++) {
- val64 = readq(&bar0->adapter_status);
- if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
- break;
- msleep(10);
- }
- if (i == 50)
- return -ENODEV;
- }
-
- /* Enable Receiving broadcasts */
- add = &bar0->mac_cfg;
- val64 = readq(&bar0->mac_cfg);
- val64 |= MAC_RMAC_BCAST_ENABLE;
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32)val64, add);
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64 >> 32), (add + 4));
-
- /* Read registers in all blocks */
- val64 = readq(&bar0->mac_int_mask);
- val64 = readq(&bar0->mc_int_mask);
- val64 = readq(&bar0->xgxs_int_mask);
-
- /* Set MTU */
- val64 = dev->mtu;
- writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
-
- if (nic->device_type & XFRAME_II_DEVICE) {
- while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
- SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
- &bar0->dtx_control, UF);
- if (dtx_cnt & 0x1)
- msleep(1); /* Necessary!! */
- dtx_cnt++;
- }
- } else {
- while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
- SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
- &bar0->dtx_control, UF);
- val64 = readq(&bar0->dtx_control);
- dtx_cnt++;
- }
- }
-
- /* Tx DMA Initialization */
- val64 = 0;
- writeq(val64, &bar0->tx_fifo_partition_0);
- writeq(val64, &bar0->tx_fifo_partition_1);
- writeq(val64, &bar0->tx_fifo_partition_2);
- writeq(val64, &bar0->tx_fifo_partition_3);
-
- for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
- vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
-
- if (i == (config->tx_fifo_num - 1)) {
- if (i % 2 == 0)
- i++;
- }
-
- switch (i) {
- case 1:
- writeq(val64, &bar0->tx_fifo_partition_0);
- val64 = 0;
- j = 0;
- break;
- case 3:
- writeq(val64, &bar0->tx_fifo_partition_1);
- val64 = 0;
- j = 0;
- break;
- case 5:
- writeq(val64, &bar0->tx_fifo_partition_2);
- val64 = 0;
- j = 0;
- break;
- case 7:
- writeq(val64, &bar0->tx_fifo_partition_3);
- val64 = 0;
- j = 0;
- break;
- default:
- j++;
- break;
- }
- }
-
- /*
- * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
- * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
- */
- if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
- writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
-
- val64 = readq(&bar0->tx_fifo_partition_0);
- DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
- &bar0->tx_fifo_partition_0, (unsigned long long)val64);
-
- /*
- * Initialization of Tx_PA_CONFIG register to ignore packet
- * integrity checking.
- */
- val64 = readq(&bar0->tx_pa_cfg);
- val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
- TX_PA_CFG_IGNORE_SNAP_OUI |
- TX_PA_CFG_IGNORE_LLC_CTRL |
- TX_PA_CFG_IGNORE_L2_ERR;
- writeq(val64, &bar0->tx_pa_cfg);
-
- /* Rx DMA initialization. */
- val64 = 0;
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
-
- val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
- }
- writeq(val64, &bar0->rx_queue_priority);
-
- /*
- * Allocating equal share of memory to all the
- * configured Rings.
- */
- val64 = 0;
- if (nic->device_type & XFRAME_II_DEVICE)
- mem_size = 32;
- else
- mem_size = 64;
-
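- /*
- * Illustration (not from the original source): with mem_size = 64 and
- * rx_ring_num = 3, ring 0 gets 21 + 1 (the remainder) blocks and
- * rings 1 and 2 get 21 each, so all 64 blocks are accounted for.
- */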
- for (i = 0; i < config->rx_ring_num; i++) {
- switch (i) {
- case 0:
- mem_share = (mem_size / config->rx_ring_num +
- mem_size % config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
- continue;
- case 1:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
- continue;
- case 2:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
- continue;
- case 3:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
- continue;
- case 4:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
- continue;
- case 5:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
- continue;
- case 6:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
- continue;
- case 7:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
- continue;
- }
- }
- writeq(val64, &bar0->rx_queue_cfg);
-
- /*
- * Filling Tx round robin registers
- * as per the number of FIFOs for equal scheduling priority
- */
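- /*
- * A reading of the values below (not spelled out in the original
- * comments): each byte of a round robin register appears to name the
- * FIFO served in one arbitration slot, eight slots per register.  In
- * the three-FIFO case, 0x0001020001020001ULL would serve FIFOs
- * 0,1,2,0,1,2,0,1 in successive slots.
- */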
- switch (config->tx_fifo_num) {
- case 1:
- val64 = 0x0;
- writeq(val64, &bar0->tx_w_round_robin_0);
- writeq(val64, &bar0->tx_w_round_robin_1);
- writeq(val64, &bar0->tx_w_round_robin_2);
- writeq(val64, &bar0->tx_w_round_robin_3);
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 2:
- val64 = 0x0001000100010001ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- writeq(val64, &bar0->tx_w_round_robin_1);
- writeq(val64, &bar0->tx_w_round_robin_2);
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0001000100000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 3:
- val64 = 0x0001020001020001ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- val64 = 0x0200010200010200ULL;
- writeq(val64, &bar0->tx_w_round_robin_1);
- val64 = 0x0102000102000102ULL;
- writeq(val64, &bar0->tx_w_round_robin_2);
- val64 = 0x0001020001020001ULL;
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0200010200000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 4:
- val64 = 0x0001020300010203ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- writeq(val64, &bar0->tx_w_round_robin_1);
- writeq(val64, &bar0->tx_w_round_robin_2);
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0001020300000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 5:
- val64 = 0x0001020304000102ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- val64 = 0x0304000102030400ULL;
- writeq(val64, &bar0->tx_w_round_robin_1);
- val64 = 0x0102030400010203ULL;
- writeq(val64, &bar0->tx_w_round_robin_2);
- val64 = 0x0400010203040001ULL;
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0203040000000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 6:
- val64 = 0x0001020304050001ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- val64 = 0x0203040500010203ULL;
- writeq(val64, &bar0->tx_w_round_robin_1);
- val64 = 0x0405000102030405ULL;
- writeq(val64, &bar0->tx_w_round_robin_2);
- val64 = 0x0001020304050001ULL;
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0203040500000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 7:
- val64 = 0x0001020304050600ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- val64 = 0x0102030405060001ULL;
- writeq(val64, &bar0->tx_w_round_robin_1);
- val64 = 0x0203040506000102ULL;
- writeq(val64, &bar0->tx_w_round_robin_2);
- val64 = 0x0304050600010203ULL;
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0405060000000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 8:
- val64 = 0x0001020304050607ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- writeq(val64, &bar0->tx_w_round_robin_1);
- writeq(val64, &bar0->tx_w_round_robin_2);
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0001020300000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- }
-
- /* Enable all configured Tx FIFO partitions */
- val64 = readq(&bar0->tx_fifo_partition_0);
- val64 |= (TX_FIFO_PARTITION_EN);
- writeq(val64, &bar0->tx_fifo_partition_0);
-
- /* Filling the Rx round robin registers as per the
- * number of Rings and steering based on QoS with
- * equal priority.
- */
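- /*
- * An interpretation of the values below (not from the original
- * comments): the rx_w_round_robin registers mirror the Tx scheme
- * above, and each byte of rts_qos_steering looks like a per-priority
- * bitmap of eligible rings (0x80 = ring 0, 0x40 = ring 1, ...); e.g.
- * 0x8080808040404040ULL in the two-ring case maps priorities 0-3 to
- * ring 0 and priorities 4-7 to ring 1.
- */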
- switch (config->rx_ring_num) {
- case 1:
- val64 = 0x0;
- writeq(val64, &bar0->rx_w_round_robin_0);
- writeq(val64, &bar0->rx_w_round_robin_1);
- writeq(val64, &bar0->rx_w_round_robin_2);
- writeq(val64, &bar0->rx_w_round_robin_3);
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080808080808080ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 2:
- val64 = 0x0001000100010001ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- writeq(val64, &bar0->rx_w_round_robin_1);
- writeq(val64, &bar0->rx_w_round_robin_2);
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0001000100000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080808040404040ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 3:
- val64 = 0x0001020001020001ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0200010200010200ULL;
- writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0102000102000102ULL;
- writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0001020001020001ULL;
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0200010200000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080804040402020ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 4:
- val64 = 0x0001020300010203ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- writeq(val64, &bar0->rx_w_round_robin_1);
- writeq(val64, &bar0->rx_w_round_robin_2);
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0001020300000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080404020201010ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 5:
- val64 = 0x0001020304000102ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0304000102030400ULL;
- writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0102030400010203ULL;
- writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0400010203040001ULL;
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0203040000000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080404020201008ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 6:
- val64 = 0x0001020304050001ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0203040500010203ULL;
- writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0405000102030405ULL;
- writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0001020304050001ULL;
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0203040500000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080404020100804ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 7:
- val64 = 0x0001020304050600ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0102030405060001ULL;
- writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0203040506000102ULL;
- writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0304050600010203ULL;
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0405060000000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080402010080402ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 8:
- val64 = 0x0001020304050607ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- writeq(val64, &bar0->rx_w_round_robin_1);
- writeq(val64, &bar0->rx_w_round_robin_2);
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0001020300000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8040201008040201ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- }
-
- /* UDP Fix */
- val64 = 0;
- for (i = 0; i < 8; i++)
- writeq(val64, &bar0->rts_frm_len_n[i]);
-
- /* Set the default rts frame length for the rings configured */
- val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
- for (i = 0; i < config->rx_ring_num; i++)
- writeq(val64, &bar0->rts_frm_len_n[i]);
-
- /* Set the frame length for the configured rings
- * desired by the user
- */
- for (i = 0; i < config->rx_ring_num; i++) {
- /* If rts_frm_len[i] == 0 it is assumed that the user has
- * not specified frame length steering.
- * If the user provides a frame length then program
- * the rts_frm_len register with that value, or else
- * leave it as it is.
- */
- if (rts_frm_len[i] != 0) {
- writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
- &bar0->rts_frm_len_n[i]);
- }
- }
-
- /* Disable differentiated services steering logic */
- for (i = 0; i < 64; i++) {
- if (rts_ds_steer(nic, i, 0) == FAILURE) {
- DBG_PRINT(ERR_DBG,
- "%s: rts_ds_steer failed on codepoint %d\n",
- dev->name, i);
- return -ENODEV;
- }
- }
-
- /* Program statistics memory */
- writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
-
- if (nic->device_type == XFRAME_II_DEVICE) {
- val64 = STAT_BC(0x320);
- writeq(val64, &bar0->stat_byte_cnt);
- }
-
- /*
- * Initializing the sampling rate for the device to calculate the
- * bandwidth utilization.
- */
- val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
- MAC_RX_LINK_UTIL_VAL(rmac_util_period);
- writeq(val64, &bar0->mac_link_util);
-
- /*
- * Initializing the Transmit and Receive Traffic Interrupt
- * Scheme.
- */
-
- /* Initialize TTI */
- if (SUCCESS != init_tti(nic, nic->last_link_state, true))
- return -ENODEV;
-
- /* RTI Initialization */
- if (nic->device_type == XFRAME_II_DEVICE) {
- /*
- * Programmed to generate approximately 500
- * interrupts per second
- */
- int count = (nic->config.bus_speed * 125)/4;
- val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
- } else
- val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
- val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
- RTI_DATA1_MEM_RX_URNG_B(0x10) |
- RTI_DATA1_MEM_RX_URNG_C(0x30) |
- RTI_DATA1_MEM_RX_TIMER_AC_EN;
-
- writeq(val64, &bar0->rti_data1_mem);
-
- val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
- RTI_DATA2_MEM_RX_UFC_B(0x2);
- if (nic->config.intr_type == MSI_X)
- val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
- RTI_DATA2_MEM_RX_UFC_D(0x40));
- else
- val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
- RTI_DATA2_MEM_RX_UFC_D(0x80));
- writeq(val64, &bar0->rti_data2_mem);
-
- for (i = 0; i < config->rx_ring_num; i++) {
- val64 = RTI_CMD_MEM_WE |
- RTI_CMD_MEM_STROBE_NEW_CMD |
- RTI_CMD_MEM_OFFSET(i);
- writeq(val64, &bar0->rti_command_mem);
-
- /*
- * Once the operation completes, the Strobe bit of the
- * command register will be reset. We poll for this
- * particular condition, waiting a maximum of 500ms for
- * the operation to complete; if it's not complete by
- * then we return an error.
- */
- time = 0;
- while (true) {
- val64 = readq(&bar0->rti_command_mem);
- if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
- break;
-
- if (time > 10) {
- DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
- dev->name);
- return -ENODEV;
- }
- time++;
- msleep(50);
- }
- }
-
- /*
- * Initializing proper pause threshold values for all
- * the 8 queues on the Rx side.
- */
- writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
- writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
-
- /* Disable RMAC PAD STRIPPING */
- add = &bar0->mac_cfg;
- val64 = readq(&bar0->mac_cfg);
- val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64), add);
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64 >> 32), (add + 4));
- val64 = readq(&bar0->mac_cfg);
-
- /* Enable FCS stripping by adapter */
- add = &bar0->mac_cfg;
- val64 = readq(&bar0->mac_cfg);
- val64 |= MAC_CFG_RMAC_STRIP_FCS;
- if (nic->device_type == XFRAME_II_DEVICE)
- writeq(val64, &bar0->mac_cfg);
- else {
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64), add);
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64 >> 32), (add + 4));
- }
-
- /*
- * Set the time value to be inserted in the pause frame
- * generated by xena.
- */
- val64 = readq(&bar0->rmac_pause_cfg);
- val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
- val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
- writeq(val64, &bar0->rmac_pause_cfg);
-
- /*
- * Set the threshold limit for generating the pause frame.
- * If the amount of data in any queue exceeds the ratio
- * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
- * a pause frame is generated.
- */
- val64 = 0;
- for (i = 0; i < 4; i++) {
- val64 |= (((u64)0xFF00 |
- nic->mac_control.mc_pause_threshold_q0q3)
- << (i * 2 * 8));
- }
- writeq(val64, &bar0->mc_pause_thresh_q0q3);
-
- val64 = 0;
- for (i = 0; i < 4; i++) {
- val64 |= (((u64)0xFF00 |
- nic->mac_control.mc_pause_threshold_q4q7)
- << (i * 2 * 8));
- }
- writeq(val64, &bar0->mc_pause_thresh_q4q7);
-
- /*
- * TxDMA will stop issuing read requests if the number of
- * read splits exceeds the limit set by shared_splits.
- */
- val64 = readq(&bar0->pic_control);
- val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
- writeq(val64, &bar0->pic_control);
-
- if (nic->config.bus_speed == 266) {
- writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
- writeq(0x0, &bar0->read_retry_delay);
- writeq(0x0, &bar0->write_retry_delay);
- }
-
- /*
- * Programming the Herc to split every write transaction
- * that does not start on an ADB to reduce disconnects.
- */
- if (nic->device_type == XFRAME_II_DEVICE) {
- val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
- MISC_LINK_STABILITY_PRD(3);
- writeq(val64, &bar0->misc_control);
- val64 = readq(&bar0->pic_control2);
- val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
- writeq(val64, &bar0->pic_control2);
- }
- if (strstr(nic->product_name, "CX4")) {
- val64 = TMAC_AVG_IPG(0x17);
- writeq(val64, &bar0->tmac_avg_ipg);
- }
-
- return SUCCESS;
-}
-#define LINK_UP_DOWN_INTERRUPT 1
-#define MAC_RMAC_ERR_TIMER 2
-
-static int s2io_link_fault_indication(struct s2io_nic *nic)
-{
- if (nic->device_type == XFRAME_II_DEVICE)
- return LINK_UP_DOWN_INTERRUPT;
- else
- return MAC_RMAC_ERR_TIMER;
-}
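-
-/*
- * Background sketch: XFRAME II (Hercules) reports link faults through
- * the GPIO link up/down interrupt, while XFRAME I (Xena) relies on the
- * RMAC error timer; the helper above selects between those two paths
- * for en_dis_err_alarms(), en_dis_able_nic_intrs() and start_nic()
- * below.
- */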
-
-/**
- * do_s2io_write_bits - update alarm bits in alarm register
- * @value: alarm bits
- * @flag: interrupt status
- * @addr: address value
- * Description: update alarm bits in alarm register
- * Return Value:
- * NONE.
- */
-static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
-{
- u64 temp64;
-
- temp64 = readq(addr);
-
- if (flag == ENABLE_INTRS)
- temp64 &= ~((u64)value);
- else
- temp64 |= ((u64)value);
- writeq(temp64, addr);
-}
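-
-/*
- * Illustrative aside: the alarm mask registers use inverted polarity,
- * i.e. a 0 bit unmasks (enables) an alarm source and a 1 bit masks it,
- * which is why ENABLE_INTRS clears the requested bits while any other
- * flag sets them:
- *
- *   do_s2io_write_bits(PFC_ECC_DB_ERR, ENABLE_INTRS, &bar0->pfc_err_mask);
- *   do_s2io_write_bits(PFC_ECC_DB_ERR, DISABLE_INTRS, &bar0->pfc_err_mask);
- */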
-
-static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 gen_int_mask = 0;
- u64 interruptible;
-
- writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
- if (mask & TX_DMA_INTR) {
- gen_int_mask |= TXDMA_INT_M;
-
- do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
- TXDMA_PCC_INT | TXDMA_TTI_INT |
- TXDMA_LSO_INT | TXDMA_TPA_INT |
- TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
-
- do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
- PFC_MISC_0_ERR | PFC_MISC_1_ERR |
- PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
- &bar0->pfc_err_mask);
-
- do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
- TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
- TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
-
- do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
- PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
- PCC_N_SERR | PCC_6_COF_OV_ERR |
- PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
- PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
- PCC_TXB_ECC_SG_ERR,
- flag, &bar0->pcc_err_mask);
-
- do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
- TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
-
- do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
- LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
- LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
- flag, &bar0->lso_err_mask);
-
- do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
- flag, &bar0->tpa_err_mask);
-
- do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
- }
-
- if (mask & TX_MAC_INTR) {
- gen_int_mask |= TXMAC_INT_M;
- do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
- &bar0->mac_int_mask);
- do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
- TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
- TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
- flag, &bar0->mac_tmac_err_mask);
- }
-
- if (mask & TX_XGXS_INTR) {
- gen_int_mask |= TXXGXS_INT_M;
- do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
- &bar0->xgxs_int_mask);
- do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
- TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
- flag, &bar0->xgxs_txgxs_err_mask);
- }
-
- if (mask & RX_DMA_INTR) {
- gen_int_mask |= RXDMA_INT_M;
- do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
- RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
- flag, &bar0->rxdma_int_mask);
- do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
- RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
- RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
- RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
- do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
- PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
- PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
- &bar0->prc_pcix_err_mask);
- do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
- RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
- &bar0->rpa_err_mask);
- do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
- RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
- RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
- RDA_FRM_ECC_SG_ERR |
- RDA_MISC_ERR|RDA_PCIX_ERR,
- flag, &bar0->rda_err_mask);
- do_s2io_write_bits(RTI_SM_ERR_ALARM |
- RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
- flag, &bar0->rti_err_mask);
- }
-
- if (mask & RX_MAC_INTR) {
- gen_int_mask |= RXMAC_INT_M;
- do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
- &bar0->mac_int_mask);
- interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
- RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
- RMAC_DOUBLE_ECC_ERR);
- if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
- interruptible |= RMAC_LINK_STATE_CHANGE_INT;
- do_s2io_write_bits(interruptible,
- flag, &bar0->mac_rmac_err_mask);
- }
-
- if (mask & RX_XGXS_INTR) {
- gen_int_mask |= RXXGXS_INT_M;
- do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
- &bar0->xgxs_int_mask);
- do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
- &bar0->xgxs_rxgxs_err_mask);
- }
-
- if (mask & MC_INTR) {
- gen_int_mask |= MC_INT_M;
- do_s2io_write_bits(MC_INT_MASK_MC_INT,
- flag, &bar0->mc_int_mask);
- do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
- MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
- &bar0->mc_err_mask);
- }
- nic->general_int_mask = gen_int_mask;
-
- /* Remove this line when alarm interrupts are enabled */
- nic->general_int_mask = 0;
-}
-
-/**
- * en_dis_able_nic_intrs - Enable or Disable the interrupts
- * @nic: device private variable
- * @mask: A mask indicating which Intr block must be modified
- * @flag: A flag indicating whether to enable or disable the Intrs.
- * Description: This function will either disable or enable the interrupts
- * depending on the flag argument. The mask argument can be used to
- * enable/disable any Intr block.
- * Return Value: NONE.
- */
-
-static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 temp64 = 0, intr_mask = 0;
-
- intr_mask = nic->general_int_mask;
-
- /* Top level interrupt classification */
- /* PIC Interrupts */
- if (mask & TX_PIC_INTR) {
- /* Enable PIC Intrs in the general intr mask register */
- intr_mask |= TXPIC_INT_M;
- if (flag == ENABLE_INTRS) {
- /*
- * If this is a Hercules adapter, enable the GPIO
- * interrupt; otherwise disable all PCIX, Flash, MDIO,
- * IIC and GPIO interrupts for now.
- * TODO
- */
- if (s2io_link_fault_indication(nic) ==
- LINK_UP_DOWN_INTERRUPT) {
- do_s2io_write_bits(PIC_INT_GPIO, flag,
- &bar0->pic_int_mask);
- do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
- &bar0->gpio_int_mask);
- } else
- writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
- } else if (flag == DISABLE_INTRS) {
- /*
- * Disable PIC Intrs in the general
- * intr mask register
- */
- writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
- }
- }
-
- /* Tx traffic interrupts */
- if (mask & TX_TRAFFIC_INTR) {
- intr_mask |= TXTRAFFIC_INT_M;
- if (flag == ENABLE_INTRS) {
- /*
- * Enable all the Tx side interrupts;
- * writing 0 enables all 64 TX interrupt levels.
- */
- writeq(0x0, &bar0->tx_traffic_mask);
- } else if (flag == DISABLE_INTRS) {
- /*
- * Disable Tx Traffic Intrs in the general intr mask
- * register.
- */
- writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
- }
- }
-
- /* Rx traffic interrupts */
- if (mask & RX_TRAFFIC_INTR) {
- intr_mask |= RXTRAFFIC_INT_M;
- if (flag == ENABLE_INTRS) {
- /* writing 0 enables all 8 RX interrupt levels */
- writeq(0x0, &bar0->rx_traffic_mask);
- } else if (flag == DISABLE_INTRS) {
- /*
- * Disable Rx Traffic Intrs in the general intr mask
- * register.
- */
- writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
- }
- }
-
- temp64 = readq(&bar0->general_int_mask);
- if (flag == ENABLE_INTRS)
- temp64 &= ~((u64)intr_mask);
- else
- temp64 = DISABLE_ALL_INTRS;
- writeq(temp64, &bar0->general_int_mask);
-
- nic->general_int_mask = readq(&bar0->general_int_mask);
-}
-
-/**
- * verify_pcc_quiescent - Checks for PCC quiescent state
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @flag: boolean controlling function path
- * Return: 1 if PCC is quiescent
- * 0 if PCC is not quiescent
- */
-static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
-{
- int ret = 0, herc;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64 = readq(&bar0->adapter_status);
-
- herc = (sp->device_type == XFRAME_II_DEVICE);
-
- if (flag == false) {
- if ((!herc && (sp->pdev->revision >= 4)) || herc) {
- if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
- ret = 1;
- } else {
- if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
- ret = 1;
- }
- } else {
- if ((!herc && (sp->pdev->revision >= 4)) || herc) {
- if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
- ADAPTER_STATUS_RMAC_PCC_IDLE))
- ret = 1;
- } else {
- if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
- ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
- ret = 1;
- }
- }
-
- return ret;
-}
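-
-/*
- * Reading the two branches above (interpretive sketch): @flag mirrors
- * whether the adapter enable bit has already been written. Before
- * enable (flag == false) the PCC idle bits are expected to be clear;
- * after enable they must all be set. Xframe I revisions below 4 only
- * expose the four-PCC idle field, hence the
- * ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE variant.
- */
-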
-/**
- * verify_xena_quiescence - Checks whether the H/W is ready
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * Description: Returns whether the H/W is ready to go or not, based on
- * the ready/quiescent bits reported in the adapter status register.
- * Return: 1 if Xena is quiescent
- * 0 if Xena is not quiescent
- */
-
-static int verify_xena_quiescence(struct s2io_nic *sp)
-{
- int mode;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64 = readq(&bar0->adapter_status);
- mode = s2io_verify_pci_mode(sp);
-
- if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
- DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
- DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
- DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
- DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
- DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
- DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
- DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
- DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
- return 0;
- }
-
- /*
- * In PCI 33 mode, the P_PLL is not used, and therefore,
- * the P_PLL_LOCK bit in the adapter_status register will
- * not be asserted.
- */
- if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
- sp->device_type == XFRAME_II_DEVICE &&
- mode != PCI_MODE_PCI_33) {
- DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
- return 0;
- }
- if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
- ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
- DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
- return 0;
- }
- return 1;
-}
-
-/**
- * fix_mac_address - Fix for MAC address problem on Alpha platforms
- * @sp: Pointer to device specific structure
- * Description :
- * New procedure to clear MAC address reading problems on Alpha platforms
- *
- */
-
-static void fix_mac_address(struct s2io_nic *sp)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- int i = 0;
-
- while (fix_mac[i] != END_SIGN) {
- writeq(fix_mac[i++], &bar0->gpio_control);
- udelay(10);
- (void) readq(&bar0->gpio_control);
- }
-}
-
-/**
- * start_nic - Turns the device on
- * @nic : device private variable.
- * Description:
- * This function actually turns the device on. Before this function is
- * called, all registers are configured from their reset states
- * and shared memory is allocated but the NIC is still quiescent. On
- * calling this function, the device interrupts are cleared and the NIC is
- * literally switched on by writing into the adapter control register.
- * Return Value:
- * SUCCESS on success and -1 on failure.
- */
-
-static int start_nic(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- struct net_device *dev = nic->dev;
- register u64 val64 = 0;
- u16 subid, i;
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
-
- /* PRC Initialization and configuration */
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- writeq((u64)ring->rx_blocks[0].block_dma_addr,
- &bar0->prc_rxd0_n[i]);
-
- val64 = readq(&bar0->prc_ctrl_n[i]);
- if (nic->rxd_mode == RXD_MODE_1)
- val64 |= PRC_CTRL_RC_ENABLED;
- else
- val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
- if (nic->device_type == XFRAME_II_DEVICE)
- val64 |= PRC_CTRL_GROUP_READS;
- val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
- val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
- writeq(val64, &bar0->prc_ctrl_n[i]);
- }
-
- if (nic->rxd_mode == RXD_MODE_3B) {
- /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
- val64 = readq(&bar0->rx_pa_cfg);
- val64 |= RX_PA_CFG_IGNORE_L2_ERR;
- writeq(val64, &bar0->rx_pa_cfg);
- }
-
- if (vlan_tag_strip == 0) {
- val64 = readq(&bar0->rx_pa_cfg);
- val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
- writeq(val64, &bar0->rx_pa_cfg);
- nic->vlan_strip_flag = 0;
- }
-
- /*
- * Enabling MC-RLDRAM. After enabling the device, we wait
- * for around 100ms, which is approximately the time required
- * for the device to be ready for operation.
- */
- val64 = readq(&bar0->mc_rldram_mrs);
- val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
- SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
- val64 = readq(&bar0->mc_rldram_mrs);
-
- msleep(100); /* Delay by around 100 ms. */
-
- /* Enabling ECC Protection. */
- val64 = readq(&bar0->adapter_control);
- val64 &= ~ADAPTER_ECC_EN;
- writeq(val64, &bar0->adapter_control);
-
- /*
- * Verify if the device is ready to be enabled, if so enable
- * it.
- */
- val64 = readq(&bar0->adapter_status);
- if (!verify_xena_quiescence(nic)) {
- DBG_PRINT(ERR_DBG, "%s: device is not ready, "
- "Adapter status reads: 0x%llx\n",
- dev->name, (unsigned long long)val64);
- return FAILURE;
- }
-
- /*
- * With some switches, link might be already up at this point.
- * Because of this weird behavior, when we enable laser,
- * we may not get link. We need to handle this. We cannot
- * figure out which switch is misbehaving. So we are forced to
- * make a global change.
- */
-
- /* Enabling Laser. */
- val64 = readq(&bar0->adapter_control);
- val64 |= ADAPTER_EOI_TX_ON;
- writeq(val64, &bar0->adapter_control);
-
- if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
- /*
- * We don't see link state interrupts initially on some
- * switches, so schedule the link state task here directly.
- */
- schedule_work(&nic->set_link_task);
- }
- /* SXE-002: Initialize link and activity LED */
- subid = nic->pdev->subsystem_device;
- if (((subid & 0xFF) >= 0x07) &&
- (nic->device_type == XFRAME_I_DEVICE)) {
- val64 = readq(&bar0->gpio_control);
- val64 |= 0x0000800000000000ULL;
- writeq(val64, &bar0->gpio_control);
- val64 = 0x0411040400000000ULL;
- writeq(val64, (void __iomem *)bar0 + 0x2700);
- }
-
- return SUCCESS;
-}
-/**
- * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
- * @fifo_data: fifo data pointer
- * @txdlp: descriptor
- * @get_off: unused
- */
-static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
- struct TxD *txdlp, int get_off)
-{
- struct s2io_nic *nic = fifo_data->nic;
- struct sk_buff *skb;
- struct TxD *txds;
- u16 j, frg_cnt;
-
- txds = txdlp;
- if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
- dma_unmap_single(&nic->pdev->dev,
- (dma_addr_t)txds->Buffer_Pointer,
- sizeof(u64), DMA_TO_DEVICE);
- txds++;
- }
-
- skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
- if (!skb) {
- memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
- return NULL;
- }
- dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
- skb_headlen(skb), DMA_TO_DEVICE);
- frg_cnt = skb_shinfo(skb)->nr_frags;
- if (frg_cnt) {
- txds++;
- for (j = 0; j < frg_cnt; j++, txds++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
- if (!txds->Buffer_Pointer)
- break;
- dma_unmap_page(&nic->pdev->dev,
- (dma_addr_t)txds->Buffer_Pointer,
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
- }
- memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
- return skb;
-}
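-
-/*
- * Layout sketch (illustrative): when UFO in-band signalling was used,
- * the first TxD of the list carries an 8-byte in-band word rather than
- * frame data, hence the sizeof(u64) unmap and the txds++ skip above.
- * The descriptors that follow cover the linear part (skb_headlen) and
- * one TxD per page fragment.
- */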
-
-/**
- * free_tx_buffers - Free all queued Tx buffers
- * @nic : device private variable.
- * Description:
- * Free all queued Tx buffers.
- * Return Value: void
- */
-
-static void free_tx_buffers(struct s2io_nic *nic)
-{
- struct net_device *dev = nic->dev;
- struct sk_buff *skb;
- struct TxD *txdp;
- int i, j;
- int cnt = 0;
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
- struct stat_block *stats = mac_control->stats_info;
- struct swStat *swstats = &stats->sw_stat;
-
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
- struct fifo_info *fifo = &mac_control->fifos[i];
- unsigned long flags;
-
- spin_lock_irqsave(&fifo->tx_lock, flags);
- for (j = 0; j < tx_cfg->fifo_len; j++) {
- txdp = fifo->list_info[j].list_virt_addr;
- skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
- if (skb) {
- swstats->mem_freed += skb->truesize;
- dev_kfree_skb_irq(skb);
- cnt++;
- }
- }
- DBG_PRINT(INTR_DBG,
- "%s: forcibly freeing %d skbs on FIFO%d\n",
- dev->name, cnt, i);
- fifo->tx_curr_get_info.offset = 0;
- fifo->tx_curr_put_info.offset = 0;
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
- }
-}
-
-/**
- * stop_nic - To stop the nic
- * @nic : device private variable.
- * Description:
- * This function does exactly the opposite of what the start_nic()
- * function does. This function is called to stop the device.
- * Return Value:
- * void.
- */
-
-static void stop_nic(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 val64 = 0;
- u16 interruptible;
-
- /* Disable all interrupts */
- en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
- interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
- interruptible |= TX_PIC_INTR;
- en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
-
- /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
- val64 = readq(&bar0->adapter_control);
- val64 &= ~(ADAPTER_CNTL_EN);
- writeq(val64, &bar0->adapter_control);
-}
-
-/**
- * fill_rx_buffers - Allocates the Rx side skbs
- * @nic : device private variable.
- * @ring: per ring structure
- * @from_card_up: If this is true, we will map the buffer to get
- * the dma address for buf0 and buf1 to give it to the card.
- * Else we will sync the already mapped buffer to give it to the card.
- * Description:
- * The function allocates Rx side skbs and puts the physical
- * address of these buffers into the RxD buffer pointers, so that the NIC
- * can DMA the received frame into these locations.
- * The NIC supports 3 receive modes, viz
- * 1. single buffer,
- * 2. three buffer and
- * 3. five buffer modes.
- * Each mode defines how many fragments the received frame will be split
- * up into by the NIC. In three buffer mode the frame is split into L3
- * header, L4 header and L4 payload; in five buffer mode the L4 payload
- * itself is split into 3 fragments. As of now only single buffer mode
- * is supported.
- * Return Value:
- * SUCCESS on success or an appropriate -ve value on failure.
- */
-static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
- int from_card_up)
-{
- struct sk_buff *skb;
- struct RxD_t *rxdp;
- int off, size, block_no, block_no1;
- u32 alloc_tab = 0;
- u32 alloc_cnt;
- u64 tmp;
- struct buffAdd *ba;
- struct RxD_t *first_rxdp = NULL;
- u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
- struct RxD1 *rxdp1;
- struct RxD3 *rxdp3;
- struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
-
- alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
-
- block_no1 = ring->rx_curr_get_info.block_index;
- while (alloc_tab < alloc_cnt) {
- block_no = ring->rx_curr_put_info.block_index;
-
- off = ring->rx_curr_put_info.offset;
-
- rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
-
- if ((block_no == block_no1) &&
- (off == ring->rx_curr_get_info.offset) &&
- (rxdp->Host_Control)) {
- DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
- ring->dev->name);
- goto end;
- }
- if (off && (off == ring->rxd_count)) {
- ring->rx_curr_put_info.block_index++;
- if (ring->rx_curr_put_info.block_index ==
- ring->block_count)
- ring->rx_curr_put_info.block_index = 0;
- block_no = ring->rx_curr_put_info.block_index;
- off = 0;
- ring->rx_curr_put_info.offset = off;
- rxdp = ring->rx_blocks[block_no].block_virt_addr;
- DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
- ring->dev->name, rxdp);
-
- }
-
- if ((rxdp->Control_1 & RXD_OWN_XENA) &&
- ((ring->rxd_mode == RXD_MODE_3B) &&
- (rxdp->Control_2 & s2BIT(0)))) {
- ring->rx_curr_put_info.offset = off;
- goto end;
- }
- /* calculate size of skb based on ring mode */
- size = ring->mtu +
- HEADER_ETHERNET_II_802_3_SIZE +
- HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
- if (ring->rxd_mode == RXD_MODE_1)
- size += NET_IP_ALIGN;
- else
- size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
-
- /* allocate skb */
- skb = netdev_alloc_skb(nic->dev, size);
- if (!skb) {
- DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
- ring->dev->name);
- if (first_rxdp) {
- dma_wmb();
- first_rxdp->Control_1 |= RXD_OWN_XENA;
- }
- swstats->mem_alloc_fail_cnt++;
-
- return -ENOMEM;
- }
- swstats->mem_allocated += skb->truesize;
-
- if (ring->rxd_mode == RXD_MODE_1) {
- /* 1 buffer mode - normal operation mode */
- rxdp1 = (struct RxD1 *)rxdp;
- memset(rxdp, 0, sizeof(struct RxD1));
- skb_reserve(skb, NET_IP_ALIGN);
- rxdp1->Buffer0_ptr =
- dma_map_single(&ring->pdev->dev, skb->data,
- size - NET_IP_ALIGN,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
- goto pci_map_failed;
-
- rxdp->Control_2 =
- SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
- rxdp->Host_Control = (unsigned long)skb;
- } else if (ring->rxd_mode == RXD_MODE_3B) {
- /*
- * 2 buffer mode -
- * provides 128 byte aligned receive buffers.
- */
-
- rxdp3 = (struct RxD3 *)rxdp;
- /* save buffer pointers to avoid frequent dma mapping */
- Buffer0_ptr = rxdp3->Buffer0_ptr;
- Buffer1_ptr = rxdp3->Buffer1_ptr;
- memset(rxdp, 0, sizeof(struct RxD3));
- /* restore the buffer pointers for dma sync*/
- rxdp3->Buffer0_ptr = Buffer0_ptr;
- rxdp3->Buffer1_ptr = Buffer1_ptr;
-
- ba = &ring->ba[block_no][off];
- skb_reserve(skb, BUF0_LEN);
- tmp = (u64)(unsigned long)skb->data;
- tmp += ALIGN_SIZE;
- tmp &= ~ALIGN_SIZE;
- skb->data = (void *) (unsigned long)tmp;
- skb_reset_tail_pointer(skb);
-
- if (from_card_up) {
- rxdp3->Buffer0_ptr =
- dma_map_single(&ring->pdev->dev,
- ba->ba_0, BUF0_LEN,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
- goto pci_map_failed;
- } else
- dma_sync_single_for_device(&ring->pdev->dev,
- (dma_addr_t)rxdp3->Buffer0_ptr,
- BUF0_LEN,
- DMA_FROM_DEVICE);
-
- rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
- if (ring->rxd_mode == RXD_MODE_3B) {
- /* Two buffer mode */
-
- /*
- * Buffer2 will have L3/L4 header plus
- * L4 payload
- */
- rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
- skb->data,
- ring->mtu + 4,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
- goto pci_map_failed;
-
- if (from_card_up) {
- rxdp3->Buffer1_ptr =
- dma_map_single(&ring->pdev->dev,
- ba->ba_1,
- BUF1_LEN,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(&nic->pdev->dev,
- rxdp3->Buffer1_ptr)) {
- dma_unmap_single(&ring->pdev->dev,
- (dma_addr_t)(unsigned long)
- skb->data,
- ring->mtu + 4,
- DMA_FROM_DEVICE);
- goto pci_map_failed;
- }
- }
- rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
- rxdp->Control_2 |= SET_BUFFER2_SIZE_3
- (ring->mtu + 4);
- }
- rxdp->Control_2 |= s2BIT(0);
- rxdp->Host_Control = (unsigned long) (skb);
- }
- if (alloc_tab & ((1 << rxsync_frequency) - 1))
- rxdp->Control_1 |= RXD_OWN_XENA;
- off++;
- if (off == (ring->rxd_count + 1))
- off = 0;
- ring->rx_curr_put_info.offset = off;
-
- rxdp->Control_2 |= SET_RXD_MARKER;
- if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
- if (first_rxdp) {
- dma_wmb();
- first_rxdp->Control_1 |= RXD_OWN_XENA;
- }
- first_rxdp = rxdp;
- }
- ring->rx_bufs_left += 1;
- alloc_tab++;
- }
-
-end:
- /* Transfer ownership of the first descriptor to the adapter just
- * before exiting. Before that, use a memory barrier so that ownership
- * and the other fields are seen by the adapter correctly.
- */
- if (first_rxdp) {
- dma_wmb();
- first_rxdp->Control_1 |= RXD_OWN_XENA;
- }
-
- return SUCCESS;
-
-pci_map_failed:
- swstats->pci_map_fail_cnt++;
- swstats->mem_freed += skb->truesize;
- dev_kfree_skb_irq(skb);
- return -ENOMEM;
-}
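-
-/*
- * Worked example (illustrative): with rxsync_frequency set to 3,
- * descriptors are handed to the NIC in batches of 1 << 3 == 8.
- * Members 1..7 of a batch get RXD_OWN_XENA immediately; each batch
- * leader is remembered in first_rxdp and only receives the ownership
- * bit after a dma_wmb(), so the NIC never sees a partially
- * initialized batch.
- */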
-
-static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
-{
- struct net_device *dev = sp->dev;
- int j;
- struct sk_buff *skb;
- struct RxD_t *rxdp;
- struct RxD1 *rxdp1;
- struct RxD3 *rxdp3;
- struct mac_info *mac_control = &sp->mac_control;
- struct stat_block *stats = mac_control->stats_info;
- struct swStat *swstats = &stats->sw_stat;
-
- for (j = 0; j < rxd_count[sp->rxd_mode]; j++) {
- rxdp = mac_control->rings[ring_no].
- rx_blocks[blk].rxds[j].virt_addr;
- skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
- if (!skb)
- continue;
- if (sp->rxd_mode == RXD_MODE_1) {
- rxdp1 = (struct RxD1 *)rxdp;
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp1->Buffer0_ptr,
- dev->mtu +
- HEADER_ETHERNET_II_802_3_SIZE +
- HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
- DMA_FROM_DEVICE);
- memset(rxdp, 0, sizeof(struct RxD1));
- } else if (sp->rxd_mode == RXD_MODE_3B) {
- rxdp3 = (struct RxD3 *)rxdp;
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp3->Buffer0_ptr,
- BUF0_LEN, DMA_FROM_DEVICE);
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp3->Buffer1_ptr,
- BUF1_LEN, DMA_FROM_DEVICE);
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp3->Buffer2_ptr,
- dev->mtu + 4, DMA_FROM_DEVICE);
- memset(rxdp, 0, sizeof(struct RxD3));
- }
- swstats->mem_freed += skb->truesize;
- dev_kfree_skb(skb);
- mac_control->rings[ring_no].rx_bufs_left -= 1;
- }
-}
-
-/**
- * free_rx_buffers - Frees all Rx buffers
- * @sp: device private variable.
- * Description:
- * This function will free all Rx buffers allocated by host.
- * Return Value:
- * NONE.
- */
-
-static void free_rx_buffers(struct s2io_nic *sp)
-{
- struct net_device *dev = sp->dev;
- int i, blk = 0, buf_cnt = 0;
- struct config_param *config = &sp->config;
- struct mac_info *mac_control = &sp->mac_control;
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- for (blk = 0; blk < rx_ring_sz[i]; blk++)
- free_rxd_blk(sp, i, blk);
-
- ring->rx_curr_put_info.block_index = 0;
- ring->rx_curr_get_info.block_index = 0;
- ring->rx_curr_put_info.offset = 0;
- ring->rx_curr_get_info.offset = 0;
- ring->rx_bufs_left = 0;
- DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
- dev->name, buf_cnt, i);
- }
-}
-
-static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
-{
- if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
- DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
- ring->dev->name);
- }
- return 0;
-}
-
-/**
- * s2io_poll_msix - Rx interrupt handler for NAPI support
- * @napi : pointer to the napi structure.
- * @budget : The number of packets that were budgeted to be processed
- * during one pass through the 'Poll' function.
- * Description:
- * Comes into the picture only if NAPI support has been incorporated. It
- * does the same thing that rx_intr_handler does, but not in an interrupt
- * context, and it will process only a given number of packets.
- * Return value:
- * The number of packets processed.
- */
-
-static int s2io_poll_msix(struct napi_struct *napi, int budget)
-{
- struct ring_info *ring = container_of(napi, struct ring_info, napi);
- struct net_device *dev = ring->dev;
- int pkts_processed = 0;
- u8 __iomem *addr = NULL;
- u8 val8 = 0;
- struct s2io_nic *nic = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- int budget_org = budget;
-
- if (unlikely(!is_s2io_card_up(nic)))
- return 0;
-
- pkts_processed = rx_intr_handler(ring, budget);
- s2io_chk_rx_buffers(nic, ring);
-
- if (pkts_processed < budget_org) {
- napi_complete_done(napi, pkts_processed);
- /* Re-enable MSI-Rx Vector */
- addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
- addr += 7 - ring->ring_no;
- val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
- writeb(val8, addr);
- val8 = readb(addr);
- }
- return pkts_processed;
-}
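-
-/*
- * Worked example (illustrative): the per-ring Rx vectors are masked
- * one byte apart inside the 64-bit xmsi_mask_reg, so for ring_no == 2
- * the writeb() above lands at byte offset 7 - 2 == 5. Ring 0 is
- * re-enabled with the 0x3f pattern, every other ring with 0xbf.
- */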
-
-static int s2io_poll_inta(struct napi_struct *napi, int budget)
-{
- struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
- int pkts_processed = 0;
- int ring_pkts_processed, i;
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- int budget_org = budget;
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
-
- if (unlikely(!is_s2io_card_up(nic)))
- return 0;
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
- ring_pkts_processed = rx_intr_handler(ring, budget);
- s2io_chk_rx_buffers(nic, ring);
- pkts_processed += ring_pkts_processed;
- budget -= ring_pkts_processed;
- if (budget <= 0)
- break;
- }
- if (pkts_processed < budget_org) {
- napi_complete_done(napi, pkts_processed);
- /* Re-enable the Rx interrupts for the ring */
- writeq(0, &bar0->rx_traffic_mask);
- readl(&bar0->rx_traffic_mask);
- }
- return pkts_processed;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * s2io_netpoll - netpoll event handler entry point
- * @dev : pointer to the device structure.
- * Description:
- * This function will be called by the upper layer to check for events on
- * the interface in situations where interrupts are disabled. It is used
- * for specific in-kernel networking tasks, such as remote consoles and
- * kernel debugging over the network (e.g. netdump in Red Hat).
- */
-static void s2io_netpoll(struct net_device *dev)
-{
- struct s2io_nic *nic = netdev_priv(dev);
- const int irq = nic->pdev->irq;
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
- int i;
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
-
- if (pci_channel_offline(nic->pdev))
- return;
-
- disable_irq(irq);
-
- writeq(val64, &bar0->rx_traffic_int);
- writeq(val64, &bar0->tx_traffic_int);
-
- /* We need to free the transmitted skbs, or else netpoll will
- * run out of skbs and eventually a netpoll application such
- * as netdump will fail.
- */
- for (i = 0; i < config->tx_fifo_num; i++)
- tx_intr_handler(&mac_control->fifos[i]);
-
- /* check for received packet and indicate up to network */
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- rx_intr_handler(ring, 0);
- }
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
- DBG_PRINT(INFO_DBG,
- "%s: Out of memory in Rx Netpoll!!\n",
- dev->name);
- break;
- }
- }
- enable_irq(irq);
-}
-#endif
-
-/**
- * rx_intr_handler - Rx interrupt handler
- * @ring_data: per ring structure.
- * @budget: budget for napi processing.
- * Description:
- * If the interrupt is because of a received frame, or if the
- * receive ring contains fresh, as yet un-processed frames, this function
- * is called. It picks out the RxD at which the last Rx processing had
- * stopped, sends the skb to the OSM's Rx handler and then increments
- * the offset.
- * Return Value:
- * No. of napi packets processed.
- */
-static int rx_intr_handler(struct ring_info *ring_data, int budget)
-{
- int get_block, put_block;
- struct rx_curr_get_info get_info, put_info;
- struct RxD_t *rxdp;
- struct sk_buff *skb;
- int pkt_cnt = 0, napi_pkts = 0;
- int i;
- struct RxD1 *rxdp1;
- struct RxD3 *rxdp3;
-
- if (budget <= 0)
- return napi_pkts;
-
- get_info = ring_data->rx_curr_get_info;
- get_block = get_info.block_index;
- memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
- put_block = put_info.block_index;
- rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
-
- while (RXD_IS_UP2DT(rxdp)) {
- /*
- * If we are next to the put index then it's a
- * ring full condition.
- */
- if ((get_block == put_block) &&
- (get_info.offset + 1) == put_info.offset) {
- DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
- ring_data->dev->name);
- break;
- }
- skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
- if (skb == NULL) {
- DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
- ring_data->dev->name);
- return 0;
- }
- if (ring_data->rxd_mode == RXD_MODE_1) {
- rxdp1 = (struct RxD1 *)rxdp;
- dma_unmap_single(&ring_data->pdev->dev,
- (dma_addr_t)rxdp1->Buffer0_ptr,
- ring_data->mtu +
- HEADER_ETHERNET_II_802_3_SIZE +
- HEADER_802_2_SIZE +
- HEADER_SNAP_SIZE,
- DMA_FROM_DEVICE);
- } else if (ring_data->rxd_mode == RXD_MODE_3B) {
- rxdp3 = (struct RxD3 *)rxdp;
- dma_sync_single_for_cpu(&ring_data->pdev->dev,
- (dma_addr_t)rxdp3->Buffer0_ptr,
- BUF0_LEN, DMA_FROM_DEVICE);
- dma_unmap_single(&ring_data->pdev->dev,
- (dma_addr_t)rxdp3->Buffer2_ptr,
- ring_data->mtu + 4, DMA_FROM_DEVICE);
- }
- prefetch(skb->data);
- rx_osm_handler(ring_data, rxdp);
- get_info.offset++;
- ring_data->rx_curr_get_info.offset = get_info.offset;
- rxdp = ring_data->rx_blocks[get_block].
- rxds[get_info.offset].virt_addr;
- if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
- get_info.offset = 0;
- ring_data->rx_curr_get_info.offset = get_info.offset;
- get_block++;
- if (get_block == ring_data->block_count)
- get_block = 0;
- ring_data->rx_curr_get_info.block_index = get_block;
- rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
- }
-
- if (ring_data->nic->config.napi) {
- budget--;
- napi_pkts++;
- if (!budget)
- break;
- }
- pkt_cnt++;
- if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
- break;
- }
- if (ring_data->lro) {
- /* Clear all LRO sessions before exiting */
- for (i = 0; i < MAX_LRO_SESSIONS; i++) {
- struct lro *lro = &ring_data->lro0_n[i];
- if (lro->in_use) {
- update_L3L4_header(ring_data->nic, lro);
- queue_rx_frame(lro->parent, lro->vlan_tag);
- clear_lro_session(lro);
- }
- }
- }
- return napi_pkts;
-}
-
-/**
- * tx_intr_handler - Transmit interrupt handler
- * @fifo_data : fifo data pointer
- * Description:
- * If an interrupt was raised to indicate DMA completion of a
- * Tx packet, this function is called. It identifies the last TxD
- * whose buffer was freed and frees all skbs whose data have already
- * been DMA'ed into the NIC's internal memory.
- * Return Value:
- * NONE
- */
-
-static void tx_intr_handler(struct fifo_info *fifo_data)
-{
- struct s2io_nic *nic = fifo_data->nic;
- struct tx_curr_get_info get_info, put_info;
- struct sk_buff *skb = NULL;
- struct TxD *txdlp;
- int pkt_cnt = 0;
- unsigned long flags = 0;
- u8 err_mask;
- struct stat_block *stats = nic->mac_control.stats_info;
- struct swStat *swstats = &stats->sw_stat;
-
- if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
- return;
-
- get_info = fifo_data->tx_curr_get_info;
- memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
- txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
- while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
- (get_info.offset != put_info.offset) &&
- (txdlp->Host_Control)) {
- /* Check for TxD errors */
- if (txdlp->Control_1 & TXD_T_CODE) {
- unsigned long long err;
- err = txdlp->Control_1 & TXD_T_CODE;
- if (err & 0x1) {
- swstats->parity_err_cnt++;
- }
-
- /* update t_code statistics */
- err_mask = err >> 48;
- switch (err_mask) {
- case 2:
- swstats->tx_buf_abort_cnt++;
- break;
-
- case 3:
- swstats->tx_desc_abort_cnt++;
- break;
-
- case 7:
- swstats->tx_parity_err_cnt++;
- break;
-
- case 10:
- swstats->tx_link_loss_cnt++;
- break;
-
- case 15:
- swstats->tx_list_proc_err_cnt++;
- break;
- }
- }
-
- skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
- if (skb == NULL) {
- spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
- DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
- __func__);
- return;
- }
- pkt_cnt++;
-
- /* Updating the statistics block */
- swstats->mem_freed += skb->truesize;
- dev_consume_skb_irq(skb);
-
- get_info.offset++;
- if (get_info.offset == get_info.fifo_len + 1)
- get_info.offset = 0;
- txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
- fifo_data->tx_curr_get_info.offset = get_info.offset;
- }
-
- s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
-
- spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
-}
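-
-/*
- * Decoding sketch (illustrative): TXD_T_CODE masks the transfer-code
- * field out of Control_1 and err >> 48 shifts it down to the small
- * integer that the switch above maps onto per-cause statistics
- * (2 = buffer abort, 3 = descriptor abort, 7 = parity error,
- * 10 = link loss, 15 = list processing error).
- */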
-
-/**
- * s2io_mdio_write - Function to write in to MDIO registers
- * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
- * @addr : address value
- * @value : data value
- * @dev : pointer to net_device structure
- * Description:
- * This function is used to write values to the MDIO registers.
- * Return Value: NONE
- */
-static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
- struct net_device *dev)
-{
- u64 val64;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- /* address transaction */
- val64 = MDIO_MMD_INDX_ADDR(addr) |
- MDIO_MMD_DEV_ADDR(mmd_type) |
- MDIO_MMS_PRT_ADDR(0x0);
- writeq(val64, &bar0->mdio_control);
- val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
- writeq(val64, &bar0->mdio_control);
- udelay(100);
-
- /* Data transaction */
- val64 = MDIO_MMD_INDX_ADDR(addr) |
- MDIO_MMD_DEV_ADDR(mmd_type) |
- MDIO_MMS_PRT_ADDR(0x0) |
- MDIO_MDIO_DATA(value) |
- MDIO_OP(MDIO_OP_WRITE_TRANS);
- writeq(val64, &bar0->mdio_control);
- val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
- writeq(val64, &bar0->mdio_control);
- udelay(100);
-
- val64 = MDIO_MMD_INDX_ADDR(addr) |
- MDIO_MMD_DEV_ADDR(mmd_type) |
- MDIO_MMS_PRT_ADDR(0x0) |
- MDIO_OP(MDIO_OP_READ_TRANS);
- writeq(val64, &bar0->mdio_control);
- val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
- writeq(val64, &bar0->mdio_control);
- udelay(100);
-}
-
-/**
- * s2io_mdio_read - Function to read from the MDIO registers
- * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
- * @addr : address value
- * @dev : pointer to net_device structure
- * Description:
- * This function is used to read values from the MDIO registers.
- * Return Value: the 16-bit value read
- */
-static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
-{
- u64 val64 = 0x0;
- u64 rval64 = 0x0;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- /* address transaction */
- val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
- | MDIO_MMD_DEV_ADDR(mmd_type)
- | MDIO_MMS_PRT_ADDR(0x0));
- writeq(val64, &bar0->mdio_control);
- val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
- writeq(val64, &bar0->mdio_control);
- udelay(100);
-
- /* Data transaction */
- val64 = MDIO_MMD_INDX_ADDR(addr) |
- MDIO_MMD_DEV_ADDR(mmd_type) |
- MDIO_MMS_PRT_ADDR(0x0) |
- MDIO_OP(MDIO_OP_READ_TRANS);
- writeq(val64, &bar0->mdio_control);
- val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
- writeq(val64, &bar0->mdio_control);
- udelay(100);
-
- /* Read the value from regs */
- rval64 = readq(&bar0->mdio_control);
- rval64 = rval64 & 0xFFFF0000;
- rval64 = rval64 >> 16;
- return rval64;
-}
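-
-/*
- * Usage sketch: a caller such as s2io_updt_xpak_counter() below does
- *
- *   u64 ctrl = s2io_mdio_read(MDIO_MMD_PMAPMD, MDIO_CTRL1, dev);
- *
- * which issues an address transaction followed by a read transaction
- * and then extracts the 16-bit result from bits 31..16 of the
- * mdio_control register.
- */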
-
-/**
- * s2io_chk_xpak_counter - Function to check the status of the xpak counters
- * @counter : counter value to be updated
- * @regs_stat : registers status
- * @index : index
- * @flag : flag to indicate the status
- * @type : counter type
- * Description:
- * This function checks the status of the XPAK counters value.
- * Return Value: NONE
- */
-
-static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
- u16 flag, u16 type)
-{
- u64 mask = 0x3;
- u64 val64;
- int i;
- for (i = 0; i < index; i++)
- mask = mask << 0x2;
-
- if (flag > 0) {
- *counter = *counter + 1;
- val64 = *regs_stat & mask;
- val64 = val64 >> (index * 0x2);
- val64 = val64 + 1;
- if (val64 == 3) {
- switch (type) {
- case 1:
- DBG_PRINT(ERR_DBG,
- "Take Xframe NIC out of service.\n");
- DBG_PRINT(ERR_DBG,
-"Excessive temperatures may result in premature transceiver failure.\n");
- break;
- case 2:
- DBG_PRINT(ERR_DBG,
- "Take Xframe NIC out of service.\n");
- DBG_PRINT(ERR_DBG,
-"Excessive bias currents may indicate imminent laser diode failure.\n");
- break;
- case 3:
- DBG_PRINT(ERR_DBG,
- "Take Xframe NIC out of service.\n");
- DBG_PRINT(ERR_DBG,
-"Excessive laser output power may saturate far-end receiver.\n");
- break;
- default:
- DBG_PRINT(ERR_DBG,
- "Incorrect XPAK Alarm type\n");
- }
- val64 = 0x0;
- }
- val64 = val64 << (index * 0x2);
- *regs_stat = (*regs_stat & (~mask)) | (val64);
-
- } else {
- *regs_stat = *regs_stat & (~mask);
- }
-}
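-
-/*
- * Bookkeeping sketch (illustrative): each XPAK alarm keeps a 2-bit
- * saturating count in *regs_stat at bit position index * 2; the mask
- * loop above computes 0x3 << (index * 2). Once three consecutive high
- * readings accumulate, the warning is printed and the field wraps back
- * to zero, while a low reading clears the field immediately.
- */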
-
-/**
- * s2io_updt_xpak_counter - Function to update the xpak counters
- * @dev : pointer to net_device struct
- * Description:
- * This function updates the status of the XPAK counters value.
- * Return Value: NONE
- */
-static void s2io_updt_xpak_counter(struct net_device *dev)
-{
- u16 flag = 0x0;
- u16 type = 0x0;
- u16 val16 = 0x0;
- u64 val64 = 0x0;
- u64 addr = 0x0;
-
- struct s2io_nic *sp = netdev_priv(dev);
- struct stat_block *stats = sp->mac_control.stats_info;
- struct xpakStat *xstats = &stats->xpak_stat;
-
- /* Check the communication with the MDIO slave */
- addr = MDIO_CTRL1;
- val64 = 0x0;
- val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
- if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
- DBG_PRINT(ERR_DBG,
- "ERR: MDIO slave access failed - Returned %llx\n",
- (unsigned long long)val64);
- return;
- }
-
- /* Check for the expected value of control reg 1 */
- if (val64 != MDIO_CTRL1_SPEED10G) {
- DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
- "Returned: %llx- Expected: 0x%x\n",
- (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
- return;
- }
-
- /* Loading the DOM register to MDIO register */
- addr = 0xA100;
- s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
- val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
-
- /* Reading the Alarm flags */
- addr = 0xA070;
- val64 = 0x0;
- val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
-
- flag = CHECKBIT(val64, 0x7);
- type = 1;
- s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
- &xstats->xpak_regs_stat,
- 0x0, flag, type);
-
- if (CHECKBIT(val64, 0x6))
- xstats->alarm_transceiver_temp_low++;
-
- flag = CHECKBIT(val64, 0x3);
- type = 2;
- s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
- &xstats->xpak_regs_stat,
- 0x2, flag, type);
-
- if (CHECKBIT(val64, 0x2))
- xstats->alarm_laser_bias_current_low++;
-
- flag = CHECKBIT(val64, 0x1);
- type = 3;
- s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
- &xstats->xpak_regs_stat,
- 0x4, flag, type);
-
- if (CHECKBIT(val64, 0x0))
- xstats->alarm_laser_output_power_low++;
-
- /* Reading the Warning flags */
- addr = 0xA074;
- val64 = 0x0;
- val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
-
- if (CHECKBIT(val64, 0x7))
- xstats->warn_transceiver_temp_high++;
-
- if (CHECKBIT(val64, 0x6))
- xstats->warn_transceiver_temp_low++;
-
- if (CHECKBIT(val64, 0x3))
- xstats->warn_laser_bias_current_high++;
-
- if (CHECKBIT(val64, 0x2))
- xstats->warn_laser_bias_current_low++;
-
- if (CHECKBIT(val64, 0x1))
- xstats->warn_laser_output_power_high++;
-
- if (CHECKBIT(val64, 0x0))
- xstats->warn_laser_output_power_low++;
-}
-
-/**
- * wait_for_cmd_complete - waits for a command to complete.
- * @addr: address
- * @busy_bit: bit to check for busy
- * @bit_state: state to check
- * @may_sleep: indicates whether we may sleep while waiting for the
- * command to complete
- * Description: Function that waits for a command written to the RMAC
- * ADDR DATA registers to be completed and returns either success or
- * failure depending on whether the command completed or not.
- * Return value:
- * SUCCESS on success and FAILURE on failure.
- */
-
-static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
- int bit_state, bool may_sleep)
-{
- int ret = FAILURE, cnt = 0, delay = 1;
- u64 val64;
-
- if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
- return FAILURE;
-
- do {
- val64 = readq(addr);
- if (bit_state == S2IO_BIT_RESET) {
- if (!(val64 & busy_bit)) {
- ret = SUCCESS;
- break;
- }
- } else {
- if (val64 & busy_bit) {
- ret = SUCCESS;
- break;
- }
- }
-
- if (!may_sleep)
- mdelay(delay);
- else
- msleep(delay);
-
- if (++cnt >= 10)
- delay = 50;
- } while (cnt < 20);
- return ret;
-}
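-
-/*
- * Timing sketch (illustrative): delay starts at 1 ms and switches to
- * 50 ms once ten polls have elapsed, so the loop above waits at most
- * 10 * 1 + 10 * 50 = 510 ms before giving up and returning FAILURE.
- */
-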
-/**
- * check_pci_device_id - Checks if the device id is supported
- * @id : device id
- * Description: Function to check if the PCI device id is supported by the driver.
- * Return value: Actual device id if supported else PCI_ANY_ID
- */
-static u16 check_pci_device_id(u16 id)
-{
- switch (id) {
- case PCI_DEVICE_ID_HERC_WIN:
- case PCI_DEVICE_ID_HERC_UNI:
- return XFRAME_II_DEVICE;
- case PCI_DEVICE_ID_S2IO_UNI:
- case PCI_DEVICE_ID_S2IO_WIN:
- return XFRAME_I_DEVICE;
- default:
- return PCI_ANY_ID;
- }
-}
-
-/**
- * s2io_reset - Resets the card.
- * @sp : private member of the device structure.
- * Description: Function to Reset the card. This function then also
- * restores the previously saved PCI configuration space registers as
- * the card reset also resets the configuration space.
- * Return value:
- * void.
- */
-
-static void s2io_reset(struct s2io_nic *sp)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64;
- u16 subid, pci_cmd;
- int i;
- u16 val16;
- unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
- unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
- struct stat_block *stats;
- struct swStat *swstats;
-
- DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
- __func__, pci_name(sp->pdev));
-
- /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
- pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
-
- val64 = SW_RESET_ALL;
- writeq(val64, &bar0->sw_reset);
- if (strstr(sp->product_name, "CX4"))
- msleep(750);
- msleep(250);
- for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
-
- /* Restore the PCI state saved during initialization. */
- pci_restore_state(sp->pdev);
- pci_read_config_word(sp->pdev, 0x2, &val16);
- if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
- break;
- msleep(200);
- }
-
- if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
- DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
-
- pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
-
- s2io_init_pci(sp);
-
- /* Set swapper to enable I/O register access */
- s2io_set_swapper(sp);
-
- /* restore mac_addr entries */
- do_s2io_restore_unicast_mc(sp);
-
- /* Restore the MSIX table entries from local variables */
- restore_xmsi_data(sp);
-
- /* Clear certain PCI/PCI-X fields after reset */
- if (sp->device_type == XFRAME_II_DEVICE) {
- /* Clear "detected parity error" bit */
- pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
-
- /* Clearing PCIX Ecc status register */
- pci_write_config_dword(sp->pdev, 0x68, 0x7C);
-
- /* Clearing PCI_STATUS error reflected here */
- writeq(s2BIT(62), &bar0->txpic_int_reg);
- }
-
- /* Reset device statistics maintained by OS */
- memset(&sp->stats, 0, sizeof(struct net_device_stats));
-
- stats = sp->mac_control.stats_info;
- swstats = &stats->sw_stat;
-
- /* save link up/down time/cnt, reset/memory/watchdog cnt */
- up_cnt = swstats->link_up_cnt;
- down_cnt = swstats->link_down_cnt;
- up_time = swstats->link_up_time;
- down_time = swstats->link_down_time;
- reset_cnt = swstats->soft_reset_cnt;
- mem_alloc_cnt = swstats->mem_allocated;
- mem_free_cnt = swstats->mem_freed;
- watchdog_cnt = swstats->watchdog_timer_cnt;
-
- memset(stats, 0, sizeof(struct stat_block));
-
- /* restore link up/down time/cnt, reset/memory/watchdog cnt */
- swstats->link_up_cnt = up_cnt;
- swstats->link_down_cnt = down_cnt;
- swstats->link_up_time = up_time;
- swstats->link_down_time = down_time;
- swstats->soft_reset_cnt = reset_cnt;
- swstats->mem_allocated = mem_alloc_cnt;
- swstats->mem_freed = mem_free_cnt;
- swstats->watchdog_timer_cnt = watchdog_cnt;
-
- /* SXE-002: Configure link and activity LED to turn it off */
- subid = sp->pdev->subsystem_device;
- if (((subid & 0xFF) >= 0x07) &&
- (sp->device_type == XFRAME_I_DEVICE)) {
- val64 = readq(&bar0->gpio_control);
- val64 |= 0x0000800000000000ULL;
- writeq(val64, &bar0->gpio_control);
- val64 = 0x0411040400000000ULL;
- writeq(val64, (void __iomem *)bar0 + 0x2700);
- }
-
- /*
- * Clear spurious ECC interrupts that would have occurred on
- * XFRAME II cards after reset.
- */
- if (sp->device_type == XFRAME_II_DEVICE) {
- val64 = readq(&bar0->pcc_err_reg);
- writeq(val64, &bar0->pcc_err_reg);
- }
-
- sp->device_enabled_once = false;
-}
-
-/**
- * s2io_set_swapper - to set the swapper control on the card
- * @sp : private member of the device structure,
- * pointer to the s2io_nic structure.
- * Description: Function to set the swapper control on the card
- * correctly depending on the 'endianness' of the system.
- * Return value:
- * SUCCESS on success and FAILURE on failure.
- */
-
-static int s2io_set_swapper(struct s2io_nic *sp)
-{
- struct net_device *dev = sp->dev;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64, valt, valr;
-
- /*
- * Set proper endian settings and verify the same by reading
- * the PIF Feed-back register.
- */
-
- val64 = readq(&bar0->pif_rd_swapper_fb);
- if (val64 != 0x0123456789ABCDEFULL) {
- int i = 0;
- static const u64 value[] = {
- 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
- 0x8100008181000081ULL, /* FE=1, SE=0 */
- 0x4200004242000042ULL, /* FE=0, SE=1 */
- 0 /* FE=0, SE=0 */
- };
-
- while (i < 4) {
- writeq(value[i], &bar0->swapper_ctrl);
- val64 = readq(&bar0->pif_rd_swapper_fb);
- if (val64 == 0x0123456789ABCDEFULL)
- break;
- i++;
- }
- if (i == 4) {
- DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
- "feedback read %llx\n",
- dev->name, (unsigned long long)val64);
- return FAILURE;
- }
- valr = value[i];
- } else {
- valr = readq(&bar0->swapper_ctrl);
- }
-
- valt = 0x0123456789ABCDEFULL;
- writeq(valt, &bar0->xmsi_address);
- val64 = readq(&bar0->xmsi_address);
-
- if (val64 != valt) {
- int i = 0;
- static const u64 value[] = {
- 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
- 0x0081810000818100ULL, /* FE=1, SE=0 */
- 0x0042420000424200ULL, /* FE=0, SE=1 */
- 0 /* FE=0, SE=0 */
- };
-
- while (i < 4) {
- writeq((value[i] | valr), &bar0->swapper_ctrl);
- writeq(valt, &bar0->xmsi_address);
- val64 = readq(&bar0->xmsi_address);
- if (val64 == valt)
- break;
- i++;
- }
- if (i == 4) {
- unsigned long long x = val64;
- DBG_PRINT(ERR_DBG,
- "Write failed, Xmsi_addr reads:0x%llx\n", x);
- return FAILURE;
- }
- }
- val64 = readq(&bar0->swapper_ctrl);
- val64 &= 0xFFFF000000000000ULL;
-
-#ifdef __BIG_ENDIAN
-	/*
-	 * The device is set to big-endian format by default, so a
-	 * big-endian driver need not set anything.
-	 */
- val64 |= (SWAPPER_CTRL_TXP_FE |
- SWAPPER_CTRL_TXP_SE |
- SWAPPER_CTRL_TXD_R_FE |
- SWAPPER_CTRL_TXD_W_FE |
- SWAPPER_CTRL_TXF_R_FE |
- SWAPPER_CTRL_RXD_R_FE |
- SWAPPER_CTRL_RXD_W_FE |
- SWAPPER_CTRL_RXF_W_FE |
- SWAPPER_CTRL_XMSI_FE |
- SWAPPER_CTRL_STATS_FE |
- SWAPPER_CTRL_STATS_SE);
- if (sp->config.intr_type == INTA)
- val64 |= SWAPPER_CTRL_XMSI_SE;
- writeq(val64, &bar0->swapper_ctrl);
-#else
- /*
- * Initially we enable all bits to make it accessible by the
- * driver, then we selectively enable only those bits that
- * we want to set.
- */
- val64 |= (SWAPPER_CTRL_TXP_FE |
- SWAPPER_CTRL_TXP_SE |
- SWAPPER_CTRL_TXD_R_FE |
- SWAPPER_CTRL_TXD_R_SE |
- SWAPPER_CTRL_TXD_W_FE |
- SWAPPER_CTRL_TXD_W_SE |
- SWAPPER_CTRL_TXF_R_FE |
- SWAPPER_CTRL_RXD_R_FE |
- SWAPPER_CTRL_RXD_R_SE |
- SWAPPER_CTRL_RXD_W_FE |
- SWAPPER_CTRL_RXD_W_SE |
- SWAPPER_CTRL_RXF_W_FE |
- SWAPPER_CTRL_XMSI_FE |
- SWAPPER_CTRL_STATS_FE |
- SWAPPER_CTRL_STATS_SE);
- if (sp->config.intr_type == INTA)
- val64 |= SWAPPER_CTRL_XMSI_SE;
- writeq(val64, &bar0->swapper_ctrl);
-#endif
- val64 = readq(&bar0->swapper_ctrl);
-
- /*
- * Verifying if endian settings are accurate by reading a
- * feedback register.
- */
- val64 = readq(&bar0->pif_rd_swapper_fb);
- if (val64 != 0x0123456789ABCDEFULL) {
-		/* Endian settings are incorrect, needs another look. */
- DBG_PRINT(ERR_DBG,
- "%s: Endian settings are wrong, feedback read %llx\n",
- dev->name, (unsigned long long)val64);
- return FAILURE;
- }
-
- return SUCCESS;
-}
-
-static int wait_for_msix_trans(struct s2io_nic *nic, int i)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- u64 val64;
- int ret = 0, cnt = 0;
-
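-	/*
-	 * Poll until the adapter clears the xmsi_access busy flag
-	 * (s2BIT(15)); give up after roughly 5 ms.
-	 */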
- do {
- val64 = readq(&bar0->xmsi_access);
- if (!(val64 & s2BIT(15)))
- break;
- mdelay(1);
- cnt++;
- } while (cnt < 5);
- if (cnt == 5) {
- DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
- ret = 1;
- }
-
- return ret;
-}
-
-static void restore_xmsi_data(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- u64 val64;
- int i, msix_index;
-
- if (nic->device_type == XFRAME_I_DEVICE)
- return;
-
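-	/*
-	 * Vector 0 carries the alarm interrupt; ring vectors live at
-	 * offsets 1, 9, 17, ... (spaced 8 apart), matching the layout
-	 * programmed in s2io_enable_msi_x().
-	 */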
- for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
- msix_index = (i) ? ((i-1) * 8 + 1) : 0;
- writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
- writeq(nic->msix_info[i].data, &bar0->xmsi_data);
- val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
- writeq(val64, &bar0->xmsi_access);
- if (wait_for_msix_trans(nic, msix_index))
- DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
- __func__, msix_index);
- }
-}
-
-static void store_xmsi_data(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- u64 val64, addr, data;
- int i, msix_index;
-
- if (nic->device_type == XFRAME_I_DEVICE)
- return;
-
-	/* Read back and store each vector's address/data pair */
- for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
- msix_index = (i) ? ((i-1) * 8 + 1) : 0;
- val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
- writeq(val64, &bar0->xmsi_access);
- if (wait_for_msix_trans(nic, msix_index)) {
- DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
- __func__, msix_index);
- continue;
- }
- addr = readq(&bar0->xmsi_address);
- data = readq(&bar0->xmsi_data);
- if (addr && data) {
- nic->msix_info[i].addr = addr;
- nic->msix_info[i].data = data;
- }
- }
-}
-
-static int s2io_enable_msi_x(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- u64 rx_mat;
- u16 msi_control; /* Temp variable */
- int ret, i, j, msix_indx = 1;
- int size;
- struct stat_block *stats = nic->mac_control.stats_info;
- struct swStat *swstats = &stats->sw_stat;
-
- size = nic->num_entries * sizeof(struct msix_entry);
- nic->entries = kzalloc(size, GFP_KERNEL);
- if (!nic->entries) {
- DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
- __func__);
- swstats->mem_alloc_fail_cnt++;
- return -ENOMEM;
- }
- swstats->mem_allocated += size;
-
- size = nic->num_entries * sizeof(struct s2io_msix_entry);
- nic->s2io_entries = kzalloc(size, GFP_KERNEL);
- if (!nic->s2io_entries) {
- DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
- __func__);
- swstats->mem_alloc_fail_cnt++;
- kfree(nic->entries);
- swstats->mem_freed
- += (nic->num_entries * sizeof(struct msix_entry));
- return -ENOMEM;
- }
- swstats->mem_allocated += size;
-
- nic->entries[0].entry = 0;
- nic->s2io_entries[0].entry = 0;
- nic->s2io_entries[0].in_use = MSIX_FLG;
- nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
- nic->s2io_entries[0].arg = &nic->mac_control.fifos;
-
- for (i = 1; i < nic->num_entries; i++) {
- nic->entries[i].entry = ((i - 1) * 8) + 1;
- nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
- nic->s2io_entries[i].arg = NULL;
- nic->s2io_entries[i].in_use = 0;
- }
-
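-	/*
-	 * Steer each Rx ring's interrupt to its own MSI-X vector via
-	 * the rx_mat routing register; ring j maps to vector j * 8 + 1.
-	 */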
- rx_mat = readq(&bar0->rx_mat);
- for (j = 0; j < nic->config.rx_ring_num; j++) {
- rx_mat |= RX_MAT_SET(j, msix_indx);
- nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
- nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
- nic->s2io_entries[j+1].in_use = MSIX_FLG;
- msix_indx += 8;
- }
- writeq(rx_mat, &bar0->rx_mat);
- readq(&bar0->rx_mat);
-
- ret = pci_enable_msix_range(nic->pdev, nic->entries,
- nic->num_entries, nic->num_entries);
-	/* Fail init on error or if we get fewer vectors than required */
- if (ret < 0) {
- DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
- kfree(nic->entries);
- swstats->mem_freed += nic->num_entries *
- sizeof(struct msix_entry);
- kfree(nic->s2io_entries);
- swstats->mem_freed += nic->num_entries *
- sizeof(struct s2io_msix_entry);
- nic->entries = NULL;
- nic->s2io_entries = NULL;
- return -ENOMEM;
- }
-
- /*
- * To enable MSI-X, MSI also needs to be enabled, due to a bug
- * in the herc NIC. (Temp change, needs to be removed later)
- */
- pci_read_config_word(nic->pdev, 0x42, &msi_control);
- msi_control |= 0x1; /* Enable MSI */
- pci_write_config_word(nic->pdev, 0x42, msi_control);
-
- return 0;
-}
-
-/* Handle software interrupt used during MSI(X) test */
-static irqreturn_t s2io_test_intr(int irq, void *dev_id)
-{
- struct s2io_nic *sp = dev_id;
-
- sp->msi_detected = 1;
- wake_up(&sp->msi_wait);
-
- return IRQ_HANDLED;
-}
-
-/* Test interrupt path by forcing a software IRQ */
-static int s2io_test_msi(struct s2io_nic *sp)
-{
- struct pci_dev *pdev = sp->pdev;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- int err;
- u64 val64, saved64;
-
- err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
- sp->name, sp);
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
- sp->dev->name, pci_name(pdev), pdev->irq);
- return err;
- }
-
- init_waitqueue_head(&sp->msi_wait);
- sp->msi_detected = 0;
-
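-	/*
-	 * Fire a one-shot scheduled-timer interrupt routed to MSI-X
-	 * vector 1 and wait up to 100 ms for the test handler to run.
-	 */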
- saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
- val64 |= SCHED_INT_CTRL_ONE_SHOT;
- val64 |= SCHED_INT_CTRL_TIMER_EN;
- val64 |= SCHED_INT_CTRL_INT2MSI(1);
- writeq(val64, &bar0->scheduled_int_ctrl);
-
- wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
-
- if (!sp->msi_detected) {
- /* MSI(X) test failed, go back to INTx mode */
- DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
- "using MSI(X) during test\n",
- sp->dev->name, pci_name(pdev));
-
- err = -EOPNOTSUPP;
- }
-
- free_irq(sp->entries[1].vector, sp);
-
- writeq(saved64, &bar0->scheduled_int_ctrl);
-
- return err;
-}
-
-static void remove_msix_isr(struct s2io_nic *sp)
-{
- int i;
- u16 msi_control;
-
- for (i = 0; i < sp->num_entries; i++) {
- if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
- int vector = sp->entries[i].vector;
- void *arg = sp->s2io_entries[i].arg;
- free_irq(vector, arg);
- }
- }
-
- kfree(sp->entries);
- kfree(sp->s2io_entries);
- sp->entries = NULL;
- sp->s2io_entries = NULL;
-
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
-
- pci_disable_msix(sp->pdev);
-}
-
-static void remove_inta_isr(struct s2io_nic *sp)
-{
- free_irq(sp->pdev->irq, sp->dev);
-}
-
-/* ********************************************************* *
- * Functions defined below concern the OS part of the driver *
- * ********************************************************* */
-
-/**
- * s2io_open - open entry point of the driver
- * @dev : pointer to the device structure.
- * Description:
- * This function is the open entry point of the driver. It mainly calls a
- * function to allocate Rx buffers and inserts them into the buffer
- * descriptors and then enables the Rx part of the NIC.
- * Return value:
- * 0 on success and an appropriate (-)ve integer as defined in errno.h
- * file on failure.
- */
-
-static int s2io_open(struct net_device *dev)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
- int err = 0;
-
-	/*
-	 * Make sure the link is reported down by default every time
-	 * the NIC is initialized.
-	 */
- netif_carrier_off(dev);
- sp->last_link_state = 0;
-
- /* Initialize H/W and enable interrupts */
- err = s2io_card_up(sp);
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
- dev->name);
- goto hw_init_failed;
- }
-
- if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
- DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
- s2io_card_down(sp);
- err = -ENODEV;
- goto hw_init_failed;
- }
- s2io_start_all_tx_queue(sp);
- return 0;
-
-hw_init_failed:
- if (sp->config.intr_type == MSI_X) {
- if (sp->entries) {
- kfree(sp->entries);
- swstats->mem_freed += sp->num_entries *
- sizeof(struct msix_entry);
- }
- if (sp->s2io_entries) {
- kfree(sp->s2io_entries);
- swstats->mem_freed += sp->num_entries *
- sizeof(struct s2io_msix_entry);
- }
- }
- return err;
-}
-
-/**
- * s2io_close - close entry point of the driver
- * @dev : device pointer.
- * Description:
- * This is the stop entry point of the driver. It needs to undo exactly
- * whatever was done by the open entry point, and thus is usually referred
- * to as the close function. Among other things, this function mainly stops
- * the Rx side of the NIC and frees all the Rx buffers in the Rx rings.
- * Return value:
- * 0 on success and an appropriate (-)ve integer as defined in errno.h
- * file on failure.
- */
-
-static int s2io_close(struct net_device *dev)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- struct config_param *config = &sp->config;
- u64 tmp64;
- int offset;
-
-	/*
-	 * Return if the device is already closed.
-	 * Can happen when s2io_card_up failed in change_mtu.
-	 */
- if (!is_s2io_card_up(sp))
- return 0;
-
- s2io_stop_all_tx_queue(sp);
- /* delete all populated mac entries */
- for (offset = 1; offset < config->max_mc_addr; offset++) {
- tmp64 = do_s2io_read_unicast_mc(sp, offset);
- if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
- do_s2io_delete_unicast_mc(sp, tmp64);
- }
-
- s2io_card_down(sp);
-
- return 0;
-}
-
-/**
- * s2io_xmit - Tx entry point of the driver
- * @skb : the socket buffer containing the Tx data.
- * @dev : device pointer.
- * Description :
- * This function is the Tx entry point of the driver. The S2IO NIC supports
- * certain protocol-assist features on the Tx side, namely CSO, S/G and LSO.
- * NOTE: when the device cannot queue the packet, only the trans_start
- * variable is left un-updated.
- * Return value:
- * NETDEV_TX_OK normally; NETDEV_TX_BUSY if the selected queue is already
- * stopped.
- */
-
-static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
- register u64 val64;
- struct TxD *txdp;
- struct TxFIFO_element __iomem *tx_fifo;
- unsigned long flags = 0;
- u16 vlan_tag = 0;
- struct fifo_info *fifo = NULL;
- int offload_type;
- int enable_per_list_interrupt = 0;
- struct config_param *config = &sp->config;
- struct mac_info *mac_control = &sp->mac_control;
- struct stat_block *stats = mac_control->stats_info;
- struct swStat *swstats = &stats->sw_stat;
-
- DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
-
- if (unlikely(skb->len <= 0)) {
- DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (!is_s2io_card_up(sp)) {
- DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
- dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- queue = 0;
- if (skb_vlan_tag_present(skb))
- vlan_tag = skb_vlan_tag_get(skb);
- if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *ip;
- struct tcphdr *th;
- ip = ip_hdr(skb);
-
- if (!ip_is_fragment(ip)) {
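-				/*
-				 * The port words are read through a tcphdr
-				 * cast; this is safe for UDP too, since the
-				 * source/dest ports sit at the same offsets.
-				 */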
- th = (struct tcphdr *)(((unsigned char *)ip) +
- ip->ihl*4);
-
- if (ip->protocol == IPPROTO_TCP) {
- queue_len = sp->total_tcp_fifos;
- queue = (ntohs(th->source) +
- ntohs(th->dest)) &
- sp->fifo_selector[queue_len - 1];
- if (queue >= queue_len)
- queue = queue_len - 1;
- } else if (ip->protocol == IPPROTO_UDP) {
- queue_len = sp->total_udp_fifos;
- queue = (ntohs(th->source) +
- ntohs(th->dest)) &
- sp->fifo_selector[queue_len - 1];
- if (queue >= queue_len)
- queue = queue_len - 1;
- queue += sp->udp_fifo_idx;
- if (skb->len > 1024)
- enable_per_list_interrupt = 1;
- }
- }
- }
- } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
- /* get fifo number based on skb->priority value */
- queue = config->fifo_mapping
- [skb->priority & (MAX_TX_FIFOS - 1)];
- fifo = &mac_control->fifos[queue];
-
- spin_lock_irqsave(&fifo->tx_lock, flags);
-
- if (sp->config.multiq) {
- if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
- return NETDEV_TX_BUSY;
- }
- } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
- if (netif_queue_stopped(dev)) {
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
- return NETDEV_TX_BUSY;
- }
- }
-
- put_off = (u16)fifo->tx_curr_put_info.offset;
- get_off = (u16)fifo->tx_curr_get_info.offset;
- txdp = fifo->list_info[put_off].list_virt_addr;
-
- queue_len = fifo->tx_curr_put_info.fifo_len + 1;
- /* Avoid "put" pointer going beyond "get" pointer */
- if (txdp->Host_Control ||
- ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
- DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
- s2io_stop_tx_queue(sp, fifo->fifo_no);
- dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
- return NETDEV_TX_OK;
- }
-
- offload_type = s2io_offload_type(skb);
- if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- txdp->Control_1 |= TXD_TCP_LSO_EN;
- txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
- }
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
- TXD_TX_CKO_TCP_EN |
- TXD_TX_CKO_UDP_EN);
- }
- txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
- txdp->Control_1 |= TXD_LIST_OWN_XENA;
- txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
- if (enable_per_list_interrupt)
- if (put_off & (queue_len >> 5))
- txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
- if (vlan_tag) {
- txdp->Control_2 |= TXD_VLAN_ENABLE;
- txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
- }
-
- frg_len = skb_headlen(skb);
- txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
- frg_len, DMA_TO_DEVICE);
- if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
- goto pci_map_failed;
-
- txdp->Host_Control = (unsigned long)skb;
- txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
-
- frg_cnt = skb_shinfo(skb)->nr_frags;
- /* For fragmented SKB. */
- for (i = 0; i < frg_cnt; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- /* A '0' length fragment will be ignored */
- if (!skb_frag_size(frag))
- continue;
- txdp++;
- txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
- frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
- }
- txdp->Control_1 |= TXD_GATHER_CODE_LAST;
-
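-	/*
-	 * Kick the FIFO: hand the descriptor list's physical address to
-	 * the NIC, then write the control word describing the list.
-	 */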
- tx_fifo = mac_control->tx_FIFO_start[queue];
- val64 = fifo->list_info[put_off].list_phy_addr;
- writeq(val64, &tx_fifo->TxDL_Pointer);
-
- val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
- TX_FIFO_LAST_LIST);
- if (offload_type)
- val64 |= TX_FIFO_SPECIAL_FUNC;
-
- writeq(val64, &tx_fifo->List_Control);
-
- put_off++;
- if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
- put_off = 0;
- fifo->tx_curr_put_info.offset = put_off;
-
- /* Avoid "put" pointer going beyond "get" pointer */
- if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
- swstats->fifo_full_cnt++;
- DBG_PRINT(TX_DBG,
- "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
- put_off, get_off);
- s2io_stop_tx_queue(sp, fifo->fifo_no);
- }
- swstats->mem_allocated += skb->truesize;
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
-
- if (sp->config.intr_type == MSI_X)
- tx_intr_handler(fifo);
-
- return NETDEV_TX_OK;
-
-pci_map_failed:
- swstats->pci_map_fail_cnt++;
- s2io_stop_tx_queue(sp, fifo->fifo_no);
- swstats->mem_freed += skb->truesize;
- dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
- return NETDEV_TX_OK;
-}
-
-static void
-s2io_alarm_handle(struct timer_list *t)
-{
- struct s2io_nic *sp = timer_container_of(sp, t, alarm_timer);
- struct net_device *dev = sp->dev;
-
- s2io_handle_errors(dev);
- mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
-}
-
-static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
-{
- struct ring_info *ring = (struct ring_info *)dev_id;
- struct s2io_nic *sp = ring->nic;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- if (unlikely(!is_s2io_card_up(sp)))
- return IRQ_HANDLED;
-
- if (sp->config.napi) {
- u8 __iomem *addr = NULL;
- u8 val8 = 0;
-
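-		/*
-		 * Mask further interrupts for this ring by writing its
-		 * byte in xmsi_mask_reg, then hand the work to NAPI;
-		 * the poll routine unmasks it when done.
-		 */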
- addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
- addr += (7 - ring->ring_no);
- val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
- writeb(val8, addr);
- val8 = readb(addr);
- napi_schedule(&ring->napi);
- } else {
- rx_intr_handler(ring, 0);
- s2io_chk_rx_buffers(sp, ring);
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
-{
- int i;
- struct fifo_info *fifos = (struct fifo_info *)dev_id;
- struct s2io_nic *sp = fifos->nic;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- struct config_param *config = &sp->config;
- u64 reason;
-
- if (unlikely(!is_s2io_card_up(sp)))
- return IRQ_NONE;
-
- reason = readq(&bar0->general_int_status);
- if (unlikely(reason == S2IO_MINUS_ONE))
- /* Nothing much can be done. Get out */
- return IRQ_HANDLED;
-
- if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
- writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
-
- if (reason & GEN_INTR_TXPIC)
- s2io_txpic_intr_handle(sp);
-
- if (reason & GEN_INTR_TXTRAFFIC)
- writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
-
- for (i = 0; i < config->tx_fifo_num; i++)
- tx_intr_handler(&fifos[i]);
-
- writeq(sp->general_int_mask, &bar0->general_int_mask);
- readl(&bar0->general_int_status);
- return IRQ_HANDLED;
- }
- /* The interrupt was not raised by us */
- return IRQ_NONE;
-}
-
-static void s2io_txpic_intr_handle(struct s2io_nic *sp)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64;
-
- val64 = readq(&bar0->pic_int_status);
- if (val64 & PIC_INT_GPIO) {
- val64 = readq(&bar0->gpio_int_reg);
- if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
- (val64 & GPIO_INT_REG_LINK_UP)) {
-			/*
-			 * This is an unstable state, so clear both the up and
-			 * down interrupts and let the adapter re-evaluate the
-			 * link state.
-			 */
- val64 |= GPIO_INT_REG_LINK_DOWN;
- val64 |= GPIO_INT_REG_LINK_UP;
- writeq(val64, &bar0->gpio_int_reg);
- val64 = readq(&bar0->gpio_int_mask);
- val64 &= ~(GPIO_INT_MASK_LINK_UP |
- GPIO_INT_MASK_LINK_DOWN);
- writeq(val64, &bar0->gpio_int_mask);
- } else if (val64 & GPIO_INT_REG_LINK_UP) {
- val64 = readq(&bar0->adapter_status);
- /* Enable Adapter */
- val64 = readq(&bar0->adapter_control);
- val64 |= ADAPTER_CNTL_EN;
- writeq(val64, &bar0->adapter_control);
- val64 |= ADAPTER_LED_ON;
- writeq(val64, &bar0->adapter_control);
- if (!sp->device_enabled_once)
- sp->device_enabled_once = 1;
-
- s2io_link(sp, LINK_UP);
- /*
- * unmask link down interrupt and mask link-up
- * intr
- */
- val64 = readq(&bar0->gpio_int_mask);
- val64 &= ~GPIO_INT_MASK_LINK_DOWN;
- val64 |= GPIO_INT_MASK_LINK_UP;
- writeq(val64, &bar0->gpio_int_mask);
-
- } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
- val64 = readq(&bar0->adapter_status);
- s2io_link(sp, LINK_DOWN);
-			/* Link is down, so unmask the link-up interrupt */
- val64 = readq(&bar0->gpio_int_mask);
- val64 &= ~GPIO_INT_MASK_LINK_UP;
- val64 |= GPIO_INT_MASK_LINK_DOWN;
- writeq(val64, &bar0->gpio_int_mask);
-
- /* turn off LED */
- val64 = readq(&bar0->adapter_control);
- val64 = val64 & (~ADAPTER_LED_ON);
- writeq(val64, &bar0->adapter_control);
- }
- }
- val64 = readq(&bar0->gpio_int_mask);
-}
-
-/**
- * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
- * @value: alarm bits
- * @addr: address value
- * @cnt: counter variable
- * Description: Check for alarm and increment the counter
- * Return Value:
- * 1 - if alarm bit set
- * 0 - if alarm bit is not set
- */
-static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
- unsigned long long *cnt)
-{
- u64 val64;
- val64 = readq(addr);
- if (val64 & value) {
- writeq(val64, addr);
- (*cnt)++;
- return 1;
- }
-	return 0;
-}
-
-/**
- * s2io_handle_errors - Xframe error indication handler
- * @dev_id: opaque handle to dev
- * Description: Handle alarms such as loss of link, single or
- * double ECC errors, critical and serious errors.
- * Return Value:
- * NONE
- */
-static void s2io_handle_errors(void *dev_id)
-{
- struct net_device *dev = (struct net_device *)dev_id;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 temp64 = 0, val64 = 0;
- int i = 0;
-
- struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
- struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
-
- if (!is_s2io_card_up(sp))
- return;
-
- if (pci_channel_offline(sp->pdev))
- return;
-
- memset(&sw_stat->ring_full_cnt, 0,
- sizeof(sw_stat->ring_full_cnt));
-
- /* Handling the XPAK counters update */
- if (stats->xpak_timer_count < 72000) {
- /* waiting for an hour */
- stats->xpak_timer_count++;
- } else {
- s2io_updt_xpak_counter(dev);
- /* reset the count to zero */
- stats->xpak_timer_count = 0;
- }
-
- /* Handling link status change error Intr */
- if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
- val64 = readq(&bar0->mac_rmac_err_reg);
- writeq(val64, &bar0->mac_rmac_err_reg);
- if (val64 & RMAC_LINK_STATE_CHANGE_INT)
- schedule_work(&sp->set_link_task);
- }
-
- /* In case of a serious error, the device will be Reset. */
- if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
- &sw_stat->serious_err_cnt))
- goto reset;
-
- /* Check for data parity error */
- if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
- &sw_stat->parity_err_cnt))
- goto reset;
-
- /* Check for ring full counter */
- if (sp->device_type == XFRAME_II_DEVICE) {
- val64 = readq(&bar0->ring_bump_counter1);
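-		/*
-		 * Each 64-bit bump counter packs four 16-bit per-ring
-		 * counters; peel them off MSB first.
-		 */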
- for (i = 0; i < 4; i++) {
- temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
- temp64 >>= 64 - ((i+1)*16);
- sw_stat->ring_full_cnt[i] += temp64;
- }
-
- val64 = readq(&bar0->ring_bump_counter2);
- for (i = 0; i < 4; i++) {
- temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
- temp64 >>= 64 - ((i+1)*16);
- sw_stat->ring_full_cnt[i+4] += temp64;
- }
- }
-
- val64 = readq(&bar0->txdma_int_status);
- /*check for pfc_err*/
- if (val64 & TXDMA_PFC_INT) {
- if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
- PFC_MISC_0_ERR | PFC_MISC_1_ERR |
- PFC_PCIX_ERR,
- &bar0->pfc_err_reg,
- &sw_stat->pfc_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
- &bar0->pfc_err_reg,
- &sw_stat->pfc_err_cnt);
- }
-
- /*check for tda_err*/
- if (val64 & TXDMA_TDA_INT) {
- if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
- TDA_SM0_ERR_ALARM |
- TDA_SM1_ERR_ALARM,
- &bar0->tda_err_reg,
- &sw_stat->tda_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
- &bar0->tda_err_reg,
- &sw_stat->tda_err_cnt);
- }
- /*check for pcc_err*/
- if (val64 & TXDMA_PCC_INT) {
- if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
- PCC_N_SERR | PCC_6_COF_OV_ERR |
- PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
- PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
- PCC_TXB_ECC_DB_ERR,
- &bar0->pcc_err_reg,
- &sw_stat->pcc_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
- &bar0->pcc_err_reg,
- &sw_stat->pcc_err_cnt);
- }
-
- /*check for tti_err*/
- if (val64 & TXDMA_TTI_INT) {
- if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
- &bar0->tti_err_reg,
- &sw_stat->tti_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
- &bar0->tti_err_reg,
- &sw_stat->tti_err_cnt);
- }
-
- /*check for lso_err*/
- if (val64 & TXDMA_LSO_INT) {
- if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
- LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
- &bar0->lso_err_reg,
- &sw_stat->lso_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
- &bar0->lso_err_reg,
- &sw_stat->lso_err_cnt);
- }
-
- /*check for tpa_err*/
- if (val64 & TXDMA_TPA_INT) {
- if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
- &bar0->tpa_err_reg,
- &sw_stat->tpa_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
- &bar0->tpa_err_reg,
- &sw_stat->tpa_err_cnt);
- }
-
- /*check for sm_err*/
- if (val64 & TXDMA_SM_INT) {
- if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
- &bar0->sm_err_reg,
- &sw_stat->sm_err_cnt))
- goto reset;
- }
-
- val64 = readq(&bar0->mac_int_status);
- if (val64 & MAC_INT_STATUS_TMAC_INT) {
- if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
- &bar0->mac_tmac_err_reg,
- &sw_stat->mac_tmac_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
- TMAC_DESC_ECC_SG_ERR |
- TMAC_DESC_ECC_DB_ERR,
- &bar0->mac_tmac_err_reg,
- &sw_stat->mac_tmac_err_cnt);
- }
-
- val64 = readq(&bar0->xgxs_int_status);
- if (val64 & XGXS_INT_STATUS_TXGXS) {
- if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
- &bar0->xgxs_txgxs_err_reg,
- &sw_stat->xgxs_txgxs_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
- &bar0->xgxs_txgxs_err_reg,
- &sw_stat->xgxs_txgxs_err_cnt);
- }
-
- val64 = readq(&bar0->rxdma_int_status);
- if (val64 & RXDMA_INT_RC_INT_M) {
- if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
- RC_FTC_ECC_DB_ERR |
- RC_PRCn_SM_ERR_ALARM |
- RC_FTC_SM_ERR_ALARM,
- &bar0->rc_err_reg,
- &sw_stat->rc_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
- RC_FTC_ECC_SG_ERR |
- RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
- &sw_stat->rc_err_cnt);
- if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
- PRC_PCI_AB_WR_Rn |
- PRC_PCI_AB_F_WR_Rn,
- &bar0->prc_pcix_err_reg,
- &sw_stat->prc_pcix_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
- PRC_PCI_DP_WR_Rn |
- PRC_PCI_DP_F_WR_Rn,
- &bar0->prc_pcix_err_reg,
- &sw_stat->prc_pcix_err_cnt);
- }
-
- if (val64 & RXDMA_INT_RPA_INT_M) {
- if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
- &bar0->rpa_err_reg,
- &sw_stat->rpa_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
- &bar0->rpa_err_reg,
- &sw_stat->rpa_err_cnt);
- }
-
- if (val64 & RXDMA_INT_RDA_INT_M) {
- if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
- RDA_FRM_ECC_DB_N_AERR |
- RDA_SM1_ERR_ALARM |
- RDA_SM0_ERR_ALARM |
- RDA_RXD_ECC_DB_SERR,
- &bar0->rda_err_reg,
- &sw_stat->rda_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
- RDA_FRM_ECC_SG_ERR |
- RDA_MISC_ERR |
- RDA_PCIX_ERR,
- &bar0->rda_err_reg,
- &sw_stat->rda_err_cnt);
- }
-
- if (val64 & RXDMA_INT_RTI_INT_M) {
- if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
- &bar0->rti_err_reg,
- &sw_stat->rti_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
- &bar0->rti_err_reg,
- &sw_stat->rti_err_cnt);
- }
-
- val64 = readq(&bar0->mac_int_status);
- if (val64 & MAC_INT_STATUS_RMAC_INT) {
- if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
- &bar0->mac_rmac_err_reg,
- &sw_stat->mac_rmac_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
- RMAC_SINGLE_ECC_ERR |
- RMAC_DOUBLE_ECC_ERR,
- &bar0->mac_rmac_err_reg,
- &sw_stat->mac_rmac_err_cnt);
- }
-
- val64 = readq(&bar0->xgxs_int_status);
- if (val64 & XGXS_INT_STATUS_RXGXS) {
- if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
- &bar0->xgxs_rxgxs_err_reg,
- &sw_stat->xgxs_rxgxs_err_cnt))
- goto reset;
- }
-
- val64 = readq(&bar0->mc_int_status);
- if (val64 & MC_INT_STATUS_MC_INT) {
- if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
- &bar0->mc_err_reg,
- &sw_stat->mc_err_cnt))
- goto reset;
-
-		/* Handle ECC errors */
- if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
- writeq(val64, &bar0->mc_err_reg);
- if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
- sw_stat->double_ecc_errs++;
- if (sp->device_type != XFRAME_II_DEVICE) {
-					/*
-					 * Reset Xframe I only on a
-					 * critical error.
-					 */
- if (val64 &
- (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
- MC_ERR_REG_MIRI_ECC_DB_ERR_1))
- goto reset;
- }
- } else
- sw_stat->single_ecc_errs++;
- }
- }
- return;
-
-reset:
- s2io_stop_all_tx_queue(sp);
- schedule_work(&sp->rst_timer_task);
- sw_stat->soft_reset_cnt++;
-}
-
-/**
- * s2io_isr - ISR handler of the device .
- * @irq: the irq of the device.
- * @dev_id: a void pointer to the dev structure of the NIC.
- * Description: This function is the ISR handler of the device. It
- * identifies the reason for the interrupt and calls the relevant
- * service routines. As a contingency measure, this ISR allocates the
- * recv buffers, if their numbers are below the panic value which is
- * presently set to 25% of the original number of rcv buffers allocated.
- * Return value:
- * IRQ_HANDLED: will be returned if IRQ was handled by this routine
- * IRQ_NONE: will be returned if interrupt is not from our device
- */
-static irqreturn_t s2io_isr(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *)dev_id;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- int i;
- u64 reason = 0;
- struct mac_info *mac_control;
- struct config_param *config;
-
- /* Pretend we handled any irq's from a disconnected card */
- if (pci_channel_offline(sp->pdev))
- return IRQ_NONE;
-
- if (!is_s2io_card_up(sp))
- return IRQ_NONE;
-
- config = &sp->config;
- mac_control = &sp->mac_control;
-
- /*
- * Identify the cause for interrupt and call the appropriate
- * interrupt handler. Causes for the interrupt could be;
- * 1. Rx of packet.
- * 2. Tx complete.
- * 3. Link down.
- */
- reason = readq(&bar0->general_int_status);
-
- if (unlikely(reason == S2IO_MINUS_ONE))
- return IRQ_HANDLED; /* Nothing much can be done. Get out */
-
- if (reason &
- (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
- writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
-
- if (config->napi) {
- if (reason & GEN_INTR_RXTRAFFIC) {
- napi_schedule(&sp->napi);
- writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
- writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
- readl(&bar0->rx_traffic_int);
- }
- } else {
- /*
- * rx_traffic_int reg is an R1 register, writing all 1's
- * will ensure that the actual interrupt causing bit
- * gets cleared and hence a read can be avoided.
- */
- if (reason & GEN_INTR_RXTRAFFIC)
- writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- rx_intr_handler(ring, 0);
- }
- }
-
- /*
- * tx_traffic_int reg is an R1 register, writing all 1's
- * will ensure that the actual interrupt causing bit gets
- * cleared and hence a read can be avoided.
- */
- if (reason & GEN_INTR_TXTRAFFIC)
- writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
-
- for (i = 0; i < config->tx_fifo_num; i++)
- tx_intr_handler(&mac_control->fifos[i]);
-
- if (reason & GEN_INTR_TXPIC)
- s2io_txpic_intr_handle(sp);
-
- /*
- * Reallocate the buffers from the interrupt handler itself.
- */
- if (!config->napi) {
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- s2io_chk_rx_buffers(sp, ring);
- }
- }
- writeq(sp->general_int_mask, &bar0->general_int_mask);
- readl(&bar0->general_int_status);
-
- return IRQ_HANDLED;
-
- } else if (!reason) {
- /* The interrupt was not raised by us */
- return IRQ_NONE;
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * s2io_updt_stats - trigger an immediate statistics update on the adapter
- */
-static void s2io_updt_stats(struct s2io_nic *sp)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64;
- int cnt = 0;
-
- if (is_s2io_card_up(sp)) {
-		/* Approx. 30us on a 133 MHz bus */
- val64 = SET_UPDT_CLICKS(10) |
- STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
- writeq(val64, &bar0->stat_cfg);
- do {
- udelay(100);
- val64 = readq(&bar0->stat_cfg);
- if (!(val64 & s2BIT(0)))
- break;
- cnt++;
- if (cnt == 5)
- break; /* Updt failed */
- } while (1);
- }
-}
-
-/**
- * s2io_get_stats - Updates the device statistics structure.
- * @dev : pointer to the device structure.
- * Description:
- * This function updates the device statistics structure in the s2io_nic
- * structure and returns a pointer to the same.
- * Return value:
- * pointer to the updated net_device_stats structure.
- */
-static struct net_device_stats *s2io_get_stats(struct net_device *dev)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- struct mac_info *mac_control = &sp->mac_control;
- struct stat_block *stats = mac_control->stats_info;
- u64 delta;
-
-	/* Configure stats for an immediate update */
- s2io_updt_stats(sp);
-
-	/* A device reset will cause the on-adapter statistics to be zeroed.
-	 * This can happen while running, e.g. when the MTU is changed. To
-	 * prevent the system from seeing the stats drop to zero, the driver
-	 * keeps a copy of the last update pushed to the system (which is also
-	 * zeroed on reset). This enables the driver to accurately compute the
-	 * delta between the last update and the current one.
-	 */
- delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
- sp->stats.rx_packets += delta;
- dev->stats.rx_packets += delta;
-
- delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
- sp->stats.tx_packets += delta;
- dev->stats.tx_packets += delta;
-
- delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
- le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
- sp->stats.rx_bytes += delta;
- dev->stats.rx_bytes += delta;
-
- delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
- le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
- sp->stats.tx_bytes += delta;
- dev->stats.tx_bytes += delta;
-
- delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
- sp->stats.rx_errors += delta;
- dev->stats.rx_errors += delta;
-
- delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
- sp->stats.tx_errors += delta;
- dev->stats.tx_errors += delta;
-
- delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
- sp->stats.rx_dropped += delta;
- dev->stats.rx_dropped += delta;
-
- delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
- sp->stats.tx_dropped += delta;
- dev->stats.tx_dropped += delta;
-
- /* The adapter MAC interprets pause frames as multicast packets, but
- * does not pass them up. This erroneously increases the multicast
- * packet count and needs to be deducted when the multicast frame count
- * is queried.
- */
- delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_vld_mcst_frms);
- delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
- delta -= sp->stats.multicast;
- sp->stats.multicast += delta;
- dev->stats.multicast += delta;
-
- delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_usized_frms)) +
- le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
- sp->stats.rx_length_errors += delta;
- dev->stats.rx_length_errors += delta;
-
- delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
- sp->stats.rx_crc_errors += delta;
- dev->stats.rx_crc_errors += delta;
-
- return &dev->stats;
-}
-
-/**
- * s2io_set_multicast - entry point for multicast address enable/disable.
- * @dev : pointer to the device structure
- * @may_sleep: parameter indicates if sleeping when waiting for command
- * complete
- * Description:
- * This function is a driver entry point which gets called by the kernel
- * whenever multicast addresses must be enabled/disabled. It is also called
- * to set/reset promiscuous mode. Depending on the device flags, we
- * determine whether multicast addressing must be enabled or promiscuous
- * mode disabled, etc.
- * Return value:
- * void.
- */
-static void s2io_set_multicast(struct net_device *dev, bool may_sleep)
-{
- int i, j, prev_cnt;
- struct netdev_hw_addr *ha;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
- 0xfeffffffffffULL;
- u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
- void __iomem *add;
- struct config_param *config = &sp->config;
-
- if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
- /* Enable all Multicast addresses */
- writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
- &bar0->rmac_addr_data0_mem);
- writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
- &bar0->rmac_addr_data1_mem);
- val64 = RMAC_ADDR_CMD_MEM_WE |
- RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
- /* Wait till command completes */
- wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, may_sleep);
-
- sp->m_cast_flg = 1;
- sp->all_multi_pos = config->max_mc_addr - 1;
- } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
- /* Disable all Multicast addresses */
- writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
- &bar0->rmac_addr_data0_mem);
- writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
- &bar0->rmac_addr_data1_mem);
- val64 = RMAC_ADDR_CMD_MEM_WE |
- RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
- /* Wait till command completes */
- wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, may_sleep);
-
- sp->m_cast_flg = 0;
- sp->all_multi_pos = 0;
- }
-
- if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
- /* Put the NIC into promiscuous mode */
- add = &bar0->mac_cfg;
- val64 = readq(&bar0->mac_cfg);
- val64 |= MAC_CFG_RMAC_PROM_ENABLE;
-
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32)val64, add);
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64 >> 32), (add + 4));
-
- if (vlan_tag_strip != 1) {
- val64 = readq(&bar0->rx_pa_cfg);
- val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
- writeq(val64, &bar0->rx_pa_cfg);
- sp->vlan_strip_flag = 0;
- }
-
- val64 = readq(&bar0->mac_cfg);
- sp->promisc_flg = 1;
- DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
- dev->name);
- } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
- /* Remove the NIC from promiscuous mode */
- add = &bar0->mac_cfg;
- val64 = readq(&bar0->mac_cfg);
- val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
-
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32)val64, add);
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64 >> 32), (add + 4));
-
- if (vlan_tag_strip != 0) {
- val64 = readq(&bar0->rx_pa_cfg);
- val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
- writeq(val64, &bar0->rx_pa_cfg);
- sp->vlan_strip_flag = 1;
- }
-
- val64 = readq(&bar0->mac_cfg);
- sp->promisc_flg = 0;
- DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
- }
-
- /* Update individual M_CAST address list */
- if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
- if (netdev_mc_count(dev) >
- (config->max_mc_addr - config->max_mac_addr)) {
- DBG_PRINT(ERR_DBG,
- "%s: No more Rx filters can be added - "
- "please enable ALL_MULTI instead\n",
- dev->name);
- return;
- }
-
- prev_cnt = sp->mc_addr_count;
- sp->mc_addr_count = netdev_mc_count(dev);
-
- /* Clear out the previous list of Mc in the H/W. */
- for (i = 0; i < prev_cnt; i++) {
- writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
- &bar0->rmac_addr_data0_mem);
- writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
- &bar0->rmac_addr_data1_mem);
- val64 = RMAC_ADDR_CMD_MEM_WE |
- RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET
- (config->mc_start_offset + i);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
-
-			/* Wait till command completes */
- if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, may_sleep)) {
- DBG_PRINT(ERR_DBG,
- "%s: Adding Multicasts failed\n",
- dev->name);
- return;
- }
- }
-
- /* Create the new Rx filter list and update the same in H/W. */
- i = 0;
- netdev_for_each_mc_addr(ha, dev) {
- mac_addr = 0;
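-			/*
-			 * Pack the six address bytes MSB-first into the low
-			 * 48 bits; the final shift undoes the extra <<= 8
-			 * from the last loop pass.
-			 */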
- for (j = 0; j < ETH_ALEN; j++) {
- mac_addr |= ha->addr[j];
- mac_addr <<= 8;
- }
- mac_addr >>= 8;
- writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
- &bar0->rmac_addr_data0_mem);
- writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
- &bar0->rmac_addr_data1_mem);
- val64 = RMAC_ADDR_CMD_MEM_WE |
- RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET
- (i + config->mc_start_offset);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
-
-			/* Wait till command completes */
- if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, may_sleep)) {
- DBG_PRINT(ERR_DBG,
- "%s: Adding Multicasts failed\n",
- dev->name);
- return;
- }
- i++;
- }
- }
-}
-
-/* NDO wrapper for s2io_set_multicast */
-static void s2io_ndo_set_multicast(struct net_device *dev)
-{
- s2io_set_multicast(dev, false);
-}
-
-/* read the unicast & multicast addresses from the CAM and store them
- * in the def_mac_addr structure
- */
-static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
-{
- int offset;
- u64 mac_addr = 0x0;
- struct config_param *config = &sp->config;
-
- /* store unicast & multicast mac addresses */
- for (offset = 0; offset < config->max_mc_addr; offset++) {
- mac_addr = do_s2io_read_unicast_mc(sp, offset);
- /* if read fails disable the entry */
- if (mac_addr == FAILURE)
- mac_addr = S2IO_DISABLE_MAC_ENTRY;
- do_s2io_copy_mac_addr(sp, offset, mac_addr);
- }
-}
-
-/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
-static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
-{
- int offset;
- struct config_param *config = &sp->config;
- /* restore unicast mac address */
- for (offset = 0; offset < config->max_mac_addr; offset++)
- do_s2io_prog_unicast(sp->dev,
- sp->def_mac_addr[offset].mac_addr);
-
- /* restore multicast mac address */
- for (offset = config->mc_start_offset;
- offset < config->max_mc_addr; offset++)
- do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
-}
-
-/* add a multicast MAC address to CAM */
-static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
-{
- int i;
- u64 mac_addr;
- struct config_param *config = &sp->config;
-
- mac_addr = ether_addr_to_u64(addr);
- if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
- return SUCCESS;
-
-	/* check if the multicast mac is already present in the CAM */
- for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
- u64 tmp64;
- tmp64 = do_s2io_read_unicast_mc(sp, i);
- if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
- break;
-
- if (tmp64 == mac_addr)
- return SUCCESS;
- }
- if (i == config->max_mc_addr) {
-		DBG_PRINT(ERR_DBG,
-			  "CAM full, no space left for multicast MAC\n");
- return FAILURE;
- }
- /* Update the internal structure with this new mac address */
- do_s2io_copy_mac_addr(sp, i, mac_addr);
-
- return do_s2io_add_mac(sp, mac_addr, i);
-}
-
-/* add MAC address to CAM */
-static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
-{
- u64 val64;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
- &bar0->rmac_addr_data0_mem);
-
- val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET(off);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
-
- /* Wait till command completes */
- if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, true)) {
- DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
- return FAILURE;
- }
- return SUCCESS;
-}
-/* deletes a specified unicast/multicast mac entry from CAM */
-static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
-{
- int offset;
- u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
- struct config_param *config = &sp->config;
-
- for (offset = 1;
- offset < config->max_mc_addr; offset++) {
- tmp64 = do_s2io_read_unicast_mc(sp, offset);
- if (tmp64 == addr) {
- /* disable the entry by writing 0xffffffffffffULL */
- if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
- return FAILURE;
- /* store the new mac list from CAM */
- do_s2io_store_unicast_mc(sp);
- return SUCCESS;
- }
- }
- DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
- (unsigned long long)addr);
- return FAILURE;
-}
-
-/* read mac entries from CAM */
-static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
-{
- u64 tmp64, val64;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- /* read mac addr */
- val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET(offset);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
-
- /* Wait till command completes */
- if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, true)) {
- DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
- return FAILURE;
- }
- tmp64 = readq(&bar0->rmac_addr_data0_mem);
-
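-	/* The 6-byte address occupies the top 48 bits of DATA0, hence the shift. */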
- return tmp64 >> 16;
-}
-
-/*
- * s2io_set_mac_addr - driver entry point
- */
-
-static int s2io_set_mac_addr(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(dev, addr->sa_data);
-
- /* store the MAC address in CAM */
- return do_s2io_prog_unicast(dev, dev->dev_addr);
-}
-/**
- * do_s2io_prog_unicast - Programs the Xframe mac address
- * @dev : pointer to the device structure.
- * @addr: a uchar pointer to the new mac address which is to be set.
- * Description : This procedure will program the Xframe to receive
- * frames with the new MAC address.
- * Return value: SUCCESS on success and FAILURE on failure.
- */
-
-static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- register u64 mac_addr, perm_addr;
- int i;
- u64 tmp64;
- struct config_param *config = &sp->config;
-
- /*
- * Set the new MAC address as the new unicast filter and reflect this
- * change on the device address registered with the OS. It will be
- * at offset 0.
- */
- mac_addr = ether_addr_to_u64(addr);
- perm_addr = ether_addr_to_u64(sp->def_mac_addr[0].mac_addr);
-
-	/* check if the dev_addr is different from perm_addr */
- if (mac_addr == perm_addr)
- return SUCCESS;
-
-	/* check if the mac is already present in the CAM */
- for (i = 1; i < config->max_mac_addr; i++) {
- tmp64 = do_s2io_read_unicast_mc(sp, i);
- if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
- break;
-
- if (tmp64 == mac_addr) {
- DBG_PRINT(INFO_DBG,
- "MAC addr:0x%llx already present in CAM\n",
- (unsigned long long)mac_addr);
- return SUCCESS;
- }
- }
- if (i == config->max_mac_addr) {
-		DBG_PRINT(ERR_DBG, "CAM full, no space left for unicast MAC\n");
- return FAILURE;
- }
- /* Update the internal structure with this new mac address */
- do_s2io_copy_mac_addr(sp, i, mac_addr);
-
- return do_s2io_add_mac(sp, mac_addr, i);
-}
-
-/**
- * s2io_ethtool_set_link_ksettings - Sets different link parameters.
- * @dev : pointer to netdev
- * @cmd: pointer to the structure with parameters given by ethtool to set
- * link information.
- * Description:
- * The Xframe link is fixed at 10 Gbps full duplex with autonegotiation
- * disabled, so any other combination is rejected; a matching request
- * simply restarts the interface.
- * Return value:
- * 0 on success.
- */
-
-static int
-s2io_ethtool_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct s2io_nic *sp = netdev_priv(dev);
-
-	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
-	    (cmd->base.speed != SPEED_10000) ||
-	    (cmd->base.duplex != DUPLEX_FULL))
-		return -EINVAL;
-
-	s2io_close(sp->dev);
-	s2io_open(sp->dev);
-
- return 0;
-}
-
-/**
- * s2io_ethtool_get_link_ksettings - Return link specific information.
- * @dev: pointer to netdev
- * @cmd : pointer to the structure with parameters given by ethtool
- * to return link information.
- * Description:
- * Returns link specific information like speed, duplex etc.. to ethtool.
- * Return value :
- * return 0 on success.
- */
-
-static int
-s2io_ethtool_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct s2io_nic *sp = netdev_priv(dev);
-
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
-
- if (netif_carrier_ok(sp->dev)) {
- cmd->base.speed = SPEED_10000;
- cmd->base.duplex = DUPLEX_FULL;
- } else {
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
- }
-
- cmd->base.autoneg = AUTONEG_DISABLE;
- return 0;
-}
-
-/**
- * s2io_ethtool_gdrvinfo - Returns driver specific information.
- * @dev: pointer to netdev
- * @info : pointer to the structure with parameters given by ethtool to
- * return driver information.
- * Description:
- * Returns driver specific information like name, version etc. to ethtool.
- * Return value:
- * void
- */
-
-static void s2io_ethtool_gdrvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct s2io_nic *sp = netdev_priv(dev);
-
- strscpy(info->driver, s2io_driver_name, sizeof(info->driver));
- strscpy(info->version, s2io_driver_version, sizeof(info->version));
- strscpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
-}
-
-/**
- * s2io_ethtool_gregs - dumps the entire Xframe register space into the buffer.
- * @dev: pointer to netdev
- * @regs : pointer to the structure with parameters given by ethtool for
- * dumping the registers.
- * @space: the output buffer into which all the registers are dumped.
- * Description:
- * Dumps the entire register space of xFrame NIC into the user given
- * buffer area.
- * Return value :
- * void .
- */
-
-static void s2io_ethtool_gregs(struct net_device *dev,
- struct ethtool_regs *regs, void *space)
-{
- int i;
- u64 reg;
- u8 *reg_space = (u8 *)space;
- struct s2io_nic *sp = netdev_priv(dev);
-
- regs->len = XENA_REG_SPACE;
- regs->version = sp->pdev->subsystem_device;
-
- for (i = 0; i < regs->len; i += 8) {
- reg = readq(sp->bar0 + i);
- memcpy((reg_space + i), &reg, 8);
- }
-}
-
-/*
- * s2io_set_led - control NIC led
- */
-static void s2io_set_led(struct s2io_nic *sp, bool on)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u16 subid = sp->pdev->subsystem_device;
- u64 val64;
-
- if ((sp->device_type == XFRAME_II_DEVICE) ||
- ((subid & 0xFF) >= 0x07)) {
- val64 = readq(&bar0->gpio_control);
- if (on)
- val64 |= GPIO_CTRL_GPIO_0;
- else
- val64 &= ~GPIO_CTRL_GPIO_0;
-
- writeq(val64, &bar0->gpio_control);
- } else {
- val64 = readq(&bar0->adapter_control);
- if (on)
- val64 |= ADAPTER_LED_ON;
- else
- val64 &= ~ADAPTER_LED_ON;
-
- writeq(val64, &bar0->adapter_control);
-	}
-}
-
-/**
- * s2io_ethtool_set_led - To physically identify the nic on the system.
- * @dev : network device
- * @state: led setting
- *
- * Description: Used to physically identify the NIC on the system.
- * The Link LED will blink for a time specified by the user for
- * identification.
- * NOTE: The link has to be up to be able to blink the LED; hence
- * identification is possible only while the link is up.
- */
-
-static int s2io_ethtool_set_led(struct net_device *dev,
- enum ethtool_phys_id_state state)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u16 subid = sp->pdev->subsystem_device;
-
- if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
- u64 val64 = readq(&bar0->adapter_control);
- if (!(val64 & ADAPTER_CNTL_EN)) {
- pr_err("Adapter Link down, cannot blink LED\n");
- return -EAGAIN;
- }
- }
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- sp->adapt_ctrl_org = readq(&bar0->gpio_control);
- return 1; /* cycle on/off once per second */
-
- case ETHTOOL_ID_ON:
- s2io_set_led(sp, true);
- break;
-
- case ETHTOOL_ID_OFF:
- s2io_set_led(sp, false);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
- writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
- }
-
- return 0;
-}
-
-static void
-s2io_ethtool_gringparam(struct net_device *dev,
- struct ethtool_ringparam *ering,
- struct kernel_ethtool_ringparam *kernel_ering,
- struct netlink_ext_ack *extack)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- int i, tx_desc_count = 0, rx_desc_count = 0;
-
- if (sp->rxd_mode == RXD_MODE_1) {
- ering->rx_max_pending = MAX_RX_DESC_1;
- ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
- } else {
- ering->rx_max_pending = MAX_RX_DESC_2;
- ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
- }
-
- ering->tx_max_pending = MAX_TX_DESC;
-
- for (i = 0; i < sp->config.rx_ring_num; i++)
- rx_desc_count += sp->config.rx_cfg[i].num_rxd;
- ering->rx_pending = rx_desc_count;
- ering->rx_jumbo_pending = rx_desc_count;
-
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- tx_desc_count += sp->config.tx_cfg[i].fifo_len;
- ering->tx_pending = tx_desc_count;
- DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
-}
-
-/**
- * s2io_ethtool_getpause_data - Pause frame generation and reception.
- * @dev: pointer to netdev
- * @ep : pointer to the structure with pause parameters given by ethtool.
- * Description:
- * Returns the Pause frame generation and reception capability of the NIC.
- * Return value:
- * void
- */
-static void s2io_ethtool_getpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
-{
- u64 val64;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- val64 = readq(&bar0->rmac_pause_cfg);
- if (val64 & RMAC_PAUSE_GEN_ENABLE)
- ep->tx_pause = true;
- if (val64 & RMAC_PAUSE_RX_ENABLE)
- ep->rx_pause = true;
- ep->autoneg = false;
-}
-
-/**
- * s2io_ethtool_setpause_data - set/reset pause frame generation.
- * @dev: pointer to netdev
- * @ep : pointer to the structure with pause parameters given by ethtool.
- * Description:
- * It can be used to set or reset Pause frame generation or reception
- * support of the NIC.
- * Return value:
- * int, returns 0 on Success
- */
-
-static int s2io_ethtool_setpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
-{
- u64 val64;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- val64 = readq(&bar0->rmac_pause_cfg);
- if (ep->tx_pause)
- val64 |= RMAC_PAUSE_GEN_ENABLE;
- else
- val64 &= ~RMAC_PAUSE_GEN_ENABLE;
- if (ep->rx_pause)
- val64 |= RMAC_PAUSE_RX_ENABLE;
- else
- val64 &= ~RMAC_PAUSE_RX_ENABLE;
- writeq(val64, &bar0->rmac_pause_cfg);
- return 0;
-}
-
-#define S2IO_DEV_ID 5
-/**
- * read_eeprom - reads 4 bytes of data from user given offset.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @off : offset from which the data is to be read
- * @data : It's an output parameter where the data read at the given
- * offset is stored.
- * Description:
- * Reads 4 bytes of data from the user-given offset and returns the
- * read data.
- * NOTE: only the part of the EEPROM visible through the I2C bus can
- * be read.
- * Return value:
- * -1 on failure and 0 on success.
- */
-static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
-{
- int ret = -1;
- u32 exit_cnt = 0;
- u64 val64;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- if (sp->device_type == XFRAME_I_DEVICE) {
- val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
- I2C_CONTROL_ADDR(off) |
- I2C_CONTROL_BYTE_CNT(0x3) |
- I2C_CONTROL_READ |
- I2C_CONTROL_CNTL_START;
- SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
-
- while (exit_cnt < 5) {
- val64 = readq(&bar0->i2c_control);
- if (I2C_CONTROL_CNTL_END(val64)) {
- *data = I2C_CONTROL_GET_DATA(val64);
- ret = 0;
- break;
- }
- msleep(50);
- exit_cnt++;
- }
- }
-
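- /*
- * Xframe II exposes the serial EEPROM over SPI instead of I2C; the
- * command value 0x3 below appears to match the standard SPI EEPROM
- * READ opcode.
- */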
- if (sp->device_type == XFRAME_II_DEVICE) {
- val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
- SPI_CONTROL_BYTECNT(0x3) |
- SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
- SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
- val64 |= SPI_CONTROL_REQ;
- SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
- while (exit_cnt < 5) {
- val64 = readq(&bar0->spi_control);
- if (val64 & SPI_CONTROL_NACK) {
- ret = 1;
- break;
- } else if (val64 & SPI_CONTROL_DONE) {
- *data = readq(&bar0->spi_data);
- *data &= 0xffffff;
- ret = 0;
- break;
- }
- msleep(50);
- exit_cnt++;
- }
- }
- return ret;
-}
-
-/**
- * write_eeprom - actually writes the relevant part of the data value.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @off : offset at which the data must be written
- * @data : The data that is to be written
- * @cnt : Number of bytes of the data that are actually to be written into
- * the Eeprom. (max of 3)
- * Description:
- * Actually writes the relevant part of the data value into the Eeprom
- * through the I2C bus.
- * Return value:
- * 0 on success, -1 on failure.
- */
-
-static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
-{
- int exit_cnt = 0, ret = -1;
- u64 val64;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- if (sp->device_type == XFRAME_I_DEVICE) {
- val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
- I2C_CONTROL_ADDR(off) |
- I2C_CONTROL_BYTE_CNT(cnt) |
- I2C_CONTROL_SET_DATA((u32)data) |
- I2C_CONTROL_CNTL_START;
- SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
-
- while (exit_cnt < 5) {
- val64 = readq(&bar0->i2c_control);
- if (I2C_CONTROL_CNTL_END(val64)) {
- if (!(val64 & I2C_CONTROL_NACK))
- ret = 0;
- break;
- }
- msleep(50);
- exit_cnt++;
- }
- }
-
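- /*
- * On Xframe II the write goes over SPI; the command value 0x2 below
- * appears to match the standard SPI EEPROM WRITE opcode.
- */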
- if (sp->device_type == XFRAME_II_DEVICE) {
- int write_cnt = (cnt == 8) ? 0 : cnt;
- writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
-
- val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
- SPI_CONTROL_BYTECNT(write_cnt) |
- SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
- SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
- val64 |= SPI_CONTROL_REQ;
- SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
- while (exit_cnt < 5) {
- val64 = readq(&bar0->spi_control);
- if (val64 & SPI_CONTROL_NACK) {
- ret = 1;
- break;
- } else if (val64 & SPI_CONTROL_DONE) {
- ret = 0;
- break;
- }
- msleep(50);
- exit_cnt++;
- }
- }
- return ret;
-}
-
-static void s2io_vpd_read(struct s2io_nic *nic)
-{
- u8 *vpd_data;
- u8 data;
- int i = 0, cnt, len, fail = 0;
- int vpd_addr = 0x80;
- struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
-
- if (nic->device_type == XFRAME_II_DEVICE) {
- strcpy(nic->product_name, "Xframe II 10GbE network adapter");
- vpd_addr = 0x80;
- } else {
- strcpy(nic->product_name, "Xframe I 10GbE network adapter");
- vpd_addr = 0x50;
- }
- strcpy(nic->serial_num, "NOT AVAILABLE");
-
- vpd_data = kmalloc(256, GFP_KERNEL);
- if (!vpd_data) {
- swstats->mem_alloc_fail_cnt++;
- return;
- }
- swstats->mem_allocated += 256;
-
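- /*
- * This is the standard PCI VPD capability handshake: write the VPD
- * address with the flag bit clear to request a read, poll the flag
- * byte until it reads back 0x80 (data ready), then fetch the dword
- * from the VPD data register.
- */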
- for (i = 0; i < 256; i += 4) {
- pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
- pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
- pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
- for (cnt = 0; cnt < 5; cnt++) {
- msleep(2);
- pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
- if (data == 0x80)
- break;
- }
- if (cnt >= 5) {
- DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
- fail = 1;
- break;
- }
- pci_read_config_dword(nic->pdev, (vpd_addr + 4),
- (u32 *)&vpd_data[i]);
- }
-
- if (!fail) {
- /* read serial number of adapter */
- for (cnt = 0; cnt < 252; cnt++) {
- if ((vpd_data[cnt] == 'S') &&
- (vpd_data[cnt+1] == 'N')) {
- len = vpd_data[cnt+2];
- if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
- memcpy(nic->serial_num,
- &vpd_data[cnt + 3],
- len);
- memset(nic->serial_num+len,
- 0,
- VPD_STRING_LEN-len);
- break;
- }
- }
- }
- }
-
- if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
- len = vpd_data[1];
- memcpy(nic->product_name, &vpd_data[3], len);
- nic->product_name[len] = 0;
- }
- kfree(vpd_data);
- swstats->mem_freed += 256;
-}
-
-/**
- * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
- * @dev: pointer to netdev
- * @eeprom : pointer to the user level structure provided by ethtool,
- * containing all relevant information.
- * @data_buf : buffer into which the values read from the Eeprom are stored.
- * Description: Reads the values stored in the Eeprom at the given offset
- * for a given length. Stores these values in the input argument data
- * buffer 'data_buf' and returns them to the caller (ethtool).
- * Return value:
- * int 0 on success
- */
-
-static int s2io_ethtool_geeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *data_buf)
-{
- u32 i, valid;
- u64 data;
- struct s2io_nic *sp = netdev_priv(dev);
-
- eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
-
- if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
- eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
-
- for (i = 0; i < eeprom->len; i += 4) {
- if (read_eeprom(sp, (eeprom->offset + i), &data)) {
- DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
- return -EFAULT;
- }
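- /* INV() byte-swaps the 32-bit word, presumably so the EEPROM
- * contents are returned in the byte order ethtool expects.
- */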
- valid = INV(data);
- memcpy((data_buf + i), &valid, 4);
- }
- return 0;
-}
-
-/**
- * s2io_ethtool_seeprom - tries to write the user-provided value into the Eeprom
- * @dev: pointer to netdev
- * @eeprom : pointer to the user level structure provided by ethtool,
- * containing all relevant information.
- * @data_buf : user defined value to be written into Eeprom.
- * Description:
- * Tries to write the user provided value in the Eeprom, at the offset
- * given by the user.
- * Return value:
- * 0 on success, -EFAULT on failure.
- */
-
-static int s2io_ethtool_seeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom,
- u8 *data_buf)
-{
- int len = eeprom->len, cnt = 0;
- u64 valid = 0, data;
- struct s2io_nic *sp = netdev_priv(dev);
-
- if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
- DBG_PRINT(ERR_DBG,
- "ETHTOOL_WRITE_EEPROM Err: "
- "Magic value is wrong, it is 0x%x should be 0x%x\n",
- eeprom->magic,
- (sp->pdev->vendor | (sp->pdev->device << 16)));
- return -EFAULT;
- }
-
- while (len) {
- data = (u32)data_buf[cnt] & 0x000000FF;
- if (data)
- valid = (u32)(data << 24);
- else
- valid = data;
-
- if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
- DBG_PRINT(ERR_DBG,
- "ETHTOOL_WRITE_EEPROM Err: "
- "Cannot write into the specified offset\n");
- return -EFAULT;
- }
- cnt++;
- len--;
- }
-
- return 0;
-}
-
-/**
- * s2io_register_test - reads and writes into all clock domains.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @data : variable that returns the result of each of the tests conducted
- * by the driver.
- * Description:
- * Reads and writes into all clock domains. The NIC has 3 clock domains;
- * verify that registers in all three regions are accessible.
- * Return value:
- * 0 on success.
- */
-
-static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64 = 0, exp_val;
- int fail = 0;
-
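- /*
- * The compare values below are the registers' power-on defaults;
- * pif_rd_swapper_fb in particular reads back a fixed signature once
- * the read swapper is configured correctly.
- */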
- val64 = readq(&bar0->pif_rd_swapper_fb);
- if (val64 != 0x123456789abcdefULL) {
- fail = 1;
- DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
- }
-
- val64 = readq(&bar0->rmac_pause_cfg);
- if (val64 != 0xc000ffff00000000ULL) {
- fail = 1;
- DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
- }
-
- val64 = readq(&bar0->rx_queue_cfg);
- if (sp->device_type == XFRAME_II_DEVICE)
- exp_val = 0x0404040404040404ULL;
- else
- exp_val = 0x0808080808080808ULL;
- if (val64 != exp_val) {
- fail = 1;
- DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
- }
-
- val64 = readq(&bar0->xgxs_efifo_cfg);
- if (val64 != 0x000000001923141EULL) {
- fail = 1;
- DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
- }
-
- val64 = 0x5A5A5A5A5A5A5A5AULL;
- writeq(val64, &bar0->xmsi_data);
- val64 = readq(&bar0->xmsi_data);
- if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
- fail = 1;
- DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
- }
-
- val64 = 0xA5A5A5A5A5A5A5A5ULL;
- writeq(val64, &bar0->xmsi_data);
- val64 = readq(&bar0->xmsi_data);
- if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
- fail = 1;
- DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
- }
-
- *data = fail;
- return fail;
-}
-
-/**
- * s2io_eeprom_test - verifies that the EEPROM in the Xena can be programmed.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @data: variable that returns the result of each of the tests conducted by
- * the driver.
- * Description:
- * Verify that the EEPROM in the Xena can be programmed using the
- * I2C_CONTROL register.
- * Return value:
- * 0 on success.
- */
-
-static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
-{
- int fail = 0;
- u64 ret_data, org_4F0, org_7F0;
- u8 saved_4F0 = 0, saved_7F0 = 0;
- struct net_device *dev = sp->dev;
-
- /* Test Write Error at offset 0 */
- /* Note that SPI interface allows write access to all areas
- * of EEPROM. Hence doing all negative testing only for Xframe I.
- */
- if (sp->device_type == XFRAME_I_DEVICE)
- if (!write_eeprom(sp, 0, 0, 3))
- fail = 1;
-
- /* Save current values at offsets 0x4F0 and 0x7F0 */
- if (!read_eeprom(sp, 0x4F0, &org_4F0))
- saved_4F0 = 1;
- if (!read_eeprom(sp, 0x7F0, &org_7F0))
- saved_7F0 = 1;
-
- /* Test Write at offset 4f0 */
- if (write_eeprom(sp, 0x4F0, 0x012345, 3))
- fail = 1;
- if (read_eeprom(sp, 0x4F0, &ret_data))
- fail = 1;
-
- if (ret_data != 0x012345) {
- DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
- "Data written %llx Data read %llx\n",
- dev->name, (unsigned long long)0x12345,
- (unsigned long long)ret_data);
- fail = 1;
- }
-
- /* Reset the EEPROM data to 0xFFFFFF */
- write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
-
- /* Test Write Request Error at offset 0x7c */
- if (sp->device_type == XFRAME_I_DEVICE)
- if (!write_eeprom(sp, 0x07C, 0, 3))
- fail = 1;
-
- /* Test Write Request at offset 0x7f0 */
- if (write_eeprom(sp, 0x7F0, 0x012345, 3))
- fail = 1;
- if (read_eeprom(sp, 0x7F0, &ret_data))
- fail = 1;
-
- if (ret_data != 0x012345) {
- DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
- "Data written %llx Data read %llx\n",
- dev->name, (unsigned long long)0x12345,
- (unsigned long long)ret_data);
- fail = 1;
- }
-
- /* Reset the EEPROM data to 0xFFFFFF */
- write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
-
- if (sp->device_type == XFRAME_I_DEVICE) {
- /* Test Write Error at offset 0x80 */
- if (!write_eeprom(sp, 0x080, 0, 3))
- fail = 1;
-
- /* Test Write Error at offset 0xfc */
- if (!write_eeprom(sp, 0x0FC, 0, 3))
- fail = 1;
-
- /* Test Write Error at offset 0x100 */
- if (!write_eeprom(sp, 0x100, 0, 3))
- fail = 1;
-
- /* Test Write Error at offset 4ec */
- if (!write_eeprom(sp, 0x4EC, 0, 3))
- fail = 1;
- }
-
- /* Restore values at offsets 0x4F0 and 0x7F0 */
- if (saved_4F0)
- write_eeprom(sp, 0x4F0, org_4F0, 3);
- if (saved_7F0)
- write_eeprom(sp, 0x7F0, org_7F0, 3);
-
- *data = fail;
- return fail;
-}
-
-/**
- * s2io_bist_test - invokes the MemBist test of the card.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @data: variable that returns the result of each of the tests conducted by
- * the driver.
- * Description:
- * This invokes the MemBist test of the card. We give the test around
- * 2 seconds to complete. If it's still not complete
- * within this period, we consider that the test failed.
- * Return value:
- * 0 on success and -1 on failure.
- */
-
-static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
-{
- u8 bist = 0;
- int cnt = 0, ret = -1;
-
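- /*
- * Per the PCI spec, setting PCI_BIST_START kicks off the built-in
- * self test; the device clears the bit on completion and leaves the
- * completion code in PCI_BIST_CODE_MASK (0 means pass).
- */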
- pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
- bist |= PCI_BIST_START;
- pci_write_config_byte(sp->pdev, PCI_BIST, bist); /* PCI_BIST is a byte register */
-
- while (cnt < 20) {
- pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
- if (!(bist & PCI_BIST_START)) {
- *data = (bist & PCI_BIST_CODE_MASK);
- ret = 0;
- break;
- }
- msleep(100);
- cnt++;
- }
-
- return ret;
-}
-
-/**
- * s2io_link_test - verifies the link state of the nic
- * @sp: private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @data: variable that returns the result of each of the tests conducted by
- * the driver.
- * Description:
- * The function verifies the link state of the NIC and updates the input
- * argument 'data' appropriately.
- * Return value:
- * 0 on success.
- */
-
-static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64;
-
- val64 = readq(&bar0->adapter_status);
- if (!(LINK_IS_UP(val64)))
- *data = 1;
- else
- *data = 0;
-
- return *data;
-}
-
-/**
- * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
- * @sp: private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @data: variable that returns the result of each of the tests
- * conducted by the driver.
- * Description:
- * This is one of the offline tests; it verifies the read and write
- * access to the RldRam chip on the NIC.
- * Return value:
- * 0 on success.
- */
-
-static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64;
- int cnt, iteration = 0, test_fail = 0;
-
- val64 = readq(&bar0->adapter_control);
- val64 &= ~ADAPTER_ECC_EN;
- writeq(val64, &bar0->adapter_control);
-
- val64 = readq(&bar0->mc_rldram_test_ctrl);
- val64 |= MC_RLDRAM_TEST_MODE;
- SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
-
- val64 = readq(&bar0->mc_rldram_mrs);
- val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
- SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
-
- val64 |= MC_RLDRAM_MRS_ENABLE;
- SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
-
- while (iteration < 2) {
- val64 = 0x55555555aaaa0000ULL;
- if (iteration == 1)
- val64 ^= 0xFFFFFFFFFFFF0000ULL;
- writeq(val64, &bar0->mc_rldram_test_d0);
-
- val64 = 0xaaaa5a5555550000ULL;
- if (iteration == 1)
- val64 ^= 0xFFFFFFFFFFFF0000ULL;
- writeq(val64, &bar0->mc_rldram_test_d1);
-
- val64 = 0x55aaaaaaaa5a0000ULL;
- if (iteration == 1)
- val64 ^= 0xFFFFFFFFFFFF0000ULL;
- writeq(val64, &bar0->mc_rldram_test_d2);
-
- val64 = (u64) (0x0000003ffffe0100ULL);
- writeq(val64, &bar0->mc_rldram_test_add);
-
- val64 = MC_RLDRAM_TEST_MODE |
- MC_RLDRAM_TEST_WRITE |
- MC_RLDRAM_TEST_GO;
- SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
-
- for (cnt = 0; cnt < 5; cnt++) {
- val64 = readq(&bar0->mc_rldram_test_ctrl);
- if (val64 & MC_RLDRAM_TEST_DONE)
- break;
- msleep(200);
- }
-
- if (cnt == 5)
- break;
-
- val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
- SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
-
- for (cnt = 0; cnt < 5; cnt++) {
- val64 = readq(&bar0->mc_rldram_test_ctrl);
- if (val64 & MC_RLDRAM_TEST_DONE)
- break;
- msleep(500);
- }
-
- if (cnt == 5)
- break;
-
- val64 = readq(&bar0->mc_rldram_test_ctrl);
- if (!(val64 & MC_RLDRAM_TEST_PASS))
- test_fail = 1;
-
- iteration++;
- }
-
- *data = test_fail;
-
- /* Bring the adapter out of test mode */
- SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
-
- return test_fail;
-}
-
-/**
- * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
- * @dev: pointer to netdev
- * @ethtest : pointer to an ethtool command specific structure that will be
- * returned to the user.
- * @data : variable that returns the result of each of the tests
- * conducted by the driver.
- * Description:
- * This function conducts 5 tests (4 offline and 1 online) to determine
- * the health of the card.
- * Return value:
- * void
- */
-
-static void s2io_ethtool_test(struct net_device *dev,
- struct ethtool_test *ethtest,
- uint64_t *data)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- int orig_state = netif_running(sp->dev);
-
- if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
- /* Offline Tests. */
- if (orig_state)
- s2io_close(sp->dev);
-
- if (s2io_register_test(sp, &data[0]))
- ethtest->flags |= ETH_TEST_FL_FAILED;
-
- s2io_reset(sp);
-
- if (s2io_rldram_test(sp, &data[3]))
- ethtest->flags |= ETH_TEST_FL_FAILED;
-
- s2io_reset(sp);
-
- if (s2io_eeprom_test(sp, &data[1]))
- ethtest->flags |= ETH_TEST_FL_FAILED;
-
- if (s2io_bist_test(sp, &data[4]))
- ethtest->flags |= ETH_TEST_FL_FAILED;
-
- if (orig_state)
- s2io_open(sp->dev);
-
- data[2] = 0;
- } else {
- /* Online Tests. */
- if (!orig_state) {
- DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
- dev->name);
- data[0] = -1;
- data[1] = -1;
- data[2] = -1;
- data[3] = -1;
- data[4] = -1;
- }
-
- if (s2io_link_test(sp, &data[2]))
- ethtest->flags |= ETH_TEST_FL_FAILED;
-
- data[0] = 0;
- data[1] = 0;
- data[3] = 0;
- data[4] = 0;
- }
-}
-
-static void s2io_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *estats,
- u64 *tmp_stats)
-{
- int i = 0, k;
- struct s2io_nic *sp = netdev_priv(dev);
- struct stat_block *stats = sp->mac_control.stats_info;
- struct swStat *swstats = &stats->sw_stat;
- struct xpakStat *xstats = &stats->xpak_stat;
-
- s2io_updt_stats(sp);
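- /*
- * Most hardware counters are kept as a 32-bit value plus a separate
- * 32-bit overflow counter; each pair is folded into one 64-bit stat.
- */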
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
- le32_to_cpu(stats->tmac_data_octets);
- tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_mcst_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_bcst_frms);
- tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
- le32_to_cpu(stats->tmac_ttl_octets);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_ucst_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_nucst_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_any_err_frms);
- tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
- tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
- le32_to_cpu(stats->tmac_vld_ip);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
- le32_to_cpu(stats->tmac_drop_ip);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
- le32_to_cpu(stats->tmac_icmp);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
- le32_to_cpu(stats->tmac_rst_tcp);
- tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
- tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
- le32_to_cpu(stats->tmac_udp);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_vld_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
- le32_to_cpu(stats->rmac_data_octets);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_vld_mcst_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_vld_bcst_frms);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
- le32_to_cpu(stats->rmac_ttl_octets);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
- | le32_to_cpu(stats->rmac_accepted_ucst_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
- << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_discarded_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
- << 32 | le32_to_cpu(stats->rmac_drop_events);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_usized_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_osized_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_frag_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_jabber_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
- le32_to_cpu(stats->rmac_ip);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
- le32_to_cpu(stats->rmac_drop_ip);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
- le32_to_cpu(stats->rmac_icmp);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
- le32_to_cpu(stats->rmac_udp);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
- le32_to_cpu(stats->rmac_err_drp_udp);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
- le32_to_cpu(stats->rmac_pause_cnt);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
- le32_to_cpu(stats->rmac_accepted_ip);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
- tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
-
- /* Enhanced statistics exist only for Hercules */
- if (sp->device_type == XFRAME_II_DEVICE) {
- tmp_stats[i++] =
- le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
- tmp_stats[i++] =
- le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
- tmp_stats[i++] =
- le64_to_cpu(stats->rmac_ttl_8192_max_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
- tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
- }
-
- tmp_stats[i++] = 0;
- tmp_stats[i++] = swstats->single_ecc_errs;
- tmp_stats[i++] = swstats->double_ecc_errs;
- tmp_stats[i++] = swstats->parity_err_cnt;
- tmp_stats[i++] = swstats->serious_err_cnt;
- tmp_stats[i++] = swstats->soft_reset_cnt;
- tmp_stats[i++] = swstats->fifo_full_cnt;
- for (k = 0; k < MAX_RX_RINGS; k++)
- tmp_stats[i++] = swstats->ring_full_cnt[k];
- tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
- tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
- tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
- tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
- tmp_stats[i++] = xstats->alarm_laser_output_power_high;
- tmp_stats[i++] = xstats->alarm_laser_output_power_low;
- tmp_stats[i++] = xstats->warn_transceiver_temp_high;
- tmp_stats[i++] = xstats->warn_transceiver_temp_low;
- tmp_stats[i++] = xstats->warn_laser_bias_current_high;
- tmp_stats[i++] = xstats->warn_laser_bias_current_low;
- tmp_stats[i++] = xstats->warn_laser_output_power_high;
- tmp_stats[i++] = xstats->warn_laser_output_power_low;
- tmp_stats[i++] = swstats->clubbed_frms_cnt;
- tmp_stats[i++] = swstats->sending_both;
- tmp_stats[i++] = swstats->outof_sequence_pkts;
- tmp_stats[i++] = swstats->flush_max_pkts;
- if (swstats->num_aggregations) {
- u64 tmp = swstats->sum_avg_pkts_aggregated;
- int count = 0;
- /*
- * Since 64-bit divide does not work on all platforms,
- * do repeated subtraction.
- */
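- /*
- * A hedged note: on current kernels this loop could likely be
- * replaced by a <linux/math64.h> helper, e.g.
- *    count = div64_u64(swstats->sum_avg_pkts_aggregated,
- *                      swstats->num_aggregations);
- */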
- while (tmp >= swstats->num_aggregations) {
- tmp -= swstats->num_aggregations;
- count++;
- }
- tmp_stats[i++] = count;
- } else
- tmp_stats[i++] = 0;
- tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
- tmp_stats[i++] = swstats->pci_map_fail_cnt;
- tmp_stats[i++] = swstats->watchdog_timer_cnt;
- tmp_stats[i++] = swstats->mem_allocated;
- tmp_stats[i++] = swstats->mem_freed;
- tmp_stats[i++] = swstats->link_up_cnt;
- tmp_stats[i++] = swstats->link_down_cnt;
- tmp_stats[i++] = swstats->link_up_time;
- tmp_stats[i++] = swstats->link_down_time;
-
- tmp_stats[i++] = swstats->tx_buf_abort_cnt;
- tmp_stats[i++] = swstats->tx_desc_abort_cnt;
- tmp_stats[i++] = swstats->tx_parity_err_cnt;
- tmp_stats[i++] = swstats->tx_link_loss_cnt;
- tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
-
- tmp_stats[i++] = swstats->rx_parity_err_cnt;
- tmp_stats[i++] = swstats->rx_abort_cnt;
- tmp_stats[i++] = swstats->rx_parity_abort_cnt;
- tmp_stats[i++] = swstats->rx_rda_fail_cnt;
- tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
- tmp_stats[i++] = swstats->rx_fcs_err_cnt;
- tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
- tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
- tmp_stats[i++] = swstats->rx_unkn_err_cnt;
- tmp_stats[i++] = swstats->tda_err_cnt;
- tmp_stats[i++] = swstats->pfc_err_cnt;
- tmp_stats[i++] = swstats->pcc_err_cnt;
- tmp_stats[i++] = swstats->tti_err_cnt;
- tmp_stats[i++] = swstats->tpa_err_cnt;
- tmp_stats[i++] = swstats->sm_err_cnt;
- tmp_stats[i++] = swstats->lso_err_cnt;
- tmp_stats[i++] = swstats->mac_tmac_err_cnt;
- tmp_stats[i++] = swstats->mac_rmac_err_cnt;
- tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
- tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
- tmp_stats[i++] = swstats->rc_err_cnt;
- tmp_stats[i++] = swstats->prc_pcix_err_cnt;
- tmp_stats[i++] = swstats->rpa_err_cnt;
- tmp_stats[i++] = swstats->rda_err_cnt;
- tmp_stats[i++] = swstats->rti_err_cnt;
- tmp_stats[i++] = swstats->mc_err_cnt;
-}
-
-static int s2io_ethtool_get_regs_len(struct net_device *dev)
-{
- return XENA_REG_SPACE;
-}
-
-static int s2io_get_eeprom_len(struct net_device *dev)
-{
- return XENA_EEPROM_SPACE;
-}
-
-static int s2io_get_sset_count(struct net_device *dev, int sset)
-{
- struct s2io_nic *sp = netdev_priv(dev);
-
- switch (sset) {
- case ETH_SS_TEST:
- return S2IO_TEST_LEN;
- case ETH_SS_STATS:
- switch (sp->device_type) {
- case XFRAME_I_DEVICE:
- return XFRAME_I_STAT_LEN;
- case XFRAME_II_DEVICE:
- return XFRAME_II_STAT_LEN;
- default:
- return 0;
- }
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void s2io_ethtool_get_strings(struct net_device *dev,
- u32 stringset, u8 *data)
-{
- int stat_size = 0;
- struct s2io_nic *sp = netdev_priv(dev);
-
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
- break;
- case ETH_SS_STATS:
- stat_size = sizeof(ethtool_xena_stats_keys);
- memcpy(data, &ethtool_xena_stats_keys, stat_size);
- if (sp->device_type == XFRAME_II_DEVICE) {
- memcpy(data + stat_size,
- &ethtool_enhanced_stats_keys,
- sizeof(ethtool_enhanced_stats_keys));
- stat_size += sizeof(ethtool_enhanced_stats_keys);
- }
-
- memcpy(data + stat_size, &ethtool_driver_stats_keys,
- sizeof(ethtool_driver_stats_keys));
- }
-}
-
-static int s2io_set_features(struct net_device *dev, netdev_features_t features)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
-
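- /*
- * Toggling LRO cannot be done on the fly: bounce the card so the
- * new feature set takes effect while the queues are quiesced.
- */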
- if (changed && netif_running(dev)) {
- int rc;
-
- s2io_stop_all_tx_queue(sp);
- s2io_card_down(sp);
- dev->features = features;
- rc = s2io_card_up(sp);
- if (rc)
- s2io_reset(sp);
- else
- s2io_start_all_tx_queue(sp);
-
- return rc ? rc : 1;
- }
-
- return 0;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = s2io_ethtool_gdrvinfo,
- .get_regs_len = s2io_ethtool_get_regs_len,
- .get_regs = s2io_ethtool_gregs,
- .get_link = ethtool_op_get_link,
- .get_eeprom_len = s2io_get_eeprom_len,
- .get_eeprom = s2io_ethtool_geeprom,
- .set_eeprom = s2io_ethtool_seeprom,
- .get_ringparam = s2io_ethtool_gringparam,
- .get_pauseparam = s2io_ethtool_getpause_data,
- .set_pauseparam = s2io_ethtool_setpause_data,
- .self_test = s2io_ethtool_test,
- .get_strings = s2io_ethtool_get_strings,
- .set_phys_id = s2io_ethtool_set_led,
- .get_ethtool_stats = s2io_get_ethtool_stats,
- .get_sset_count = s2io_get_sset_count,
- .get_link_ksettings = s2io_ethtool_get_link_ksettings,
- .set_link_ksettings = s2io_ethtool_set_link_ksettings,
-};
-
-/**
- * s2io_ioctl - Entry point for the Ioctl
- * @dev : Device pointer.
- * @rq : An IOCTL specific structure that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
- * @cmd : This is used to distinguish between the different commands that
- * can be passed to the IOCTL functions.
- * Description:
- * Currently no special functionality is supported via IOCTL, hence
- * the function always returns -EOPNOTSUPP.
- */
-
-static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- return -EOPNOTSUPP;
-}
-
-/**
- * s2io_change_mtu - entry point to change MTU size for the device.
- * @dev : device pointer.
- * @new_mtu : the new MTU size for the device.
- * Description: A driver entry point to change MTU size for the device.
- * Before changing the MTU the device must be stopped.
- * Return value:
- * 0 on success and an appropriate negative errno value, as defined in
- * errno.h, on failure.
- */
-
-static int s2io_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- int ret = 0;
-
- WRITE_ONCE(dev->mtu, new_mtu);
- if (netif_running(dev)) {
- s2io_stop_all_tx_queue(sp);
- s2io_card_down(sp);
- ret = s2io_card_up(sp);
- if (ret) {
- DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
- __func__);
- return ret;
- }
- s2io_wake_all_tx_queue(sp);
- } else { /* Device is down */
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64 = new_mtu;
-
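- /*
- * vBIT(val, 2, 14) places the 14-bit payload-length value at bit
- * offset 2, counting from the MSB, of the 64-bit register.
- */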
- writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
- }
-
- return ret;
-}
-
-/**
- * s2io_set_link - Set the Link status
- * @work: work struct containing a pointer to device private structure
- * Description: Sets the link status for the adapter
- */
-
-static void s2io_set_link(struct work_struct *work)
-{
- struct s2io_nic *nic = container_of(work, struct s2io_nic,
- set_link_task);
- struct net_device *dev = nic->dev;
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 val64;
- u16 subid;
-
- rtnl_lock();
-
- if (!netif_running(dev))
- goto out_unlock;
-
- if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
- /* The card is being reset, no point doing anything */
- goto out_unlock;
- }
-
- subid = nic->pdev->subsystem_device;
- if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
- /*
- * Allow a small delay for the NIC's self-initiated
- * cleanup to complete.
- */
- msleep(100);
- }
-
- val64 = readq(&bar0->adapter_status);
- if (LINK_IS_UP(val64)) {
- if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
- if (verify_xena_quiescence(nic)) {
- val64 = readq(&bar0->adapter_control);
- val64 |= ADAPTER_CNTL_EN;
- writeq(val64, &bar0->adapter_control);
- if (CARDS_WITH_FAULTY_LINK_INDICATORS(
- nic->device_type, subid)) {
- val64 = readq(&bar0->gpio_control);
- val64 |= GPIO_CTRL_GPIO_0;
- writeq(val64, &bar0->gpio_control);
- val64 = readq(&bar0->gpio_control);
- } else {
- val64 |= ADAPTER_LED_ON;
- writeq(val64, &bar0->adapter_control);
- }
- nic->device_enabled_once = true;
- } else {
- DBG_PRINT(ERR_DBG,
- "%s: Error: device is not Quiescent\n",
- dev->name);
- s2io_stop_all_tx_queue(nic);
- }
- }
- val64 = readq(&bar0->adapter_control);
- val64 |= ADAPTER_LED_ON;
- writeq(val64, &bar0->adapter_control);
- s2io_link(nic, LINK_UP);
- } else {
- if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
- subid)) {
- val64 = readq(&bar0->gpio_control);
- val64 &= ~GPIO_CTRL_GPIO_0;
- writeq(val64, &bar0->gpio_control);
- val64 = readq(&bar0->gpio_control);
- }
- /* turn off LED */
- val64 = readq(&bar0->adapter_control);
- val64 = val64 & (~ADAPTER_LED_ON);
- writeq(val64, &bar0->adapter_control);
- s2io_link(nic, LINK_DOWN);
- }
- clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
-
-out_unlock:
- rtnl_unlock();
-}
-
-static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
- struct buffAdd *ba,
- struct sk_buff **skb, u64 *temp0, u64 *temp1,
- u64 *temp2, int size)
-{
- struct net_device *dev = sp->dev;
- struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
-
- if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
- struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
- /* allocate skb */
- if (*skb) {
- DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
- /*
- * As Rx frames are not going to be processed,
- * reuse the same mapped address for the RxD
- * buffer pointer.
- */
- rxdp1->Buffer0_ptr = *temp0;
- } else {
- *skb = netdev_alloc_skb(dev, size);
- if (!(*skb)) {
- DBG_PRINT(INFO_DBG,
- "%s: Out of memory to allocate %s\n",
- dev->name, "1 buf mode SKBs");
- stats->mem_alloc_fail_cnt++;
- return -ENOMEM ;
- }
- stats->mem_allocated += (*skb)->truesize;
- /* Store the mapped addr in a temp variable
- * so that it can be used for the next RxD whose
- * Host_Control is NULL.
- */
- rxdp1->Buffer0_ptr = *temp0 =
- dma_map_single(&sp->pdev->dev, (*skb)->data,
- size - NET_IP_ALIGN,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
- goto memalloc_failed;
- rxdp->Host_Control = (unsigned long) (*skb);
- }
- } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
- struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
- /* Two buffer Mode */
- if (*skb) {
- rxdp3->Buffer2_ptr = *temp2;
- rxdp3->Buffer0_ptr = *temp0;
- rxdp3->Buffer1_ptr = *temp1;
- } else {
- *skb = netdev_alloc_skb(dev, size);
- if (!(*skb)) {
- DBG_PRINT(INFO_DBG,
- "%s: Out of memory to allocate %s\n",
- dev->name,
- "2 buf mode SKBs");
- stats->mem_alloc_fail_cnt++;
- return -ENOMEM;
- }
- stats->mem_allocated += (*skb)->truesize;
- rxdp3->Buffer2_ptr = *temp2 =
- dma_map_single(&sp->pdev->dev, (*skb)->data,
- dev->mtu + 4, DMA_FROM_DEVICE);
- if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
- goto memalloc_failed;
- rxdp3->Buffer0_ptr = *temp0 =
- dma_map_single(&sp->pdev->dev, ba->ba_0,
- BUF0_LEN, DMA_FROM_DEVICE);
- if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp3->Buffer2_ptr,
- dev->mtu + 4,
- DMA_FROM_DEVICE);
- goto memalloc_failed;
- }
- rxdp->Host_Control = (unsigned long) (*skb);
-
- /* Buffer-1 will be a dummy buffer, not used */
- rxdp3->Buffer1_ptr = *temp1 =
- dma_map_single(&sp->pdev->dev, ba->ba_1,
- BUF1_LEN, DMA_FROM_DEVICE);
- if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp3->Buffer0_ptr,
- BUF0_LEN, DMA_FROM_DEVICE);
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp3->Buffer2_ptr,
- dev->mtu + 4,
- DMA_FROM_DEVICE);
- goto memalloc_failed;
- }
- }
- }
- return 0;
-
-memalloc_failed:
- stats->pci_map_fail_cnt++;
- stats->mem_freed += (*skb)->truesize;
- dev_kfree_skb(*skb);
- return -ENOMEM;
-}
-
-static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
- int size)
-{
- struct net_device *dev = sp->dev;
- if (sp->rxd_mode == RXD_MODE_1) {
- rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
- } else if (sp->rxd_mode == RXD_MODE_3B) {
- rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
- rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
- rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
- }
-}
-
-static int rxd_owner_bit_reset(struct s2io_nic *sp)
-{
- int i, j, k, blk_cnt = 0, size;
- struct config_param *config = &sp->config;
- struct mac_info *mac_control = &sp->mac_control;
- struct net_device *dev = sp->dev;
- struct RxD_t *rxdp = NULL;
- struct sk_buff *skb = NULL;
- struct buffAdd *ba = NULL;
- u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
-
- /* Calculate the size based on ring mode */
- size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
- HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
- if (sp->rxd_mode == RXD_MODE_1)
- size += NET_IP_ALIGN;
- else if (sp->rxd_mode == RXD_MODE_3B)
- size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
- struct ring_info *ring = &mac_control->rings[i];
-
- blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
-
- for (j = 0; j < blk_cnt; j++) {
- for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
- rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
- if (sp->rxd_mode == RXD_MODE_3B)
- ba = &ring->ba[j][k];
- if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
- &temp0_64,
- &temp1_64,
- &temp2_64,
- size) == -ENOMEM) {
- return 0;
- }
-
- set_rxd_buffer_size(sp, rxdp, size);
- dma_wmb();
- /* flip the Ownership bit to Hardware */
- rxdp->Control_1 |= RXD_OWN_XENA;
- }
- }
- }
- return 0;
-}
-
-static int s2io_add_isr(struct s2io_nic *sp)
-{
- int ret = 0;
- struct net_device *dev = sp->dev;
- int err = 0;
-
- if (sp->config.intr_type == MSI_X)
- ret = s2io_enable_msi_x(sp);
- if (ret) {
- DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
- sp->config.intr_type = INTA;
- }
-
- /*
- * Store the values of the MSI-X table in
- * the s2io_nic structure.
- */
- store_xmsi_data(sp);
-
- /* After proper initialization of H/W, register ISR */
- if (sp->config.intr_type == MSI_X) {
- int i, msix_rx_cnt = 0;
-
- for (i = 0; i < sp->num_entries; i++) {
- if (sp->s2io_entries[i].in_use == MSIX_FLG) {
- if (sp->s2io_entries[i].type ==
- MSIX_RING_TYPE) {
- snprintf(sp->desc[i],
- sizeof(sp->desc[i]),
- "%s:MSI-X-%d-RX",
- dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_ring_handle,
- 0,
- sp->desc[i],
- sp->s2io_entries[i].arg);
- } else if (sp->s2io_entries[i].type ==
- MSIX_ALARM_TYPE) {
- snprintf(sp->desc[i],
- sizeof(sp->desc[i]),
- "%s:MSI-X-%d-TX",
- dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_fifo_handle,
- 0,
- sp->desc[i],
- sp->s2io_entries[i].arg);
-
- }
- /* if either data or addr is zero print it. */
- if (!(sp->msix_info[i].addr &&
- sp->msix_info[i].data)) {
- DBG_PRINT(ERR_DBG,
- "%s @Addr:0x%llx Data:0x%llx\n",
- sp->desc[i],
- (unsigned long long)
- sp->msix_info[i].addr,
- (unsigned long long)
- ntohl(sp->msix_info[i].data));
- } else
- msix_rx_cnt++;
- if (err) {
- remove_msix_isr(sp);
-
- DBG_PRINT(ERR_DBG,
- "%s:MSI-X-%d registration "
- "failed\n", dev->name, i);
-
- DBG_PRINT(ERR_DBG,
- "%s: Defaulting to INTA\n",
- dev->name);
- sp->config.intr_type = INTA;
- break;
- }
- sp->s2io_entries[i].in_use =
- MSIX_REGISTERED_SUCCESS;
- }
- }
- if (!err) {
- pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
- DBG_PRINT(INFO_DBG,
- "MSI-X-TX entries enabled through alarm vector\n");
- }
- }
- if (sp->config.intr_type == INTA) {
- err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
- sp->name, dev);
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
- dev->name);
- return -1;
- }
- }
- return 0;
-}
-
-static void s2io_rem_isr(struct s2io_nic *sp)
-{
- if (sp->config.intr_type == MSI_X)
- remove_msix_isr(sp);
- else
- remove_inta_isr(sp);
-}
-
-static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
-{
- int cnt = 0;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- register u64 val64 = 0;
- struct config_param *config;
- config = &sp->config;
-
- if (!is_s2io_card_up(sp))
- return;
-
- timer_delete_sync(&sp->alarm_timer);
- /* If s2io_set_link task is executing, wait till it completes. */
- while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
- msleep(50);
- clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
-
- /* Disable napi */
- if (sp->config.napi) {
- int off = 0;
- if (config->intr_type == MSI_X) {
- for (; off < sp->config.rx_ring_num; off++)
- napi_disable(&sp->mac_control.rings[off].napi);
- } else {
- napi_disable(&sp->napi);
- }
- }
-
- /* disable Tx and Rx traffic on the NIC */
- if (do_io)
- stop_nic(sp);
-
- s2io_rem_isr(sp);
-
- /* stop the tx queue, indicate link down */
- s2io_link(sp, LINK_DOWN);
-
- /* Check if the device is Quiescent and then Reset the NIC */
- while (do_io) {
- /* As per the HW requirement we need to replenish the
- * receive buffer to avoid the ring bump. Since there is
- * no intention of processing the Rx frame at this point, we
- * just set the ownership bit of the RxDs in each Rx
- * ring to HW and set the appropriate buffer size
- * based on the ring mode.
- */
- rxd_owner_bit_reset(sp);
-
- val64 = readq(&bar0->adapter_status);
- if (verify_xena_quiescence(sp)) {
- if (verify_pcc_quiescent(sp, sp->device_enabled_once))
- break;
- }
-
- msleep(50);
- cnt++;
- if (cnt == 10) {
- DBG_PRINT(ERR_DBG, "Device not Quiescent - "
- "adapter status reads 0x%llx\n",
- (unsigned long long)val64);
- break;
- }
- }
- if (do_io)
- s2io_reset(sp);
-
- /* Free all Tx buffers */
- free_tx_buffers(sp);
-
- /* Free all Rx buffers */
- free_rx_buffers(sp);
-
- clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
-}
-
-static void s2io_card_down(struct s2io_nic *sp)
-{
- do_s2io_card_down(sp, 1);
-}
-
-static int s2io_card_up(struct s2io_nic *sp)
-{
- int i, ret = 0;
- struct config_param *config;
- struct mac_info *mac_control;
- struct net_device *dev = sp->dev;
- u16 interruptible;
-
- /* Initialize the H/W I/O registers */
- ret = init_nic(sp);
- if (ret != 0) {
- DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
- dev->name);
- if (ret != -EIO)
- s2io_reset(sp);
- return ret;
- }
-
- /*
- * Initializing the Rx buffers. For now we are considering only 1
- * Rx ring and initializing buffers into 30 Rx blocks
- */
- config = &sp->config;
- mac_control = &sp->mac_control;
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- ring->mtu = dev->mtu;
- ring->lro = !!(dev->features & NETIF_F_LRO);
- ret = fill_rx_buffers(sp, ring, 1);
- if (ret) {
- DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
- dev->name);
- ret = -ENOMEM;
- goto err_fill_buff;
- }
- DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
- ring->rx_bufs_left);
- }
-
- /* Initialise napi */
- if (config->napi) {
- if (config->intr_type == MSI_X) {
- for (i = 0; i < sp->config.rx_ring_num; i++)
- napi_enable(&sp->mac_control.rings[i].napi);
- } else {
- napi_enable(&sp->napi);
- }
- }
-
- /* Maintain the state prior to the open */
- if (sp->promisc_flg)
- sp->promisc_flg = 0;
- if (sp->m_cast_flg) {
- sp->m_cast_flg = 0;
- sp->all_multi_pos = 0;
- }
-
- /* Setting its receive mode */
- s2io_set_multicast(dev, true);
-
- if (dev->features & NETIF_F_LRO) {
- /* Initialize max aggregatable pkts per session based on MTU */
- sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
- /* Check if we can use (if specified) user provided value */
- if (lro_max_pkts < sp->lro_max_aggr_per_sess)
- sp->lro_max_aggr_per_sess = lro_max_pkts;
- }
-
- /* Enable Rx Traffic and interrupts on the NIC */
- if (start_nic(sp)) {
- DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
- ret = -ENODEV;
- goto err_out;
- }
-
- /* Add interrupt service routine */
- if (s2io_add_isr(sp) != 0) {
- if (sp->config.intr_type == MSI_X)
- s2io_rem_isr(sp);
- ret = -ENODEV;
- goto err_out;
- }
-
- timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
- mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
-
- set_bit(__S2IO_STATE_CARD_UP, &sp->state);
-
- /* Enable select interrupts */
- en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
- if (sp->config.intr_type != INTA) {
- interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
- en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
- } else {
- interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
- interruptible |= TX_PIC_INTR;
- en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
- }
-
- return 0;
-
-err_out:
- if (config->napi) {
- if (config->intr_type == MSI_X) {
- for (i = 0; i < sp->config.rx_ring_num; i++)
- napi_disable(&sp->mac_control.rings[i].napi);
- } else {
- napi_disable(&sp->napi);
- }
- }
-err_fill_buff:
- s2io_reset(sp);
- free_rx_buffers(sp);
- return ret;
-}
-
-/**
- * s2io_restart_nic - Resets the NIC.
- * @work : work struct containing a pointer to the device private structure
- * Description:
- * This function is scheduled to be run by the s2io_tx_watchdog
- * function after 0.5 secs to reset the NIC. The idea is to reduce
- * the run time of the watch dog routine which is run holding a
- * spin lock.
- */
-
-static void s2io_restart_nic(struct work_struct *work)
-{
- struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
- struct net_device *dev = sp->dev;
-
- rtnl_lock();
-
- if (!netif_running(dev))
- goto out_unlock;
-
- s2io_card_down(sp);
- if (s2io_card_up(sp)) {
- DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
- }
- s2io_wake_all_tx_queue(sp);
- DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
-out_unlock:
- rtnl_unlock();
-}
-
-/**
- * s2io_tx_watchdog - Watchdog for transmit side.
- * @dev : Pointer to net device structure
- * @txqueue: index of the hanging queue
- * Description:
- * This function is triggered if the Tx Queue is stopped
- * for a pre-defined amount of time when the Interface is still up.
- * If the Interface is jammed in such a situation, the hardware is
- * reset (by s2io_close) and restarted again (by s2io_open) to
- * overcome any problem that might have been caused in the hardware.
- * Return value:
- * void
- */
-
-static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
-
- if (netif_carrier_ok(dev)) {
- swstats->watchdog_timer_cnt++;
- schedule_work(&sp->rst_timer_task);
- swstats->soft_reset_cnt++;
- }
-}
-
-/**
- * rx_osm_handler - To perform some OS related operations on SKB.
- * @ring_data : the ring from which this RxD was extracted.
- * @rxdp: descriptor
- * Description:
- * This function is called by the Rx interrupt service routine to perform
- * some OS related operations on the SKB before passing it to the upper
- * layers. It mainly checks if the checksum is OK, if so adds it to the
- * SKBs cksum variable, increments the Rx packet count and passes the SKB
- * to the upper layer. If the checksum is wrong, it increments the Rx
- * packet error count, frees the SKB and returns error.
- * Return value:
- * SUCCESS on success and -1 on failure.
- */
-static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
-{
- struct s2io_nic *sp = ring_data->nic;
- struct net_device *dev = ring_data->dev;
- struct sk_buff *skb = (struct sk_buff *)
- ((unsigned long)rxdp->Host_Control);
- int ring_no = ring_data->ring_no;
- u16 l3_csum, l4_csum;
- unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
- struct lro *lro;
- u8 err_mask;
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
-
- skb->dev = dev;
-
- if (err) {
- /* Check for parity error */
- if (err & 0x1)
- swstats->parity_err_cnt++;
-
- err_mask = err >> 48;
- switch (err_mask) {
- case 1:
- swstats->rx_parity_err_cnt++;
- break;
-
- case 2:
- swstats->rx_abort_cnt++;
- break;
-
- case 3:
- swstats->rx_parity_abort_cnt++;
- break;
-
- case 4:
- swstats->rx_rda_fail_cnt++;
- break;
-
- case 5:
- swstats->rx_unkn_prot_cnt++;
- break;
-
- case 6:
- swstats->rx_fcs_err_cnt++;
- break;
-
- case 7:
- swstats->rx_buf_size_err_cnt++;
- break;
-
- case 8:
- swstats->rx_rxd_corrupt_cnt++;
- break;
-
- case 15:
- swstats->rx_unkn_err_cnt++;
- break;
- }
- /*
- * Drop the packet if the transfer code is bad. The exception is
- * 0x5, which could be due to an unsupported IPv6 extension header.
- * In this case, we let the stack handle the packet.
- * Note that in this case, since the checksum will be incorrect,
- * the stack will validate it.
- */
- if (err_mask != 0x5) {
- DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
- dev->name, err_mask);
- dev->stats.rx_crc_errors++;
- swstats->mem_freed
- += skb->truesize;
- dev_kfree_skb(skb);
- ring_data->rx_bufs_left -= 1;
- rxdp->Host_Control = 0;
- return 0;
- }
- }
-
- rxdp->Host_Control = 0;
- if (sp->rxd_mode == RXD_MODE_1) {
- int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
-
- skb_put(skb, len);
- } else if (sp->rxd_mode == RXD_MODE_3B) {
- int get_block = ring_data->rx_curr_get_info.block_index;
- int get_off = ring_data->rx_curr_get_info.offset;
- int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
- int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
-
- struct buffAdd *ba = &ring_data->ba[get_block][get_off];
- skb_put_data(skb, ba->ba_0, buf0_len);
- skb_put(skb, buf2_len);
- }
-
- if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
- ((!ring_data->lro) ||
- (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
- (dev->features & NETIF_F_RXCSUM)) {
- l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
- l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
- if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
- /*
- * NIC verifies if the Checksum of the received
- * frame is Ok or not and accordingly returns
- * a flag in the RxD.
- */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (ring_data->lro) {
- u32 tcp_len = 0;
- u8 *tcp;
- int ret = 0;
-
- ret = s2io_club_tcp_session(ring_data,
- skb->data, &tcp,
- &tcp_len, &lro,
- rxdp, sp);
- switch (ret) {
- case 3: /* Begin anew */
- lro->parent = skb;
- goto aggregate;
- case 1: /* Aggregate */
- lro_append_pkt(sp, lro, skb, tcp_len);
- goto aggregate;
- case 4: /* Flush session */
- lro_append_pkt(sp, lro, skb, tcp_len);
- queue_rx_frame(lro->parent,
- lro->vlan_tag);
- clear_lro_session(lro);
- swstats->flush_max_pkts++;
- goto aggregate;
- case 2: /* Flush both */
- lro->parent->data_len = lro->frags_len;
- swstats->sending_both++;
- queue_rx_frame(lro->parent,
- lro->vlan_tag);
- clear_lro_session(lro);
- goto send_up;
- case 0: /* sessions exceeded */
- case -1: /* non-TCP or not L2 aggregatable */
- case 5: /*
- * First pkt in session not
- * L3/L4 aggregatable
- */
- break;
- default:
- DBG_PRINT(ERR_DBG,
- "%s: Samadhana!!\n",
- __func__);
- BUG();
- }
- }
- } else {
- /*
- * Packet with erroneous checksum, let the
- * upper layers deal with it.
- */
- skb_checksum_none_assert(skb);
- }
- } else
- skb_checksum_none_assert(skb);
-
- swstats->mem_freed += skb->truesize;
-send_up:
- skb_record_rx_queue(skb, ring_no);
- queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
-aggregate:
- sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
- return SUCCESS;
-}
-
-/**
- * s2io_link - stops/starts the Tx queue.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @link : indicates whether link is UP/DOWN.
- * Description:
- * This function stops/starts the Tx queue depending on whether the link
- * status of the NIC is down or up. This is called by the Alarm
- * interrupt handler whenever a link change interrupt comes up.
- * Return value:
- * void.
- */
-
-static void s2io_link(struct s2io_nic *sp, int link)
-{
- struct net_device *dev = sp->dev;
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
-
- if (link != sp->last_link_state) {
- init_tti(sp, link, false);
- if (link == LINK_DOWN) {
- DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
- s2io_stop_all_tx_queue(sp);
- netif_carrier_off(dev);
- if (swstats->link_up_cnt)
- swstats->link_up_time =
- jiffies - sp->start_time;
- swstats->link_down_cnt++;
- } else {
- DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
- if (swstats->link_down_cnt)
- swstats->link_down_time =
- jiffies - sp->start_time;
- swstats->link_up_cnt++;
- netif_carrier_on(dev);
- s2io_wake_all_tx_queue(sp);
- }
- }
- sp->last_link_state = link;
- sp->start_time = jiffies;
-}
-
-/**
- * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * Description:
- * This function initializes a few of the PCI and PCI-X configuration registers
- * with recommended values.
- * Return value:
- * void
- */
-
-static void s2io_init_pci(struct s2io_nic *sp)
-{
- u16 pci_cmd = 0, pcix_cmd = 0;
-
- /* Enable Data Parity Error Recovery in PCI-X command register. */
- pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- &(pcix_cmd));
- pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- (pcix_cmd | 1));
- pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- &(pcix_cmd));
-
- /* Set the PErr Response bit in PCI command register. */
- pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
- pci_write_config_word(sp->pdev, PCI_COMMAND,
- (pci_cmd | PCI_COMMAND_PARITY));
- pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
-}
-
-static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
- u8 *dev_multiq)
-{
- int i;
-
- if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
- DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
- "(%d) not supported\n", tx_fifo_num);
-
- if (tx_fifo_num < 1)
- tx_fifo_num = 1;
- else
- tx_fifo_num = MAX_TX_FIFOS;
-
- DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
- }
-
- if (multiq)
- *dev_multiq = multiq;
-
- if (tx_steering_type && (1 == tx_fifo_num)) {
- if (tx_steering_type != TX_DEFAULT_STEERING)
- DBG_PRINT(ERR_DBG,
- "Tx steering is not supported with "
- "one fifo. Disabling Tx steering.\n");
- tx_steering_type = NO_STEERING;
- }
-
- if ((tx_steering_type < NO_STEERING) ||
- (tx_steering_type > TX_DEFAULT_STEERING)) {
- DBG_PRINT(ERR_DBG,
- "Requested transmit steering not supported\n");
- DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
- tx_steering_type = NO_STEERING;
- }
-
- if (rx_ring_num > MAX_RX_RINGS) {
- DBG_PRINT(ERR_DBG,
- "Requested number of rx rings not supported\n");
- DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
- MAX_RX_RINGS);
- rx_ring_num = MAX_RX_RINGS;
- }
-
- if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
- DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
- "Defaulting to INTA\n");
- *dev_intr_type = INTA;
- }
-
- if ((*dev_intr_type == MSI_X) &&
- ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
- (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
- DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
- "Defaulting to INTA\n");
- *dev_intr_type = INTA;
- }
-
- if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
- DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
- DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
- rx_ring_mode = 1;
- }
-
- for (i = 0; i < MAX_RX_RINGS; i++)
- if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
- DBG_PRINT(ERR_DBG, "Requested rx ring size not "
- "supported\nDefaulting to %d\n",
- MAX_RX_BLOCKS_PER_RING);
- rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
- }
-
- return SUCCESS;
-}
-
-/**
- * rts_ds_steer - Receive traffic steering based on the IPv4 TOS or IPv6 Traffic Class field.
- * @nic: device private variable
- * @ds_codepoint: data
- * @ring: ring index
- * Description: The function configures the receive steering to
- * desired receive ring.
- * Return Value: SUCCESS on success and
- * '-1' on failure (invalid codepoint or command timeout).
- */
-static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 val64 = 0;
-
- if (ds_codepoint > 63)
- return FAILURE;
-
- val64 = RTS_DS_MEM_DATA(ring);
- writeq(val64, &bar0->rts_ds_mem_data);
-
- val64 = RTS_DS_MEM_CTRL_WE |
- RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
- RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
-
- writeq(val64, &bar0->rts_ds_mem_ctrl);
-
- return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
- RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
- S2IO_BIT_RESET, true);
-}
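/*
 * Editor's sketch, not part of the original driver: rts_ds_steer() above
 * maps a 6-bit DS codepoint (0-63) to a receive ring. A hypothetical
 * caller spreading the eight DSCP class selectors across the configured
 * rings might look like this; the loop and the modulo policy are
 * assumptions for illustration only.
 */
static int s2io_steer_dscp_classes(struct s2io_nic *nic)
{
	u8 cs;

	for (cs = 0; cs < 8; cs++) {
		/* class selector codepoints occupy the top 3 DSCP bits */
		int ret = rts_ds_steer(nic, cs << 3,
				       cs % nic->config.rx_ring_num);

		if (ret != SUCCESS)
			return ret;
	}
	return SUCCESS;
}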
-
-static const struct net_device_ops s2io_netdev_ops = {
- .ndo_open = s2io_open,
- .ndo_stop = s2io_close,
- .ndo_get_stats = s2io_get_stats,
- .ndo_start_xmit = s2io_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = s2io_ndo_set_multicast,
- .ndo_eth_ioctl = s2io_ioctl,
- .ndo_set_mac_address = s2io_set_mac_addr,
- .ndo_change_mtu = s2io_change_mtu,
- .ndo_set_features = s2io_set_features,
- .ndo_tx_timeout = s2io_tx_watchdog,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = s2io_netpoll,
-#endif
-};
-
-/**
- * s2io_init_nic - Initialization of the adapter.
- * @pdev : structure containing the PCI related information of the device.
- * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
- * Description:
- * The function initializes an adapter identified by the pci_dev structure.
- * All OS related initialization, including memory and device structure
- * allocation and initialization of the device private variable, is
- * done. Also the swapper
- * control register is initialized to enable read and write into the I/O
- * registers of the device.
- * Return value:
- * returns 0 on success and negative on failure.
- */
-
-static int
-s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
-{
- struct s2io_nic *sp;
- struct net_device *dev;
- int i, j, ret;
- u32 mac_up, mac_down;
- u64 val64 = 0, tmp64 = 0;
- struct XENA_dev_config __iomem *bar0 = NULL;
- u16 subid;
- struct config_param *config;
- struct mac_info *mac_control;
- int mode;
- u8 dev_intr_type = intr_type;
- u8 dev_multiq = 0;
-
- ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
- if (ret)
- return ret;
-
- ret = pci_enable_device(pdev);
- if (ret) {
- DBG_PRINT(ERR_DBG,
- "%s: pci_enable_device failed\n", __func__);
- return ret;
- }
-
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
- } else {
- pci_disable_device(pdev);
- return -ENOMEM;
- }
- ret = pci_request_regions(pdev, s2io_driver_name);
- if (ret) {
- DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
- __func__, ret);
- pci_disable_device(pdev);
- return -ENODEV;
- }
- if (dev_multiq)
- dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
- else
- dev = alloc_etherdev(sizeof(struct s2io_nic));
- if (dev == NULL) {
- pci_disable_device(pdev);
- pci_release_regions(pdev);
- return -ENODEV;
- }
-
- pci_set_master(pdev);
- pci_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- /* Private member variable initialized to s2io NIC structure */
- sp = netdev_priv(dev);
- sp->dev = dev;
- sp->pdev = pdev;
- sp->device_enabled_once = false;
- if (rx_ring_mode == 1)
- sp->rxd_mode = RXD_MODE_1;
- if (rx_ring_mode == 2)
- sp->rxd_mode = RXD_MODE_3B;
-
- sp->config.intr_type = dev_intr_type;
-
- if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
- (pdev->device == PCI_DEVICE_ID_HERC_UNI))
- sp->device_type = XFRAME_II_DEVICE;
- else
- sp->device_type = XFRAME_I_DEVICE;
-
- /* Initialize some PCI/PCI-X fields of the NIC. */
- s2io_init_pci(sp);
-
- /*
- * Setting the device configuration parameters.
- * Most of these parameters can be specified by the user during
- * module insertion as they are module loadable parameters. If
- * these parameters are not specified during load time, they
- * are initialized with default values.
- */
- config = &sp->config;
- mac_control = &sp->mac_control;
-
- config->napi = napi;
- config->tx_steering_type = tx_steering_type;
-
- /* Tx side parameters. */
- if (config->tx_steering_type == TX_PRIORITY_STEERING)
- config->tx_fifo_num = MAX_TX_FIFOS;
- else
- config->tx_fifo_num = tx_fifo_num;
-
- /* Initialize the fifos used for tx steering */
- if (config->tx_fifo_num < 5) {
- if (config->tx_fifo_num == 1)
- sp->total_tcp_fifos = 1;
- else
- sp->total_tcp_fifos = config->tx_fifo_num - 1;
- sp->udp_fifo_idx = config->tx_fifo_num - 1;
- sp->total_udp_fifos = 1;
- sp->other_fifo_idx = sp->total_tcp_fifos - 1;
- } else {
- sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
- FIFO_OTHER_MAX_NUM);
- sp->udp_fifo_idx = sp->total_tcp_fifos;
- sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
- sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
- }
-
- config->multiq = dev_multiq;
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- tx_cfg->fifo_len = tx_fifo_len[i];
- tx_cfg->fifo_priority = i;
- }
-
- /* mapping the QoS priority to the configured fifos */
- for (i = 0; i < MAX_TX_FIFOS; i++)
- config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
-
- /* map the hashing selector table to the configured fifos */
- for (i = 0; i < config->tx_fifo_num; i++)
- sp->fifo_selector[i] = fifo_selector[i];
-
- config->tx_intr_type = TXD_INT_TYPE_UTILZ;
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
- if (tx_cfg->fifo_len < 65) {
- config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
- break;
- }
- }
- /* + 2 because one Txd for skb->data and one Txd for UFO */
- config->max_txds = MAX_SKB_FRAGS + 2;
-
- /* Rx side parameters. */
- config->rx_ring_num = rx_ring_num;
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
- struct ring_info *ring = &mac_control->rings[i];
-
- rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
- rx_cfg->ring_priority = i;
- ring->rx_bufs_left = 0;
- ring->rxd_mode = sp->rxd_mode;
- ring->rxd_count = rxd_count[sp->rxd_mode];
- ring->pdev = sp->pdev;
- ring->dev = sp->dev;
- }
-
- for (i = 0; i < rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
-
- rx_cfg->ring_org = RING_ORG_BUFF1;
- rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
- }
-
- /* Setting Mac Control parameters */
- mac_control->rmac_pause_time = rmac_pause_time;
- mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
- mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
-
- /* initialize the shared memory used by the NIC and the host */
- if (init_shared_mem(sp)) {
- DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
- ret = -ENOMEM;
- goto mem_alloc_failed;
- }
-
- sp->bar0 = pci_ioremap_bar(pdev, 0);
- if (!sp->bar0) {
- DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
- dev->name);
- ret = -ENOMEM;
- goto bar0_remap_failed;
- }
-
- sp->bar1 = pci_ioremap_bar(pdev, 2);
- if (!sp->bar1) {
- DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
- dev->name);
- ret = -ENOMEM;
- goto bar1_remap_failed;
- }
-
- /* Initializing the BAR1 address as the start of the FIFO pointer. */
- for (j = 0; j < MAX_TX_FIFOS; j++) {
- mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
- }
-
- /* Driver entry points */
- dev->netdev_ops = &s2io_netdev_ops;
- dev->ethtool_ops = &netdev_ethtool_ops;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_RXCSUM | NETIF_F_LRO;
- dev->features |= dev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HIGHDMA;
- dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
- INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
- INIT_WORK(&sp->set_link_task, s2io_set_link);
-
- pci_save_state(sp->pdev);
-
- /* Setting swapper control on the NIC, for proper reset operation */
- if (s2io_set_swapper(sp)) {
- DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
- dev->name);
- ret = -EAGAIN;
- goto set_swap_failed;
- }
-
- /* Verify if the Herc works on the slot its placed into */
- if (sp->device_type & XFRAME_II_DEVICE) {
- mode = s2io_verify_pci_mode(sp);
- if (mode < 0) {
- DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
- __func__);
- ret = -EBADSLT;
- goto set_swap_failed;
- }
- }
-
- if (sp->config.intr_type == MSI_X) {
- sp->num_entries = config->rx_ring_num + 1;
- ret = s2io_enable_msi_x(sp);
-
- if (!ret) {
- ret = s2io_test_msi(sp);
- /* rollback MSI-X, will re-enable during add_isr() */
- remove_msix_isr(sp);
- }
- if (ret) {
- DBG_PRINT(ERR_DBG,
- "MSI-X requested but failed to enable\n");
- sp->config.intr_type = INTA;
- }
- }
-
- if (config->intr_type == MSI_X) {
- for (i = 0; i < config->rx_ring_num ; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- netif_napi_add(dev, &ring->napi, s2io_poll_msix);
- }
- } else {
- netif_napi_add(dev, &sp->napi, s2io_poll_inta);
- }
-
- /* Not needed for Herc */
- if (sp->device_type & XFRAME_I_DEVICE) {
- /*
- * Fix for all "FFs" MAC address problems observed on
- * Alpha platforms
- */
- fix_mac_address(sp);
- s2io_reset(sp);
- }
-
- /*
- * MAC address initialization.
- * For now only one mac address will be read and used.
- */
- bar0 = sp->bar0;
- val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
- wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, true);
- tmp64 = readq(&bar0->rmac_addr_data0_mem);
- mac_down = (u32)tmp64;
- mac_up = (u32) (tmp64 >> 32);
-
- sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
- sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
- sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
- sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
- sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
- sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
-
- /* Set the factory defined MAC address initially */
- dev->addr_len = ETH_ALEN;
- eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr);
-
- /* initialize number of multicast & unicast MAC entries variables */
- if (sp->device_type == XFRAME_I_DEVICE) {
- config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
- config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
- config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
- } else if (sp->device_type == XFRAME_II_DEVICE) {
- config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
- config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
- config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
- }
-
- /* MTU range: 46 - 9600 */
- dev->min_mtu = MIN_MTU;
- dev->max_mtu = S2IO_JUMBO_SIZE;
-
- /* store mac addresses from CAM to s2io_nic structure */
- do_s2io_store_unicast_mc(sp);
-
- /* Configure MSIX vector for number of rings configured plus one */
- if ((sp->device_type == XFRAME_II_DEVICE) &&
- (config->intr_type == MSI_X))
- sp->num_entries = config->rx_ring_num + 1;
-
- /* Store the values of the MSIX table in the s2io_nic structure */
- store_xmsi_data(sp);
- /* reset Nic and bring it to known state */
- s2io_reset(sp);
-
- /*
- * Initialize link state flags
- * and the card state parameter
- */
- sp->state = 0;
-
- /* Initialize spinlocks */
- for (i = 0; i < sp->config.tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
-
- spin_lock_init(&fifo->tx_lock);
- }
-
- /*
- * SXE-002: Configure link and activity LED to init state
- * on driver load.
- */
- subid = sp->pdev->subsystem_device;
- if ((subid & 0xFF) >= 0x07) {
- val64 = readq(&bar0->gpio_control);
- val64 |= 0x0000800000000000ULL;
- writeq(val64, &bar0->gpio_control);
- val64 = 0x0411040400000000ULL;
- writeq(val64, (void __iomem *)bar0 + 0x2700);
- val64 = readq(&bar0->gpio_control);
- }
-
- sp->rx_csum = 1; /* Rx chksum verify enabled by default */
-
- if (register_netdev(dev)) {
- DBG_PRINT(ERR_DBG, "Device registration failed\n");
- ret = -ENODEV;
- goto register_failed;
- }
- s2io_vpd_read(sp);
- DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
- DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
- sp->product_name, pdev->revision);
- DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
- s2io_driver_version);
- DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
- DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
- if (sp->device_type & XFRAME_II_DEVICE) {
- mode = s2io_print_pci_mode(sp);
- if (mode < 0) {
- ret = -EBADSLT;
- unregister_netdev(dev);
- goto set_swap_failed;
- }
- }
- switch (sp->rxd_mode) {
- case RXD_MODE_1:
- DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
- dev->name);
- break;
- case RXD_MODE_3B:
- DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
- dev->name);
- break;
- }
-
- switch (sp->config.napi) {
- case 0:
- DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
- break;
- case 1:
- DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
- break;
- }
-
- DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
- sp->config.tx_fifo_num);
-
- DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
- sp->config.rx_ring_num);
-
- switch (sp->config.intr_type) {
- case INTA:
- DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
- break;
- case MSI_X:
- DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
- break;
- }
- if (sp->config.multiq) {
- for (i = 0; i < sp->config.tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
-
- fifo->multiq = config->multiq;
- }
- DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
- dev->name);
- } else
- DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
- dev->name);
-
- switch (sp->config.tx_steering_type) {
- case NO_STEERING:
- DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
- dev->name);
- break;
- case TX_PRIORITY_STEERING:
- DBG_PRINT(ERR_DBG,
- "%s: Priority steering enabled for transmit\n",
- dev->name);
- break;
- case TX_DEFAULT_STEERING:
- DBG_PRINT(ERR_DBG,
- "%s: Default steering enabled for transmit\n",
- dev->name);
- }
-
- DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
- dev->name);
- /* Initialize device name */
- snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
- sp->product_name);
-
- if (vlan_tag_strip)
- sp->vlan_strip_flag = 1;
- else
- sp->vlan_strip_flag = 0;
-
- /*
- * Make Link state as off at this point, when the Link change
- * interrupt comes the state will be automatically changed to
- * the right state.
- */
- netif_carrier_off(dev);
-
- return 0;
-
-register_failed:
-set_swap_failed:
- iounmap(sp->bar1);
-bar1_remap_failed:
- iounmap(sp->bar0);
-bar0_remap_failed:
-mem_alloc_failed:
- free_shared_mem(sp);
- pci_disable_device(pdev);
- pci_release_regions(pdev);
- free_netdev(dev);
-
- return ret;
-}
-
-/**
- * s2io_rem_nic - Free the PCI device
- * @pdev: structure containing the PCI related information of the device.
- * Description: This function is called by the PCI subsystem to release a
- * PCI device and free up all resources held by the device. This could
- * be in response to a Hot plug event or when the driver is to be removed
- * from memory.
- */
-
-static void s2io_rem_nic(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct s2io_nic *sp;
-
- if (dev == NULL) {
- DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
- return;
- }
-
- sp = netdev_priv(dev);
-
- cancel_work_sync(&sp->rst_timer_task);
- cancel_work_sync(&sp->set_link_task);
-
- unregister_netdev(dev);
-
- free_shared_mem(sp);
- iounmap(sp->bar0);
- iounmap(sp->bar1);
- pci_release_regions(pdev);
- free_netdev(dev);
- pci_disable_device(pdev);
-}
-
-module_pci_driver(s2io_driver);
-
-static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
- struct tcphdr **tcp, struct RxD_t *rxdp,
- struct s2io_nic *sp)
-{
- int ip_off;
- u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
-
- if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
- DBG_PRINT(INIT_DBG,
- "%s: Non-TCP frames not supported for LRO\n",
- __func__);
- return -1;
- }
-
- /* Checking for DIX type or DIX type with VLAN */
- if ((l2_type == 0) || (l2_type == 4)) {
- ip_off = HEADER_ETHERNET_II_802_3_SIZE;
- /*
- * If vlan stripping is disabled and the frame is VLAN tagged,
- * shift the offset by the VLAN header size bytes.
- */
- if ((!sp->vlan_strip_flag) &&
- (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
- ip_off += HEADER_VLAN_SIZE;
- } else {
- /* LLC, SNAP etc are considered non-mergeable */
- return -1;
- }
-
- *ip = (struct iphdr *)(buffer + ip_off);
- ip_len = (u8)((*ip)->ihl);
- ip_len <<= 2;
- *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
-
- return 0;
-}
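/*
 * Editor's note, illustrative only: for a plain DIX frame the IP header
 * starts at offset 14; if VLAN stripping is disabled and the frame is
 * tagged, it starts at 14 + 4 = 18:
 *
 *   dst(6) src(6) type(2)                  -> ip_off = 14
 *   dst(6) src(6) 0x8100(2) tag(2) type(2) -> ip_off = 18
 */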
-
-static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
- struct tcphdr *tcp)
-{
- DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
- if ((lro->iph->saddr != ip->saddr) ||
- (lro->iph->daddr != ip->daddr) ||
- (lro->tcph->source != tcp->source) ||
- (lro->tcph->dest != tcp->dest))
- return -1;
- return 0;
-}
-
-static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
-{
- return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
-}
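/*
 * Editor's note, a worked example of the computation above under assumed
 * values: tot_len = 1500, a 20-byte IP header (ihl = 5) and a 32-byte TCP
 * header (doff = 8, timestamps present) give
 *
 *   1500 - (5 << 2) - (8 << 2) = 1500 - 20 - 32 = 1448 payload bytes.
 */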
-
-static void initiate_new_session(struct lro *lro, u8 *l2h,
- struct iphdr *ip, struct tcphdr *tcp,
- u32 tcp_pyld_len, u16 vlan_tag)
-{
- DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
- lro->l2h = l2h;
- lro->iph = ip;
- lro->tcph = tcp;
- lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
- lro->tcp_ack = tcp->ack_seq;
- lro->sg_num = 1;
- lro->total_len = ntohs(ip->tot_len);
- lro->frags_len = 0;
- lro->vlan_tag = vlan_tag;
- /*
- * Check if we saw TCP timestamp.
- * Other consistency checks have already been done.
- */
- if (tcp->doff == 8) {
- __be32 *ptr;
- ptr = (__be32 *)(tcp+1);
- lro->saw_ts = 1;
- lro->cur_tsval = ntohl(*(ptr+1));
- lro->cur_tsecr = *(ptr+2);
- }
- lro->in_use = 1;
-}
-
-static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
-{
- struct iphdr *ip = lro->iph;
- struct tcphdr *tcp = lro->tcph;
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
-
- DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
-
- /* Update L3 header */
- csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
- ip->tot_len = htons(lro->total_len);
-
- /* Update L4 header */
- tcp->ack_seq = lro->tcp_ack;
- tcp->window = lro->window;
-
- /* Update tsecr field if this session has timestamps enabled */
- if (lro->saw_ts) {
- __be32 *ptr = (__be32 *)(tcp + 1);
- *(ptr+2) = lro->cur_tsecr;
- }
-
- /* Update counters required for calculation of
- * average no. of packets aggregated.
- */
- swstats->sum_avg_pkts_aggregated += lro->sg_num;
- swstats->num_aggregations++;
-}
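/*
 * Editor's sketch, assuming csum_replace2() behaves as described in
 * include/net/checksum.h: the call above folds the change of the 16-bit
 * tot_len field into ip->check incrementally (RFC 1624) instead of
 * recomputing the whole header checksum. A freestanding equivalent:
 */
static inline u16 ip_csum_update16(u16 check, u16 old_field, u16 new_field)
{
	/* HC' = ~(~HC + ~m + m'), computed in 32 bits, carries folded back */
	u32 sum = (u16)~check + (u16)~old_field + new_field;

	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (u16)~sum;
}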
-
-static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
- struct tcphdr *tcp, u32 l4_pyld)
-{
- DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
- lro->total_len += l4_pyld;
- lro->frags_len += l4_pyld;
- lro->tcp_next_seq += l4_pyld;
- lro->sg_num++;
-
- /* Update ack seq no. and window (from this pkt) in LRO object */
- lro->tcp_ack = tcp->ack_seq;
- lro->window = tcp->window;
-
- if (lro->saw_ts) {
- __be32 *ptr;
- /* Update tsecr and tsval from this packet */
- ptr = (__be32 *)(tcp+1);
- lro->cur_tsval = ntohl(*(ptr+1));
- lro->cur_tsecr = *(ptr + 2);
- }
-}
-
-static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
- struct tcphdr *tcp, u32 tcp_pyld_len)
-{
- u8 *ptr;
-
- DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
-
- if (!tcp_pyld_len) {
- /* Runt frame or a pure ack */
- return -1;
- }
-
- if (ip->ihl != 5) /* IP has options */
- return -1;
-
- /* If we see CE codepoint in IP header, packet is not mergeable */
- if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
- return -1;
-
- /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
- if (tcp->urg || tcp->psh || tcp->rst ||
- tcp->syn || tcp->fin ||
- tcp->ece || tcp->cwr || !tcp->ack) {
- /*
- * Currently recognize only the ack control word and
- * any other control field being set would result in
- * flushing the LRO session
- */
- return -1;
- }
-
- /*
- * Allow only one TCP timestamp option. Don't aggregate if
- * any other options are detected.
- */
- if (tcp->doff != 5 && tcp->doff != 8)
- return -1;
-
- if (tcp->doff == 8) {
- ptr = (u8 *)(tcp + 1);
- while (*ptr == TCPOPT_NOP)
- ptr++;
- if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
- return -1;
-
- /* Ensure timestamp value increases monotonically */
- if (l_lro)
- if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
- return -1;
-
- /* timestamp echo reply should be non-zero */
- if (*((__be32 *)(ptr+6)) == 0)
- return -1;
- }
-
- return 0;
-}
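/*
 * Editor's note, illustrative: doff == 8 means a 32-byte TCP header, i.e.
 * exactly 12 option bytes, and the only layout accepted here is the
 * canonical timestamp block, which also explains the ptr offsets above:
 *
 *   byte:  0    1    2     3     4..7    8..11
 *          NOP  NOP  kind  len   tsval   tsecr
 *
 * with kind == TCPOPT_TIMESTAMP and len == TCPOLEN_TIMESTAMP (10).
 */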
-
-static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
- u8 **tcp, u32 *tcp_len, struct lro **lro,
- struct RxD_t *rxdp, struct s2io_nic *sp)
-{
- struct iphdr *ip;
- struct tcphdr *tcph;
- int ret = 0, i;
- u16 vlan_tag = 0;
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
-
- ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
- rxdp, sp);
- if (ret)
- return ret;
-
- DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
-
- vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
- tcph = (struct tcphdr *)*tcp;
- *tcp_len = get_l4_pyld_length(ip, tcph);
- for (i = 0; i < MAX_LRO_SESSIONS; i++) {
- struct lro *l_lro = &ring_data->lro0_n[i];
- if (l_lro->in_use) {
- if (check_for_socket_match(l_lro, ip, tcph))
- continue;
- /* Sock pair matched */
- *lro = l_lro;
-
- if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
- DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
- "expected 0x%x, actual 0x%x\n",
- __func__,
- (*lro)->tcp_next_seq,
- ntohl(tcph->seq));
-
- swstats->outof_sequence_pkts++;
- ret = 2;
- break;
- }
-
- if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
- *tcp_len))
- ret = 1; /* Aggregate */
- else
- ret = 2; /* Flush both */
- break;
- }
- }
-
- if (ret == 0) {
- /* Before searching for available LRO objects,
- * check if the pkt is L3/L4 aggregatable. If not
- * don't create new LRO session. Just send this
- * packet up.
- */
- if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
- return 5;
-
- for (i = 0; i < MAX_LRO_SESSIONS; i++) {
- struct lro *l_lro = &ring_data->lro0_n[i];
- if (!(l_lro->in_use)) {
- *lro = l_lro;
- ret = 3; /* Begin anew */
- break;
- }
- }
- }
-
- if (ret == 0) { /* sessions exceeded */
- DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
- __func__);
- *lro = NULL;
- return ret;
- }
-
- switch (ret) {
- case 3:
- initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
- vlan_tag);
- break;
- case 2:
- update_L3L4_header(sp, *lro);
- break;
- case 1:
- aggregate_new_rx(*lro, ip, tcph, *tcp_len);
- if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
- update_L3L4_header(sp, *lro);
- ret = 4; /* Flush the LRO */
- }
- break;
- default:
- DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
- break;
- }
-
- return ret;
-}
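/*
 * Editor's sketch, hypothetical and not in the original driver: the magic
 * return values of s2io_club_tcp_session() (plus -1 for frames rejected by
 * check_L2_lro_capable()), named for readability:
 */
enum s2io_lro_verdict {
	S2IO_LRO_SESSIONS_EXCEEDED = 0,	/* no free LRO slot, send pkt up */
	S2IO_LRO_AGGREGATE = 1,		/* pkt merged into a session */
	S2IO_LRO_FLUSH_BOTH = 2,	/* flush session, then send pkt up */
	S2IO_LRO_BEGIN_NEW = 3,		/* new session initiated */
	S2IO_LRO_FLUSH_SESSION = 4,	/* max aggregation reached, flush */
	S2IO_LRO_SEND_UP = 5,		/* not L3/L4 aggregatable */
};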
-
-static void clear_lro_session(struct lro *lro)
-{
- memset(lro, 0, sizeof(struct lro));
-}
-
-static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
-{
- struct net_device *dev = skb->dev;
- struct s2io_nic *sp = netdev_priv(dev);
-
- skb->protocol = eth_type_trans(skb, dev);
- if (vlan_tag && sp->vlan_strip_flag)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- if (sp->config.napi)
- netif_receive_skb(skb);
- else
- netif_rx(skb);
-}
-
-static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
- struct sk_buff *skb, u32 tcp_len)
-{
- struct sk_buff *first = lro->parent;
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
-
- first->len += tcp_len;
- first->data_len = lro->frags_len;
- skb_pull(skb, (skb->len - tcp_len));
- if (skb_shinfo(first)->frag_list)
- lro->last_frag->next = skb;
- else
- skb_shinfo(first)->frag_list = skb;
- first->truesize += skb->truesize;
- lro->last_frag = skb;
- swstats->clubbed_frms_cnt++;
-}
-
-/**
- * s2io_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct s2io_nic *sp = netdev_priv(netdev);
-
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (netif_running(netdev)) {
- /* Bring down the card, while avoiding PCI I/O */
- do_s2io_card_down(sp, 0);
- }
- pci_disable_device(pdev);
-
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * s2io_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has experienced a hard reset,
- * followed by fixups by BIOS, and has its config space
- * set up identically to what it was at cold boot.
- */
-static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct s2io_nic *sp = netdev_priv(netdev);
-
- if (pci_enable_device(pdev)) {
- pr_err("Cannot re-enable PCI device after reset.\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- pci_set_master(pdev);
- s2io_reset(sp);
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * s2io_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells
- * us that it's OK to resume normal operation.
- */
-static void s2io_io_resume(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct s2io_nic *sp = netdev_priv(netdev);
-
- if (netif_running(netdev)) {
- if (s2io_card_up(sp)) {
- pr_err("Can't bring device back up after reset.\n");
- return;
- }
-
- if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
- s2io_card_down(sp);
- pr_err("Can't restore mac addr after reset.\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
- netif_tx_wake_all_queues(netdev);
-}
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
deleted file mode 100644
index cb7080eb5912..000000000000
--- a/drivers/net/ethernet/neterion/s2io.h
+++ /dev/null
@@ -1,1124 +0,0 @@
-/************************************************************************
- * s2io.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
- * Copyright(c) 2002-2010 Exar Corp.
-
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- ************************************************************************/
-#include <linux/io-64-nonatomic-lo-hi.h>
-#ifndef _S2IO_H
-#define _S2IO_H
-
-#define TBD 0
-#define s2BIT(loc) (0x8000000000000000ULL >> (loc))
-#define vBIT(val, loc, sz) (((u64)val) << (64-loc-sz))
-#define INV(d) (((d&0xff)<<24) | (((d>>8)&0xff)<<16) | (((d>>16)&0xff)<<8) | ((d>>24)&0xff))
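/*
 * Editor's note, worked examples: Xena numbers bits MSB-first, so bit 0 is
 * the most significant bit of a 64-bit register:
 *
 *   s2BIT(0)         == 0x8000000000000000ULL
 *   s2BIT(7)         == 0x0100000000000000ULL
 *   vBIT(0x6, 0, 4)  == 0x6000000000000000ULL  (4-bit field at bits 0-3)
 *   vBIT(0xF, 60, 4) == 0x000000000000000FULL  (4-bit field at bits 60-63)
 */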
-
-#undef SUCCESS
-#define SUCCESS 0
-#define FAILURE -1
-#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL
-#define S2IO_DISABLE_MAC_ENTRY 0xFFFFFFFFFFFFULL
-#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100
-#define S2IO_BIT_RESET 1
-#define S2IO_BIT_SET 2
-#define CHECKBIT(value, nbit) (value & (1 << nbit))
-
-/* Maximum time to flicker LED when asked to identify NIC using ethtool */
-#define MAX_FLICKER_TIME 60000 /* 60 Secs */
-
-/* Maximum outstanding splits to be configured into xena. */
-enum {
- XENA_ONE_SPLIT_TRANSACTION = 0,
- XENA_TWO_SPLIT_TRANSACTION = 1,
- XENA_THREE_SPLIT_TRANSACTION = 2,
- XENA_FOUR_SPLIT_TRANSACTION = 3,
- XENA_EIGHT_SPLIT_TRANSACTION = 4,
- XENA_TWELVE_SPLIT_TRANSACTION = 5,
- XENA_SIXTEEN_SPLIT_TRANSACTION = 6,
- XENA_THIRTYTWO_SPLIT_TRANSACTION = 7
-};
-#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
-
-/* OS concerned variables and constants */
-#define WATCH_DOG_TIMEOUT (15 * HZ)
-#define EFILL 0x1234
-#define ALIGN_SIZE 127
-#define PCIX_COMMAND_REGISTER 0x62
-
-/*
- * Debug related variables.
- */
-/* different debug levels. */
-#define ERR_DBG 0
-#define INIT_DBG 1
-#define INFO_DBG 2
-#define TX_DBG 3
-#define INTR_DBG 4
-
-/* Global variable that defines the present debug level of the driver. */
-static int debug_level = ERR_DBG;
-
-/* DEBUG message print. */
-#define DBG_PRINT(dbg_level, fmt, args...) do { \
- if (dbg_level <= debug_level) \
- pr_info(fmt, ##args); \
- } while (0)
-
-/* Protocol assist features of the NIC */
-#define L3_CKSUM_OK 0xFFFF
-#define L4_CKSUM_OK 0xFFFF
-#define S2IO_JUMBO_SIZE 9600
-
-/* Driver statistics maintained by driver */
-struct swStat {
- unsigned long long single_ecc_errs;
- unsigned long long double_ecc_errs;
- unsigned long long parity_err_cnt;
- unsigned long long serious_err_cnt;
- unsigned long long soft_reset_cnt;
- unsigned long long fifo_full_cnt;
- unsigned long long ring_full_cnt[8];
- /* LRO statistics */
- unsigned long long clubbed_frms_cnt;
- unsigned long long sending_both;
- unsigned long long outof_sequence_pkts;
- unsigned long long flush_max_pkts;
- unsigned long long sum_avg_pkts_aggregated;
- unsigned long long num_aggregations;
- /* Other statistics */
- unsigned long long mem_alloc_fail_cnt;
- unsigned long long pci_map_fail_cnt;
- unsigned long long watchdog_timer_cnt;
- unsigned long long mem_allocated;
- unsigned long long mem_freed;
- unsigned long long link_up_cnt;
- unsigned long long link_down_cnt;
- unsigned long long link_up_time;
- unsigned long long link_down_time;
-
- /* Transfer Code statistics */
- unsigned long long tx_buf_abort_cnt;
- unsigned long long tx_desc_abort_cnt;
- unsigned long long tx_parity_err_cnt;
- unsigned long long tx_link_loss_cnt;
- unsigned long long tx_list_proc_err_cnt;
-
- unsigned long long rx_parity_err_cnt;
- unsigned long long rx_abort_cnt;
- unsigned long long rx_parity_abort_cnt;
- unsigned long long rx_rda_fail_cnt;
- unsigned long long rx_unkn_prot_cnt;
- unsigned long long rx_fcs_err_cnt;
- unsigned long long rx_buf_size_err_cnt;
- unsigned long long rx_rxd_corrupt_cnt;
- unsigned long long rx_unkn_err_cnt;
-
- /* Error/alarm statistics*/
- unsigned long long tda_err_cnt;
- unsigned long long pfc_err_cnt;
- unsigned long long pcc_err_cnt;
- unsigned long long tti_err_cnt;
- unsigned long long lso_err_cnt;
- unsigned long long tpa_err_cnt;
- unsigned long long sm_err_cnt;
- unsigned long long mac_tmac_err_cnt;
- unsigned long long mac_rmac_err_cnt;
- unsigned long long xgxs_txgxs_err_cnt;
- unsigned long long xgxs_rxgxs_err_cnt;
- unsigned long long rc_err_cnt;
- unsigned long long prc_pcix_err_cnt;
- unsigned long long rpa_err_cnt;
- unsigned long long rda_err_cnt;
- unsigned long long rti_err_cnt;
- unsigned long long mc_err_cnt;
-
-};
-
-/* XPAK related alarms and warnings */
-struct xpakStat {
- u64 alarm_transceiver_temp_high;
- u64 alarm_transceiver_temp_low;
- u64 alarm_laser_bias_current_high;
- u64 alarm_laser_bias_current_low;
- u64 alarm_laser_output_power_high;
- u64 alarm_laser_output_power_low;
- u64 warn_transceiver_temp_high;
- u64 warn_transceiver_temp_low;
- u64 warn_laser_bias_current_high;
- u64 warn_laser_bias_current_low;
- u64 warn_laser_output_power_high;
- u64 warn_laser_output_power_low;
- u64 xpak_regs_stat;
- u32 xpak_timer_count;
-};
-
-
-/* The statistics block of Xena */
-struct stat_block {
-/* Tx MAC statistics counters. */
- __le32 tmac_data_octets;
- __le32 tmac_frms;
- __le64 tmac_drop_frms;
- __le32 tmac_bcst_frms;
- __le32 tmac_mcst_frms;
- __le64 tmac_pause_ctrl_frms;
- __le32 tmac_ucst_frms;
- __le32 tmac_ttl_octets;
- __le32 tmac_any_err_frms;
- __le32 tmac_nucst_frms;
- __le64 tmac_ttl_less_fb_octets;
- __le64 tmac_vld_ip_octets;
- __le32 tmac_drop_ip;
- __le32 tmac_vld_ip;
- __le32 tmac_rst_tcp;
- __le32 tmac_icmp;
- __le64 tmac_tcp;
- __le32 reserved_0;
- __le32 tmac_udp;
-
-/* Rx MAC Statistics counters. */
- __le32 rmac_data_octets;
- __le32 rmac_vld_frms;
- __le64 rmac_fcs_err_frms;
- __le64 rmac_drop_frms;
- __le32 rmac_vld_bcst_frms;
- __le32 rmac_vld_mcst_frms;
- __le32 rmac_out_rng_len_err_frms;
- __le32 rmac_in_rng_len_err_frms;
- __le64 rmac_long_frms;
- __le64 rmac_pause_ctrl_frms;
- __le64 rmac_unsup_ctrl_frms;
- __le32 rmac_accepted_ucst_frms;
- __le32 rmac_ttl_octets;
- __le32 rmac_discarded_frms;
- __le32 rmac_accepted_nucst_frms;
- __le32 reserved_1;
- __le32 rmac_drop_events;
- __le64 rmac_ttl_less_fb_octets;
- __le64 rmac_ttl_frms;
- __le64 reserved_2;
- __le32 rmac_usized_frms;
- __le32 reserved_3;
- __le32 rmac_frag_frms;
- __le32 rmac_osized_frms;
- __le32 reserved_4;
- __le32 rmac_jabber_frms;
- __le64 rmac_ttl_64_frms;
- __le64 rmac_ttl_65_127_frms;
- __le64 reserved_5;
- __le64 rmac_ttl_128_255_frms;
- __le64 rmac_ttl_256_511_frms;
- __le64 reserved_6;
- __le64 rmac_ttl_512_1023_frms;
- __le64 rmac_ttl_1024_1518_frms;
- __le32 rmac_ip;
- __le32 reserved_7;
- __le64 rmac_ip_octets;
- __le32 rmac_drop_ip;
- __le32 rmac_hdr_err_ip;
- __le32 reserved_8;
- __le32 rmac_icmp;
- __le64 rmac_tcp;
- __le32 rmac_err_drp_udp;
- __le32 rmac_udp;
- __le64 rmac_xgmii_err_sym;
- __le64 rmac_frms_q0;
- __le64 rmac_frms_q1;
- __le64 rmac_frms_q2;
- __le64 rmac_frms_q3;
- __le64 rmac_frms_q4;
- __le64 rmac_frms_q5;
- __le64 rmac_frms_q6;
- __le64 rmac_frms_q7;
- __le16 rmac_full_q3;
- __le16 rmac_full_q2;
- __le16 rmac_full_q1;
- __le16 rmac_full_q0;
- __le16 rmac_full_q7;
- __le16 rmac_full_q6;
- __le16 rmac_full_q5;
- __le16 rmac_full_q4;
- __le32 reserved_9;
- __le32 rmac_pause_cnt;
- __le64 rmac_xgmii_data_err_cnt;
- __le64 rmac_xgmii_ctrl_err_cnt;
- __le32 rmac_err_tcp;
- __le32 rmac_accepted_ip;
-
-/* PCI/PCI-X Read transaction statistics. */
- __le32 new_rd_req_cnt;
- __le32 rd_req_cnt;
- __le32 rd_rtry_cnt;
- __le32 new_rd_req_rtry_cnt;
-
-/* PCI/PCI-X Write/Read transaction statistics. */
- __le32 wr_req_cnt;
- __le32 wr_rtry_rd_ack_cnt;
- __le32 new_wr_req_rtry_cnt;
- __le32 new_wr_req_cnt;
- __le32 wr_disc_cnt;
- __le32 wr_rtry_cnt;
-
-/* PCI/PCI-X Write / DMA Transaction statistics. */
- __le32 txp_wr_cnt;
- __le32 rd_rtry_wr_ack_cnt;
- __le32 txd_wr_cnt;
- __le32 txd_rd_cnt;
- __le32 rxd_wr_cnt;
- __le32 rxd_rd_cnt;
- __le32 rxf_wr_cnt;
- __le32 txf_rd_cnt;
-
-/* Tx MAC statistics overflow counters. */
- __le32 tmac_data_octets_oflow;
- __le32 tmac_frms_oflow;
- __le32 tmac_bcst_frms_oflow;
- __le32 tmac_mcst_frms_oflow;
- __le32 tmac_ucst_frms_oflow;
- __le32 tmac_ttl_octets_oflow;
- __le32 tmac_any_err_frms_oflow;
- __le32 tmac_nucst_frms_oflow;
- __le64 tmac_vlan_frms;
- __le32 tmac_drop_ip_oflow;
- __le32 tmac_vld_ip_oflow;
- __le32 tmac_rst_tcp_oflow;
- __le32 tmac_icmp_oflow;
- __le32 tpa_unknown_protocol;
- __le32 tmac_udp_oflow;
- __le32 reserved_10;
- __le32 tpa_parse_failure;
-
-/* Rx MAC Statistics overflow counters. */
- __le32 rmac_data_octets_oflow;
- __le32 rmac_vld_frms_oflow;
- __le32 rmac_vld_bcst_frms_oflow;
- __le32 rmac_vld_mcst_frms_oflow;
- __le32 rmac_accepted_ucst_frms_oflow;
- __le32 rmac_ttl_octets_oflow;
- __le32 rmac_discarded_frms_oflow;
- __le32 rmac_accepted_nucst_frms_oflow;
- __le32 rmac_usized_frms_oflow;
- __le32 rmac_drop_events_oflow;
- __le32 rmac_frag_frms_oflow;
- __le32 rmac_osized_frms_oflow;
- __le32 rmac_ip_oflow;
- __le32 rmac_jabber_frms_oflow;
- __le32 rmac_icmp_oflow;
- __le32 rmac_drop_ip_oflow;
- __le32 rmac_err_drp_udp_oflow;
- __le32 rmac_udp_oflow;
- __le32 reserved_11;
- __le32 rmac_pause_cnt_oflow;
- __le64 rmac_ttl_1519_4095_frms;
- __le64 rmac_ttl_4096_8191_frms;
- __le64 rmac_ttl_8192_max_frms;
- __le64 rmac_ttl_gt_max_frms;
- __le64 rmac_osized_alt_frms;
- __le64 rmac_jabber_alt_frms;
- __le64 rmac_gt_max_alt_frms;
- __le64 rmac_vlan_frms;
- __le32 rmac_len_discard;
- __le32 rmac_fcs_discard;
- __le32 rmac_pf_discard;
- __le32 rmac_da_discard;
- __le32 rmac_red_discard;
- __le32 rmac_rts_discard;
- __le32 reserved_12;
- __le32 rmac_ingm_full_discard;
- __le32 reserved_13;
- __le32 rmac_accepted_ip_oflow;
- __le32 reserved_14;
- __le32 link_fault_cnt;
- u8 buffer[20];
- struct swStat sw_stat;
- struct xpakStat xpak_stat;
-};
-
-/* Default value for 'vlan_tag_strip' configuration parameter */
-#define NO_STRIP_IN_PROMISC 2
-
-/*
- * Structures representing different init time configuration
- * parameters of the NIC.
- */
-
-#define MAX_TX_FIFOS 8
-#define MAX_RX_RINGS 8
-
-#define FIFO_DEFAULT_NUM 5
-#define FIFO_UDP_MAX_NUM 2 /* 0 - even, 1 -odd ports */
-#define FIFO_OTHER_MAX_NUM 1
-
-
-#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 128)
-#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 86)
-#define MAX_TX_DESC (MAX_AVAILABLE_TXDS)
-
-/* FIFO mappings for all possible number of fifos configured */
-static const int fifo_map[][MAX_TX_FIFOS] = {
- {0, 0, 0, 0, 0, 0, 0, 0},
- {0, 0, 0, 0, 1, 1, 1, 1},
- {0, 0, 0, 1, 1, 1, 2, 2},
- {0, 0, 1, 1, 2, 2, 3, 3},
- {0, 0, 1, 1, 2, 2, 3, 4},
- {0, 0, 1, 1, 2, 3, 4, 5},
- {0, 0, 1, 2, 3, 4, 5, 6},
- {0, 1, 2, 3, 4, 5, 6, 7},
-};
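/*
 * Editor's sketch, assuming this is how the Tx path consumes the map: row
 * (tx_fifo_num - 1) maps the eight QoS priorities onto the configured
 * FIFOs. With three FIFOs, fifo_map[2] = {0, 0, 0, 1, 1, 1, 2, 2}, so an
 * skb of priority 5 is steered to FIFO 1:
 *
 *   queue = config->fifo_mapping[skb->priority & (MAX_TX_FIFOS - 1)];
 */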
-
-static const u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
-
-/* Maintains Per FIFO related information. */
-struct tx_fifo_config {
-#define MAX_AVAILABLE_TXDS 8192
- u32 fifo_len; /* specifies length of FIFO, up to 8192, i.e. no. of TxDLs */
-/* Priority definition */
-#define TX_FIFO_PRI_0 0 /*Highest */
-#define TX_FIFO_PRI_1 1
-#define TX_FIFO_PRI_2 2
-#define TX_FIFO_PRI_3 3
-#define TX_FIFO_PRI_4 4
-#define TX_FIFO_PRI_5 5
-#define TX_FIFO_PRI_6 6
-#define TX_FIFO_PRI_7 7 /*lowest */
- u8 fifo_priority; /* specifies priority level for FIFO */
- /* user should not set two FIFOs with the same priority */
- u8 f_no_snoop;
-#define NO_SNOOP_TXD 0x01
-#define NO_SNOOP_TXD_BUFFER 0x02
-};
-
-
-/* Maintains per Ring related information */
-struct rx_ring_config {
- u32 num_rxd; /*No of RxDs per Rx Ring */
-#define RX_RING_PRI_0 0 /* highest */
-#define RX_RING_PRI_1 1
-#define RX_RING_PRI_2 2
-#define RX_RING_PRI_3 3
-#define RX_RING_PRI_4 4
-#define RX_RING_PRI_5 5
-#define RX_RING_PRI_6 6
-#define RX_RING_PRI_7 7 /* lowest */
-
- u8 ring_priority; /*Specifies service priority of ring */
- /* OSM should not set any two rings with same priority */
- u8 ring_org; /*Organization of ring */
-#define RING_ORG_BUFF1 0x01
-#define RX_RING_ORG_BUFF3 0x03
-#define RX_RING_ORG_BUFF5 0x05
-
- u8 f_no_snoop;
-#define NO_SNOOP_RXD 0x01
-#define NO_SNOOP_RXD_BUFFER 0x02
-};
-
-/* This structure contains the values of the tunable parameters
- * of the H/W
- */
-struct config_param {
-/* Tx Side */
- u32 tx_fifo_num; /*Number of Tx FIFOs */
-
- /* 0-No steering, 1-Priority steering, 2-Default fifo map */
-#define NO_STEERING 0
-#define TX_PRIORITY_STEERING 0x1
-#define TX_DEFAULT_STEERING 0x2
- u8 tx_steering_type;
-
- u8 fifo_mapping[MAX_TX_FIFOS];
- struct tx_fifo_config tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
- u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
- u64 tx_intr_type;
-#define INTA 0
-#define MSI_X 2
- u8 intr_type;
- u8 napi;
-
- /* Specifies if Tx Intr is UTILZ or PER_LIST type. */
-
-/* Rx Side */
- u32 rx_ring_num; /*Number of receive rings */
-#define MAX_RX_BLOCKS_PER_RING 150
-
- struct rx_ring_config rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
-
-#define HEADER_ETHERNET_II_802_3_SIZE 14
-#define HEADER_802_2_SIZE 3
-#define HEADER_SNAP_SIZE 5
-#define HEADER_VLAN_SIZE 4
-
-#define MIN_MTU 46
-#define MAX_PYLD 1500
-#define MAX_MTU (MAX_PYLD+18)
-#define MAX_MTU_VLAN (MAX_PYLD+22)
-#define MAX_PYLD_JUMBO 9600
-#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18)
-#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22)
- u16 bus_speed;
- int max_mc_addr; /* xena=64 herc=256 */
- int max_mac_addr; /* xena=16 herc=64 */
- int mc_start_offset; /* xena=16 herc=64 */
- u8 multiq;
-};
-
-/* Structure representing MAC Addrs */
-struct mac_addr {
- u8 mac_addr[ETH_ALEN];
-};
-
-/* Structure that represent every FIFO element in the BAR1
- * Address location.
- */
-struct TxFIFO_element {
- u64 TxDL_Pointer;
-
- u64 List_Control;
-#define TX_FIFO_LAST_TXD_NUM(val) vBIT(val,0,8)
-#define TX_FIFO_FIRST_LIST s2BIT(14)
-#define TX_FIFO_LAST_LIST s2BIT(15)
-#define TX_FIFO_FIRSTNLAST_LIST vBIT(3,14,2)
-#define TX_FIFO_SPECIAL_FUNC s2BIT(23)
-#define TX_FIFO_DS_NO_SNOOP s2BIT(31)
-#define TX_FIFO_BUFF_NO_SNOOP s2BIT(30)
-};
-
-/* Tx descriptor structure */
-struct TxD {
- u64 Control_1;
-/* bit mask */
-#define TXD_LIST_OWN_XENA s2BIT(7)
-#define TXD_T_CODE (s2BIT(12)|s2BIT(13)|s2BIT(14)|s2BIT(15))
-#define TXD_T_CODE_OK(val) (!(val & TXD_T_CODE))
-#define GET_TXD_T_CODE(val) ((val & TXD_T_CODE)<<12)
-#define TXD_GATHER_CODE (s2BIT(22) | s2BIT(23))
-#define TXD_GATHER_CODE_FIRST s2BIT(22)
-#define TXD_GATHER_CODE_LAST s2BIT(23)
-#define TXD_TCP_LSO_EN s2BIT(30)
-#define TXD_UDP_COF_EN s2BIT(31)
-#define TXD_UFO_EN (s2BIT(31) | s2BIT(30))
-#define TXD_TCP_LSO_MSS(val) vBIT(val,34,14)
-#define TXD_UFO_MSS(val) vBIT(val,34,14)
-#define TXD_BUFFER0_SIZE(val) vBIT(val,48,16)
-
- u64 Control_2;
-#define TXD_TX_CKO_CONTROL (s2BIT(5)|s2BIT(6)|s2BIT(7))
-#define TXD_TX_CKO_IPV4_EN s2BIT(5)
-#define TXD_TX_CKO_TCP_EN s2BIT(6)
-#define TXD_TX_CKO_UDP_EN s2BIT(7)
-#define TXD_VLAN_ENABLE s2BIT(15)
-#define TXD_VLAN_TAG(val) vBIT(val,16,16)
-#define TXD_INT_NUMBER(val) vBIT(val,34,6)
-#define TXD_INT_TYPE_PER_LIST s2BIT(47)
-#define TXD_INT_TYPE_UTILZ s2BIT(46)
-#define TXD_SET_MARKER vBIT(0x6,0,4)
-
- u64 Buffer_Pointer;
- u64 Host_Control; /* reserved for host */
-};
-
-/* Structure to hold the phy and virt addr of every TxDL. */
-struct list_info_hold {
- dma_addr_t list_phy_addr;
- void *list_virt_addr;
-};
-
-/* Rx descriptor structure for 1 buffer mode */
-struct RxD_t {
- u64 Host_Control; /* reserved for host */
- u64 Control_1;
-#define RXD_OWN_XENA s2BIT(7)
-#define RXD_T_CODE (s2BIT(12)|s2BIT(13)|s2BIT(14)|s2BIT(15))
-#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8)
-#define RXD_FRAME_VLAN_TAG s2BIT(24)
-#define RXD_FRAME_PROTO_IPV4 s2BIT(27)
-#define RXD_FRAME_PROTO_IPV6 s2BIT(28)
-#define RXD_FRAME_IP_FRAG s2BIT(29)
-#define RXD_FRAME_PROTO_TCP s2BIT(30)
-#define RXD_FRAME_PROTO_UDP s2BIT(31)
-#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP)
-#define RXD_GET_L3_CKSUM(val) ((u16)(val>> 16) & 0xFFFF)
-#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF)
-
- u64 Control_2;
-#define THE_RXD_MARK 0x3
-#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2)
-#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62)
-
-#define MASK_VLAN_TAG vBIT(0xFFFF,48,16)
-#define SET_VLAN_TAG(val) vBIT(val,48,16)
-#define SET_NUM_TAG(val) vBIT(val,16,32)
-
-
-};
-/* Rx descriptor structure for 1 buffer mode */
-struct RxD1 {
- struct RxD_t h;
-
-#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14)
-#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14)
-#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \
- (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48)
- u64 Buffer0_ptr;
-};
-/* Rx descriptor structure for 3 or 2 buffer mode */
-
-struct RxD3 {
- struct RxD_t h;
-
-#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14)
-#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16)
-#define MASK_BUFFER2_SIZE_3 vBIT(0xFFFF,32,16)
-#define SET_BUFFER0_SIZE_3(val) vBIT(val,8,8)
-#define SET_BUFFER1_SIZE_3(val) vBIT(val,16,16)
-#define SET_BUFFER2_SIZE_3(val) vBIT(val,32,16)
-#define RXD_GET_BUFFER0_SIZE_3(Control_2) \
- (u8)((Control_2 & MASK_BUFFER0_SIZE_3) >> 48)
-#define RXD_GET_BUFFER1_SIZE_3(Control_2) \
- (u16)((Control_2 & MASK_BUFFER1_SIZE_3) >> 32)
-#define RXD_GET_BUFFER2_SIZE_3(Control_2) \
- (u16)((Control_2 & MASK_BUFFER2_SIZE_3) >> 16)
-#define BUF0_LEN 40
-#define BUF1_LEN 1
-
- u64 Buffer0_ptr;
- u64 Buffer1_ptr;
- u64 Buffer2_ptr;
-};
-
-
-/* Structure that represents the Rx descriptor block which contains
- * 128 Rx descriptors.
- */
-struct RxD_block {
-#define MAX_RXDS_PER_BLOCK_1 127
- struct RxD1 rxd[MAX_RXDS_PER_BLOCK_1];
-
- u64 reserved_0;
-#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
- u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last
- * Rxd in this blk */
- u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */
- u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch
- * the upper 32 bits should
- * be 0 */
-};
-
-#define SIZE_OF_BLOCK 4096
-
-#define RXD_MODE_1 0 /* One Buffer mode */
-#define RXD_MODE_3B 1 /* Two Buffer mode */
-
-/* Structure to hold virtual addresses of Buf0 and Buf1 in
- * 2buf mode. */
-struct buffAdd {
- void *ba_0_org;
- void *ba_1_org;
- void *ba_0;
- void *ba_1;
-};
-
-/* Structure which stores all the MAC control parameters */
-
-/* This structure stores the offset of the RxD in the ring
- * from which the Rx Interrupt processor can start picking
- * up the RxDs for processing.
- */
-struct rx_curr_get_info {
- u32 block_index;
- u32 offset;
- u32 ring_len;
-};
-
-struct rx_curr_put_info {
- u32 block_index;
- u32 offset;
- u32 ring_len;
-};
-
-/* This structure stores the offset of the TxDl in the FIFO
- * from which the Tx Interrupt processor can start picking
- * up the TxDLs for send complete interrupt processing.
- */
-struct tx_curr_get_info {
- u32 offset;
- u32 fifo_len;
-};
-
-struct tx_curr_put_info {
- u32 offset;
- u32 fifo_len;
-};
-
-struct rxd_info {
- void *virt_addr;
- dma_addr_t dma_addr;
-};
-
-/* Structure that holds the Phy and virt addresses of the Blocks */
-struct rx_block_info {
- void *block_virt_addr;
- dma_addr_t block_dma_addr;
- struct rxd_info *rxds;
-};
-
-/* Data structure to represent a LRO session */
-struct lro {
- struct sk_buff *parent;
- struct sk_buff *last_frag;
- u8 *l2h;
- struct iphdr *iph;
- struct tcphdr *tcph;
- u32 tcp_next_seq;
- __be32 tcp_ack;
- int total_len;
- int frags_len;
- int sg_num;
- int in_use;
- __be16 window;
- u16 vlan_tag;
- u32 cur_tsval;
- __be32 cur_tsecr;
- u8 saw_ts;
-} ____cacheline_aligned;
-
-/* Ring specific structure */
-struct ring_info {
- /* The ring number */
- int ring_no;
-
- /* per-ring buffer counter */
- u32 rx_bufs_left;
-
-#define MAX_LRO_SESSIONS 32
- struct lro lro0_n[MAX_LRO_SESSIONS];
- u8 lro;
-
- /* copy of sp->rxd_mode flag */
- int rxd_mode;
-
- /* Number of rxds per block for the rxd_mode */
- int rxd_count;
-
- /* copy of sp pointer */
- struct s2io_nic *nic;
-
- /* copy of sp->dev pointer */
- struct net_device *dev;
-
- /* copy of sp->pdev pointer */
- struct pci_dev *pdev;
-
- /* Per ring napi struct */
- struct napi_struct napi;
-
- unsigned long interrupt_count;
-
- /*
- * Place holders for the virtual and physical addresses of
- * all the Rx Blocks
- */
- struct rx_block_info rx_blocks[MAX_RX_BLOCKS_PER_RING];
- int block_count;
- int pkt_cnt;
-
- /*
- * Put pointer info which indicates which RxD has to be replenished
- * with a new buffer.
- */
- struct rx_curr_put_info rx_curr_put_info;
-
- /*
- * Get pointer info which indicates which is the last RxD that was
- * processed by the driver.
- */
- struct rx_curr_get_info rx_curr_get_info;
-
- /* interface MTU value */
- unsigned mtu;
-
- /* Buffer Address store. */
- struct buffAdd **ba;
-} ____cacheline_aligned;
-
-/* Fifo specific structure */
-struct fifo_info {
- /* FIFO number */
- int fifo_no;
-
- /* Maximum TxDs per TxDL */
- int max_txds;
-
- /* Place holder of all the TX List's Phy and Virt addresses. */
- struct list_info_hold *list_info;
-
- /*
- * Current offset within the tx FIFO where driver would write
- * new Tx frame
- */
- struct tx_curr_put_info tx_curr_put_info;
-
- /*
- * Current offset within tx FIFO from where the driver would start freeing
- * the buffers
- */
- struct tx_curr_get_info tx_curr_get_info;
-#define FIFO_QUEUE_START 0
-#define FIFO_QUEUE_STOP 1
- int queue_state;
-
- /* copy of sp->dev pointer */
- struct net_device *dev;
-
- /* copy of multiq status */
- u8 multiq;
-
- /* Per fifo lock */
- spinlock_t tx_lock;
-
- /* Per fifo UFO in band structure */
- u64 *ufo_in_band_v;
-
- struct s2io_nic *nic;
-} ____cacheline_aligned;
-
-/* Information related to the Tx and Rx FIFOs and Rings of Xena
- * is maintained in this structure.
- */
-struct mac_info {
-/* tx side stuff */
- /* logical pointer of start of each Tx FIFO */
- struct TxFIFO_element __iomem *tx_FIFO_start[MAX_TX_FIFOS];
-
- /* Fifo specific structure */
- struct fifo_info fifos[MAX_TX_FIFOS];
-
- /* Save virtual address of TxD page with zero DMA addr(if any) */
- void *zerodma_virt_addr;
-
-/* rx side stuff */
- /* Ring specific structure */
- struct ring_info rings[MAX_RX_RINGS];
-
- u16 rmac_pause_time;
- u16 mc_pause_threshold_q0q3;
- u16 mc_pause_threshold_q4q7;
-
- void *stats_mem; /* original pointer to allocated mem */
- dma_addr_t stats_mem_phy; /* Physical address of the stat block */
- u32 stats_mem_sz;
- struct stat_block *stats_info; /* Logical address of the stat block */
-};
-
-/* Default Tunable parameters of the NIC. */
-#define DEFAULT_FIFO_0_LEN 4096
-#define DEFAULT_FIFO_1_7_LEN 512
-#define SMALL_BLK_CNT 30
-#define LARGE_BLK_CNT 100
-
-/*
- * Structure to keep track of the MSI-X vectors and the corresponding
- * argument registered against each vector
- */
-#define MAX_REQUESTED_MSI_X 9
-struct s2io_msix_entry
-{
- u16 vector;
- u16 entry;
- void *arg;
-
- u8 type;
-#define MSIX_ALARM_TYPE 1
-#define MSIX_RING_TYPE 2
-
- u8 in_use;
-#define MSIX_REGISTERED_SUCCESS 0xAA
-};
-
-struct msix_info_st {
- u64 addr;
- u64 data;
-};
-
-/* These flags represent the device's temporary state */
-enum s2io_device_state_t
-{
- __S2IO_STATE_LINK_TASK=0,
- __S2IO_STATE_CARD_UP
-};
-
-/* Structure representing one instance of the NIC */
-struct s2io_nic {
- int rxd_mode;
- /*
- * Count of packets to be processed in a given iteration; it is
- * indicated by the quota field of the device structure when NAPI is enabled.
- */
- int pkts_to_process;
- struct net_device *dev;
- struct mac_info mac_control;
- struct config_param config;
- struct pci_dev *pdev;
- void __iomem *bar0;
- void __iomem *bar1;
-#define MAX_MAC_SUPPORTED 16
-#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
-
- struct mac_addr def_mac_addr[256];
-
- struct net_device_stats stats;
- int device_enabled_once;
-
- char name[60];
-
- /* Timer that handles I/O errors/exceptions */
- struct timer_list alarm_timer;
-
- /* Space to back up the PCI config space */
- u32 config_space[256 / sizeof(u32)];
-
-#define PROMISC 1
-#define ALL_MULTI 2
-
-#define MAX_ADDRS_SUPPORTED 64
- u16 mc_addr_count;
-
- u16 m_cast_flg;
- u16 all_multi_pos;
- u16 promisc_flg;
-
- /* Restart timer, used to restart the NIC if the device is stuck, and
- * a scheduled task that will set the correct link state once the
- * NIC's PHY has stabilized after a state change.
- */
- struct work_struct rst_timer_task;
- struct work_struct set_link_task;
-
- /* Flag that can be used to turn on or turn off the Rx checksum
- * offload feature.
- */
- int rx_csum;
-
- /* Below variables are used for fifo selection to transmit a packet */
- u16 fifo_selector[MAX_TX_FIFOS];
-
- /* Total fifos for tcp packets */
- u8 total_tcp_fifos;
-
- /*
- * Beginning index of udp for udp packets
- * Value will be equal to
- * (tx_fifo_num - FIFO_UDP_MAX_NUM - FIFO_OTHER_MAX_NUM)
- */
- u8 udp_fifo_idx;
-
- u8 total_udp_fifos;
-
- /*
- * Beginning index of fifo for all other packets
- * Value will be equal to (tx_fifo_num - FIFO_OTHER_MAX_NUM)
- */
- u8 other_fifo_idx;
-
- struct napi_struct napi;
- /* after blink, the adapter must be restored with original
- * values.
- */
- u64 adapt_ctrl_org;
-
- /* Last known link state. */
- u16 last_link_state;
-#define LINK_DOWN 1
-#define LINK_UP 2
-
- int task_flag;
- unsigned long long start_time;
- int vlan_strip_flag;
-#define MSIX_FLG 0xA5
- int num_entries;
- struct msix_entry *entries;
- int msi_detected;
- wait_queue_head_t msi_wait;
- struct s2io_msix_entry *s2io_entries;
- char desc[MAX_REQUESTED_MSI_X][25];
-
- int avail_msix_vectors; /* No. of MSI-X vectors granted by system */
-
- struct msix_info_st msix_info[0x3f];
-
-#define XFRAME_I_DEVICE 1
-#define XFRAME_II_DEVICE 2
- u8 device_type;
-
- unsigned long clubbed_frms_cnt;
- unsigned long sending_both;
- u16 lro_max_aggr_per_sess;
- volatile unsigned long state;
- u64 general_int_mask;
-
-#define VPD_STRING_LEN 80
- u8 product_name[VPD_STRING_LEN];
- u8 serial_num[VPD_STRING_LEN];
-};
-
-#define RESET_ERROR 1
-#define CMD_ERROR 2
-
-/*
- * Some registers have to be written in a particular order to
- * expect correct hardware operation. The macro SPECIAL_REG_WRITE
- * is used to perform such ordered writes. Defines UF (Upper First)
- * and LF (Lower First) will be used to specify the required write order.
- */
-#define UF 1
-#define LF 2
-static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
-{
- if (order == LF) {
- writel((u32) (val), addr);
- (void) readl(addr);
- writel((u32) (val >> 32), (addr + 4));
- (void) readl(addr + 4);
- } else {
- writel((u32) (val >> 32), (addr + 4));
- (void) readl(addr + 4);
- writel((u32) (val), addr);
- (void) readl(addr);
- }
-}
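/*
 * Editor's sketch, register name assumed for illustration: a 64-bit
 * register that the hardware commits on the lower-word write is written
 * upper half first,
 *
 *   SPECIAL_REG_WRITE(val64, &bar0->tx_fifo_partition_0, UF);
 *
 * while LF reverses the order for registers committed on the upper word.
 */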
-
-/* Interrupt related values of Xena */
-
-#define ENABLE_INTRS 1
-#define DISABLE_INTRS 2
-
-/* Highest level interrupt blocks */
-#define TX_PIC_INTR (0x0001<<0)
-#define TX_DMA_INTR (0x0001<<1)
-#define TX_MAC_INTR (0x0001<<2)
-#define TX_XGXS_INTR (0x0001<<3)
-#define TX_TRAFFIC_INTR (0x0001<<4)
-#define RX_PIC_INTR (0x0001<<5)
-#define RX_DMA_INTR (0x0001<<6)
-#define RX_MAC_INTR (0x0001<<7)
-#define RX_XGXS_INTR (0x0001<<8)
-#define RX_TRAFFIC_INTR (0x0001<<9)
-#define MC_INTR (0x0001<<10)
-#define ENA_ALL_INTRS ( TX_PIC_INTR | \
- TX_DMA_INTR | \
- TX_MAC_INTR | \
- TX_XGXS_INTR | \
- TX_TRAFFIC_INTR | \
- RX_PIC_INTR | \
- RX_DMA_INTR | \
- RX_MAC_INTR | \
- RX_XGXS_INTR | \
- RX_TRAFFIC_INTR | \
- MC_INTR )
-
-/* Interrupt masks for the general interrupt mask register */
-#define DISABLE_ALL_INTRS 0xFFFFFFFFFFFFFFFFULL
-
-#define TXPIC_INT_M s2BIT(0)
-#define TXDMA_INT_M s2BIT(1)
-#define TXMAC_INT_M s2BIT(2)
-#define TXXGXS_INT_M s2BIT(3)
-#define TXTRAFFIC_INT_M s2BIT(8)
-#define PIC_RX_INT_M s2BIT(32)
-#define RXDMA_INT_M s2BIT(33)
-#define RXMAC_INT_M s2BIT(34)
-#define MC_INT_M s2BIT(35)
-#define RXXGXS_INT_M s2BIT(36)
-#define RXTRAFFIC_INT_M s2BIT(40)
-
-/* PIC level Interrupts TODO */
-
-/* DMA level Interrupts */
-#define TXDMA_PFC_INT_M s2BIT(0)
-#define TXDMA_PCC_INT_M s2BIT(2)
-
-/* PFC block interrupts */
-#define PFC_MISC_ERR_1 s2BIT(0) /* Interrupt to indicate FIFO full */
-
-/* PCC block interrupts. */
-#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate
- PCC_FB_ECC Error. */
-
-#define RXD_GET_VLAN_TAG(Control_2) (u16)(Control_2 & MASK_VLAN_TAG)
-/*
- * Prototype declaration.
- */
-static int s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre);
-static void s2io_rem_nic(struct pci_dev *pdev);
-static int init_shared_mem(struct s2io_nic *sp);
-static void free_shared_mem(struct s2io_nic *sp);
-static int init_nic(struct s2io_nic *nic);
-static int rx_intr_handler(struct ring_info *ring_data, int budget);
-static void s2io_txpic_intr_handle(struct s2io_nic *sp);
-static void tx_intr_handler(struct fifo_info *fifo_data);
-static void s2io_handle_errors(void * dev_id);
-
-static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue);
-static void s2io_set_multicast(struct net_device *dev, bool may_sleep);
-static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
-static void s2io_link(struct s2io_nic * sp, int link);
-static void s2io_reset(struct s2io_nic * sp);
-static int s2io_poll_msix(struct napi_struct *napi, int budget);
-static int s2io_poll_inta(struct napi_struct *napi, int budget);
-static void s2io_init_pci(struct s2io_nic * sp);
-static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr);
-static void s2io_alarm_handle(struct timer_list *t);
-static irqreturn_t
-s2io_msix_ring_handle(int irq, void *dev_id);
-static irqreturn_t
-s2io_msix_fifo_handle(int irq, void *dev_id);
-static irqreturn_t s2io_isr(int irq, void *dev_id);
-static int verify_xena_quiescence(struct s2io_nic *sp);
-static const struct ethtool_ops netdev_ethtool_ops;
-static void s2io_set_link(struct work_struct *work);
-static int s2io_set_swapper(struct s2io_nic * sp);
-static void s2io_card_down(struct s2io_nic *nic);
-static int s2io_card_up(struct s2io_nic *nic);
-static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
- int bit_state, bool may_sleep);
-static int s2io_add_isr(struct s2io_nic * sp);
-static void s2io_rem_isr(struct s2io_nic * sp);
-
-static void restore_xmsi_data(struct s2io_nic *nic);
-static void do_s2io_store_unicast_mc(struct s2io_nic *sp);
-static void do_s2io_restore_unicast_mc(struct s2io_nic *sp);
-static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset);
-static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr);
-static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset);
-static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr);
-
-static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
- u8 **tcp, u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
- struct s2io_nic *sp);
-static void clear_lro_session(struct lro *lro);
-static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag);
-static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
-static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
- struct sk_buff *skb, u32 tcp_len);
-static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring);
-
-static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state);
-static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev);
-static void s2io_io_resume(struct pci_dev *pdev);
-
-#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
-#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
-#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type
-
-#define S2IO_PARM_INT(X, def_val) \
- static unsigned int X = def_val;\
- module_param(X , uint, 0);
-
-#endif /* _S2IO_H */
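Aside on the removed header above: S2IO_PARM_INT is plain token substitution. A minimal sketch of its expansion, using a hypothetical parameter name:

    /* S2IO_PARM_INT(example_param, 5) expands to roughly: */
    static unsigned int example_param = 5;
    module_param(example_param, uint, 0);   /* uint parameter, perm 0: not shown in sysfs */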
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 16c828dd5c1a..e88b1c4732a5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1435,15 +1435,19 @@ static int nfp_net_get_fs_loc(struct nfp_net *nn, u32 *rule_locs)
return 0;
}
+static u32 nfp_net_get_rx_ring_count(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ return nn->dp.num_rx_rings;
+}
+
static int nfp_net_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct nfp_net *nn = netdev_priv(netdev);
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = nn->dp.num_rx_rings;
- return 0;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = nn->fs.count;
return 0;
@@ -2501,6 +2505,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_sset_count = nfp_net_get_sset_count,
.get_rxnfc = nfp_net_get_rxnfc,
.set_rxnfc = nfp_net_set_rxnfc,
+ .get_rx_ring_count = nfp_net_get_rx_ring_count,
.get_rxfh_indir_size = nfp_net_get_rxfh_indir_size,
.get_rxfh_key_size = nfp_net_get_rxfh_key_size,
.get_rxfh = nfp_net_get_rxfh,
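The nfp change above follows a pattern that repeats in the ionic and qede hunks below: the ETHTOOL_GRXRINGS case is lifted out of .get_rxnfc into the dedicated .get_rx_ring_count ethtool operation. A minimal sketch of the conversion's shape for a hypothetical driver (the foo_* names are placeholders, not an in-tree driver):

    struct foo_priv {
            unsigned int num_rx_rings;
    };

    /* New dedicated callback: the ethtool core answers ETHTOOL_GRXRINGS
     * from here, so .get_rxnfc no longer needs a GRXRINGS case.
     */
    static u32 foo_get_rx_ring_count(struct net_device *netdev)
    {
            struct foo_priv *priv = netdev_priv(netdev);

            return priv->num_rx_rings;
    }

    static const struct ethtool_ops foo_ethtool_ops = {
            .get_rx_ring_count      = foo_get_rx_ring_count,
            /* remaining callbacks unchanged */
    };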
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 2d9efadb5d2a..1514c1019f28 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -263,9 +263,10 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
/* This means there's no module plugged in */
break;
default:
- dev_info(lif->ionic->dev, "unknown xcvr type pid=%d / 0x%x\n",
- idev->port_info->status.xcvr.pid,
- idev->port_info->status.xcvr.pid);
+ dev_dbg_ratelimited(lif->ionic->dev,
+ "unknown xcvr type pid=%d / 0x%x\n",
+ idev->port_info->status.xcvr.pid,
+ idev->port_info->status.xcvr.pid);
break;
}
@@ -843,23 +844,11 @@ static int ionic_set_channels(struct net_device *netdev,
return err;
}
-static int ionic_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info, u32 *rules)
+static u32 ionic_get_rx_ring_count(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- int err = 0;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = lif->nxqs;
- break;
- default:
- netdev_dbg(netdev, "Command parameter %d is not supported\n",
- info->cmd);
- err = -EOPNOTSUPP;
- }
- return err;
+ return lif->nxqs;
}
static u32 ionic_get_rxfh_indir_size(struct net_device *netdev)
@@ -1152,7 +1141,7 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.get_strings = ionic_get_strings,
.get_ethtool_stats = ionic_get_stats,
.get_sset_count = ionic_get_sset_count,
- .get_rxnfc = ionic_get_rxnfc,
+ .get_rx_ring_count = ionic_get_rx_ring_count,
.get_rxfh_indir_size = ionic_get_rxfh_indir_size,
.get_rxfh_key_size = ionic_get_rxfh_key_size,
.get_rxfh = ionic_get_rxfh,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 23982704273c..647f30a16a94 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1199,6 +1199,13 @@ static int qede_get_rxfh_fields(struct net_device *dev,
return 0;
}
+static u32 qede_get_rx_ring_count(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return QEDE_RSS_COUNT(edev);
+}
+
static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
@@ -1206,9 +1213,6 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
int rc = 0;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = QEDE_RSS_COUNT(edev);
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = qede_get_arfs_filter_count(edev);
info->data = QEDE_RFS_MAX_FLTR;
@@ -2289,6 +2293,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_sset_count = qede_get_sset_count,
.get_rxnfc = qede_get_rxnfc,
.set_rxnfc = qede_set_rxnfc,
+ .get_rx_ring_count = qede_get_rx_ring_count,
.get_rxfh_indir_size = qede_get_rxfh_indir_size,
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
@@ -2333,6 +2338,7 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
.get_sset_count = qede_get_sset_count,
.get_rxnfc = qede_get_rxnfc,
.set_rxnfc = qede_set_rxnfc,
+ .get_rx_ring_count = qede_get_rx_ring_count,
.get_rxfh_indir_size = qede_get_rxfh_indir_size,
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index a73dcaffa8c5..a8532ebd42ec 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -92,8 +92,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "8139too"
-#define DRV_VERSION "0.9.28"
-
#include <linux/module.h>
#include <linux/kernel.h>
@@ -115,8 +113,6 @@
#include <linux/if_vlan.h>
#include <asm/irq.h>
-#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
-
/* Default Message level */
#define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
@@ -623,7 +619,6 @@ struct rtl8139_private {
MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(use_io, bool, 0);
MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
@@ -955,17 +950,6 @@ static int rtl8139_init_one(struct pci_dev *pdev,
board_idx++;
- /* when we're built into the kernel, the driver version message
- * is only printed if at least one 8139 board has been found
- */
-#ifndef MODULE
- {
- static int printed_version;
- if (!printed_version++)
- pr_info(RTL8139_DRIVER_NAME "\n");
- }
-#endif
-
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) {
dev_info(&pdev->dev,
@@ -2383,7 +2367,6 @@ static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
{
struct rtl8139_private *tp = netdev_priv(dev);
strscpy(info->driver, DRV_NAME, sizeof(info->driver));
- strscpy(info->version, DRV_VERSION, sizeof(info->version));
strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
}
@@ -2656,25 +2639,4 @@ static struct pci_driver rtl8139_pci_driver = {
.driver.pm = &rtl8139_pm_ops,
};
-
-static int __init rtl8139_init_module (void)
-{
- /* when we're a module, we always print a version message,
- * even if no 8139 board is found.
- */
-#ifdef MODULE
- pr_info(RTL8139_DRIVER_NAME "\n");
-#endif
-
- return pci_register_driver(&rtl8139_pci_driver);
-}
-
-
-static void __exit rtl8139_cleanup_module (void)
-{
- pci_unregister_driver (&rtl8139_pci_driver);
-}
-
-
-module_init(rtl8139_init_module);
-module_exit(rtl8139_cleanup_module);
+module_pci_driver(rtl8139_pci_driver);
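module_pci_driver() is the stock helper for PCI drivers whose init and exit paths do nothing beyond registering and unregistering the driver; the version banner was the only extra work here, and it went away with DRV_VERSION. A sketch of roughly what the macro generates (approximate, not the exact preprocessor output):

    /* module_pci_driver(rtl8139_pci_driver) expands to approximately: */
    static int __init rtl8139_pci_driver_init(void)
    {
            return pci_register_driver(&rtl8139_pci_driver);
    }
    module_init(rtl8139_pci_driver_init);

    static void __exit rtl8139_pci_driver_exit(void)
    {
            pci_unregister_driver(&rtl8139_pci_driver);
    }
    module_exit(rtl8139_pci_driver_exit);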
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 272c83bfdc6c..9b0f4f9631db 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_REALTEK
bool "Realtek devices"
default y
- depends on PCI || (PARPORT && X86)
+ depends on PCI
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -17,20 +17,6 @@ config NET_VENDOR_REALTEK
if NET_VENDOR_REALTEK
-config ATP
- tristate "AT-LAN-TEC/RealTek pocket adapter support"
- depends on PARPORT && X86
- select CRC32
- help
- This is a network (Ethernet) device which attaches to your parallel
- port. Read the file <file:drivers/net/ethernet/realtek/atp.c>
- if you want to use this. If you intend to use this driver, you
- should have said N to the "Parallel printer support", because the two
- drivers don't like each other.
-
- To compile this driver as a module, choose M here: the module
- will be called atp.
-
config 8139CP
tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support"
depends on PCI
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index 046adf503ff4..12a9c399f40c 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
-obj-$(CONFIG_ATP) += atp.o
r8169-y += r8169_main.o r8169_firmware.o r8169_phy_config.o
r8169-$(CONFIG_R8169_LEDS) += r8169_leds.o
obj-$(CONFIG_R8169) += r8169.o
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
deleted file mode 100644
index 0d65434982a2..000000000000
--- a/drivers/net/ethernet/realtek/atp.c
+++ /dev/null
@@ -1,886 +0,0 @@
-/* atp.c: Attached (pocket) ethernet adapter driver for linux. */
-/*
- This is a driver for common OEM pocket (parallel port)
- ethernet adapters based on the Realtek RTL8002 and RTL8012 chips.
-
- Written 1993-2000 by Donald Becker.
-
- This software may be used and distributed according to the terms of
- the GNU General Public License (GPL), incorporated herein by reference.
- Drivers based on or derived from this code fall under the GPL and must
- retain the authorship, copyright and license notice. This file is not
- a complete program and may only be used when the entire operating
- system is licensed under the GPL.
-
- Copyright 1993 United States Government as represented by the Director,
- National Security Agency. Copyright 1994-2000 retained by the original
- author, Donald Becker. The timer-based reset code was supplied in 1995
- by Bill Carlson, wwc@super.org.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- Support information and updates available at
- http://www.scyld.com/network/atp.html
-
-
- Modular support/softnet added by Alan Cox.
- _bit abuse fixed up by Alan Cox
-
-*/
-
-static const char version[] =
-"atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n";
-
-/* The user-configurable values.
- These may be modified when a driver module is loaded.*/
-
-static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
-#define net_debug debug
-
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static int max_interrupt_work = 15;
-
-#define NUM_UNITS 2
-/* The standard set of ISA module parameters. */
-static int io[NUM_UNITS];
-static int irq[NUM_UNITS];
-static int xcvr[NUM_UNITS]; /* The data transfer mode. */
-
-/* Operational parameters that are set at compile time. */
-
-/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT (400*HZ/1000)
-
-/*
- This file is a device driver for the RealTek (aka AT-Lan-Tec) pocket
- ethernet adapter. This is a common low-cost OEM pocket ethernet
- adapter, sold under many names.
-
- Sources:
- This driver was written from the packet driver assembly code provided by
- Vincent Bono of AT-Lan-Tec. Ever try to figure out how a complicated
- device works just from the assembly code? It ain't pretty. The following
- description is written based on guesses and writing lots of special-purpose
- code to test my theorized operation.
-
- In 1997 Realtek made available the documentation for the second generation
- RTL8012 chip, which has led to several driver improvements.
- http://www.realtek.com.tw/
-
- Theory of Operation
-
- The RTL8002 adapter seems to be built around a custom spin of the SEEQ
- controller core. It probably has a 16K or 64K internal packet buffer, of
- which the first 4K is devoted to transmit and the rest to receive.
- The controller maintains the queue of received packets and the packet buffer
- access pointer internally, with only 'reset to beginning' and 'skip to next
- packet' commands visible. The transmit packet queue holds two (or more?)
- packets: both 'retransmit this packet' (due to collision) and 'transmit next
- packet' commands must be started by hand.
-
- The station address is stored in a standard bit-serial EEPROM which must be
- read (ughh) by the device driver. (Provisions have been made for
- substituting a 74S288 PROM, but I haven't gotten reports of any models
- using it.) Unlike built-in devices, a pocket adapter can temporarily lose
- power without indication to the device driver. The major effect is that
- the station address, receive filter (promiscuous, etc.) and transceiver
- must be reset.
-
- The controller itself has 16 registers, some of which use only the lower
- bits. The registers are read and written 4 bits at a time. The four bit
- register address is presented on the data lines along with a few additional
- timing and control bits. The data is then read from status port or written
- to the data port.
-
- Correction: the controller has two banks of 16 registers. The second
- bank contains only the multicast filter table (now used) and the EEPROM
- access registers.
-
- Since the bulk data transfer of the actual packets through the slow
- parallel port dominates the driver's running time, four distinct data
- (non-register) transfer modes are provided by the adapter, two in each
- direction. In the first mode timing for the nibble transfers is
- provided through the data port. In the second mode the same timing is
- provided through the control port. In either case the data is read from
- the status port and written to the data port, just as when accessing
- registers.
-
- In addition to the basic data transfer methods, several more modes are
- created by adding some delay, doing multiple reads of the data to allow
- it to stabilize. This delay seems to be needed on most machines.
-
- The data transfer mode is stored in the 'dev->if_port' field. Its default
- value is '4'. It may be overridden at boot-time using the third parameter
- to the "ether=..." initialization.
-
- The header file <atp.h> provides inline functions that encapsulate the
- register and data access methods. These functions are hand-tuned to
- generate reasonable object code. This header file also documents my
- interpretations of the device registers.
-*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include "atp.h"
-
-MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
-MODULE_DESCRIPTION("RealTek RTL8002/8012 parallel port Ethernet driver");
-MODULE_LICENSE("GPL");
-
-module_param(max_interrupt_work, int, 0);
-module_param(debug, int, 0);
-module_param_hw_array(io, int, ioport, NULL, 0);
-module_param_hw_array(irq, int, irq, NULL, 0);
-module_param_array(xcvr, int, NULL, 0);
-MODULE_PARM_DESC(max_interrupt_work, "ATP maximum events handled per interrupt");
-MODULE_PARM_DESC(debug, "ATP debug level (0-7)");
-MODULE_PARM_DESC(io, "ATP I/O base address(es)");
-MODULE_PARM_DESC(irq, "ATP IRQ number(s)");
-MODULE_PARM_DESC(xcvr, "ATP transceiver(s) (0=internal, 1=external)");
-
-/* The number of low I/O ports used by the ethercard. */
-#define ETHERCARD_TOTAL_SIZE 3
-
-/* Sequence to switch an 8012 from printer mux to ethernet mode. */
-static char mux_8012[] = { 0xff, 0xf7, 0xff, 0xfb, 0xf3, 0xfb, 0xff, 0xf7,};
-
-struct net_local {
- spinlock_t lock;
- struct net_device *next_module;
- struct timer_list timer; /* Media selection timer. */
- struct net_device *dev; /* Timer dev. */
- unsigned long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
- int saved_tx_size;
- unsigned int tx_unit_busy:1;
- unsigned char re_tx, /* Number of packet retransmissions. */
- addr_mode, /* Current Rx filter e.g. promiscuous, etc. */
- pac_cnt_in_tx_buf;
-};
-
-/* This code, written by wwc@super.org, resets the adapter every
- TIMED_CHECKER ticks. This recovers from an unknown error which
- hangs the device. */
-#define TIMED_CHECKER (HZ/4)
-#ifdef TIMED_CHECKER
-#include <linux/timer.h>
-static void atp_timed_checker(struct timer_list *t);
-#endif
-
-/* Index to functions, as function prototypes. */
-
-static int atp_probe1(long ioaddr);
-static void get_node_ID(struct net_device *dev);
-static unsigned short eeprom_op(long ioaddr, unsigned int cmd);
-static int net_open(struct net_device *dev);
-static void hardware_init(struct net_device *dev);
-static void write_packet(long ioaddr, int length, unsigned char *packet, int pad, int mode);
-static void trigger_send(long ioaddr, int length);
-static netdev_tx_t atp_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t atp_interrupt(int irq, void *dev_id);
-static void net_rx(struct net_device *dev);
-static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode);
-static int net_close(struct net_device *dev);
-static void set_rx_mode(struct net_device *dev);
-static void tx_timeout(struct net_device *dev, unsigned int txqueue);
-
-
-/* A list of all installed ATP devices, for removing the driver module. */
-static struct net_device *root_atp_dev;
-
-/* Check for a network adapter of this type, and return '0' iff one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- If dev->base_addr == 2, allocate space for the device and return success
- (detachable devices only).
-
- FIXME: we should use the parport layer for this
- */
-static int __init atp_init(void)
-{
- int *port, ports[] = {0x378, 0x278, 0x3bc, 0};
- int base_addr = io[0];
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return atp_probe1(base_addr);
- else if (base_addr == 1) /* Don't probe at all. */
- return -ENXIO;
-
- for (port = ports; *port; port++) {
- long ioaddr = *port;
- outb(0x57, ioaddr + PAR_DATA);
- if (inb(ioaddr + PAR_DATA) != 0x57)
- continue;
- if (atp_probe1(ioaddr) == 0)
- return 0;
- }
-
- return -ENODEV;
-}
-
-static const struct net_device_ops atp_netdev_ops = {
- .ndo_open = net_open,
- .ndo_stop = net_close,
- .ndo_start_xmit = atp_send_packet,
- .ndo_set_rx_mode = set_rx_mode,
- .ndo_tx_timeout = tx_timeout,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init atp_probe1(long ioaddr)
-{
- struct net_device *dev = NULL;
- struct net_local *lp;
- int saved_ctrl_reg, status, i;
- int res;
-
- outb(0xff, ioaddr + PAR_DATA);
- /* Save the original value of the Control register, in case we guessed
- wrong. */
- saved_ctrl_reg = inb(ioaddr + PAR_CONTROL);
- if (net_debug > 3)
- printk("atp: Control register was %#2.2x.\n", saved_ctrl_reg);
- /* IRQEN=0, SLCTB=high INITB=high, AUTOFDB=high, STBB=high. */
- outb(0x04, ioaddr + PAR_CONTROL);
-#ifndef final_version
- if (net_debug > 3) {
- /* Turn off the printer multiplexer on the 8012. */
- for (i = 0; i < 8; i++)
- outb(mux_8012[i], ioaddr + PAR_DATA);
- write_reg(ioaddr, MODSEL, 0x00);
- printk("atp: Registers are ");
- for (i = 0; i < 32; i++)
- printk(" %2.2x", read_nibble(ioaddr, i));
- printk(".\n");
- }
-#endif
- /* Turn off the printer multiplexer on the 8012. */
- for (i = 0; i < 8; i++)
- outb(mux_8012[i], ioaddr + PAR_DATA);
- write_reg_high(ioaddr, CMR1, CMR1h_RESET);
- /* udelay() here? */
- status = read_nibble(ioaddr, CMR1);
-
- if (net_debug > 3) {
- printk(KERN_DEBUG "atp: Status nibble was %#2.2x..", status);
- for (i = 0; i < 32; i++)
- printk(" %2.2x", read_nibble(ioaddr, i));
- printk("\n");
- }
-
- if ((status & 0x78) != 0x08) {
- /* The pocket adapter probe failed, restore the control register. */
- outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
- return -ENODEV;
- }
- status = read_nibble(ioaddr, CMR2_h);
- if ((status & 0x78) != 0x10) {
- outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
- return -ENODEV;
- }
-
- dev = alloc_etherdev(sizeof(struct net_local));
- if (!dev)
- return -ENOMEM;
-
- /* Find the IRQ used by triggering an interrupt. */
- write_reg_byte(ioaddr, CMR2, 0x01); /* No accept mode, IRQ out. */
- write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); /* Enable Tx and Rx. */
-
- /* Omit autoIRQ routine for now. Use "table lookup" instead. Uhgggh. */
- if (irq[0])
- dev->irq = irq[0];
- else if (ioaddr == 0x378)
- dev->irq = 7;
- else
- dev->irq = 5;
- write_reg_high(ioaddr, CMR1, CMR1h_TxRxOFF); /* Disable Tx and Rx units. */
- write_reg(ioaddr, CMR2, CMR2_NULL);
-
- dev->base_addr = ioaddr;
-
- /* Read the station address PROM. */
- get_node_ID(dev);
-
-#ifndef MODULE
- if (net_debug)
- printk(KERN_INFO "%s", version);
-#endif
-
- printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, "
- "SAPROM %pM.\n",
- dev->name, dev->base_addr, dev->irq, dev->dev_addr);
-
- /* Reset the ethernet hardware and activate the printer pass-through. */
- write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
-
- lp = netdev_priv(dev);
- lp->addr_mode = CMR2h_Normal;
- spin_lock_init(&lp->lock);
-
- /* For the ATP adapter the "if_port" is really the data transfer mode. */
- if (xcvr[0])
- dev->if_port = xcvr[0];
- else
- dev->if_port = (dev->mem_start & 0xf) ? (dev->mem_start & 0x7) : 4;
- if (dev->mem_end & 0xf)
- net_debug = dev->mem_end & 7;
-
- dev->netdev_ops = &atp_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- res = register_netdev(dev);
- if (res) {
- free_netdev(dev);
- return res;
- }
-
- lp->next_module = root_atp_dev;
- root_atp_dev = dev;
-
- return 0;
-}
-
-/* Read the station address PROM, usually a word-wide EEPROM. */
-static void __init get_node_ID(struct net_device *dev)
-{
- long ioaddr = dev->base_addr;
- __be16 addr[ETH_ALEN / 2];
- int sa_offset = 0;
- int i;
-
- write_reg(ioaddr, CMR2, CMR2_EEPROM); /* Point to the EEPROM control registers. */
-
- /* Some adapters have the station address at offset 15 instead of offset
- zero. Check for it, and fix it if needed. */
- if (eeprom_op(ioaddr, EE_READ(0)) == 0xffff)
- sa_offset = 15;
-
- for (i = 0; i < 3; i++)
- addr[i] =
- cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
- eth_hw_addr_set(dev, (u8 *)addr);
-
- write_reg(ioaddr, CMR2, CMR2_NULL);
-}
-
-/*
- An EEPROM read command starts by shifting out 0x60+address, and then
- shifting in the serial data. See the NatSemi databook for details.
- * ________________
- * CS : __|
- * ___ ___
- * CLK: ______| |___| |
- * __ _______ _______
- * DI : __X_______X_______X
- * DO : _________X_______X
- */
-
-static unsigned short __init eeprom_op(long ioaddr, u32 cmd)
-{
- unsigned eedata_out = 0;
- int num_bits = EE_CMD_SIZE;
-
- while (--num_bits >= 0) {
- char outval = (cmd & (1<<num_bits)) ? EE_DATA_WRITE : 0;
- write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_LOW);
- write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_HIGH);
- eedata_out <<= 1;
- if (read_nibble(ioaddr, PROM_DATA) & EE_DATA_READ)
- eedata_out++;
- }
- write_reg_high(ioaddr, PROM_CMD, EE_CLK_LOW & ~EE_CS);
- return eedata_out;
-}
-
-
-/* Open/initialize the board. This is called (in the current kernel)
- sometime after booting when the 'ifconfig' program is run.
-
- This routine sets everything up anew at each open, even
- registers that "should" only need to be set once at boot, so that
- there is a non-reboot way to recover if something goes wrong.
-
- This is an attachable device: if there is no private entry then it wasn't
- probed for at boot-time, and we need to probe for it again.
- */
-static int net_open(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ret;
-
- /* The interrupt line is turned off (tri-stated) when the device isn't in
- use. That's especially important for "attached" interfaces where the
- port or interrupt may be shared. */
- ret = request_irq(dev->irq, atp_interrupt, 0, dev->name, dev);
- if (ret)
- return ret;
-
- hardware_init(dev);
-
- lp->dev = dev;
- timer_setup(&lp->timer, atp_timed_checker, 0);
- lp->timer.expires = jiffies + TIMED_CHECKER;
- add_timer(&lp->timer);
-
- netif_start_queue(dev);
- return 0;
-}
-
-/* This routine resets the hardware. We initialize everything, assuming that
- the hardware may have been temporarily detached. */
-static void hardware_init(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
- int i;
-
- /* Turn off the printer multiplexer on the 8012. */
- for (i = 0; i < 8; i++)
- outb(mux_8012[i], ioaddr + PAR_DATA);
- write_reg_high(ioaddr, CMR1, CMR1h_RESET);
-
- for (i = 0; i < 6; i++)
- write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
-
- write_reg_high(ioaddr, CMR2, lp->addr_mode);
-
- if (net_debug > 2) {
- printk(KERN_DEBUG "%s: Reset: current Rx mode %d.\n", dev->name,
- (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f);
- }
-
- write_reg(ioaddr, CMR2, CMR2_IRQOUT);
- write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
-
- /* Enable the interrupt line from the serial port. */
- outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
-
- /* Unmask the interesting interrupts. */
- write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
- write_reg_high(ioaddr, IMR, ISRh_RxErr);
-
- lp->tx_unit_busy = 0;
- lp->pac_cnt_in_tx_buf = 0;
- lp->saved_tx_size = 0;
-}
-
-static void trigger_send(long ioaddr, int length)
-{
- write_reg_byte(ioaddr, TxCNT0, length & 0xff);
- write_reg(ioaddr, TxCNT1, length >> 8);
- write_reg(ioaddr, CMR1, CMR1_Xmit);
-}
-
-static void write_packet(long ioaddr, int length, unsigned char *packet, int pad_len, int data_mode)
-{
- if (length & 1)
- {
- length++;
- pad_len++;
- }
-
- outb(EOC+MAR, ioaddr + PAR_DATA);
- if ((data_mode & 1) == 0) {
- /* Write the packet out, starting with the write addr. */
- outb(WrAddr+MAR, ioaddr + PAR_DATA);
- do {
- write_byte_mode0(ioaddr, *packet++);
- } while (--length > pad_len) ;
- do {
- write_byte_mode0(ioaddr, 0);
- } while (--length > 0) ;
- } else {
- /* Write the packet out in slow mode. */
- unsigned char outbyte = *packet++;
-
- outb(Ctrl_LNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
- outb(WrAddr+MAR, ioaddr + PAR_DATA);
-
- outb((outbyte & 0x0f)|0x40, ioaddr + PAR_DATA);
- outb(outbyte & 0x0f, ioaddr + PAR_DATA);
- outbyte >>= 4;
- outb(outbyte & 0x0f, ioaddr + PAR_DATA);
- outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
- while (--length > pad_len)
- write_byte_mode1(ioaddr, *packet++);
- while (--length > 0)
- write_byte_mode1(ioaddr, 0);
- }
- /* Terminate the Tx frame. End of write: ECB. */
- outb(0xff, ioaddr + PAR_DATA);
- outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
-}
-
-static void tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- long ioaddr = dev->base_addr;
-
- printk(KERN_WARNING "%s: Transmit timed out, %s?\n", dev->name,
- inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem"
- : "IRQ conflict");
- dev->stats.tx_errors++;
- /* Try to restart the adapter. */
- hardware_init(dev);
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
- dev->stats.tx_errors++;
-}
-
-static netdev_tx_t atp_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
- int length;
- unsigned long flags;
-
- length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
-
- netif_stop_queue(dev);
-
- /* Disable interrupts by writing 0x00 to the Interrupt Mask Register.
- This sequence must not be interrupted by an incoming packet. */
-
- spin_lock_irqsave(&lp->lock, flags);
- write_reg(ioaddr, IMR, 0);
- write_reg_high(ioaddr, IMR, 0);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- write_packet(ioaddr, length, skb->data, length-skb->len, dev->if_port);
-
- lp->pac_cnt_in_tx_buf++;
- if (lp->tx_unit_busy == 0) {
- trigger_send(ioaddr, length);
- lp->saved_tx_size = 0; /* Redundant */
- lp->re_tx = 0;
- lp->tx_unit_busy = 1;
- } else
- lp->saved_tx_size = length;
- /* Re-enable the LPT interrupts. */
- write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
- write_reg_high(ioaddr, IMR, ISRh_RxErr);
-
- dev_kfree_skb (skb);
- return NETDEV_TX_OK;
-}
-
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-static irqreturn_t atp_interrupt(int irq, void *dev_instance)
-{
- struct net_device *dev = dev_instance;
- struct net_local *lp;
- long ioaddr;
- static int num_tx_since_rx;
- int boguscount = max_interrupt_work;
- int handled = 0;
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- spin_lock(&lp->lock);
-
- /* Disable additional spurious interrupts. */
- outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
-
- /* The adapter's output is currently the IRQ line, switch it to data. */
- write_reg(ioaddr, CMR2, CMR2_NULL);
- write_reg(ioaddr, IMR, 0);
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: In interrupt ", dev->name);
- while (--boguscount > 0) {
- int status = read_nibble(ioaddr, ISR);
- if (net_debug > 5)
- printk("loop status %02x..", status);
-
- if (status & (ISR_RxOK<<3)) {
- handled = 1;
- write_reg(ioaddr, ISR, ISR_RxOK); /* Clear the Rx interrupt. */
- do {
- int read_status = read_nibble(ioaddr, CMR1);
- if (net_debug > 6)
- printk("handling Rx packet %02x..", read_status);
- /* We acknowledged the normal Rx interrupt, so if the interrupt
- is still outstanding we must have a Rx error. */
- if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */
- dev->stats.rx_over_errors++;
- /* Set to no-accept mode long enough to remove a packet. */
- write_reg_high(ioaddr, CMR2, CMR2h_OFF);
- net_rx(dev);
- /* Clear the interrupt and return to normal Rx mode. */
- write_reg_high(ioaddr, ISR, ISRh_RxErr);
- write_reg_high(ioaddr, CMR2, lp->addr_mode);
- } else if ((read_status & (CMR1_BufEnb << 3)) == 0) {
- net_rx(dev);
- num_tx_since_rx = 0;
- } else
- break;
- } while (--boguscount > 0);
- } else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) {
- handled = 1;
- if (net_debug > 6)
- printk("handling Tx done..");
- /* Clear the Tx interrupt. We should check for too many failures
- and reinitialize the adapter. */
- write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK);
- if (status & (ISR_TxErr<<3)) {
- dev->stats.collisions++;
- if (++lp->re_tx > 15) {
- dev->stats.tx_aborted_errors++;
- hardware_init(dev);
- break;
- }
- /* Attempt to retransmit. */
- if (net_debug > 6) printk("attempting to ReTx");
- write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit);
- } else {
- /* Finish up the transmit. */
- dev->stats.tx_packets++;
- lp->pac_cnt_in_tx_buf--;
- if ( lp->saved_tx_size) {
- trigger_send(ioaddr, lp->saved_tx_size);
- lp->saved_tx_size = 0;
- lp->re_tx = 0;
- } else
- lp->tx_unit_busy = 0;
- netif_wake_queue(dev); /* Inform upper layers. */
- }
- num_tx_since_rx++;
- } else if (num_tx_since_rx > 8 &&
- time_after(jiffies, lp->last_rx_time + HZ)) {
- if (net_debug > 2)
- printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and "
- "%ld jiffies status %02x CMR1 %02x.\n", dev->name,
- num_tx_since_rx, jiffies - lp->last_rx_time, status,
- (read_nibble(ioaddr, CMR1) >> 3) & 15);
- dev->stats.rx_missed_errors++;
- hardware_init(dev);
- num_tx_since_rx = 0;
- break;
- } else
- break;
- }
-
-	/* The following code fixes a rare (and very difficult to track down)
- problem where the adapter forgets its ethernet address. */
- {
- int i;
- for (i = 0; i < 6; i++)
- write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
-#if 0 && defined(TIMED_CHECKER)
- mod_timer(&lp->timer, jiffies + TIMED_CHECKER);
-#endif
- }
-
- /* Tell the adapter that it can go back to using the output line as IRQ. */
- write_reg(ioaddr, CMR2, CMR2_IRQOUT);
- /* Enable the physical interrupt line, which is sure to be low until.. */
- outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
- /* .. we enable the interrupt sources. */
- write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
- write_reg_high(ioaddr, IMR, ISRh_RxErr); /* Hmmm, really needed? */
-
- spin_unlock(&lp->lock);
-
- if (net_debug > 5) printk("exiting interrupt.\n");
- return IRQ_RETVAL(handled);
-}
-
-#ifdef TIMED_CHECKER
-/* The following code fixes a rare (and very difficult to track down)
- problem where the adapter forgets its ethernet address. */
-static void atp_timed_checker(struct timer_list *t)
-{
- struct net_local *lp = timer_container_of(lp, t, timer);
- struct net_device *dev = lp->dev;
- long ioaddr = dev->base_addr;
- int tickssofar = jiffies - lp->last_rx_time;
- int i;
-
- spin_lock(&lp->lock);
- if (tickssofar > 2*HZ) {
-#if 1
- for (i = 0; i < 6; i++)
- write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
- lp->last_rx_time = jiffies;
-#else
- for (i = 0; i < 6; i++)
- if (read_cmd_byte(ioaddr, PAR0 + i) != atp_timed_dev->dev_addr[i])
- {
- struct net_local *lp = netdev_priv(atp_timed_dev);
- write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]);
- if (i == 2)
- dev->stats.tx_errors++;
- else if (i == 3)
- dev->stats.tx_dropped++;
- else if (i == 4)
- dev->stats.collisions++;
- else
- dev->stats.rx_errors++;
- }
-#endif
- }
- spin_unlock(&lp->lock);
- lp->timer.expires = jiffies + TIMED_CHECKER;
- add_timer(&lp->timer);
-}
-#endif
-
-/* We have a good packet(s), get it/them out of the buffers. */
-static void net_rx(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
- struct rx_header rx_head;
-
- /* Process the received packet. */
- outb(EOC+MAR, ioaddr + PAR_DATA);
- read_block(ioaddr, 8, (unsigned char*)&rx_head, dev->if_port);
- if (net_debug > 5)
- printk(KERN_DEBUG " rx_count %04x %04x %04x %04x..", rx_head.pad,
- rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr);
- if ((rx_head.rx_status & 0x77) != 0x01) {
- dev->stats.rx_errors++;
- if (rx_head.rx_status & 0x0004) dev->stats.rx_frame_errors++;
- else if (rx_head.rx_status & 0x0002) dev->stats.rx_crc_errors++;
- if (net_debug > 3)
- printk(KERN_DEBUG "%s: Unknown ATP Rx error %04x.\n",
- dev->name, rx_head.rx_status);
- if (rx_head.rx_status & 0x0020) {
- dev->stats.rx_fifo_errors++;
- write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE);
- write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
- } else if (rx_head.rx_status & 0x0050)
- hardware_init(dev);
- return;
- } else {
- /* Malloc up new buffer. The "-4" omits the FCS (CRC). */
- int pkt_len = (rx_head.rx_count & 0x7ff) - 4;
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
- dev->stats.rx_dropped++;
- goto done;
- }
-
- skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- done:
- write_reg(ioaddr, CMR1, CMR1_NextPkt);
- lp->last_rx_time = jiffies;
-}
-
-static void read_block(long ioaddr, int length, unsigned char *p, int data_mode)
-{
-	if (data_mode <= 3) { /* Modes 0 to 3 */
- outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
- outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR,
- ioaddr + PAR_DATA);
- if (data_mode <= 1) { /* Mode 0 or 1 */
- do { *p++ = read_byte_mode0(ioaddr); } while (--length > 0);
- } else { /* Mode 2 or 3 */
- do { *p++ = read_byte_mode2(ioaddr); } while (--length > 0);
- }
- } else if (data_mode <= 5) {
- do { *p++ = read_byte_mode4(ioaddr); } while (--length > 0);
- } else {
- do { *p++ = read_byte_mode6(ioaddr); } while (--length > 0);
- }
-
- outb(EOC+HNib+MAR, ioaddr + PAR_DATA);
- outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
-}
-
-/* The inverse routine to net_open(). */
-static int
-net_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
-
- netif_stop_queue(dev);
-
- timer_delete_sync(&lp->timer);
-
- /* Flush the Tx and disable Rx here. */
- lp->addr_mode = CMR2h_OFF;
- write_reg_high(ioaddr, CMR2, CMR2h_OFF);
-
- /* Free the IRQ line. */
- outb(0x00, ioaddr + PAR_CONTROL);
- free_irq(dev->irq, dev);
-
- /* Reset the ethernet hardware and activate the printer pass-through. */
- write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
- return 0;
-}
-
-/*
- * Set or clear the multicast filter for this adapter.
- */
-
-static void set_rx_mode(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
-
- if (!netdev_mc_empty(dev) || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
- lp->addr_mode = CMR2h_PROMISC;
- else
- lp->addr_mode = CMR2h_Normal;
- write_reg_high(ioaddr, CMR2, lp->addr_mode);
-}
-
-static int __init atp_init_module(void) {
- if (debug) /* Emit version even if no cards detected. */
- printk(KERN_INFO "%s", version);
- return atp_init();
-}
-
-static void __exit atp_cleanup_module(void) {
- struct net_device *next_dev;
-
- while (root_atp_dev) {
- struct net_local *atp_local = netdev_priv(root_atp_dev);
- next_dev = atp_local->next_module;
- unregister_netdev(root_atp_dev);
- /* No need to release_region(), since we never snarf it. */
- free_netdev(root_atp_dev);
- root_atp_dev = next_dev;
- }
-}
-
-module_init(atp_init_module);
-module_exit(atp_cleanup_module);
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
deleted file mode 100644
index b202184eddd4..000000000000
--- a/drivers/net/ethernet/realtek/atp.h
+++ /dev/null
@@ -1,262 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Linux header file for the ATP pocket ethernet adapter. */
-/* v1.09 8/9/2000 becker@scyld.com. */
-
-#include <linux/if_ether.h>
-#include <linux/types.h>
-
-/* The header prepended to received packets. */
-struct rx_header {
- ushort pad; /* Pad. */
- ushort rx_count;
- ushort rx_status; /* Unknown bit assignments :-<. */
- ushort cur_addr; /* Apparently the current buffer address(?) */
-};
-
-#define PAR_DATA 0
-#define PAR_STATUS 1
-#define PAR_CONTROL 2
-
-#define Ctrl_LNibRead 0x08 /* LP_PSELECP */
-#define Ctrl_HNibRead 0
-#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */
-#define Ctrl_HNibWrite 0
-#define Ctrl_SelData 0x04 /* LP_PINITP */
-#define Ctrl_IRQEN 0x10 /* LP_PINTEN */
-
-#define EOW 0xE0
-#define EOC 0xE0
-#define WrAddr 0x40 /* Set address of EPLC read, write register. */
-#define RdAddr 0xC0
-#define HNib 0x10
-
-enum page0_regs {
- /* The first six registers hold
- * the ethernet physical station address.
- */
- PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
- TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */
- TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */
- ISR = 10, IMR = 11, /* Interrupt status and mask. */
- CMR1 = 12, /* Command register 1. */
- CMR2 = 13, /* Command register 2. */
- MODSEL = 14, /* Mode select register. */
- MAR = 14, /* Memory address register (?). */
- CMR2_h = 0x1d,
-};
-
-enum eepage_regs {
- PROM_CMD = 6,
- PROM_DATA = 7 /* Note that PROM_CMD is in the "high" bits. */
-};
-
-#define ISR_TxOK 0x01
-#define ISR_RxOK 0x04
-#define ISR_TxErr 0x02
-#define ISRh_RxErr 0x11 /* ISR, high nibble */
-
-#define CMR1h_MUX 0x08 /* Select printer multiplexor on 8012. */
-#define CMR1h_RESET 0x04 /* Reset. */
-#define CMR1h_RxENABLE 0x02 /* Rx unit enable. */
-#define CMR1h_TxENABLE 0x01 /* Tx unit enable. */
-#define CMR1h_TxRxOFF 0x00
-#define CMR1_ReXmit 0x08 /* Trigger a retransmit. */
-#define CMR1_Xmit 0x04 /* Trigger a transmit. */
-#define CMR1_IRQ 0x02 /* Interrupt active. */
-#define CMR1_BufEnb 0x01 /* Enable the buffer(?). */
-#define CMR1_NextPkt 0x01 /* Enable the buffer(?). */
-
-#define CMR2_NULL 8
-#define CMR2_IRQOUT 9
-#define CMR2_RAMTEST 10
-#define CMR2_EEPROM 12 /* Set to page 1, for reading the EEPROM. */
-
-#define CMR2h_OFF 0 /* No accept mode. */
-#define CMR2h_Physical 1 /* Accept a physical address match only. */
-#define CMR2h_Normal 2 /* Accept physical and broadcast address. */
-#define CMR2h_PROMISC 3 /* Promiscuous mode. */
-
-/* An inline function used below: it differs from inb() by explicitly
- * returning an unsigned char, saving a truncation.
- */
-static inline unsigned char inbyte(unsigned short port)
-{
- unsigned char _v;
-
- __asm__ __volatile__ ("inb %w1,%b0" : "=a" (_v) : "d" (port));
- return _v;
-}
-
-/* Read register OFFSET.
- * This command should always be terminated with read_end().
- */
-static inline unsigned char read_nibble(short port, unsigned char offset)
-{
- unsigned char retval;
-
- outb(EOC+offset, port + PAR_DATA);
- outb(RdAddr+offset, port + PAR_DATA);
- inbyte(port + PAR_STATUS); /* Settling time delay */
- retval = inbyte(port + PAR_STATUS);
- outb(EOC+offset, port + PAR_DATA);
-
- return retval;
-}
-
-/* Functions for bulk data read. The interrupt line is always disabled. */
-/* Get a byte using read mode 0, reading data from the control lines. */
-static inline unsigned char read_byte_mode0(short ioaddr)
-{
- unsigned char low_nib;
-
- outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
- inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
-}
-
-/* The same as read_byte_mode0(), but does multiple inb()s for stability. */
-static inline unsigned char read_byte_mode2(short ioaddr)
-{
- unsigned char low_nib;
-
- outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
-}
-
-/* Read a byte through the data register. */
-static inline unsigned char read_byte_mode4(short ioaddr)
-{
- unsigned char low_nib;
-
- outb(RdAddr | MAR, ioaddr + PAR_DATA);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
-}
-
-/* Read a byte through the data register, double reading to allow settling. */
-static inline unsigned char read_byte_mode6(short ioaddr)
-{
- unsigned char low_nib;
-
- outb(RdAddr | MAR, ioaddr + PAR_DATA);
- inbyte(ioaddr + PAR_STATUS);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
- inbyte(ioaddr + PAR_STATUS);
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
-}
-
-static inline void
-write_reg(short port, unsigned char reg, unsigned char value)
-{
- unsigned char outval;
-
- outb(EOC | reg, port + PAR_DATA);
- outval = WrAddr | reg;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA); /* Double write for PS/2. */
-
- outval &= 0xf0;
- outval |= value;
- outb(outval, port + PAR_DATA);
- outval &= 0x1f;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA);
-
- outb(EOC | outval, port + PAR_DATA);
-}
-
-static inline void
-write_reg_high(short port, unsigned char reg, unsigned char value)
-{
- unsigned char outval = EOC | HNib | reg;
-
- outb(outval, port + PAR_DATA);
- outval &= WrAddr | HNib | 0x0f;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA); /* Double write for PS/2. */
-
- outval = WrAddr | HNib | value;
- outb(outval, port + PAR_DATA);
- outval &= HNib | 0x0f; /* HNib | value */
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA);
-
- outb(EOC | HNib | outval, port + PAR_DATA);
-}
-
-/* Write a byte out using nibble mode. The low nibble is written first. */
-static inline void
-write_reg_byte(short port, unsigned char reg, unsigned char value)
-{
- unsigned char outval;
-
- outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */
- outval = WrAddr | reg;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA); /* Double write for PS/2. */
-
- outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
- outb(value & 0x0f, port + PAR_DATA);
- value >>= 4;
- outb(value, port + PAR_DATA);
- outb(0x10 | value, port + PAR_DATA);
- outb(0x10 | value, port + PAR_DATA);
-
- outb(EOC | value, port + PAR_DATA); /* Reset the address register. */
-}
-
-/* Bulk data writes to the packet buffer. The interrupt line remains enabled.
- * The first, faster method uses only the dataport (data modes 0, 2 & 4).
- * The second (backup) method uses data and control regs (modes 1, 3 & 5).
- * It should only be needed when there is skew between the individual data
- * lines.
- */
-static inline void write_byte_mode0(short ioaddr, unsigned char value)
-{
- outb(value & 0x0f, ioaddr + PAR_DATA);
- outb((value>>4) | 0x10, ioaddr + PAR_DATA);
-}
-
-static inline void write_byte_mode1(short ioaddr, unsigned char value)
-{
- outb(value & 0x0f, ioaddr + PAR_DATA);
- outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
- outb((value>>4) | 0x10, ioaddr + PAR_DATA);
- outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
-}
-
-/* Write 16bit VALUE to the packet buffer: the same as above just doubled. */
-static inline void write_word_mode0(short ioaddr, unsigned short value)
-{
- outb(value & 0x0f, ioaddr + PAR_DATA);
- value >>= 4;
- outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
- value >>= 4;
- outb(value & 0x0f, ioaddr + PAR_DATA);
- value >>= 4;
- outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
-}
-
-/* EEPROM_Ctrl bits. */
-#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
-#define EE_CS 0x02 /* EEPROM chip select. */
-#define EE_CLK_HIGH 0x12
-#define EE_CLK_LOW 0x16
-#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */
-#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
-
-/* The EEPROM commands include the always-set leading bit. */
-#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
-#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
-#define EE_ERASE(offset) (((7 << 6) + (offset)) << 17)
-#define EE_CMD_SIZE 27 /* The command+address+data size. */
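The command encodings above pack a 93C46-style serial frame (start bit, 2-bit opcode, 6-bit address) into the top of a 27-clock shift sequence; during the 17 trailing zero clocks the EEPROM shifts its data word back out on DO. A small standalone sketch that prints the bit sequence eeprom_op() would clock out, illustrative only and assuming the standard 93C46 framing:

    #include <stdio.h>

    #define EE_READ(offset)  (((6 << 6) + (offset)) << 17)
    #define EE_CMD_SIZE      27

    int main(void)
    {
            unsigned int cmd = EE_READ(5);
            int bit;

            /* eeprom_op() shifts bits out MSB-first, bit 26 down to 0. */
            for (bit = EE_CMD_SIZE - 1; bit >= 0; bit--)
                    putchar(cmd & (1u << bit) ? '1' : '0');
            putchar('\n');
            /* Prints a leading 0 clock, start bit 1, opcode 10 (read),
             * address 000101 (= 5), then 17 zero clocks during which
             * the EEPROM drives the 16 data bits onto DO.
             */
            return 0;
    }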
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 2c1a0c21af8d..aed4cf852091 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -72,7 +72,8 @@ enum mac_version {
RTL_GIGA_MAC_VER_70,
RTL_GIGA_MAC_VER_80,
RTL_GIGA_MAC_NONE,
- RTL_GIGA_MAC_VER_LAST = RTL_GIGA_MAC_NONE - 1
+ RTL_GIGA_MAC_VER_LAST = RTL_GIGA_MAC_NONE - 1,
+ RTL_GIGA_MAC_VER_EXTENDED
};
struct rtl8169_private;
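RTL_GIGA_MAC_VER_EXTENDED is deliberately placed after RTL_GIGA_MAC_NONE so that RTL_GIGA_MAC_VER_LAST keeps its value. As the r8169_main.c hunks below show, a first-stage XID match on this marker is meant to redirect the lookup into rtl_chip_infos_extended, keyed on an extended id read from TX_CONFIG_V2. A hedged sketch of how the two stages presumably chain (the actual call site is outside the hunks shown here):

    /* Illustrative only; hypothetical glue, not from the patch. */
    const struct rtl_chip_info *ci = rtl8169_get_chip_version(xid, gmii);

    if (ci->mac_version == RTL_GIGA_MAC_VER_EXTENDED)
            ci = rtl8169_get_extended_chip_version(xid2);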
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c
index bf055078a855..6dff3d94793e 100644
--- a/drivers/net/ethernet/realtek/r8169_firmware.c
+++ b/drivers/net/ethernet/realtek/r8169_firmware.c
@@ -68,7 +68,7 @@ static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw)
if (size > (fw->size - start) / FW_OPCODE_SIZE)
return false;
- strscpy(rtl_fw->version, fw_info->version, RTL_VER_SIZE);
+ strscpy(rtl_fw->version, fw_info->version);
pa->code = (__le32 *)(fw->data + start);
pa->size = size;
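The two-argument strscpy() used here is the macro form that derives the destination size itself via sizeof(), so it only compiles when the destination is a true array rather than a pointer. A short before/after sketch using the names from this file:

    char version[RTL_VER_SIZE];     /* destination must be an array */

    /* Old three-argument form: the size is passed explicitly. */
    strscpy(version, fw_info->version, RTL_VER_SIZE);

    /* Two-argument form: sizeof(version) is applied inside the macro,
     * removing the chance of passing a mismatched size.
     */
    strscpy(version, fw_info->version);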
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 755083852eef..2f7d9809c373 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -31,6 +31,7 @@
#include <linux/unaligned.h>
#include <net/ip6_checksum.h>
#include <net/netdev_queues.h>
+#include <net/phy/realtek_phy.h>
#include "r8169.h"
#include "r8169_firmware.h"
@@ -95,8 +96,8 @@
#define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN)
static const struct rtl_chip_info {
- u16 mask;
- u16 val;
+ u32 mask;
+ u32 val;
enum mac_version mac_version;
const char *name;
const char *fw_name;
@@ -205,10 +206,21 @@ static const struct rtl_chip_info {
{ 0xfc8, 0x040, RTL_GIGA_MAC_VER_03, "RTL8110s" },
{ 0xfc8, 0x008, RTL_GIGA_MAC_VER_02, "RTL8169s" },
+	/* extended chip version */
+ { 0x7cf, 0x7c8, RTL_GIGA_MAC_VER_EXTENDED },
+
/* Catch-all */
{ 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
+static const struct rtl_chip_info rtl_chip_infos_extended[] = {
+ { 0x7fffffff, 0x00000000, RTL_GIGA_MAC_VER_64, "RTL9151AS",
+ FIRMWARE_9151A_1},
+
+ /* Catch-all */
+ { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
+};
+
static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x2502) },
{ PCI_VDEVICE(REALTEK, 0x2600) },
@@ -255,6 +267,8 @@ enum rtl_registers {
IntrStatus = 0x3e,
TxConfig = 0x40,
+ /* Extended chip version id */
+ TX_CONFIG_V2 = 0x60b0,
#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
@@ -312,6 +326,15 @@ enum rtl_registers {
IBIMR0 = 0xfa,
IBISR0 = 0xfb,
FuncForceEvent = 0xfc,
+
+ ALDPS_LTR = 0xe0a2,
+ LTR_OBFF_LOCK = 0xe032,
+ LTR_SNOOP = 0xe034,
+
+#define ALDPS_LTR_EN BIT(0)
+#define LTR_OBFF_LOCK_EN BIT(0)
+#define LINK_SPEED_CHANGE_EN BIT(14)
+#define LTR_SNOOP_EN GENMASK(15, 14)
};
enum rtl8168_8101_registers {
@@ -397,6 +420,8 @@ enum rtl8168_registers {
#define PWM_EN (1 << 22)
#define RXDV_GATED_EN (1 << 19)
#define EARLY_TALLY_EN (1 << 16)
+ COMBO_LTR_EXTEND = 0xb6,
+#define COMBO_LTR_EXTEND_EN BIT(0)
};
enum rtl8125_registers {
@@ -733,6 +758,7 @@ struct rtl8169_private {
unsigned supports_gmii:1;
unsigned aspm_manageable:1;
unsigned dash_enabled:1;
+ bool sfp_mode:1;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
@@ -1097,6 +1123,10 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
if (rtl_ocp_reg_failure(reg))
return 0;
+ /* Return dummy MII_PHYSID2 in SFP mode to match SFP PHY driver */
+ if (tp->sfp_mode && reg == (OCP_STD_PHY_BASE + 2 * MII_PHYSID2))
+ return PHY_ID_RTL_DUMMY_SFP & 0xffff;
+
RTL_W32(tp, GPHY_OCP, reg << 15);
return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
@@ -1154,6 +1184,46 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
}
+static void r8127_sfp_sds_phy_reset(struct rtl8169_private *tp)
+{
+ RTL_W8(tp, 0x2350, RTL_R8(tp, 0x2350) & ~BIT(0));
+ udelay(1);
+
+ RTL_W16(tp, 0x233a, 0x801f);
+ RTL_W8(tp, 0x2350, RTL_R8(tp, 0x2350) | BIT(0));
+ usleep_range(10, 20);
+}
+
+static void r8127_sfp_init_10g(struct rtl8169_private *tp)
+{
+ int val;
+
+ r8127_sfp_sds_phy_reset(tp);
+
+ RTL_W16(tp, 0x233a, 0x801a);
+ RTL_W16(tp, 0x233e, (RTL_R16(tp, 0x233e) & ~0x3003) | 0x1000);
+
+ r8168_phy_ocp_write(tp, 0xc40a, 0x0000);
+ r8168_phy_ocp_write(tp, 0xc466, 0x0003);
+ r8168_phy_ocp_write(tp, 0xc808, 0x0000);
+ r8168_phy_ocp_write(tp, 0xc80a, 0x0000);
+
+ val = r8168_phy_ocp_read(tp, 0xc804);
+ r8168_phy_ocp_write(tp, 0xc804, (val & ~0x000f) | 0x000c);
+}
+
+static void rtl_sfp_init(struct rtl8169_private *tp)
+{
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ r8127_sfp_init_10g(tp);
+}
+
+static void rtl_sfp_reset(struct rtl8169_private *tp)
+{
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ r8127_sfp_sds_phy_reset(tp);
+}
+
/* Work around a hw issue with RTL8168g PHY, the quirk disables
* PHY MCU interrupts before PHY power-down.
*/
@@ -1513,6 +1583,10 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
return RTL_DASH_EP;
case RTL_GIGA_MAC_VER_66:
return RTL_DASH_25_BP;
+ case RTL_GIGA_MAC_VER_80:
+ return (tp->pci_dev->revision == 0x04)
+ ? RTL_DASH_25_BP
+ : RTL_DASH_NONE;
default:
return RTL_DASH_NONE;
}
@@ -1710,12 +1784,11 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl_fw *rtl_fw = tp->rtl_fw;
- strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME);
+ strscpy(info->bus_info, pci_name(tp->pci_dev));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
if (rtl_fw)
- strscpy(info->fw_version, rtl_fw->version,
- sizeof(info->fw_version));
+ strscpy(info->fw_version, rtl_fw->version);
}
static int rtl8169_get_regs_len(struct net_device *dev)
@@ -2308,6 +2381,36 @@ static void rtl8169_get_eth_ctrl_stats(struct net_device *dev,
le32_to_cpu(tp->counters->rx_unknown_opcode);
}
+static int rtl8169_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(ndev);
+ struct phy_device *phydev = tp->phydev;
+ int duplex = cmd->base.duplex;
+ int speed = cmd->base.speed;
+
+ if (!tp->sfp_mode)
+ return phy_ethtool_ksettings_set(phydev, cmd);
+
+ if (cmd->base.autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (!phy_check_valid(speed, duplex, phydev->supported))
+ return -EINVAL;
+
+ mutex_lock(&phydev->lock);
+
+ phydev->autoneg = AUTONEG_DISABLE;
+ phydev->speed = speed;
+ phydev->duplex = duplex;
+
+ rtl_sfp_init(tp);
+
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
static const struct ethtool_ops rtl8169_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -2327,7 +2430,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_eee = rtl8169_get_eee,
.set_eee = rtl8169_set_eee,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .set_link_ksettings = rtl8169_set_link_ksettings,
.get_ringparam = rtl8169_get_ringparam,
.get_pause_stats = rtl8169_get_pause_stats,
.get_pauseparam = rtl8169_get_pauseparam,
@@ -2336,7 +2439,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_eth_ctrl_stats = rtl8169_get_eth_ctrl_stats,
};
-static const struct rtl_chip_info *rtl8169_get_chip_version(u16 xid, bool gmii)
+static const struct rtl_chip_info *rtl8169_get_chip_version(u32 xid, bool gmii)
{
/* Chips combining a 1Gbps MAC with a 100Mbps PHY */
static const struct rtl_chip_info rtl8106eus_info = {
@@ -2362,6 +2465,15 @@ static const struct rtl_chip_info *rtl8169_get_chip_version(u16 xid, bool gmii)
return p;
}
+static const struct rtl_chip_info *rtl8169_get_extended_chip_version(u32 xid2)
+{
+ const struct rtl_chip_info *p = rtl_chip_infos_extended;
+
+ while ((xid2 & p->mask) != p->val)
+ p++;
+ return p;
+}
+
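The extended lookup relies on the same sentinel-terminated table convention as the existing rtl8169_get_chip_version(): the last entry has a mask of zero, so it matches any XID and the walk always terminates. A standalone sketch of the idiom, with hypothetical table contents:

struct chip_info {
	u32 mask;
	u32 val;
	int mac_version;
};

/* Hypothetical entries; the zero-mask sentinel matches everything. */
static const struct chip_info table[] = {
	{ 0x7cf00000, 0x68900000, 80 },
	{ 0x00000000, 0x00000000, 0 },	/* RTL_GIGA_MAC_NONE */
};

static const struct chip_info *lookup(u32 xid)
{
	const struct chip_info *p = table;

	while ((xid & p->mask) != p->val)
		p++;
	return p;
}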
static void rtl_release_firmware(struct rtl8169_private *tp)
{
if (tp->rtl_fw) {
@@ -2435,6 +2547,9 @@ static void rtl8169_init_phy(struct rtl8169_private *tp)
tp->pci_dev->subsystem_device == 0xe000)
phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
+ if (tp->sfp_mode)
+ rtl_sfp_init(tp);
+
/* We may have called phy_speed_down before */
phy_speed_up(tp->phydev);
@@ -2915,6 +3030,92 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
}
}
+static void rtl_enable_ltr(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_80:
+ r8168_mac_ocp_write(tp, 0xcdd0, 0x9003);
+ r8168_mac_ocp_modify(tp, LTR_SNOOP, 0x0000, LTR_SNOOP_EN);
+ r8168_mac_ocp_modify(tp, ALDPS_LTR, 0x0000, ALDPS_LTR_EN);
+ r8168_mac_ocp_write(tp, 0xcdd2, 0x8c09);
+ r8168_mac_ocp_write(tp, 0xcdd8, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd4, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdda, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd6, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcddc, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcde8, 0x887a);
+ r8168_mac_ocp_write(tp, 0xcdea, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdec, 0x8c09);
+ r8168_mac_ocp_write(tp, 0xcdee, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdf0, 0x8a62);
+ r8168_mac_ocp_write(tp, 0xcdf2, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdf4, 0x883e);
+ r8168_mac_ocp_write(tp, 0xcdf6, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdf8, 0x8849);
+ r8168_mac_ocp_write(tp, 0xcdfa, 0x9003);
+ r8168_mac_ocp_modify(tp, LTR_OBFF_LOCK, 0x0000, LINK_SPEED_CHANGE_EN);
+ break;
+ case RTL_GIGA_MAC_VER_70:
+ r8168_mac_ocp_write(tp, 0xcdd0, 0x9003);
+ r8168_mac_ocp_modify(tp, LTR_SNOOP, 0x0000, LTR_SNOOP_EN);
+ r8168_mac_ocp_modify(tp, ALDPS_LTR, 0x0000, ALDPS_LTR_EN);
+ r8168_mac_ocp_write(tp, 0xcdd2, 0x8c09);
+ r8168_mac_ocp_write(tp, 0xcdd8, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd4, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdda, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd6, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcddc, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcde8, 0x887a);
+ r8168_mac_ocp_write(tp, 0xcdea, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdec, 0x8c09);
+ r8168_mac_ocp_write(tp, 0xcdee, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdf0, 0x8a62);
+ r8168_mac_ocp_write(tp, 0xcdf2, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdf4, 0x883e);
+ r8168_mac_ocp_write(tp, 0xcdf6, 0x9003);
+ r8168_mac_ocp_modify(tp, LTR_OBFF_LOCK, 0x0000, LINK_SPEED_CHANGE_EN);
+ break;
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ r8168_mac_ocp_write(tp, 0xcdd0, 0x9003);
+ r8168_mac_ocp_modify(tp, LTR_SNOOP, 0x0000, LTR_SNOOP_EN);
+ r8168_mac_ocp_modify(tp, ALDPS_LTR, 0x0000, ALDPS_LTR_EN);
+ r8168_mac_ocp_write(tp, 0xcdd2, 0x889c);
+ r8168_mac_ocp_write(tp, 0xcdd8, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd4, 0x8c30);
+ r8168_mac_ocp_write(tp, 0xcdda, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd6, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcddc, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcde8, 0x883e);
+ r8168_mac_ocp_write(tp, 0xcdea, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdec, 0x889c);
+ r8168_mac_ocp_write(tp, 0xcdee, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdf0, 0x8C09);
+ r8168_mac_ocp_write(tp, 0xcdf2, 0x9003);
+ r8168_mac_ocp_modify(tp, LTR_OBFF_LOCK, 0x0000, LINK_SPEED_CHANGE_EN);
+ break;
+ case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_52:
+ r8168_mac_ocp_modify(tp, ALDPS_LTR, 0x0000, ALDPS_LTR_EN);
+ RTL_W8(tp, COMBO_LTR_EXTEND, RTL_R8(tp, COMBO_LTR_EXTEND) | COMBO_LTR_EXTEND_EN);
+ fallthrough;
+ case RTL_GIGA_MAC_VER_51:
+ r8168_mac_ocp_modify(tp, LTR_SNOOP, 0x0000, LTR_SNOOP_EN);
+ r8168_mac_ocp_write(tp, 0xe02c, 0x1880);
+ r8168_mac_ocp_write(tp, 0xe02e, 0x4880);
+ r8168_mac_ocp_write(tp, 0xcdd8, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdda, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcddc, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd2, 0x883c);
+ r8168_mac_ocp_write(tp, 0xcdd4, 0x8c12);
+ r8168_mac_ocp_write(tp, 0xcdd6, 0x9003);
+ break;
+ default:
+ return;
+ }
+ /* chip can trigger LTR */
+ r8168_mac_ocp_modify(tp, LTR_OBFF_LOCK, 0x0003, LTR_OBFF_LOCK_EN);
+}
+
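rtl_enable_ltr() leans heavily on the read-modify-write OCP helper; a hedged sketch of what r8168_mac_ocp_modify() amounts to (the real helper routes the access through the MAC OCP register window):

/* Sketch only: illustrates the clear/set semantics, not the exact body. */
static void mac_ocp_modify_sketch(struct rtl8169_private *tp, u32 reg,
				  u16 clear, u16 set)
{
	u16 val = r8168_mac_ocp_read(tp, reg);

	r8168_mac_ocp_write(tp, reg, (val & ~clear) | set);
}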
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
u8 val8;
@@ -2943,6 +3144,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
break;
}
+ rtl_enable_ltr(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
@@ -4800,6 +5002,10 @@ static void rtl8169_down(struct rtl8169_private *tp)
phy_stop(tp->phydev);
+ /* Reset SerDes PHY to bring down fiber link */
+ if (tp->sfp_mode)
+ rtl_sfp_reset(tp);
+
rtl8169_update_counters(tp);
pci_clear_master(tp->pci_dev);
@@ -5389,11 +5595,12 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_chip_info *chip;
+ const char *ext_xid_str = "";
struct rtl8169_private *tp;
int jumbo_max, region, rc;
struct net_device *dev;
u32 txconfig;
- u16 xid;
+ u32 xid;
dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
if (!dev)
@@ -5441,10 +5648,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
chip = rtl8169_get_chip_version(xid, tp->supports_gmii);
+
+ if (chip->mac_version == RTL_GIGA_MAC_VER_EXTENDED) {
+ ext_xid_str = "ext";
+ xid = RTL_R32(tp, TX_CONFIG_V2);
+ chip = rtl8169_get_extended_chip_version(xid);
+ }
if (chip->mac_version == RTL_GIGA_MAC_NONE)
return dev_err_probe(&pdev->dev, -ENODEV,
- "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n",
- xid);
+ "unknown chip %sXID %x, contact r8169 maintainers (see MAINTAINERS file)\n",
+ ext_xid_str, xid);
tp->mac_version = chip->mac_version;
tp->fw_name = chip->fw_name;
@@ -5459,13 +5672,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
tp->aspm_manageable = !rc;
- /* Fiber mode on RTL8127AF isn't supported */
if (rtl_is_8125(tp)) {
u16 data = r8168_mac_ocp_read(tp, 0xd006);
if ((data & 0xff) == 0x07)
- return dev_err_probe(&pdev->dev, -ENODEV,
- "Fiber mode not supported\n");
+ tp->sfp_mode = true;
}
tp->dash_type = rtl_get_dash_type(tp);
@@ -5585,8 +5796,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->leds = rtl8168_init_leds(dev);
}
- netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
- chip->name, dev->dev_addr, xid, tp->irq);
+ netdev_info(dev, "%s, %pM, %sXID %x, IRQ %d\n",
+ chip->name, dev->dev_addr, ext_xid_str, xid, tp->irq);
if (jumbo_max)
netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index d0979abd36de..27a6f0492097 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/slab.h>
#include "rcar_gen4_ptp.h"
@@ -23,6 +24,15 @@
#define PTPGPTPTM10_REG 0x0054
#define PTPGPTPTM20_REG 0x0058
+struct rcar_gen4_ptp_private {
+ void __iomem *addr;
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ spinlock_t lock; /* For multiple registers access */
+ s64 default_addend;
+ bool initialized;
+};
+
#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
@@ -168,7 +178,8 @@ int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv)
}
EXPORT_SYMBOL_GPL(rcar_gen4_ptp_unregister);
-struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev)
+struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev,
+ void __iomem *addr)
{
struct rcar_gen4_ptp_private *ptp;
@@ -178,10 +189,31 @@ struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev)
ptp->info = rcar_gen4_ptp_info;
+ ptp->addr = addr;
+
return ptp;
}
EXPORT_SYMBOL_GPL(rcar_gen4_ptp_alloc);
+int rcar_gen4_ptp_clock_index(struct rcar_gen4_ptp_private *priv)
+{
+ if (!priv->initialized)
+ return -1;
+
+ return ptp_clock_index(priv->clock);
+}
+EXPORT_SYMBOL_GPL(rcar_gen4_ptp_clock_index);
+
+void rcar_gen4_ptp_gettime64(struct rcar_gen4_ptp_private *priv,
+ struct timespec64 *ts)
+{
+ if (!priv->initialized)
+ return;
+
+ priv->info.gettime64(&priv->info, ts);
+}
+EXPORT_SYMBOL_GPL(rcar_gen4_ptp_gettime64);
+
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas R-Car Gen4 gPTP driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
index 9a9c232c854e..6abaa7cc6b77 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
@@ -7,19 +7,15 @@
#ifndef __RCAR_GEN4_PTP_H__
#define __RCAR_GEN4_PTP_H__
-#include <linux/ptp_clock_kernel.h>
-
-struct rcar_gen4_ptp_private {
- void __iomem *addr;
- struct ptp_clock *clock;
- struct ptp_clock_info info;
- spinlock_t lock; /* For multiple registers access */
- s64 default_addend;
- bool initialized;
-};
+struct rcar_gen4_ptp_private;
int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, u32 rate);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv);
-struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev);
+struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev,
+ void __iomem *addr);
+
+int rcar_gen4_ptp_clock_index(struct rcar_gen4_ptp_private *priv);
+void rcar_gen4_ptp_gettime64(struct rcar_gen4_ptp_private *priv,
+ struct timespec64 *ts);
#endif /* #ifndef __RCAR_GEN4_PTP_H__ */
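Hiding the struct behind a forward declaration is the standard opaque-pointer pattern: users manipulate the object only through exported accessors, so the layout can change without touching callers. A minimal sketch (names are illustrative):

/* header: callers see only a forward declaration */
struct foo;
int foo_index(struct foo *f);

/* implementation file: the layout stays private here */
struct foo {
	int index;
};

int foo_index(struct foo *f)
{
	return f->index;
}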
diff --git a/drivers/net/ethernet/renesas/rswitch_l2.c b/drivers/net/ethernet/renesas/rswitch_l2.c
index 4a69ec77d69c..9433cd8adced 100644
--- a/drivers/net/ethernet/renesas/rswitch_l2.c
+++ b/drivers/net/ethernet/renesas/rswitch_l2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
*
- * Copyright (C) 2025 Renesas Electronics Corporation
+ * Copyright (C) 2025 - 2026 Renesas Electronics Corporation
*/
#include <linux/err.h>
@@ -60,6 +60,7 @@ static void rswitch_update_l2_hw_learning(struct rswitch_private *priv)
static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
{
struct rswitch_device *rdev;
+ bool new_forwarding_offload;
unsigned int fwd_mask;
/* calculate fwd_mask with zeroes in bits corresponding to ports that
@@ -73,8 +74,9 @@ static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
}
rswitch_for_all_ports(priv, rdev) {
- if ((rdev_for_l2_offload(rdev) && rdev->forwarding_requested) ||
- rdev->forwarding_offloaded) {
+ new_forwarding_offload = (rdev_for_l2_offload(rdev) && rdev->forwarding_requested);
+
+ if (new_forwarding_offload || rdev->forwarding_offloaded) {
/* Update allowed offload destinations even for ports
* with L2 offload enabled earlier.
*
@@ -84,13 +86,10 @@ static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
priv->addr + FWPC2(rdev->port));
}
- if (rdev_for_l2_offload(rdev) &&
- rdev->forwarding_requested &&
- !rdev->forwarding_offloaded) {
+ if (new_forwarding_offload && !rdev->forwarding_offloaded)
rswitch_change_l2_hw_offloading(rdev, true, false);
- } else if (rdev->forwarding_offloaded) {
+ else if (!new_forwarding_offload && rdev->forwarding_offloaded)
rswitch_change_l2_hw_offloading(rdev, false, false);
- }
}
}
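Computing new_forwarding_offload once makes the state machine explicit; with N = new_forwarding_offload and O = rdev->forwarding_offloaded, the transitions above are:

/*
 * N=1, O=0: enable hardware forwarding offload
 * N=0, O=1: disable hardware forwarding offload
 * N == O  : no state change, though the FWPC2 destination mask is still
 *           refreshed whenever either flag is set
 */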
diff --git a/drivers/net/ethernet/renesas/rswitch_main.c b/drivers/net/ethernet/renesas/rswitch_main.c
index e14b21148f27..433eb2b00d10 100644
--- a/drivers/net/ethernet/renesas/rswitch_main.c
+++ b/drivers/net/ethernet/renesas/rswitch_main.c
@@ -1891,7 +1891,7 @@ static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts
{
struct rswitch_device *rdev = netdev_priv(ndev);
- info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
+ info->phc_index = rcar_gen4_ptp_clock_index(rdev->priv->ptp_priv);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
@@ -2150,17 +2150,16 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
if (attr)
priv->etha_no_runtime_change = true;
- priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
- if (!priv->ptp_priv)
- return -ENOMEM;
-
platform_set_drvdata(pdev, priv);
priv->pdev = pdev;
priv->addr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->addr))
return PTR_ERR(priv->addr);
- priv->ptp_priv->addr = priv->addr + RSWITCH_GPTP_OFFSET_S4;
+ priv->ptp_priv =
+ rcar_gen4_ptp_alloc(pdev, priv->addr + RSWITCH_GPTP_OFFSET_S4);
+ if (!priv->ptp_priv)
+ return -ENOMEM;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (ret < 0) {
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
index fdb1e7b7fb06..85052b47afb9 100644
--- a/drivers/net/ethernet/renesas/rtsn.c
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -104,13 +104,6 @@ static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable)
}
}
-static void rtsn_get_timestamp(struct rtsn_private *priv, struct timespec64 *ts)
-{
- struct rcar_gen4_ptp_private *ptp_priv = priv->ptp_priv;
-
- ptp_priv->info.gettime64(&ptp_priv->info, ts);
-}
-
static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
{
struct rtsn_private *priv = netdev_priv(ndev);
@@ -133,7 +126,7 @@ static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
struct skb_shared_hwtstamps shhwtstamps;
struct timespec64 ts;
- rtsn_get_timestamp(priv, &ts);
+ rcar_gen4_ptp_gettime64(priv->ptp_priv, &ts);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
skb_tstamp_tx(skb, &shhwtstamps);
@@ -1197,7 +1190,7 @@ static int rtsn_get_ts_info(struct net_device *ndev,
{
struct rtsn_private *priv = netdev_priv(ndev);
- info->phc_index = ptp_clock_index(priv->ptp_priv->clock);
+ info->phc_index = rcar_gen4_ptp_clock_index(priv->ptp_priv);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
@@ -1227,6 +1220,7 @@ static int rtsn_probe(struct platform_device *pdev)
{
struct rtsn_private *priv;
struct net_device *ndev;
+ void __iomem *ptpaddr;
struct resource *res;
int ret;
@@ -1239,12 +1233,6 @@ static int rtsn_probe(struct platform_device *pdev)
priv->pdev = pdev;
priv->ndev = ndev;
- priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
- if (!priv->ptp_priv) {
- ret = -ENOMEM;
- goto error_free;
- }
-
spin_lock_init(&priv->lock);
platform_set_drvdata(pdev, priv);
@@ -1288,9 +1276,15 @@ static int rtsn_probe(struct platform_device *pdev)
goto error_free;
}
- priv->ptp_priv->addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->ptp_priv->addr)) {
- ret = PTR_ERR(priv->ptp_priv->addr);
+ ptpaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ptpaddr)) {
+ ret = PTR_ERR(ptpaddr);
+ goto error_free;
+ }
+
+ priv->ptp_priv = rcar_gen4_ptp_alloc(pdev, ptpaddr);
+ if (!priv->ptp_priv) {
+ ret = -ENOMEM;
goto error_free;
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
index 298a7402e39c..66e6de64626c 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
@@ -25,7 +25,7 @@ static void sxgbe_mtl_init(void __iomem *ioaddr, unsigned int etsalg,
reg_val = readl(ioaddr + SXGBE_MTL_OP_MODE_REG);
reg_val &= ETS_RST;
- /* ETS Algorith */
+ /* ETS Algorithm */
switch (etsalg & SXGBE_MTL_OPMODE_ESTMASK) {
case ETS_WRR:
reg_val &= ETS_WRR;
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 6c3b74000d3b..05dc7b10c885 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -54,6 +54,7 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_ethtool_stats = efx_ethtool_get_stats,
.get_rxnfc = efx_ethtool_get_rxnfc,
.set_rxnfc = efx_ethtool_set_rxnfc,
+ .get_rx_ring_count = efx_ethtool_get_rx_ring_count,
.reset = efx_ethtool_reset,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 18fe5850a978..362388754a29 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -261,6 +261,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.reset = efx_ethtool_reset,
.get_rxnfc = efx_ethtool_get_rxnfc,
.set_rxnfc = efx_ethtool_set_rxnfc,
+ .get_rx_ring_count = efx_ethtool_get_rx_ring_count,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.rxfh_per_ctx_fields = true,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index fa303e171d98..2fc42b1a2bfb 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -850,6 +850,13 @@ out_setdata_unlock:
return rc;
}
+u32 efx_ethtool_get_rx_ring_count(struct net_device *net_dev)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ return efx->n_rx_channels;
+}
+
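The new get_rx_ring_count op lets the ethtool core obtain the ring count directly instead of each driver answering ETHTOOL_GRXRINGS in get_rxnfc(). A hedged sketch of how a caller could resolve it (helper name and fallback shape are illustrative, not the exact core code):

static int rx_ring_count_sketch(struct net_device *dev)
{
	const struct ethtool_ops *ops = dev->ethtool_ops;
	struct ethtool_rxnfc info = { .cmd = ETHTOOL_GRXRINGS };
	int rc;

	if (ops->get_rx_ring_count)
		return ops->get_rx_ring_count(dev);

	/* legacy path: the driver answers GRXRINGS via get_rxnfc() */
	rc = ops->get_rxnfc(dev, &info, NULL);
	return rc < 0 ? rc : info.data;
}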
int efx_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -858,10 +865,6 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev,
s32 rc = 0;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = efx->n_rx_channels;
- return 0;
-
case ETHTOOL_GRXCLSRLCNT:
info->data = efx_filter_get_rx_id_limit(efx);
if (info->data == 0)
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index 24db4fccbe78..f96db4253454 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -40,6 +40,7 @@ int efx_ethtool_set_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam);
int efx_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs);
+u32 efx_ethtool_get_rx_ring_count(struct net_device *net_dev);
int efx_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info);
u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 27d1cd6f24ca..049364031545 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -974,6 +974,13 @@ ef4_ethtool_get_rxfh_fields(struct net_device *net_dev,
return 0;
}
+static u32 ef4_ethtool_get_rx_ring_count(struct net_device *net_dev)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ return efx->n_rx_channels;
+}
+
static int
ef4_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
@@ -981,10 +988,6 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev,
struct ef4_nic *efx = netdev_priv(net_dev);
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = efx->n_rx_channels;
- return 0;
-
case ETHTOOL_GRXCLSRLCNT:
info->data = ef4_filter_get_rx_id_limit(efx);
if (info->data == 0)
@@ -1348,6 +1351,7 @@ const struct ethtool_ops ef4_ethtool_ops = {
.reset = ef4_ethtool_reset,
.get_rxnfc = ef4_ethtool_get_rxnfc,
.set_rxnfc = ef4_ethtool_set_rxnfc,
+ .get_rx_ring_count = ef4_ethtool_get_rx_ring_count,
.get_rxfh_indir_size = ef4_ethtool_get_rxfh_indir_size,
.get_rxfh = ef4_ethtool_get_rxfh,
.set_rxfh = ef4_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 9fa5c4c713ab..ec3b2df43b68 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -156,9 +156,9 @@ enum {
* @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
* @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
* @pf_index: The number for this PF, or the parent PF if this is a VF
-#ifdef CONFIG_SFC_SRIOV
- * @vf: Pointer to VF data structure
-#endif
+ * @port_id: Ethernet address of owning PF, used for phys_port_id
 * @vf_index: The number for this VF, or 0xFFFF if this is a PF
+ * @vf: for a PF, array of VF data structures indexed by VF's @vf_index
* @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero
* @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock.
* @vlan_lock: Lock to serialize access to vlan_list.
@@ -166,6 +166,7 @@ enum {
* @udp_tunnels_dirty: flag indicating a reboot occurred while pushing
* @udp_tunnels to hardware and thus the push must be re-done.
* @udp_tunnels_lock: Serialises writes to @udp_tunnels and @udp_tunnels_dirty.
+ * @licensed_features: Flags for licensed firmware features.
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index 8c3ebd0617fb..36feedffe444 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -261,6 +261,7 @@ const struct ethtool_ops efx_siena_ethtool_ops = {
.reset = efx_siena_ethtool_reset,
.get_rxnfc = efx_siena_ethtool_get_rxnfc,
.set_rxnfc = efx_siena_ethtool_set_rxnfc,
+ .get_rx_ring_count = efx_siena_ethtool_get_rx_ring_count,
.get_rxfh_indir_size = efx_siena_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size,
.get_rxfh = efx_siena_ethtool_get_rxfh,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index 47cd16a113cf..c56e0b54d854 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -841,6 +841,13 @@ out_setdata:
return 0;
}
+u32 efx_siena_ethtool_get_rx_ring_count(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx->n_rx_channels;
+}
+
int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -849,10 +856,6 @@ int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
s32 rc = 0;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = efx->n_rx_channels;
- return 0;
-
case ETHTOOL_GRXCLSRLCNT:
info->data = efx_filter_get_rx_id_limit(efx);
if (info->data == 0)
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h
index 278d69e920d9..7b445b0ba38a 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h
@@ -37,6 +37,7 @@ int efx_siena_ethtool_set_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam);
int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs);
+u32 efx_siena_ethtool_get_rx_ring_count(struct net_device *net_dev);
int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info);
u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index b461918dc5f4..d85ac8cbeb00 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -79,10 +79,6 @@
#include "sis900.h"
#define SIS900_MODULE_NAME "sis900"
-#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006"
-
-static const char version[] =
- KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
static int max_interrupt_work = 40;
static int multicast_filter_limit = 128;
@@ -442,13 +438,6 @@ static int sis900_probe(struct pci_dev *pci_dev,
const char *card_name = card_names[pci_id->driver_data];
const char *dev_name = pci_name(pci_dev);
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
/* setup various bits in PCI command register */
ret = pcim_enable_device(pci_dev);
if(ret) return ret;
@@ -2029,7 +2018,6 @@ static void sis900_get_drvinfo(struct net_device *net_dev,
struct sis900_private *sis_priv = netdev_priv(net_dev);
strscpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
- strscpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
strscpy(info->bus_info, pci_name(sis_priv->pci_dev),
sizeof(info->bus_info));
}
@@ -2567,21 +2555,4 @@ static struct pci_driver sis900_pci_driver = {
.driver.pm = &sis900_pm_ops,
};
-static int __init sis900_init_module(void)
-{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- printk(version);
-#endif
-
- return pci_register_driver(&sis900_pci_driver);
-}
-
-static void __exit sis900_cleanup_module(void)
-{
- pci_unregister_driver(&sis900_pci_driver);
-}
-
-module_init(sis900_init_module);
-module_exit(sis900_cleanup_module);
-
+module_pci_driver(sis900_pci_driver);
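module_pci_driver() generates essentially the init/exit boilerplate removed above; the macro expands to roughly:

static int __init sis900_pci_driver_init(void)
{
	return pci_register_driver(&sis900_pci_driver);
}
module_init(sis900_pci_driver_init);

static void __exit sis900_pci_driver_exit(void)
{
	pci_unregister_driver(&sis900_pci_driver);
}
module_exit(sis900_pci_driver_exit);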
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 45f703fe0e5a..389659db06a8 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -26,8 +26,6 @@
*/
#define DRV_NAME "epic100"
-#define DRV_VERSION "2.1"
-#define DRV_RELDATE "Sept 11, 2006"
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
@@ -89,12 +87,6 @@ static int rx_copybreak;
#include <linux/uaccess.h>
#include <asm/byteorder.h>
-/* These identify the driver base version and may not be removed. */
-static char version[] =
-DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
-static char version2[] =
-" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");
@@ -329,11 +321,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
void *ring_space;
dma_addr_t ring_dma;
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- pr_info_once("%s%s\n", version, version2);
-#endif
-
card_idx++;
ret = pci_enable_device(pdev);
@@ -1393,7 +1380,6 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
struct epic_private *np = netdev_priv(dev);
strscpy(info->driver, DRV_NAME, sizeof(info->driver));
- strscpy(info->version, DRV_VERSION, sizeof(info->version));
strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -1564,23 +1550,4 @@ static struct pci_driver epic_driver = {
.driver.pm = &epic_pm_ops,
};
-
-static int __init epic_init (void)
-{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- pr_info("%s%s\n", version, version2);
-#endif
-
- return pci_register_driver(&epic_driver);
-}
-
-
-static void __exit epic_cleanup (void)
-{
- pci_unregister_driver (&epic_driver);
-}
-
-
-module_init(epic_init);
-module_exit(epic_cleanup);
+module_pci_driver(epic_driver);
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 66b3549636f8..4700998c4837 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -586,10 +586,8 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
skb = priv->rx.desc[entry].skbs;
if (!skb) {
skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
- if (!skb) {
- netdev_err(ndev, "can't allocate skb for Rx\n");
+ if (!skb)
return -ENOMEM;
- }
skb->data += AVE_FRAME_HEADROOM;
skb->tail += AVE_FRAME_HEADROOM;
}
diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c
index b49c4708bf9e..dab0772c5b9d 100644
--- a/drivers/net/ethernet/spacemit/k1_emac.c
+++ b/drivers/net/ethernet/spacemit/k1_emac.c
@@ -47,8 +47,6 @@
#define EMAC_RX_FRAMES 64
#define EMAC_RX_COAL_TIMEOUT (600 * 312)
-#define DEFAULT_FC_PAUSE_TIME 0xffff
-#define DEFAULT_FC_FIFO_HIGH 1600
#define DEFAULT_TX_ALMOST_FULL 0x1f8
#define DEFAULT_TX_THRESHOLD 1518
#define DEFAULT_RX_THRESHOLD 12
@@ -133,9 +131,6 @@ struct emac_priv {
u32 tx_delay;
u32 rx_delay;
- bool flow_control_autoneg;
- u8 flow_control;
-
/* Softirq-safe, hold while touching hardware statistics */
spinlock_t stats_lock;
};
@@ -180,9 +175,7 @@ static void emac_set_mac_addr_reg(struct emac_priv *priv,
static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
{
- /* We use only one address, so set the same for flow control as well */
emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
- emac_set_mac_addr_reg(priv, addr, MAC_FC_SOURCE_ADDRESS_HIGH);
}
static void emac_reset_hw(struct emac_priv *priv)
@@ -201,8 +194,6 @@ static void emac_reset_hw(struct emac_priv *priv)
static void emac_init_hw(struct emac_priv *priv)
{
- /* Destination address for 802.3x Ethernet flow control */
- u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
u32 rxirq = 0, dma = 0, frame_sz;
regmap_set_bits(priv->regmap_apmu,
@@ -237,12 +228,6 @@ static void emac_init_hw(struct emac_priv *priv)
emac_wr(priv, MAC_TRANSMIT_JABBER_SIZE, frame_sz);
emac_wr(priv, MAC_RECEIVE_JABBER_SIZE, frame_sz);
- /* Configure flow control (enabled in emac_adjust_link() later) */
- emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH);
- emac_wr(priv, MAC_FC_PAUSE_HIGH_THRESHOLD, DEFAULT_FC_FIFO_HIGH);
- emac_wr(priv, MAC_FC_HIGH_PAUSE_TIME, DEFAULT_FC_PAUSE_TIME);
- emac_wr(priv, MAC_FC_PAUSE_LOW_THRESHOLD, 0);
-
/* RX IRQ mitigation */
rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
EMAC_RX_FRAMES);
@@ -1027,57 +1012,6 @@ static int emac_mdio_init(struct emac_priv *priv)
return ret;
}
-static void emac_set_tx_fc(struct emac_priv *priv, bool enable)
-{
- u32 val;
-
- val = emac_rd(priv, MAC_FC_CONTROL);
-
- FIELD_MODIFY(MREGBIT_FC_GENERATION_ENABLE, &val, enable);
- FIELD_MODIFY(MREGBIT_AUTO_FC_GENERATION_ENABLE, &val, enable);
-
- emac_wr(priv, MAC_FC_CONTROL, val);
-}
-
-static void emac_set_rx_fc(struct emac_priv *priv, bool enable)
-{
- u32 val = emac_rd(priv, MAC_FC_CONTROL);
-
- FIELD_MODIFY(MREGBIT_FC_DECODE_ENABLE, &val, enable);
-
- emac_wr(priv, MAC_FC_CONTROL, val);
-}
-
-static void emac_set_fc(struct emac_priv *priv, u8 fc)
-{
- emac_set_tx_fc(priv, fc & FLOW_CTRL_TX);
- emac_set_rx_fc(priv, fc & FLOW_CTRL_RX);
- priv->flow_control = fc;
-}
-
-static void emac_set_fc_autoneg(struct emac_priv *priv)
-{
- struct phy_device *phydev = priv->ndev->phydev;
- u32 local_adv, remote_adv;
- u8 fc;
-
- local_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
-
- remote_adv = 0;
-
- if (phydev->pause)
- remote_adv |= LPA_PAUSE_CAP;
-
- if (phydev->asym_pause)
- remote_adv |= LPA_PAUSE_ASYM;
-
- fc = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
-
- priv->flow_control_autoneg = true;
-
- emac_set_fc(priv, fc);
-}
-
/*
* Even though this MAC supports gigabit operation, it only provides 32-bit
* statistics counters. The most overflow-prone counters are the "bytes" ones,
@@ -1448,42 +1382,6 @@ static void emac_ethtool_get_regs(struct net_device *dev,
emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
}
-static void emac_get_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pause)
-{
- struct emac_priv *priv = netdev_priv(dev);
-
- pause->autoneg = priv->flow_control_autoneg;
- pause->tx_pause = !!(priv->flow_control & FLOW_CTRL_TX);
- pause->rx_pause = !!(priv->flow_control & FLOW_CTRL_RX);
-}
-
-static int emac_set_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pause)
-{
- struct emac_priv *priv = netdev_priv(dev);
- u8 fc = 0;
-
- if (!netif_running(dev))
- return -ENETDOWN;
-
- priv->flow_control_autoneg = pause->autoneg;
-
- if (pause->autoneg) {
- emac_set_fc_autoneg(priv);
- } else {
- if (pause->tx_pause)
- fc |= FLOW_CTRL_TX;
-
- if (pause->rx_pause)
- fc |= FLOW_CTRL_RX;
-
- emac_set_fc(priv, fc);
- }
-
- return 0;
-}
-
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1658,8 +1556,6 @@ static void emac_adjust_link(struct net_device *dev)
emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
- emac_set_fc_autoneg(priv);
-
/*
* Reschedule stats updates now that link is up. See comments in
* emac_stats_update().
@@ -1744,12 +1640,12 @@ static int emac_phy_connect(struct net_device *ndev)
goto err_node_put;
}
- phy_support_asym_pause(phydev);
-
phydev->mac_managed_pm = true;
emac_update_delay_line(priv);
+ phy_attached_info(phydev);
+
err_node_put:
of_node_put(np);
return ret;
@@ -1915,9 +1811,6 @@ static const struct ethtool_ops emac_ethtool_ops = {
.get_sset_count = emac_get_sset_count,
.get_strings = emac_get_strings,
.get_ethtool_stats = emac_get_ethtool_stats,
-
- .get_pauseparam = emac_get_pauseparam,
- .set_pauseparam = emac_set_pauseparam,
};
static const struct net_device_ops emac_netdev_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 907fe2e927f0..07088d03dbab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -374,6 +374,15 @@ config DWMAC_LOONGSON
This selects the LOONGSON PCI bus support for the stmmac driver,
Support for ethernet controller on Loongson-2K1000 SoC and LS7A1000 bridge.
+config DWMAC_MOTORCOMM
+ tristate "Motorcomm PCI DWMAC support"
+ depends on PCI
+ select MOTORCOMM_PHY
+ select STMMAC_LIBPCI
+ help
+	  This enables the glue driver for Motorcomm DWMAC-based PCI Ethernet
+	  controllers. Currently, only the YT6801 is supported.
+
config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 7bf528731034..c9263987ef8d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -48,4 +48,5 @@ obj-$(CONFIG_STMMAC_LIBPCI) += stmmac_libpci.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
obj-$(CONFIG_DWMAC_LOONGSON) += dwmac-loongson.o
+obj-$(CONFIG_DWMAC_MOTORCOMM) += dwmac-motorcomm.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 49df46be3669..d26e8a063022 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -323,6 +323,10 @@ struct stmmac_safety_stats {
#define PHY_INTF_SEL_SMII 6
#define PHY_INTF_SEL_REVMII 7
+/* XGMAC uses a different encoding - from the AgileX5 documentation */
+#define PHY_INTF_GMII 0
+#define PHY_INTF_RGMII 1
+
/* MSI defines */
#define STMMAC_MSI_VEC_MAX 32
@@ -390,7 +394,6 @@ enum request_irq_err {
REQ_IRQ_ERR_SFTY,
REQ_IRQ_ERR_SFTY_UE,
REQ_IRQ_ERR_SFTY_CE,
- REQ_IRQ_ERR_LPI,
REQ_IRQ_ERR_WOL,
REQ_IRQ_ERR_MAC,
REQ_IRQ_ERR_NO,
@@ -512,6 +515,8 @@ struct dma_features {
unsigned int dbgmem;
/* Number of Policing Counters */
unsigned int pcsel;
+ /* Active PHY interface, PHY_INTF_SEL_xxx */
+ u8 actphyif;
};
/* RX Buffer size must be multiple of 4/8/16 bytes */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 49d6a866244f..e62e2ebcf273 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -32,13 +32,11 @@
#define RDES0_DESCRIPTOR_ERROR BIT(14)
#define RDES0_ERROR_SUMMARY BIT(15)
#define RDES0_FRAME_LEN_MASK GENMASK(29, 16)
-#define RDES0_FRAME_LEN_SHIFT 16
#define RDES0_DA_FILTER_FAIL BIT(30)
#define RDES0_OWN BIT(31)
/* RDES1 */
#define RDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
#define RDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
-#define RDES1_BUFFER2_SIZE_SHIFT 11
#define RDES1_SECOND_ADDRESS_CHAINED BIT(24)
#define RDES1_END_RING BIT(25)
#define RDES1_DISABLE_IC BIT(31)
@@ -53,7 +51,6 @@
#define ERDES1_SECOND_ADDRESS_CHAINED BIT(14)
#define ERDES1_END_RING BIT(15)
#define ERDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
-#define ERDES1_BUFFER2_SIZE_SHIFT 16
#define ERDES1_DISABLE_IC BIT(31)
/* Normal transmit descriptor defines */
@@ -77,14 +74,12 @@
/* TDES1 */
#define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
#define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
-#define TDES1_BUFFER2_SIZE_SHIFT 11
#define TDES1_TIME_STAMP_ENABLE BIT(22)
#define TDES1_DISABLE_PADDING BIT(23)
#define TDES1_SECOND_ADDRESS_CHAINED BIT(24)
#define TDES1_END_RING BIT(25)
#define TDES1_CRC_DISABLE BIT(26)
#define TDES1_CHECKSUM_INSERTION_MASK GENMASK(28, 27)
-#define TDES1_CHECKSUM_INSERTION_SHIFT 27
#define TDES1_FIRST_SEGMENT BIT(29)
#define TDES1_LAST_SEGMENT BIT(30)
#define TDES1_INTERRUPT BIT(31)
@@ -109,7 +104,6 @@
#define ETDES0_SECOND_ADDRESS_CHAINED BIT(20)
#define ETDES0_END_RING BIT(21)
#define ETDES0_CHECKSUM_INSERTION_MASK GENMASK(23, 22)
-#define ETDES0_CHECKSUM_INSERTION_SHIFT 22
#define ETDES0_TIME_STAMP_ENABLE BIT(25)
#define ETDES0_DISABLE_PADDING BIT(26)
#define ETDES0_CRC_DISABLE BIT(27)
@@ -120,7 +114,6 @@
/* TDES1 */
#define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0)
#define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
-#define ETDES1_BUFFER2_SIZE_SHIFT 16
/* Extended Receive descriptor definitions */
#define ERDES4_IP_PAYLOAD_TYPE_MASK GENMASK(6, 2)
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 40f7f2da9c5e..9d1a94a4fa49 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -23,9 +23,8 @@ static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
int bfsize)
{
if (bfsize == BUF_SIZE_16KiB)
- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
- << ERDES1_BUFFER2_SIZE_SHIFT)
- & ERDES1_BUFFER2_SIZE_MASK);
+ p->des1 |= cpu_to_le32(FIELD_PREP(ERDES1_BUFFER2_SIZE_MASK,
+ BUF_SIZE_8KiB));
if (end)
p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -39,15 +38,20 @@ static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
p->des0 &= cpu_to_le32(~ETDES0_END_RING);
}
+/* The maximum buffer 1 size is 8KiB - 1. However, we limit it to 4KiB. */
static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
- if (unlikely(len > BUF_SIZE_4KiB)) {
- p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB)
- << ETDES1_BUFFER2_SIZE_SHIFT)
- & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
- & ETDES1_BUFFER1_SIZE_MASK));
- } else
- p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK));
+ unsigned int buffer1_max_length = BUF_SIZE_4KiB;
+
+ if (unlikely(len > buffer1_max_length)) {
+ p->des1 |= cpu_to_le32(FIELD_PREP(ETDES1_BUFFER2_SIZE_MASK,
+ len - buffer1_max_length) |
+ FIELD_PREP(ETDES1_BUFFER1_SIZE_MASK,
+ buffer1_max_length));
+ } else {
+ p->des1 |= cpu_to_le32(FIELD_PREP(ETDES1_BUFFER1_SIZE_MASK,
+ len));
+ }
}
/* Normal descriptors */
@@ -57,8 +61,8 @@ static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
int bfsize2;
bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
- p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
- & RDES1_BUFFER2_SIZE_MASK);
+ p->des1 |= cpu_to_le32(FIELD_PREP(RDES1_BUFFER2_SIZE_MASK,
+ bfsize2));
}
if (end)
@@ -73,16 +77,20 @@ static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
p->des1 &= cpu_to_le32(~TDES1_END_RING);
}
+/* The maximum buffer 1 size is 2KiB - 1, limited by the mask width */
static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
- if (unlikely(len > BUF_SIZE_2KiB)) {
- unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
- & TDES1_BUFFER1_SIZE_MASK;
- p->des1 |= cpu_to_le32((((len - buffer1)
- << TDES1_BUFFER2_SIZE_SHIFT)
- & TDES1_BUFFER2_SIZE_MASK) | buffer1);
- } else
- p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK));
+ unsigned int buffer1_max_length = BUF_SIZE_2KiB - 1;
+
+ if (unlikely(len > buffer1_max_length)) {
+ p->des1 |= cpu_to_le32(FIELD_PREP(TDES1_BUFFER2_SIZE_MASK,
+ len - buffer1_max_length) |
+ FIELD_PREP(TDES1_BUFFER1_SIZE_MASK,
+ buffer1_max_length));
+ } else {
+ p->des1 |= cpu_to_le32(FIELD_PREP(TDES1_BUFFER1_SIZE_MASK,
+ len));
+ }
}
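FIELD_PREP() computes the shift from the mask at compile time, which is why the matching *_SHIFT constants could be dropped from descs.h. A minimal illustration using one of the fields above:

#include <linux/bitfield.h>

#define TDES1_BUFFER2_SIZE_MASK	GENMASK(21, 11)

/* Both place 'len' into bits 21:11 of the descriptor word. */
static u32 old_style(u32 len)
{
	return (len << 11) & TDES1_BUFFER2_SIZE_MASK;
}

static u32 new_style(u32 len)
{
	return FIELD_PREP(TDES1_BUFFER2_SIZE_MASK, len);
}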
/* Specific functions used for Chain mode */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index db288fbd5a4d..c4e85197629d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -28,11 +28,11 @@
#define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20)
#define GPR_ENET_QOS_RGMII_EN (0x1 << 21)
-#define MX93_GPR_ENET_QOS_INTF_MODE_MASK GENMASK(3, 0)
#define MX93_GPR_ENET_QOS_INTF_SEL_MASK GENMASK(3, 1)
-#define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0)
-#define MX93_GPR_ENET_QOS_CLK_SEL_MASK BIT_MASK(0)
-#define MX93_GPR_CLK_SEL_OFFSET (4)
+#define MX93_GPR_ENET_QOS_ENABLE BIT(0)
+
+#define MX93_ENET_CLK_SEL_OFFSET (4)
+#define MX93_ENET_QOS_CLK_TX_SEL_MASK BIT_MASK(0)
#define DMA_BUS_MODE 0x00001000
#define DMA_BUS_MODE_SFT_RESET (0x1 << 0)
@@ -46,7 +46,7 @@ struct imx_dwmac_ops {
u32 flags;
bool mac_rgmii_txclk_auto_adj;
- int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr);
+ int (*fix_soc_reset)(struct stmmac_priv *priv);
int (*set_intf_mode)(struct imx_priv_data *dwmac, u8 phy_intf_sel);
void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
};
@@ -95,17 +95,18 @@ static int imx93_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel)
if (phy_intf_sel == PHY_INTF_SEL_RMII && dwmac->rmii_refclk_ext) {
ret = regmap_clear_bits(dwmac->intf_regmap,
dwmac->intf_reg_off +
- MX93_GPR_CLK_SEL_OFFSET,
- MX93_GPR_ENET_QOS_CLK_SEL_MASK);
+ MX93_ENET_CLK_SEL_OFFSET,
+ MX93_ENET_QOS_CLK_TX_SEL_MASK);
if (ret)
return ret;
}
val = FIELD_PREP(MX93_GPR_ENET_QOS_INTF_SEL_MASK, phy_intf_sel) |
- MX93_GPR_ENET_QOS_CLK_GEN_EN;
+ MX93_GPR_ENET_QOS_ENABLE;
return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
- MX93_GPR_ENET_QOS_INTF_MODE_MASK, val);
+ MX93_GPR_ENET_QOS_INTF_SEL_MASK |
+ MX93_GPR_ENET_QOS_ENABLE, val);
};
static int imx_dwmac_clks_config(void *priv, bool enabled)
@@ -205,7 +206,8 @@ static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
old_ctrl = readl(dwmac->base_addr + MAC_CTRL_REG);
ctrl = old_ctrl & ~CTRL_SPEED_MASK;
regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
- MX93_GPR_ENET_QOS_INTF_MODE_MASK, 0);
+ MX93_GPR_ENET_QOS_INTF_SEL_MASK |
+ MX93_GPR_ENET_QOS_ENABLE, 0);
writel(ctrl, dwmac->base_addr + MAC_CTRL_REG);
/* Ensure the settings for CTRL are applied. */
@@ -213,19 +215,22 @@ static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
usleep_range(10, 20);
iface &= MX93_GPR_ENET_QOS_INTF_SEL_MASK;
- iface |= MX93_GPR_ENET_QOS_CLK_GEN_EN;
+ iface |= MX93_GPR_ENET_QOS_ENABLE;
regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
- MX93_GPR_ENET_QOS_INTF_MODE_MASK, iface);
+ MX93_GPR_ENET_QOS_INTF_SEL_MASK |
+ MX93_GPR_ENET_QOS_ENABLE, iface);
writel(old_ctrl, dwmac->base_addr + MAC_CTRL_REG);
}
-static int imx_dwmac_mx93_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
+static int imx_dwmac_mx93_reset(struct stmmac_priv *priv)
{
struct plat_stmmacenet_data *plat_dat = priv->plat;
- u32 value = readl(ioaddr + DMA_BUS_MODE);
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value;
/* DMA SW reset */
+ value = readl(ioaddr + DMA_BUS_MODE);
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
@@ -268,9 +273,9 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
if (of_machine_is_compatible("fsl,imx8mp") ||
of_machine_is_compatible("fsl,imx91") ||
of_machine_is_compatible("fsl,imx93")) {
- /* Binding doc describes the propety:
+ /* Binding doc describes the property:
* is required by i.MX8MP, i.MX91, i.MX93.
- * is optinoal for i.MX8DXL.
+ * is optional for i.MX8DXL.
*/
dwmac->intf_regmap =
syscon_regmap_lookup_by_phandle_args(np, "intf_mode", 1,
@@ -320,6 +325,9 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
+ if (data->flags & STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD)
+ plat_dat->flags |= STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD;
+
/* Default TX Q0 to use TSO and rest TXQ for TBS */
for (int i = 1; i < plat_dat->tx_queues_to_use; i++)
plat_dat->tx_queues_cfg[i].tbs_en = 1;
@@ -355,7 +363,8 @@ static struct imx_dwmac_ops imx8mp_dwmac_data = {
.addr_width = 34,
.mac_rgmii_txclk_auto_adj = false,
.set_intf_mode = imx8mp_set_intf_mode,
- .flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY,
+ .flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY |
+ STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD,
};
static struct imx_dwmac_ops imx8dxl_dwmac_data = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index aad1be1ec4c1..92d77b0c2f54 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -719,7 +719,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Setup MSI vector offset specific to Intel mGbE controller */
plat->msi_mac_vec = 29;
- plat->msi_lpi_vec = 28;
plat->msi_sfty_ce_vec = 27;
plat->msi_sfty_ue_vec = 26;
plat->msi_rx_base_vec = 0;
@@ -1177,8 +1176,6 @@ static int stmmac_config_multi_msi(struct pci_dev *pdev,
res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
- if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
- res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
@@ -1294,7 +1291,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
*/
plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
- plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 107a7c84ace8..815213223583 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -91,8 +91,8 @@ static void loongson_default_data(struct pci_dev *pdev,
/* Get bus_id, this can be overwritten later */
plat->bus_id = pci_dev_id(pdev);
- /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
- plat->clk_csr = STMMAC_CSR_20_35M;
+ /* clk_csr_i = 100-150MHz & MDC = clk_csr_i/62 */
+ plat->clk_csr = STMMAC_CSR_100_150M;
plat->core_type = DWMAC_CORE_GMAC;
plat->force_sf_dma_mode = 1;
@@ -192,9 +192,8 @@ static void loongson_dwmac_dma_init_channel(struct stmmac_priv *priv,
value |= DMA_BUS_MODE_MAXPBL;
value |= DMA_BUS_MODE_USP;
- value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
- value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
- value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+ value = u32_replace_bits(value, txpbl, DMA_BUS_MODE_PBL_MASK);
+ value = u32_replace_bits(value, rxpbl, DMA_BUS_MODE_RPBL_MASK);
/* Set the Fixed burst mode */
if (dma_cfg->fixed_burst)
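u32_replace_bits() collapses the clear-then-shift-in sequence into a single mask-driven call; a sketch of the equivalence for the PBL field (behaviour, not the macro body):

/* open-coded form the patch removes */
static u32 set_pbl_old(u32 value, u32 txpbl)
{
	value &= ~DMA_BUS_MODE_PBL_MASK;
	return value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
}

/* replacement: the shift is derived from the mask */
static u32 set_pbl_new(u32 value, u32 txpbl)
{
	return u32_replace_bits(value, txpbl, DMA_BUS_MODE_PBL_MASK);
}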
@@ -443,13 +442,6 @@ static int loongson_dwmac_dt_config(struct pci_dev *pdev,
res->wol_irq = res->irq;
}
- res->lpi_irq = of_irq_get_byname(np, "eth_lpi");
- if (res->lpi_irq < 0) {
- dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
- ret = -ENODEV;
- goto err_put_node;
- }
-
ret = device_get_phy_mode(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "phy_mode not found\n");
@@ -486,10 +478,12 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
}
/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
-static int loongson_dwmac_fix_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
+static int loongson_dwmac_fix_reset(struct stmmac_priv *priv)
{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value;
+ value = readl(ioaddr + DMA_BUS_MODE);
if (value & DMA_BUS_MODE_SFT_RESET) {
netdev_err(priv->dev, "the PHY clock is missing\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-motorcomm.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-motorcomm.c
new file mode 100644
index 000000000000..8b45b9cf7202
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-motorcomm.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DWMAC glue driver for Motorcomm PCI Ethernet controllers
+ *
+ * Copyright (c) 2025-2026 Yao Zi <me@ziyao.cc>
+ */
+
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stmmac.h>
+
+#include "dwmac4.h"
+#include "stmmac.h"
+#include "stmmac_libpci.h"
+
+#define DRIVER_NAME "dwmac-motorcomm"
+
+#define PCI_VENDOR_ID_MOTORCOMM 0x1f0a
+
+/* Register definition */
+#define EPHY_CTRL 0x1004
+/* Clearing this bit asserts resets for internal MDIO bus and PHY */
+#define EPHY_MDIO_PHY_RESET BIT(0)
+#define OOB_WOL_CTRL 0x1010
+#define OOB_WOL_CTRL_DIS BIT(0)
+#define MGMT_INT_CTRL0 0x1100
+#define INT_MODERATION 0x1108
+#define INT_MODERATION_RX GENMASK(11, 0)
+#define INT_MODERATION_TX GENMASK(27, 16)
+#define EFUSE_OP_CTRL_0 0x1500
+#define EFUSE_OP_MODE GENMASK(1, 0)
+#define EFUSE_OP_ROW_READ 0x1
+#define EFUSE_OP_START BIT(2)
+#define EFUSE_OP_ADDR GENMASK(15, 8)
+#define EFUSE_OP_CTRL_1 0x1504
+#define EFUSE_OP_DONE BIT(1)
+#define EFUSE_OP_RD_DATA GENMASK(31, 24)
+#define SYS_RESET 0x152c
+#define SYS_RESET_RESET BIT(31)
+#define GMAC_OFFSET 0x2000
+
+/* Constants */
+#define EFUSE_READ_TIMEOUT_US 20000
+#define EFUSE_PATCH_REGION_OFFSET 18
+#define EFUSE_PATCH_MAX_NUM 39
+#define EFUSE_ADDR_MACA0LR 0x1520
+#define EFUSE_ADDR_MACA0HR 0x1524
+
+struct motorcomm_efuse_patch {
+ __le16 addr;
+ __le32 data;
+} __packed;
+
+struct dwmac_motorcomm_priv {
+ void __iomem *base;
+};
+
+static int motorcomm_efuse_read_byte(struct dwmac_motorcomm_priv *priv,
+ u8 offset, u8 *byte)
+{
+ u32 reg;
+ int ret;
+
+ writel(FIELD_PREP(EFUSE_OP_MODE, EFUSE_OP_ROW_READ) |
+ FIELD_PREP(EFUSE_OP_ADDR, offset) |
+ EFUSE_OP_START, priv->base + EFUSE_OP_CTRL_0);
+
+ ret = readl_poll_timeout(priv->base + EFUSE_OP_CTRL_1,
+ reg, reg & EFUSE_OP_DONE, 2000,
+ EFUSE_READ_TIMEOUT_US);
+
+ *byte = FIELD_GET(EFUSE_OP_RD_DATA, reg);
+
+ return ret;
+}
+
+static int motorcomm_efuse_read_patch(struct dwmac_motorcomm_priv *priv,
+ u8 index,
+ struct motorcomm_efuse_patch *patch)
+{
+ u8 *p = (u8 *)patch, offset;
+ int i, ret;
+
+ for (i = 0; i < sizeof(*patch); i++) {
+ offset = EFUSE_PATCH_REGION_OFFSET + sizeof(*patch) * index + i;
+
+ ret = motorcomm_efuse_read_byte(priv, offset, &p[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int motorcomm_efuse_get_patch_value(struct dwmac_motorcomm_priv *priv,
+ u16 addr, u32 *value)
+{
+ struct motorcomm_efuse_patch patch;
+ int i, ret;
+
+ for (i = 0; i < EFUSE_PATCH_MAX_NUM; i++) {
+ ret = motorcomm_efuse_read_patch(priv, i, &patch);
+ if (ret)
+ return ret;
+
+ if (patch.addr == 0) {
+ return -ENOENT;
+ } else if (le16_to_cpu(patch.addr) == addr) {
+ *value = le32_to_cpu(patch.data);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static int motorcomm_efuse_read_mac(struct device *dev,
+ struct dwmac_motorcomm_priv *priv, u8 *mac)
+{
+ u32 maca0lr, maca0hr;
+ int ret;
+
+ ret = motorcomm_efuse_get_patch_value(priv, EFUSE_ADDR_MACA0LR,
+ &maca0lr);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to read maca0lr from eFuse\n");
+
+ ret = motorcomm_efuse_get_patch_value(priv, EFUSE_ADDR_MACA0HR,
+ &maca0hr);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to read maca0hr from eFuse\n");
+
+ mac[0] = FIELD_GET(GENMASK(15, 8), maca0hr);
+ mac[1] = FIELD_GET(GENMASK(7, 0), maca0hr);
+ mac[2] = FIELD_GET(GENMASK(31, 24), maca0lr);
+ mac[3] = FIELD_GET(GENMASK(23, 16), maca0lr);
+ mac[4] = FIELD_GET(GENMASK(15, 8), maca0lr);
+ mac[5] = FIELD_GET(GENMASK(7, 0), maca0lr);
+
+ return 0;
+}
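The FIELD_GET() slicing above reassembles the station address from the two eFuse words; a worked example with hypothetical register contents:

u32 maca0hr = 0x0000001c;	/* [15:8] -> mac[0], [7:0] -> mac[1] */
u32 maca0lr = 0x2345678a;	/* [31:24] -> mac[2] ... [7:0] -> mac[5] */
/* resulting address: 00:1c:23:45:67:8a */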
+
+static void motorcomm_deassert_mdio_phy_reset(struct dwmac_motorcomm_priv *priv)
+{
+ u32 reg = readl(priv->base + EPHY_CTRL);
+
+ reg |= EPHY_MDIO_PHY_RESET;
+
+ writel(reg, priv->base + EPHY_CTRL);
+}
+
+static void motorcomm_reset(struct dwmac_motorcomm_priv *priv)
+{
+ u32 reg = readl(priv->base + SYS_RESET);
+
+ reg &= ~SYS_RESET_RESET;
+ writel(reg, priv->base + SYS_RESET);
+
+ reg |= SYS_RESET_RESET;
+ writel(reg, priv->base + SYS_RESET);
+
+ motorcomm_deassert_mdio_phy_reset(priv);
+}
+
+static void motorcomm_init(struct dwmac_motorcomm_priv *priv)
+{
+ writel(0x0, priv->base + MGMT_INT_CTRL0);
+
+ writel(FIELD_PREP(INT_MODERATION_RX, 200) |
+ FIELD_PREP(INT_MODERATION_TX, 200),
+ priv->base + INT_MODERATION);
+
+ /*
+ * OOB WOL must be disabled during normal operation, or DMA interrupts
+ * cannot be delivered to the host.
+ */
+ writel(OOB_WOL_CTRL_DIS, priv->base + OOB_WOL_CTRL);
+}
+
+static int motorcomm_resume(struct device *dev, void *bsp_priv)
+{
+ struct dwmac_motorcomm_priv *priv = bsp_priv;
+ int ret;
+
+ ret = stmmac_pci_plat_resume(dev, bsp_priv);
+ if (ret)
+ return ret;
+
+ /*
+	 * When recovering from D3hot, the MDIO/PHY reset is automatically
+	 * asserted (EPHY_MDIO_PHY_RESET is cleared) and must be deasserted
+	 * for normal operation.
+ */
+ motorcomm_deassert_mdio_phy_reset(priv);
+ motorcomm_init(priv);
+
+ return 0;
+}
+
+static struct plat_stmmacenet_data *
+motorcomm_default_plat_data(struct pci_dev *pdev)
+{
+ struct plat_stmmacenet_data *plat;
+ struct device *dev = &pdev->dev;
+
+ plat = stmmac_plat_dat_alloc(dev);
+ if (!plat)
+ return NULL;
+
+ plat->mdio_bus_data = devm_kzalloc(dev, sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return NULL;
+
+ plat->dma_cfg = devm_kzalloc(dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
+ if (!plat->dma_cfg)
+ return NULL;
+
+ plat->axi = devm_kzalloc(dev, sizeof(*plat->axi), GFP_KERNEL);
+ if (!plat->axi)
+ return NULL;
+
+ plat->dma_cfg->pbl = DEFAULT_DMA_PBL;
+ plat->dma_cfg->pblx8 = true;
+ plat->dma_cfg->txpbl = 32;
+ plat->dma_cfg->rxpbl = 32;
+ plat->dma_cfg->eame = true;
+ plat->dma_cfg->mixed_burst = true;
+
+ plat->axi->axi_wr_osr_lmt = 1;
+ plat->axi->axi_rd_osr_lmt = 1;
+ plat->axi->axi_mb = true;
+ plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN32;
+
+ plat->bus_id = pci_dev_id(pdev);
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;
+ /*
+ * YT6801 requires an 25MHz clock input/oscillator to function, which
+ * is likely the source of CSR clock.
+ */
+ plat->clk_csr = STMMAC_CSR_20_35M;
+ plat->tx_coe = 1;
+ plat->rx_coe = 1;
+ plat->clk_ref_rate = 125000000;
+ plat->core_type = DWMAC_CORE_GMAC4;
+ plat->suspend = stmmac_pci_plat_suspend;
+ plat->resume = motorcomm_resume;
+ plat->flags = STMMAC_FLAG_TSO_EN |
+ STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
+
+ return plat;
+}
+
+static void motorcomm_free_irq(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ pci_free_irq_vectors(pdev);
+}
+
+static int motorcomm_setup_irq(struct pci_dev *pdev,
+ struct stmmac_resources *res,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 6, 6, PCI_IRQ_MSIX);
+ if (ret > 0) {
+ res->rx_irq[0] = pci_irq_vector(pdev, 0);
+ res->tx_irq[0] = pci_irq_vector(pdev, 4);
+ res->irq = pci_irq_vector(pdev, 5);
+
+ plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
+ } else {
+ dev_info(&pdev->dev, "failed to allocate MSI-X vector: %d\n",
+ ret);
+ dev_info(&pdev->dev, "try MSI instead\n");
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to allocate MSI\n");
+
+ res->irq = pci_irq_vector(pdev, 0);
+ }
+
+ return devm_add_action_or_reset(&pdev->dev, motorcomm_free_irq, pdev);
+}
+
+static int motorcomm_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct plat_stmmacenet_data *plat;
+ struct dwmac_motorcomm_priv *priv;
+ struct stmmac_resources res = {};
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ plat = motorcomm_default_plat_data(pdev);
+ if (!plat)
+ return -ENOMEM;
+
+ plat->bsp_priv = priv;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to enable device\n");
+
+ priv->base = pcim_iomap_region(pdev, 0, DRIVER_NAME);
+ if (IS_ERR(priv->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->base),
+ "failed to map IO region\n");
+
+ pci_set_master(pdev);
+
+ /*
+	 * Some PCIe add-on cards based on the YT6801 don't deliver MSI(X) with
+	 * ASPM enabled. Sadly there isn't a reliable way to read out the OEM of
+	 * the card, so disable the L1 state unconditionally for safety.
+ */
+ ret = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to disable L1 state: %d\n", ret);
+
+ motorcomm_reset(priv);
+
+ ret = motorcomm_efuse_read_mac(&pdev->dev, priv, res.mac);
+ if (ret == -ENOENT) {
+ dev_warn(&pdev->dev, "eFuse contains no valid MAC address\n");
+ dev_warn(&pdev->dev, "fallback to random MAC address\n");
+
+ eth_random_addr(res.mac);
+ } else if (ret) {
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to read MAC address from eFuse\n");
+ }
+
+ ret = motorcomm_setup_irq(pdev, &res, plat);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to setup IRQ\n");
+
+ motorcomm_init(priv);
+
+ res.addr = priv->base + GMAC_OFFSET;
+
+ return stmmac_dvr_probe(&pdev->dev, plat, &res);
+}
+
+static void motorcomm_remove(struct pci_dev *pdev)
+{
+ stmmac_dvr_remove(&pdev->dev);
+}
+
+static const struct pci_device_id dwmac_motorcomm_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MOTORCOMM, 0x6801) },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, dwmac_motorcomm_pci_id_table);
+
+static struct pci_driver dwmac_motorcomm_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = dwmac_motorcomm_pci_id_table,
+ .probe = motorcomm_probe,
+ .remove = motorcomm_remove,
+ .driver = {
+ .pm = &stmmac_simple_pm_ops,
+ },
+};
+
+module_pci_driver(dwmac_motorcomm_pci_driver);
+
+MODULE_DESCRIPTION("DWMAC glue driver for Motorcomm PCI Ethernet controllers");
+MODULE_AUTHOR("Yao Zi <me@ziyao.cc>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 0826a7bd32ff..af8204c0e188 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -100,7 +100,6 @@ struct ethqos_emac_driver_data {
struct qcom_ethqos {
struct platform_device *pdev;
void __iomem *rgmii_base;
- void __iomem *mac_base;
int (*configure_func)(struct qcom_ethqos *ethqos, int speed);
unsigned int link_clk_rate;
@@ -660,10 +659,18 @@ static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv)
return ret;
ret = phy_power_on(ethqos->serdes_phy);
- if (ret)
+ if (ret) {
+ phy_exit(ethqos->serdes_phy);
return ret;
+ }
+
+ ret = phy_set_speed(ethqos->serdes_phy, ethqos->serdes_speed);
+ if (ret) {
+ phy_power_off(ethqos->serdes_phy);
+ phy_exit(ethqos->serdes_phy);
+ }
- return phy_set_speed(ethqos->serdes_phy, ethqos->serdes_speed);
+ return ret;
}
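The net effect of this hunk is that the serdes bring-up now unwinds in strict reverse order on failure; a sketch of the resulting flow:

/*
 * phy_init()      fails -> return ret
 * phy_power_on()  fails -> phy_exit(), return ret
 * phy_set_speed() fails -> phy_power_off(), phy_exit(), return ret
 *
 * Each error path undoes exactly the steps that already succeeded.
 */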
static void qcom_ethqos_serdes_powerdown(struct net_device *ndev, void *priv)
@@ -772,8 +779,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(ethqos->rgmii_base),
"Failed to map rgmii resource\n");
- ethqos->mac_base = stmmac_res.addr;
-
data = of_device_get_match_data(dev);
ethqos->por = data->por;
ethqos->num_por = data->num_por;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
index be7f5eb2cdcf..19f34e18bfef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -214,6 +214,7 @@ static const struct renesas_gbeth_of_data renesas_gmac_of_data = {
};
static const struct of_device_id renesas_gbeth_match[] = {
+ { .compatible = "renesas,r9a08g046-gbeth", .data = &renesas_gbeth_of_data },
{ .compatible = "renesas,r9a09g077-gbeth", .data = &renesas_gmac_of_data },
{ .compatible = "renesas,rzv2h-gbeth", .data = &renesas_gbeth_of_data },
{ /* Sentinel */ }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 0a95f54e725e..b0441a368cb1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -26,24 +26,43 @@
struct rk_priv_data;
-struct rk_reg_speed_data {
- unsigned int rgmii_10;
- unsigned int rgmii_100;
- unsigned int rgmii_1000;
- unsigned int rmii_10;
- unsigned int rmii_100;
+struct rk_clock_fields {
+	/* io_clksel_cru_mask - io_clksel bit in the clock GRF register
+	 * which, when set, selects the tx clock from the CRU.
+	 */
+	u16 io_clksel_cru_mask;
+	/* io_clksel_io_mask - io_clksel bit in the clock GRF register
+	 * which, when set, selects the tx clock from IO.
+	 */
+ u16 io_clksel_io_mask;
+ u16 gmii_clk_sel_mask;
+ u16 rmii_clk_sel_mask;
+ u16 rmii_gate_en_mask;
+ u16 rmii_mode_mask;
+ u16 mac_speed_mask;
};
struct rk_gmac_ops {
+ int (*init)(struct rk_priv_data *bsp_priv);
void (*set_to_rgmii)(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay);
void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
int (*set_speed)(struct rk_priv_data *bsp_priv,
phy_interface_t interface, int speed);
- void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
- bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
void (*integrated_phy_powerdown)(struct rk_priv_data *bsp_priv);
+
+ u16 gmac_grf_reg;
+ u16 gmac_phy_intf_sel_mask;
+ u16 gmac_rmii_mode_mask;
+
+ u16 clock_grf_reg;
+ struct rk_clock_fields clock;
+
+ bool gmac_grf_reg_in_php;
+ bool clock_grf_reg_in_php;
+ bool supports_rgmii;
+ bool supports_rmii;
bool php_grf_required;
bool regs_valid;
u32 regs[];
@@ -77,6 +96,8 @@ struct rk_priv_data {
bool clk_enabled;
bool clock_input;
bool integrated_phy;
+ bool supports_rgmii;
+ bool supports_rmii;
struct clk_bulk_data *clks;
int num_clks;
@@ -89,51 +110,121 @@ struct rk_priv_data {
struct regmap *grf;
struct regmap *php_grf;
+
+ u16 gmac_grf_reg;
+ u16 gmac_phy_intf_sel_mask;
+ u16 gmac_rmii_mode_mask;
+
+ u16 clock_grf_reg;
+ struct rk_clock_fields clock;
};
-static int rk_set_reg_speed(struct rk_priv_data *bsp_priv,
- const struct rk_reg_speed_data *rsd,
- unsigned int reg, phy_interface_t interface,
- int speed)
+#define GMAC_CLK_DIV1_125M 0
+#define GMAC_CLK_DIV50_2_5M 2
+#define GMAC_CLK_DIV5_25M 3
+
+static int rk_gmac_rgmii_clk_div(int speed)
{
- unsigned int val;
-
- if (phy_interface_mode_is_rgmii(interface)) {
- if (speed == SPEED_10) {
- val = rsd->rgmii_10;
- } else if (speed == SPEED_100) {
- val = rsd->rgmii_100;
- } else if (speed == SPEED_1000) {
- val = rsd->rgmii_1000;
- } else {
- /* Phylink will not allow inappropriate speeds for
- * interface modes, so this should never happen.
- */
- return -EINVAL;
- }
- } else if (interface == PHY_INTERFACE_MODE_RMII) {
- if (speed == SPEED_10) {
- val = rsd->rmii_10;
- } else if (speed == SPEED_100) {
- val = rsd->rmii_100;
- } else {
- /* Phylink will not allow inappropriate speeds for
- * interface modes, so this should never happen.
- */
- return -EINVAL;
- }
- } else {
- /* This should never happen, as .get_interfaces() limits
- * the interface modes that are supported to RGMII and/or
- * RMII.
- */
- return -EINVAL;
- }
+ if (speed == SPEED_10)
+ return GMAC_CLK_DIV50_2_5M;
+ if (speed == SPEED_100)
+ return GMAC_CLK_DIV5_25M;
+ if (speed == SPEED_1000)
+ return GMAC_CLK_DIV1_125M;
+ return -EINVAL;
+}
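Assuming the usual 125 MHz RGMII reference clock (an assumption; only the divider encodings come from the macros above), the selection works out to:

/*
 * SPEED_1000 -> GMAC_CLK_DIV1_125M  (0): 125 MHz / 1  = 125 MHz
 * SPEED_100  -> GMAC_CLK_DIV5_25M   (3): 125 MHz / 5  =  25 MHz
 * SPEED_10   -> GMAC_CLK_DIV50_2_5M (2): 125 MHz / 50 = 2.5 MHz
 */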
- regmap_write(bsp_priv->grf, reg, val);
+static int rk_get_phy_intf_sel(phy_interface_t interface)
+{
+ int ret = stmmac_get_phy_intf_sel(interface);
- return 0;
+ /* Only RGMII and RMII are supported */
+ if (ret != PHY_INTF_SEL_RGMII && ret != PHY_INTF_SEL_RMII)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static u32 rk_encode_wm16(u16 val, u16 mask)
+{
+	u32 reg_val = mask << 16;
+
+	if (mask)
+ reg_val |= mask & (val << (ffs(mask) - 1));
+
+ return reg_val;
+}
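A worked example of the encoding, given that Rockchip GRF registers take per-bit write enables in the upper half-word:

/*
 * rk_encode_wm16(3, GENMASK_U16(13, 12)):
 *   mask          = 0x3000
 *   write enables = 0x3000 << 16           = 0x30000000
 *   field value   = 3 << (ffs(0x3000) - 1) = 0x00003000
 *   result                                 = 0x30003000
 * A zero mask yields 0, which such registers treat as a no-op write.
 */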
+
+static int rk_write_gmac_grf_reg(struct rk_priv_data *bsp_priv, u32 val)
+{
+ struct regmap *regmap;
+
+ if (bsp_priv->ops->gmac_grf_reg_in_php)
+ regmap = bsp_priv->php_grf;
+ else
+ regmap = bsp_priv->grf;
+
+ return regmap_write(regmap, bsp_priv->gmac_grf_reg, val);
+}
+
+static int rk_write_clock_grf_reg(struct rk_priv_data *bsp_priv, u32 val)
+{
+ struct regmap *regmap;
+
+ if (bsp_priv->ops->clock_grf_reg_in_php)
+ regmap = bsp_priv->php_grf;
+ else
+ regmap = bsp_priv->grf;
+
+ return regmap_write(regmap, bsp_priv->clock_grf_reg, val);
+}
+
+static int rk_set_rmii_gate_en(struct rk_priv_data *bsp_priv, bool state)
+{
+ u32 val;
+
+ if (!bsp_priv->clock.rmii_gate_en_mask)
+ return 0;
+
+ val = rk_encode_wm16(state, bsp_priv->clock.rmii_gate_en_mask);
+
+ return rk_write_clock_grf_reg(bsp_priv, val);
+}
+
+static int rk_ungate_rmii_clock(struct rk_priv_data *bsp_priv)
+{
+ return rk_set_rmii_gate_en(bsp_priv, false);
+}
+
+static int rk_gate_rmii_clock(struct rk_priv_data *bsp_priv)
+{
+ return rk_set_rmii_gate_en(bsp_priv, true);
+}
+
+static int rk_configure_io_clksel(struct rk_priv_data *bsp_priv)
+{
+ bool io, cru;
+ u32 val;
+
+ if (!bsp_priv->clock.io_clksel_io_mask &&
+ !bsp_priv->clock.io_clksel_cru_mask)
+ return 0;
+
+ io = bsp_priv->clock_input;
+ cru = !io;
+
+ /* The io_clksel configuration can be either:
+	 * 0=CRU, 1=IO (rk3506, rk3528, rk3576) or
+ * 0=IO, 1=CRU (rk3588)
+ * where CRU means the transmit clock comes from the CRU and IO
+ * means the transmit clock comes from IO.
+ *
+ * Handle this by having two masks.
+ */
+ val = rk_encode_wm16(io, bsp_priv->clock.io_clksel_io_mask) |
+ rk_encode_wm16(cru, bsp_priv->clock.io_clksel_cru_mask);
+
+ return rk_write_clock_grf_reg(bsp_priv, val);
}
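A worked example for the rk3588 layout set up in rk3588_init() below (io_clksel_cru_mask = BIT(4) for id 0, io_clksel_io_mask = 0), with clock_input = true:

/*
 * io = true, cru = false
 * rk_encode_wm16(true, 0)       = 0x00000000  (no io mask: no-op)
 * rk_encode_wm16(false, BIT(4)) = 0x00100000  (write enable bit 20,
 *                                              data bit 4 written as 0)
 * Clearing bit 4 selects IO on rk3588, matching the 0=IO, 1=CRU
 * convention described above.
 */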
static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
@@ -151,8 +242,6 @@ static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
#define GRF_FIELD(hi, lo, val) \
FIELD_PREP_WM16(GENMASK_U16(hi, lo), val)
-#define GRF_FIELD_CONST(hi, lo, val) \
- FIELD_PREP_WM16_CONST(GENMASK_U16(hi, lo), val)
#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
#define GRF_CLR_BIT(nr) (BIT(nr+16))
@@ -162,15 +251,17 @@ static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
#define RK_GRF_MACPHY_CON0 0xb00
-#define RK_GRF_MACPHY_CON1 0xb04
-#define RK_GRF_MACPHY_CON2 0xb08
-#define RK_GRF_MACPHY_CON3 0xb0c
-
#define RK_MACPHY_ENABLE GRF_BIT(0)
#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
#define RK_GMAC2PHY_RMII_MODE GRF_FIELD(7, 6, 1)
+
+#define RK_GRF_MACPHY_CON1 0xb04
+
+#define RK_GRF_MACPHY_CON2 0xb08
#define RK_GRF_CON2_MACPHY_ID GRF_FIELD(15, 0, 0x1234)
+
+#define RK_GRF_MACPHY_CON3 0xb0c
#define RK_GRF_CON3_MACPHY_ID GRF_FIELD(5, 0, 0x35)
static void rk_gmac_integrated_ephy_powerup(struct rk_priv_data *priv)
@@ -233,49 +324,16 @@ static void rk_gmac_integrated_fephy_powerdown(struct rk_priv_data *priv,
#define PX30_GRF_GMAC_CON1 0x0904
-/* PX30_GRF_GMAC_CON1 */
-#define PX30_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
-#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2)
-#define PX30_GMAC_SPEED_100M GRF_BIT(2)
-
-static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
-}
-
-static int px30_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = bsp_priv->dev;
- unsigned int con1;
- long rate;
-
- if (!clk_mac_speed) {
- dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
- return -EINVAL;
- }
-
- if (speed == 10) {
- con1 = PX30_GMAC_SPEED_10M;
- rate = 2500000;
- } else if (speed == 100) {
- con1 = PX30_GMAC_SPEED_100M;
- rate = 25000000;
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- return -EINVAL;
- }
+static const struct rk_gmac_ops px30_ops = {
+ .set_speed = rk_set_clk_mac_speed,
- regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, con1);
+ .gmac_grf_reg = PX30_GRF_GMAC_CON1,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(6, 4),
- return clk_set_rate(clk_mac_speed, rate);
-}
+ .clock_grf_reg = PX30_GRF_GMAC_CON1,
+ .clock.mac_speed_mask = BIT_U16(2),
-static const struct rk_gmac_ops px30_ops = {
- .set_to_rmii = px30_set_to_rmii,
- .set_speed = px30_set_speed,
+ .supports_rmii = true,
};
#define RK3128_GRF_MAC_CON0 0x0168
@@ -290,57 +348,31 @@ static const struct rk_gmac_ops px30_ops = {
#define RK3128_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3128_GRF_MAC_CON1 */
-#define RK3128_GMAC_PHY_INTF_SEL(val) GRF_FIELD(8, 6, val)
#define RK3128_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
-#define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10)
-#define RK3128_GMAC_SPEED_100M GRF_BIT(10)
-#define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11)
-#define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define RK3128_GMAC_CLK_125M GRF_FIELD_CONST(13, 12, 0)
-#define RK3128_GMAC_CLK_25M GRF_FIELD_CONST(13, 12, 3)
-#define RK3128_GMAC_CLK_2_5M GRF_FIELD_CONST(13, 12, 2)
-#define RK3128_GMAC_RMII_MODE GRF_BIT(14)
-#define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3128_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3128_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3128_GMAC_RMII_MODE);
-}
+static const struct rk_gmac_ops rk3128_ops = {
+ .set_to_rgmii = rk3128_set_to_rgmii,
-static const struct rk_reg_speed_data rk3128_reg_speed_data = {
- .rgmii_10 = RK3128_GMAC_CLK_2_5M,
- .rgmii_100 = RK3128_GMAC_CLK_25M,
- .rgmii_1000 = RK3128_GMAC_CLK_125M,
- .rmii_10 = RK3128_GMAC_RMII_CLK_2_5M | RK3128_GMAC_SPEED_10M,
- .rmii_100 = RK3128_GMAC_RMII_CLK_25M | RK3128_GMAC_SPEED_100M,
-};
+ .gmac_grf_reg = RK3128_GRF_MAC_CON1,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(8, 6),
+ .gmac_rmii_mode_mask = BIT_U16(14),
-static int rk3128_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3128_reg_speed_data,
- RK3128_GRF_MAC_CON1, interface, speed);
-}
+ .clock_grf_reg = RK3128_GRF_MAC_CON1,
+ .clock.gmii_clk_sel_mask = GENMASK_U16(13, 12),
+ .clock.rmii_clk_sel_mask = BIT_U16(11),
+ .clock.mac_speed_mask = BIT_U16(10),
-static const struct rk_gmac_ops rk3128_ops = {
- .set_to_rgmii = rk3128_set_to_rgmii,
- .set_to_rmii = rk3128_set_to_rmii,
- .set_speed = rk3128_set_speed,
+ .supports_rmii = true,
};
#define RK3228_GRF_MAC_CON0 0x0900
@@ -353,18 +385,8 @@ static const struct rk_gmac_ops rk3128_ops = {
#define RK3228_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3228_GRF_MAC_CON1 */
-#define RK3228_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
-#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2)
-#define RK3228_GMAC_SPEED_100M GRF_BIT(2)
-#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7)
-#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-#define RK3228_GMAC_CLK_125M GRF_FIELD_CONST(9, 8, 0)
-#define RK3228_GMAC_CLK_25M GRF_FIELD_CONST(9, 8, 3)
-#define RK3228_GMAC_CLK_2_5M GRF_FIELD_CONST(9, 8, 2)
-#define RK3228_GMAC_RMII_MODE GRF_BIT(10)
-#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10)
#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
#define RK3228_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
#define RK3228_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -377,8 +399,6 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3228_GMAC_RMII_MODE_CLR |
DELAY_ENABLE(RK3228, tx_delay, rx_delay));
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
@@ -388,29 +408,10 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3228_GMAC_RMII_MODE);
-
/* set MAC to RMII mode */
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11));
}
-static const struct rk_reg_speed_data rk3228_reg_speed_data = {
- .rgmii_10 = RK3228_GMAC_CLK_2_5M,
- .rgmii_100 = RK3228_GMAC_CLK_25M,
- .rgmii_1000 = RK3228_GMAC_CLK_125M,
- .rmii_10 = RK3228_GMAC_RMII_CLK_2_5M | RK3228_GMAC_SPEED_10M,
- .rmii_100 = RK3228_GMAC_RMII_CLK_25M | RK3228_GMAC_SPEED_100M,
-};
-
-static int rk3228_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3228_reg_speed_data,
- RK3228_GRF_MAC_CON1, interface, speed);
-}
-
static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3228_GRF_CON_MUX,
@@ -422,27 +423,25 @@ static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
static const struct rk_gmac_ops rk3228_ops = {
.set_to_rgmii = rk3228_set_to_rgmii,
.set_to_rmii = rk3228_set_to_rmii,
- .set_speed = rk3228_set_speed,
.integrated_phy_powerup = rk3228_integrated_phy_powerup,
.integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
+
+ .gmac_grf_reg = RK3228_GRF_MAC_CON1,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(6, 4),
+ .gmac_rmii_mode_mask = BIT_U16(10),
+
+ .clock_grf_reg = RK3228_GRF_MAC_CON1,
+ .clock.gmii_clk_sel_mask = GENMASK_U16(9, 8),
+ .clock.rmii_clk_sel_mask = BIT_U16(7),
+ .clock.mac_speed_mask = BIT_U16(2),
};
#define RK3288_GRF_SOC_CON1 0x0248
#define RK3288_GRF_SOC_CON3 0x0250
/*RK3288_GRF_SOC_CON1*/
-#define RK3288_GMAC_PHY_INTF_SEL(val) GRF_FIELD(8, 6, val)
#define RK3288_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3288_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
-#define RK3288_GMAC_SPEED_10M GRF_CLR_BIT(10)
-#define RK3288_GMAC_SPEED_100M GRF_BIT(10)
-#define RK3288_GMAC_RMII_CLK_25M GRF_BIT(11)
-#define RK3288_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define RK3288_GMAC_CLK_125M GRF_FIELD_CONST(13, 12, 0)
-#define RK3288_GMAC_CLK_25M GRF_FIELD_CONST(13, 12, 3)
-#define RK3288_GMAC_CLK_2_5M GRF_FIELD_CONST(13, 12, 2)
-#define RK3288_GMAC_RMII_MODE GRF_BIT(14)
-#define RK3288_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
/*RK3288_GRF_SOC_CON3*/
#define RK3288_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
@@ -455,73 +454,41 @@ static const struct rk_gmac_ops rk3228_ops = {
static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3288_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3288_GMAC_RMII_MODE);
-}
+static const struct rk_gmac_ops rk3288_ops = {
+ .set_to_rgmii = rk3288_set_to_rgmii,
-static const struct rk_reg_speed_data rk3288_reg_speed_data = {
- .rgmii_10 = RK3288_GMAC_CLK_2_5M,
- .rgmii_100 = RK3288_GMAC_CLK_25M,
- .rgmii_1000 = RK3288_GMAC_CLK_125M,
- .rmii_10 = RK3288_GMAC_RMII_CLK_2_5M | RK3288_GMAC_SPEED_10M,
- .rmii_100 = RK3288_GMAC_RMII_CLK_25M | RK3288_GMAC_SPEED_100M,
-};
+ .gmac_grf_reg = RK3288_GRF_SOC_CON1,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(8, 6),
+ .gmac_rmii_mode_mask = BIT_U16(14),
-static int rk3288_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3288_reg_speed_data,
- RK3288_GRF_SOC_CON1, interface, speed);
-}
+ .clock_grf_reg = RK3288_GRF_SOC_CON1,
+ .clock.gmii_clk_sel_mask = GENMASK_U16(13, 12),
+ .clock.rmii_clk_sel_mask = BIT_U16(11),
+ .clock.mac_speed_mask = BIT_U16(10),
-static const struct rk_gmac_ops rk3288_ops = {
- .set_to_rgmii = rk3288_set_to_rgmii,
- .set_to_rmii = rk3288_set_to_rmii,
- .set_speed = rk3288_set_speed,
+ .supports_rmii = true,
};
#define RK3308_GRF_MAC_CON0 0x04a0
/* RK3308_GRF_MAC_CON0 */
-#define RK3308_GMAC_PHY_INTF_SEL(val) GRF_FIELD(4, 2, val)
#define RK3308_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3308_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
-#define RK3308_GMAC_SPEED_10M GRF_CLR_BIT(0)
-#define RK3308_GMAC_SPEED_100M GRF_BIT(0)
-static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
-}
-
-static const struct rk_reg_speed_data rk3308_reg_speed_data = {
- .rmii_10 = RK3308_GMAC_SPEED_10M,
- .rmii_100 = RK3308_GMAC_SPEED_100M,
-};
+static const struct rk_gmac_ops rk3308_ops = {
+ .gmac_grf_reg = RK3308_GRF_MAC_CON0,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(4, 2),
-static int rk3308_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3308_reg_speed_data,
- RK3308_GRF_MAC_CON0, interface, speed);
-}
+ .clock_grf_reg = RK3308_GRF_MAC_CON0,
+ .clock.mac_speed_mask = BIT_U16(0),
-static const struct rk_gmac_ops rk3308_ops = {
- .set_to_rmii = rk3308_set_to_rmii,
- .set_speed = rk3308_set_speed,
+ .supports_rmii = true,
};
#define RK3328_GRF_MAC_CON0 0x0900
@@ -534,30 +501,38 @@ static const struct rk_gmac_ops rk3308_ops = {
#define RK3328_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3328_GRF_MAC_CON1 */
-#define RK3328_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3328_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3328_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
-#define RK3328_GMAC_SPEED_10M GRF_CLR_BIT(2)
-#define RK3328_GMAC_SPEED_100M GRF_BIT(2)
-#define RK3328_GMAC_RMII_CLK_25M GRF_BIT(7)
-#define RK3328_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-#define RK3328_GMAC_CLK_125M GRF_FIELD_CONST(12, 11, 0)
-#define RK3328_GMAC_CLK_25M GRF_FIELD_CONST(12, 11, 3)
-#define RK3328_GMAC_CLK_2_5M GRF_FIELD_CONST(12, 11, 2)
-#define RK3328_GMAC_RMII_MODE GRF_BIT(9)
-#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9)
#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
#define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
/* RK3328_GRF_MACPHY_CON1 */
#define RK3328_MACPHY_RMII_MODE GRF_BIT(9)
+static int rk3328_init(struct rk_priv_data *bsp_priv)
+{
+ switch (bsp_priv->id) {
+ case 0: /* gmac2io */
+ bsp_priv->gmac_grf_reg = RK3328_GRF_MAC_CON1;
+ bsp_priv->clock_grf_reg = RK3328_GRF_MAC_CON1;
+ bsp_priv->clock.gmii_clk_sel_mask = GENMASK_U16(12, 11);
+ return 0;
+
+ case 1: /* gmac2phy */
+ bsp_priv->gmac_grf_reg = RK3328_GRF_MAC_CON2;
+ bsp_priv->clock_grf_reg = RK3328_GRF_MAC_CON2;
+ bsp_priv->supports_rgmii = false;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3328_GMAC_RMII_MODE_CLR |
RK3328_GMAC_RXCLK_DLY_ENABLE |
RK3328_GMAC_TXCLK_DLY_ENABLE);
@@ -566,40 +541,6 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3328_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- unsigned int reg;
-
- reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
- RK3328_GRF_MAC_CON1;
-
- regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3328_GMAC_RMII_MODE);
-}
-
-static const struct rk_reg_speed_data rk3328_reg_speed_data = {
- .rgmii_10 = RK3328_GMAC_CLK_2_5M,
- .rgmii_100 = RK3328_GMAC_CLK_25M,
- .rgmii_1000 = RK3328_GMAC_CLK_125M,
- .rmii_10 = RK3328_GMAC_RMII_CLK_2_5M | RK3328_GMAC_SPEED_10M,
- .rmii_100 = RK3328_GMAC_RMII_CLK_25M | RK3328_GMAC_SPEED_100M,
-};
-
-static int rk3328_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- unsigned int reg;
-
- if (interface == PHY_INTERFACE_MODE_RMII && bsp_priv->integrated_phy)
- reg = RK3328_GRF_MAC_CON2;
- else
- reg = RK3328_GRF_MAC_CON1;
-
- return rk_set_reg_speed(bsp_priv, &rk3328_reg_speed_data, reg,
- interface, speed);
-}
-
static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3328_GRF_MACPHY_CON1,
@@ -609,29 +550,33 @@ static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
}
static const struct rk_gmac_ops rk3328_ops = {
+ .init = rk3328_init,
.set_to_rgmii = rk3328_set_to_rgmii,
- .set_to_rmii = rk3328_set_to_rmii,
- .set_speed = rk3328_set_speed,
.integrated_phy_powerup = rk3328_integrated_phy_powerup,
.integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
+
+ .gmac_phy_intf_sel_mask = GENMASK_U16(6, 4),
+ .gmac_rmii_mode_mask = BIT_U16(9),
+
+ .clock.rmii_clk_sel_mask = BIT_U16(7),
+ .clock.mac_speed_mask = BIT_U16(2),
+
+ .supports_rmii = true,
+
+ .regs_valid = true,
+ .regs = {
+ 0xff540000, /* gmac2io */
+ 0xff550000, /* gmac2phy */
+ 0, /* sentinel */
+ },
};
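The regs[] table is what assigns bsp_priv->id before rk3328_init() runs. A sketch of the assumed lookup (the matching logic lives in the setup code elsewhere in this driver and is not part of this hunk):

/* The index of the MAC base address that matches the platform
 * resource becomes bsp_priv->id: 0 = gmac2io, 1 = gmac2phy here.
 */
for (i = 0; ops->regs[i]; i++)
	if (ops->regs[i] == res->start)
		bsp_priv->id = i;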
#define RK3366_GRF_SOC_CON6 0x0418
#define RK3366_GRF_SOC_CON7 0x041c
/* RK3366_GRF_SOC_CON6 */
-#define RK3366_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3366_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3366_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
-#define RK3366_GMAC_SPEED_10M GRF_CLR_BIT(7)
-#define RK3366_GMAC_SPEED_100M GRF_BIT(7)
-#define RK3366_GMAC_RMII_CLK_25M GRF_BIT(3)
-#define RK3366_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3366_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
-#define RK3366_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
-#define RK3366_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
-#define RK3366_GMAC_RMII_MODE GRF_BIT(6)
-#define RK3366_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
/* RK3366_GRF_SOC_CON7 */
#define RK3366_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
@@ -644,59 +589,33 @@ static const struct rk_gmac_ops rk3328_ops = {
static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3366_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3366_GMAC_RMII_MODE);
-}
+static const struct rk_gmac_ops rk3366_ops = {
+ .set_to_rgmii = rk3366_set_to_rgmii,
-static const struct rk_reg_speed_data rk3366_reg_speed_data = {
- .rgmii_10 = RK3366_GMAC_CLK_2_5M,
- .rgmii_100 = RK3366_GMAC_CLK_25M,
- .rgmii_1000 = RK3366_GMAC_CLK_125M,
- .rmii_10 = RK3366_GMAC_RMII_CLK_2_5M | RK3366_GMAC_SPEED_10M,
- .rmii_100 = RK3366_GMAC_RMII_CLK_25M | RK3366_GMAC_SPEED_100M,
-};
+ .gmac_grf_reg = RK3366_GRF_SOC_CON6,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(11, 9),
+ .gmac_rmii_mode_mask = BIT_U16(6),
-static int rk3366_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3366_reg_speed_data,
- RK3366_GRF_SOC_CON6, interface, speed);
-}
+ .clock_grf_reg = RK3366_GRF_SOC_CON6,
+ .clock.gmii_clk_sel_mask = GENMASK_U16(5, 4),
+ .clock.rmii_clk_sel_mask = BIT_U16(3),
+ .clock.mac_speed_mask = BIT_U16(7),
-static const struct rk_gmac_ops rk3366_ops = {
- .set_to_rgmii = rk3366_set_to_rgmii,
- .set_to_rmii = rk3366_set_to_rmii,
- .set_speed = rk3366_set_speed,
+ .supports_rmii = true,
};
#define RK3368_GRF_SOC_CON15 0x043c
#define RK3368_GRF_SOC_CON16 0x0440
/* RK3368_GRF_SOC_CON15 */
-#define RK3368_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3368_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3368_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
-#define RK3368_GMAC_SPEED_10M GRF_CLR_BIT(7)
-#define RK3368_GMAC_SPEED_100M GRF_BIT(7)
-#define RK3368_GMAC_RMII_CLK_25M GRF_BIT(3)
-#define RK3368_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3368_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
-#define RK3368_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
-#define RK3368_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
-#define RK3368_GMAC_RMII_MODE GRF_BIT(6)
-#define RK3368_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
/* RK3368_GRF_SOC_CON16 */
#define RK3368_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
@@ -709,59 +628,33 @@ static const struct rk_gmac_ops rk3366_ops = {
static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3368_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3368_GMAC_RMII_MODE);
-}
+static const struct rk_gmac_ops rk3368_ops = {
+ .set_to_rgmii = rk3368_set_to_rgmii,
-static const struct rk_reg_speed_data rk3368_reg_speed_data = {
- .rgmii_10 = RK3368_GMAC_CLK_2_5M,
- .rgmii_100 = RK3368_GMAC_CLK_25M,
- .rgmii_1000 = RK3368_GMAC_CLK_125M,
- .rmii_10 = RK3368_GMAC_RMII_CLK_2_5M | RK3368_GMAC_SPEED_10M,
- .rmii_100 = RK3368_GMAC_RMII_CLK_25M | RK3368_GMAC_SPEED_100M,
-};
+ .gmac_grf_reg = RK3368_GRF_SOC_CON15,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(11, 9),
+ .gmac_rmii_mode_mask = BIT_U16(6),
-static int rk3368_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3368_reg_speed_data,
- RK3368_GRF_SOC_CON15, interface, speed);
-}
+ .clock_grf_reg = RK3368_GRF_SOC_CON15,
+ .clock.gmii_clk_sel_mask = GENMASK_U16(5, 4),
+ .clock.rmii_clk_sel_mask = BIT_U16(3),
+ .clock.mac_speed_mask = BIT_U16(7),
-static const struct rk_gmac_ops rk3368_ops = {
- .set_to_rgmii = rk3368_set_to_rgmii,
- .set_to_rmii = rk3368_set_to_rmii,
- .set_speed = rk3368_set_speed,
+ .supports_rmii = true,
};
#define RK3399_GRF_SOC_CON5 0xc214
#define RK3399_GRF_SOC_CON6 0xc218
/* RK3399_GRF_SOC_CON5 */
-#define RK3399_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3399_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3399_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
-#define RK3399_GMAC_SPEED_10M GRF_CLR_BIT(7)
-#define RK3399_GMAC_SPEED_100M GRF_BIT(7)
-#define RK3399_GMAC_RMII_CLK_25M GRF_BIT(3)
-#define RK3399_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3399_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
-#define RK3399_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
-#define RK3399_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
-#define RK3399_GMAC_RMII_MODE GRF_BIT(6)
-#define RK3399_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
/* RK3399_GRF_SOC_CON6 */
#define RK3399_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
@@ -774,41 +667,25 @@ static const struct rk_gmac_ops rk3368_ops = {
static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3399_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3399_GMAC_RMII_MODE);
-}
+static const struct rk_gmac_ops rk3399_ops = {
+ .set_to_rgmii = rk3399_set_to_rgmii,
-static const struct rk_reg_speed_data rk3399_reg_speed_data = {
- .rgmii_10 = RK3399_GMAC_CLK_2_5M,
- .rgmii_100 = RK3399_GMAC_CLK_25M,
- .rgmii_1000 = RK3399_GMAC_CLK_125M,
- .rmii_10 = RK3399_GMAC_RMII_CLK_2_5M | RK3399_GMAC_SPEED_10M,
- .rmii_100 = RK3399_GMAC_RMII_CLK_25M | RK3399_GMAC_SPEED_100M,
-};
+ .gmac_grf_reg = RK3399_GRF_SOC_CON5,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(11, 9),
+ .gmac_rmii_mode_mask = BIT_U16(6),
-static int rk3399_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3399_reg_speed_data,
- RK3399_GRF_SOC_CON5, interface, speed);
-}
+ .clock_grf_reg = RK3399_GRF_SOC_CON5,
+ .clock.gmii_clk_sel_mask = GENMASK_U16(5, 4),
+ .clock.rmii_clk_sel_mask = BIT_U16(3),
+ .clock.mac_speed_mask = BIT_U16(7),
-static const struct rk_gmac_ops rk3399_ops = {
- .set_to_rgmii = rk3399_set_to_rgmii,
- .set_to_rmii = rk3399_set_to_rmii,
- .set_speed = rk3399_set_speed,
+ .supports_rmii = true,
};
#define RK3506_GRF_SOC_CON8 0x0020
@@ -816,56 +693,32 @@ static const struct rk_gmac_ops rk3399_ops = {
#define RK3506_GMAC_RMII_MODE GRF_BIT(1)
-#define RK3506_GMAC_CLK_RMII_DIV2 GRF_BIT(3)
-#define RK3506_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(3)
-
-#define RK3506_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(5)
-#define RK3506_GMAC_CLK_SELECT_IO GRF_BIT(5)
-
-#define RK3506_GMAC_CLK_RMII_GATE GRF_BIT(2)
-#define RK3506_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(2)
-
-static void rk3506_set_to_rmii(struct rk_priv_data *bsp_priv)
+static int rk3506_init(struct rk_priv_data *bsp_priv)
{
- unsigned int id = bsp_priv->id, offset;
+ switch (bsp_priv->id) {
+ case 0:
+ bsp_priv->clock_grf_reg = RK3506_GRF_SOC_CON8;
+ return 0;
- offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8;
- regmap_write(bsp_priv->grf, offset, RK3506_GMAC_RMII_MODE);
-}
+ case 1:
+ bsp_priv->clock_grf_reg = RK3506_GRF_SOC_CON11;
+ return 0;
-static const struct rk_reg_speed_data rk3506_reg_speed_data = {
- .rmii_10 = RK3506_GMAC_CLK_RMII_DIV20,
- .rmii_100 = RK3506_GMAC_CLK_RMII_DIV2,
-};
-
-static int rk3506_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- unsigned int id = bsp_priv->id, offset;
-
- offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8;
- return rk_set_reg_speed(bsp_priv, &rk3506_reg_speed_data,
- offset, interface, speed);
+ default:
+ return -EINVAL;
+ }
}
-static void rk3506_set_clock_selection(struct rk_priv_data *bsp_priv,
- bool input, bool enable)
-{
- unsigned int value, offset, id = bsp_priv->id;
+static const struct rk_gmac_ops rk3506_ops = {
+ .init = rk3506_init,
- offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8;
+ .clock.io_clksel_io_mask = BIT_U16(5),
+ .clock.rmii_clk_sel_mask = BIT_U16(3),
+ .clock.rmii_gate_en_mask = BIT_U16(2),
+ .clock.rmii_mode_mask = BIT_U16(1),
- value = input ? RK3506_GMAC_CLK_SELECT_IO :
- RK3506_GMAC_CLK_SELECT_CRU;
- value |= enable ? RK3506_GMAC_CLK_RMII_NOGATE :
- RK3506_GMAC_CLK_RMII_GATE;
- regmap_write(bsp_priv->grf, offset, value);
-}
+ .supports_rmii = true,
-static const struct rk_gmac_ops rk3506_ops = {
- .set_to_rmii = rk3506_set_to_rmii,
- .set_speed = rk3506_set_speed,
- .set_clock_selection = rk3506_set_clock_selection,
.regs_valid = true,
.regs = {
0xff4c8000, /* gmac0 */
@@ -888,34 +741,35 @@ static const struct rk_gmac_ops rk3506_ops = {
#define RK3528_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(15, 8, val)
#define RK3528_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(7, 0, val)
-#define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1)
-#define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8)
-#define RK3528_GMAC1_PHY_INTF_SEL_RMII GRF_BIT(8)
-
-#define RK3528_GMAC1_CLK_SELECT_CRU GRF_CLR_BIT(12)
-#define RK3528_GMAC1_CLK_SELECT_IO GRF_BIT(12)
-
-#define RK3528_GMAC0_CLK_RMII_DIV2 GRF_BIT(3)
-#define RK3528_GMAC0_CLK_RMII_DIV20 GRF_CLR_BIT(3)
-#define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10)
-#define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10)
+static int rk3528_init(struct rk_priv_data *bsp_priv)
+{
+ switch (bsp_priv->id) {
+ case 0:
+ bsp_priv->clock_grf_reg = RK3528_VO_GRF_GMAC_CON;
+ bsp_priv->clock.rmii_clk_sel_mask = BIT_U16(3);
+ bsp_priv->clock.rmii_gate_en_mask = BIT_U16(2);
+ bsp_priv->clock.rmii_mode_mask = BIT_U16(1);
+ bsp_priv->supports_rgmii = false;
+ return 0;
+
+ case 1:
+ bsp_priv->clock_grf_reg = RK3528_VPU_GRF_GMAC_CON5;
+ bsp_priv->clock.io_clksel_io_mask = BIT_U16(12);
+ bsp_priv->clock.gmii_clk_sel_mask = GENMASK_U16(11, 10);
+ bsp_priv->clock.rmii_clk_sel_mask = BIT_U16(10);
+ bsp_priv->clock.rmii_gate_en_mask = BIT_U16(9);
+ bsp_priv->clock.rmii_mode_mask = BIT_U16(8);
+ return 0;
-#define RK3528_GMAC1_CLK_RGMII_DIV1 GRF_FIELD_CONST(11, 10, 0)
-#define RK3528_GMAC1_CLK_RGMII_DIV5 GRF_FIELD_CONST(11, 10, 3)
-#define RK3528_GMAC1_CLK_RGMII_DIV50 GRF_FIELD_CONST(11, 10, 2)
-
-#define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2)
-#define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2)
-#define RK3528_GMAC1_CLK_RMII_GATE GRF_BIT(9)
-#define RK3528_GMAC1_CLK_RMII_NOGATE GRF_CLR_BIT(9)
+ default:
+ return -EINVAL;
+ }
+}
static void rk3528_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
- RK3528_GMAC1_PHY_INTF_SEL_RGMII);
-
- regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
DELAY_ENABLE(RK3528, tx_delay, rx_delay));
regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON6,
@@ -923,65 +777,6 @@ static void rk3528_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3528_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3528_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- if (bsp_priv->id == 1)
- regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
- RK3528_GMAC1_PHY_INTF_SEL_RMII);
- else
- regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON,
- RK3528_GMAC0_PHY_INTF_SEL_RMII |
- RK3528_GMAC0_CLK_RMII_DIV2);
-}
-
-static const struct rk_reg_speed_data rk3528_gmac0_reg_speed_data = {
- .rmii_10 = RK3528_GMAC0_CLK_RMII_DIV20,
- .rmii_100 = RK3528_GMAC0_CLK_RMII_DIV2,
-};
-
-static const struct rk_reg_speed_data rk3528_gmac1_reg_speed_data = {
- .rgmii_10 = RK3528_GMAC1_CLK_RGMII_DIV50,
- .rgmii_100 = RK3528_GMAC1_CLK_RGMII_DIV5,
- .rgmii_1000 = RK3528_GMAC1_CLK_RGMII_DIV1,
- .rmii_10 = RK3528_GMAC1_CLK_RMII_DIV20,
- .rmii_100 = RK3528_GMAC1_CLK_RMII_DIV2,
-};
-
-static int rk3528_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- const struct rk_reg_speed_data *rsd;
- unsigned int reg;
-
- if (bsp_priv->id == 1) {
- rsd = &rk3528_gmac1_reg_speed_data;
- reg = RK3528_VPU_GRF_GMAC_CON5;
- } else {
- rsd = &rk3528_gmac0_reg_speed_data;
- reg = RK3528_VO_GRF_GMAC_CON;
- }
-
- return rk_set_reg_speed(bsp_priv, rsd, reg, interface, speed);
-}
-
-static void rk3528_set_clock_selection(struct rk_priv_data *bsp_priv,
- bool input, bool enable)
-{
- unsigned int val;
-
- if (bsp_priv->id == 1) {
- val = input ? RK3528_GMAC1_CLK_SELECT_IO :
- RK3528_GMAC1_CLK_SELECT_CRU;
- val |= enable ? RK3528_GMAC1_CLK_RMII_NOGATE :
- RK3528_GMAC1_CLK_RMII_GATE;
- regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, val);
- } else {
- val = enable ? RK3528_GMAC0_CLK_RMII_NOGATE :
- RK3528_GMAC0_CLK_RMII_GATE;
- regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON, val);
- }
-}
-
static void rk3528_integrated_phy_powerup(struct rk_priv_data *bsp_priv)
{
rk_gmac_integrated_fephy_powerup(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
@@ -993,12 +788,13 @@ static void rk3528_integrated_phy_powerdown(struct rk_priv_data *bsp_priv)
}
static const struct rk_gmac_ops rk3528_ops = {
+ .init = rk3528_init,
.set_to_rgmii = rk3528_set_to_rgmii,
- .set_to_rmii = rk3528_set_to_rmii,
- .set_speed = rk3528_set_speed,
- .set_clock_selection = rk3528_set_clock_selection,
.integrated_phy_powerup = rk3528_integrated_phy_powerup,
.integrated_phy_powerdown = rk3528_integrated_phy_powerdown,
+
+ .supports_rmii = true,
+
.regs_valid = true,
.regs = {
0xffbd0000, /* gmac0 */
@@ -1013,7 +809,6 @@ static const struct rk_gmac_ops rk3528_ops = {
#define RK3568_GRF_GMAC1_CON1 0x038c
/* RK3568_GRF_GMAC0_CON1 && RK3568_GRF_GMAC1_CON1 */
-#define RK3568_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3568_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3568_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3568_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -1025,6 +820,22 @@ static const struct rk_gmac_ops rk3528_ops = {
#define RK3568_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
#define RK3568_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
+static int rk3568_init(struct rk_priv_data *bsp_priv)
+{
+ switch (bsp_priv->id) {
+ case 0:
+ bsp_priv->gmac_grf_reg = RK3568_GRF_GMAC0_CON1;
+ return 0;
+
+ case 1:
+ bsp_priv->gmac_grf_reg = RK3568_GRF_GMAC1_CON1;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
@@ -1040,25 +851,19 @@ static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3568_GMAC_CLK_TX_DL_CFG(tx_delay));
regmap_write(bsp_priv->grf, con1,
- RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3568_GMAC_RXCLK_DLY_ENABLE |
RK3568_GMAC_TXCLK_DLY_ENABLE);
}
-static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- u32 con1;
-
- con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
- RK3568_GRF_GMAC0_CON1;
- regmap_write(bsp_priv->grf, con1,
- RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
-}
-
static const struct rk_gmac_ops rk3568_ops = {
+ .init = rk3568_init,
.set_to_rgmii = rk3568_set_to_rgmii,
- .set_to_rmii = rk3568_set_to_rmii,
.set_speed = rk_set_clk_mac_speed,
+
+ .gmac_phy_intf_sel_mask = GENMASK_U16(6, 4),
+
+ .supports_rmii = true,
+
.regs_valid = true,
.regs = {
0xfe2a0000, /* gmac0 */
@@ -1085,32 +890,29 @@ static const struct rk_gmac_ops rk3568_ops = {
#define RK3576_GRF_GMAC_CON0 0X0020
#define RK3576_GRF_GMAC_CON1 0X0024
-#define RK3576_GMAC_RMII_MODE GRF_BIT(3)
-#define RK3576_GMAC_RGMII_MODE GRF_CLR_BIT(3)
-
-#define RK3576_GMAC_CLK_SELECT_IO GRF_BIT(7)
-#define RK3576_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(7)
-
-#define RK3576_GMAC_CLK_RMII_DIV2 GRF_BIT(5)
-#define RK3576_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(5)
+static int rk3576_init(struct rk_priv_data *bsp_priv)
+{
+ switch (bsp_priv->id) {
+ case 0:
+ bsp_priv->gmac_grf_reg = RK3576_GRF_GMAC_CON0;
+ bsp_priv->clock_grf_reg = RK3576_GRF_GMAC_CON0;
+ return 0;
-#define RK3576_GMAC_CLK_RGMII_DIV1 GRF_FIELD_CONST(6, 5, 0)
-#define RK3576_GMAC_CLK_RGMII_DIV5 GRF_FIELD_CONST(6, 5, 3)
-#define RK3576_GMAC_CLK_RGMII_DIV50 GRF_FIELD_CONST(6, 5, 2)
+ case 1:
+ bsp_priv->gmac_grf_reg = RK3576_GRF_GMAC_CON1;
+ bsp_priv->clock_grf_reg = RK3576_GRF_GMAC_CON1;
+ return 0;
-#define RK3576_GMAC_CLK_RMII_GATE GRF_BIT(4)
-#define RK3576_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(4)
+ default:
+ return -EINVAL;
+ }
+}
static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
unsigned int offset_con;
- offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
- RK3576_GRF_GMAC_CON0;
-
- regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RGMII_MODE);
-
offset_con = bsp_priv->id == 1 ? RK3576_VCCIO0_1_3_IOC_CON4 :
RK3576_VCCIO0_1_3_IOC_CON2;
@@ -1129,57 +931,19 @@ static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3576_GMAC_CLK_RX_DL_CFG(rx_delay));
}
-static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- unsigned int offset_con;
-
- offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
- RK3576_GRF_GMAC_CON0;
-
- regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RMII_MODE);
-}
-
-static const struct rk_reg_speed_data rk3578_reg_speed_data = {
- .rgmii_10 = RK3576_GMAC_CLK_RGMII_DIV50,
- .rgmii_100 = RK3576_GMAC_CLK_RGMII_DIV5,
- .rgmii_1000 = RK3576_GMAC_CLK_RGMII_DIV1,
- .rmii_10 = RK3576_GMAC_CLK_RMII_DIV20,
- .rmii_100 = RK3576_GMAC_CLK_RMII_DIV2,
-};
-
-static int rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- unsigned int offset_con;
-
- offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
- RK3576_GRF_GMAC_CON0;
-
- return rk_set_reg_speed(bsp_priv, &rk3578_reg_speed_data, offset_con,
- interface, speed);
-}
-
-static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
- bool enable)
-{
- unsigned int val = input ? RK3576_GMAC_CLK_SELECT_IO :
- RK3576_GMAC_CLK_SELECT_CRU;
- unsigned int offset_con;
+static const struct rk_gmac_ops rk3576_ops = {
+ .init = rk3576_init,
+ .set_to_rgmii = rk3576_set_to_rgmii,
- val |= enable ? RK3576_GMAC_CLK_RMII_NOGATE :
- RK3576_GMAC_CLK_RMII_GATE;
+ .gmac_rmii_mode_mask = BIT_U16(3),
- offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
- RK3576_GRF_GMAC_CON0;
+ .clock.io_clksel_io_mask = BIT_U16(7),
+ .clock.gmii_clk_sel_mask = GENMASK_U16(6, 5),
+ .clock.rmii_clk_sel_mask = BIT_U16(5),
+ .clock.rmii_gate_en_mask = BIT_U16(4),
- regmap_write(bsp_priv->grf, offset_con, val);
-}
+ .supports_rmii = true,
-static const struct rk_gmac_ops rk3576_ops = {
- .set_to_rgmii = rk3576_set_to_rgmii,
- .set_to_rmii = rk3576_set_to_rmii,
- .set_speed = rk3576_set_gmac_speed,
- .set_clock_selection = rk3576_set_clock_selection,
.php_grf_required = true,
.regs_valid = true,
.regs = {
@@ -1206,27 +970,31 @@ static const struct rk_gmac_ops rk3576_ops = {
#define RK3588_GRF_GMAC_CON0 0X0008
#define RK3588_GRF_CLK_CON1 0X0070
-#define RK3588_GMAC_PHY_INTF_SEL(id, val) \
- (GRF_FIELD(5, 3, val) << ((id) * 6))
-
-#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
-#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
+static int rk3588_init(struct rk_priv_data *bsp_priv)
+{
+ switch (bsp_priv->id) {
+ case 0:
+ bsp_priv->gmac_phy_intf_sel_mask = GENMASK_U16(5, 3);
+ bsp_priv->clock.io_clksel_cru_mask = BIT_U16(4);
+ bsp_priv->clock.gmii_clk_sel_mask = GENMASK_U16(3, 2);
+ bsp_priv->clock.rmii_clk_sel_mask = BIT_U16(2);
+ bsp_priv->clock.rmii_gate_en_mask = BIT_U16(1);
+ bsp_priv->clock.rmii_mode_mask = BIT_U16(0);
+ return 0;
+
+ case 1:
+ bsp_priv->gmac_phy_intf_sel_mask = GENMASK_U16(11, 9);
+ bsp_priv->clock.io_clksel_cru_mask = BIT_U16(9);
+ bsp_priv->clock.gmii_clk_sel_mask = GENMASK_U16(8, 7);
+ bsp_priv->clock.rmii_clk_sel_mask = BIT_U16(7);
+ bsp_priv->clock.rmii_gate_en_mask = BIT_U16(6);
+ bsp_priv->clock.rmii_mode_mask = BIT_U16(5);
+ return 0;
-#define RK3588_GMAC_CLK_SELECT_CRU(id) GRF_BIT(5 * (id) + 4)
-#define RK3588_GMAC_CLK_SELECT_IO(id) GRF_CLR_BIT(5 * (id) + 4)
-
-#define RK3588_GMA_CLK_RMII_DIV2(id) GRF_BIT(5 * (id) + 2)
-#define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
-
-#define RK3588_GMAC_CLK_RGMII_DIV1(id) \
- (GRF_FIELD_CONST(3, 2, 0) << ((id) * 5))
-#define RK3588_GMAC_CLK_RGMII_DIV5(id) \
- (GRF_FIELD_CONST(3, 2, 3) << ((id) * 5))
-#define RK3588_GMAC_CLK_RGMII_DIV50(id) \
- (GRF_FIELD_CONST(3, 2, 2) << ((id) * 5))
-
-#define RK3588_GMAC_CLK_RMII_GATE(id) GRF_BIT(5 * (id) + 1)
-#define RK3588_GMAC_CLK_RMII_NOGATE(id) GRF_CLR_BIT(5 * (id) + 1)
+ default:
+ return -EINVAL;
+ }
+}
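These per-id masks can be cross-checked against the macros this hunk removes: the phy_intf_sel field shifts by 6 * id in GMAC_CON0, while the clock bits shift by 5 * id in CLK_CON1.

/*
 * id 0: intf_sel 5:3,  io_clksel_cru 4, clk_sel 3:2, gate 1, rmii 0
 * id 1: intf_sel 11:9, io_clksel_cru 9, clk_sel 8:7, gate 6, rmii 5
 */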
static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
@@ -1236,12 +1004,6 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
offset_con = bsp_priv->id == 1 ? RK3588_GRF_GMAC_CON9 :
RK3588_GRF_GMAC_CON8;
- regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
- RK3588_GMAC_PHY_INTF_SEL(id, PHY_INTF_SEL_RGMII));
-
- regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
- RK3588_GMAC_CLK_RGMII_MODE(id));
-
regmap_write(bsp_priv->grf, RK3588_GRF_GMAC_CON7,
RK3588_GMAC_RXCLK_DLY_ENABLE(id) |
RK3588_GMAC_TXCLK_DLY_ENABLE(id));
@@ -1251,67 +1013,18 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3588_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
- RK3588_GMAC_PHY_INTF_SEL(bsp_priv->id, PHY_INTF_SEL_RMII));
-
- regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
- RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id));
-}
-
-static int rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- unsigned int val = 0, id = bsp_priv->id;
-
- switch (speed) {
- case 10:
- if (interface == PHY_INTERFACE_MODE_RMII)
- val = RK3588_GMA_CLK_RMII_DIV20(id);
- else
- val = RK3588_GMAC_CLK_RGMII_DIV50(id);
- break;
- case 100:
- if (interface == PHY_INTERFACE_MODE_RMII)
- val = RK3588_GMA_CLK_RMII_DIV2(id);
- else
- val = RK3588_GMAC_CLK_RGMII_DIV5(id);
- break;
- case 1000:
- if (interface != PHY_INTERFACE_MODE_RMII)
- val = RK3588_GMAC_CLK_RGMII_DIV1(id);
- else
- goto err;
- break;
- default:
- goto err;
- }
-
- regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
-
- return 0;
-err:
- return -EINVAL;
-}
+static const struct rk_gmac_ops rk3588_ops = {
+ .init = rk3588_init,
+ .set_to_rgmii = rk3588_set_to_rgmii,
-static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
- bool enable)
-{
- unsigned int val = input ? RK3588_GMAC_CLK_SELECT_IO(bsp_priv->id) :
- RK3588_GMAC_CLK_SELECT_CRU(bsp_priv->id);
+ .gmac_grf_reg_in_php = true,
+ .gmac_grf_reg = RK3588_GRF_GMAC_CON0,
- val |= enable ? RK3588_GMAC_CLK_RMII_NOGATE(bsp_priv->id) :
- RK3588_GMAC_CLK_RMII_GATE(bsp_priv->id);
+ .clock_grf_reg_in_php = true,
+ .clock_grf_reg = RK3588_GRF_CLK_CON1,
- regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
-}
+ .supports_rmii = true,
-static const struct rk_gmac_ops rk3588_ops = {
- .set_to_rgmii = rk3588_set_to_rgmii,
- .set_to_rmii = rk3588_set_to_rmii,
- .set_speed = rk3588_set_gmac_speed,
- .set_clock_selection = rk3588_set_clock_selection,
.php_grf_required = true,
.regs_valid = true,
.regs = {
@@ -1324,35 +1037,18 @@ static const struct rk_gmac_ops rk3588_ops = {
#define RV1108_GRF_GMAC_CON0 0X0900
/* RV1108_GRF_GMAC_CON0 */
-#define RV1108_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RV1108_GMAC_FLOW_CTRL GRF_BIT(3)
#define RV1108_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
-#define RV1108_GMAC_SPEED_10M GRF_CLR_BIT(2)
-#define RV1108_GMAC_SPEED_100M GRF_BIT(2)
-#define RV1108_GMAC_RMII_CLK_25M GRF_BIT(7)
-#define RV1108_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
-}
+static const struct rk_gmac_ops rv1108_ops = {
+ .gmac_grf_reg = RV1108_GRF_GMAC_CON0,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(6, 4),
-static const struct rk_reg_speed_data rv1108_reg_speed_data = {
- .rmii_10 = RV1108_GMAC_RMII_CLK_2_5M | RV1108_GMAC_SPEED_10M,
- .rmii_100 = RV1108_GMAC_RMII_CLK_25M | RV1108_GMAC_SPEED_100M,
-};
+ .clock_grf_reg = RV1108_GRF_GMAC_CON0,
+ .clock.rmii_clk_sel_mask = BIT_U16(7),
+ .clock.mac_speed_mask = BIT_U16(2),
-static int rv1108_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rv1108_reg_speed_data,
- RV1108_GRF_GMAC_CON0, interface, speed);
-}
-
-static const struct rk_gmac_ops rv1108_ops = {
- .set_to_rmii = rv1108_set_to_rmii,
- .set_speed = rv1108_set_speed,
+ .supports_rmii = true,
};
#define RV1126_GRF_GMAC_CON0 0X0070
@@ -1360,7 +1056,6 @@ static const struct rk_gmac_ops rv1108_ops = {
#define RV1126_GRF_GMAC_CON2 0X0078
/* RV1126_GRF_GMAC_CON0 */
-#define RV1126_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RV1126_GMAC_FLOW_CTRL GRF_BIT(7)
#define RV1126_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(7)
#define RV1126_GMAC_M0_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -1383,7 +1078,6 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
- RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
RV1126_GMAC_M0_TXCLK_DLY_ENABLE |
RV1126_GMAC_M1_RXCLK_DLY_ENABLE |
@@ -1398,16 +1092,14 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
RV1126_GMAC_M1_CLK_TX_DL_CFG(tx_delay));
}
-static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
- RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
-}
-
static const struct rk_gmac_ops rv1126_ops = {
.set_to_rgmii = rv1126_set_to_rgmii,
- .set_to_rmii = rv1126_set_to_rmii,
.set_speed = rk_set_clk_mac_speed,
+
+ .gmac_grf_reg = RV1126_GRF_GMAC_CON0,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(6, 4),
+
+ .supports_rmii = true,
};
static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
@@ -1473,19 +1165,15 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
if (ret)
return ret;
- if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
- bsp_priv->ops->set_clock_selection(bsp_priv,
- bsp_priv->clock_input, true);
+ rk_configure_io_clksel(bsp_priv);
+ rk_ungate_rmii_clock(bsp_priv);
mdelay(5);
bsp_priv->clk_enabled = true;
}
} else {
if (bsp_priv->clk_enabled) {
- if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) {
- bsp_priv->ops->set_clock_selection(bsp_priv,
- bsp_priv->clock_input, false);
- }
+ rk_gate_rmii_clock(bsp_priv);
clk_bulk_disable_unprepare(bsp_priv->num_clks,
bsp_priv->clks);
@@ -1498,23 +1186,26 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
return 0;
}
-static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
+static int rk_phy_powerup(struct rk_priv_data *bsp_priv)
{
struct regulator *ldo = bsp_priv->regulator;
- struct device *dev = bsp_priv->dev;
int ret;
- if (enable) {
- ret = regulator_enable(ldo);
- if (ret)
- dev_err(dev, "fail to enable phy-supply\n");
- } else {
- ret = regulator_disable(ldo);
- if (ret)
- dev_err(dev, "fail to disable phy-supply\n");
- }
+ ret = regulator_enable(ldo);
+ if (ret)
+ dev_err(bsp_priv->dev, "fail to enable phy-supply\n");
- return 0;
+ return ret;
+}
+
+static void rk_phy_powerdown(struct rk_priv_data *bsp_priv)
+{
+ struct regulator *ldo = bsp_priv->regulator;
+ int ret;
+
+ ret = regulator_disable(ldo);
+ if (ret)
+ dev_err(bsp_priv->dev, "fail to disable phy-supply\n");
}
static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
@@ -1628,6 +1319,31 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->dev = dev;
+ /* Set the default phy_intf_sel and RMII mode register parameters. */
+ bsp_priv->gmac_grf_reg = ops->gmac_grf_reg;
+ bsp_priv->gmac_phy_intf_sel_mask = ops->gmac_phy_intf_sel_mask;
+ bsp_priv->gmac_rmii_mode_mask = ops->gmac_rmii_mode_mask;
+
+ /* Set the default clock control register related parameters */
+ bsp_priv->clock_grf_reg = ops->clock_grf_reg;
+ bsp_priv->clock = ops->clock;
+
+ bsp_priv->supports_rgmii = ops->supports_rgmii || !!ops->set_to_rgmii;
+ bsp_priv->supports_rmii = ops->supports_rmii || !!ops->set_to_rmii;
+
+ if (ops->init) {
+ ret = ops->init(bsp_priv);
+ if (ret) {
+ reset_control_put(bsp_priv->phy_reset);
+ dev_err_probe(dev, ret, "failed to init BSP\n");
+ return ERR_PTR(ret);
+ }
+ }
+
+ if (bsp_priv->clock.io_clksel_cru_mask &&
+ bsp_priv->clock.io_clksel_io_mask)
+ dev_warn(dev, "both CRU and IO io_clksel masks should not be populated - driver may malfunction\n");
+
return bsp_priv;
}
@@ -1638,11 +1354,11 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- if (!bsp_priv->ops->set_to_rgmii)
+ if (!bsp_priv->supports_rgmii)
return -EINVAL;
break;
case PHY_INTERFACE_MODE_RMII:
- if (!bsp_priv->ops->set_to_rmii)
+ if (!bsp_priv->supports_rmii)
return -EINVAL;
break;
default:
@@ -1655,44 +1371,87 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
{
struct device *dev = bsp_priv->dev;
+ u32 val;
int ret;
+ u8 intf;
ret = rk_gmac_check_ops(bsp_priv);
if (ret)
return ret;
+ ret = rk_get_phy_intf_sel(bsp_priv->phy_iface);
+ if (ret < 0)
+ return ret;
+
+ intf = ret;
+
ret = gmac_clk_enable(bsp_priv, true);
if (ret)
return ret;
+ if (bsp_priv->gmac_phy_intf_sel_mask ||
+ bsp_priv->gmac_rmii_mode_mask) {
+ /* If defined, encode the phy_intf_sel value */
+ val = rk_encode_wm16(intf, bsp_priv->gmac_phy_intf_sel_mask);
+
+ /* If defined, encode the RMII mode mask setting. */
+ val |= rk_encode_wm16(intf == PHY_INTF_SEL_RMII,
+ bsp_priv->gmac_rmii_mode_mask);
+
+ ret = rk_write_gmac_grf_reg(bsp_priv, val);
+ if (ret < 0) {
+ gmac_clk_enable(bsp_priv, false);
+ return ret;
+ }
+ }
+
+ if (bsp_priv->clock.rmii_mode_mask) {
+ val = rk_encode_wm16(intf == PHY_INTF_SEL_RMII,
+ bsp_priv->clock.rmii_mode_mask);
+
+ ret = rk_write_clock_grf_reg(bsp_priv, val);
+ if (ret < 0) {
+ gmac_clk_enable(bsp_priv, false);
+ return ret;
+ }
+ }
+
/*rmii or rgmii*/
switch (bsp_priv->phy_iface) {
case PHY_INTERFACE_MODE_RGMII:
dev_info(dev, "init for RGMII\n");
- bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
- bsp_priv->rx_delay);
+ if (bsp_priv->ops->set_to_rgmii)
+ bsp_priv->ops->set_to_rgmii(bsp_priv,
+ bsp_priv->tx_delay,
+ bsp_priv->rx_delay);
break;
case PHY_INTERFACE_MODE_RGMII_ID:
dev_info(dev, "init for RGMII_ID\n");
- bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
+ if (bsp_priv->ops->set_to_rgmii)
+ bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
break;
case PHY_INTERFACE_MODE_RGMII_RXID:
dev_info(dev, "init for RGMII_RXID\n");
- bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0);
+ if (bsp_priv->ops->set_to_rgmii)
+ bsp_priv->ops->set_to_rgmii(bsp_priv,
+ bsp_priv->tx_delay, 0);
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
dev_info(dev, "init for RGMII_TXID\n");
- bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay);
+ if (bsp_priv->ops->set_to_rgmii)
+ bsp_priv->ops->set_to_rgmii(bsp_priv,
+ 0, bsp_priv->rx_delay);
break;
case PHY_INTERFACE_MODE_RMII:
dev_info(dev, "init for RMII\n");
- bsp_priv->ops->set_to_rmii(bsp_priv);
+ if (bsp_priv->ops->set_to_rmii)
+ bsp_priv->ops->set_to_rmii(bsp_priv);
break;
default:
dev_err(dev, "NO interface defined!\n");
}
- ret = phy_power_on(bsp_priv, true);
+ ret = rk_phy_powerup(bsp_priv);
if (ret) {
gmac_clk_enable(bsp_priv, false);
return ret;
@@ -1713,7 +1472,7 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
pm_runtime_put_sync(gmac->dev);
- phy_power_on(gmac, false);
+ rk_phy_powerdown(gmac);
gmac_clk_enable(gmac, false);
}
@@ -1722,10 +1481,10 @@ static void rk_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
{
struct rk_priv_data *rk = bsp_priv;
- if (rk->ops->set_to_rgmii)
+ if (rk->supports_rgmii)
phy_interface_set_rgmii(interfaces);
- if (rk->ops->set_to_rmii)
+ if (rk->supports_rmii)
__set_bit(PHY_INTERFACE_MODE_RMII, interfaces);
}
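
The ops-pointer probes are replaced here by plain capability flags; a minimal sketch of the pattern, with hypothetical boolean parameters standing in for the rk_priv_data fields:

#include <linux/phy.h>

static void example_get_interfaces(unsigned long *interfaces,
				   bool rgmii, bool rmii)
{
	/* phy_interface_set_rgmii() sets all four RGMII variants. */
	if (rgmii)
		phy_interface_set_rgmii(interfaces);
	if (rmii)
		__set_bit(PHY_INTERFACE_MODE_RMII, interfaces);
}
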
@@ -1733,11 +1492,37 @@ static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
phy_interface_t interface, int speed)
{
struct rk_priv_data *bsp_priv = bsp_priv_;
+ int ret = -EINVAL;
+ bool is_100m;
+ u32 val;
+
+ if (bsp_priv->ops->set_speed) {
+ ret = bsp_priv->ops->set_speed(bsp_priv, interface, speed);
+ if (ret < 0)
+ return ret;
+ }
- if (bsp_priv->ops->set_speed)
- return bsp_priv->ops->set_speed(bsp_priv, interface, speed);
+ if (phy_interface_mode_is_rgmii(interface) &&
+ bsp_priv->clock.gmii_clk_sel_mask) {
+ ret = rk_gmac_rgmii_clk_div(speed);
+ if (ret < 0)
+ return ret;
- return -EINVAL;
+ val = rk_encode_wm16(ret, bsp_priv->clock.gmii_clk_sel_mask);
+
+ ret = rk_write_clock_grf_reg(bsp_priv, val);
+ } else if (interface == PHY_INTERFACE_MODE_RMII &&
+ (bsp_priv->clock.rmii_clk_sel_mask ||
+ bsp_priv->clock.mac_speed_mask)) {
+ is_100m = speed == SPEED_100;
+ val = rk_encode_wm16(is_100m, bsp_priv->clock.mac_speed_mask) |
+ rk_encode_wm16(is_100m,
+ bsp_priv->clock.rmii_clk_sel_mask);
+
+ ret = rk_write_clock_grf_reg(bsp_priv, val);
+ }
+
+ return ret;
}
static int rk_gmac_suspend(struct device *dev, void *bsp_priv_)
@@ -1776,6 +1561,8 @@ static void rk_gmac_exit(struct device *dev, void *bsp_priv_)
if (priv->plat->phy_node && bsp_priv->integrated_phy)
clk_put(bsp_priv->clk_phy);
+
+ reset_control_put(bsp_priv->phy_reset);
}
static int rk_gmac_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
index 5a485ee98fa7..af594a096676 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -11,12 +11,14 @@
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/stmmac.h>
#include "stmmac_platform.h"
@@ -32,6 +34,8 @@
struct s32_priv_data {
void __iomem *ioaddr;
void __iomem *ctrl_sts;
+ struct regmap *sts_regmap;
+ unsigned int sts_offset;
struct device *dev;
phy_interface_t *intf_mode;
struct clk *tx_clk;
@@ -40,11 +44,17 @@ struct s32_priv_data {
static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac)
{
- writel(S32_PHY_INTF_SEL_RGMII, gmac->ctrl_sts);
+ int ret = 0;
+
+ if (gmac->ctrl_sts)
+ writel(S32_PHY_INTF_SEL_RGMII, gmac->ctrl_sts);
+ else
+ ret = regmap_write(gmac->sts_regmap, gmac->sts_offset,
+ S32_PHY_INTF_SEL_RGMII);
dev_dbg(gmac->dev, "PHY mode set to %s\n", phy_modes(*gmac->intf_mode));
- return 0;
+ return ret;
}
static int s32_gmac_init(struct device *dev, void *priv)
@@ -125,10 +135,16 @@ static int s32_dwmac_probe(struct platform_device *pdev)
"dt configuration failed\n");
/* PHY interface mode control reg */
- gmac->ctrl_sts = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
- if (IS_ERR(gmac->ctrl_sts))
- return dev_err_probe(dev, PTR_ERR(gmac->ctrl_sts),
- "S32CC config region is missing\n");
+ gmac->sts_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "nxp,phy-sel", 1, &gmac->sts_offset);
+ if (gmac->sts_regmap == ERR_PTR(-EPROBE_DEFER))
+ return PTR_ERR(gmac->sts_regmap);
+ if (IS_ERR(gmac->sts_regmap)) {
+ gmac->ctrl_sts = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ if (IS_ERR(gmac->ctrl_sts))
+ return dev_err_probe(dev, PTR_ERR(gmac->ctrl_sts),
+ "S32CC config region is missing\n");
+ }
/* tx clock */
gmac->tx_clk = devm_clk_get(&pdev->dev, "tx");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index a2b52d2c4eb6..4c8991f3b38d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -367,9 +367,8 @@ static int smtg_crosststamp(ktime_t *device, struct system_counterval_t *system,
.use_nsecs = false,
};
- num_snapshot = (readl(ioaddr + XGMAC_TIMESTAMP_STATUS) &
- XGMAC_TIMESTAMP_ATSNS_MASK) >>
- XGMAC_TIMESTAMP_ATSNS_SHIFT;
+ num_snapshot = FIELD_GET(XGMAC_TIMESTAMP_ATSNS_MASK,
+ readl(ioaddr + XGMAC_TIMESTAMP_STATUS));
/* Repeat until the timestamps are from the FIFO last segment */
for (i = 0; i < num_snapshot; i++) {
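
FIELD_GET() derives the shift from the mask at compile time, which is what lets the *_SHIFT defines be deleted throughout this series. An illustrative equivalent (mask value made up for the example):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_ATSNS_MASK	GENMASK(29, 25)	/* illustrative only */

static inline u32 example_num_snapshots(u32 status)
{
	/* Same result as (status & EXAMPLE_ATSNS_MASK) >> 25. */
	return FIELD_GET(EXAMPLE_ATSNS_MASK, status);
}
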
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 8aa496ac85cc..c01b86fd64da 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -746,7 +746,7 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv)
v = readl(priv->ioaddr + EMAC_BASIC_CTL1);
writel(v | 0x01, priv->ioaddr + EMAC_BASIC_CTL1);
- /* The timeout was previoulsy set to 10ms, but some board (OrangePI0)
+ /* The timeout was previously set to 10ms, but some board (OrangePI0)
* need more if no cable plugged. 100ms seems OK
*/
err = readl_poll_timeout(priv->ioaddr + EMAC_BASIC_CTL1, v,
@@ -821,7 +821,7 @@ static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
return ret;
}
- /* Make sure the EPHY is properly reseted, as U-Boot may leave
+ /* Make sure the EPHY is properly reset, as U-Boot may leave
* it at deasserted state, and thus it may fail to reset EMAC.
*
* This assumes the driver has exclusive access to the EPHY reset.
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
index e291028ba56e..0d46a6c3f077 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -37,9 +37,9 @@
#define GMAC_GTXCLK_SEL 0x18
#define GMAC_GTXCLK_SEL_PLL BIT(0)
#define GMAC_INTF_CTRL 0x1c
-#define PHY_INTF_MASK BIT(0)
-#define PHY_INTF_RGMII FIELD_PREP(PHY_INTF_MASK, 1)
-#define PHY_INTF_MII_GMII FIELD_PREP(PHY_INTF_MASK, 0)
+#define GMAC_INTF_MASK BIT(0)
+#define GMAC_INTF_RGMII FIELD_PREP(GMAC_INTF_MASK, 1)
+#define GMAC_INTF_MII_GMII FIELD_PREP(GMAC_INTF_MASK, 0)
#define GMAC_TXCLK_OEN 0x20
#define TXCLK_DIR_MASK BIT(0)
#define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0)
@@ -58,13 +58,13 @@ static int thead_dwmac_set_phy_if(struct plat_stmmacenet_data *plat)
switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
- phyif = PHY_INTF_MII_GMII;
+ phyif = GMAC_INTF_MII_GMII;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
- phyif = PHY_INTF_RGMII;
+ phyif = GMAC_INTF_RGMII;
break;
default:
dev_err(dwmac->dev, "unsupported phy interface %s\n",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 7ab791c8d355..547863cb982f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -30,62 +30,30 @@
#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */
/* MAC CTRL defines */
-#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */
-#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */
#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */
#define MAC_CONTROL_PS 0x08000000 /* Port Select */
-#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */
-#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */
#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */
#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */
#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */
#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */
#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */
-#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */
#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */
#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */
-#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */
-#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */
-#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */
-#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */
-#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */
-#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */
-#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */
-#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */
-#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */
-#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
-#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define MAC_CORE_INIT (MAC_CONTROL_HBD)
/* MAC FLOW CTRL defines */
-#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
-#define MAC_FLOW_CTRL_PT_SHIFT 16
-#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */
+#define MAC_FLOW_CTRL_PT_MASK GENMASK(31, 16) /* Pause Time Mask */
#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */
-#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */
-
-/* MII ADDR defines */
-#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
-#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
/*----------------------------------------------------------------------------
* DMA BLOCK defines
*---------------------------------------------------------------------------*/
/* DMA Bus Mode register defines */
-#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */
-#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */
-#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
-#define DMA_BUS_MODE_PBL_SHIFT 8
-#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
-#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
-#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
+#define DMA_BUS_MODE_PBL_MASK GENMASK(13, 8) /* Programmable Burst Len */
#define DMA_BUS_MODE_DEFAULT 0x00000000
-/* DMA Control register defines */
-#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */
-
/* Transmit Threshold Control */
enum ttc_control {
DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */
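
The GENMASK() conversions are value-preserving; a quick compile-time spot check of two of the masks rewritten above (illustrative, not part of the patch):

#include <linux/bits.h>
#include <linux/build_bug.h>

static_assert(GENMASK(31, 16) == 0xffff0000);	/* MAC_FLOW_CTRL_PT_MASK */
static_assert(GENMASK(13, 8) == 0x00003f00);	/* DMA_BUS_MODE_PBL_MASK */
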
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 697bba641e05..9fe639fb06bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -20,15 +20,11 @@
#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
#define GMAC_DEBUG 0x00000024 /* GMAC debug register */
-#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
-#define GMAC_INT_STATUS_PMT BIT(3)
-#define GMAC_INT_STATUS_MMCIS BIT(4)
#define GMAC_INT_STATUS_MMCRIS BIT(5)
#define GMAC_INT_STATUS_MMCTIS BIT(6)
#define GMAC_INT_STATUS_MMCCSUM BIT(7)
-#define GMAC_INT_STATUS_TSTAMP BIT(9)
#define GMAC_INT_STATUS_LPIIS BIT(10)
/* interrupt mask register */
@@ -76,7 +72,6 @@ enum power_event {
/* SGMII/RGMII status register */
#define GMAC_RGSMIIIS_LNKMODE BIT(0)
#define GMAC_RGSMIIIS_SPEED GENMASK(2, 1)
-#define GMAC_RGSMIIIS_SPEED_SHIFT 1
#define GMAC_RGSMIIIS_LNKSTS BIT(3)
#define GMAC_RGSMIIIS_JABTO BIT(4)
#define GMAC_RGSMIIIS_FALSECARDET BIT(5)
@@ -90,8 +85,6 @@ enum power_event {
/* GMAC Configuration defines */
#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */
-#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
-#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */
#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
@@ -103,42 +96,25 @@ enum inter_frame_gap {
#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */
#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */
#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
-#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
-#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
-#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
-#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */
-#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
-#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
-#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | \
GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
/* GMAC Frame Filter defines */
#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
-#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
-#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
-#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
#define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */
-#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
-#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
-/* GMII ADDR defines */
-#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
-#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
/* GMAC FLOW CTRL defines */
-#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
-#define GMAC_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_FLOW_CTRL_PT_MASK GENMASK(31, 16) /* Pause Time Mask */
#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */
#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
-#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
/* DEBUG Register defines */
/* MTL TxStatus FIFO */
@@ -147,29 +123,23 @@ enum inter_frame_gap {
#define GMAC_DEBUG_TWCSTS BIT(22) /* MTL Tx FIFO Write Controller */
/* MTL Tx FIFO Read Controller Status */
#define GMAC_DEBUG_TRCSTS_MASK GENMASK(21, 20)
-#define GMAC_DEBUG_TRCSTS_SHIFT 20
-#define GMAC_DEBUG_TRCSTS_IDLE 0
#define GMAC_DEBUG_TRCSTS_READ 1
#define GMAC_DEBUG_TRCSTS_TXW 2
#define GMAC_DEBUG_TRCSTS_WRITE 3
#define GMAC_DEBUG_TXPAUSED BIT(19) /* MAC Transmitter in PAUSE */
/* MAC Transmit Frame Controller Status */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
-#define GMAC_DEBUG_TFCSTS_SHIFT 17
-#define GMAC_DEBUG_TFCSTS_IDLE 0
#define GMAC_DEBUG_TFCSTS_WAIT 1
#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2
#define GMAC_DEBUG_TFCSTS_XFER 3
/* MAC GMII or MII Transmit Protocol Engine Status */
#define GMAC_DEBUG_TPESTS BIT(16)
#define GMAC_DEBUG_RXFSTS_MASK GENMASK(9, 8) /* MTL Rx FIFO Fill-level */
-#define GMAC_DEBUG_RXFSTS_SHIFT 8
#define GMAC_DEBUG_RXFSTS_EMPTY 0
#define GMAC_DEBUG_RXFSTS_BT 1
#define GMAC_DEBUG_RXFSTS_AT 2
#define GMAC_DEBUG_RXFSTS_FULL 3
#define GMAC_DEBUG_RRCSTS_MASK GENMASK(6, 5) /* MTL Rx FIFO Read Controller */
-#define GMAC_DEBUG_RRCSTS_SHIFT 5
#define GMAC_DEBUG_RRCSTS_IDLE 0
#define GMAC_DEBUG_RRCSTS_RDATA 1
#define GMAC_DEBUG_RRCSTS_RSTAT 2
@@ -177,18 +147,13 @@ enum inter_frame_gap {
#define GMAC_DEBUG_RWCSTS BIT(4) /* MTL Rx FIFO Write Controller Active */
/* MAC Receive Frame Controller FIFO Status */
#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1)
-#define GMAC_DEBUG_RFCFCSTS_SHIFT 1
/* MAC GMII or MII Receive Protocol Engine Status */
#define GMAC_DEBUG_RPESTS BIT(0)
/*--- DMA BLOCK defines ---*/
/* DMA Bus Mode register defines */
-#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
-#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
-#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
-/* Programmable burst length (passed thorugh platform)*/
-#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
-#define DMA_BUS_MODE_PBL_SHIFT 8
+/* Programmable burst length (passed through platform)*/
+#define DMA_BUS_MODE_PBL_MASK GENMASK(13, 8) /* Programmable Burst Len */
#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
enum rx_tx_priority_ratio {
@@ -199,23 +164,15 @@ enum rx_tx_priority_ratio {
#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
#define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */
-#define DMA_BUS_MODE_RPBL_MASK 0x007e0000 /* Rx-Programmable Burst Len */
-#define DMA_BUS_MODE_RPBL_SHIFT 17
+#define DMA_BUS_MODE_RPBL_MASK GENMASK(22, 17) /* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_USP 0x00800000
#define DMA_BUS_MODE_MAXPBL 0x01000000
#define DMA_BUS_MODE_AAL 0x02000000
/* DMA CRS Control and Status Register Mapping */
-#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */
-#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */
-/* DMA Bus Mode register defines */
-#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
-#define DMA_BUS_PR_RATIO_SHIFT 14
-#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
/* Disable Drop TCP/IP csum error */
-#define DMA_CONTROL_DT 0x04000000
#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
#define DMA_CONTROL_DFF 0x01000000 /* Disable flushing */
/* Threshold for Activating the FC */
@@ -247,8 +204,6 @@ enum ttc_control {
#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff
#define DMA_CONTROL_EFC 0x00000100
-#define DMA_CONTROL_FEF 0x00000080
-#define DMA_CONTROL_FUF 0x00000040
/* Receive flow control activation field
* RFA field in DMA control register, bits 23,10:9
@@ -285,20 +240,8 @@ enum ttc_control {
*/
#define RFA_FULL_MINUS_1K 0x00000000
-#define RFA_FULL_MINUS_2K 0x00000200
-#define RFA_FULL_MINUS_3K 0x00000400
-#define RFA_FULL_MINUS_4K 0x00000600
-#define RFA_FULL_MINUS_5K 0x00800000
-#define RFA_FULL_MINUS_6K 0x00800200
-#define RFA_FULL_MINUS_7K 0x00800400
-
-#define RFD_FULL_MINUS_1K 0x00000000
+
#define RFD_FULL_MINUS_2K 0x00000800
-#define RFD_FULL_MINUS_3K 0x00001000
-#define RFD_FULL_MINUS_4K 0x00001800
-#define RFD_FULL_MINUS_5K 0x00400000
-#define RFD_FULL_MINUS_6K 0x00400800
-#define RFD_FULL_MINUS_7K 0x00401000
enum rtc_control {
DMA_CONTROL_RTC_64 = 0x00000000,
@@ -311,16 +254,11 @@ enum rtc_control {
#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */
/* MMC registers offset */
-#define GMAC_MMC_CTRL 0x100
-#define GMAC_MMC_RX_INTR 0x104
-#define GMAC_MMC_TX_INTR 0x108
-#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
#define GMAC_EXTHASH_BASE 0x500
/* PTP and timestamping registers */
#define GMAC3_X_ATSNS GENMASK(29, 25)
-#define GMAC3_X_ATSNS_SHIFT 25
#define GMAC_PTP_TCR_ATSFC BIT(24)
#define GMAC_PTP_TCR_ATSEN0 BIT(25)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index a2ae136d2c0e..af566636fad9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -242,7 +242,7 @@ static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
if (duplex) {
pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
- flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+ flow |= FIELD_PREP(GMAC_FLOW_CTRL_PT_MASK, pause_time);
}
writel(flow, ioaddr + GMAC_FLOW_CTRL);
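
FIELD_PREP() is the insertion counterpart: it shifts the value under the mask, replacing the old explicit (pause_time << 16). A minimal sketch with an illustrative mask:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_PT_MASK	GENMASK(31, 16)	/* illustrative only */

static inline u32 example_pause_word(u16 pause_time)
{
	/* Same result as ((u32)pause_time << 16). */
	return FIELD_PREP(EXAMPLE_PT_MASK, pause_time);
}
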
@@ -265,10 +265,10 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
writel(pmt, ioaddr + GMAC_PMT);
}
-static int dwmac1000_irq_status(struct mac_device_info *hw,
+static int dwmac1000_irq_status(struct stmmac_priv *priv,
struct stmmac_extra_stats *x)
{
- void __iomem *ioaddr = hw->pcsr;
+ void __iomem *ioaddr = priv->hw->pcsr;
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
int ret = 0;
@@ -304,7 +304,8 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
x->irq_rx_path_exit_lpi_mode_n++;
}
- dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
+ if (intr_status & (PCS_ANE_IRQ | PCS_LINK_IRQ))
+ stmmac_integrated_pcs_irq(priv, intr_status, x);
return ret;
}
@@ -378,8 +379,8 @@ static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
if (value & GMAC_DEBUG_TWCSTS)
x->mmtl_fifo_ctrl++;
if (value & GMAC_DEBUG_TRCSTS_MASK) {
- u32 trcsts = (value & GMAC_DEBUG_TRCSTS_MASK)
- >> GMAC_DEBUG_TRCSTS_SHIFT;
+ u32 trcsts = FIELD_GET(GMAC_DEBUG_TRCSTS_MASK, value);
+
if (trcsts == GMAC_DEBUG_TRCSTS_WRITE)
x->mtl_tx_fifo_read_ctrl_write++;
else if (trcsts == GMAC_DEBUG_TRCSTS_TXW)
@@ -392,8 +393,7 @@ static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
if (value & GMAC_DEBUG_TXPAUSED)
x->mac_tx_in_pause++;
if (value & GMAC_DEBUG_TFCSTS_MASK) {
- u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
- >> GMAC_DEBUG_TFCSTS_SHIFT;
+ u32 tfcsts = FIELD_GET(GMAC_DEBUG_TFCSTS_MASK, value);
if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
x->mac_tx_frame_ctrl_xfer++;
@@ -407,8 +407,7 @@ static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
if (value & GMAC_DEBUG_TPESTS)
x->mac_gmii_tx_proto_engine++;
if (value & GMAC_DEBUG_RXFSTS_MASK) {
- u32 rxfsts = (value & GMAC_DEBUG_RXFSTS_MASK)
- >> GMAC_DEBUG_RRCSTS_SHIFT;
+ u32 rxfsts = FIELD_GET(GMAC_DEBUG_RXFSTS_MASK, value);
if (rxfsts == GMAC_DEBUG_RXFSTS_FULL)
x->mtl_rx_fifo_fill_level_full++;
@@ -420,8 +419,7 @@ static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
x->mtl_rx_fifo_fill_level_empty++;
}
if (value & GMAC_DEBUG_RRCSTS_MASK) {
- u32 rrcsts = (value & GMAC_DEBUG_RRCSTS_MASK) >>
- GMAC_DEBUG_RRCSTS_SHIFT;
+ u32 rrcsts = FIELD_GET(GMAC_DEBUG_RRCSTS_MASK, value);
if (rrcsts == GMAC_DEBUG_RRCSTS_FLUSH)
x->mtl_rx_fifo_read_ctrl_flush++;
@@ -435,8 +433,8 @@ static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
if (value & GMAC_DEBUG_RWCSTS)
x->mtl_rx_fifo_ctrl_active++;
if (value & GMAC_DEBUG_RFCFCSTS_MASK)
- x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
- >> GMAC_DEBUG_RFCFCSTS_SHIFT;
+ x->mac_rx_frame_ctrl_fifo = FIELD_GET(GMAC_DEBUG_RFCFCSTS_MASK,
+ value);
if (value & GMAC_DEBUG_RPESTS)
x->mac_gmii_rx_proto_engine++;
}
@@ -534,7 +532,7 @@ void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv)
if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN))
return;
- num_snapshot = (ts_status & GMAC3_X_ATSNS) >> GMAC3_X_ATSNS_SHIFT;
+ num_snapshot = FIELD_GET(GMAC3_X_ATSNS, ts_status);
for (i = 0; i < num_snapshot; i++) {
read_lock_irqsave(&priv->ptp_lock, flags);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 5877fec9f6c3..3ac7a7949529 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -28,13 +28,10 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (axi->axi_xit_frm)
value |= DMA_AXI_LPI_XIT_FRM;
- value &= ~DMA_AXI_WR_OSR_LMT;
- value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) <<
- DMA_AXI_WR_OSR_LMT_SHIFT;
-
- value &= ~DMA_AXI_RD_OSR_LMT;
- value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) <<
- DMA_AXI_RD_OSR_LMT_SHIFT;
+ value = u32_replace_bits(value, axi->axi_wr_osr_lmt,
+ DMA_AXI_WR_OSR_LMT);
+ value = u32_replace_bits(value, axi->axi_rd_osr_lmt,
+ DMA_AXI_RD_OSR_LMT);
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
@@ -64,9 +61,8 @@ static void dwmac1000_dma_init_channel(struct stmmac_priv *priv,
if (dma_cfg->pblx8)
value |= DMA_BUS_MODE_MAXPBL;
value |= DMA_BUS_MODE_USP;
- value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
- value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
- value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+ value = u32_replace_bits(value, txpbl, DMA_BUS_MODE_PBL_MASK);
+ value = u32_replace_bits(value, rxpbl, DMA_BUS_MODE_RPBL_MASK);
/* Set the Fixed burst mode */
if (dma_cfg->fixed_burst)
@@ -243,6 +239,8 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
/* Alternate (enhanced) DESC mode */
dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+ dma_cap->actphyif = FIELD_GET(DMA_HW_FEAT_ACTPHYIF, hw_cap);
+
return 0;
}
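
u32_replace_bits() from <linux/bitfield.h> is the read-modify-write form used above: it clears the masked field in the old value and inserts the new one, collapsing the former clear/shift/or triple into a single call. A sketch with an illustrative mask:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_WR_OSR_LMT	GENMASK(27, 24)	/* illustrative only */

static inline u32 example_set_wr_osr(u32 reg, u32 limit)
{
	/* Equivalent to: (reg & ~mask) | ((limit << 24) & mask). */
	return u32_replace_bits(reg, limit, EXAMPLE_WR_OSR_LMT);
}
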
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 14e847c0e1a9..db4fbe64a38a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -53,7 +53,7 @@ static int dwmac100_rx_ipc_enable(struct mac_device_info *hw)
return 0;
}
-static int dwmac100_irq_status(struct mac_device_info *hw,
+static int dwmac100_irq_status(struct stmmac_priv *priv,
struct stmmac_extra_stats *x)
{
return 0;
@@ -108,7 +108,7 @@ static void dwmac100_set_filter(struct mac_device_info *hw,
memset(mc_filter, 0, sizeof(mc_filter));
netdev_for_each_mc_addr(ha, dev) {
/* The upper 6 bits of the calculated CRC are used to
- * index the contens of the hash table
+ * index the contents of the hash table
*/
int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
/* The most significant bit determines the register to
@@ -132,7 +132,7 @@ static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
if (duplex)
- flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+ flow |= FIELD_PREP(MAC_FLOW_CTRL_PT_MASK, pause_time);
writel(flow, ioaddr + MAC_FLOW_CTRL);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 82957db47c99..12b2bf2d739a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -22,7 +22,8 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg)
{
/* Enable Application Access by writing to DMA CSR0 */
- writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
+ writel(DMA_BUS_MODE_DEFAULT |
+ FIELD_PREP(DMA_BUS_MODE_PBL_MASK, dma_cfg->pbl),
ioaddr + DMA_BUS_MODE);
/* Mask interrupts by writing to CSR7 */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 3cb733781e1e..d797d936aee1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -95,7 +95,7 @@
/* MAC Flow Control TX */
#define GMAC_TX_FLOW_CTRL_TFE BIT(1)
-#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_TX_FLOW_CTRL_PT_MASK GENMASK(31, 16)
/* MAC Interrupt bitmap*/
#define GMAC_INT_RGSMIIS BIT(0)
@@ -142,23 +142,19 @@ enum power_event {
/* MAC Debug bitmap */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
-#define GMAC_DEBUG_TFCSTS_SHIFT 17
#define GMAC_DEBUG_TFCSTS_IDLE 0
#define GMAC_DEBUG_TFCSTS_WAIT 1
#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2
#define GMAC_DEBUG_TFCSTS_XFER 3
#define GMAC_DEBUG_TPESTS BIT(16)
#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1)
-#define GMAC_DEBUG_RFCFCSTS_SHIFT 1
#define GMAC_DEBUG_RPESTS BIT(0)
/* MAC config */
#define GMAC_CONFIG_ARPEN BIT(31)
#define GMAC_CONFIG_SARC GENMASK(30, 28)
-#define GMAC_CONFIG_SARC_SHIFT 28
#define GMAC_CONFIG_IPC BIT(27)
#define GMAC_CONFIG_IPG GENMASK(26, 24)
-#define GMAC_CONFIG_IPG_SHIFT 24
#define GMAC_CONFIG_2K BIT(22)
#define GMAC_CONFIG_ACS BIT(20)
#define GMAC_CONFIG_BE BIT(18)
@@ -166,7 +162,6 @@ enum power_event {
#define GMAC_CONFIG_JE BIT(16)
#define GMAC_CONFIG_PS BIT(15)
#define GMAC_CONFIG_FES BIT(14)
-#define GMAC_CONFIG_FES_SHIFT 14
#define GMAC_CONFIG_DM BIT(13)
#define GMAC_CONFIG_LM BIT(12)
#define GMAC_CONFIG_DCRS BIT(9)
@@ -175,11 +170,9 @@ enum power_event {
/* MAC extended config */
#define GMAC_CONFIG_EIPG GENMASK(29, 25)
-#define GMAC_CONFIG_EIPG_SHIFT 25
#define GMAC_CONFIG_EIPG_EN BIT(24)
#define GMAC_CONFIG_HDSMS GENMASK(22, 20)
-#define GMAC_CONFIG_HDSMS_SHIFT 20
-#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT)
+#define GMAC_CONFIG_HDSMS_256 FIELD_PREP_CONST(GMAC_CONFIG_HDSMS, 0x2)
/* MAC HW features0 bitmap */
#define GMAC_HW_FEAT_SAVLANINS BIT(27)
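
FIELD_PREP_CONST() appears above because FIELD_PREP() expands to build-time checks that are not an integer constant expression, so it cannot be used where one is required (static initializers, case labels); the _CONST variant can. The TTC/RTC defines further down get away with plain FIELD_PREP() since they are only expanded inside function bodies. Illustrative comparison:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_HDSMS		GENMASK(22, 20)	/* illustrative only */
/* Usable in constant-expression contexts, e.g. a static initializer: */
#define EXAMPLE_HDSMS_256	FIELD_PREP_CONST(EXAMPLE_HDSMS, 0x2)

static const u32 example_default = EXAMPLE_HDSMS_256;	/* == 0x2 << 20 */
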
@@ -242,7 +235,6 @@ enum power_event {
/* MAC HW ADDR regs */
#define GMAC_HI_DCS GENMASK(18, 16)
-#define GMAC_HI_DCS_SHIFT 16
#define GMAC_HI_REG_AE BIT(31)
/* L3/L4 Filters regs */
@@ -257,7 +249,6 @@ enum power_event {
#define GMAC_L3SAM0 BIT(2)
#define GMAC_L3PEN0 BIT(0)
#define GMAC_L4DP0 GENMASK(31, 16)
-#define GMAC_L4DP0_SHIFT 16
#define GMAC_L4SP0 GENMASK(15, 0)
/* MAC Timestamp Status */
@@ -314,39 +305,32 @@ static inline u32 mtl_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define MTL_OP_MODE_TSF BIT(1)
#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
-#define MTL_OP_MODE_TQS_SHIFT 16
-#define MTL_OP_MODE_TTC_MASK 0x70
-#define MTL_OP_MODE_TTC_SHIFT 4
-
-#define MTL_OP_MODE_TTC_32 0
-#define MTL_OP_MODE_TTC_64 (1 << MTL_OP_MODE_TTC_SHIFT)
-#define MTL_OP_MODE_TTC_96 (2 << MTL_OP_MODE_TTC_SHIFT)
-#define MTL_OP_MODE_TTC_128 (3 << MTL_OP_MODE_TTC_SHIFT)
-#define MTL_OP_MODE_TTC_192 (4 << MTL_OP_MODE_TTC_SHIFT)
-#define MTL_OP_MODE_TTC_256 (5 << MTL_OP_MODE_TTC_SHIFT)
-#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
-#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_MASK GENMASK(6, 4)
+#define MTL_OP_MODE_TTC_32 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 0)
+#define MTL_OP_MODE_TTC_64 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 1)
+#define MTL_OP_MODE_TTC_96 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 2)
+#define MTL_OP_MODE_TTC_128 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 3)
+#define MTL_OP_MODE_TTC_192 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 4)
+#define MTL_OP_MODE_TTC_256 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 5)
+#define MTL_OP_MODE_TTC_384 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 6)
+#define MTL_OP_MODE_TTC_512 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 7)
#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20)
-#define MTL_OP_MODE_RQS_SHIFT 20
#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14)
-#define MTL_OP_MODE_RFD_SHIFT 14
#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8)
-#define MTL_OP_MODE_RFA_SHIFT 8
#define MTL_OP_MODE_EHFC BIT(7)
#define MTL_OP_MODE_DIS_TCP_EF BIT(6)
#define MTL_OP_MODE_RTC_MASK GENMASK(1, 0)
-#define MTL_OP_MODE_RTC_SHIFT 0
-#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT)
-#define MTL_OP_MODE_RTC_64 0
-#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
-#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_32 FIELD_PREP(MTL_OP_MODE_RTC_MASK, 1)
+#define MTL_OP_MODE_RTC_64 FIELD_PREP(MTL_OP_MODE_RTC_MASK, 0)
+#define MTL_OP_MODE_RTC_96 FIELD_PREP(MTL_OP_MODE_RTC_MASK, 2)
+#define MTL_OP_MODE_RTC_128 FIELD_PREP(MTL_OP_MODE_RTC_MASK, 3)
/* MTL ETS Control register */
#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10
@@ -451,7 +435,6 @@ static inline u32 mtl_low_credx_base_addr(const struct dwmac4_addrs *addrs,
/* MTL debug: Tx FIFO Read Controller Status */
#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
-#define MTL_DEBUG_TRCSTS_SHIFT 1
#define MTL_DEBUG_TRCSTS_IDLE 0
#define MTL_DEBUG_TRCSTS_READ 1
#define MTL_DEBUG_TRCSTS_TXW 2
@@ -460,13 +443,11 @@ static inline u32 mtl_low_credx_base_addr(const struct dwmac4_addrs *addrs,
/* MAC debug: GMII or MII Transmit Protocol Engine Status */
#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
-#define MTL_DEBUG_RXFSTS_SHIFT 4
#define MTL_DEBUG_RXFSTS_EMPTY 0
#define MTL_DEBUG_RXFSTS_BT 1
#define MTL_DEBUG_RXFSTS_AT 2
#define MTL_DEBUG_RXFSTS_FULL 3
#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
-#define MTL_DEBUG_RRCSTS_SHIFT 1
#define MTL_DEBUG_RRCSTS_IDLE 0
#define MTL_DEBUG_RRCSTS_RDATA 1
#define MTL_DEBUG_RRCSTS_RSTAT 2
@@ -485,42 +466,12 @@ static inline u32 mtl_low_credx_base_addr(const struct dwmac4_addrs *addrs,
/* To dump the core regs excluding the Address Registers */
#define GMAC_REG_NUM 132
-/* MTL debug */
-#define MTL_DEBUG_TXSTSFSTS BIT(5)
-#define MTL_DEBUG_TXFSTS BIT(4)
-#define MTL_DEBUG_TWCSTS BIT(3)
-
-/* MTL debug: Tx FIFO Read Controller Status */
-#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
-#define MTL_DEBUG_TRCSTS_SHIFT 1
-#define MTL_DEBUG_TRCSTS_IDLE 0
-#define MTL_DEBUG_TRCSTS_READ 1
-#define MTL_DEBUG_TRCSTS_TXW 2
-#define MTL_DEBUG_TRCSTS_WRITE 3
-#define MTL_DEBUG_TXPAUSED BIT(0)
-
-/* MAC debug: GMII or MII Transmit Protocol Engine Status */
-#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
-#define MTL_DEBUG_RXFSTS_SHIFT 4
-#define MTL_DEBUG_RXFSTS_EMPTY 0
-#define MTL_DEBUG_RXFSTS_BT 1
-#define MTL_DEBUG_RXFSTS_AT 2
-#define MTL_DEBUG_RXFSTS_FULL 3
-#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
-#define MTL_DEBUG_RRCSTS_SHIFT 1
-#define MTL_DEBUG_RRCSTS_IDLE 0
-#define MTL_DEBUG_RRCSTS_RDATA 1
-#define MTL_DEBUG_RRCSTS_RSTAT 2
-#define MTL_DEBUG_RRCSTS_FLUSH 3
-#define MTL_DEBUG_RWCSTS BIT(0)
-
/* SGMII/RGMII status register */
#define GMAC_PHYIF_CTRLSTATUS_TC BIT(0)
#define GMAC_PHYIF_CTRLSTATUS_LUD BIT(1)
#define GMAC_PHYIF_CTRLSTATUS_SMIDRXS BIT(4)
#define GMAC_PHYIF_CTRLSTATUS_LNKMOD BIT(16)
#define GMAC_PHYIF_CTRLSTATUS_SPEED GENMASK(18, 17)
-#define GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT 17
#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19)
#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20)
#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index a4282fd7c3c7..623868afe93d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -572,8 +572,8 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
flow = GMAC_TX_FLOW_CTRL_TFE;
if (duplex)
- flow |=
- (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+ flow |= FIELD_PREP(GMAC_TX_FLOW_CTRL_PT_MASK,
+ pause_time);
writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
}
@@ -615,10 +615,10 @@ static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
return ret;
}
-static int dwmac4_irq_status(struct mac_device_info *hw,
+static int dwmac4_irq_status(struct stmmac_priv *priv,
struct stmmac_extra_stats *x)
{
- void __iomem *ioaddr = hw->pcsr;
+ void __iomem *ioaddr = priv->hw->pcsr;
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
int ret = 0;
@@ -658,7 +658,8 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
x->irq_rx_path_exit_lpi_mode_n++;
}
- dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
+ if (intr_status & (PCS_ANE_IRQ | PCS_LINK_IRQ))
+ stmmac_integrated_pcs_irq(priv, intr_status, x);
return ret;
}
@@ -681,8 +682,8 @@ static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
if (value & MTL_DEBUG_TWCSTS)
x->mmtl_fifo_ctrl++;
if (value & MTL_DEBUG_TRCSTS_MASK) {
- u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
- >> MTL_DEBUG_TRCSTS_SHIFT;
+ u32 trcsts = FIELD_GET(MTL_DEBUG_TRCSTS_MASK, value);
+
if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
x->mtl_tx_fifo_read_ctrl_write++;
else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
@@ -700,8 +701,7 @@ static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
value = readl(ioaddr + MTL_CHAN_RX_DEBUG(dwmac4_addrs, queue));
if (value & MTL_DEBUG_RXFSTS_MASK) {
- u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
- >> MTL_DEBUG_RRCSTS_SHIFT;
+ u32 rxfsts = FIELD_GET(MTL_DEBUG_RXFSTS_MASK, value);
if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
x->mtl_rx_fifo_fill_level_full++;
@@ -713,8 +713,7 @@ static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
x->mtl_rx_fifo_fill_level_empty++;
}
if (value & MTL_DEBUG_RRCSTS_MASK) {
- u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
- MTL_DEBUG_RRCSTS_SHIFT;
+ u32 rrcsts = FIELD_GET(MTL_DEBUG_RRCSTS_MASK, value);
if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
x->mtl_rx_fifo_read_ctrl_flush++;
@@ -733,8 +732,7 @@ static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
value = readl(ioaddr + GMAC_DEBUG);
if (value & GMAC_DEBUG_TFCSTS_MASK) {
- u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
- >> GMAC_DEBUG_TFCSTS_SHIFT;
+ u32 tfcsts = FIELD_GET(GMAC_DEBUG_TFCSTS_MASK, value);
if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
x->mac_tx_frame_ctrl_xfer++;
@@ -748,8 +746,8 @@ static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
if (value & GMAC_DEBUG_TPESTS)
x->mac_gmii_tx_proto_engine++;
if (value & GMAC_DEBUG_RFCFCSTS_MASK)
- x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
- >> GMAC_DEBUG_RFCFCSTS_SHIFT;
+ x->mac_rx_frame_ctrl_fifo = FIELD_GET(GMAC_DEBUG_RFCFCSTS_MASK,
+ value);
if (value & GMAC_DEBUG_RPESTS)
x->mac_gmii_rx_proto_engine++;
}
@@ -770,8 +768,7 @@ static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
u32 value = readl(ioaddr + GMAC_CONFIG);
- value &= ~GMAC_CONFIG_SARC;
- value |= val << GMAC_CONFIG_SARC_SHIFT;
+ value = u32_replace_bits(value, val, GMAC_CONFIG_SARC);
writel(value, ioaddr + GMAC_CONFIG);
}
@@ -879,9 +876,9 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
if (sa) {
- value = match & GMAC_L4SP0;
+ value = FIELD_PREP(GMAC_L4SP0, match);
} else {
- value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
+ value = FIELD_PREP(GMAC_L4DP0, match);
}
writel(value, ioaddr + GMAC_L4_ADDR(filter_no));
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index aac68dc28dc1..e226dc6a1b17 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -17,11 +17,9 @@ static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p,
void __iomem *ioaddr)
{
- unsigned int tdes3;
+ u32 tdes3 = le32_to_cpu(p->des3);
int ret = tx_done;
- tdes3 = le32_to_cpu(p->des3);
-
/* Get tx owner first */
if (unlikely(tdes3 & TDES3_OWN))
return tx_dma_own;
@@ -46,8 +44,7 @@ static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x,
if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
(tdes3 & TDES3_EXCESSIVE_COLLISION)))
x->tx_collision +=
- (tdes3 & TDES3_COLLISION_COUNT_MASK)
- >> TDES3_COLLISION_COUNT_SHIFT;
+ FIELD_GET(TDES3_COLLISION_COUNT_MASK, tdes3);
if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
x->tx_deferred++;
@@ -73,9 +70,9 @@ static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x,
static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
- unsigned int rdes1 = le32_to_cpu(p->des1);
- unsigned int rdes2 = le32_to_cpu(p->des2);
- unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 rdes1 = le32_to_cpu(p->des1);
+ u32 rdes2 = le32_to_cpu(p->des2);
+ u32 rdes3 = le32_to_cpu(p->des3);
int message_type;
int ret = good_frame;
@@ -108,7 +105,7 @@ static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
ret = discard_frame;
}
- message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;
+ message_type = FIELD_GET(RDES1_PTP_MSG_TYPE_MASK, rdes1);
if (rdes1 & RDES1_IP_HDR_ERROR) {
x->ip_hdr_err++;
@@ -168,8 +165,7 @@ static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
x->l3_filter_match++;
if (rdes2 & RDES2_L4_FILTER_MATCH)
x->l4_filter_match++;
- if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
- >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
+ if (rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
x->l3_l4_filter_no_match++;
return ret;
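
Descriptor words live in memory little-endian; the pattern above converts once with le32_to_cpu() and then applies FIELD_GET() to the host-order word. A minimal sketch (mask illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_COLL_CNT	GENMASK(7, 4)	/* illustrative only */

static inline u32 example_tx_collisions(__le32 des3)
{
	return FIELD_GET(EXAMPLE_COLL_CNT, le32_to_cpu(des3));
}
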
@@ -255,15 +251,14 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
static int dwmac4_rx_check_timestamp(void *desc)
{
struct dma_desc *p = (struct dma_desc *)desc;
- unsigned int rdes0 = le32_to_cpu(p->des0);
- unsigned int rdes1 = le32_to_cpu(p->des1);
- unsigned int rdes3 = le32_to_cpu(p->des3);
- u32 own, ctxt;
+ u32 rdes0 = le32_to_cpu(p->des0);
+ u32 rdes1 = le32_to_cpu(p->des1);
+ u32 rdes3 = le32_to_cpu(p->des3);
+ bool own, ctxt;
int ret = 1;
own = rdes3 & RDES3_OWN;
- ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
- >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
+ ctxt = rdes3 & RDES3_CONTEXT_DESCRIPTOR;
if (likely(!own && ctxt)) {
if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
@@ -327,7 +322,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls, unsigned int tot_pkt_len)
{
- unsigned int tdes3 = le32_to_cpu(p->des3);
+ u32 tdes3 = le32_to_cpu(p->des3);
p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
@@ -337,10 +332,8 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
else
tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
- if (likely(csum_flag))
- tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
- else
- tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+ tdes3 = u32_replace_bits(tdes3, csum_flag ? TX_CIC_FULL : 0,
+ TDES3_CHECKSUM_INSERTION_MASK);
if (ls)
tdes3 |= TDES3_LAST_DESCRIPTOR;
@@ -366,21 +359,21 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
bool ls, unsigned int tcphdrlen,
unsigned int tcppayloadlen)
{
- unsigned int tdes3 = le32_to_cpu(p->des3);
+ u32 tdes3 = le32_to_cpu(p->des3);
if (len1)
- p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));
+ p->des2 |= cpu_to_le32(FIELD_PREP(TDES2_BUFFER1_SIZE_MASK,
+ len1));
if (len2)
- p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
- & TDES2_BUFFER2_SIZE_MASK);
+ p->des2 |= cpu_to_le32(FIELD_PREP(TDES2_BUFFER2_SIZE_MASK,
+ len2));
if (is_fs) {
tdes3 |= TDES3_FIRST_DESCRIPTOR |
TDES3_TCP_SEGMENTATION_ENABLE |
- ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
- TDES3_SLOT_NUMBER_MASK) |
- ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
+ FIELD_PREP(TDES3_SLOT_NUMBER_MASK, tcphdrlen) |
+ FIELD_PREP(TDES3_TCP_PKT_PAYLOAD_MASK, tcppayloadlen);
} else {
tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
}
@@ -491,9 +484,8 @@ static void dwmac4_clear(struct dma_desc *p)
static void dwmac4_set_sarc(struct dma_desc *p, u32 sarc_type)
{
- sarc_type <<= TDES3_SA_INSERT_CTRL_SHIFT;
-
- p->des3 |= cpu_to_le32(sarc_type & TDES3_SA_INSERT_CTRL_MASK);
+ p->des3 |= cpu_to_le32(FIELD_PREP(TDES3_SA_INSERT_CTRL_MASK,
+ sarc_type));
}
static int set_16kib_bfsize(int mtu)
@@ -515,14 +507,9 @@ static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
/* Inner VLAN */
if (inner_type) {
- u32 des = inner_tag << TDES2_IVT_SHIFT;
-
- des &= TDES2_IVT_MASK;
- p->des2 = cpu_to_le32(des);
-
- des = inner_type << TDES3_IVTIR_SHIFT;
- des &= TDES3_IVTIR_MASK;
- p->des3 = cpu_to_le32(des | TDES3_IVLTV);
+ p->des2 = cpu_to_le32(FIELD_PREP(TDES2_IVT_MASK, inner_tag));
+ p->des3 = cpu_to_le32(FIELD_PREP(TDES3_IVTIR_MASK, inner_type) |
+ TDES3_IVLTV);
}
/* Outer VLAN */
@@ -534,8 +521,7 @@ static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
{
- type <<= TDES2_VLAN_TAG_SHIFT;
- p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
+ p->des2 |= cpu_to_le32(FIELD_PREP(TDES2_VLAN_TAG_MASK, type));
}
static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 806555976496..fb1fea5b0e6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -18,15 +18,11 @@
/* TDES2 (read format) */
#define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0)
#define TDES2_VLAN_TAG_MASK GENMASK(15, 14)
-#define TDES2_VLAN_TAG_SHIFT 14
#define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16)
-#define TDES2_BUFFER2_SIZE_MASK_SHIFT 16
#define TDES3_IVTIR_MASK GENMASK(19, 18)
-#define TDES3_IVTIR_SHIFT 18
#define TDES3_IVLTV BIT(17)
#define TDES2_TIMESTAMP_ENABLE BIT(30)
#define TDES2_IVT_MASK GENMASK(31, 16)
-#define TDES2_IVT_SHIFT 16
#define TDES2_INTERRUPT_ON_COMPLETION BIT(31)
/* TDES3 (read format) */
@@ -34,13 +30,10 @@
#define TDES3_VLAN_TAG GENMASK(15, 0)
#define TDES3_VLTV BIT(16)
#define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16)
-#define TDES3_CHECKSUM_INSERTION_SHIFT 16
#define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0)
#define TDES3_TCP_SEGMENTATION_ENABLE BIT(18)
-#define TDES3_HDR_LEN_SHIFT 19
#define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19)
#define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23)
-#define TDES3_SA_INSERT_CTRL_SHIFT 23
#define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26)
/* TDES3 (write back format) */
@@ -49,7 +42,6 @@
#define TDES3_UNDERFLOW_ERROR BIT(2)
#define TDES3_EXCESSIVE_DEFERRAL BIT(3)
#define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4)
-#define TDES3_COLLISION_COUNT_SHIFT 4
#define TDES3_EXCESSIVE_COLLISION BIT(8)
#define TDES3_LATE_COLLISION BIT(9)
#define TDES3_NO_CARRIER BIT(10)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 7b513324cfb0..60b880cdd9da 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -27,13 +27,10 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (axi->axi_xit_frm)
value |= DMA_AXI_LPI_XIT_FRM;
- value &= ~DMA_AXI_WR_OSR_LMT;
- value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
- DMA_AXI_WR_OSR_LMT_SHIFT;
-
- value &= ~DMA_AXI_RD_OSR_LMT;
- value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
- DMA_AXI_RD_OSR_LMT_SHIFT;
+ value = u32_replace_bits(value, axi->axi_wr_osr_lmt,
+ DMA_AXI_WR_OSR_LMT);
+ value = u32_replace_bits(value, axi->axi_rd_osr_lmt,
+ DMA_AXI_RD_OSR_LMT);
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
@@ -55,7 +52,7 @@ static void dwmac4_dma_init_rx_chan(struct stmmac_priv *priv,
u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
- value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+ value = value | FIELD_PREP(DMA_CHAN_RX_CTRL_RXPBL_MASK, rxpbl);
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
@@ -76,7 +73,7 @@ static void dwmac4_dma_init_tx_chan(struct stmmac_priv *priv,
u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
- value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+ value = value | FIELD_PREP(DMA_CHAN_TX_CTRL_TXPBL_MASK, txpbl);
/* Enable OSP to get best performance */
value |= DMA_CONTROL_OSP;
@@ -101,7 +98,7 @@ static void dwmac4_dma_init_channel(struct stmmac_priv *priv,
/* common channel control register config */
value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (dma_cfg->pblx8)
- value = value | DMA_BUS_MODE_PBL;
+ value = value | DMA_CHAN_CTRL_PBLX8;
writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
/* Mask interrupts by writing to CSR7 */
@@ -119,7 +116,7 @@ static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
/* common channel control register config */
value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (dma_cfg->pblx8)
- value = value | DMA_BUS_MODE_PBL;
+ value = value | DMA_CHAN_CTRL_PBLX8;
writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
@@ -151,10 +148,9 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
value = readl(ioaddr + DMA_BUS_MODE);
- if (dma_cfg->multi_msi_en) {
- value &= ~DMA_BUS_MODE_INTM_MASK;
- value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT);
- }
+ if (dma_cfg->multi_msi_en)
+ value = u32_replace_bits(value, DMA_BUS_MODE_INTM_MODE1,
+ DMA_BUS_MODE_INTM_MASK);
if (dma_cfg->dche)
value |= DMA_BUS_MODE_DCHE;
@@ -264,7 +260,7 @@ static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
}
mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
- mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
+ mtl_rx_op |= FIELD_PREP(MTL_OP_MODE_RQS_MASK, rqs);
/* Enable flow control only if each channel gets 4 KiB or more FIFO and
* only if channel is not an AVB channel.
@@ -295,11 +291,10 @@ static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
break;
}
- mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
- mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
-
- mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
- mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
+ mtl_rx_op = u32_replace_bits(mtl_rx_op, rfd,
+ MTL_OP_MODE_RFD_MASK);
+ mtl_rx_op = u32_replace_bits(mtl_rx_op, rfa,
+ MTL_OP_MODE_RFA_MASK);
}
writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
@@ -354,8 +349,8 @@ static void dwmac4_dma_tx_chan_op_mode(struct stmmac_priv *priv,
mtl_tx_op |= MTL_OP_MODE_TXQEN;
else
mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
- mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
- mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;
+
+ mtl_tx_op = u32_replace_bits(mtl_tx_op, tqs, MTL_OP_MODE_TQS_MASK);
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs, channel));
}
@@ -387,6 +382,8 @@ static int dwmac4_get_hw_feature(void __iomem *ioaddr,
dma_cap->vlins = (hw_cap & GMAC_HW_FEAT_SAVLANINS) >> 27;
dma_cap->arpoffsel = (hw_cap & GMAC_HW_FEAT_ARPOFFSEL) >> 9;
+ dma_cap->actphyif = FIELD_GET(DMA_HW_FEAT_ACTPHYIF, hw_cap);
+
/* MAC HW feature1 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27;
@@ -496,8 +493,7 @@ static void dwmac4_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
- value &= ~DMA_RBSZ_MASK;
- value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
+ value = u32_replace_bits(value, bfsize, DMA_RBSZ_MASK);
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index f27126f05551..9d9077a4ac9f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -16,69 +16,33 @@
#define DMA_CHANNEL_NB_MAX 1
#define DMA_BUS_MODE 0x00001000
-#define DMA_SYS_BUS_MODE 0x00001004
-#define DMA_STATUS 0x00001008
-#define DMA_DEBUG_STATUS_0 0x0000100c
-#define DMA_DEBUG_STATUS_1 0x00001010
-#define DMA_DEBUG_STATUS_2 0x00001014
-#define DMA_AXI_BUS_MODE 0x00001028
-#define DMA_TBS_CTRL 0x00001050
-/* DMA Bus Mode bitmap */
#define DMA_BUS_MODE_DCHE BIT(19)
#define DMA_BUS_MODE_INTM_MASK GENMASK(17, 16)
-#define DMA_BUS_MODE_INTM_SHIFT 16
#define DMA_BUS_MODE_INTM_MODE1 0x1
#define DMA_BUS_MODE_SFT_RESET BIT(0)
-/* DMA SYS Bus Mode bitmap */
-#define DMA_BUS_MODE_SPH BIT(24)
-#define DMA_BUS_MODE_PBL BIT(16)
-#define DMA_BUS_MODE_PBL_SHIFT 16
-#define DMA_BUS_MODE_RPBL_SHIFT 16
+#define DMA_SYS_BUS_MODE 0x00001004
+
#define DMA_BUS_MODE_MB BIT(14)
#define DMA_BUS_MODE_FB BIT(0)
-/* DMA Interrupt top status */
-#define DMA_STATUS_MAC BIT(17)
-#define DMA_STATUS_MTL BIT(16)
-#define DMA_STATUS_CHAN7 BIT(7)
-#define DMA_STATUS_CHAN6 BIT(6)
-#define DMA_STATUS_CHAN5 BIT(5)
-#define DMA_STATUS_CHAN4 BIT(4)
-#define DMA_STATUS_CHAN3 BIT(3)
-#define DMA_STATUS_CHAN2 BIT(2)
-#define DMA_STATUS_CHAN1 BIT(1)
-#define DMA_STATUS_CHAN0 BIT(0)
-
-/* DMA debug status bitmap */
-#define DMA_DEBUG_STATUS_TS_MASK 0xf
-#define DMA_DEBUG_STATUS_RS_MASK 0xf
-
-/* DMA AXI bitmap */
+#define DMA_STATUS 0x00001008
+
+#define DMA_AXI_BUS_MODE 0x00001028
+
#define DMA_AXI_EN_LPI BIT(31)
#define DMA_AXI_LPI_XIT_FRM BIT(30)
#define DMA_AXI_WR_OSR_LMT GENMASK(27, 24)
-#define DMA_AXI_WR_OSR_LMT_SHIFT 24
#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
-#define DMA_AXI_RD_OSR_LMT_SHIFT 16
-
-#define DMA_AXI_OSR_MAX 0xf
-#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
- (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
#define DMA_SYS_BUS_MB BIT(14)
-#define DMA_AXI_1KBBE BIT(13)
#define DMA_SYS_BUS_AAL DMA_AXI_AAL
#define DMA_SYS_BUS_EAME BIT(11)
#define DMA_SYS_BUS_FB BIT(0)
-#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
- DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
- DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
- DMA_AXI_BLEN4)
+#define DMA_TBS_CTRL 0x00001050
-/* DMA TBS Control */
#define DMA_TBS_FTOS GENMASK(31, 8)
#define DMA_TBS_FTOV BIT(0)
#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV)
@@ -100,11 +64,25 @@ static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
return addr;
}
-#define DMA_CHAN_REG_NUMBER 17
-
#define DMA_CHAN_CONTROL(addrs, x) dma_chanx_base_addr(addrs, x)
+
+#define DMA_CHAN_CTRL_PBLX8 BIT(16)
+#define DMA_CONTROL_SPH BIT(24)
+
#define DMA_CHAN_TX_CONTROL(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x4)
+
+#define DMA_CONTROL_EDSE BIT(28)
+#define DMA_CHAN_TX_CTRL_TXPBL_MASK GENMASK(21, 16)
+#define DMA_CONTROL_TSE BIT(12)
+#define DMA_CONTROL_OSP BIT(4)
+#define DMA_CONTROL_ST BIT(0)
+
#define DMA_CHAN_RX_CONTROL(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x8)
+
+#define DMA_CHAN_RX_CTRL_RXPBL_MASK GENMASK(21, 16)
+#define DMA_RBSZ_MASK GENMASK(14, 1)
+#define DMA_CONTROL_SR BIT(0)
+
#define DMA_CHAN_TX_BASE_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x10)
#define DMA_CHAN_TX_BASE_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x14)
#define DMA_CHAN_RX_BASE_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x18)
@@ -113,7 +91,41 @@ static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define DMA_CHAN_RX_END_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x28)
#define DMA_CHAN_TX_RING_LEN(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x2c)
#define DMA_CHAN_RX_RING_LEN(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x30)
+
#define DMA_CHAN_INTR_ENA(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x34)
+
+#define DMA_CHAN_INTR_ENA_NIE BIT(16)
+#define DMA_CHAN_INTR_ENA_AIE BIT(15)
+#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15)
+#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14)
+#define DMA_CHAN_INTR_ENA_FBE BIT(12)
+#define DMA_CHAN_INTR_ENA_RIE BIT(6)
+#define DMA_CHAN_INTR_ENA_TIE BIT(0)
+
+#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \
+ DMA_CHAN_INTR_ENA_RIE | \
+ DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \
+ DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.00 */
+#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
+ DMA_CHAN_INTR_ABNORMAL)
+#define DMA_CHAN_INTR_DEFAULT_RX (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX (DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
+ DMA_CHAN_INTR_ENA_RIE | \
+ DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \
+ DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.10a */
+#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
+ DMA_CHAN_INTR_ABNORMAL_4_10)
+#define DMA_CHAN_INTR_DEFAULT_RX_4_10 (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX_4_10 (DMA_CHAN_INTR_ENA_TIE)
+
#define DMA_CHAN_RX_WATCHDOG(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x38)
#define DMA_CHAN_SLOT_CTRL_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x3c)
#define DMA_CHAN_CUR_TX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x44)
@@ -124,26 +136,8 @@ static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define DMA_CHAN_CUR_RX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x5c)
#define DMA_CHAN_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x60)
-/* DMA Control X */
-#define DMA_CONTROL_SPH BIT(24)
-#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
-
-/* DMA Tx Channel X Control register defines */
-#define DMA_CONTROL_EDSE BIT(28)
-#define DMA_CONTROL_TSE BIT(12)
-#define DMA_CONTROL_OSP BIT(4)
-#define DMA_CONTROL_ST BIT(0)
-
-/* DMA Rx Channel X Control register defines */
-#define DMA_CONTROL_SR BIT(0)
-#define DMA_RBSZ_MASK GENMASK(14, 1)
-#define DMA_RBSZ_SHIFT 1
-
/* Interrupt status per channel */
#define DMA_CHAN_STATUS_REB GENMASK(21, 19)
-#define DMA_CHAN_STATUS_REB_SHIFT 19
-#define DMA_CHAN_STATUS_TEB GENMASK(18, 16)
-#define DMA_CHAN_STATUS_TEB_SHIFT 16
#define DMA_CHAN_STATUS_NIS BIT(15)
#define DMA_CHAN_STATUS_AIS BIT(14)
#define DMA_CHAN_STATUS_CDE BIT(13)
@@ -177,53 +171,6 @@ static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
DMA_CHAN_STATUS_TI | \
DMA_CHAN_STATUS_MSK_COMMON)
-/* Interrupt enable bits per channel */
-#define DMA_CHAN_INTR_ENA_NIE BIT(16)
-#define DMA_CHAN_INTR_ENA_AIE BIT(15)
-#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15)
-#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14)
-#define DMA_CHAN_INTR_ENA_CDE BIT(13)
-#define DMA_CHAN_INTR_ENA_FBE BIT(12)
-#define DMA_CHAN_INTR_ENA_ERE BIT(11)
-#define DMA_CHAN_INTR_ENA_ETE BIT(10)
-#define DMA_CHAN_INTR_ENA_RWE BIT(9)
-#define DMA_CHAN_INTR_ENA_RSE BIT(8)
-#define DMA_CHAN_INTR_ENA_RBUE BIT(7)
-#define DMA_CHAN_INTR_ENA_RIE BIT(6)
-#define DMA_CHAN_INTR_ENA_TBUE BIT(2)
-#define DMA_CHAN_INTR_ENA_TSE BIT(1)
-#define DMA_CHAN_INTR_ENA_TIE BIT(0)
-
-#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \
- DMA_CHAN_INTR_ENA_RIE | \
- DMA_CHAN_INTR_ENA_TIE)
-
-#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \
- DMA_CHAN_INTR_ENA_FBE)
-/* DMA default interrupt mask for 4.00 */
-#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
- DMA_CHAN_INTR_ABNORMAL)
-#define DMA_CHAN_INTR_DEFAULT_RX (DMA_CHAN_INTR_ENA_RIE)
-#define DMA_CHAN_INTR_DEFAULT_TX (DMA_CHAN_INTR_ENA_TIE)
-
-#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
- DMA_CHAN_INTR_ENA_RIE | \
- DMA_CHAN_INTR_ENA_TIE)
-
-#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \
- DMA_CHAN_INTR_ENA_FBE)
-/* DMA default interrupt mask for 4.10a */
-#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
- DMA_CHAN_INTR_ABNORMAL_4_10)
-#define DMA_CHAN_INTR_DEFAULT_RX_4_10 (DMA_CHAN_INTR_ENA_RIE)
-#define DMA_CHAN_INTR_DEFAULT_TX_4_10 (DMA_CHAN_INTR_ENA_TIE)
-
-/* channel 0 specific fields */
-#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12)
-#define DMA_CHAN0_DBG_STAT_TPS_SHIFT 12
-#define DMA_CHAN0_DBG_STAT_RPS GENMASK(11, 8)
-#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
-
int dwmac4_dma_reset(void __iomem *ioaddr);
void dwmac4_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 57c03d491774..c098047a3bff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -234,7 +234,7 @@ void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
* bit that has no effect on the High Reg 0 where the bit 31 (MO)
* is RO.
*/
- data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT);
+ data |= FIELD_PREP(GMAC_HI_DCS, STMMAC_CHAN0);
writel(data | GMAC_HI_REG_AE, ioaddr + high);
data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
writel(data, ioaddr + low);
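
The dwmac4_lib.c hunk above trades an open-coded `STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT` for FIELD_PREP(), which derives the shift from the mask itself. Below is a minimal userspace sketch of the FIELD_PREP() semantics; the DEMO_* macros and the GENMASK(18, 16) layout assumed for the DCS field are illustrative stand-ins, not the <linux/bitfield.h> implementation, and __builtin_ctz() is a GCC/Clang builtin.

        #include <stdint.h>
        #include <stdio.h>

        /* Contiguous mask of bits h..l, as GENMASK() produces in the kernel */
        #define DEMO_GENMASK(h, l)  ((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))
        /* Shift val up to the mask's lowest set bit, then clamp to the mask */
        #define DEMO_FIELD_PREP(mask, val) \
                (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

        int main(void)
        {
                uint32_t dcs = DEMO_GENMASK(18, 16);    /* assumed field layout */

                /* channel 1 lands at bit 16 without a hand-kept *_SHIFT */
                printf("0x%08x\n", DEMO_FIELD_PREP(dcs, 1));    /* 0x00010000 */
                return 0;
        }
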
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 054ecb20ce3f..e1c37ac2c99d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -13,13 +13,86 @@
/* DMA CRS Control and Status Register Mapping */
#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+
#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+
#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_TS_MASK GENMASK(22, 20) /* Transmit Process State */
+#define DMA_STATUS_RS_MASK GENMASK(19, 17) /* Receive Process State */
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+#define DMA_STATUS_MSK_COMMON (DMA_STATUS_NIS | \
+ DMA_STATUS_AIS | \
+ DMA_STATUS_FBI)
+
+#define DMA_STATUS_MSK_RX (DMA_STATUS_ERI | \
+ DMA_STATUS_RWT | \
+ DMA_STATUS_RPS | \
+ DMA_STATUS_RU | \
+ DMA_STATUS_RI | \
+ DMA_STATUS_OVF | \
+ DMA_STATUS_MSK_COMMON)
+
+#define DMA_STATUS_MSK_TX (DMA_STATUS_ETI | \
+ DMA_STATUS_UNF | \
+ DMA_STATUS_TJT | \
+ DMA_STATUS_TU | \
+ DMA_STATUS_TPS | \
+ DMA_STATUS_TI | \
+ DMA_STATUS_MSK_COMMON)
+
#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/* DMA Abnormal interrupt */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+#define DMA_INTR_DEFAULT_RX (DMA_INTR_ENA_RIE)
+#define DMA_INTR_DEFAULT_TX (DMA_INTR_ENA_TIE)
+
#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
/* Following DMA defines are channels oriented */
@@ -42,13 +115,9 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan)
#define DMA_CHAN_STATUS(chan) dma_chan_base_addr(DMA_STATUS, chan)
#define DMA_CHAN_CONTROL(chan) dma_chan_base_addr(DMA_CONTROL, chan)
#define DMA_CHAN_INTR_ENA(chan) dma_chan_base_addr(DMA_INTR_ENA, chan)
-#define DMA_CHAN_MISSED_FRAME_CTR(chan) \
- dma_chan_base_addr(DMA_MISSED_FRAME_CTR, chan)
#define DMA_CHAN_RX_WATCHDOG(chan) \
dma_chan_base_addr(DMA_RX_WATCHDOG, chan)
-/* SW Reset */
-#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
/* Rx watchdog register */
#define DMA_RX_WATCHDOG 0x00001024
@@ -59,19 +128,7 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan)
#define DMA_AXI_EN_LPI BIT(31)
#define DMA_AXI_LPI_XIT_FRM BIT(30)
#define DMA_AXI_WR_OSR_LMT GENMASK(23, 20)
-#define DMA_AXI_WR_OSR_LMT_SHIFT 20
-#define DMA_AXI_WR_OSR_LMT_MASK 0xf
#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
-#define DMA_AXI_RD_OSR_LMT_SHIFT 16
-#define DMA_AXI_RD_OSR_LMT_MASK 0xf
-
-#define DMA_AXI_OSR_MAX 0xf
-#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
- (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
-#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
- DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
- DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
- DMA_AXI_BLEN4)
#define DMA_AXI_1KBBE BIT(13)
@@ -81,89 +138,6 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan)
#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
-/* DMA Control register defines */
-#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
-#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
-
-/* DMA Normal interrupt */
-#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
-#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
-#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
-#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
-
-#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
- DMA_INTR_ENA_TIE)
-
-/* DMA Abnormal interrupt */
-#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
-#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
-#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
-#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
-#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
-#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
-#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
-#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
-#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
-
-#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
- DMA_INTR_ENA_UNE)
-
-/* DMA default interrupt mask */
-#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
-#define DMA_INTR_DEFAULT_RX (DMA_INTR_ENA_RIE)
-#define DMA_INTR_DEFAULT_TX (DMA_INTR_ENA_TIE)
-
-/* DMA Status register defines */
-#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
-#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
-#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
-#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
-#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
-#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
-#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
-#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
-#define DMA_STATUS_TS_SHIFT 20
-#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
-#define DMA_STATUS_RS_SHIFT 17
-#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
-#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
-#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
-#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
-#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
-#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
-#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
-#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
-#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
-#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
-#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
-#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
-#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
-#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
-
-#define DMA_STATUS_MSK_COMMON (DMA_STATUS_NIS | \
- DMA_STATUS_AIS | \
- DMA_STATUS_FBI)
-
-#define DMA_STATUS_MSK_RX (DMA_STATUS_ERI | \
- DMA_STATUS_RWT | \
- DMA_STATUS_RPS | \
- DMA_STATUS_RU | \
- DMA_STATUS_RI | \
- DMA_STATUS_OVF | \
- DMA_STATUS_MSK_COMMON)
-
-#define DMA_STATUS_MSK_TX (DMA_STATUS_ETI | \
- DMA_STATUS_UNF | \
- DMA_STATUS_TJT | \
- DMA_STATUS_TU | \
- DMA_STATUS_TPS | \
- DMA_STATUS_TI | \
- DMA_STATUS_MSK_COMMON)
-
#define NUM_DWMAC100_DMA_REGS 9
#define NUM_DWMAC1000_DMA_REGS 23
#define NUM_DWMAC4_DMA_REGS 27
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 97a803d68e3a..a0383f9486c2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -97,10 +97,7 @@ void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
#ifdef DWMAC_DMA_DEBUG
static void show_tx_process_state(unsigned int status)
{
- unsigned int state;
- state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
-
- switch (state) {
+ switch (FIELD_GET(DMA_STATUS_TS_MASK, status)) {
case 0:
pr_debug("- TX (Stopped): Reset or Stop command\n");
break;
@@ -128,10 +125,7 @@ static void show_tx_process_state(unsigned int status)
static void show_rx_process_state(unsigned int status)
{
- unsigned int state;
- state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
-
- switch (state) {
+ switch (FIELD_GET(DMA_STATUS_RS_MASK, status)) {
case 0:
pr_debug("- RX (Stopped): Reset or Stop command\n");
break;
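
The debug decoders above now switch directly on FIELD_GET(DMA_STATUS_TS_MASK, status), dropping both the temporary and the *_SHIFT constants. A companion sketch of FIELD_GET()'s mask-then-downshift behaviour, with the same demo-macro caveats as before:

        #include <stdint.h>
        #include <stdio.h>

        #define DEMO_GENMASK(h, l)  ((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))
        /* Mask out the field, then shift it down to bit 0 */
        #define DEMO_FIELD_GET(mask, reg) \
                (((uint32_t)(reg) & (mask)) >> __builtin_ctz(mask))

        int main(void)
        {
                uint32_t ts_mask = DEMO_GENMASK(22, 20); /* Transmit Process State */
                uint32_t status  = 0x00300000;           /* example register value */

                switch (DEMO_FIELD_GET(ts_mask, status)) {
                case 3:
                        printf("- TX state 3\n");        /* bits 21:20 set */
                        break;
                default:
                        printf("- TX state %u\n",
                               (unsigned)DEMO_FIELD_GET(ts_mask, status));
                }
                return 0;
        }
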
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index fecda3034d36..51943705a2b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -24,17 +24,15 @@
#define XGMAC_CONFIG_SS_2500 (0x6 << XGMAC_CONFIG_SS_OFF)
#define XGMAC_CONFIG_SS_10_MII (0x7 << XGMAC_CONFIG_SS_OFF)
#define XGMAC_CONFIG_SARC GENMASK(22, 20)
-#define XGMAC_CONFIG_SARC_SHIFT 20
#define XGMAC_CONFIG_JD BIT(16)
#define XGMAC_CONFIG_TE BIT(0)
#define XGMAC_CORE_INIT_TX (XGMAC_CONFIG_JD)
#define XGMAC_RX_CONFIG 0x00000004
#define XGMAC_CONFIG_ARPEN BIT(31)
#define XGMAC_CONFIG_GPSL GENMASK(29, 16)
-#define XGMAC_CONFIG_GPSL_SHIFT 16
#define XGMAC_CONFIG_HDSMS GENMASK(14, 12)
#define XGMAC_CONFIG_HDSMS_SHIFT 12
-#define XGMAC_CONFIG_HDSMS_256 (0x2 << XGMAC_CONFIG_HDSMS_SHIFT)
+#define XGMAC_CONFIG_HDSMS_256 FIELD_PREP(XGMAC_CONFIG_HDSMS, 0x2)
#define XGMAC_CONFIG_S2KP BIT(11)
#define XGMAC_CONFIG_LM BIT(10)
#define XGMAC_CONFIG_IPC BIT(9)
@@ -44,8 +42,10 @@
#define XGMAC_CONFIG_CST BIT(2)
#define XGMAC_CONFIG_ACS BIT(1)
#define XGMAC_CONFIG_RE BIT(0)
-#define XGMAC_CORE_INIT_RX (XGMAC_CONFIG_GPSLCE | XGMAC_CONFIG_WD | \
- (XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT))
+#define XGMAC_CORE_INIT_RX (XGMAC_CONFIG_GPSLCE | \
+ XGMAC_CONFIG_WD | \
+ FIELD_PREP(XGMAC_CONFIG_GPSL, \
+ XGMAC_JUMBO_LEN))
#define XGMAC_PACKET_FILTER 0x00000008
#define XGMAC_FILTER_RA BIT(31)
#define XGMAC_FILTER_IPFE BIT(20)
@@ -90,7 +90,6 @@
#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE)
#define XGMAC_Qx_TX_FLOW_CTRL(x) (0x00000070 + (x) * 4)
#define XGMAC_PT GENMASK(31, 16)
-#define XGMAC_PT_SHIFT 16
#define XGMAC_TFE BIT(1)
#define XGMAC_RX_FLOW_CTRL 0x00000090
#define XGMAC_RFE BIT(0)
@@ -108,6 +107,7 @@
#define XGMAC_HWFEAT_VXN BIT(29)
#define XGMAC_HWFEAT_SAVLANINS BIT(27)
#define XGMAC_HWFEAT_TSSTSSEL GENMASK(26, 25)
+#define XGMAC_HWFEAT_PHYSEL GENMASK(24, 23)
#define XGMAC_HWFEAT_ADDMACADRSEL GENMASK(22, 18)
#define XGMAC_HWFEAT_RXCOESEL BIT(16)
#define XGMAC_HWFEAT_TXCOESEL BIT(14)
@@ -180,12 +180,11 @@
#define XGMAC_ADDR_MAX 32
#define XGMAC_AE BIT(31)
#define XGMAC_DCS GENMASK(19, 16)
-#define XGMAC_DCS_SHIFT 16
#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
#define XGMAC_L3L4_ADDR_CTRL 0x00000c00
#define XGMAC_IDDR GENMASK(16, 8)
-#define XGMAC_IDDR_SHIFT 8
-#define XGMAC_IDDR_FNUM 4
+#define XGMAC_IDDR_FNUM_MASK GENMASK(7, 4) /* FNUM within IDDR */
+#define XGMAC_IDDR_REG_MASK GENMASK(3, 0) /* REG within IDDR */
#define XGMAC_TT BIT(1)
#define XGMAC_XB BIT(0)
#define XGMAC_L3L4_DATA 0x00000c04
@@ -204,7 +203,6 @@
#define XGMAC_L3PEN0 BIT(0)
#define XGMAC_L4_ADDR 0x1
#define XGMAC_L4DP0 GENMASK(31, 16)
-#define XGMAC_L4DP0_SHIFT 16
#define XGMAC_L4SP0 GENMASK(15, 0)
#define XGMAC_L3_ADDR0 0x4
#define XGMAC_L3_ADDR1 0x5
@@ -224,7 +222,6 @@
#define XGMAC_RSS_DATA 0x00000c8c
#define XGMAC_TIMESTAMP_STATUS 0x00000d20
#define XGMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25)
-#define XGMAC_TIMESTAMP_ATSNS_SHIFT 25
#define XGMAC_TXTSC BIT(15)
#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
#define XGMAC_TXTSSTSLO GENMASK(30, 0)
@@ -290,13 +287,9 @@
#define XGMAC_DPP_DISABLE BIT(0)
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
-#define XGMAC_TQS_SHIFT 16
#define XGMAC_Q2TCMAP GENMASK(10, 8)
-#define XGMAC_Q2TCMAP_SHIFT 8
#define XGMAC_TTC GENMASK(6, 4)
-#define XGMAC_TTC_SHIFT 4
#define XGMAC_TXQEN GENMASK(3, 2)
-#define XGMAC_TXQEN_SHIFT 2
#define XGMAC_TSF BIT(1)
#define XGMAC_MTL_TCx_ETS_CONTROL(x) (0x00001110 + (0x80 * (x)))
#define XGMAC_MTL_TCx_QUANTUM_WEIGHT(x) (0x00001118 + (0x80 * (x)))
@@ -310,16 +303,12 @@
#define XGMAC_ETS (0x2 << 0)
#define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x)))
#define XGMAC_RQS GENMASK(25, 16)
-#define XGMAC_RQS_SHIFT 16
#define XGMAC_EHFC BIT(7)
#define XGMAC_RSF BIT(5)
#define XGMAC_RTC GENMASK(1, 0)
-#define XGMAC_RTC_SHIFT 0
#define XGMAC_MTL_RXQ_FLOW_CONTROL(x) (0x00001150 + (0x80 * (x)))
#define XGMAC_RFD GENMASK(31, 17)
-#define XGMAC_RFD_SHIFT 17
#define XGMAC_RFA GENMASK(15, 1)
-#define XGMAC_RFA_SHIFT 1
#define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x)))
#define XGMAC_RXOIE BIT(16)
#define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x)))
@@ -333,9 +322,7 @@
#define XGMAC_SWR BIT(0)
#define XGMAC_DMA_SYSBUS_MODE 0x00003004
#define XGMAC_WR_OSR_LMT GENMASK(29, 24)
-#define XGMAC_WR_OSR_LMT_SHIFT 24
#define XGMAC_RD_OSR_LMT GENMASK(21, 16)
-#define XGMAC_RD_OSR_LMT_SHIFT 16
#define XGMAC_EN_LPI BIT(15)
#define XGMAC_LPI_XIT_PKT BIT(14)
#define XGMAC_AAL DMA_AXI_AAL
@@ -370,15 +357,12 @@
#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
#define XGMAC_EDSE BIT(28)
#define XGMAC_TxPBL GENMASK(21, 16)
-#define XGMAC_TxPBL_SHIFT 16
#define XGMAC_TSE BIT(12)
#define XGMAC_OSP BIT(4)
#define XGMAC_TXST BIT(0)
#define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x)))
#define XGMAC_RxPBL GENMASK(21, 16)
-#define XGMAC_RxPBL_SHIFT 16
#define XGMAC_RBSZ GENMASK(14, 1)
-#define XGMAC_RBSZ_SHIFT 1
#define XGMAC_RXST BIT(0)
#define XGMAC_DMA_CH_TxDESC_HADDR(x) (0x00003110 + (0x80 * (x)))
#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x)))
@@ -423,32 +407,24 @@
#define XGMAC_TDES0_LT GENMASK(7, 0)
#define XGMAC_TDES1_LT GENMASK(31, 8)
#define XGMAC_TDES2_IVT GENMASK(31, 16)
-#define XGMAC_TDES2_IVT_SHIFT 16
#define XGMAC_TDES2_IOC BIT(31)
#define XGMAC_TDES2_TTSE BIT(30)
#define XGMAC_TDES2_B2L GENMASK(29, 16)
-#define XGMAC_TDES2_B2L_SHIFT 16
#define XGMAC_TDES2_VTIR GENMASK(15, 14)
-#define XGMAC_TDES2_VTIR_SHIFT 14
#define XGMAC_TDES2_B1L GENMASK(13, 0)
#define XGMAC_TDES3_OWN BIT(31)
#define XGMAC_TDES3_CTXT BIT(30)
#define XGMAC_TDES3_FD BIT(29)
#define XGMAC_TDES3_LD BIT(28)
#define XGMAC_TDES3_CPC GENMASK(27, 26)
-#define XGMAC_TDES3_CPC_SHIFT 26
#define XGMAC_TDES3_TCMSSV BIT(26)
#define XGMAC_TDES3_SAIC GENMASK(25, 23)
-#define XGMAC_TDES3_SAIC_SHIFT 23
#define XGMAC_TDES3_TBSV BIT(24)
#define XGMAC_TDES3_THL GENMASK(22, 19)
-#define XGMAC_TDES3_THL_SHIFT 19
#define XGMAC_TDES3_IVTIR GENMASK(19, 18)
-#define XGMAC_TDES3_IVTIR_SHIFT 18
#define XGMAC_TDES3_TSE BIT(18)
#define XGMAC_TDES3_IVLTV BIT(17)
#define XGMAC_TDES3_CIC GENMASK(17, 16)
-#define XGMAC_TDES3_CIC_SHIFT 16
#define XGMAC_TDES3_TPL GENMASK(17, 0)
#define XGMAC_TDES3_VLTV BIT(16)
#define XGMAC_TDES3_VT GENMASK(15, 0)
@@ -461,7 +437,6 @@
#define XGMAC_RDES3_CDA BIT(27)
#define XGMAC_RDES3_RSV BIT(26)
#define XGMAC_RDES3_L34T GENMASK(23, 20)
-#define XGMAC_RDES3_L34T_SHIFT 20
#define XGMAC_RDES3_ET_LT GENMASK(19, 16)
#define XGMAC_L34T_IP4TCP 0x1
#define XGMAC_L34T_IP4UDP 0x2
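
Once the masks are declared with GENMASK(), every *_SHIFT companion deleted from this header is recoverable from the mask, which is why definitions such as XGMAC_CONFIG_HDSMS_256 can be rewritten as FIELD_PREP(XGMAC_CONFIG_HDSMS, 0x2). A standalone check (demo macros as in the earlier sketches) that the new form equals the old `0x2 << 12`:

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        #define DEMO_GENMASK(h, l)  ((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))
        #define DEMO_FIELD_PREP(mask, val) \
                (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

        int main(void)
        {
                uint32_t hdsms = DEMO_GENMASK(14, 12);

                /* old: (0x2 << XGMAC_CONFIG_HDSMS_SHIFT); new: FIELD_PREP(mask, 0x2) */
                assert(DEMO_FIELD_PREP(hdsms, 0x2) == (0x2u << 12));
                puts("equivalent");
                return 0;
        }
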
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index b40b3ea50e25..49893b9fb88c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -46,8 +46,6 @@ static void dwxgmac2_update_caps(struct stmmac_priv *priv)
{
if (!priv->dma_cap.mbps_10_100)
priv->hw->link.caps &= ~(MAC_10 | MAC_100);
- else if (!priv->dma_cap.half_duplex)
- priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD);
}
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
@@ -298,10 +296,10 @@ static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
reg_space[i] = readl(ioaddr + i * 4);
}
-static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
+static int dwxgmac2_host_irq_status(struct stmmac_priv *priv,
struct stmmac_extra_stats *x)
{
- void __iomem *ioaddr = hw->pcsr;
+ void __iomem *ioaddr = priv->hw->pcsr;
u32 stat, en;
int ret = 0;
@@ -369,7 +367,7 @@ static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
u32 value = XGMAC_TFE;
if (duplex)
- value |= pause_time << XGMAC_PT_SHIFT;
+ value |= FIELD_PREP(XGMAC_PT, pause_time);
writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
}
@@ -1226,8 +1224,7 @@ static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
{
u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
- value &= ~XGMAC_CONFIG_SARC;
- value |= val << XGMAC_CONFIG_SARC_SHIFT;
+ value = u32_replace_bits(value, val, XGMAC_CONFIG_SARC);
writel(value, ioaddr + XGMAC_TX_CONFIG);
}
@@ -1247,14 +1244,16 @@ static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
u8 reg, u32 *data)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ u32 value, iddr;
int ret;
ret = dwxgmac2_filter_wait(hw);
if (ret)
return ret;
- value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
+ iddr = FIELD_PREP(XGMAC_IDDR_FNUM_MASK, filter_no) |
+ FIELD_PREP(XGMAC_IDDR_REG_MASK, reg);
+ value = FIELD_PREP(XGMAC_IDDR, iddr);
value |= XGMAC_TT | XGMAC_XB;
writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
@@ -1270,7 +1269,7 @@ static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
u8 reg, u32 data)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ u32 value, iddr;
int ret;
ret = dwxgmac2_filter_wait(hw);
@@ -1279,7 +1278,9 @@ static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
writel(data, ioaddr + XGMAC_L3L4_DATA);
- value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
+ iddr = FIELD_PREP(XGMAC_IDDR_FNUM_MASK, filter_no) |
+ FIELD_PREP(XGMAC_IDDR_REG_MASK, reg);
+ value = FIELD_PREP(XGMAC_IDDR, iddr);
value |= XGMAC_XB;
writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
@@ -1388,13 +1389,13 @@ static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
return ret;
if (sa) {
- value = match & XGMAC_L4SP0;
+ value = FIELD_PREP(XGMAC_L4SP0, match);
ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
if (ret)
return ret;
} else {
- value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;
+ value = FIELD_PREP(XGMAC_L4DP0, match);
ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
if (ret)
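
The filter_read()/filter_write() rework above splits the old `((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT` into two explicit layers: FNUM and REG are packed into an IDDR-relative value, which is then placed into the IDDR field of the register. A standalone sketch of that two-level composition, with field layouts taken from the defines above and a hypothetical filter number:

        #include <stdint.h>
        #include <stdio.h>

        #define DEMO_GENMASK(h, l)  ((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))
        #define DEMO_FIELD_PREP(mask, val) \
                (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

        #define IDDR            DEMO_GENMASK(16, 8)     /* field in L3L4_ADDR_CTRL */
        #define IDDR_FNUM       DEMO_GENMASK(7, 4)      /* filter number within IDDR */
        #define IDDR_REG        DEMO_GENMASK(3, 0)      /* register within IDDR */

        int main(void)
        {
                uint32_t filter_no = 2, reg = 0x4;      /* e.g. XGMAC_L3_ADDR0 */
                uint32_t iddr, value;

                /* inner layer: pack FNUM and REG into the IDDR-relative value */
                iddr  = DEMO_FIELD_PREP(IDDR_FNUM, filter_no) |
                        DEMO_FIELD_PREP(IDDR_REG, reg);
                /* outer layer: place that value into the IDDR register field */
                value = DEMO_FIELD_PREP(IDDR, iddr);

                printf("0x%08x\n", value);              /* 0x00002400 */
                return 0;
        }
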
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index a2980482fcce..41e5b420a215 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -12,7 +12,7 @@
static int dwxgmac2_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
- unsigned int tdes3 = le32_to_cpu(p->des3);
+ u32 tdes3 = le32_to_cpu(p->des3);
int ret = tx_done;
if (unlikely(tdes3 & XGMAC_TDES3_OWN))
@@ -26,7 +26,7 @@ static int dwxgmac2_get_tx_status(struct stmmac_extra_stats *x,
static int dwxgmac2_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
- unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 rdes3 = le32_to_cpu(p->des3);
if (unlikely(rdes3 & XGMAC_RDES3_OWN))
return dma_own;
@@ -114,7 +114,7 @@ static inline void dwxgmac2_get_timestamp(void *desc, u32 ats, u64 *ts)
static int dwxgmac2_rx_check_timestamp(void *desc)
{
struct dma_desc *p = (struct dma_desc *)desc;
- unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 rdes3 = le32_to_cpu(p->des3);
bool desc_valid, ts_valid;
dma_rmb();
@@ -135,7 +135,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
- unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 rdes3 = le32_to_cpu(p->des3);
int ret = -EBUSY;
if (likely(rdes3 & XGMAC_RDES3_CDA))
@@ -162,7 +162,7 @@ static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls, unsigned int tot_pkt_len)
{
- unsigned int tdes3 = le32_to_cpu(p->des3);
+ u32 tdes3 = le32_to_cpu(p->des3);
p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L);
@@ -173,7 +173,7 @@ static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
tdes3 &= ~XGMAC_TDES3_FD;
if (csum_flag)
- tdes3 |= 0x3 << XGMAC_TDES3_CIC_SHIFT;
+ tdes3 |= FIELD_PREP(XGMAC_TDES3_CIC, 0x3);
else
tdes3 &= ~XGMAC_TDES3_CIC;
@@ -201,18 +201,16 @@ static void dwxgmac2_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
bool ls, unsigned int tcphdrlen,
unsigned int tcppayloadlen)
{
- unsigned int tdes3 = le32_to_cpu(p->des3);
+ u32 tdes3 = le32_to_cpu(p->des3);
if (len1)
p->des2 |= cpu_to_le32(len1 & XGMAC_TDES2_B1L);
if (len2)
- p->des2 |= cpu_to_le32((len2 << XGMAC_TDES2_B2L_SHIFT) &
- XGMAC_TDES2_B2L);
+ p->des2 |= cpu_to_le32(FIELD_PREP(XGMAC_TDES2_B2L, len2));
if (is_fs) {
tdes3 |= XGMAC_TDES3_FD | XGMAC_TDES3_TSE;
- tdes3 |= (tcphdrlen << XGMAC_TDES3_THL_SHIFT) &
- XGMAC_TDES3_THL;
- tdes3 |= tcppayloadlen & XGMAC_TDES3_TPL;
+ tdes3 |= FIELD_PREP(XGMAC_TDES3_THL, tcphdrlen);
+ tdes3 |= FIELD_PREP(XGMAC_TDES3_TPL, tcppayloadlen);
} else {
tdes3 &= ~XGMAC_TDES3_FD;
}
@@ -274,11 +272,11 @@ static void dwxgmac2_clear(struct dma_desc *p)
static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
enum pkt_hash_types *type)
{
- unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 rdes3 = le32_to_cpu(p->des3);
u32 ptype;
if (rdes3 & XGMAC_RDES3_RSV) {
- ptype = (rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT;
+ ptype = FIELD_GET(XGMAC_RDES3_L34T, rdes3);
switch (ptype) {
case XGMAC_L34T_IP4TCP:
@@ -313,9 +311,7 @@ static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool is_v
static void dwxgmac2_set_sarc(struct dma_desc *p, u32 sarc_type)
{
- sarc_type <<= XGMAC_TDES3_SAIC_SHIFT;
-
- p->des3 |= cpu_to_le32(sarc_type & XGMAC_TDES3_SAIC);
+ p->des3 |= cpu_to_le32(FIELD_PREP(XGMAC_TDES3_SAIC, sarc_type));
}
static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
@@ -328,13 +324,11 @@ static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
/* Inner VLAN */
if (inner_type) {
- u32 des = inner_tag << XGMAC_TDES2_IVT_SHIFT;
+ u32 des = FIELD_PREP(XGMAC_TDES2_IVT, inner_tag);
- des &= XGMAC_TDES2_IVT;
p->des2 = cpu_to_le32(des);
- des = inner_type << XGMAC_TDES3_IVTIR_SHIFT;
- des &= XGMAC_TDES3_IVTIR;
+ des = FIELD_PREP(XGMAC_TDES3_IVTIR, inner_type);
p->des3 = cpu_to_le32(des | XGMAC_TDES3_IVLTV);
}
@@ -347,8 +341,7 @@ static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
{
- type <<= XGMAC_TDES2_VTIR_SHIFT;
- p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
+ p->des2 |= cpu_to_le32(FIELD_PREP(XGMAC_TDES2_VTIR, type));
}
static void dwxgmac2_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
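
The recurring pattern in dwxgmac2_descs.c is: read the little-endian descriptor word with le32_to_cpu(), manipulate it as a plain u32 via FIELD_PREP()/FIELD_GET(), then store it back with cpu_to_le32(). A hedged userspace sketch of that round trip for the TDES3 checksum-insertion field (CIC is GENMASK(17, 16) per the header above; the demo endian helpers are identity functions, which is only valid on a little-endian host):

        #include <stdint.h>
        #include <stdio.h>

        #define DEMO_GENMASK(h, l)  ((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))
        #define DEMO_FIELD_PREP(mask, val) \
                (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
        #define TDES3_CIC       DEMO_GENMASK(17, 16)

        /* Stand-ins for the kernel's endian helpers, identity on LE hosts */
        static uint32_t demo_le32_to_cpu(uint32_t v) { return v; }
        static uint32_t demo_cpu_to_le32(uint32_t v) { return v; }

        int main(void)
        {
                uint32_t des3 = demo_cpu_to_le32(0);    /* descriptor word, LE */
                uint32_t tdes3 = demo_le32_to_cpu(des3);

                tdes3 |= DEMO_FIELD_PREP(TDES3_CIC, 0x3); /* full csum insertion */
                des3 = demo_cpu_to_le32(tdes3);

                printf("0x%08x\n", des3);               /* 0x00030000 */
                return 0;
        }
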
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index cc1bdc0975d5..03437f1cf3df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -55,8 +55,7 @@ static void dwxgmac2_dma_init_rx_chan(struct stmmac_priv *priv,
u32 value;
value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
- value &= ~XGMAC_RxPBL;
- value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
+ value = u32_replace_bits(value, rxpbl, XGMAC_RxPBL);
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan));
@@ -72,9 +71,7 @@ static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv,
u32 value;
value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
- value &= ~XGMAC_TxPBL;
- value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
- value |= XGMAC_OSP;
+ value = u32_replace_bits(value, txpbl, XGMAC_TxPBL);
writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan));
@@ -90,13 +87,8 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (axi->axi_xit_frm)
value |= XGMAC_LPI_XIT_PKT;
- value &= ~XGMAC_WR_OSR_LMT;
- value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
- XGMAC_WR_OSR_LMT;
-
- value &= ~XGMAC_RD_OSR_LMT;
- value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
- XGMAC_RD_OSR_LMT;
+ value = u32_replace_bits(value, axi->axi_wr_osr_lmt, XGMAC_WR_OSR_LMT);
+ value = u32_replace_bits(value, axi->axi_rd_osr_lmt, XGMAC_RD_OSR_LMT);
if (!axi->axi_fb)
value |= XGMAC_UNDEF;
@@ -127,23 +119,24 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
{
u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
unsigned int rqs = fifosz / 256 - 1;
+ unsigned int rtc;
if (mode == SF_DMA_MODE) {
value |= XGMAC_RSF;
} else {
value &= ~XGMAC_RSF;
- value &= ~XGMAC_RTC;
if (mode <= 64)
- value |= 0x0 << XGMAC_RTC_SHIFT;
+ rtc = 0x0;
else if (mode <= 96)
- value |= 0x2 << XGMAC_RTC_SHIFT;
+ rtc = 0x2;
else
- value |= 0x3 << XGMAC_RTC_SHIFT;
+ rtc = 0x3;
+
+ value = u32_replace_bits(value, rtc, XGMAC_RTC);
}
- value &= ~XGMAC_RQS;
- value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
+ value = u32_replace_bits(value, rqs, XGMAC_RQS);
if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
@@ -172,11 +165,8 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
break;
}
- flow &= ~XGMAC_RFD;
- flow |= rfd << XGMAC_RFD_SHIFT;
-
- flow &= ~XGMAC_RFA;
- flow |= rfa << XGMAC_RFA_SHIFT;
+ flow = u32_replace_bits(flow, rfd, XGMAC_RFD);
+ flow = u32_replace_bits(flow, rfa, XGMAC_RFA);
writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
}
@@ -189,40 +179,41 @@ static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
{
u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
unsigned int tqs = fifosz / 256 - 1;
+ unsigned int ttc, txqen;
if (mode == SF_DMA_MODE) {
value |= XGMAC_TSF;
} else {
value &= ~XGMAC_TSF;
- value &= ~XGMAC_TTC;
if (mode <= 64)
- value |= 0x0 << XGMAC_TTC_SHIFT;
+ ttc = 0x0;
else if (mode <= 96)
- value |= 0x2 << XGMAC_TTC_SHIFT;
+ ttc = 0x2;
else if (mode <= 128)
- value |= 0x3 << XGMAC_TTC_SHIFT;
+ ttc = 0x3;
else if (mode <= 192)
- value |= 0x4 << XGMAC_TTC_SHIFT;
+ ttc = 0x4;
else if (mode <= 256)
- value |= 0x5 << XGMAC_TTC_SHIFT;
+ ttc = 0x5;
else if (mode <= 384)
- value |= 0x6 << XGMAC_TTC_SHIFT;
+ ttc = 0x6;
else
- value |= 0x7 << XGMAC_TTC_SHIFT;
+ ttc = 0x7;
+
+ value = u32_replace_bits(value, ttc, XGMAC_TTC);
}
/* Use static TC to Queue mapping */
- value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP;
+ value |= FIELD_PREP(XGMAC_Q2TCMAP, channel);
- value &= ~XGMAC_TXQEN;
if (qmode != MTL_QUEUE_AVB)
- value |= 0x2 << XGMAC_TXQEN_SHIFT;
+ txqen = 0x2;
else
- value |= 0x1 << XGMAC_TXQEN_SHIFT;
+ txqen = 0x1;
- value &= ~XGMAC_TQS;
- value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;
+ value = u32_replace_bits(value, txqen, XGMAC_TXQEN);
+ value = u32_replace_bits(value, tqs, XGMAC_TQS);
writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}
@@ -373,6 +364,7 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->vxn = (hw_cap & XGMAC_HWFEAT_VXN) >> 29;
dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
dma_cap->tssrc = (hw_cap & XGMAC_HWFEAT_TSSTSSEL) >> 25;
+ dma_cap->actphyif = FIELD_GET(XGMAC_HWFEAT_PHYSEL, hw_cap);
dma_cap->multi_addr = (hw_cap & XGMAC_HWFEAT_ADDMACADRSEL) >> 18;
dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
@@ -526,16 +518,17 @@ static void dwxgmac2_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
{
u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
+ unsigned int txqen;
- value &= ~XGMAC_TXQEN;
if (qmode != MTL_QUEUE_AVB) {
- value |= 0x2 << XGMAC_TXQEN_SHIFT;
+ txqen = 0x2;
writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
} else {
- value |= 0x1 << XGMAC_TXQEN_SHIFT;
+ txqen = 0x1;
writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
}
+ value = u32_replace_bits(value, txqen, XGMAC_TXQEN);
writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}
@@ -545,8 +538,7 @@ static void dwxgmac2_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 value;
value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
- value &= ~XGMAC_RBSZ;
- value |= bfsize << XGMAC_RBSZ_SHIFT;
+ value = u32_replace_bits(value, bfsize, XGMAC_RBSZ);
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}
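
dwxgmac2_dma.c now leans on u32_replace_bits(), which folds the clear-field-then-insert read-modify-write into one expression. A minimal sketch of those semantics (a demo reimplementation, not the <linux/bitfield.h> one), using the XGMAC_RBSZ layout from the header to show that unrelated bits survive:

        #include <stdint.h>
        #include <stdio.h>

        #define DEMO_GENMASK(h, l)  ((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))

        /* Clear the field described by mask in old, then insert val there */
        static uint32_t demo_u32_replace_bits(uint32_t old, uint32_t val,
                                              uint32_t mask)
        {
                return (old & ~mask) | ((val << __builtin_ctz(mask)) & mask);
        }

        int main(void)
        {
                uint32_t rbsz = DEMO_GENMASK(14, 1);    /* Rx buffer size field */
                uint32_t ctrl = 0x80000001;             /* other bits must survive */

                ctrl = demo_u32_replace_bits(ctrl, 1536, rbsz);
                printf("0x%08x\n", ctrl);               /* 0x80000c01 */
                return 0;
        }
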
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 937b7a0466fc..8f6993c8bcae 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -15,7 +15,7 @@
static int enh_desc_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
- unsigned int tdes0 = le32_to_cpu(p->des0);
+ u32 tdes0 = le32_to_cpu(p->des0);
int ret = tx_done;
/* Get tx owner first */
@@ -44,7 +44,7 @@ static int enh_desc_get_tx_status(struct stmmac_extra_stats *x,
if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
(tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
x->tx_collision +=
- (tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;
+ FIELD_GET(ETDES0_COLLISION_COUNT_MASK, tdes0);
if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
x->tx_deferred++;
@@ -88,7 +88,7 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
/* bits 5 7 0 | Frame status
* ----------------------------------------------------------
- * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
+ * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
* 1 0 0 | IPv4/6 No CSUM errorS.
* 1 0 1 | IPv4/6 CSUM PAYLOAD error
* 1 1 0 | IPv4/6 CSUM IP HR error
@@ -117,11 +117,11 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
static void enh_desc_get_ext_status(struct stmmac_extra_stats *x,
struct dma_extended_desc *p)
{
- unsigned int rdes0 = le32_to_cpu(p->basic.des0);
- unsigned int rdes4 = le32_to_cpu(p->des4);
+ u32 rdes0 = le32_to_cpu(p->basic.des0);
+ u32 rdes4 = le32_to_cpu(p->des4);
if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
- int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;
+ int message_type = FIELD_GET(ERDES4_MSG_TYPE_MASK, rdes4);
if (rdes4 & ERDES4_IP_HDR_ERR)
x->ip_hdr_err++;
@@ -167,13 +167,13 @@ static void enh_desc_get_ext_status(struct stmmac_extra_stats *x,
x->av_pkt_rcvd++;
if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
x->av_tagged_pkt_rcvd++;
- if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
+ if (rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK)
x->vlan_tag_priority_val++;
if (rdes4 & ERDES4_L3_FILTER_MATCH)
x->l3_filter_match++;
if (rdes4 & ERDES4_L4_FILTER_MATCH)
x->l4_filter_match++;
- if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
+ if (rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK)
x->l3_l4_filter_no_match++;
}
}
@@ -181,7 +181,7 @@ static void enh_desc_get_ext_status(struct stmmac_extra_stats *x,
static int enh_desc_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
- unsigned int rdes0 = le32_to_cpu(p->des0);
+ u32 rdes0 = le32_to_cpu(p->des0);
int ret = good_frame;
if (unlikely(rdes0 & RDES0_OWN))
@@ -312,7 +312,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls, unsigned int tot_pkt_len)
{
- unsigned int tdes0 = le32_to_cpu(p->des0);
+ u32 tdes0 = le32_to_cpu(p->des0);
if (mode == STMMAC_CHAIN_MODE)
enh_set_tx_desc_len_on_chain(p, len);
@@ -324,10 +324,8 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
else
tdes0 &= ~ETDES0_FIRST_SEGMENT;
- if (likely(csum_flag))
- tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
- else
- tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
+ tdes0 = u32_replace_bits(tdes0, csum_flag ? TX_CIC_FULL : 0,
+ ETDES0_CHECKSUM_INSERTION_MASK);
if (ls)
tdes0 |= ETDES0_LAST_SEGMENT;
@@ -363,8 +361,7 @@ static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
if (rx_coe_type == STMMAC_RX_COE_TYPE1)
csum = 2;
- return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
- >> RDES0_FRAME_LEN_SHIFT) - csum);
+ return FIELD_GET(RDES0_FRAME_LEN_MASK, le32_to_cpu(p->des0)) - csum;
}
static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 014f7cd79a3c..7e69ff4b9a98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -109,7 +109,7 @@ int stmmac_reset(struct stmmac_priv *priv)
void __iomem *ioaddr = priv->ioaddr;
if (plat && plat->fix_soc_reset)
- return plat->fix_soc_reset(priv, ioaddr);
+ return plat->fix_soc_reset(priv);
return stmmac_do_callback(priv, dma, reset, ioaddr);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index df6e8a567b1f..0db96a387259 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -354,7 +354,7 @@ struct stmmac_ops {
/* Dump MAC registers */
void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
/* Handle extra events on specific interrupts hw dependent */
- int (*host_irq_status)(struct mac_device_info *hw,
+ int (*host_irq_status)(struct stmmac_priv *priv,
struct stmmac_extra_stats *x);
/* Handle MTL interrupts */
int (*host_mtl_irq_status)(struct stmmac_priv *priv,
@@ -453,7 +453,7 @@ struct stmmac_ops {
#define stmmac_dump_mac_regs(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, dump_regs, __args)
#define stmmac_host_irq_status(__priv, __args...) \
- stmmac_do_callback(__priv, mac, host_irq_status, __args)
+ stmmac_do_callback(__priv, mac, host_irq_status, __priv, __args)
#define stmmac_host_mtl_irq_status(__priv, __args...) \
stmmac_do_callback(__priv, mac, host_mtl_irq_status, __priv, __args)
#define stmmac_set_filter(__priv, __args...) \
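
The hwif.h hunk above changes host_irq_status() to take the driver context and makes the stmmac_host_irq_status() wrapper prepend __priv. The sketch below shows the general shape of such a guarded ops-table dispatch with the context as first argument; it is illustrative only, as the real stmmac_do_callback() macro in hwif.h carries more plumbing than shown here:

        #include <stdio.h>

        struct demo_priv;

        struct demo_mac_ops {
                /* callbacks now take the driver context first */
                int (*host_irq_status)(struct demo_priv *priv, int *stats);
        };

        struct demo_priv {
                const struct demo_mac_ops *ops;
        };

        /* Guarded dispatch: error fallback when the op is absent */
        static int demo_host_irq_status(struct demo_priv *priv, int *stats)
        {
                if (priv->ops && priv->ops->host_irq_status)
                        return priv->ops->host_irq_status(priv, stats);
                return -22;     /* -EINVAL */
        }

        static int demo_irq_status(struct demo_priv *priv, int *stats)
        {
                (void)priv;
                (*stats)++;
                return 0;
        }

        int main(void)
        {
                const struct demo_mac_ops ops = {
                        .host_irq_status = demo_irq_status,
                };
                struct demo_priv priv = { .ops = &ops };
                int stats = 0;

                printf("ret=%d stats=%d\n",
                       demo_host_irq_status(&priv, &stats), stats);
                return 0;
        }
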
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 0fab842902a8..1b3b114e7bec 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -252,7 +252,7 @@ static void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK);
}
-/* This reads the MAC core counters (if actaully supported).
+/* This reads the MAC core counters (if actually supported).
* by default the MMC core is programmed to reset each
* counter after a read. So all the field of the mmc struct
* have to be incremented.
@@ -420,7 +420,7 @@ static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
*dest = *dest + tmp;
}
-/* This reads the MAC core counters (if actaully supported).
+/* This reads the MAC core counters (if actually supported).
* by default the MMC core is programmed to reset each
* counter after a read. So all the field of the mmc struct
* have to be incremented.
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 68a7cfcb1d8f..859cb9242a52 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -15,8 +15,8 @@
static int ndesc_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
- unsigned int tdes0 = le32_to_cpu(p->des0);
- unsigned int tdes1 = le32_to_cpu(p->des1);
+ u32 tdes0 = le32_to_cpu(p->des0);
+ u32 tdes1 = le32_to_cpu(p->des1);
int ret = tx_done;
/* Get tx owner first */
@@ -40,10 +40,8 @@ static int ndesc_get_tx_status(struct stmmac_extra_stats *x,
if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
(tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
(tdes0 & TDES0_LATE_COLLISION))) {
- unsigned int collisions;
-
- collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
- x->tx_collision += collisions;
+ x->tx_collision +=
+ FIELD_GET(TDES0_COLLISION_COUNT_MASK, tdes0);
}
ret = tx_err;
}
@@ -69,8 +67,8 @@ static int ndesc_get_tx_len(struct dma_desc *p)
static int ndesc_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
+ u32 rdes0 = le32_to_cpu(p->des0);
int ret = good_frame;
- unsigned int rdes0 = le32_to_cpu(p->des0);
if (unlikely(rdes0 & RDES0_OWN))
return dma_own;
@@ -178,17 +176,15 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls, unsigned int tot_pkt_len)
{
- unsigned int tdes1 = le32_to_cpu(p->des1);
+ u32 tdes1 = le32_to_cpu(p->des1);
if (is_fs)
tdes1 |= TDES1_FIRST_SEGMENT;
else
tdes1 &= ~TDES1_FIRST_SEGMENT;
- if (likely(csum_flag))
- tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT;
- else
- tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT);
+ tdes1 = u32_replace_bits(tdes1, csum_flag ? TX_CIC_FULL : 0,
+ TDES1_CHECKSUM_INSERTION_MASK);
if (ls)
tdes1 |= TDES1_LAST_SEGMENT;
@@ -222,10 +218,7 @@ static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
if (rx_coe_type == STMMAC_RX_COE_TYPE1)
csum = 2;
- return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
- >> RDES0_FRAME_LEN_SHIFT) -
- csum);
-
+ return FIELD_GET(RDES0_FRAME_LEN_MASK, le32_to_cpu(p->des0)) - csum;
}
static void ndesc_enable_tx_timestamp(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 012b0a477255..51c96a738151 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -31,7 +31,6 @@ struct stmmac_resources {
void __iomem *addr;
u8 mac[ETH_ALEN];
int wol_irq;
- int lpi_irq;
int irq;
int sfty_irq;
int sfty_ce_irq;
@@ -297,12 +296,12 @@ struct stmmac_priv {
int wol_irq;
u32 gmii_address_bus_config;
struct timer_list eee_ctrl_timer;
- int lpi_irq;
u32 tx_lpi_timer;
bool tx_lpi_clk_stop;
bool eee_enabled;
bool eee_active;
bool eee_sw_timer_en;
+ bool legacy_serdes_is_powered;
unsigned int mode;
unsigned int chain_mode;
int extend_desc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index b155e71aac51..c1e26965d9b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -914,20 +914,11 @@ static int stmmac_set_per_queue_coalesce(struct net_device *dev, u32 queue,
return __stmmac_set_coalesce(dev, ec, queue);
}
-static int stmmac_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+static u32 stmmac_get_rx_ring_count(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- switch (rxnfc->cmd) {
- case ETHTOOL_GRXRINGS:
- rxnfc->data = priv->plat->rx_queues_to_use;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
+ return priv->plat->rx_queues_to_use;
}
static u32 stmmac_get_rxfh_key_size(struct net_device *dev)
@@ -1121,7 +1112,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_eee = stmmac_ethtool_op_get_eee,
.set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
- .get_rxnfc = stmmac_get_rxnfc,
+ .get_rx_ring_count = stmmac_get_rx_ring_count,
.get_rxfh_key_size = stmmac_get_rxfh_key_size,
.get_rxfh_indir_size = stmmac_get_rxfh_indir_size,
.get_rxfh = stmmac_get_rxfh,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index bb110124f21e..b9a985fa772c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -43,7 +43,7 @@ static void config_sub_second_increment(void __iomem *ioaddr,
unsigned long data;
u32 reg_value;
- /* For GMAC3.x, 4.x versions, in "fine adjustement mode" set sub-second
+ /* For GMAC3.x, 4.x versions, in "fine adjustment mode" set sub-second
* increment to twice the number of nanoseconds of a clock cycle.
* The calculation of the default_addend value by the caller will set it
* to mid-range = 2^31 when the remainder of this division is zero,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a379221b96a3..c63099a77cc0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -127,6 +127,22 @@ static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
+static const char *stmmac_dwmac_actphyif[8] = {
+ [PHY_INTF_SEL_GMII_MII] = "GMII/MII",
+ [PHY_INTF_SEL_RGMII] = "RGMII",
+ [PHY_INTF_SEL_SGMII] = "SGMII",
+ [PHY_INTF_SEL_TBI] = "TBI",
+ [PHY_INTF_SEL_RMII] = "RMII",
+ [PHY_INTF_SEL_RTBI] = "RTBI",
+ [PHY_INTF_SEL_SMII] = "SMII",
+ [PHY_INTF_SEL_REVMII] = "REVMII",
+};
+
+static const char *stmmac_dwxgmac_phyif[4] = {
+ [PHY_INTF_GMII] = "GMII",
+ [PHY_INTF_RGMII] = "RGMII",
+};
+
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
@@ -866,6 +882,30 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
+static void stmmac_legacy_serdes_power_down(struct stmmac_priv *priv)
+{
+ if (priv->plat->serdes_powerdown && priv->legacy_serdes_is_powered)
+ priv->plat->serdes_powerdown(priv->dev, priv->plat->bsp_priv);
+
+ priv->legacy_serdes_is_powered = false;
+}
+
+static int stmmac_legacy_serdes_power_up(struct stmmac_priv *priv)
+{
+ int ret;
+
+ if (!priv->plat->serdes_powerup)
+ return 0;
+
+ ret = priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
+ if (ret < 0)
+ netdev_err(priv->dev, "SerDes powerup failed\n");
+ else
+ priv->legacy_serdes_is_powered = true;
+
+ return ret;
+}
+
/**
* stmmac_mac_flow_ctrl - Configure flow control in all queues
* @priv: driver private structure
@@ -890,6 +930,9 @@ static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
/* Refresh the MAC-specific capabilities */
stmmac_mac_update_caps(priv);
+ if (priv->hw_cap_support && !priv->dma_cap.half_duplex)
+ priv->hw->link.caps &= ~(MAC_1000HD | MAC_100HD | MAC_10HD);
+
config->mac_capabilities = priv->hw->link.caps;
if (priv->plat->max_speed)
@@ -962,9 +1005,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
u32 old_ctrl, ctrl;
int ret;
- if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
- priv->plat->serdes_powerup)
- priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
+ if (priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)
+ stmmac_legacy_serdes_power_up(priv);
old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
ctrl = old_ctrl & ~priv->hw->link.speed_mask;
@@ -1114,7 +1156,7 @@ static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
STMMAC_DEFAULT_TWT_LS);
- /* Try to cnfigure the hardware timer. */
+ /* Try to configure the hardware timer. */
ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
@@ -1206,6 +1248,7 @@ static int stmmac_init_phy(struct net_device *dev)
struct fwnode_handle *phy_fwnode;
struct fwnode_handle *fwnode;
struct ethtool_keee eee;
+ u32 dev_flags = 0;
int ret;
if (!phylink_expects_phy(priv->phylink))
@@ -1224,6 +1267,9 @@ static int stmmac_init_phy(struct net_device *dev)
else
phy_fwnode = NULL;
+ if (priv->plat->flags & STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD)
+ dev_flags |= PHY_F_KEEP_PREAMBLE_BEFORE_SFD;
+
/* Some DT bindings do not set-up the PHY handle. Let's try to
* manually parse it
*/
@@ -1242,10 +1288,12 @@ static int stmmac_init_phy(struct net_device *dev)
return -ENODEV;
}
+ phydev->dev_flags |= dev_flags;
+
ret = phylink_connect_phy(priv->phylink, phydev);
} else {
fwnode_handle_put(phy_fwnode);
- ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
+ ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, dev_flags);
}
if (ret) {
@@ -3134,8 +3182,6 @@ int stmmac_get_phy_intf_sel(phy_interface_t interface)
phy_intf_sel = PHY_INTF_SEL_GMII_MII;
else if (phy_interface_mode_is_rgmii(interface))
phy_intf_sel = PHY_INTF_SEL_RGMII;
- else if (interface == PHY_INTERFACE_MODE_SGMII)
- phy_intf_sel = PHY_INTF_SEL_SGMII;
else if (interface == PHY_INTERFACE_MODE_RMII)
phy_intf_sel = PHY_INTF_SEL_RMII;
else if (interface == PHY_INTERFACE_MODE_REVMII)
@@ -3149,13 +3195,24 @@ static int stmmac_prereset_configure(struct stmmac_priv *priv)
{
struct plat_stmmacenet_data *plat_dat = priv->plat;
phy_interface_t interface;
+ struct phylink_pcs *pcs;
int phy_intf_sel, ret;
if (!plat_dat->set_phy_intf_sel)
return 0;
interface = plat_dat->phy_interface;
- phy_intf_sel = stmmac_get_phy_intf_sel(interface);
+
+ /* Check whether this mode uses a PCS */
+ pcs = stmmac_mac_select_pcs(&priv->phylink_config, interface);
+ if (priv->integrated_pcs && pcs == &priv->integrated_pcs->pcs) {
+ /* Request the phy_intf_sel from the integrated PCS */
+ phy_intf_sel = stmmac_integrated_pcs_get_phy_intf_sel(pcs,
+ interface);
+ } else {
+ phy_intf_sel = stmmac_get_phy_intf_sel(interface);
+ }
+
if (phy_intf_sel < 0) {
netdev_err(priv->dev,
"failed to get phy_intf_sel for %s: %pe\n",
@@ -3489,7 +3546,7 @@ static void stmmac_mac_config_rss(struct stmmac_priv *priv)
/**
* stmmac_mtl_configuration - Configure MTL
* @priv: driver private structure
- * Description: It is used for configurring MTL
+ * Description: It is used for configuring MTL
*/
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
@@ -3712,10 +3769,6 @@ static void stmmac_free_irq(struct net_device *dev,
free_irq(priv->sfty_ce_irq, dev);
fallthrough;
case REQ_IRQ_ERR_SFTY_CE:
- if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
- free_irq(priv->lpi_irq, dev);
- fallthrough;
- case REQ_IRQ_ERR_LPI:
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
fallthrough;
@@ -3773,24 +3826,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
}
}
- /* Request the LPI IRQ in case of another line
- * is used for LPI
- */
- if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
- int_name = priv->int_name_lpi;
- sprintf(int_name, "%s:%s", dev->name, "lpi");
- ret = request_irq(priv->lpi_irq,
- stmmac_mac_interrupt,
- 0, int_name, dev);
- if (unlikely(ret < 0)) {
- netdev_err(priv->dev,
- "%s: alloc lpi MSI %d (error: %d)\n",
- __func__, priv->lpi_irq, ret);
- irq_err = REQ_IRQ_ERR_LPI;
- goto irq_error;
- }
- }
-
/* Request the common Safety Feature Correctible/Uncorrectible
* Error line in case of another line is used
*/
@@ -3930,19 +3965,6 @@ static int stmmac_request_irq_single(struct net_device *dev)
}
}
- /* Request the IRQ lines */
- if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
- ret = request_irq(priv->lpi_irq, stmmac_interrupt,
- IRQF_SHARED, dev->name, dev);
- if (unlikely(ret < 0)) {
- netdev_err(priv->dev,
- "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
- __func__, priv->lpi_irq, ret);
- irq_err = REQ_IRQ_ERR_LPI;
- goto irq_error;
- }
- }
-
/* Request the common Safety Feature Correctible/Uncorrectible
* Error line in case of another line is used
*/
@@ -4077,16 +4099,6 @@ static int __stmmac_open(struct net_device *dev,
stmmac_reset_queues_param(priv);
- if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
- priv->plat->serdes_powerup) {
- ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
- if (ret < 0) {
- netdev_err(priv->dev, "%s: Serdes powerup failed\n",
- __func__);
- goto init_error;
- }
- }
-
ret = stmmac_hw_setup(dev);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
@@ -4142,9 +4154,15 @@ static int stmmac_open(struct net_device *dev)
if (ret)
goto err_runtime_pm;
+ if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)) {
+ ret = stmmac_legacy_serdes_power_up(priv);
+ if (ret < 0)
+ goto err_disconnect_phy;
+ }
+
ret = __stmmac_open(dev, dma_conf);
if (ret)
- goto err_disconnect_phy;
+ goto err_serdes;
kfree(dma_conf);
@@ -4153,6 +4171,8 @@ static int stmmac_open(struct net_device *dev)
return ret;
+err_serdes:
+ stmmac_legacy_serdes_power_down(priv);
err_disconnect_phy:
phylink_disconnect_phy(priv->phylink);
err_runtime_pm:
@@ -4187,10 +4207,6 @@ static void __stmmac_release(struct net_device *dev)
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv, &priv->dma_conf);
- /* Powerdown Serdes if there is */
- if (priv->plat->serdes_powerdown)
- priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
-
stmmac_release_ptp(priv);
if (stmmac_fpe_supported(priv))
@@ -4216,6 +4232,7 @@ static int stmmac_release(struct net_device *dev)
__stmmac_release(dev);
+ stmmac_legacy_serdes_power_down(priv);
phylink_disconnect_phy(priv->phylink);
pm_runtime_put(priv->device);
@@ -4367,7 +4384,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* Always insert VLAN tag to SKB payload for TSO frames.
*
- * Never insert VLAN tag by HW, since segments splited by
+ * Never insert VLAN tag by HW, since segments split by
* TSO engine will be un-tagged by mistake.
*/
if (skb_vlan_tag_present(skb)) {
@@ -5940,7 +5957,7 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
unsigned long flags;
spin_lock_irqsave(&ch->lock, flags);
- /* Both RX and TX work done are compelte,
+ /* Both RX and TX work done are complete,
* so enable both RX & TX IRQs.
*/
stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
@@ -6143,7 +6160,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
/* To handle GMAC own interrupts */
if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
- int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
+ int status = stmmac_host_irq_status(priv, &priv->xstats);
if (unlikely(status)) {
/* For LPI we need to save the tx status */
@@ -6272,7 +6289,7 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
/**
* stmmac_ioctl - Entry point for the Ioctl
* @dev: Device pointer.
- * @rq: An IOCTL specefic structure, that can contain a pointer to
+ * @rq: An IOCTL specific structure, that can contain a pointer to
* a proprietary structure used to pass information to the driver.
* @cmd: IOCTL command
* Description:
@@ -7264,6 +7281,40 @@ static void stmmac_service_task(struct work_struct *work)
clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}
+static void stmmac_print_actphyif(struct stmmac_priv *priv)
+{
+ const char **phyif_table;
+ const char *actphyif_str;
+ size_t phyif_table_size;
+
+ switch (priv->plat->core_type) {
+ case DWMAC_CORE_MAC100:
+ return;
+
+ case DWMAC_CORE_GMAC:
+ case DWMAC_CORE_GMAC4:
+ phyif_table = stmmac_dwmac_actphyif;
+ phyif_table_size = ARRAY_SIZE(stmmac_dwmac_actphyif);
+ break;
+
+ case DWMAC_CORE_XGMAC:
+ phyif_table = stmmac_dwxgmac_phyif;
+ phyif_table_size = ARRAY_SIZE(stmmac_dwxgmac_phyif);
+ break;
+ }
+
+ if (priv->dma_cap.actphyif < phyif_table_size)
+ actphyif_str = phyif_table[priv->dma_cap.actphyif];
+ else
+ actphyif_str = NULL;
+
+ if (!actphyif_str)
+ actphyif_str = "unknown";
+
+ dev_info(priv->device, "Active PHY interface: %s (%u)\n",
+ actphyif_str, priv->dma_cap.actphyif);
+}
+
/**
* stmmac_hw_init - Init the MAC device
* @priv: driver private structure
@@ -7320,6 +7371,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
else if (priv->dma_cap.rx_coe_type1)
priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
+ stmmac_print_actphyif(priv);
} else {
dev_info(priv->device, "No HW DMA feature register supported\n");
}
@@ -7695,7 +7747,6 @@ static int __stmmac_dvr_probe(struct device *device,
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
- priv->lpi_irq = res->lpi_irq;
priv->sfty_irq = res->sfty_irq;
priv->sfty_ce_irq = res->sfty_ce_irq;
priv->sfty_ue_irq = res->sfty_ue_irq;
@@ -8061,8 +8112,7 @@ int stmmac_suspend(struct device *dev)
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
- if (priv->plat->serdes_powerdown)
- priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+ stmmac_legacy_serdes_power_down(priv);
/* Enable Power down mode by programming the PMT regs */
if (priv->wolopts) {
@@ -8165,11 +8215,8 @@ int stmmac_resume(struct device *dev)
stmmac_mdio_reset(priv->mii);
}
- if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
- priv->plat->serdes_powerup) {
- ret = priv->plat->serdes_powerup(ndev,
- priv->plat->bsp_priv);
-
+ if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)) {
+ ret = stmmac_legacy_serdes_power_up(priv);
if (ret < 0)
return ret;
}
@@ -8191,6 +8238,7 @@ int stmmac_resume(struct device *dev)
ret = stmmac_hw_setup(ndev);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+ stmmac_legacy_serdes_power_down(priv);
mutex_unlock(&priv->lock);
rtnl_unlock();
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 1e82850f2a25..a7c2496b39f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -483,7 +483,7 @@ void stmmac_pcs_clean(struct net_device *ndev)
* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and it is fixed (as reported in the driver
- * documentation). Viceversa the driver will try to set the MDC
+ * documentation). Vice versa the driver will try to set the MDC
* clock dynamically according to the actual clock input.
*/
static u32 stmmac_clk_csr_set(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c
index e2f531c11986..88fa359ea716 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c
@@ -2,6 +2,20 @@
#include "stmmac.h"
#include "stmmac_pcs.h"
+/*
+ * GMAC_AN_STATUS is equivalent to MII_BMSR
+ * GMAC_ANE_ADV is equivalent to 802.3z MII_ADVERTISE
+ * GMAC_ANE_LPA is equivalent to 802.3z MII_LPA
+ * GMAC_ANE_EXP is equivalent to MII_EXPANSION
+ * GMAC_TBI is equivalent to MII_ESTATUS
+ *
+ * ADV, LPA and EXP are only available for the TBI and RTBI modes.
+ */
+#define GMAC_AN_STATUS 0x04 /* AN status */
+#define GMAC_ANE_ADV 0x08 /* ANE Advertisement */
+#define GMAC_ANE_LPA 0x0c /* ANE link partner ability */
+#define GMAC_TBI 0x14 /* TBI extended status */
+
static int dwmac_integrated_pcs_enable(struct phylink_pcs *pcs)
{
struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs);
@@ -45,6 +59,37 @@ static const struct phylink_pcs_ops dwmac_integrated_pcs_ops = {
.pcs_config = dwmac_integrated_pcs_config,
};
+void stmmac_integrated_pcs_irq(struct stmmac_priv *priv, u32 status,
+ struct stmmac_extra_stats *x)
+{
+ struct stmmac_pcs *spcs = priv->integrated_pcs;
+ u32 val = readl(spcs->base + GMAC_AN_STATUS);
+
+ if (status & PCS_ANE_IRQ) {
+ x->irq_pcs_ane_n++;
+ if (val & BMSR_ANEGCOMPLETE)
+ dev_info(priv->device,
+ "PCS ANE process completed\n");
+ }
+
+ if (status & PCS_LINK_IRQ) {
+ x->irq_pcs_link_n++;
+ dev_info(priv->device, "PCS Link %s\n",
+ val & BMSR_LSTATUS ? "Up" : "Down");
+
+ phylink_pcs_change(&spcs->pcs, val & BMSR_LSTATUS);
+ }
+}
+
+int stmmac_integrated_pcs_get_phy_intf_sel(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ return PHY_INTF_SEL_SGMII;
+
+ return -EINVAL;
+}
+
int stmmac_integrated_pcs_init(struct stmmac_priv *priv, unsigned int offset,
u32 int_mask)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
index cda93894168e..23bbd4f10bf8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -16,36 +16,14 @@
/* PCS registers (AN/TBI/SGMII/RGMII) offsets */
#define GMAC_AN_CTRL(x) (x) /* AN control */
-#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */
-
-/* ADV, LPA and EXP are only available for the TBI and RTBI interfaces */
-#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */
-#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partener ability */
-#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */
-#define GMAC_TBI(x) (x + 0x14) /* TBI extend status */
/* AN Configuration defines */
-#define GMAC_AN_CTRL_RAN BIT(9) /* Restart Auto-Negotiation */
-#define GMAC_AN_CTRL_ANE BIT(12) /* Auto-Negotiation Enable */
-#define GMAC_AN_CTRL_ELE BIT(14) /* External Loopback Enable */
-#define GMAC_AN_CTRL_ECD BIT(16) /* Enable Comma Detect */
-#define GMAC_AN_CTRL_LR BIT(17) /* Lock to Reference */
-#define GMAC_AN_CTRL_SGMRAL BIT(18) /* SGMII RAL Control */
-
-/* AN Status defines */
-#define GMAC_AN_STATUS_LS BIT(2) /* Link Status 0:down 1:up */
-#define GMAC_AN_STATUS_ANA BIT(3) /* Auto-Negotiation Ability */
-#define GMAC_AN_STATUS_ANC BIT(5) /* Auto-Negotiation Complete */
-#define GMAC_AN_STATUS_ES BIT(8) /* Extended Status */
-
-/* ADV and LPA defines */
-#define GMAC_ANE_FD BIT(5)
-#define GMAC_ANE_HD BIT(6)
-#define GMAC_ANE_PSE GENMASK(8, 7)
-#define GMAC_ANE_PSE_SHIFT 7
-#define GMAC_ANE_RFE GENMASK(13, 12)
-#define GMAC_ANE_RFE_SHIFT 12
-#define GMAC_ANE_ACK BIT(14)
+#define GMAC_AN_CTRL_RAN BIT_U32(9) /* Restart Auto-Negotiation */
+#define GMAC_AN_CTRL_ANE BIT_U32(12) /* Auto-Negotiation Enable */
+#define GMAC_AN_CTRL_ELE BIT_U32(14) /* External Loopback Enable */
+#define GMAC_AN_CTRL_ECD BIT_U32(16) /* Enable Comma Detect */
+#define GMAC_AN_CTRL_LR BIT_U32(17) /* Lock to Reference */
+#define GMAC_AN_CTRL_SGMRAL BIT_U32(18) /* SGMII RAL Control */
struct stmmac_priv;
@@ -62,40 +40,14 @@ phylink_pcs_to_stmmac_pcs(struct phylink_pcs *pcs)
return container_of(pcs, struct stmmac_pcs, pcs);
}
+void stmmac_integrated_pcs_irq(struct stmmac_priv *priv, u32 status,
+ struct stmmac_extra_stats *x);
+int stmmac_integrated_pcs_get_phy_intf_sel(struct phylink_pcs *pcs,
+ phy_interface_t interface);
int stmmac_integrated_pcs_init(struct stmmac_priv *priv, unsigned int offset,
u32 int_mask);
/**
- * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR
- * @ioaddr: IO registers pointer
- * @reg: Base address of the AN Control Register.
- * @intr_status: GMAC core interrupt status
- * @x: pointer to log these events as stats
- * Description: it is the ISR for PCS events: Auto-Negotiation Completed and
- * Link status.
- */
-static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg,
- unsigned int intr_status,
- struct stmmac_extra_stats *x)
-{
- u32 val = readl(ioaddr + GMAC_AN_STATUS(reg));
-
- if (intr_status & PCS_ANE_IRQ) {
- x->irq_pcs_ane_n++;
- if (val & GMAC_AN_STATUS_ANC)
- pr_info("stmmac_pcs: ANE process completed\n");
- }
-
- if (intr_status & PCS_LINK_IRQ) {
- x->irq_pcs_link_n++;
- if (val & GMAC_AN_STATUS_LS)
- pr_info("stmmac_pcs: Link Up\n");
- else
- pr_info("stmmac_pcs: Link Down\n");
- }
-}
-
-/**
* dwmac_ctrl_ane - To program the AN Control Register.
* @ioaddr: IO registers pointer
* @reg: Base address of the AN Control Register.
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 8979a50b5507..5c9fd91a1db9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -725,14 +725,6 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
stmmac_res->wol_irq = stmmac_res->irq;
}
- stmmac_res->lpi_irq =
- platform_get_irq_byname_optional(pdev, "eth_lpi");
- if (stmmac_res->lpi_irq < 0) {
- if (stmmac_res->lpi_irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
- }
-
stmmac_res->sfty_irq =
platform_get_irq_byname_optional(pdev, "sfty");
if (stmmac_res->sfty_irq < 0) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index e90a2c469b9a..08b60b7d5fd6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -2000,7 +2000,7 @@ void stmmac_selftest_run(struct net_device *dev,
}
/*
- * First tests will always be MAC / PHY loobpack. If any of
+ * First tests will always be MAC / PHY loopback. If any of
* them is not supported we abort earlier.
*/
if (ret) {
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 893216b0e08d..f035e3bbbef8 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -7302,6 +7302,13 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
return ret;
}
+static u32 niu_get_rx_ring_count(struct net_device *dev)
+{
+ struct niu *np = netdev_priv(dev);
+
+ return np->num_rx_rings;
+}
+
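The new callback replaces the ETHTOOL_GRXRINGS case removed from niu_get_nfc() in the next hunk: the ethtool core now answers that query itself through the .get_rx_ring_count operation wired up below.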
static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -7309,9 +7316,6 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = np->num_rx_rings;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
break;
@@ -7928,6 +7932,7 @@ static const struct ethtool_ops niu_ethtool_ops = {
.set_phys_id = niu_set_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .get_rx_ring_count = niu_get_rx_ring_count,
.get_rxfh_fields = niu_get_rxfh_fields,
.set_rxfh_fields = niu_set_rxfh_fields,
.get_link_ksettings = niu_get_link_ksettings,
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 48f0a96c0e9e..666998082998 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2551,6 +2551,9 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
goto err_out_clear_quattro;
}
+ /* The BIGMAC register resource may report a bogus size; clamp it */
+ if ((op->resource[3].end - op->resource[3].start) >= BMAC_REG_SIZE)
+ op->resource[3].end = op->resource[3].start + BMAC_REG_SIZE - 1;
hp->bigmacregs = devm_platform_ioremap_resource(op, 3);
if (IS_ERR(hp->bigmacregs)) {
dev_err(&op->dev, "Cannot map BIGMAC registers.\n");
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index fe5b2926d8ab..c60b04921c62 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -192,6 +192,7 @@ config TI_ICSSG_PRUETH
depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on HSR || !HSR
help
Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
This subsystem is available starting with the AM65 platform.
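The "HSR || !HSR" line is the usual Kconfig idiom for an optional dependency: it only forbids TI_ICSSG_PRUETH=y together with HSR=m, so a built-in driver can never reference symbols exported by a modular hsr.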
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 93c0a4d0e33a..6da50f4b7c2e 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_TI_PRUETH) += icssm-prueth.o
-icssm-prueth-y := icssm/icssm_prueth.o
+icssm-prueth-y := icssm/icssm_prueth.o icssm/icssm_prueth_switch.o icssm/icssm_switchdev.o
obj-$(CONFIG_TI_CPSW) += cpsw-common.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index c57497074ae6..98d60da7cc3b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -391,11 +391,8 @@ static int am65_cpsw_ethtool_op_begin(struct net_device *ndev)
static void am65_cpsw_ethtool_op_complete(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- int ret;
- ret = pm_runtime_put(common->dev);
- if (ret < 0 && ret != -EBUSY)
- dev_err(common->dev, "ethtool complete failed %d\n", ret);
+ pm_runtime_put(common->dev);
}
static void am65_cpsw_get_drvinfo(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index fbe35af615a6..bb969dd435b4 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -23,11 +23,6 @@
#define BITMASK(bits) (BIT(bits) - 1)
-#define ALE_VERSION_MAJOR(rev, mask) (((rev) >> 8) & (mask))
-#define ALE_VERSION_MINOR(rev) (rev & 0xff)
-#define ALE_VERSION_1R3 0x0103
-#define ALE_VERSION_1R4 0x0104
-
/* ALE Registers */
#define ALE_IDVER 0x00
#define ALE_STATUS 0x04
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index bdc4db0d169c..a43f75ee269e 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -374,11 +374,8 @@ int cpsw_ethtool_op_begin(struct net_device *ndev)
void cpsw_ethtool_op_complete(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int ret;
- ret = pm_runtime_put(priv->cpsw->dev);
- if (ret < 0)
- cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
+ pm_runtime_put(priv->cpsw->dev);
}
void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch)
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 21af0a10626a..7f42f58a4b03 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1472,7 +1472,7 @@ static void cpsw_unregister_ports(struct cpsw_common *cpsw)
for (i = 0; i < cpsw->data.slaves; i++) {
ndev = cpsw->slaves[i].ndev;
- if (!ndev)
+ if (!ndev || ndev->reg_state != NETREG_REGISTERED)
continue;
priv = netdev_priv(ndev);
@@ -1494,7 +1494,6 @@ static int cpsw_register_ports(struct cpsw_common *cpsw)
if (ret) {
dev_err(cpsw->dev,
"cpsw: err registering net device%d\n", i);
- cpsw->slaves[i].ndev = NULL;
break;
}
}
@@ -2003,7 +2002,7 @@ static int cpsw_probe(struct platform_device *pdev)
/* setup netdevs */
ret = cpsw_create_ports(cpsw);
if (ret)
- goto clean_unregister_netdev;
+ goto clean_cpts;
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
* MISC IRQs which are always kept disabled with this driver so
@@ -2017,14 +2016,14 @@ static int cpsw_probe(struct platform_device *pdev)
0, dev_name(dev), cpsw);
if (ret < 0) {
dev_err(dev, "error attaching irq (%d)\n", ret);
- goto clean_unregister_netdev;
+ goto clean_cpts;
}
ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
0, dev_name(dev), cpsw);
if (ret < 0) {
dev_err(dev, "error attaching irq (%d)\n", ret);
- goto clean_unregister_netdev;
+ goto clean_cpts;
}
if (!cpsw->cpts)
@@ -2034,7 +2033,7 @@ static int cpsw_probe(struct platform_device *pdev)
0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(dev, "error attaching misc irq (%d)\n", ret);
- goto clean_unregister_netdev;
+ goto clean_cpts;
}
/* Enable misc CPTS evnt_pend IRQ */
@@ -2043,7 +2042,7 @@ static int cpsw_probe(struct platform_device *pdev)
skip_cpts:
ret = cpsw_register_notifiers(cpsw);
if (ret)
- goto clean_unregister_netdev;
+ goto clean_cpts;
ret = cpsw_register_devlink(cpsw);
if (ret)
@@ -2065,8 +2064,6 @@ skip_cpts:
clean_unregister_notifiers:
cpsw_unregister_notifiers(cpsw);
-clean_unregister_netdev:
- cpsw_unregister_ports(cpsw);
clean_cpts:
cpts_release(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 090aa74d3ce7..0cf9dfe0fa36 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -1720,7 +1720,6 @@ void prueth_netdev_exit(struct prueth *prueth,
netif_napi_del(&emac->napi_rx);
pruss_release_mem_region(prueth->pruss, &emac->dram);
- destroy_workqueue(emac->cmd_wq);
free_netdev(emac->ndev);
prueth->emac[mac] = NULL;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index f65041662173..0939994c932f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1099,7 +1099,7 @@ static void emac_ndo_set_rx_mode(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
- queue_work(emac->cmd_wq, &emac->rx_mode_work);
+ schedule_work(&emac->rx_mode_work);
}
static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
@@ -1451,11 +1451,6 @@ static int prueth_netdev_init(struct prueth *prueth,
emac->port_id = port;
emac->xdp_prog = NULL;
emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
- emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
- if (!emac->cmd_wq) {
- ret = -ENOMEM;
- goto free_ndev;
- }
INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
@@ -1467,7 +1462,7 @@ static int prueth_netdev_init(struct prueth *prueth,
if (ret) {
dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
ret = -ENOMEM;
- goto free_wq;
+ goto free_ndev;
}
emac->tx_ch_num = 1;
@@ -1566,8 +1561,6 @@ static int prueth_netdev_init(struct prueth *prueth,
free:
pruss_release_mem_region(prueth->pruss, &emac->dram);
-free_wq:
- destroy_workqueue(emac->cmd_wq);
free_ndev:
emac->ndev = NULL;
prueth->emac[mac] = NULL;
@@ -2236,6 +2229,7 @@ netdev_unregister:
prueth->emac[i]->ndev->phydev = NULL;
}
unregister_netdev(prueth->registered_netdevs[i]);
+ disable_work_sync(&prueth->emac[i]->rx_mode_work);
}
netdev_exit:
@@ -2295,6 +2289,7 @@ static void prueth_remove(struct platform_device *pdev)
phy_disconnect(prueth->emac[i]->ndev->phydev);
prueth->emac[i]->ndev->phydev = NULL;
unregister_netdev(prueth->registered_netdevs[i]);
+ disable_work_sync(&prueth->emac[i]->rx_mode_work);
}
for (i = 0; i < PRUETH_NUM_MACS; i++) {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 10eadd356650..3d94fa5a7ac1 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -236,7 +236,6 @@ struct prueth_emac {
/* Mutex to serialize access to firmware command interface */
struct mutex cmd_lock;
struct work_struct rx_mode_work;
- struct workqueue_struct *cmd_wq;
struct pruss_mem_region dram;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 7bb4f0d850cc..b8115ca47082 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -783,11 +783,6 @@ static int prueth_netdev_init(struct prueth *prueth,
emac->prueth = prueth;
emac->ndev = ndev;
emac->port_id = port;
- emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
- if (!emac->cmd_wq) {
- ret = -ENOMEM;
- goto free_ndev;
- }
INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
@@ -798,7 +793,7 @@ static int prueth_netdev_init(struct prueth *prueth,
if (ret) {
dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
ret = -ENOMEM;
- goto free_wq;
+ goto free_ndev;
}
/* SR1.0 uses a dedicated high priority channel
@@ -883,8 +878,6 @@ static int prueth_netdev_init(struct prueth *prueth,
free:
pruss_release_mem_region(prueth->pruss, &emac->dram);
-free_wq:
- destroy_workqueue(emac->cmd_wq);
free_ndev:
emac->ndev = NULL;
prueth->emac[mac] = NULL;
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth.c b/drivers/net/ethernet/ti/icssm/icssm_prueth.c
index 293b7af04263..53bbd9290904 100644
--- a/drivers/net/ethernet/ti/icssm/icssm_prueth.c
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth.c
@@ -29,6 +29,8 @@
#include <net/pkt_cls.h>
#include "icssm_prueth.h"
+#include "icssm_prueth_switch.h"
+#include "icssm_vlan_mcast_filter_mmap.h"
#include "../icssg/icssg_mii_rt.h"
#include "../icssg/icss_iep.h"
@@ -145,7 +147,7 @@ static const struct prueth_queue_info queue_infos[][NUM_QUEUES] = {
},
};
-static const struct prueth_queue_desc queue_descs[][NUM_QUEUES] = {
+const struct prueth_queue_desc queue_descs[][NUM_QUEUES] = {
[PRUETH_PORT_QUEUE_HOST] = {
{ .rd_ptr = P0_Q1_BD_OFFSET, .wr_ptr = P0_Q1_BD_OFFSET, },
{ .rd_ptr = P0_Q2_BD_OFFSET, .wr_ptr = P0_Q2_BD_OFFSET, },
@@ -205,9 +207,9 @@ static void icssm_prueth_hostconfig(struct prueth *prueth)
static void icssm_prueth_mii_init(struct prueth *prueth)
{
+ u32 txcfg_reg, txcfg, txcfg2;
struct regmap *mii_rt;
u32 rxcfg_reg, rxcfg;
- u32 txcfg_reg, txcfg;
mii_rt = prueth->mii_rt;
@@ -235,17 +237,23 @@ static void icssm_prueth_mii_init(struct prueth *prueth)
(TX_START_DELAY << PRUSS_MII_RT_TXCFG_TX_START_DELAY_SHIFT) |
(TX_CLK_DELAY_100M << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT);
+ txcfg2 = txcfg;
+ if (!PRUETH_IS_EMAC(prueth))
+ txcfg2 |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+
/* Configuration of Port 0 Tx */
txcfg_reg = PRUSS_MII_RT_TXCFG0;
- regmap_write(mii_rt, txcfg_reg, txcfg);
+ regmap_write(mii_rt, txcfg_reg, txcfg2);
- txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+ txcfg2 = txcfg;
+ if (PRUETH_IS_EMAC(prueth))
+ txcfg2 |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
/* Configuration of Port 1 Tx */
txcfg_reg = PRUSS_MII_RT_TXCFG1;
- regmap_write(mii_rt, txcfg_reg, txcfg);
+ regmap_write(mii_rt, txcfg_reg, txcfg2);
txcfg_reg = PRUSS_MII_RT_RX_FRMS0;
@@ -292,7 +300,10 @@ static void icssm_prueth_hostinit(struct prueth *prueth)
icssm_prueth_clearmem(prueth, PRUETH_MEM_DRAM1);
/* Initialize host queues in shared RAM */
- icssm_prueth_hostconfig(prueth);
+ if (!PRUETH_IS_EMAC(prueth))
+ icssm_prueth_sw_hostconfig(prueth);
+ else
+ icssm_prueth_hostconfig(prueth);
/* Configure MII_RT */
icssm_prueth_mii_init(prueth);
@@ -499,19 +510,24 @@ static int icssm_prueth_tx_enqueue(struct prueth_emac *emac,
struct prueth_queue_desc __iomem *queue_desc;
const struct prueth_queue_info *txqueue;
struct net_device *ndev = emac->ndev;
+ struct prueth *prueth = emac->prueth;
unsigned int buffer_desc_count;
int free_blocks, update_block;
bool buffer_wrapped = false;
int write_block, read_block;
void *src_addr, *dst_addr;
int pkt_block_size;
+ void __iomem *sram;
void __iomem *dram;
int txport, pktlen;
u16 update_wr_ptr;
u32 wr_buf_desc;
void *ocmc_ram;
- dram = emac->prueth->mem[emac->dram].va;
+ if (!PRUETH_IS_EMAC(prueth))
+ dram = prueth->mem[PRUETH_MEM_DRAM1].va;
+ else
+ dram = emac->prueth->mem[emac->dram].va;
if (eth_skb_pad(skb)) {
if (netif_msg_tx_err(emac) && net_ratelimit())
netdev_err(ndev, "packet pad failed\n");
@@ -524,7 +540,10 @@ static int icssm_prueth_tx_enqueue(struct prueth_emac *emac,
pktlen = skb->len;
/* Get the tx queue */
queue_desc = emac->tx_queue_descs + queue_id;
- txqueue = &queue_infos[txport][queue_id];
+ if (!PRUETH_IS_EMAC(prueth))
+ txqueue = &sw_queue_infos[txport][queue_id];
+ else
+ txqueue = &queue_infos[txport][queue_id];
buffer_desc_count = icssm_get_buff_desc_count(txqueue);
@@ -590,7 +609,11 @@ static int icssm_prueth_tx_enqueue(struct prueth_emac *emac,
/* update first buffer descriptor */
wr_buf_desc = (pktlen << PRUETH_BD_LENGTH_SHIFT) &
PRUETH_BD_LENGTH_MASK;
- writel(wr_buf_desc, dram + readw(&queue_desc->wr_ptr));
+ sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ if (!PRUETH_IS_EMAC(prueth))
+ writel(wr_buf_desc, sram + readw(&queue_desc->wr_ptr));
+ else
+ writel(wr_buf_desc, dram + readw(&queue_desc->wr_ptr));
/* update the write pointer in this queue descriptor, the firmware
* polls for this change so this will signal the start of transmission
@@ -604,7 +627,6 @@ static int icssm_prueth_tx_enqueue(struct prueth_emac *emac,
void icssm_parse_packet_info(struct prueth *prueth, u32 buffer_descriptor,
struct prueth_packet_info *pkt_info)
{
- pkt_info->shadow = !!(buffer_descriptor & PRUETH_BD_SHADOW_MASK);
pkt_info->port = (buffer_descriptor & PRUETH_BD_PORT_MASK) >>
PRUETH_BD_PORT_SHIFT;
pkt_info->length = (buffer_descriptor & PRUETH_BD_LENGTH_MASK) >>
@@ -713,11 +735,19 @@ int icssm_emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr,
src_addr += actual_pkt_len;
}
+ if (PRUETH_IS_SWITCH(emac->prueth)) {
+ skb->offload_fwd_mark = READ_ONCE(emac->offload_fwd_mark);
+ if (!pkt_info->lookup_success)
+ icssm_prueth_sw_learn_fdb(emac, skb->data + ETH_ALEN);
+ }
+
skb_put(skb, actual_pkt_len);
/* send packet up the stack */
skb->protocol = eth_type_trans(skb, ndev);
+ local_bh_disable();
netif_receive_skb(skb);
+ local_bh_enable();
/* update stats */
emac->stats.rx_bytes += actual_pkt_len;
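The local_bh_disable()/local_bh_enable() pair added here matters because netif_receive_skb() expects bottom halves to be disabled; the wrapper keeps that invariant when this receive path is entered from a context where BHs are still enabled, such as a threaded interrupt handler.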
@@ -743,6 +773,7 @@ static int icssm_emac_rx_packets(struct prueth_emac *emac, int budget)
shared_ram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ /* The start and end queues are common to EMAC and RSTP */
start_queue = emac->rx_queue_start;
end_queue = emac->rx_queue_end;
@@ -753,8 +784,10 @@ static int icssm_emac_rx_packets(struct prueth_emac *emac, int budget)
/* search host queues for packets */
for (i = start_queue; i <= end_queue; i++) {
queue_desc = emac->rx_queue_descs + i;
- rxqueue = &queue_infos[PRUETH_PORT_HOST][i];
-
+ if (PRUETH_IS_SWITCH(emac->prueth))
+ rxqueue = &sw_queue_infos[PRUETH_PORT_HOST][i];
+ else
+ rxqueue = &queue_infos[PRUETH_PORT_HOST][i];
overflow_cnt = readb(&queue_desc->overflow_cnt);
if (overflow_cnt > 0) {
emac->stats.rx_over_errors += overflow_cnt;
@@ -879,6 +912,13 @@ static int icssm_emac_request_irqs(struct prueth_emac *emac)
return ret;
}
+/* Free memory used only in switch mode */
+static void icssm_prueth_free_memory(struct prueth *prueth)
+{
+ if (PRUETH_IS_SWITCH(prueth))
+ icssm_prueth_sw_free_fdb_table(prueth);
+}
+
static void icssm_ptp_dram_init(struct prueth_emac *emac)
{
void __iomem *sram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
@@ -941,20 +981,38 @@ static int icssm_emac_ndo_open(struct net_device *ndev)
if (!prueth->emac_configured)
icssm_prueth_init_ethernet_mode(prueth);
- icssm_prueth_emac_config(emac);
+ /* reset and start PRU firmware */
+ if (PRUETH_IS_SWITCH(prueth)) {
+ ret = icssm_prueth_sw_emac_config(emac);
+ if (ret)
+ return ret;
+
+ ret = icssm_prueth_sw_init_fdb_table(prueth);
+ if (ret)
+ return ret;
+ } else {
+ icssm_prueth_emac_config(emac);
+ }
if (!prueth->emac_configured) {
icssm_ptp_dram_init(emac);
ret = icss_iep_init(prueth->iep, NULL, NULL, 0);
if (ret) {
netdev_err(ndev, "Failed to initialize iep: %d\n", ret);
- goto iep_exit;
+ goto free_mem;
}
}
- ret = icssm_emac_set_boot_pru(emac, ndev);
- if (ret)
- goto iep_exit;
+ if (!PRUETH_IS_EMAC(prueth)) {
+ ret = icssm_prueth_sw_boot_prus(prueth, ndev);
+ if (ret)
+ goto iep_exit;
+ } else {
+ /* boot the PRU */
+ ret = icssm_emac_set_boot_pru(emac, ndev);
+ if (ret)
+ goto iep_exit;
+ }
ret = icssm_emac_request_irqs(emac);
if (ret)
@@ -969,19 +1027,25 @@ static int icssm_emac_ndo_open(struct net_device *ndev)
icssm_prueth_port_enable(emac, true);
prueth->emac_configured |= BIT(emac->port_id);
-
+ if (PRUETH_IS_SWITCH(prueth))
+ icssm_prueth_sw_set_stp_state(prueth, emac->port_id,
+ BR_STATE_LEARNING);
if (netif_msg_drv(emac))
dev_notice(&ndev->dev, "started\n");
return 0;
rproc_shutdown:
- rproc_shutdown(emac->pru);
+ if (!PRUETH_IS_EMAC(prueth))
+ icssm_prueth_sw_shutdown_prus(emac, ndev);
+ else
+ rproc_shutdown(emac->pru);
iep_exit:
if (!prueth->emac_configured)
icss_iep_exit(prueth->iep);
-
+free_mem:
+ icssm_prueth_free_memory(emac->prueth);
return ret;
}
@@ -1010,17 +1074,83 @@ static int icssm_emac_ndo_stop(struct net_device *ndev)
hrtimer_cancel(&emac->tx_hrtimer);
/* stop the PRU */
- rproc_shutdown(emac->pru);
+ if (!PRUETH_IS_EMAC(prueth))
+ icssm_prueth_sw_shutdown_prus(emac, ndev);
+ else
+ rproc_shutdown(emac->pru);
/* free rx interrupts */
free_irq(emac->rx_irq, ndev);
+ /* free memory related to sw */
+ icssm_prueth_free_memory(emac->prueth);
+
+ if (!prueth->emac_configured)
+ icss_iep_exit(prueth->iep);
+
if (netif_msg_drv(emac))
dev_notice(&ndev->dev, "stopped\n");
return 0;
}
+static int icssm_prueth_change_mode(struct prueth *prueth,
+ enum pruss_ethtype mode)
+{
+ bool portstatus[PRUETH_NUM_MACS];
+ struct prueth_emac *emac;
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->emac[i]) {
+ dev_err(prueth->dev, "MAC port %d not initialized\n", i);
+ return -EINVAL;
+ }
+
+ emac = prueth->emac[i];
+ ndev = emac->ndev;
+
+ portstatus[i] = netif_running(ndev);
+ if (!portstatus[i])
+ continue;
+
+ ret = ndev->netdev_ops->ndo_stop(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (mode == PRUSS_ETHTYPE_EMAC || mode == PRUSS_ETHTYPE_SWITCH) {
+ prueth->eth_type = mode;
+ } else {
+ dev_err(prueth->dev, "unknown mode\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->emac[i]) {
+ dev_err(prueth->dev, "MAC port %d not initialized\n", i);
+ return -EINVAL;
+ }
+
+ emac = prueth->emac[i];
+ ndev = emac->ndev;
+
+ if (!portstatus[i])
+ continue;
+
+ ret = ndev->netdev_ops->ndo_open(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/* VLAN-tag PCP to priority queue map for EMAC/Switch/HSR/PRP used by driver
* Index is PCP val / 2.
* low - pcp 0..3 maps to Q4 for Host
@@ -1131,11 +1261,183 @@ static void icssm_emac_ndo_get_stats64(struct net_device *ndev,
stats->rx_length_errors = emac->stats.rx_length_errors;
}
+/* enable/disable MC filter */
+static void icssm_emac_mc_filter_ctrl(struct prueth_emac *emac, bool enable)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *mc_filter_ctrl;
+ void __iomem *ram;
+ u32 reg;
+
+ ram = prueth->mem[emac->dram].va;
+ mc_filter_ctrl = ram + ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_OFFSET;
+
+ if (enable)
+ reg = ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_ENABLED;
+ else
+ reg = ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_DISABLED;
+
+ writeb(reg, mc_filter_ctrl);
+}
+
+/* reset MC filter bins */
+static void icssm_emac_mc_filter_reset(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *mc_filter_tbl;
+ u32 mc_filter_tbl_base;
+ void __iomem *ram;
+
+ ram = prueth->mem[emac->dram].va;
+ mc_filter_tbl_base = ICSS_EMAC_FW_MULTICAST_FILTER_TABLE;
+
+ mc_filter_tbl = ram + mc_filter_tbl_base;
+ memset_io(mc_filter_tbl, 0, ICSS_EMAC_FW_MULTICAST_TABLE_SIZE_BYTES);
+}
+
+/* set MC filter hashmask */
+static void
+icssm_emac_mc_filter_hashmask(struct prueth_emac *emac,
+ u8 mask[ICSS_EMAC_FW_MULTICAST_FILTER_MASK_SIZE_BYTES])
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *mc_filter_mask;
+ void __iomem *ram;
+
+ ram = prueth->mem[emac->dram].va;
+
+ mc_filter_mask = ram + ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OFFSET;
+ memcpy_toio(mc_filter_mask, mask,
+ ICSS_EMAC_FW_MULTICAST_FILTER_MASK_SIZE_BYTES);
+}
+
+static void icssm_emac_mc_filter_bin_update(struct prueth_emac *emac, u8 hash,
+ u8 val)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *mc_filter_tbl;
+ void __iomem *ram;
+
+ ram = prueth->mem[emac->dram].va;
+
+ mc_filter_tbl = ram + ICSS_EMAC_FW_MULTICAST_FILTER_TABLE;
+ writeb(val, mc_filter_tbl + hash);
+}
+
+void icssm_emac_mc_filter_bin_allow(struct prueth_emac *emac, u8 hash)
+{
+ icssm_emac_mc_filter_bin_update(emac, hash,
+ ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_ALLOWED);
+}
+
+void icssm_emac_mc_filter_bin_disallow(struct prueth_emac *emac, u8 hash)
+{
+ icssm_emac_mc_filter_bin_update(emac, hash,
+ ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_NOT_ALLOWED);
+}
+
+u8 icssm_emac_get_mc_hash(u8 *mac, u8 *mask)
+{
+ u8 hash;
+ int j;
+
+ for (j = 0, hash = 0; j < ETH_ALEN; j++)
+ hash ^= (mac[j] & mask[j]);
+
+ return hash;
+}
+
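With the all-ones filter mask installed by default (see the memset in icssm_prueth_netdev_init() below), the bin index is simply the XOR of the six address bytes; for 01:00:5e:00:00:01 that is 0x01 ^ 0x00 ^ 0x5e ^ 0x00 ^ 0x00 ^ 0x01 = 0x5e. A minimal usage sketch, not part of the patch:

static void example_allow_mc_addr(struct prueth_emac *emac)
{
	u8 addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u8 hash;

	/* XOR-fold the masked address into a single filter bin */
	hash = icssm_emac_get_mc_hash(addr, emac->mc_filter_mask);
	icssm_emac_mc_filter_bin_allow(emac, hash);	/* hash == 0x5e */
}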
+/**
+ * icssm_emac_ndo_set_rx_mode - EMAC set receive mode function
+ * @ndev: The EMAC network adapter
+ *
+ * Called when the system wants to set the receive mode of the device.
+ *
+ */
+static void icssm_emac_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ bool promisc = ndev->flags & IFF_PROMISC;
+ struct netdev_hw_addr *ha;
+ struct prueth *prueth;
+ unsigned long flags;
+ void __iomem *sram;
+ u32 mask, reg;
+ u8 hash;
+
+ prueth = emac->prueth;
+ sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ reg = readl(sram + EMAC_PROMISCUOUS_MODE_OFFSET);
+
+ /* It is a shared table. So lock the access */
+ spin_lock_irqsave(&emac->addr_lock, flags);
+
+ /* Disable and reset multicast filter, allows allmulti */
+ icssm_emac_mc_filter_ctrl(emac, false);
+ icssm_emac_mc_filter_reset(emac);
+ icssm_emac_mc_filter_hashmask(emac, emac->mc_filter_mask);
+
+ if (PRUETH_IS_EMAC(prueth)) {
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ mask = EMAC_P1_PROMISCUOUS_BIT;
+ break;
+ case PRUETH_PORT_MII1:
+ mask = EMAC_P2_PROMISCUOUS_BIT;
+ break;
+ default:
+ netdev_err(ndev, "%s: invalid port\n", __func__);
+ goto unlock;
+ }
+
+ if (promisc) {
+ /* Enable promiscuous mode */
+ reg |= mask;
+ } else {
+ /* Disable promiscuous mode */
+ reg &= ~mask;
+ }
+
+ writel(reg, sram + EMAC_PROMISCUOUS_MODE_OFFSET);
+
+ if (promisc)
+ goto unlock;
+ }
+
+ if ((ndev->flags & IFF_ALLMULTI) && !PRUETH_IS_SWITCH(prueth))
+ goto unlock;
+
+ icssm_emac_mc_filter_ctrl(emac, true); /* all multicast blocked */
+
+ if (netdev_mc_empty(ndev))
+ goto unlock;
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ hash = icssm_emac_get_mc_hash(ha->addr, emac->mc_filter_mask);
+ icssm_emac_mc_filter_bin_allow(emac, hash);
+ }
+
+ /* Add bridge device's MC addresses as well */
+ if (prueth->hw_bridge_dev) {
+ netdev_for_each_mc_addr(ha, prueth->hw_bridge_dev) {
+ hash = icssm_emac_get_mc_hash(ha->addr,
+ emac->mc_filter_mask);
+ icssm_emac_mc_filter_bin_allow(emac, hash);
+ }
+ }
+
+unlock:
+ spin_unlock_irqrestore(&emac->addr_lock, flags);
+}
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = icssm_emac_ndo_open,
.ndo_stop = icssm_emac_ndo_stop,
.ndo_start_xmit = icssm_emac_ndo_start_xmit,
.ndo_get_stats64 = icssm_emac_ndo_get_stats64,
+ .ndo_set_rx_mode = icssm_emac_ndo_set_rx_mode,
};
/* get emac_port corresponding to eth_node name */
@@ -1188,6 +1490,7 @@ static enum hrtimer_restart icssm_emac_tx_timer_callback(struct hrtimer *timer)
static int icssm_prueth_netdev_init(struct prueth *prueth,
struct device_node *eth_node)
{
+ const struct prueth_private_data *fw_data = prueth->fw_data;
struct prueth_emac *emac;
struct net_device *ndev;
enum prueth_port port;
@@ -1212,6 +1515,7 @@ static int icssm_prueth_netdev_init(struct prueth *prueth,
emac->prueth = prueth;
emac->ndev = ndev;
emac->port_id = port;
+ memset(&emac->mc_filter_mask[0], 0xff, ETH_ALEN);
/* by default eth_type is EMAC */
switch (port) {
@@ -1247,6 +1551,9 @@ static int icssm_prueth_netdev_init(struct prueth *prueth,
goto free;
}
+ spin_lock_init(&emac->lock);
+ spin_lock_init(&emac->addr_lock);
+
/* get mac address from DT and set private and netdev addr */
ret = of_get_ethdev_address(eth_node, ndev);
if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -1274,6 +1581,14 @@ static int icssm_prueth_netdev_init(struct prueth *prueth,
phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+ /* Protocol switching: enable L2 firmware offloading */
+ if (fw_data->support_switch) {
+ ndev->features |= NETIF_F_HW_L2FW_DOFFLOAD;
+ ndev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
+ }
+
ndev->dev.of_node = eth_node;
ndev->netdev_ops = &emac_netdev_ops;
@@ -1310,6 +1625,169 @@ static void icssm_prueth_netdev_exit(struct prueth *prueth,
prueth->emac[mac] = NULL;
}
+bool icssm_prueth_sw_port_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops != &emac_netdev_ops)
+ return false;
+
+ if (ndev->features & NETIF_F_HW_L2FW_DOFFLOAD)
+ return true;
+
+ return false;
+}
+
+static int icssm_prueth_port_offload_fwd_mark_update(struct prueth *prueth)
+{
+ int set_val = 0;
+ int i, ret = 0;
+ u8 all_slaves;
+
+ all_slaves = BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1);
+
+ if (prueth->br_members == all_slaves)
+ set_val = 1;
+
+ dev_dbg(prueth->dev, "set offload_fwd_mark %d, mbrs=0x%x\n",
+ set_val, prueth->br_members);
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (prueth->emac[i])
+ WRITE_ONCE(prueth->emac[i]->offload_fwd_mark, set_val);
+ }
+
+ /* Bridge is created, load switch firmware,
+ * if not already in that mode
+ */
+ if (set_val && !PRUETH_IS_SWITCH(prueth)) {
+ ret = icssm_prueth_change_mode(prueth, PRUSS_ETHTYPE_SWITCH);
+ if (ret < 0)
+ dev_err(prueth->dev, "Failed to enable Switch mode\n");
+ else
+ dev_info(prueth->dev,
+ "TI PRU ethernet now in Switch mode\n");
+ }
+
+ /* Bridge is deleted, switch to Dual EMAC mode */
+ if (!prueth->br_members && !PRUETH_IS_EMAC(prueth)) {
+ ret = icssm_prueth_change_mode(prueth, PRUSS_ETHTYPE_EMAC);
+ if (ret < 0)
+ dev_err(prueth->dev, "Failed to enable Dual EMAC mode\n");
+ else
+ dev_info(prueth->dev,
+ "TI PRU ethernet now in Dual EMAC mode\n");
+ }
+
+ return ret;
+}
+
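In practice: enslaving both ports to one Linux bridge (ip link set ... master br0 on each port) makes br_members equal to all_slaves, so the PRUs are rebooted with the switch firmware; unlinking the last port clears br_members and reverts them to the dual-EMAC firmware. Ports that were up are stopped and reopened around the firmware swap by icssm_prueth_change_mode() above.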
+static int icssm_prueth_ndev_port_link(struct net_device *ndev,
+ struct net_device *br_ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ unsigned long flags;
+ int ret = 0;
+
+ dev_dbg(prueth->dev, "%s: br_mbrs=0x%x %s\n",
+ __func__, prueth->br_members, ndev->name);
+
+ spin_lock_irqsave(&emac->addr_lock, flags);
+
+ if (!prueth->br_members) {
+ prueth->hw_bridge_dev = br_ndev;
+ } else {
+ /* This is adding the port to a second bridge,
+ * this is unsupported
+ */
+ if (prueth->hw_bridge_dev != br_ndev) {
+ spin_unlock_irqrestore(&emac->addr_lock, flags);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ prueth->br_members |= BIT(emac->port_id);
+
+ spin_unlock_irqrestore(&emac->addr_lock, flags);
+
+ ret = icssm_prueth_port_offload_fwd_mark_update(prueth);
+
+ return ret;
+}
+
+static int icssm_prueth_ndev_port_unlink(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ unsigned long flags;
+ int ret = 0;
+
+ dev_dbg(prueth->dev, "%s\n", __func__);
+
+ spin_lock_irqsave(&emac->addr_lock, flags);
+
+ prueth->br_members &= ~BIT(emac->port_id);
+
+ spin_unlock_irqrestore(&emac->addr_lock, flags);
+
+ ret = icssm_prueth_port_offload_fwd_mark_update(prueth);
+
+ spin_lock_irqsave(&emac->addr_lock, flags);
+
+ if (!prueth->br_members)
+ prueth->hw_bridge_dev = NULL;
+
+ spin_unlock_irqrestore(&emac->addr_lock, flags);
+
+ return ret;
+}
+
+static int icssm_prueth_ndev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (!icssm_prueth_sw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = icssm_prueth_ndev_port_link
+ (ndev, info->upper_dev);
+ else
+ ret = icssm_prueth_ndev_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static int icssm_prueth_register_notifiers(struct prueth *prueth)
+{
+ int ret = 0;
+
+ prueth->prueth_netdevice_nb.notifier_call = icssm_prueth_ndev_event;
+ ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
+ if (ret) {
+ dev_err(prueth->dev,
+ "register netdevice notifier failed ret: %d\n", ret);
+ return ret;
+ }
+
+ ret = icssm_prueth_sw_register_notifiers(prueth);
+ if (ret)
+ unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
+
+ return ret;
+}
+
static int icssm_prueth_probe(struct platform_device *pdev)
{
struct device_node *eth0_node = NULL, *eth1_node = NULL;
@@ -1529,6 +2007,12 @@ static int icssm_prueth_probe(struct platform_device *pdev)
prueth->emac[PRUETH_MAC1]->ndev;
}
+ ret = icssm_prueth_register_notifiers(prueth);
+ if (ret) {
+ dev_err(dev, "can't register switchdev notifiers\n");
+ goto netdev_unregister;
+ }
+
dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
(!eth0_node || !eth1_node) ? "single" : "dual");
@@ -1589,6 +2073,9 @@ static void icssm_prueth_remove(struct platform_device *pdev)
struct device_node *eth_node;
int i;
+ unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
+ icssm_prueth_sw_unregister_notifiers(prueth);
+
for (i = 0; i < PRUETH_NUM_MACS; i++) {
if (!prueth->registered_netdevs[i])
continue;
@@ -1688,11 +2175,16 @@ static struct prueth_private_data am335x_prueth_pdata = {
.fw_pru[PRUSS_PRU0] = {
.fw_name[PRUSS_ETHTYPE_EMAC] =
"ti-pruss/am335x-pru0-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am335x-pru0-prusw-fw.elf",
},
.fw_pru[PRUSS_PRU1] = {
.fw_name[PRUSS_ETHTYPE_EMAC] =
"ti-pruss/am335x-pru1-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am335x-pru1-prusw-fw.elf",
},
+ .support_switch = true,
};
/* AM437x SoC-specific firmware data */
@@ -1701,11 +2193,16 @@ static struct prueth_private_data am437x_prueth_pdata = {
.fw_pru[PRUSS_PRU0] = {
.fw_name[PRUSS_ETHTYPE_EMAC] =
"ti-pruss/am437x-pru0-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am437x-pru0-prusw-fw.elf",
},
.fw_pru[PRUSS_PRU1] = {
.fw_name[PRUSS_ETHTYPE_EMAC] =
"ti-pruss/am437x-pru1-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am437x-pru1-prusw-fw.elf",
},
+ .support_switch = true,
};
/* AM57xx SoC-specific firmware data */
@@ -1714,11 +2211,17 @@ static struct prueth_private_data am57xx_prueth_pdata = {
.fw_pru[PRUSS_PRU0] = {
.fw_name[PRUSS_ETHTYPE_EMAC] =
"ti-pruss/am57xx-pru0-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am57xx-pru0-prusw-fw.elf",
},
.fw_pru[PRUSS_PRU1] = {
.fw_name[PRUSS_ETHTYPE_EMAC] =
"ti-pruss/am57xx-pru1-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am57xx-pru1-prusw-fw.elf",
},
+ .support_switch = true,
};
static const struct of_device_id prueth_dt_match[] = {
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth.h b/drivers/net/ethernet/ti/icssm/icssm_prueth.h
index 8e7e0af08144..d5b49b462c24 100644
--- a/drivers/net/ethernet/ti/icssm/icssm_prueth.h
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth.h
@@ -15,6 +15,7 @@
#include "icssm_switch.h"
#include "icssm_prueth_ptp.h"
+#include "icssm_prueth_fdb_tbl.h"
/* ICSSM size of redundancy tag */
#define ICSSM_LRE_TAG_SIZE 6
@@ -181,10 +182,12 @@ enum pruss_device {
* struct prueth_private_data - PRU Ethernet private data
* @driver_data: PRU Ethernet device name
* @fw_pru: firmware names to be used for PRUSS ethernet usecases
+ * @support_switch: boolean to indicate if switch mode is supported
*/
struct prueth_private_data {
enum pruss_device driver_data;
const struct prueth_firmware fw_pru[PRUSS_NUM_PRUS];
+ bool support_switch;
};
struct prueth_emac_stats {
@@ -221,15 +224,18 @@ struct prueth_emac {
const char *phy_id;
u32 msg_enable;
u8 mac_addr[6];
+ unsigned char mc_filter_mask[ETH_ALEN]; /* for multicast filtering */
phy_interface_t phy_if;
/* spin lock used to protect
* during link configuration
*/
spinlock_t lock;
+ spinlock_t addr_lock; /* serialize access to VLAN/MC filter table */
struct hrtimer tx_hrtimer;
struct prueth_emac_stats stats;
+ int offload_fwd_mark;
};
struct prueth {
@@ -248,15 +254,27 @@ struct prueth {
struct prueth_emac *emac[PRUETH_NUM_MACS];
struct net_device *registered_netdevs[PRUETH_NUM_MACS];
+ struct net_device *hw_bridge_dev;
+ struct fdb_tbl *fdb_tbl;
+
+ struct notifier_block prueth_netdevice_nb;
+ struct notifier_block prueth_switchdev_nb;
+ struct notifier_block prueth_switchdev_bl_nb;
+
unsigned int eth_type;
size_t ocmc_ram_size;
u8 emac_configured;
+ u8 br_members;
};
+extern const struct prueth_queue_desc queue_descs[][NUM_QUEUES];
+
void icssm_parse_packet_info(struct prueth *prueth, u32 buffer_descriptor,
struct prueth_packet_info *pkt_info);
int icssm_emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr,
struct prueth_packet_info *pkt_info,
const struct prueth_queue_info *rxqueue);
-
+void icssm_emac_mc_filter_bin_allow(struct prueth_emac *emac, u8 hash);
+void icssm_emac_mc_filter_bin_disallow(struct prueth_emac *emac, u8 hash);
+u8 icssm_emac_get_mc_hash(u8 *mac, u8 *mask);
#endif /* __NET_TI_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth_fdb_tbl.h b/drivers/net/ethernet/ti/icssm/icssm_prueth_fdb_tbl.h
new file mode 100644
index 000000000000..9089259d96ea
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth_fdb_tbl.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021 Texas Instruments Incorporated - https://www.ti.com */
+#ifndef __NET_TI_PRUSS_FDB_TBL_H
+#define __NET_TI_PRUSS_FDB_TBL_H
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include "icssm_prueth.h"
+
+/* 4 bytes */
+struct fdb_index_tbl_entry {
+ /* Bucket Table index of first Bucket with this MAC address */
+ u16 bucket_idx;
+ u16 bucket_entries; /* Number of entries in this bucket */
+};
+
+/* 4 * 256 = 1024 = 0x400 bytes */
+struct fdb_index_array {
+ struct fdb_index_tbl_entry index_tbl_entry[FDB_INDEX_TBL_MAX_ENTRIES];
+};
+
+/* 10 bytes */
+struct fdb_mac_tbl_entry {
+ u8 mac[ETH_ALEN];
+ u16 age;
+ u8 port; /* 0 based: 0=port1, 1=port2 */
+ union {
+ struct {
+ u8 is_static:1;
+ u8 active:1;
+ };
+ u8 flags;
+ };
+};
+
+/* 10 * 256 = 2560 = 0xa00 bytes */
+struct fdb_mac_tbl_array {
+ struct fdb_mac_tbl_entry mac_tbl_entry[FDB_MAC_TBL_MAX_ENTRIES];
+};
+
+/* 1 byte */
+struct fdb_stp_config {
+ u8 state; /* per-port STP state (defined in FW header) */
+};
+
+/* 1 byte */
+struct fdb_flood_config {
+ u8 host_flood_enable:1;
+ u8 port1_flood_enable:1;
+ u8 port2_flood_enable:1;
+};
+
+/* 2 bytes */
+struct fdb_arbitration {
+ u8 host_lock;
+ u8 pru_locks;
+};
+
+struct fdb_tbl {
+ /* fdb index table */
+ struct fdb_index_array __iomem *index_a;
+ /* fdb MAC table */
+ struct fdb_mac_tbl_array __iomem *mac_tbl_a;
+ /* port 1 stp config */
+ struct fdb_stp_config __iomem *port1_stp_cfg;
+ /* port 2 stp config */
+ struct fdb_stp_config __iomem *port2_stp_cfg;
+ /* per-port flood enable */
+ struct fdb_flood_config __iomem *flood_enable_flags;
+ /* fdb locking mechanism */
+ struct fdb_arbitration __iomem *locks;
+ /* total number of entries in hash table */
+ u16 total_entries;
+};
+
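Lookup over these tables is two-level: the 256-entry index array is addressed by the XOR hash of the MAC address (icssm_prueth_sw_fdb_hash() in icssm_prueth_switch.c), and each index entry points at a contiguous, sorted run of bucket_entries records in the MAC table.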
+#endif
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c b/drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c
new file mode 100644
index 000000000000..07c08564386e
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c
@@ -0,0 +1,1065 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments PRUETH Switch Driver
+ *
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/remoteproc.h>
+#include <net/switchdev.h>
+#include "icssm_prueth.h"
+#include "icssm_prueth_switch.h"
+#include "icssm_prueth_fdb_tbl.h"
+
+#define FDB_IDX_TBL_ENTRY(n) (&prueth->fdb_tbl->index_a->index_tbl_entry[n])
+
+#define FDB_MAC_TBL_ENTRY(n) (&prueth->fdb_tbl->mac_tbl_a->mac_tbl_entry[n])
+
+#define FLAG_IS_STATIC BIT(0)
+#define FLAG_ACTIVE BIT(1)
+
+#define FDB_LEARN 1
+#define FDB_PURGE 2
+
+struct icssm_prueth_sw_fdb_work {
+ netdevice_tracker ndev_tracker;
+ struct work_struct work;
+ struct prueth_emac *emac;
+ u8 addr[ETH_ALEN];
+ int event;
+};
+
+const struct prueth_queue_info sw_queue_infos[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ [PRUETH_QUEUE1] = {
+ P0_Q1_BUFFER_OFFSET,
+ P0_QUEUE_DESC_OFFSET,
+ P0_Q1_BD_OFFSET,
+ P0_Q1_BD_OFFSET + ((HOST_QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P0_Q2_BUFFER_OFFSET,
+ P0_QUEUE_DESC_OFFSET + 8,
+ P0_Q2_BD_OFFSET,
+ P0_Q2_BD_OFFSET + ((HOST_QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P0_Q3_BUFFER_OFFSET,
+ P0_QUEUE_DESC_OFFSET + 16,
+ P0_Q3_BD_OFFSET,
+ P0_Q3_BD_OFFSET + ((HOST_QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P0_Q4_BUFFER_OFFSET,
+ P0_QUEUE_DESC_OFFSET + 24,
+ P0_Q4_BD_OFFSET,
+ P0_Q4_BD_OFFSET + ((HOST_QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ [PRUETH_QUEUE1] = {
+ P1_Q1_BUFFER_OFFSET,
+ P1_Q1_BUFFER_OFFSET +
+ ((QUEUE_1_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q1_BD_OFFSET,
+ P1_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P1_Q2_BUFFER_OFFSET,
+ P1_Q2_BUFFER_OFFSET +
+ ((QUEUE_2_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q2_BD_OFFSET,
+ P1_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P1_Q3_BUFFER_OFFSET,
+ P1_Q3_BUFFER_OFFSET +
+ ((QUEUE_3_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q3_BD_OFFSET,
+ P1_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P1_Q4_BUFFER_OFFSET,
+ P1_Q4_BUFFER_OFFSET +
+ ((QUEUE_4_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q4_BD_OFFSET,
+ P1_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ [PRUETH_QUEUE1] = {
+ P2_Q1_BUFFER_OFFSET,
+ P2_Q1_BUFFER_OFFSET +
+ ((QUEUE_1_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q1_BD_OFFSET,
+ P2_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P2_Q2_BUFFER_OFFSET,
+ P2_Q2_BUFFER_OFFSET +
+ ((QUEUE_2_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q2_BD_OFFSET,
+ P2_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P2_Q3_BUFFER_OFFSET,
+ P2_Q3_BUFFER_OFFSET +
+ ((QUEUE_3_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q3_BD_OFFSET,
+ P2_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P2_Q4_BUFFER_OFFSET,
+ P2_Q4_BUFFER_OFFSET +
+ ((QUEUE_4_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q4_BD_OFFSET,
+ P2_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+};
+
+static const struct prueth_queue_info rx_queue_infos[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ [PRUETH_QUEUE1] = {
+ P0_Q1_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET,
+ P0_Q1_BD_OFFSET,
+ P0_Q1_BD_OFFSET + ((HOST_QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P0_Q2_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 8,
+ P0_Q2_BD_OFFSET,
+ P0_Q2_BD_OFFSET + ((HOST_QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P0_Q3_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 16,
+ P0_Q3_BD_OFFSET,
+ P0_Q3_BD_OFFSET + ((HOST_QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P0_Q4_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 24,
+ P0_Q4_BD_OFFSET,
+ P0_Q4_BD_OFFSET + ((HOST_QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ [PRUETH_QUEUE1] = {
+ P1_Q1_BUFFER_OFFSET,
+ P1_QUEUE_DESC_OFFSET,
+ P1_Q1_BD_OFFSET,
+ P1_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P1_Q2_BUFFER_OFFSET,
+ P1_QUEUE_DESC_OFFSET + 8,
+ P1_Q2_BD_OFFSET,
+ P1_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P1_Q3_BUFFER_OFFSET,
+ P1_QUEUE_DESC_OFFSET + 16,
+ P1_Q3_BD_OFFSET,
+ P1_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P1_Q4_BUFFER_OFFSET,
+ P1_QUEUE_DESC_OFFSET + 24,
+ P1_Q4_BD_OFFSET,
+ P1_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ [PRUETH_QUEUE1] = {
+ P2_Q1_BUFFER_OFFSET,
+ P2_QUEUE_DESC_OFFSET,
+ P2_Q1_BD_OFFSET,
+ P2_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P2_Q2_BUFFER_OFFSET,
+ P2_QUEUE_DESC_OFFSET + 8,
+ P2_Q2_BD_OFFSET,
+ P2_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P2_Q3_BUFFER_OFFSET,
+ P2_QUEUE_DESC_OFFSET + 16,
+ P2_Q3_BD_OFFSET,
+ P2_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P2_Q4_BUFFER_OFFSET,
+ P2_QUEUE_DESC_OFFSET + 24,
+ P2_Q4_BD_OFFSET,
+ P2_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+};
+
+void icssm_prueth_sw_free_fdb_table(struct prueth *prueth)
+{
+ if (prueth->emac_configured)
+ return;
+
+ kfree(prueth->fdb_tbl);
+ prueth->fdb_tbl = NULL;
+}
+
+void icssm_prueth_sw_fdb_tbl_init(struct prueth *prueth)
+{
+ struct fdb_tbl *t = prueth->fdb_tbl;
+ void __iomem *sram_base;
+ u8 val;
+
+ sram_base = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ t->index_a = sram_base + V2_1_FDB_TBL_OFFSET;
+ t->mac_tbl_a = sram_base + FDB_MAC_TBL_OFFSET;
+ t->port1_stp_cfg = sram_base + FDB_PORT1_STP_CFG_OFFSET;
+ t->port2_stp_cfg = sram_base + FDB_PORT2_STP_CFG_OFFSET;
+ t->flood_enable_flags = sram_base + FDB_FLOOD_ENABLE_FLAGS_OFFSET;
+ t->locks = sram_base + FDB_LOCKS_OFFSET;
+
+ val = readb(t->flood_enable_flags);
+ /* host_flood_enable = 1 */
+ val |= BIT(0);
+ /* port1_flood_enable = 1 */
+ val |= BIT(1);
+ /* port2_flood_enable = 1 */
+ val |= BIT(2);
+ writeb(val, t->flood_enable_flags);
+
+ writeb(0, &t->locks->host_lock);
+ t->total_entries = 0;
+}
+
+static u8 icssm_pru_lock_done(struct fdb_tbl *fdb_tbl)
+{
+ return readb(&fdb_tbl->locks->pru_locks);
+}
+
+static int icssm_prueth_sw_fdb_spin_lock(struct fdb_tbl *fdb_tbl)
+{
+ u8 done;
+ int ret;
+
+ /* Take the host lock */
+ writeb(1, &fdb_tbl->locks->host_lock);
+
+ /* Wait for the PRUs to release their locks */
+ ret = read_poll_timeout(icssm_pru_lock_done, done, done == 0,
+ 1, 10, false, fdb_tbl);
+ if (ret == -ETIMEDOUT)
+ writeb(0, &fdb_tbl->locks->host_lock);
+
+ return ret;
+}
+
+static void icssm_prueth_sw_fdb_spin_unlock(struct fdb_tbl *fdb_tbl)
+{
+ writeb(0, &fdb_tbl->locks->host_lock);
+}
+
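The arbitration is a plain flag handshake: the host raises host_lock, then polls until both PRUs have dropped pru_locks. Every FDB mutation in this file follows the same pattern; a condensed sketch of what icssm_prueth_sw_insert_fdb_entry() does further down:

	err = icssm_prueth_sw_fdb_spin_lock(prueth->fdb_tbl);
	if (err) {
		dev_err(prueth->dev, "PRU lock timeout %d\n", err);
		return err;
	}
	/* ... read/modify the shared tables via readb()/writeb() ... */
	icssm_prueth_sw_fdb_spin_unlock(prueth->fdb_tbl);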
+static u8 icssm_prueth_sw_fdb_hash(const u8 *mac)
+{
+ return (mac[0] ^ mac[1] ^ mac[2] ^ mac[3] ^ mac[4] ^ mac[5]);
+}
+
+static int
+icssm_prueth_sw_fdb_search(struct fdb_mac_tbl_array __iomem *mac_tbl,
+ struct fdb_index_tbl_entry __iomem *bucket_info,
+ const u8 *mac)
+{
+ unsigned int bucket_entries, mac_tbl_idx;
+ u8 tmp_mac[ETH_ALEN];
+ int i;
+
+ mac_tbl_idx = readw(&bucket_info->bucket_idx);
+ bucket_entries = readw(&bucket_info->bucket_entries);
+ for (i = 0; i < bucket_entries; i++, mac_tbl_idx++) {
+ memcpy_fromio(tmp_mac, mac_tbl->mac_tbl_entry[mac_tbl_idx].mac,
+ ETH_ALEN);
+ if (ether_addr_equal(mac, tmp_mac))
+ return mac_tbl_idx;
+ }
+
+ return -ENODATA;
+}
+
+static int icssm_prueth_sw_fdb_find_open_slot(struct fdb_tbl *fdb_tbl)
+{
+ unsigned int i;
+ u8 flags;
+
+ for (i = 0; i < FDB_MAC_TBL_MAX_ENTRIES; i++) {
+ flags = readb(&fdb_tbl->mac_tbl_a->mac_tbl_entry[i].flags);
+ if (!(flags & FLAG_ACTIVE))
+ break;
+ }
+
+ return i;
+}
+
+static int
+icssm_prueth_sw_find_fdb_insert(struct fdb_tbl *fdb, struct prueth *prueth,
+ struct fdb_index_tbl_entry __iomem *bkt_info,
+ const u8 *mac, const u8 port)
+{
+ struct fdb_mac_tbl_array __iomem *mac_tbl = fdb->mac_tbl_a;
+ unsigned int bucket_entries, mac_tbl_idx;
+ struct fdb_mac_tbl_entry __iomem *e;
+ u8 mac_from_hw[ETH_ALEN];
+ s8 cmp;
+ int i;
+
+ mac_tbl_idx = readw(&bkt_info->bucket_idx);
+ bucket_entries = readw(&bkt_info->bucket_entries);
+
+ for (i = 0; i < bucket_entries; i++, mac_tbl_idx++) {
+ e = &mac_tbl->mac_tbl_entry[mac_tbl_idx];
+ memcpy_fromio(mac_from_hw, e->mac, ETH_ALEN);
+ cmp = memcmp(mac, mac_from_hw, ETH_ALEN);
+ if (cmp < 0) {
+ return mac_tbl_idx;
+ } else if (cmp == 0) {
+ if (readb(&e->port) != port) {
+ /* MAC is already in FDB, only port is
+ * different. So just update the port.
+ * Note: total_entries and bucket_entries
+ * remain the same.
+ */
+ writeb(port, &e->port);
+ }
+
+ /* MAC and port are the same, touch the fdb */
+ writew(0, &e->age);
+ return -EEXIST;
+ }
+ }
+
+ return mac_tbl_idx;
+}
+
+static int
+icssm_prueth_sw_fdb_empty_slot_left(struct fdb_mac_tbl_array __iomem *mac_tbl,
+ unsigned int mac_tbl_idx)
+{
+ u8 flags;
+ int i;
+
+ for (i = mac_tbl_idx - 1; i > -1; i--) {
+ flags = readb(&mac_tbl->mac_tbl_entry[i].flags);
+ if (!(flags & FLAG_ACTIVE))
+ break;
+ }
+
+ return i;
+}
+
+static int
+icssm_prueth_sw_fdb_empty_slot_right(struct fdb_mac_tbl_array __iomem *mac_tbl,
+ unsigned int mac_tbl_idx)
+{
+ u8 flags;
+ int i;
+
+ for (i = mac_tbl_idx; i < FDB_MAC_TBL_MAX_ENTRIES; i++) {
+ flags = readb(&mac_tbl->mac_tbl_entry[i].flags);
+ if (!(flags & FLAG_ACTIVE))
+ return i;
+ }
+
+ return -1;
+}
+
+static void icssm_prueth_sw_fdb_move_range_left(struct prueth *prueth,
+ u16 left, u16 right)
+{
+ struct fdb_mac_tbl_entry entry;
+ u32 sz = 0;
+ u16 i;
+
+ sz = sizeof(struct fdb_mac_tbl_entry);
+ for (i = left; i < right; i++) {
+ memcpy_fromio(&entry, FDB_MAC_TBL_ENTRY(i + 1), sz);
+ memcpy_toio(FDB_MAC_TBL_ENTRY(i), &entry, sz);
+ }
+}
+
+static void icssm_prueth_sw_fdb_move_range_right(struct prueth *prueth,
+ u16 left, u16 right)
+{
+ struct fdb_mac_tbl_entry entry;
+ u32 sz = 0;
+ u16 i;
+
+ sz = sizeof(struct fdb_mac_tbl_entry);
+ for (i = right; i > left; i--) {
+ memcpy_fromio(&entry, FDB_MAC_TBL_ENTRY(i - 1), sz);
+ memcpy_toio(FDB_MAC_TBL_ENTRY(i), &entry, sz);
+ }
+}
+
+static void icssm_prueth_sw_fdb_update_index_tbl(struct prueth *prueth,
+ u16 left, u16 right)
+{
+ unsigned int hash, hash_prev;
+ u8 mac[ETH_ALEN];
+ unsigned int i;
+
+ /* To ensure we don't improperly update the
+ * bucket index, initialize with an invalid
+ * hash in case we are in the leftmost slot
+ */
+ hash_prev = 0xff;
+
+ if (left > 0) {
+ memcpy_fromio(mac, FDB_MAC_TBL_ENTRY(left - 1)->mac, ETH_ALEN);
+ hash_prev = icssm_prueth_sw_fdb_hash(mac);
+ }
+
+ /* For each moved element, update the bucket index */
+ for (i = left; i <= right; i++) {
+ memcpy_fromio(mac, FDB_MAC_TBL_ENTRY(i)->mac, ETH_ALEN);
+ hash = icssm_prueth_sw_fdb_hash(mac);
+
+ /* Only need to update buckets once */
+ if (hash != hash_prev)
+ writew(i, &FDB_IDX_TBL_ENTRY(hash)->bucket_idx);
+
+ hash_prev = hash;
+ }
+}
+
+static struct fdb_mac_tbl_entry __iomem *
+icssm_prueth_sw_find_free_mac(struct prueth *prueth,
+ struct fdb_index_tbl_entry __iomem *bucket_info,
+ u8 suggested_mac_tbl_idx, bool *update_indexes,
+ const u8 *mac)
+{
+ s16 empty_slot_idx = 0, left = 0, right = 0;
+ unsigned int mti = suggested_mac_tbl_idx;
+ struct fdb_mac_tbl_array __iomem *mt;
+ struct fdb_tbl *fdb;
+ u8 flags;
+
+ fdb = prueth->fdb_tbl;
+ mt = fdb->mac_tbl_a;
+
+ flags = readb(&FDB_MAC_TBL_ENTRY(mti)->flags);
+ if (!(flags & FLAG_ACTIVE)) {
+ /* Claim the entry */
+ flags |= FLAG_ACTIVE;
+ writeb(flags, &FDB_MAC_TBL_ENTRY(mti)->flags);
+
+ return FDB_MAC_TBL_ENTRY(mti);
+ }
+
+ if (fdb->total_entries == FDB_MAC_TBL_MAX_ENTRIES)
+ return NULL;
+
+ empty_slot_idx = icssm_prueth_sw_fdb_empty_slot_left(mt, mti);
+ if (empty_slot_idx == -1) {
+ /* Nothing available on the left, but the table isn't full,
+ * so there must be space to the right.
+ */
+ empty_slot_idx = icssm_prueth_sw_fdb_empty_slot_right(mt, mti);
+
+ /* Shift right */
+ left = mti;
+ right = empty_slot_idx;
+ icssm_prueth_sw_fdb_move_range_right(prueth, left, right);
+
+ /* Claim the entry */
+ flags = readb(&FDB_MAC_TBL_ENTRY(mti)->flags);
+ flags |= FLAG_ACTIVE;
+ writeb(flags, &FDB_MAC_TBL_ENTRY(mti)->flags);
+
+ memcpy_toio(FDB_MAC_TBL_ENTRY(mti)->mac, mac, ETH_ALEN);
+
+ /* There is a chance we moved something in a
+ * different bucket, update index table
+ */
+ icssm_prueth_sw_fdb_update_index_tbl(prueth, left, right);
+
+ return FDB_MAC_TBL_ENTRY(mti);
+ }
+
+ if (empty_slot_idx == mti - 1) {
+ /* There is space immediately left of the open slot,
+ * which means the inserted MAC address
+ * must be the lowest-valued MAC address in bucket.
+ * Update bucket pointer accordingly.
+ */
+ writew(empty_slot_idx, &bucket_info->bucket_idx);
+
+ /* Claim the entry */
+ flags = readb(&FDB_MAC_TBL_ENTRY(empty_slot_idx)->flags);
+ flags |= FLAG_ACTIVE;
+ writeb(flags, &FDB_MAC_TBL_ENTRY(empty_slot_idx)->flags);
+
+ return FDB_MAC_TBL_ENTRY(empty_slot_idx);
+ }
+
+ /* There is empty space to the left, shift MAC table entries left */
+ left = empty_slot_idx;
+ right = mti - 1;
+ icssm_prueth_sw_fdb_move_range_left(prueth, left, right);
+
+ /* Claim the entry */
+ flags = readb(&FDB_MAC_TBL_ENTRY(mti - 1)->flags);
+ flags |= FLAG_ACTIVE;
+ writeb(flags, &FDB_MAC_TBL_ENTRY(mti - 1)->flags);
+
+ memcpy_toio(FDB_MAC_TBL_ENTRY(mti - 1)->mac, mac, ETH_ALEN);
+
+ /* There is a chance we moved something in a
+ * different bucket, so update the index table
+ */
+ icssm_prueth_sw_fdb_update_index_tbl(prueth, left, right);
+
+ return FDB_MAC_TBL_ENTRY(mti - 1);
+}
+
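+/* Insert one FDB entry. The index and MAC tables are shared with the
+ * PRU firmware, so every table update below is done while holding the
+ * firmware spin lock taken via icssm_prueth_sw_fdb_spin_lock().
+ */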
+static int icssm_prueth_sw_insert_fdb_entry(struct prueth_emac *emac,
+ const u8 *mac, u8 is_static)
+{
+ struct fdb_index_tbl_entry __iomem *bucket_info;
+ struct fdb_mac_tbl_entry __iomem *mac_info;
+ struct prueth *prueth = emac->prueth;
+ unsigned int hash_val, mac_tbl_idx;
+ struct prueth_emac *other_emac;
+ enum prueth_port other_port_id;
+ int total_fdb_entries;
+ struct fdb_tbl *fdb;
+ u8 flags;
+ s16 ret;
+ int err;
+ u16 val;
+
+ fdb = prueth->fdb_tbl;
+ other_port_id = (emac->port_id == PRUETH_PORT_MII0) ?
+ PRUETH_PORT_MII1 : PRUETH_PORT_MII0;
+
+ other_emac = prueth->emac[other_port_id - 1];
+ if (!other_emac)
+ return -EINVAL;
+
+ err = icssm_prueth_sw_fdb_spin_lock(fdb);
+ if (err) {
+ dev_err(prueth->dev, "PRU lock timeout %d\n", err);
+ return err;
+ }
+
+ if (fdb->total_entries == FDB_MAC_TBL_MAX_ENTRIES) {
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ return -ENOMEM;
+ }
+
+ if (ether_addr_equal(mac, emac->mac_addr) ||
+ (ether_addr_equal(mac, other_emac->mac_addr))) {
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ /* Don't insert fdb of own mac addr */
+ return -EINVAL;
+ }
+
+ /* Get the bucket that the mac belongs to */
+ hash_val = icssm_prueth_sw_fdb_hash(mac);
+ bucket_info = FDB_IDX_TBL_ENTRY(hash_val);
+
+ if (!readw(&bucket_info->bucket_entries)) {
+ mac_tbl_idx = icssm_prueth_sw_fdb_find_open_slot(fdb);
+ writew(mac_tbl_idx, &bucket_info->bucket_idx);
+ }
+
+ ret = icssm_prueth_sw_find_fdb_insert(fdb, prueth, bucket_info, mac,
+ emac->port_id - 1);
+ if (ret < 0) {
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ /* mac is already in fdb table */
+ return 0;
+ }
+
+ mac_tbl_idx = ret;
+
+ mac_info = icssm_prueth_sw_find_free_mac(prueth, bucket_info,
+ mac_tbl_idx, NULL,
+ mac);
+ if (!mac_info) {
+ /* Should not happen */
+ dev_warn(prueth->dev, "out of FDB memory\n");
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ return -ENOMEM;
+ }
+
+ memcpy_toio(mac_info->mac, mac, ETH_ALEN);
+ writew(0, &mac_info->age);
+ writeb(emac->port_id - 1, &mac_info->port);
+
+ flags = readb(&mac_info->flags);
+ if (is_static)
+ flags |= FLAG_IS_STATIC;
+ else
+ flags &= ~FLAG_IS_STATIC;
+
+ /* bit 1 - active */
+ flags |= FLAG_ACTIVE;
+ writeb(flags, &mac_info->flags);
+
+ val = readw(&bucket_info->bucket_entries);
+ val++;
+ writew(val, &bucket_info->bucket_entries);
+
+ fdb->total_entries++;
+
+ total_fdb_entries = fdb->total_entries;
+
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+
+ dev_dbg(prueth->dev, "added fdb: %pM port=%d total_entries=%u\n",
+ mac, emac->port_id, total_fdb_entries);
+
+ return 0;
+}
+
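+/* Delete one FDB entry: shift the tail of its bucket one slot left
+ * over the removed entry and deactivate the now-stale last slot, so
+ * the bucket stays contiguous without touching the index table.
+ */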
+static int icssm_prueth_sw_delete_fdb_entry(struct prueth_emac *emac,
+ const u8 *mac, u8 is_static)
+{
+ struct fdb_index_tbl_entry __iomem *bucket_info;
+ struct fdb_mac_tbl_entry __iomem *mac_info;
+ struct fdb_mac_tbl_array __iomem *mt;
+ unsigned int hash_val, mac_tbl_idx;
+ unsigned int idx, entries;
+ struct prueth *prueth;
+ int total_fdb_entries;
+ s16 ret, left, right;
+ struct fdb_tbl *fdb;
+ u8 flags;
+ int err;
+ u16 val;
+
+ prueth = emac->prueth;
+ fdb = prueth->fdb_tbl;
+ mt = fdb->mac_tbl_a;
+
+ err = icssm_prueth_sw_fdb_spin_lock(fdb);
+ if (err) {
+ dev_err(prueth->dev, "PRU lock timeout %d\n", err);
+ return err;
+ }
+
+ if (fdb->total_entries == 0) {
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ return 0;
+ }
+
+ /* Get the bucket that the mac belongs to */
+ hash_val = icssm_prueth_sw_fdb_hash(mac);
+ bucket_info = FDB_IDX_TBL_ENTRY(hash_val);
+
+ ret = icssm_prueth_sw_fdb_search(mt, bucket_info, mac);
+ if (ret < 0) {
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ return ret;
+ }
+
+ mac_tbl_idx = ret;
+ mac_info = FDB_MAC_TBL_ENTRY(mac_tbl_idx);
+
+ /* Shift all elements in bucket to the left. No need to
+ * update index table since only shifting within bucket.
+ */
+ left = mac_tbl_idx;
+ idx = readw(&bucket_info->bucket_idx);
+ entries = readw(&bucket_info->bucket_entries);
+ right = idx + entries - 1;
+ icssm_prueth_sw_fdb_move_range_left(prueth, left, right);
+
+ /* Remove end of bucket from table */
+ mac_info = FDB_MAC_TBL_ENTRY(right);
+ flags = readb(&mac_info->flags);
+ /* active = 0 */
+ flags &= ~FLAG_ACTIVE;
+ writeb(flags, &mac_info->flags);
+ val = readw(&bucket_info->bucket_entries);
+ val--;
+ writew(val, &bucket_info->bucket_entries);
+ fdb->total_entries--;
+
+ total_fdb_entries = fdb->total_entries;
+
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+
+ dev_dbg(prueth->dev, "del fdb: %pM total_entries=%u\n",
+ mac, total_fdb_entries);
+
+ return 0;
+}
+
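+/* Drop every learned (non-static) entry from the FDB. Entries are
+ * deactivated in place and the bucket counts are decremented; the MAC
+ * table is not compacted.
+ */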
+int icssm_prueth_sw_do_purge_fdb(struct prueth_emac *emac)
+{
+ struct fdb_index_tbl_entry __iomem *bucket_info;
+ struct prueth *prueth = emac->prueth;
+ u8 flags, mac[ETH_ALEN];
+ unsigned int hash_val;
+ struct fdb_tbl *fdb;
+ int ret, i;
+ u16 val;
+
+ fdb = prueth->fdb_tbl;
+
+ ret = icssm_prueth_sw_fdb_spin_lock(fdb);
+ if (ret) {
+ dev_err(prueth->dev, "PRU lock timeout %d\n", ret);
+ return ret;
+ }
+
+ if (fdb->total_entries == 0) {
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ return 0;
+ }
+
+ for (i = 0; i < FDB_MAC_TBL_MAX_ENTRIES; i++) {
+ flags = readb(&fdb->mac_tbl_a->mac_tbl_entry[i].flags);
+ if ((flags & FLAG_ACTIVE) && !(flags & FLAG_IS_STATIC)) {
+ /* Get the bucket that the mac belongs to */
+ memcpy_fromio(mac, FDB_MAC_TBL_ENTRY(i)->mac,
+ ETH_ALEN);
+ hash_val = icssm_prueth_sw_fdb_hash(mac);
+ bucket_info = FDB_IDX_TBL_ENTRY(hash_val);
+ flags &= ~FLAG_ACTIVE;
+ writeb(flags,
+ &fdb->mac_tbl_a->mac_tbl_entry[i].flags);
+ val = readw(&bucket_info->bucket_entries);
+ val--;
+ writew(val, &bucket_info->bucket_entries);
+ fdb->total_entries--;
+ }
+ }
+
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ return 0;
+}
+
+int icssm_prueth_sw_init_fdb_table(struct prueth *prueth)
+{
+ if (prueth->emac_configured)
+ return 0;
+
+ prueth->fdb_tbl = kmalloc(sizeof(*prueth->fdb_tbl), GFP_KERNEL);
+ if (!prueth->fdb_tbl)
+ return -ENOMEM;
+
+ icssm_prueth_sw_fdb_tbl_init(prueth);
+
+ return 0;
+}
+
+/**
+ * icssm_prueth_sw_fdb_add - insert fdb entry
+ *
+ * @emac: EMAC data structure
+ * @fdb: fdb info
+ *
+ */
+void icssm_prueth_sw_fdb_add(struct prueth_emac *emac,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ icssm_prueth_sw_insert_fdb_entry(emac, fdb->addr, 1);
+}
+
+/**
+ * icssm_prueth_sw_fdb_del - delete fdb entry
+ *
+ * @emac: EMAC data structure
+ * @fdb: fdb info
+ *
+ */
+void icssm_prueth_sw_fdb_del(struct prueth_emac *emac,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ icssm_prueth_sw_delete_fdb_entry(emac, fdb->addr, 1);
+}
+
+static void icssm_prueth_sw_fdb_work(struct work_struct *work)
+{
+ struct icssm_prueth_sw_fdb_work *fdb_work =
+ container_of(work, struct icssm_prueth_sw_fdb_work, work);
+ struct prueth_emac *emac = fdb_work->emac;
+
+ rtnl_lock();
+
+ /* Interface is not up */
+ if (!emac->prueth->fdb_tbl)
+ goto free;
+
+ switch (fdb_work->event) {
+ case FDB_LEARN:
+ icssm_prueth_sw_insert_fdb_entry(emac, fdb_work->addr, 0);
+ break;
+ case FDB_PURGE:
+ icssm_prueth_sw_do_purge_fdb(emac);
+ break;
+ default:
+ break;
+ }
+
+free:
+ rtnl_unlock();
+ netdev_put(emac->ndev, &fdb_work->ndev_tracker);
+ kfree(fdb_work);
+}
+
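+/* FDB learning and purge requests can arrive in atomic context, so
+ * both are deferred to a work item on system_long_wq; a reference on
+ * the netdev is held until the work item has run.
+ */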
+int icssm_prueth_sw_learn_fdb(struct prueth_emac *emac, u8 *src_mac)
+{
+ struct icssm_prueth_sw_fdb_work *fdb_work;
+
+ fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+ if (WARN_ON(!fdb_work))
+ return -ENOMEM;
+
+ INIT_WORK(&fdb_work->work, icssm_prueth_sw_fdb_work);
+
+ fdb_work->event = FDB_LEARN;
+ fdb_work->emac = emac;
+ ether_addr_copy(fdb_work->addr, src_mac);
+
+ netdev_hold(emac->ndev, &fdb_work->ndev_tracker, GFP_ATOMIC);
+ queue_work(system_long_wq, &fdb_work->work);
+ return 0;
+}
+
+int icssm_prueth_sw_purge_fdb(struct prueth_emac *emac)
+{
+ struct icssm_prueth_sw_fdb_work *fdb_work;
+
+ fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+ if (WARN_ON(!fdb_work))
+ return -ENOMEM;
+
+ INIT_WORK(&fdb_work->work, icssm_prueth_sw_fdb_work);
+
+ fdb_work->event = FDB_PURGE;
+ fdb_work->emac = emac;
+
+ netdev_hold(emac->ndev, &fdb_work->ndev_tracker, GFP_ATOMIC);
+ queue_work(system_long_wq, &fdb_work->work);
+ return 0;
+}
+
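+/* Program the host (port 0) receive context in DRAM1: queue info,
+ * buffer descriptor offsets, buffer offsets, queue sizes and the
+ * queue descriptors for the four host RX queues.
+ */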
+void icssm_prueth_sw_hostconfig(struct prueth *prueth)
+{
+ void __iomem *dram1_base = prueth->mem[PRUETH_MEM_DRAM1].va;
+ void __iomem *dram;
+
+ /* queue information table */
+ dram = dram1_base + P0_Q1_RX_CONTEXT_OFFSET;
+ memcpy_toio(dram, sw_queue_infos[PRUETH_PORT_QUEUE_HOST],
+ sizeof(sw_queue_infos[PRUETH_PORT_QUEUE_HOST]));
+
+ /* buffer descriptor offset table */
+ dram = dram1_base + QUEUE_DESCRIPTOR_OFFSET_ADDR;
+ writew(P0_Q1_BD_OFFSET, dram);
+ writew(P0_Q2_BD_OFFSET, dram + 2);
+ writew(P0_Q3_BD_OFFSET, dram + 4);
+ writew(P0_Q4_BD_OFFSET, dram + 6);
+
+ /* buffer offset table */
+ dram = dram1_base + QUEUE_OFFSET_ADDR;
+ writew(P0_Q1_BUFFER_OFFSET, dram);
+ writew(P0_Q2_BUFFER_OFFSET, dram + 2);
+ writew(P0_Q3_BUFFER_OFFSET, dram + 4);
+ writew(P0_Q4_BUFFER_OFFSET, dram + 6);
+
+ /* queue size lookup table */
+ dram = dram1_base + QUEUE_SIZE_ADDR;
+ writew(HOST_QUEUE_1_SIZE, dram);
+ writew(HOST_QUEUE_1_SIZE, dram + 2);
+ writew(HOST_QUEUE_1_SIZE, dram + 4);
+ writew(HOST_QUEUE_1_SIZE, dram + 6);
+
+ /* queue table */
+ dram = dram1_base + P0_QUEUE_DESC_OFFSET;
+ memcpy_toio(dram, queue_descs[PRUETH_PORT_QUEUE_HOST],
+ sizeof(queue_descs[PRUETH_PORT_QUEUE_HOST]));
+}
+
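+/* Per-port switch configuration. The port MAC address lives in DRAM0
+ * for MII0 and in DRAM1 for MII1; the remaining queue and context
+ * tables for both ports live in DRAM1.
+ */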
+static int icssm_prueth_sw_port_config(struct prueth *prueth,
+ enum prueth_port port_id)
+{
+ unsigned int tx_context_ofs_addr, rx_context_ofs, queue_desc_ofs;
+ void __iomem *dram, *dram_base, *dram_mac;
+ struct prueth_emac *emac;
+ void __iomem *dram1_base;
+
+ dram1_base = prueth->mem[PRUETH_MEM_DRAM1].va;
+ emac = prueth->emac[port_id - 1];
+ switch (port_id) {
+ case PRUETH_PORT_MII0:
+ tx_context_ofs_addr = TX_CONTEXT_P1_Q1_OFFSET_ADDR;
+ rx_context_ofs = P1_Q1_RX_CONTEXT_OFFSET;
+ queue_desc_ofs = P1_QUEUE_DESC_OFFSET;
+
+ /* For switch port MII0, the MAC address is in DRAM0. */
+ dram_mac = prueth->mem[PRUETH_MEM_DRAM0].va;
+ break;
+ case PRUETH_PORT_MII1:
+ tx_context_ofs_addr = TX_CONTEXT_P2_Q1_OFFSET_ADDR;
+ rx_context_ofs = P2_Q1_RX_CONTEXT_OFFSET;
+ queue_desc_ofs = P2_QUEUE_DESC_OFFSET;
+
+ /* For switch port MII1, the MAC address is in DRAM1. */
+ dram_mac = prueth->mem[PRUETH_MEM_DRAM1].va;
+ break;
+ default:
+ netdev_err(emac->ndev, "invalid port\n");
+ return -EINVAL;
+ }
+
+ /* setup mac address */
+ memcpy_toio(dram_mac + PORT_MAC_ADDR, emac->mac_addr, ETH_ALEN);
+
+ /* Remaining switch port configs are in DRAM1 */
+ dram_base = prueth->mem[PRUETH_MEM_DRAM1].va;
+
+ /* queue information table */
+ memcpy_toio(dram_base + tx_context_ofs_addr,
+ sw_queue_infos[port_id],
+ sizeof(sw_queue_infos[port_id]));
+
+ memcpy_toio(dram_base + rx_context_ofs,
+ rx_queue_infos[port_id],
+ sizeof(rx_queue_infos[port_id]));
+
+ /* buffer descriptor offset table */
+ dram = dram_base + QUEUE_DESCRIPTOR_OFFSET_ADDR +
+ (port_id * NUM_QUEUES * sizeof(u16));
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE1].buffer_desc_offset, dram);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE2].buffer_desc_offset,
+ dram + 2);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE3].buffer_desc_offset,
+ dram + 4);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE4].buffer_desc_offset,
+ dram + 6);
+
+ /* buffer offset table */
+ dram = dram_base + QUEUE_OFFSET_ADDR +
+ port_id * NUM_QUEUES * sizeof(u16);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE1].buffer_offset, dram);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE2].buffer_offset,
+ dram + 2);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE3].buffer_offset,
+ dram + 4);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE4].buffer_offset,
+ dram + 6);
+
+ /* queue size lookup table */
+ dram = dram_base + QUEUE_SIZE_ADDR +
+ port_id * NUM_QUEUES * sizeof(u16);
+ writew(QUEUE_1_SIZE, dram);
+ writew(QUEUE_2_SIZE, dram + 2);
+ writew(QUEUE_3_SIZE, dram + 4);
+ writew(QUEUE_4_SIZE, dram + 6);
+
+ /* queue table */
+ memcpy_toio(dram_base + queue_desc_ofs,
+ &queue_descs[port_id][0],
+ 4 * sizeof(queue_descs[port_id][0]));
+
+ emac->rx_queue_descs = dram1_base + P0_QUEUE_DESC_OFFSET;
+ emac->tx_queue_descs = dram1_base +
+ rx_queue_infos[port_id][PRUETH_QUEUE1].queue_desc_offset;
+
+ return 0;
+}
+
+int icssm_prueth_sw_emac_config(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ u32 sharedramaddr, ocmcaddr;
+ int ret;
+
+ /* PRU needs local shared RAM address for C28 */
+ sharedramaddr = ICSS_LOCAL_SHARED_RAM;
+ /* PRU needs real global OCMC address for C30 */
+ ocmcaddr = (u32)prueth->mem[PRUETH_MEM_OCMC].pa;
+
+ if (prueth->emac_configured & BIT(emac->port_id))
+ return 0;
+
+ ret = icssm_prueth_sw_port_config(prueth, emac->port_id);
+ if (ret)
+ return ret;
+
+ if (!prueth->emac_configured) {
+ /* Set in constant table C28 of PRUn to ICSS Shared memory */
+ pru_rproc_set_ctable(prueth->pru0, PRU_C28, sharedramaddr);
+ pru_rproc_set_ctable(prueth->pru1, PRU_C28, sharedramaddr);
+
+ /* Set in constant table C30 of PRUn to OCMC memory */
+ pru_rproc_set_ctable(prueth->pru0, PRU_C30, ocmcaddr);
+ pru_rproc_set_ctable(prueth->pru1, PRU_C30, ocmcaddr);
+ }
+ return 0;
+}
+
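+/* Boot both PRUs with the firmware matching the current eth_type.
+ * This runs only once, when the first port is brought up.
+ */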
+int icssm_prueth_sw_boot_prus(struct prueth *prueth, struct net_device *ndev)
+{
+ const struct prueth_firmware *pru_firmwares;
+ const char *fw_name, *fw_name1;
+ int ret;
+
+ if (prueth->emac_configured)
+ return 0;
+
+ pru_firmwares = &prueth->fw_data->fw_pru[PRUSS_PRU0];
+ fw_name = pru_firmwares->fw_name[prueth->eth_type];
+ pru_firmwares = &prueth->fw_data->fw_pru[PRUSS_PRU1];
+ fw_name1 = pru_firmwares->fw_name[prueth->eth_type];
+
+ ret = rproc_set_firmware(prueth->pru0, fw_name);
+ if (ret) {
+ netdev_err(ndev, "failed to set PRU0 firmware %s: %d\n",
+ fw_name, ret);
+ return ret;
+ }
+ ret = rproc_boot(prueth->pru0);
+ if (ret) {
+ netdev_err(ndev, "failed to boot PRU0: %d\n", ret);
+ return ret;
+ }
+
+ ret = rproc_set_firmware(prueth->pru1, fw_name1);
+ if (ret) {
+ netdev_err(ndev, "failed to set PRU1 firmware %s: %d\n",
+ fw_name1, ret);
+ goto rproc0_shutdown;
+ }
+ ret = rproc_boot(prueth->pru1);
+ if (ret) {
+ netdev_err(ndev, "failed to boot PRU1: %d\n", ret);
+ goto rproc0_shutdown;
+ }
+
+ return 0;
+
+rproc0_shutdown:
+ rproc_shutdown(prueth->pru0);
+ return ret;
+}
+
+int icssm_prueth_sw_shutdown_prus(struct prueth_emac *emac,
+ struct net_device *ndev)
+{
+ struct prueth *prueth = emac->prueth;
+
+ if (prueth->emac_configured)
+ return 0;
+
+ rproc_shutdown(prueth->pru0);
+ rproc_shutdown(prueth->pru1);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth_switch.h b/drivers/net/ethernet/ti/icssm/icssm_prueth_switch.h
new file mode 100644
index 000000000000..e6111bba166e
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth_switch.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __NET_TI_PRUETH_SWITCH_H
+#define __NET_TI_PRUETH_SWITCH_H
+
+#include <net/switchdev.h>
+
+#include "icssm_prueth.h"
+#include "icssm_prueth_fdb_tbl.h"
+#include "icssm_switchdev.h"
+
+void icssm_prueth_sw_set_stp_state(struct prueth *prueth,
+ enum prueth_port port, u8 state);
+u8 icssm_prueth_sw_get_stp_state(struct prueth *prueth,
+ enum prueth_port port);
+
+extern const struct prueth_queue_info sw_queue_infos[][4];
+
+void icssm_prueth_sw_fdb_tbl_init(struct prueth *prueth);
+int icssm_prueth_sw_init_fdb_table(struct prueth *prueth);
+void icssm_prueth_sw_free_fdb_table(struct prueth *prueth);
+int icssm_prueth_sw_do_purge_fdb(struct prueth_emac *emac);
+void icssm_prueth_sw_fdb_add(struct prueth_emac *emac,
+ struct switchdev_notifier_fdb_info *fdb);
+void icssm_prueth_sw_fdb_del(struct prueth_emac *emac,
+ struct switchdev_notifier_fdb_info *fdb);
+int icssm_prueth_sw_learn_fdb(struct prueth_emac *emac, u8 *src_mac);
+int icssm_prueth_sw_purge_fdb(struct prueth_emac *emac);
+void icssm_prueth_sw_hostconfig(struct prueth *prueth);
+int icssm_prueth_sw_emac_config(struct prueth_emac *emac);
+int icssm_prueth_sw_boot_prus(struct prueth *prueth, struct net_device *ndev);
+int icssm_prueth_sw_shutdown_prus(struct prueth_emac *emac,
+ struct net_device *ndev);
+
+#endif /* __NET_TI_PRUETH_SWITCH_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_switch.h b/drivers/net/ethernet/ti/icssm/icssm_switch.h
index 8b494ffdcde7..5ba9ce14da44 100644
--- a/drivers/net/ethernet/ti/icssm/icssm_switch.h
+++ b/drivers/net/ethernet/ti/icssm/icssm_switch.h
@@ -117,6 +117,15 @@
#define STATISTICS_OFFSET 0x1F00
#define STAT_SIZE 0x98
+/* The following offsets indicate which sections of memory are used
+ * for switch internal tasks
+ */
+#define SWITCH_SPECIFIC_DRAM0_START_SIZE 0x100
+#define SWITCH_SPECIFIC_DRAM0_START_OFFSET 0x1F00
+
+#define SWITCH_SPECIFIC_DRAM1_START_SIZE 0x300
+#define SWITCH_SPECIFIC_DRAM1_START_OFFSET 0x1D00
+
/* Offset for storing
* 1. Storm Prevention Params
* 2. PHY Speed Offset
@@ -146,6 +155,74 @@
/* 4 bytes ? */
#define STP_INVALID_STATE_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 33)
+/* DRAM1 Offsets for Switch */
+/* 4 queue descriptors each for port 0 (host receive), port 1 and port 2 */
+#define P0_QUEUE_DESC_OFFSET 0x1E7C
+#define P1_QUEUE_DESC_OFFSET 0x1E9C
+#define P2_QUEUE_DESC_OFFSET 0x1EBC
+/* collision queue descriptors for ports 0, 1 and 2 */
+#define P0_COL_QUEUE_DESC_OFFSET 0x1E64
+#define P1_COL_QUEUE_DESC_OFFSET 0x1E6C
+#define P2_COL_QUEUE_DESC_OFFSET 0x1E74
+/* Collision Status Register
+ * P0: bit 0 is pending flag, bit 1..2 indicates which queue,
+ * P1: bit 8 is pending flag, 9..10 is queue number
+ * P2: bit 16 is pending flag, 17..18 is queue number, remaining bits are 0.
+ */
+#define COLLISION_STATUS_ADDR 0x1E60
+
+#define INTERFACE_MAC_ADDR 0x1E58
+#define P2_MAC_ADDR 0x1E50
+#define P1_MAC_ADDR 0x1E48
+
+#define QUEUE_SIZE_ADDR 0x1E30
+#define QUEUE_OFFSET_ADDR 0x1E18
+#define QUEUE_DESCRIPTOR_OFFSET_ADDR 0x1E00
+
+#define COL_RX_CONTEXT_P2_OFFSET_ADDR (COL_RX_CONTEXT_P1_OFFSET_ADDR + 12)
+#define COL_RX_CONTEXT_P1_OFFSET_ADDR (COL_RX_CONTEXT_P0_OFFSET_ADDR + 12)
+#define COL_RX_CONTEXT_P0_OFFSET_ADDR (P2_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Port 2 Rx Context */
+#define P2_Q4_RX_CONTEXT_OFFSET (P2_Q3_RX_CONTEXT_OFFSET + 8)
+#define P2_Q3_RX_CONTEXT_OFFSET (P2_Q2_RX_CONTEXT_OFFSET + 8)
+#define P2_Q2_RX_CONTEXT_OFFSET (P2_Q1_RX_CONTEXT_OFFSET + 8)
+#define P2_Q1_RX_CONTEXT_OFFSET RX_CONTEXT_P2_Q1_OFFSET_ADDR
+#define RX_CONTEXT_P2_Q1_OFFSET_ADDR (P1_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Port 1 Rx Context */
+#define P1_Q4_RX_CONTEXT_OFFSET (P1_Q3_RX_CONTEXT_OFFSET + 8)
+#define P1_Q3_RX_CONTEXT_OFFSET (P1_Q2_RX_CONTEXT_OFFSET + 8)
+#define P1_Q2_RX_CONTEXT_OFFSET (P1_Q1_RX_CONTEXT_OFFSET + 8)
+#define P1_Q1_RX_CONTEXT_OFFSET (RX_CONTEXT_P1_Q1_OFFSET_ADDR)
+#define RX_CONTEXT_P1_Q1_OFFSET_ADDR (P0_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Host Port Rx Context */
+#define P0_Q4_RX_CONTEXT_OFFSET (P0_Q3_RX_CONTEXT_OFFSET + 8)
+#define P0_Q3_RX_CONTEXT_OFFSET (P0_Q2_RX_CONTEXT_OFFSET + 8)
+#define P0_Q2_RX_CONTEXT_OFFSET (P0_Q1_RX_CONTEXT_OFFSET + 8)
+#define P0_Q1_RX_CONTEXT_OFFSET RX_CONTEXT_P0_Q1_OFFSET_ADDR
+#define RX_CONTEXT_P0_Q1_OFFSET_ADDR (COL_TX_CONTEXT_P2_Q1_OFFSET_ADDR + 8)
+
+/* Port 2 Tx Collision Context */
+#define COL_TX_CONTEXT_P2_Q1_OFFSET_ADDR (COL_TX_CONTEXT_P1_Q1_OFFSET_ADDR + 8)
+/* Port 1 Tx Collision Context */
+#define COL_TX_CONTEXT_P1_Q1_OFFSET_ADDR (P2_Q4_TX_CONTEXT_OFFSET + 8)
+
+/* Port 2 */
+#define P2_Q4_TX_CONTEXT_OFFSET (P2_Q3_TX_CONTEXT_OFFSET + 8)
+#define P2_Q3_TX_CONTEXT_OFFSET (P2_Q2_TX_CONTEXT_OFFSET + 8)
+#define P2_Q2_TX_CONTEXT_OFFSET (P2_Q1_TX_CONTEXT_OFFSET + 8)
+#define P2_Q1_TX_CONTEXT_OFFSET TX_CONTEXT_P2_Q1_OFFSET_ADDR
+#define TX_CONTEXT_P2_Q1_OFFSET_ADDR (P1_Q4_TX_CONTEXT_OFFSET + 8)
+
+/* Port 1 */
+#define P1_Q4_TX_CONTEXT_OFFSET (P1_Q3_TX_CONTEXT_OFFSET + 8)
+#define P1_Q3_TX_CONTEXT_OFFSET (P1_Q2_TX_CONTEXT_OFFSET + 8)
+#define P1_Q2_TX_CONTEXT_OFFSET (P1_Q1_TX_CONTEXT_OFFSET + 8)
+#define P1_Q1_TX_CONTEXT_OFFSET TX_CONTEXT_P1_Q1_OFFSET_ADDR
+#define TX_CONTEXT_P1_Q1_OFFSET_ADDR SWITCH_SPECIFIC_DRAM1_START_OFFSET
+
/* DRAM Offsets for EMAC
* Present on Both DRAM0 and DRAM1
*/
@@ -254,4 +331,30 @@
#define P0_COL_BUFFER_OFFSET 0xEE00
#define P0_Q1_BUFFER_OFFSET 0x0000
+#define V2_1_FDB_TBL_LOC PRUETH_MEM_SHARED_RAM
+#define V2_1_FDB_TBL_OFFSET 0x2000
+
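+/* The FDB lives in shared RAM and is packed in this order: 256-entry
+ * index table, 256-entry MAC table, per-port STP config, flood-enable
+ * flags, then the FDB locks.
+ */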
+#define FDB_INDEX_TBL_MAX_ENTRIES 256
+#define FDB_MAC_TBL_MAX_ENTRIES 256
+
+#define FDB_INDEX_TBL_OFFSET V2_1_FDB_TBL_OFFSET
+#define FDB_INDEX_TBL_SIZE (FDB_INDEX_TBL_MAX_ENTRIES * \
+ sizeof(struct fdb_index_tbl_entry))
+
+#define FDB_MAC_TBL_OFFSET (FDB_INDEX_TBL_OFFSET + FDB_INDEX_TBL_SIZE)
+#define FDB_MAC_TBL_SIZE (FDB_MAC_TBL_MAX_ENTRIES * \
+ sizeof(struct fdb_mac_tbl_entry))
+
+#define FDB_PORT1_STP_CFG_OFFSET (FDB_MAC_TBL_OFFSET + FDB_MAC_TBL_SIZE)
+#define FDB_PORT_STP_CFG_SIZE sizeof(struct fdb_stp_config)
+#define FDB_PORT2_STP_CFG_OFFSET (FDB_PORT1_STP_CFG_OFFSET + \
+ FDB_PORT_STP_CFG_SIZE)
+
+#define FDB_FLOOD_ENABLE_FLAGS_OFFSET (FDB_PORT2_STP_CFG_OFFSET + \
+ FDB_PORT_STP_CFG_SIZE)
+#define FDB_FLOOD_ENABLE_FLAGS_SIZE sizeof(struct fdb_flood_config)
+
+#define FDB_LOCKS_OFFSET (FDB_FLOOD_ENABLE_FLAGS_OFFSET + \
+ FDB_FLOOD_ENABLE_FLAGS_SIZE)
+
#endif /* __ICSS_SWITCH_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_switchdev.c b/drivers/net/ethernet/ti/icssm/icssm_switchdev.c
new file mode 100644
index 000000000000..414ec9fc02a0
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_switchdev.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSM Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/remoteproc.h>
+#include <net/switchdev.h>
+
+#include "icssm_prueth.h"
+#include "icssm_prueth_switch.h"
+#include "icssm_prueth_fdb_tbl.h"
+
+/* switchdev event work */
+struct icssm_sw_event_work {
+ netdevice_tracker ndev_tracker;
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct prueth_emac *emac;
+ unsigned long event;
+};
+
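+/* The STP port state is a single byte per external port in the shared
+ * FDB table, accessed directly through the IO mapping.
+ */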
+void icssm_prueth_sw_set_stp_state(struct prueth *prueth,
+ enum prueth_port port, u8 state)
+{
+ struct fdb_tbl *t = prueth->fdb_tbl;
+
+ writeb(state, port - 1 ? (void __iomem *)&t->port2_stp_cfg->state :
+ (void __iomem *)&t->port1_stp_cfg->state);
+}
+
+u8 icssm_prueth_sw_get_stp_state(struct prueth *prueth, enum prueth_port port)
+{
+ struct fdb_tbl *t = prueth->fdb_tbl;
+ u8 state;
+
+ state = readb(port - 1 ? (void __iomem *)&t->port2_stp_cfg->state :
+ (void __iomem *)&t->port1_stp_cfg->state);
+ return state;
+}
+
+static int icssm_prueth_sw_attr_set(struct net_device *ndev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int err = 0;
+ u8 o_state;
+
+ /* Interface is not up */
+ if (!prueth->fdb_tbl)
+ return 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ o_state = icssm_prueth_sw_get_stp_state(prueth, emac->port_id);
+ icssm_prueth_sw_set_stp_state(prueth, emac->port_id,
+ attr->u.stp_state);
+
+ if (o_state != attr->u.stp_state)
+ icssm_prueth_sw_purge_fdb(emac);
+
+ dev_dbg(prueth->dev, "attr set: stp state:%u port:%u\n",
+ attr->u.stp_state, emac->port_id);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static void icssm_prueth_sw_fdb_offload(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, ndev, &info.info,
+ NULL);
+}
+
+/**
+ * icssm_sw_event_work - insert/delete fdb entry
+ *
+ * @work: work structure
+ *
+ */
+static void icssm_sw_event_work(struct work_struct *work)
+{
+ struct icssm_sw_event_work *switchdev_work =
+ container_of(work, struct icssm_sw_event_work, work);
+ struct prueth_emac *emac = switchdev_work->emac;
+ struct switchdev_notifier_fdb_info *fdb;
+ struct prueth *prueth = emac->prueth;
+ int port = emac->port_id;
+
+ rtnl_lock();
+
+ /* Interface is not up */
+ if (!emac->prueth->fdb_tbl)
+ goto free;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+ dev_dbg(prueth->dev,
+ "prueth fdb add: MACID = %pM vid = %u flags = %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user, port);
+
+ if (!fdb->added_by_user)
+ break;
+
+ if (fdb->is_local)
+ break;
+
+ icssm_prueth_sw_fdb_add(emac, fdb);
+ icssm_prueth_sw_fdb_offload(emac->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+ dev_dbg(prueth->dev,
+ "prueth fdb del: MACID = %pM vid = %u flags = %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user, port);
+
+ if (fdb->is_local)
+ break;
+
+ icssm_prueth_sw_fdb_del(emac, fdb);
+ break;
+ default:
+ break;
+ }
+
+free:
+ rtnl_unlock();
+
+ netdev_put(emac->ndev, &switchdev_work->ndev_tracker);
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+}
+
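+/* FDB add/del notifications can arrive in atomic context, so the MAC
+ * address is copied, a netdev reference is taken and the actual table
+ * update is deferred to a work item on system_long_wq.
+ */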
+/* called under rcu_read_lock() */
+static int icssm_prueth_sw_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct icssm_sw_event_work *switchdev_work;
+ int err;
+
+ if (!icssm_prueth_sw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set
+ (ndev, ptr, icssm_prueth_sw_port_dev_check,
+ icssm_prueth_sw_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, icssm_sw_event_work);
+ switchdev_work->emac = emac;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ netdev_hold(ndev, &switchdev_work->ndev_tracker, GFP_ATOMIC);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
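+/* Host MDB entries map onto the per-EMAC multicast filter bins:
+ * adding an entry allows the hash bin of the group address, deleting
+ * it disallows the bin unless another group still hashes into it.
+ */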
+static int icssm_prueth_switchdev_obj_add(struct net_device *ndev,
+ const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int ret = 0;
+ u8 hash;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ dev_dbg(prueth->dev, "MDB add: %s: vid %u:%pM port: %x\n",
+ ndev->name, mdb->vid, mdb->addr, emac->port_id);
+ hash = icssm_emac_get_mc_hash(mdb->addr, emac->mc_filter_mask);
+ icssm_emac_mc_filter_bin_allow(emac, hash);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int icssm_prueth_switchdev_obj_del(struct net_device *ndev,
+ const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct netdev_hw_addr *ha;
+ u8 hash, tmp_hash;
+ int ret = 0;
+ u8 *mask;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ dev_dbg(prueth->dev, "MDB del: %s: vid %u:%pM port: %x\n",
+ ndev->name, mdb->vid, mdb->addr, emac->port_id);
+ if (prueth->hw_bridge_dev) {
+ mask = emac->mc_filter_mask;
+ hash = icssm_emac_get_mc_hash(mdb->addr, mask);
+ netdev_for_each_mc_addr(ha, prueth->hw_bridge_dev) {
+ tmp_hash = icssm_emac_get_mc_hash(ha->addr,
+ mask);
+ /* Another MC address is in the bin.
+ * Don't disable.
+ */
+ if (tmp_hash == hash)
+ return 0;
+ }
+ icssm_emac_mc_filter_bin_disallow(emac, hash);
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/* switchdev notifiers */
+static int icssm_prueth_sw_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add
+ (ndev, ptr, icssm_prueth_sw_port_dev_check,
+ icssm_prueth_switchdev_obj_add);
+ return notifier_from_errno(err);
+
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del
+ (ndev, ptr, icssm_prueth_sw_port_dev_check,
+ icssm_prueth_switchdev_obj_del);
+ return notifier_from_errno(err);
+
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set
+ (ndev, ptr, icssm_prueth_sw_port_dev_check,
+ icssm_prueth_sw_attr_set);
+ return notifier_from_errno(err);
+
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+int icssm_prueth_sw_register_notifiers(struct prueth *prueth)
+{
+ int ret = 0;
+
+ prueth->prueth_switchdev_nb.notifier_call =
+ &icssm_prueth_sw_switchdev_event;
+ ret = register_switchdev_notifier(&prueth->prueth_switchdev_nb);
+ if (ret) {
+ dev_err(prueth->dev,
+ "register switchdev notifier failed ret:%d\n", ret);
+ return ret;
+ }
+
+ prueth->prueth_switchdev_bl_nb.notifier_call =
+ &icssm_prueth_sw_blocking_event;
+ ret = register_switchdev_blocking_notifier
+ (&prueth->prueth_switchdev_bl_nb);
+ if (ret) {
+ dev_err(prueth->dev,
+ "register switchdev blocking notifier failed ret:%d\n",
+ ret);
+ unregister_switchdev_notifier(&prueth->prueth_switchdev_nb);
+ }
+
+ return ret;
+}
+
+void icssm_prueth_sw_unregister_notifiers(struct prueth *prueth)
+{
+ unregister_switchdev_blocking_notifier(&prueth->prueth_switchdev_bl_nb);
+ unregister_switchdev_notifier(&prueth->prueth_switchdev_nb);
+}
diff --git a/drivers/net/ethernet/ti/icssm/icssm_switchdev.h b/drivers/net/ethernet/ti/icssm/icssm_switchdev.h
new file mode 100644
index 000000000000..b03a98e3472e
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_switchdev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __NET_TI_ICSSM_SWITCHDEV_H
+#define __NET_TI_ICSSM_SWITCHDEV_H
+
+#include "icssm_prueth.h"
+
+int icssm_prueth_sw_register_notifiers(struct prueth *prueth);
+void icssm_prueth_sw_unregister_notifiers(struct prueth *prueth);
+bool icssm_prueth_sw_port_dev_check(const struct net_device *ndev);
+#endif /* __NET_TI_ICSSM_SWITCHDEV_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_vlan_mcast_filter_mmap.h b/drivers/net/ethernet/ti/icssm/icssm_vlan_mcast_filter_mmap.h
new file mode 100644
index 000000000000..c177c19a36ef
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_vlan_mcast_filter_mmap.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * This file contains the VLAN/multicast filtering feature memory map
+ *
+ */
+
+#ifndef ICSS_VLAN_MULTICAST_FILTER_MM_H
+#define ICSS_VLAN_MULTICAST_FILTER_MM_H
+
+/* VLAN/Multicast filter defines & offsets,
+ * present on both PRU0 and PRU1 DRAM
+ */
+
+/* Feature enable/disable values for multicast filtering */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_DISABLED 0x00
+#define ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_ENABLED 0x01
+
+/* Feature enable/disable values for VLAN filtering */
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_DISABLED 0x00
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_ENABLED 0x01
+
+/* Add/remove multicast mac id for filtering bin */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_ALLOWED 0x01
+#define ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_NOT_ALLOWED 0x00
+
+/* Default HASH value for the multicast filtering Mask */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_INIT_VAL 0xFF
+
+/* Size requirements for Multicast filtering feature */
+#define ICSS_EMAC_FW_MULTICAST_TABLE_SIZE_BYTES 256
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_SIZE_BYTES 6
+#define ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_SIZE_BYTES 1
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OVERRIDE_STATUS_SIZE_BYTES 1
+#define ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_SIZE_BYTES 4
+
+/* Size requirements for VLAN filtering feature : 4096 bits = 512 bytes */
+#define ICSS_EMAC_FW_VLAN_FILTER_TABLE_SIZE_BYTES 512
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_SIZE_BYTES 1
+#define ICSS_EMAC_FW_VLAN_FILTER_DROP_CNT_SIZE_BYTES 4
+
+/* Mask override set status */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OVERRIDE_SET 1
+/* Mask override not set status */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OVERRIDE_NOT_SET 0
+/* 6 bytes HASH Mask for the MAC */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OFFSET 0xF4
+/* 0 -> multicast filtering disabled | 1 -> multicast filtering enabled */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_OFFSET \
+ (ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OFFSET + \
+ ICSS_EMAC_FW_MULTICAST_FILTER_MASK_SIZE_BYTES)
+/* Status indicating if the HASH override is done or not: 0: no, 1: yes */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_OVERRIDE_STATUS \
+ (ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_OFFSET + \
+ ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_SIZE_BYTES)
+/* Multicast drop statistics */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_OFFSET \
+ (ICSS_EMAC_FW_MULTICAST_FILTER_OVERRIDE_STATUS +\
+ ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OVERRIDE_STATUS_SIZE_BYTES)
+/* Multicast table */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_TABLE \
+ (ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_OFFSET +\
+ ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_SIZE_BYTES)
+
+/* Multicast filter defines & offsets for LRE */
+#define ICSS_LRE_FW_MULTICAST_TABLE_SEARCH_OP_CONTROL_BIT 0xE0
+/* one byte field :
+ * 0 -> multicast filtering disabled
+ * 1 -> multicast filtering enabled
+ */
+#define ICSS_LRE_FW_MULTICAST_FILTER_MASK 0xE4
+#define ICSS_LRE_FW_MULTICAST_FILTER_TABLE 0x100
+
+/* VLAN table Offsets */
+#define ICSS_EMAC_FW_VLAN_FLTR_TBL_BASE_ADDR 0x200
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_BITMAP_OFFSET 0xEF
+#define ICSS_EMAC_FW_VLAN_FILTER_DROP_CNT_OFFSET \
+ (ICSS_EMAC_FW_VLAN_FILTER_CTRL_BITMAP_OFFSET + \
+ ICSS_EMAC_FW_VLAN_FILTER_CTRL_SIZE_BYTES)
+
+/* VLAN filter Control Bit maps */
+/* one bit field, bit 0: | 0 : VLAN filter disabled (default),
+ * 1: VLAN filter enabled
+ */
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_ENABLE_BIT 0
+/* one bit field, bit 1: | 0 : untagged host rcv allowed (default),
+ * 1: untagged host rcv not allowed
+ */
+#define ICSS_EMAC_FW_VLAN_FILTER_UNTAG_HOST_RCV_ALLOW_CTRL_BIT 1
+/* one bit field, bit 2: | 0 : priotag host rcv allowed (default),
+ * 1: priotag host rcv not allowed
+ */
+#define ICSS_EMAC_FW_VLAN_FILTER_PRIOTAG_HOST_RCV_ALLOW_CTRL_BIT 2
+/* one bit field, bit 3: | 0 : skip sv vlan flow
+ * 1: take sv vlan flow (not applicable for dual emac)
+ */
+#define ICSS_EMAC_FW_VLAN_FILTER_SV_VLAN_FLOW_HOST_RCV_ALLOW_CTRL_BIT 3
+
+/* VLAN IDs */
+#define ICSS_EMAC_FW_VLAN_FILTER_PRIOTAG_VID 0
+#define ICSS_EMAC_FW_VLAN_FILTER_VID_MIN 0x0000
+#define ICSS_EMAC_FW_VLAN_FILTER_VID_MAX 0x0FFF
+
+/* VLAN Filtering Commands */
+#define ICSS_EMAC_FW_VLAN_FILTER_ADD_VLAN_VID_CMD 0x00
+#define ICSS_EMAC_FW_VLAN_FILTER_REMOVE_VLAN_VID_CMD 0x01
+
+/* Switch defines for VLAN/MC filtering */
+/* SRAM
+ * VLAN filter defines & offsets
+ */
+#define ICSS_LRE_FW_VLAN_FLTR_CTRL_BYTE 0x1FE
+/* one bit field | 0 : VLAN filter disabled
+ * | 1 : VLAN filter enabled
+ */
+#define ICSS_LRE_FW_VLAN_FLTR_TBL_BASE_ADDR 0x200
+
+#endif /* ICSS_VLAN_MULTICAST_FILTER_MM_H */
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index b9cbd3b4a8a2..9cfddaa807e2 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -65,14 +65,14 @@ struct netcp_addr {
struct netcp_stats {
struct u64_stats_sync syncp_rx ____cacheline_aligned_in_smp;
- u64 rx_packets;
- u64 rx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
u32 rx_errors;
u32 rx_dropped;
struct u64_stats_sync syncp_tx ____cacheline_aligned_in_smp;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
u32 tx_errors;
u32 tx_dropped;
};
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 5ed1c46bbcb1..eb8fc2ed05f4 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -759,8 +759,8 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
knav_pool_desc_put(netcp->rx_pool, desc);
u64_stats_update_begin(&rx_stats->syncp_rx);
- rx_stats->rx_packets++;
- rx_stats->rx_bytes += skb->len;
+ u64_stats_inc(&rx_stats->rx_packets);
+ u64_stats_add(&rx_stats->rx_bytes, skb->len);
u64_stats_update_end(&rx_stats->syncp_rx);
/* push skb up the stack */
@@ -1045,8 +1045,8 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
}
u64_stats_update_begin(&tx_stats->syncp_tx);
- tx_stats->tx_packets++;
- tx_stats->tx_bytes += skb->len;
+ u64_stats_inc(&tx_stats->tx_packets);
+ u64_stats_add(&tx_stats->tx_bytes, skb->len);
u64_stats_update_end(&tx_stats->syncp_tx);
dev_kfree_skb(skb);
pkts++;
@@ -1973,14 +1973,14 @@ netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
do {
start = u64_stats_fetch_begin(&p->syncp_rx);
- rxpackets = p->rx_packets;
- rxbytes = p->rx_bytes;
+ rxpackets = u64_stats_read(&p->rx_packets);
+ rxbytes = u64_stats_read(&p->rx_bytes);
} while (u64_stats_fetch_retry(&p->syncp_rx, start));
do {
start = u64_stats_fetch_begin(&p->syncp_tx);
- txpackets = p->tx_packets;
- txbytes = p->tx_bytes;
+ txpackets = u64_stats_read(&p->tx_packets);
+ txbytes = u64_stats_read(&p->tx_bytes);
} while (u64_stats_fetch_retry(&p->syncp_tx, start));
stats->rx_packets = rxpackets;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 32cadafa4b3b..b31b48d26575 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -179,8 +179,7 @@ static void wx_dma_sync_frag(struct wx_ring *rx_ring,
static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
union wx_rx_desc *rx_desc,
- struct sk_buff **skb,
- int *rx_buffer_pgcnt)
+ struct sk_buff **skb)
{
struct wx_rx_buffer *rx_buffer;
unsigned int size;
@@ -188,12 +187,6 @@ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
- *rx_buffer_pgcnt = page_count(rx_buffer->page);
-#else
- *rx_buffer_pgcnt = 0;
-#endif
-
prefetchw(rx_buffer->page);
*skb = rx_buffer->skb;
@@ -221,8 +214,7 @@ skip_sync:
static void wx_put_rx_buffer(struct wx_ring *rx_ring,
struct wx_rx_buffer *rx_buffer,
- struct sk_buff *skb,
- int rx_buffer_pgcnt)
+ struct sk_buff *skb)
{
/* clear contents of rx_buffer */
rx_buffer->page = NULL;
@@ -685,7 +677,6 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
struct wx_rx_buffer *rx_buffer;
union wx_rx_desc *rx_desc;
struct sk_buff *skb;
- int rx_buffer_pgcnt;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= WX_RX_BUFFER_WRITE) {
@@ -703,7 +694,7 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
*/
dma_rmb();
- rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt);
+ rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb);
/* retrieve a buffer from the ring */
skb = wx_build_skb(rx_ring, rx_buffer, rx_desc);
@@ -714,7 +705,7 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
break;
}
- wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
+ wx_put_rx_buffer(rx_ring, rx_buffer, skb);
cleaned_count++;
/* place incomplete frames back on ring for completion */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index f3cb00109529..59d758acccf0 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -193,6 +193,13 @@ static int txgbe_get_ethtool_fdir_all(struct txgbe *txgbe,
return 0;
}
+static u32 txgbe_get_rx_ring_count(struct net_device *dev)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ return wx->num_rx_queues;
+}
+
static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -201,10 +208,6 @@ static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = wx->num_rx_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = txgbe->fdir_filter_count;
ret = 0;
@@ -587,6 +590,7 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_channels = txgbe_set_channels,
.get_rxnfc = txgbe_get_rxnfc,
.set_rxnfc = txgbe_set_rxnfc,
+ .get_rx_ring_count = txgbe_get_rx_ring_count,
.get_rxfh_fields = wx_get_rxfh_fields,
.set_rxfh_fields = wx_set_rxfh_fields,
.get_rxfh_indir_size = wx_rss_indir_size,
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 284031fb2e2c..998bacd508b8 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2787,7 +2787,7 @@ static int axienet_probe(struct platform_device *pdev)
int addr_width = 32;
u32 value;
- ndev = alloc_etherdev(sizeof(*lp));
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
if (!ndev)
return -ENOMEM;
@@ -2815,41 +2815,32 @@ static int axienet_probe(struct platform_device *pdev)
seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
- lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
+ lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev,
+ "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
* treat the first clock specified as the AXI clock.
*/
- lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
- }
- if (IS_ERR(lp->axi_clk)) {
- ret = PTR_ERR(lp->axi_clk);
- goto free_netdev;
- }
- ret = clk_prepare_enable(lp->axi_clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
- goto free_netdev;
+ lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
}
+ if (IS_ERR(lp->axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(lp->axi_clk),
+ "could not get AXI clock\n");
lp->misc_clks[0].id = "axis_clk";
lp->misc_clks[1].id = "ref_clk";
lp->misc_clks[2].id = "mgt_clk";
- ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
- if (ret)
- goto cleanup_clk;
-
- ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+ ret = devm_clk_bulk_get_optional_enable(&pdev->dev, XAE_NUM_MISC_CLOCKS,
+ lp->misc_clks);
if (ret)
- goto cleanup_clk;
+ return dev_err_probe(&pdev->dev, ret,
+ "could not get/enable misc. clocks\n");
/* Map device registers */
lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
- if (IS_ERR(lp->regs)) {
- ret = PTR_ERR(lp->regs);
- goto cleanup_clk;
- }
+ if (IS_ERR(lp->regs))
+ return PTR_ERR(lp->regs);
lp->regs_start = ethres->start;
/* Setup checksum offload, but default to off if not specified */
@@ -2918,19 +2909,17 @@ static int axienet_probe(struct platform_device *pdev)
lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
break;
default:
- ret = -EINVAL;
- goto cleanup_clk;
+ return -EINVAL;
}
} else {
ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
if (ret)
- goto cleanup_clk;
+ return ret;
}
if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
- ret = -EINVAL;
- goto cleanup_clk;
+ return -EINVAL;
}
if (!of_property_present(pdev->dev.of_node, "dmas")) {
@@ -2945,7 +2934,7 @@ static int axienet_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"unable to get DMA resource\n");
of_node_put(np);
- goto cleanup_clk;
+ return ret;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev,
&dmares);
@@ -2962,19 +2951,17 @@ static int axienet_probe(struct platform_device *pdev)
}
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
- ret = PTR_ERR(lp->dma_regs);
- goto cleanup_clk;
+ return PTR_ERR(lp->dma_regs);
}
if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
dev_err(&pdev->dev, "could not determine irqs\n");
- ret = -ENOMEM;
- goto cleanup_clk;
+ return -ENOMEM;
}
/* Reset core now that clocks are enabled, prior to accessing MDIO */
ret = __axienet_device_reset(lp);
if (ret)
- goto cleanup_clk;
+ return ret;
/* Autodetect the need for 64-bit DMA pointers.
* When the IP is configured for a bus width bigger than 32 bits,
@@ -3001,14 +2988,13 @@ static int axienet_probe(struct platform_device *pdev)
}
if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
- ret = -EINVAL;
- goto cleanup_clk;
+ return -EINVAL;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
- goto cleanup_clk;
+ return ret;
}
netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
@@ -3018,15 +3004,12 @@ static int axienet_probe(struct platform_device *pdev)
lp->eth_irq = platform_get_irq_optional(pdev, 0);
if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
- ret = lp->eth_irq;
- goto cleanup_clk;
+ return lp->eth_irq;
}
tx_chan = dma_request_chan(lp->dev, "tx_chan0");
- if (IS_ERR(tx_chan)) {
- ret = PTR_ERR(tx_chan);
- dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
- goto cleanup_clk;
- }
+ if (IS_ERR(tx_chan))
+ return dev_err_probe(lp->dev, PTR_ERR(tx_chan),
+ "No Ethernet DMA (TX) channel found\n");
cfg.reset = 1;
/* As name says VDMA but it has support for DMA channel reset */
@@ -3034,7 +3017,7 @@ static int axienet_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Reset channel failed\n");
dma_release_channel(tx_chan);
- goto cleanup_clk;
+ return ret;
}
dma_release_channel(tx_chan);
@@ -3139,13 +3122,6 @@ cleanup_mdio:
put_device(&lp->pcs_phy->dev);
if (lp->mii_bus)
axienet_mdio_teardown(lp);
-cleanup_clk:
- clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
- clk_disable_unprepare(lp->axi_clk);
-
-free_netdev:
- free_netdev(ndev);
-
return ret;
}
@@ -3163,11 +3139,6 @@ static void axienet_remove(struct platform_device *pdev)
put_device(&lp->pcs_phy->dev);
axienet_mdio_teardown(lp);
-
- clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
- clk_disable_unprepare(lp->axi_clk);
-
- free_netdev(ndev);
}
static void axienet_shutdown(struct platform_device *pdev)