Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/idt77252.c4
-rw-r--r--drivers/bluetooth/btintel_pcie.c3
-rw-r--r--drivers/bluetooth/btmtksdio.c8
-rw-r--r--drivers/bluetooth/btnxpuart.c6
-rw-r--r--drivers/bluetooth/btqca.c58
-rw-r--r--drivers/bluetooth/btusb.c26
-rw-r--r--drivers/bluetooth/hci_bcm4377.c11
-rw-r--r--drivers/bluetooth/hci_intel.c10
-rw-r--r--drivers/bluetooth/hci_qca.c52
-rw-r--r--drivers/clk/clk-devres.c50
-rw-r--r--drivers/dpll/Kconfig15
-rw-r--r--drivers/dpll/dpll_core.c288
-rw-r--r--drivers/dpll/dpll_core.h11
-rw-r--r--drivers/dpll/dpll_netlink.c87
-rw-r--r--drivers/dpll/dpll_nl.c1
-rw-r--r--drivers/dpll/zl3073x/core.c7
-rw-r--r--drivers/dpll/zl3073x/core.h30
-rw-r--r--drivers/dpll/zl3073x/dpll.c178
-rw-r--r--drivers/dpll/zl3073x/dpll.h2
-rw-r--r--drivers/dpll/zl3073x/out.h17
-rw-r--r--drivers/dpll/zl3073x/prop.c34
-rw-r--r--drivers/infiniband/hw/bng_re/Makefile2
-rw-r--r--drivers/infiniband/hw/bng_re/bng_fw.c2
-rw-r--r--drivers/infiniband/hw/bng_re/bng_res.c4
-rw-r--r--drivers/infiniband/hw/bng_re/bng_res.h2
-rw-r--r--drivers/infiniband/hw/bng_re/bng_roce_hsi.h6450
-rw-r--r--drivers/infiniband/hw/bng_re/bng_tlv.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c4
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c6
-rw-r--r--drivers/mmc/core/sdio_bus.c25
-rw-r--r--drivers/net/Kconfig3
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/arcnet/com20020-pci.c15
-rw-r--r--drivers/net/arcnet/com20020.c16
-rw-r--r--drivers/net/bonding/bond_3ad.c16
-rw-r--r--drivers/net/bonding/bond_main.c128
-rw-r--r--drivers/net/caif/caif_serial.c5
-rw-r--r--drivers/net/can/dev/skb.c123
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c32
-rw-r--r--drivers/net/can/sja1000/sja1000.c58
-rw-r--r--drivers/net/can/vxcan.c15
-rw-r--r--drivers/net/dsa/Kconfig3
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/dsa_loop.c8
-rw-r--r--drivers/net/dsa/lantiq/Kconfig5
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.c46
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.h7
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip_common.c33
-rw-r--r--drivers/net/dsa/lantiq/mxl-gsw1xx.c255
-rw-r--r--drivers/net/dsa/lantiq/mxl-gsw1xx.h13
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c15
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h7
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c63
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp_reg.h16
-rw-r--r--drivers/net/dsa/mt7530-mdio.c4
-rw-r--r--drivers/net/dsa/mxl862xx/Kconfig12
-rw-r--r--drivers/net/dsa/mxl862xx/Makefile3
-rw-r--r--drivers/net/dsa/mxl862xx/mxl862xx-api.h675
-rw-r--r--drivers/net/dsa/mxl862xx/mxl862xx-cmd.h49
-rw-r--r--drivers/net/dsa/mxl862xx/mxl862xx-host.c245
-rw-r--r--drivers/net/dsa/mxl862xx/mxl862xx-host.h12
-rw-r--r--drivers/net/dsa/mxl862xx/mxl862xx.c476
-rw-r--r--drivers/net/dsa/mxl862xx/mxl862xx.h16
-rw-r--r--drivers/net/dsa/ocelot/felix.c4
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c88
-rw-r--r--drivers/net/dsa/yt921x.c496
-rw-r--r--drivers/net/dsa/yt921x.h75
-rw-r--r--drivers/net/ethernet/8390/8390.c14
-rw-r--r--drivers/net/ethernet/8390/8390p.c11
-rw-r--r--drivers/net/ethernet/Kconfig13
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/adi/adin1110.c2
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c41
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h2
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c60
-rw-r--r--drivers/net/ethernet/alacritech/slic.h50
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c8
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c22
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/declance.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c64
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c18
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c3
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c32
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.h36
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c58
-rw-r--r--drivers/net/ethernet/broadcom/bnge/Makefile3
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_auxr.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_devlink.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h446
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c67
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.c409
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.h123
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_txrx.c1642
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_txrx.h126
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c253
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c131
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c51
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c11
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c17
-rw-r--r--drivers/net/ethernet/dnet.c877
-rw-r--r--drivers/net/ethernet/dnet.h220
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c43
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c16
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ethtool.c11
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c356
-rw-r--r--drivers/net/ethernet/freescale/fec.h14
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c1601
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c14
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c11
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c9
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c11
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c19
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Kconfig1
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Makefile1
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_csr.h6
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_filter.c417
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c115
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h6
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h24
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c99
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c90
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h23
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_irq.c97
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.c58
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_main.c186
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c55
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c313
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h53
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h69
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c377
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c290
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h47
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h60
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c6
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.c27
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.h21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c34
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.h16
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c758
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.h30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_irq.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c196
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tspll.c217
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tspll.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h199
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h179
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c18
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c93
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c238
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c17
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c810
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h44
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c21
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c1096
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h88
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.c79
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.h26
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c45
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c47
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c66
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h31
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c8
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c3
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c39
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.c5
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c2
-rw-r--r--drivers/net/ethernet/marvell/skge.c1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c429
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c382
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c215
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c12
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h12
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c407
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c8
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c14
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c44
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h10
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c24
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c28
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h6
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c4
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c13
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c80
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c13
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c28
-rw-r--r--drivers/net/ethernet/neterion/Kconfig35
-rw-r--r--drivers/net/ethernet/neterion/Makefile6
-rw-r--r--drivers/net/ethernet/neterion/s2io-regs.h958
-rw-r--r--drivers/net/ethernet/neterion/s2io.c8572
-rw-r--r--drivers/net/ethernet/neterion/s2io.h1124
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c11
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c25
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c12
-rw-r--r--drivers/net/ethernet/realtek/8139too.c40
-rw-r--r--drivers/net/ethernet/realtek/Kconfig16
-rw-r--r--drivers/net/ethernet/realtek/Makefile1
-rw-r--r--drivers/net/ethernet/realtek/atp.c886
-rw-r--r--drivers/net/ethernet/realtek/atp.h262
-rw-r--r--drivers/net/ethernet/realtek/r8169.h3
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c243
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c34
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h18
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.c15
-rw-r--r--drivers/net/ethernet/renesas/rswitch_main.c11
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c30
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c11
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h1
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c12
-rw-r--r--drivers/net/ethernet/sfc/nic.h7
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c11
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h1
-rw-r--r--drivers/net/ethernet/sis/sis900.c31
-rw-r--r--drivers/net/ethernet/smsc/epic100.c35
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c4
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.c111
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-motorcomm.c384
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c1245
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h72
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h79
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c70
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h167
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h172
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c76
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c190
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c45
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h68
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c11
-rw-r--r--drivers/net/ethernet/sun/sunhme.c3
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/Makefile2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c15
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c1
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c13
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h1
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c9
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.c545
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.h20
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth_fdb_tbl.h76
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c1065
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth_switch.h37
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_switch.h103
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_switchdev.c333
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_switchdev.h13
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_vlan_mcast_filter_mmap.h120
-rw-r--r--drivers/net/ethernet/ti/netcp.h8
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c16
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c17
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c12
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c83
-rw-r--r--drivers/net/geneve.c557
-rw-r--r--drivers/net/hamradio/hdlcdrv.c20
-rw-r--r--drivers/net/hippi/Kconfig40
-rw-r--r--drivers/net/hippi/Makefile6
-rw-r--r--drivers/net/hippi/rrunner.c1687
-rw-r--r--drivers/net/hippi/rrunner.h848
-rw-r--r--drivers/net/ifb.c18
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c55
-rw-r--r--drivers/net/macsec.c6
-rw-r--r--drivers/net/mctp/mctp-i2c.c9
-rw-r--r--drivers/net/net_failover.c13
-rw-r--r--drivers/net/netconsole.c450
-rw-r--r--drivers/net/netdevsim/netdev.c7
-rw-r--r--drivers/net/netdevsim/netdevsim.h8
-rw-r--r--drivers/net/netdevsim/psp.c18
-rw-r--r--drivers/net/ovpn/peer.c2
-rw-r--r--drivers/net/pcs/Kconfig1
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c63
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c105
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/adin.c20
-rw-r--r--drivers/net/phy/air_en8811h.c499
-rw-r--r--drivers/net/phy/ax88796b_rust.rs7
-rw-r--r--drivers/net/phy/dp83822.c71
-rw-r--r--drivers/net/phy/dp83867.c63
-rw-r--r--drivers/net/phy/fixed_phy.c83
-rw-r--r--drivers/net/phy/marvell-88x2222.c94
-rw-r--r--drivers/net/phy/marvell.c92
-rw-r--r--drivers/net/phy/marvell10g.c49
-rw-r--r--drivers/net/phy/mdio_device.c13
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c2
-rw-r--r--drivers/net/phy/micrel.c58
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c34
-rw-r--r--drivers/net/phy/motorcomm.c4
-rw-r--r--drivers/net/phy/mxl-gpy.c61
-rw-r--r--drivers/net/phy/phy-caps.h6
-rw-r--r--drivers/net/phy/phy-core.c8
-rw-r--r--drivers/net/phy/phy_caps.c67
-rw-r--r--drivers/net/phy/phy_device.c427
-rw-r--r--drivers/net/phy/phy_port.c212
-rw-r--r--drivers/net/phy/phylib-internal.h6
-rw-r--r--drivers/net/phy/phylib.h5
-rw-r--r--drivers/net/phy/phylink.c79
-rw-r--r--drivers/net/phy/qcom/at803x.c77
-rw-r--r--drivers/net/phy/qcom/qca807x.c72
-rw-r--r--drivers/net/phy/qt2025.rs5
-rw-r--r--drivers/net/phy/realtek/realtek_main.c432
-rw-r--r--drivers/net/phy/sfp.c8
-rw-r--r--drivers/net/ppp/ppp_generic.c30
-rw-r--r--drivers/net/thunderbolt/main.c53
-rw-r--r--drivers/net/usb/Kconfig2
-rw-r--r--drivers/net/usb/aqc111.c5
-rw-r--r--drivers/net/usb/asix.h3
-rw-r--r--drivers/net/usb/asix_common.c8
-rw-r--r--drivers/net/usb/asix_devices.c30
-rw-r--r--drivers/net/usb/ax88172a.c2
-rw-r--r--drivers/net/usb/ax88179_178a.c8
-rw-r--r--drivers/net/usb/catc.c7
-rw-r--r--drivers/net/usb/dm9601.c27
-rw-r--r--drivers/net/usb/int51x1.c39
-rw-r--r--drivers/net/usb/mcs7830.c15
-rw-r--r--drivers/net/usb/pegasus.c3
-rw-r--r--drivers/net/usb/r8152.c12
-rw-r--r--drivers/net/usb/rtl8150.c3
-rw-r--r--drivers/net/usb/sierra_net.c14
-rw-r--r--drivers/net/usb/smsc75xx.c7
-rw-r--r--drivers/net/usb/smsc95xx.c13
-rw-r--r--drivers/net/usb/sr9700.c251
-rw-r--r--drivers/net/usb/sr9700.h22
-rw-r--r--drivers/net/usb/sr9800.c30
-rw-r--r--drivers/net/usb/sr9800.h3
-rw-r--r--drivers/net/usb/usbnet.c8
-rw-r--r--drivers/net/vxlan/vxlan_core.c16
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c2
-rw-r--r--drivers/net/wan/framer/framer-core.c6
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c53
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig11
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath11k/cfr.c1023
-rw-r--r--drivers/net/wireless/ath/ath11k/cfr.h308
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c108
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h23
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c50
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c142
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c12
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c9
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c19
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c23
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c9
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c147
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h97
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile8
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c165
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.h31
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c326
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h7
-rw-r--r--drivers/net/wireless/ath/ath12k/cmn_defs.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c75
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h179
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c8
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c8
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c197
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h85
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.c62
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c561
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h1742
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_cmn.h106
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_htt.c1353
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_htt.h1546
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c3517
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h58
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_peer.c690
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_peer.h182
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c3478
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h191
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c1634
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h39
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c2134
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h1523
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c1680
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h96
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c1255
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h142
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c132
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c255
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h49
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c453
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h112
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c180
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h16
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/Makefile20
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/ahb.c75
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/ahb.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/ce.c973
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/ce.h22
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/core.c68
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/core.h11
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/dp.c181
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/dp.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c3385
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/dp_mon.h23
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c2246
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/dp_rx.h60
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/dp_tx.c978
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/dp_tx.h16
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal.c713
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal.h561
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal_desc.h (renamed from drivers/net/wireless/ath/ath12k/hal_desc.h)331
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal_qcc2072.c503
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal_qcc2072.h13
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal_qcn9274.c1038
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal_qcn9274.h45
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal_rx.c (renamed from drivers/net/wireless/ath/ath12k/hal_rx.c)284
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal_rx.h (renamed from drivers/net/wireless/ath/ath12k/hal_rx.h)424
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal_rx_desc.h (renamed from drivers/net/wireless/ath/ath12k/rx_desc.h)72
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal_tx.c (renamed from drivers/net/wireless/ath/ath12k/hal_tx.c)23
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal_tx.h (renamed from drivers/net/wireless/ath/ath12k/hal_tx.h)30
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal_wcn7850.c809
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hal_wcn7850.h40
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hw.c1049
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/hw.h13
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/mhi.c138
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/mhi.h11
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/pci.c215
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/pci.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/wmi.c110
-rw-r--r--drivers/net/wireless/ath/ath12k/wifi7/wmi.h15
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c334
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h61
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.c21
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h15
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c9
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c7
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c117
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h159
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c206
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c114
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/nan.c299
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/nan.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c142
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c202
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c193
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c4
-rw-r--r--drivers/net/wireless/intersil/p54/main.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c13
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c20
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723ds.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c134
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h129
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c69
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c237
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h198
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c162
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h6
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse_be.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c1163
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h521
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c199
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h102
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c44
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c696
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c22
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h57
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c138
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c428
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h36
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c563
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h934
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c21
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852au.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c32
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c8
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c89
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c48
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h22
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c14
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c29
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c5
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c9
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c6
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c2
-rw-r--r--drivers/net/wwan/Kconfig1
-rw-r--r--drivers/net/wwan/mhi_wwan_ctrl.c1
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c3
-rw-r--r--drivers/net/wwan/wwan_core.c278
-rw-r--r--drivers/net/wwan/wwan_hwsim.c201
-rw-r--r--drivers/net/xen-netback/hash.c2
-rw-r--r--drivers/net/xen-netfront.c24
-rw-r--r--drivers/nfc/nxp-nci/i2c.c2
-rw-r--r--drivers/phy/Kconfig22
-rw-r--r--drivers/phy/Makefile2
-rw-r--r--drivers/phy/phy-common-props-test.c422
-rw-r--r--drivers/phy/phy-common-props.c209
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/ptp/ptp_ocp.c18
-rw-r--r--drivers/ptp/ptp_vmclock.c236
-rw-r--r--drivers/vhost/vsock.c44
730 files changed, 62097 insertions, 42413 deletions
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index f2e91b7d79f0..888695ccc2a7 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1844,7 +1844,6 @@ add_rx_skb(struct idt77252_dev *card, int queue,
{
struct sk_buff *skb;
dma_addr_t paddr;
- u32 handle;
while (count--) {
skb = dev_alloc_skb(size);
@@ -1876,8 +1875,7 @@ outunmap:
skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);
outpoolrm:
- handle = IDT77252_PRV_POOL(skb);
- card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
+ sb_pool_remove(card, skb);
outfree:
dev_kfree_skb(skb);
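
The sb_pool_remove() helper used above is not shown in this hunk; judging from the open-coded sequence it replaces, it plausibly wraps the same two steps. A sketch under that assumption, not the actual definition:

static void sb_pool_remove(struct idt77252_dev *card, struct sk_buff *skb)
{
	u32 handle = IDT77252_PRV_POOL(skb);

	card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
}
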
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 704767b334b9..eaf5de46a702 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -1190,8 +1190,7 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
skb = NULL; /* skb is freed in the callee */
exit_error:
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
if (ret)
hdev->stat.err_rx++;
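
Dropping the NULL check is safe because kfree_skb() follows the same convention as kfree(): a NULL pointer is silently ignored. Schematically (not the actual implementation):

#include <linux/skbuff.h>

static inline void null_safe_free(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;		/* NULL is a no-op, so callers need no guard */
	/* ... drop the reference and release the buffer ... */
}
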
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index fba3ab6d30a5..e986e5af51ae 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -1472,7 +1472,6 @@ static void btmtksdio_remove(struct sdio_func *func)
hci_free_dev(hdev);
}
-#ifdef CONFIG_PM
static int btmtksdio_runtime_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
@@ -1542,18 +1541,13 @@ static const struct dev_pm_ops btmtksdio_pm_ops = {
RUNTIME_PM_OPS(btmtksdio_runtime_suspend, btmtksdio_runtime_resume, NULL)
};
-#define BTMTKSDIO_PM_OPS (&btmtksdio_pm_ops)
-#else /* CONFIG_PM */
-#define BTMTKSDIO_PM_OPS NULL
-#endif /* CONFIG_PM */
-
static struct sdio_driver btmtksdio_driver = {
.name = "btmtksdio",
.probe = btmtksdio_probe,
.remove = btmtksdio_remove,
.id_table = btmtksdio_table,
.drv = {
- .pm = BTMTKSDIO_PM_OPS,
+ .pm = pm_ptr(&btmtksdio_pm_ops),
}
};
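
pm_ptr() removes the need for the BTMTKSDIO_PM_OPS indirection: when CONFIG_PM is disabled it evaluates to NULL, and because it is an ordinary C expression rather than a preprocessor branch, the callbacks stay visible to the compiler and are discarded by dead-code elimination instead of triggering unused-function warnings. Its definition is essentially:

#define PTR_IF(cond, ptr)	((cond) ? (ptr) : NULL)
#define pm_ptr(_ptr)		PTR_IF(IS_ENABLED(CONFIG_PM), (_ptr))
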
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 3b1e9224e965..e7036a48ce48 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1947,8 +1947,7 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
hci_free_dev(hdev);
}
-#ifdef CONFIG_PM_SLEEP
-static int nxp_serdev_suspend(struct device *dev)
+static int __maybe_unused nxp_serdev_suspend(struct device *dev)
{
struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
struct ps_data *psdata = &nxpdev->psdata;
@@ -1962,7 +1961,7 @@ static int nxp_serdev_suspend(struct device *dev)
return 0;
}
-static int nxp_serdev_resume(struct device *dev)
+static int __maybe_unused nxp_serdev_resume(struct device *dev)
{
struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
struct ps_data *psdata = &nxpdev->psdata;
@@ -1975,7 +1974,6 @@ static int nxp_serdev_resume(struct device *dev)
ps_control(psdata->hdev, PS_STATE_AWAKE);
return 0;
}
-#endif
#ifdef CONFIG_DEV_COREDUMP
static void nxp_serdev_coredump(struct device *dev)
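
With the CONFIG_PM_SLEEP guards gone, nxp_serdev_suspend() and nxp_serdev_resume() are always compiled; __maybe_unused merely silences the warning emitted when the PM-ops table macros discard their references on non-sleep builds. The annotation is a plain compiler attribute:

#define __maybe_unused	__attribute__((__unused__))
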
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 7c958d6065be..74f820e89655 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -581,28 +581,11 @@ static int qca_download_firmware(struct hci_dev *hdev,
ret = request_firmware(&fw, config->fwname, &hdev->dev);
if (ret) {
- /* For WCN6750, if mbn file is not present then check for
- * tlv file.
- */
- if (soc_type == QCA_WCN6750 && config->type == ELF_TYPE_PATCH) {
- bt_dev_dbg(hdev, "QCA Failed to request file: %s (%d)",
- config->fwname, ret);
- config->type = TLV_TYPE_PATCH;
- snprintf(config->fwname, sizeof(config->fwname),
- "qca/msbtfw%02x.tlv", rom_ver);
- bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
- ret = request_firmware(&fw, config->fwname, &hdev->dev);
- if (ret) {
- bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
- config->fwname, ret);
- return ret;
- }
- }
/* If the board-specific file is missing, try loading the default
* one, unless that was attempted already.
*/
- else if (config->type == TLV_TYPE_NVM &&
- qca_get_alt_nvm_file(config->fwname, sizeof(config->fwname))) {
+ if (config->type == TLV_TYPE_NVM &&
+ qca_get_alt_nvm_file(config->fwname, sizeof(config->fwname))) {
bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
ret = request_firmware(&fw, config->fwname, &hdev->dev);
if (ret) {
@@ -847,8 +830,12 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/msbtfw%02x.mbn", rom_ver);
break;
case QCA_WCN6855:
+ /* For historical reasons, the WCN685x chip has been using firmware
+ * names without the "wcn" prefix. The mapping between the chip and
+ * its corresponding firmware has now been corrected.
+ */
snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpbtfw%02x.tlv", rom_ver);
+ "qca/wcnhpbtfw%02x.tlv", rom_ver);
break;
case QCA_WCN7850:
snprintf(config.fwname, sizeof(config.fwname),
@@ -861,8 +848,26 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
}
err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
+ /* For WCN6750, if the mbn file is not present, fall back to the
+ * tlv file.
+ */
+ if (err < 0 && soc_type == QCA_WCN6750) {
+ bt_dev_dbg(hdev, "QCA Failed to request file: %s (%d)",
+ config.fwname, err);
+ config.type = TLV_TYPE_PATCH;
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/msbtfw%02x.tlv", rom_ver);
+ bt_dev_info(hdev, "QCA Downloading %s", config.fwname);
+ err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
+ } else if (err < 0 && !rampatch_name && soc_type == QCA_WCN6855) {
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
+ err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
+ }
+
if (err < 0) {
- bt_dev_err(hdev, "QCA Failed to download patch (%d)", err);
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config.fwname, err);
return err;
}
@@ -923,7 +928,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
case QCA_WCN6855:
qca_read_fw_board_id(hdev, &boardid);
qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
- "hpnv", soc_type, ver, rom_ver, boardid);
+ "wcnhpnv", soc_type, ver, rom_ver, boardid);
break;
case QCA_WCN7850:
qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
@@ -936,8 +941,15 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
}
err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
+ if (err < 0 && !firmware_name && soc_type == QCA_WCN6855) {
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hpnv", soc_type, ver, rom_ver, boardid);
+ err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
+ }
+
if (err < 0) {
- bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err);
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config.fwname, err);
return err;
}
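
The fallback handling moves out of qca_download_firmware() into the caller: the new firmware name is tried first, and only on failure does the code retry once with the legacy name. The shape of the pattern, with illustrative file names:

#include <linux/firmware.h>

static int example_request(struct hci_dev *hdev, const struct firmware **fw)
{
	int ret;

	ret = request_firmware(fw, "qca/wcnhpbtfw02.tlv", &hdev->dev);	/* new name */
	if (ret)
		ret = request_firmware(fw, "qca/hpbtfw02.tlv", &hdev->dev); /* legacy */
	return ret;	/* non-zero: neither name could be loaded */
}
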
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index ded09e94d296..fcec8e589e81 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -521,12 +521,16 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x0bda, 0xb850), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3600), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3601), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x0489, 0xe112), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8851BU Bluetooth devices */
{ USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x2001, 0x332a), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x7392, 0xe611), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
@@ -557,6 +561,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3612), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe122), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -577,8 +583,6 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x13d3, 0x3618), .driver_info = BTUSB_REALTEK |
- BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe123), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
@@ -637,6 +641,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3622), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe158), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7921 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
@@ -773,6 +779,7 @@ static const struct usb_device_id quirks_table[] = {
/* Additional Realtek 8723BU Bluetooth devices */
{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x2c0a, 0x8761), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8723DE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
@@ -4462,17 +4469,17 @@ static void btusb_disconnect(struct usb_interface *intf)
kfree(data);
}
-#ifdef CONFIG_PM
static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
{
struct btusb_data *data = usb_get_intfdata(intf);
BT_DBG("intf %p", intf);
- /* Don't auto-suspend if there are connections; external suspend calls
- * shall never fail.
+ /* Don't auto-suspend if there are connections or discovery in
+ * progress; external suspend calls shall never fail.
*/
- if (PMSG_IS_AUTO(message) && hci_conn_count(data->hdev))
+ if (PMSG_IS_AUTO(message) &&
+ (hci_conn_count(data->hdev) || hci_discovery_active(data->hdev)))
return -EBUSY;
if (data->suspend_count++)
@@ -4616,7 +4623,6 @@ done:
return err;
}
-#endif
#ifdef CONFIG_DEV_COREDUMP
static void btusb_coredump(struct device *dev)
@@ -4633,10 +4639,8 @@ static struct usb_driver btusb_driver = {
.name = "btusb",
.probe = btusb_probe,
.disconnect = btusb_disconnect,
-#ifdef CONFIG_PM
- .suspend = btusb_suspend,
- .resume = btusb_resume,
-#endif
+ .suspend = pm_ptr(btusb_suspend),
+ .resume = pm_ptr(btusb_resume),
.id_table = btusb_table,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
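
PMSG_IS_AUTO() is what confines the -EBUSY veto to runtime autosuspend: a system-wide suspend passes a non-auto message and must never fail, so it bypasses the busy check. A minimal sketch, where device_still_busy() stands in for the driver-specific condition:

static bool device_still_busy(struct usb_interface *intf);	/* stand-in */

static int example_suspend(struct usb_interface *intf, pm_message_t message)
{
	if (PMSG_IS_AUTO(message) && device_still_busy(intf))
		return -EBUSY;	/* refuse autosuspend only */

	/* system sleep proceeds unconditionally */
	return 0;
}
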
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index 45e6d84224ee..925d0a635945 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -2416,8 +2416,9 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hdev);
}
-static int bcm4377_suspend(struct pci_dev *pdev, pm_message_t state)
+static int bcm4377_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev);
int ret;
@@ -2431,8 +2432,9 @@ static int bcm4377_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int bcm4377_resume(struct pci_dev *pdev)
+static int bcm4377_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev);
iowrite32(BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE,
@@ -2441,6 +2443,8 @@ static int bcm4377_resume(struct pci_dev *pdev)
return hci_resume_dev(bcm4377->hdev);
}
+static DEFINE_SIMPLE_DEV_PM_OPS(bcm4377_ops, bcm4377_suspend, bcm4377_resume);
+
static const struct dmi_system_id bcm4377_dmi_board_table[] = {
{
.matches = {
@@ -2541,8 +2545,7 @@ static struct pci_driver bcm4377_pci_driver = {
.name = "hci_bcm4377",
.id_table = bcm4377_devid_table,
.probe = bcm4377_probe,
- .suspend = bcm4377_suspend,
- .resume = bcm4377_resume,
+ .driver.pm = &bcm4377_ops,
};
module_pci_driver(bcm4377_pci_driver);
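
This is the standard conversion from the legacy PCI .suspend/.resume hooks to generic dev_pm_ops: the callbacks now take a struct device, recover the pci_dev via to_pci_dev(), and the ops table is wired through .driver.pm. In outline, with invented driver names:

static int example_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);	/* recover the PCI handle */

	/* ... quiesce the hardware ... */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* ... bring the hardware back up ... */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_driver = {
	.driver.pm = &example_pm_ops,	/* replaces legacy .suspend/.resume */
};
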
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 20baf2895dec..f7570c2eaa46 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -126,7 +126,6 @@ static int intel_wait_booting(struct hci_uart *hu)
return err;
}
-#ifdef CONFIG_PM
static int intel_wait_lpm_transaction(struct hci_uart *hu)
{
struct intel_data *intel = hu->priv;
@@ -237,7 +236,6 @@ static int intel_lpm_resume(struct hci_uart *hu)
return 0;
}
-#endif /* CONFIG_PM */
static int intel_lpm_host_wake(struct hci_uart *hu)
{
@@ -1066,7 +1064,6 @@ static const struct acpi_device_id intel_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, intel_acpi_match);
#endif
-#ifdef CONFIG_PM
static int intel_suspend_device(struct device *dev)
{
struct intel_device *idev = dev_get_drvdata(dev);
@@ -1090,10 +1087,8 @@ static int intel_resume_device(struct device *dev)
return 0;
}
-#endif
-#ifdef CONFIG_PM_SLEEP
-static int intel_suspend(struct device *dev)
+static int __maybe_unused intel_suspend(struct device *dev)
{
struct intel_device *idev = dev_get_drvdata(dev);
@@ -1103,7 +1098,7 @@ static int intel_suspend(struct device *dev)
return intel_suspend_device(dev);
}
-static int intel_resume(struct device *dev)
+static int __maybe_unused intel_resume(struct device *dev)
{
struct intel_device *idev = dev_get_drvdata(dev);
@@ -1112,7 +1107,6 @@ static int intel_resume(struct device *dev)
return intel_resume_device(dev);
}
-#endif
static const struct dev_pm_ops intel_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 888176b0faa9..51309f5b5714 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -87,6 +87,7 @@ enum qca_flags {
enum qca_capabilities {
QCA_CAP_WIDEBAND_SPEECH = BIT(0),
QCA_CAP_VALID_LE_STATES = BIT(1),
+ QCA_CAP_HFP_HW_OFFLOAD = BIT(2),
};
/* HCI_IBS transmit side sleep protocol states */
@@ -229,6 +230,7 @@ struct qca_serdev {
u32 init_speed;
u32 oper_speed;
bool bdaddr_property_broken;
+ bool support_hfp_hw_offload;
const char *firmware_name[2];
};
@@ -1653,6 +1655,39 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
skb_queue_purge(&qca->rx_memdump_q);
}
+ /*
+ * If the BT chip's bt_en pin is wired to a 3.3V power supply in
+ * hardware and always stays high, the driver cannot control the
+ * bt_en pin. As a result, during SSR (SubSystem Restart), the
+ * QCA_SSR_TRIGGERED and QCA_IBS_DISABLED flags cannot be cleared,
+ * which leads to a reset command timeout.
+ * Add an msleep delay to ensure the controller completes the SSR
+ * process.
+ *
+ * The host will not re-download the firmware after SSR; the
+ * controller remains in the IBS_WAKE state, and the host needs to
+ * synchronize with it.
+ *
+ * Since the Bluetooth chip has been reset, clear the memdump state.
+ */
+ if (!hci_test_quirk(hu->hdev, HCI_QUIRK_NON_PERSISTENT_SETUP)) {
+ /*
+ * When the SSR (SubSystem Restart) duration exceeds 2 seconds,
+ * it triggers host tx_idle_delay, which sets host TX state
+ * to sleep. Reset tx_idle_timer after SSR to prevent
+ * host enter TX IBS_Sleep mode.
+ */
+ mod_timer(&qca->tx_idle_timer, jiffies +
+ msecs_to_jiffies(qca->tx_idle_delay));
+
+ /* Controller reset completion time is 50ms */
+ msleep(50);
+
+ clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
+ clear_bit(QCA_IBS_DISABLED, &qca->flags);
+
+ qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
+ qca->memdump_state = QCA_MEMDUMP_IDLE;
+ }
+
clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
}
@@ -1879,7 +1914,7 @@ static int qca_setup(struct hci_uart *hu)
const char *rampatch_name = qca_get_rampatch_name(hu);
int ret;
struct qca_btsoc_version ver;
- struct qca_serdev *qcadev;
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(hu->serdev);
const char *soc_name;
ret = qca_check_speeds(hu);
@@ -1943,7 +1978,6 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
- qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->bdaddr_property_broken)
hci_set_quirk(hdev, HCI_QUIRK_BDADDR_PROPERTY_BROKEN);
@@ -2033,7 +2067,7 @@ out:
else
hu->hdev->set_bdaddr = qca_set_bdaddr;
- if (soc_type == QCA_QCA2066)
+ if (qcadev->support_hfp_hw_offload)
qca_configure_hfp_offload(hdev);
qca->fw_version = le16_to_cpu(ver.patch_ver);
@@ -2117,7 +2151,8 @@ static const struct qca_device_data qca_soc_data_wcn3998 __maybe_unused = {
static const struct qca_device_data qca_soc_data_qca2066 __maybe_unused = {
.soc_type = QCA_QCA2066,
.num_vregs = 0,
- .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
+ .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES |
+ QCA_CAP_HFP_HW_OFFLOAD,
};
static const struct qca_device_data qca_soc_data_qca6390 __maybe_unused = {
@@ -2153,7 +2188,8 @@ static const struct qca_device_data qca_soc_data_wcn6855 __maybe_unused = {
{ "vddrfa1p2", 257000 },
},
.num_vregs = 6,
- .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
+ .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES |
+ QCA_CAP_HFP_HW_OFFLOAD,
};
static const struct qca_device_data qca_soc_data_wcn7850 __maybe_unused = {
@@ -2167,7 +2203,8 @@ static const struct qca_device_data qca_soc_data_wcn7850 __maybe_unused = {
{ "vddrfa1p9", 302000 },
},
.num_vregs = 6,
- .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
+ .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES |
+ QCA_CAP_HFP_HW_OFFLOAD,
};
static void qca_power_shutdown(struct hci_uart *hu)
@@ -2502,6 +2539,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (!(data->capabilities & QCA_CAP_VALID_LE_STATES))
hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES);
+
+ if (data->capabilities & QCA_CAP_HFP_HW_OFFLOAD)
+ qcadev->support_hfp_hw_offload = true;
}
return 0;
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 5368d92d9b39..994d5bc5168b 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -179,6 +179,56 @@ int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional);
+static void devm_clk_bulk_release_enable(struct device *dev, void *res)
+{
+ struct clk_bulk_devres *devres = res;
+
+ clk_bulk_disable_unprepare(devres->num_clks, devres->clks);
+ clk_bulk_put(devres->num_clks, devres->clks);
+}
+
+static int __devm_clk_bulk_get_enable(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks, bool optional)
+{
+ struct clk_bulk_devres *devres;
+ int ret;
+
+ devres = devres_alloc(devm_clk_bulk_release_enable,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ if (optional)
+ ret = clk_bulk_get_optional(dev, num_clks, clks);
+ else
+ ret = clk_bulk_get(dev, num_clks, clks);
+ if (ret)
+ goto err_clk_get;
+
+ ret = clk_bulk_prepare_enable(num_clks, clks);
+ if (ret)
+ goto err_clk_prepare;
+
+ devres->clks = clks;
+ devres->num_clks = num_clks;
+ devres_add(dev, devres);
+
+ return 0;
+
+err_clk_prepare:
+ clk_bulk_put(num_clks, clks);
+err_clk_get:
+ devres_free(devres);
+ return ret;
+}
+
+int __must_check devm_clk_bulk_get_optional_enable(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
+{
+ return __devm_clk_bulk_get_enable(dev, num_clks, clks, true);
+}
+EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional_enable);
+
static void devm_clk_bulk_release_all(struct device *dev, void *res)
{
struct clk_bulk_devres *devres = res;
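For reference, a minimal sketch of how a consumer driver might use the new
devm_clk_bulk_get_optional_enable() helper in its probe path; the driver name,
clock ids and private-data layout are hypothetical, not part of this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>

struct foo_priv {
        struct clk_bulk_data clks[2];
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_priv *priv;
        int ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* Hypothetical clock names, for illustration only */
        priv->clks[0].id = "core";
        priv->clks[1].id = "bus";

        /*
         * Get (optionally) and prepare-enable both clocks in one call;
         * devres disables, unprepares and puts them on driver detach,
         * so no explicit error-path or remove() cleanup is needed.
         */
        ret = devm_clk_bulk_get_optional_enable(&pdev->dev,
                                                ARRAY_SIZE(priv->clks),
                                                priv->clks);
        if (ret)
                return dev_err_probe(&pdev->dev, ret,
                                     "failed to enable clocks\n");

        return 0;
}

Note that the clk_bulk_data array is kept by the devres release callback and
must remain valid for the device's lifetime, hence the devm allocation above
rather than a stack array.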
diff --git a/drivers/dpll/Kconfig b/drivers/dpll/Kconfig
index ade872c915ac..be98969f040a 100644
--- a/drivers/dpll/Kconfig
+++ b/drivers/dpll/Kconfig
@@ -8,6 +8,21 @@ menu "DPLL device support"
config DPLL
bool
+config DPLL_REFCNT_TRACKER
+ bool "DPLL reference count tracking"
+ depends on DEBUG_KERNEL && STACKTRACE_SUPPORT && DPLL
+ select REF_TRACKER
+ help
+ Enable reference count tracking for DPLL devices and pins.
+ This helps with debugging reference leaks and use-after-free
+ bugs by recording a stack trace for each get/put operation.
+
+ The tracking information is exposed via debugfs at:
+ /sys/kernel/debug/ref_tracker/dpll_device_*
+ /sys/kernel/debug/ref_tracker/dpll_pin_*
+
+ If unsure, say N.
+
source "drivers/dpll/zl3073x/Kconfig"
endmenu
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 8879a7235156..627a5b39a0ef 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -10,6 +10,8 @@
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/idr.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -22,6 +24,9 @@ DEFINE_MUTEX(dpll_lock);
DEFINE_XARRAY_FLAGS(dpll_device_xa, XA_FLAGS_ALLOC);
DEFINE_XARRAY_FLAGS(dpll_pin_xa, XA_FLAGS_ALLOC);
+static RAW_NOTIFIER_HEAD(dpll_notifier_chain);
+static DEFINE_IDA(dpll_pin_idx_ida);
+
static u32 dpll_device_xa_id;
static u32 dpll_pin_xa_id;
@@ -36,6 +41,7 @@ struct dpll_device_registration {
struct list_head list;
const struct dpll_device_ops *ops;
void *priv;
+ dpll_tracker tracker;
};
struct dpll_pin_registration {
@@ -43,8 +49,117 @@ struct dpll_pin_registration {
const struct dpll_pin_ops *ops;
void *priv;
void *cookie;
+ dpll_tracker tracker;
};
+static int call_dpll_notifiers(unsigned long action, void *info)
+{
+ lockdep_assert_held(&dpll_lock);
+ return raw_notifier_call_chain(&dpll_notifier_chain, action, info);
+}
+
+void dpll_device_notify(struct dpll_device *dpll, unsigned long action)
+{
+ struct dpll_device_notifier_info info = {
+ .dpll = dpll,
+ .id = dpll->id,
+ .idx = dpll->device_idx,
+ .clock_id = dpll->clock_id,
+ .type = dpll->type,
+ };
+
+ call_dpll_notifiers(action, &info);
+}
+
+void dpll_pin_notify(struct dpll_pin *pin, unsigned long action)
+{
+ struct dpll_pin_notifier_info info = {
+ .pin = pin,
+ .id = pin->id,
+ .idx = pin->pin_idx,
+ .clock_id = pin->clock_id,
+ .fwnode = pin->fwnode,
+ .prop = &pin->prop,
+ };
+
+ call_dpll_notifiers(action, &info);
+}
+
+static void dpll_device_tracker_alloc(struct dpll_device *dpll,
+ dpll_tracker *tracker)
+{
+#ifdef CONFIG_DPLL_REFCNT_TRACKER
+ ref_tracker_alloc(&dpll->refcnt_tracker, tracker, GFP_KERNEL);
+#endif
+}
+
+static void dpll_device_tracker_free(struct dpll_device *dpll,
+ dpll_tracker *tracker)
+{
+#ifdef CONFIG_DPLL_REFCNT_TRACKER
+ ref_tracker_free(&dpll->refcnt_tracker, tracker);
+#endif
+}
+
+static void __dpll_device_hold(struct dpll_device *dpll, dpll_tracker *tracker)
+{
+ dpll_device_tracker_alloc(dpll, tracker);
+ refcount_inc(&dpll->refcount);
+}
+
+static void __dpll_device_put(struct dpll_device *dpll, dpll_tracker *tracker)
+{
+ dpll_device_tracker_free(dpll, tracker);
+ if (refcount_dec_and_test(&dpll->refcount)) {
+ ASSERT_DPLL_NOT_REGISTERED(dpll);
+ WARN_ON_ONCE(!xa_empty(&dpll->pin_refs));
+ xa_destroy(&dpll->pin_refs);
+ xa_erase(&dpll_device_xa, dpll->id);
+ WARN_ON(!list_empty(&dpll->registration_list));
+ ref_tracker_dir_exit(&dpll->refcnt_tracker);
+ kfree(dpll);
+ }
+}
+
+static void dpll_pin_tracker_alloc(struct dpll_pin *pin, dpll_tracker *tracker)
+{
+#ifdef CONFIG_DPLL_REFCNT_TRACKER
+ ref_tracker_alloc(&pin->refcnt_tracker, tracker, GFP_KERNEL);
+#endif
+}
+
+static void dpll_pin_tracker_free(struct dpll_pin *pin, dpll_tracker *tracker)
+{
+#ifdef CONFIG_DPLL_REFCNT_TRACKER
+ ref_tracker_free(&pin->refcnt_tracker, tracker);
+#endif
+}
+
+static void __dpll_pin_hold(struct dpll_pin *pin, dpll_tracker *tracker)
+{
+ dpll_pin_tracker_alloc(pin, tracker);
+ refcount_inc(&pin->refcount);
+}
+
+static void dpll_pin_idx_free(u32 pin_idx);
+static void dpll_pin_prop_free(struct dpll_pin_properties *prop);
+
+static void __dpll_pin_put(struct dpll_pin *pin, dpll_tracker *tracker)
+{
+ dpll_pin_tracker_free(pin, tracker);
+ if (refcount_dec_and_test(&pin->refcount)) {
+ xa_erase(&dpll_pin_xa, pin->id);
+ xa_destroy(&pin->dpll_refs);
+ xa_destroy(&pin->parent_refs);
+ xa_destroy(&pin->ref_sync_pins);
+ dpll_pin_prop_free(&pin->prop);
+ fwnode_handle_put(pin->fwnode);
+ dpll_pin_idx_free(pin->pin_idx);
+ ref_tracker_dir_exit(&pin->refcnt_tracker);
+ kfree_rcu(pin, rcu);
+ }
+}
+
struct dpll_device *dpll_device_get_by_id(int id)
{
if (xa_get_mark(&dpll_device_xa, id, DPLL_REGISTERED))
@@ -114,6 +229,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
reg->ops = ops;
reg->priv = priv;
reg->cookie = cookie;
+ __dpll_pin_hold(pin, &reg->tracker);
if (ref_exists)
refcount_inc(&ref->refcount);
list_add_tail(&reg->list, &ref->registration_list);
@@ -136,6 +252,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
if (WARN_ON(!reg))
return -EINVAL;
list_del(&reg->list);
+ __dpll_pin_put(pin, &reg->tracker);
kfree(reg);
if (refcount_dec_and_test(&ref->refcount)) {
xa_erase(xa_pins, i);
@@ -193,6 +310,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
reg->ops = ops;
reg->priv = priv;
reg->cookie = cookie;
+ __dpll_device_hold(dpll, &reg->tracker);
if (ref_exists)
refcount_inc(&ref->refcount);
list_add_tail(&reg->list, &ref->registration_list);
@@ -215,6 +333,7 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
if (WARN_ON(!reg))
return;
list_del(&reg->list);
+ __dpll_device_put(dpll, &reg->tracker);
kfree(reg);
if (refcount_dec_and_test(&ref->refcount)) {
xa_erase(xa_dplls, i);
@@ -256,6 +375,7 @@ dpll_device_alloc(const u64 clock_id, u32 device_idx, struct module *module)
return ERR_PTR(ret);
}
xa_init_flags(&dpll->pin_refs, XA_FLAGS_ALLOC);
+ ref_tracker_dir_init(&dpll->refcnt_tracker, 128, "dpll_device");
return dpll;
}
@@ -265,6 +385,7 @@ dpll_device_alloc(const u64 clock_id, u32 device_idx, struct module *module)
* @clock_id: clock_id of creator
* @device_idx: idx given by device driver
* @module: reference to registering module
+ * @tracker: tracking object for the acquired reference
*
* Get existing object of a dpll device, unique for given arguments.
* Create new if doesn't exist yet.
@@ -275,7 +396,8 @@ dpll_device_alloc(const u64 clock_id, u32 device_idx, struct module *module)
* * ERR_PTR(X) - error
*/
struct dpll_device *
-dpll_device_get(u64 clock_id, u32 device_idx, struct module *module)
+dpll_device_get(u64 clock_id, u32 device_idx, struct module *module,
+ dpll_tracker *tracker)
{
struct dpll_device *dpll, *ret = NULL;
unsigned long index;
@@ -285,13 +407,17 @@ dpll_device_get(u64 clock_id, u32 device_idx, struct module *module)
if (dpll->clock_id == clock_id &&
dpll->device_idx == device_idx &&
dpll->module == module) {
+ __dpll_device_hold(dpll, tracker);
ret = dpll;
- refcount_inc(&ret->refcount);
break;
}
}
- if (!ret)
+ if (!ret) {
ret = dpll_device_alloc(clock_id, device_idx, module);
+ if (!IS_ERR(ret))
+ dpll_device_tracker_alloc(ret, tracker);
+ }
+
mutex_unlock(&dpll_lock);
return ret;
@@ -301,22 +427,16 @@ EXPORT_SYMBOL_GPL(dpll_device_get);
/**
* dpll_device_put - decrease the refcount and free memory if possible
* @dpll: dpll_device struct pointer
+ * @tracker: tracking object for the acquired reference
*
* Context: Acquires a lock (dpll_lock)
* Drop reference for a dpll device, if all references are gone, delete
* dpll device object.
*/
-void dpll_device_put(struct dpll_device *dpll)
+void dpll_device_put(struct dpll_device *dpll, dpll_tracker *tracker)
{
mutex_lock(&dpll_lock);
- if (refcount_dec_and_test(&dpll->refcount)) {
- ASSERT_DPLL_NOT_REGISTERED(dpll);
- WARN_ON_ONCE(!xa_empty(&dpll->pin_refs));
- xa_destroy(&dpll->pin_refs);
- xa_erase(&dpll_device_xa, dpll->id);
- WARN_ON(!list_empty(&dpll->registration_list));
- kfree(dpll);
- }
+ __dpll_device_put(dpll, tracker);
mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_device_put);
@@ -378,6 +498,7 @@ int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
reg->ops = ops;
reg->priv = priv;
dpll->type = type;
+ __dpll_device_hold(dpll, &reg->tracker);
first_registration = list_empty(&dpll->registration_list);
list_add_tail(&reg->list, &dpll->registration_list);
if (!first_registration) {
@@ -417,6 +538,7 @@ void dpll_device_unregister(struct dpll_device *dpll,
return;
}
list_del(&reg->list);
+ __dpll_device_put(dpll, &reg->tracker);
kfree(reg);
if (!list_empty(&dpll->registration_list)) {
@@ -428,6 +550,36 @@ void dpll_device_unregister(struct dpll_device *dpll,
}
EXPORT_SYMBOL_GPL(dpll_device_unregister);
+static int dpll_pin_idx_alloc(u32 *pin_idx)
+{
+ int ret;
+
+ if (!pin_idx)
+ return -EINVAL;
+
+ /* Alloc unique number from IDA. Number belongs to <0, INT_MAX> range */
+ ret = ida_alloc(&dpll_pin_idx_ida, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ /* Map the value to dynamic pin index range <INT_MAX+1, U32_MAX> */
+ *pin_idx = (u32)ret + INT_MAX + 1;
+
+ return 0;
+}
+
+static void dpll_pin_idx_free(u32 pin_idx)
+{
+ if (pin_idx <= INT_MAX)
+ return; /* Not a dynamic pin index */
+
+ /* Map the index value from dynamic pin index range to IDA range and
+ * free it.
+ */
+ pin_idx -= (u32)INT_MAX + 1;
+ ida_free(&dpll_pin_idx_ida, pin_idx);
+}
+
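A worked round trip of the index mapping above (INT_MAX is 0x7fffffff):

        ida_alloc() returns 0  ->  pin_idx = 0 + 0x7fffffff + 1 = 0x80000000
        ida_alloc() returns 5  ->  pin_idx = 5 + 0x7fffffff + 1 = 0x80000005
        free of 0x80000005     ->  0x80000005 - 0x80000000 = 5  ->  ida_free(5)

Driver-supplied static indexes stay in <0, INT_MAX> and never enter the IDA,
which is why dpll_pin_idx_free() returns early for them.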
static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
{
kfree(prop->package_label);
@@ -485,9 +637,18 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
struct dpll_pin *pin;
int ret;
+ if (pin_idx == DPLL_PIN_IDX_UNSPEC) {
+ ret = dpll_pin_idx_alloc(&pin_idx);
+ if (ret)
+ return ERR_PTR(ret);
+ } else if (pin_idx > INT_MAX) {
+ return ERR_PTR(-EINVAL);
+ }
pin = kzalloc(sizeof(*pin), GFP_KERNEL);
- if (!pin)
- return ERR_PTR(-ENOMEM);
+ if (!pin) {
+ ret = -ENOMEM;
+ goto err_pin_alloc;
+ }
pin->pin_idx = pin_idx;
pin->clock_id = clock_id;
pin->module = module;
@@ -507,6 +668,7 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
&dpll_pin_xa_id, GFP_KERNEL);
if (ret < 0)
goto err_xa_alloc;
+ ref_tracker_dir_init(&pin->refcnt_tracker, 128, "dpll_pin");
return pin;
err_xa_alloc:
xa_destroy(&pin->dpll_refs);
@@ -515,6 +677,8 @@ err_xa_alloc:
dpll_pin_prop_free(&pin->prop);
err_pin_prop:
kfree(pin);
+err_pin_alloc:
+ dpll_pin_idx_free(pin_idx);
return ERR_PTR(ret);
}
@@ -538,12 +702,35 @@ void dpll_netdev_pin_clear(struct net_device *dev)
}
EXPORT_SYMBOL(dpll_netdev_pin_clear);
+int register_dpll_notifier(struct notifier_block *nb)
+{
+ int ret;
+
+ mutex_lock(&dpll_lock);
+ ret = raw_notifier_chain_register(&dpll_notifier_chain, nb);
+ mutex_unlock(&dpll_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_dpll_notifier);
+
+int unregister_dpll_notifier(struct notifier_block *nb)
+{
+ int ret;
+
+ mutex_lock(&dpll_lock);
+ ret = raw_notifier_chain_unregister(&dpll_notifier_chain, nb);
+ mutex_unlock(&dpll_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(unregister_dpll_notifier);
+
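A minimal sketch of a notifier consumer built on these helpers; the handler
and variable names are hypothetical. The chain is a raw notifier invoked with
dpll_lock held (see the lockdep assertion in call_dpll_notifiers()), so the
callback must not re-enter the DPLL core in a way that takes that lock again:

static int foo_dpll_event(struct notifier_block *nb, unsigned long action,
                          void *data)
{
        switch (action) {
        case DPLL_PIN_CREATED: {
                struct dpll_pin_notifier_info *info = data;

                pr_debug("dpll pin %u created, clock id %llu\n",
                         info->idx, (unsigned long long)info->clock_id);
                break;
        }
        case DPLL_DEVICE_CREATED: {
                struct dpll_device_notifier_info *info = data;

                pr_debug("dpll device %u created\n", info->idx);
                break;
        }
        }

        return NOTIFY_DONE;
}

static struct notifier_block foo_dpll_nb = {
        .notifier_call = foo_dpll_event,
};

register_dpll_notifier(&foo_dpll_nb) would typically be called from module
init, unregister_dpll_notifier(&foo_dpll_nb) from module exit.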
/**
* dpll_pin_get - find existing or create new dpll pin
* @clock_id: clock_id of creator
* @pin_idx: idx given by dev driver
* @module: reference to registering module
* @prop: dpll pin properties
+ * @tracker: tracking object for the acquired reference
*
* Get existing object of a pin (unique for given arguments) or create new
* if doesn't exist yet.
@@ -555,7 +742,7 @@ EXPORT_SYMBOL(dpll_netdev_pin_clear);
*/
struct dpll_pin *
dpll_pin_get(u64 clock_id, u32 pin_idx, struct module *module,
- const struct dpll_pin_properties *prop)
+ const struct dpll_pin_properties *prop, dpll_tracker *tracker)
{
struct dpll_pin *pos, *ret = NULL;
unsigned long i;
@@ -565,13 +752,16 @@ dpll_pin_get(u64 clock_id, u32 pin_idx, struct module *module,
if (pos->clock_id == clock_id &&
pos->pin_idx == pin_idx &&
pos->module == module) {
+ __dpll_pin_hold(pos, tracker);
ret = pos;
- refcount_inc(&ret->refcount);
break;
}
}
- if (!ret)
+ if (!ret) {
ret = dpll_pin_alloc(clock_id, pin_idx, module, prop);
+ if (!IS_ERR(ret))
+ dpll_pin_tracker_alloc(ret, tracker);
+ }
mutex_unlock(&dpll_lock);
return ret;
@@ -581,26 +771,69 @@ EXPORT_SYMBOL_GPL(dpll_pin_get);
/**
* dpll_pin_put - decrease the refcount and free memory if possible
* @pin: pointer to a pin to be put
+ * @tracker: tracking object for the acquired reference
*
* Drop reference for a pin, if all references are gone, delete pin object.
*
* Context: Acquires a lock (dpll_lock)
*/
-void dpll_pin_put(struct dpll_pin *pin)
+void dpll_pin_put(struct dpll_pin *pin, dpll_tracker *tracker)
{
mutex_lock(&dpll_lock);
- if (refcount_dec_and_test(&pin->refcount)) {
- xa_erase(&dpll_pin_xa, pin->id);
- xa_destroy(&pin->dpll_refs);
- xa_destroy(&pin->parent_refs);
- xa_destroy(&pin->ref_sync_pins);
- dpll_pin_prop_free(&pin->prop);
- kfree_rcu(pin, rcu);
- }
+ __dpll_pin_put(pin, tracker);
mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_put);
+/**
+ * dpll_pin_fwnode_set - set dpll pin firmware node reference
+ * @pin: pointer to a dpll pin
+ * @fwnode: firmware node handle
+ *
+ * Set firmware node handle for the given dpll pin.
+ */
+void dpll_pin_fwnode_set(struct dpll_pin *pin, struct fwnode_handle *fwnode)
+{
+ mutex_lock(&dpll_lock);
+ fwnode_handle_put(pin->fwnode); /* Drop fwnode previously set */
+ pin->fwnode = fwnode_handle_get(fwnode);
+ mutex_unlock(&dpll_lock);
+}
+EXPORT_SYMBOL_GPL(dpll_pin_fwnode_set);
+
+/**
+ * fwnode_dpll_pin_find - find dpll pin by firmware node reference
+ * @fwnode: reference to firmware node
+ * @tracker: tracking object for the acquired reference
+ *
+ * Get existing object of a pin that is associated with given firmware node
+ * reference.
+ *
+ * Context: Acquires a lock (dpll_lock)
+ * Return:
+ * * valid dpll_pin pointer on success
+ * * NULL when no such pin exists
+ */
+struct dpll_pin *fwnode_dpll_pin_find(struct fwnode_handle *fwnode,
+ dpll_tracker *tracker)
+{
+ struct dpll_pin *pin, *ret = NULL;
+ unsigned long index;
+
+ mutex_lock(&dpll_lock);
+ xa_for_each(&dpll_pin_xa, index, pin) {
+ if (pin->fwnode == fwnode) {
+ __dpll_pin_hold(pin, tracker);
+ ret = pin;
+ break;
+ }
+ }
+ mutex_unlock(&dpll_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fwnode_dpll_pin_find);
+
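A sketch of the lookup/put pairing from a consumer's point of view; the
fwnode source is hypothetical:

        struct fwnode_handle *fwnode = dev_fwnode(dev); /* hypothetical */
        struct dpll_pin *pin;
        dpll_tracker tracker;

        pin = fwnode_dpll_pin_find(fwnode, &tracker);
        if (!pin)
                return -ENODEV;

        /* ... use the pin ... */

        /* Drop the reference taken by fwnode_dpll_pin_find() */
        dpll_pin_put(pin, &tracker);

The same tracker object must be passed to the put call so that, with
CONFIG_DPLL_REFCNT_TRACKER enabled, the get and put are matched in the
debugfs report.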
static int
__dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv, void *cookie)
@@ -743,7 +976,6 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv, pin);
if (ret)
goto unlock;
- refcount_inc(&pin->refcount);
xa_for_each(&parent->dpll_refs, i, ref) {
ret = __dpll_pin_register(ref->dpll, pin, ops, priv, parent);
if (ret) {
@@ -763,7 +995,6 @@ dpll_unregister:
parent);
dpll_pin_delete_ntf(pin);
}
- refcount_dec(&pin->refcount);
dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
unlock:
mutex_unlock(&dpll_lock);
@@ -790,7 +1021,6 @@ void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
mutex_lock(&dpll_lock);
dpll_pin_delete_ntf(pin);
dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
- refcount_dec(&pin->refcount);
xa_for_each(&pin->dpll_refs, i, ref)
__dpll_pin_unregister(ref->dpll, pin, ops, priv, parent);
mutex_unlock(&dpll_lock);
diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h
index 8ce969bbeb64..71ac88ef2017 100644
--- a/drivers/dpll/dpll_core.h
+++ b/drivers/dpll/dpll_core.h
@@ -10,6 +10,7 @@
#include <linux/dpll.h>
#include <linux/list.h>
#include <linux/refcount.h>
+#include <linux/ref_tracker.h>
#include "dpll_nl.h"
#define DPLL_REGISTERED XA_MARK_1
@@ -23,6 +24,7 @@
* @type: type of a dpll
* @pin_refs: stores pins registered within a dpll
* @refcount: refcount
+ * @refcnt_tracker: ref_tracker directory for debugging reference leaks
* @registration_list: list of registered ops and priv data of dpll owners
**/
struct dpll_device {
@@ -33,6 +35,7 @@ struct dpll_device {
enum dpll_type type;
struct xarray pin_refs;
refcount_t refcount;
+ struct ref_tracker_dir refcnt_tracker;
struct list_head registration_list;
};
@@ -42,11 +45,13 @@ struct dpll_device {
* @pin_idx: index of a pin given by dev driver
* @clock_id: clock_id of creator
* @module: module of creator
+ * @fwnode: optional reference to firmware node
* @dpll_refs: hold referencees to dplls pin was registered with
* @parent_refs: hold references to parent pins pin was registered with
* @ref_sync_pins: hold references to pins for Reference SYNC feature
* @prop: pin properties copied from the registerer
* @refcount: refcount
+ * @refcnt_tracker: ref_tracker directory for debugging reference leaks
* @rcu: rcu_head for kfree_rcu()
**/
struct dpll_pin {
@@ -54,11 +59,13 @@ struct dpll_pin {
u32 pin_idx;
u64 clock_id;
struct module *module;
+ struct fwnode_handle *fwnode;
struct xarray dpll_refs;
struct xarray parent_refs;
struct xarray ref_sync_pins;
struct dpll_pin_properties prop;
refcount_t refcount;
+ struct ref_tracker_dir refcnt_tracker;
struct rcu_head rcu;
};
@@ -89,4 +96,8 @@ struct dpll_pin_ref *dpll_xa_ref_dpll_first(struct xarray *xa_refs);
extern struct xarray dpll_device_xa;
extern struct xarray dpll_pin_xa;
extern struct mutex dpll_lock;
+
+void dpll_device_notify(struct dpll_device *dpll, unsigned long action);
+void dpll_pin_notify(struct dpll_pin *pin, unsigned long action);
+
#endif
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 64944f601ee5..83cbd64abf5a 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -128,18 +128,29 @@ dpll_msg_add_mode_supported(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ DECLARE_BITMAP(modes, DPLL_MODE_MAX + 1) = { 0 };
enum dpll_mode mode;
int ret;
- /* No mode change is supported now, so the only supported mode is the
- * one obtained by mode_get().
- */
+ if (ops->supported_modes_get) {
+ ret = ops->supported_modes_get(dpll, dpll_priv(dpll), modes,
+ extack);
+ if (ret)
+ return ret;
+ } else {
+ /* If the supported modes are not reported by the driver, the
+ * only supported mode is the one obtained by mode_get().
+ */
+ ret = ops->mode_get(dpll, dpll_priv(dpll), &mode, extack);
+ if (ret)
+ return ret;
- ret = ops->mode_get(dpll, dpll_priv(dpll), &mode, extack);
- if (ret)
- return ret;
- if (nla_put_u32(msg, DPLL_A_MODE_SUPPORTED, mode))
- return -EMSGSIZE;
+ __set_bit(mode, modes);
+ }
+
+ for_each_set_bit(mode, modes, DPLL_MODE_MAX + 1)
+ if (nla_put_u32(msg, DPLL_A_MODE_SUPPORTED, mode))
+ return -EMSGSIZE;
return 0;
}
@@ -378,7 +389,15 @@ static int dpll_msg_add_ffo(struct sk_buff *msg, struct dpll_pin *pin,
return 0;
return ret;
}
- return nla_put_sint(msg, DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET, ffo);
+ /* Put the FFO value in PPM to preserve compatibility with older
+ * programs.
+ */
+ ret = nla_put_sint(msg, DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET,
+ div_s64(ffo, 1000000));
+ if (ret)
+ return -EMSGSIZE;
+ return nla_put_sint(msg, DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET_PPT,
+ ffo);
}
static int
@@ -742,17 +761,20 @@ err_free_msg:
int dpll_device_create_ntf(struct dpll_device *dpll)
{
+ dpll_device_notify(dpll, DPLL_DEVICE_CREATED);
return dpll_device_event_send(DPLL_CMD_DEVICE_CREATE_NTF, dpll);
}
int dpll_device_delete_ntf(struct dpll_device *dpll)
{
+ dpll_device_notify(dpll, DPLL_DEVICE_DELETED);
return dpll_device_event_send(DPLL_CMD_DEVICE_DELETE_NTF, dpll);
}
static int
__dpll_device_change_ntf(struct dpll_device *dpll)
{
+ dpll_device_notify(dpll, DPLL_DEVICE_CHANGED);
return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
}
@@ -810,16 +832,19 @@ err_free_msg:
int dpll_pin_create_ntf(struct dpll_pin *pin)
{
+ dpll_pin_notify(pin, DPLL_PIN_CREATED);
return dpll_pin_event_send(DPLL_CMD_PIN_CREATE_NTF, pin);
}
int dpll_pin_delete_ntf(struct dpll_pin *pin)
{
+ dpll_pin_notify(pin, DPLL_PIN_DELETED);
return dpll_pin_event_send(DPLL_CMD_PIN_DELETE_NTF, pin);
}
int __dpll_pin_change_ntf(struct dpll_pin *pin)
{
+ dpll_pin_notify(pin, DPLL_PIN_CHANGED);
return dpll_pin_event_send(DPLL_CMD_PIN_CHANGE_NTF, pin);
}
@@ -843,6 +868,45 @@ int dpll_pin_change_ntf(struct dpll_pin *pin)
EXPORT_SYMBOL_GPL(dpll_pin_change_ntf);
static int
+dpll_mode_set(struct dpll_device *dpll, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ DECLARE_BITMAP(modes, DPLL_MODE_MAX + 1) = { 0 };
+ enum dpll_mode mode = nla_get_u32(a), old_mode;
+ int ret;
+
+ if (!(ops->mode_set && ops->supported_modes_get)) {
+ NL_SET_ERR_MSG_ATTR(extack, a,
+ "dpll device does not support mode switch");
+ return -EOPNOTSUPP;
+ }
+
+ ret = ops->mode_get(dpll, dpll_priv(dpll), &old_mode, extack);
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "unable to get current mode");
+ return ret;
+ }
+
+ if (mode == old_mode)
+ return 0;
+
+ ret = ops->supported_modes_get(dpll, dpll_priv(dpll), modes, extack);
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "unable to get supported modes");
+ return ret;
+ }
+
+ if (!test_bit(mode, modes)) {
+ NL_SET_ERR_MSG(extack,
+ "dpll device does not support requested mode");
+ return -EINVAL;
+ }
+
+ return ops->mode_set(dpll, dpll_priv(dpll), mode, extack);
+}
+
+static int
dpll_phase_offset_monitor_set(struct dpll_device *dpll, struct nlattr *a,
struct netlink_ext_ack *extack)
{
@@ -1797,6 +1861,11 @@ dpll_set_from_nlattr(struct dpll_device *dpll, struct genl_info *info)
nla_for_each_attr(a, genlmsg_data(info->genlhdr),
genlmsg_len(info->genlhdr), rem) {
switch (nla_type(a)) {
+ case DPLL_A_MODE:
+ ret = dpll_mode_set(dpll, a, info->extack);
+ if (ret)
+ return ret;
+ break;
case DPLL_A_PHASE_OFFSET_MONITOR:
ret = dpll_phase_offset_monitor_set(dpll, a,
info->extack);
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index 36d11ff195df..a2b22d492114 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -45,6 +45,7 @@ static const struct nla_policy dpll_device_get_nl_policy[DPLL_A_ID + 1] = {
/* DPLL_CMD_DEVICE_SET - do */
static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_PHASE_OFFSET_AVG_FACTOR + 1] = {
[DPLL_A_ID] = { .type = NLA_U32, },
+ [DPLL_A_MODE] = NLA_POLICY_RANGE(NLA_U32, 1, 2),
[DPLL_A_PHASE_OFFSET_MONITOR] = NLA_POLICY_MAX(NLA_U32, 1),
[DPLL_A_PHASE_OFFSET_AVG_FACTOR] = { .type = NLA_U32, },
};
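For reference, the accepted range corresponds to the uapi enum values (a
sketch, assuming the standard <uapi/linux/dpll.h> numbering):

        DPLL_MODE_MANUAL    = 1,
        DPLL_MODE_AUTOMATIC = 2,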
diff --git a/drivers/dpll/zl3073x/core.c b/drivers/dpll/zl3073x/core.c
index 383e2397dd03..63bd97181b9e 100644
--- a/drivers/dpll/zl3073x/core.c
+++ b/drivers/dpll/zl3073x/core.c
@@ -710,8 +710,11 @@ zl3073x_ref_ffo_update(struct zl3073x_dev *zldev)
if (rc)
return rc;
- /* Convert to ppm -> ffo = (10^6 * value) / 2^32 */
- zldev->ref[i].ffo = mul_s64_u64_shr(value, 1000000, 32);
+ /* Convert to ppt
+ * ffo = (10^12 * value) / 2^32
+ * ffo = ( 5^12 * value) / 2^20
+ */
+ zldev->ref[i].ffo = mul_s64_u64_shr(value, 244140625, 20);
}
return 0;
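The new constant follows from factoring out the powers of two, and lines up
with the dpll_netlink.c hunk above, which divides by 10^6 to recover PPM for
the legacy attribute:

        ffo = (10^12 * value) / 2^32
            = (2^12 * 5^12 * value) / 2^32
            = (5^12 * value) / 2^20        where 5^12 = 244140625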
diff --git a/drivers/dpll/zl3073x/core.h b/drivers/dpll/zl3073x/core.h
index 09bca2d0926d..dddfcacea5c0 100644
--- a/drivers/dpll/zl3073x/core.h
+++ b/drivers/dpll/zl3073x/core.h
@@ -302,6 +302,36 @@ u8 zl3073x_dev_out_dpll_get(struct zl3073x_dev *zldev, u8 index)
}
/**
+ * zl3073x_dev_output_pin_freq_get - get output pin frequency
+ * @zldev: pointer to zl3073x device
+ * @id: output pin id
+ *
+ * Computes the output pin frequency based on the synth frequency, output
+ * divisor, and signal format. For N-div signal formats, the N-pin
+ * frequency is additionally divided by esync_n_period.
+ *
+ * Return: frequency of the given output pin in Hz
+ */
+static inline u32
+zl3073x_dev_output_pin_freq_get(struct zl3073x_dev *zldev, u8 id)
+{
+ const struct zl3073x_synth *synth;
+ const struct zl3073x_out *out;
+ u8 out_id;
+ u32 freq;
+
+ out_id = zl3073x_output_pin_out_get(id);
+ out = zl3073x_out_state_get(zldev, out_id);
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(out));
+ freq = zl3073x_synth_freq_get(synth) / out->div;
+
+ if (zl3073x_out_is_ndiv(out) && zl3073x_is_n_pin(id))
+ freq /= out->esync_n_period;
+
+ return freq;
+}
+
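A worked example with hypothetical values: a 125 MHz synth, output divisor 5,
and an N-div signal format with esync_n_period = 25:

        P-pin: 125000000 / 5      = 25000000 Hz
        N-pin: 125000000 / 5 / 25 =  1000000 Hz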
+/**
* zl3073x_dev_out_is_diff - check if the given output is differential
* @zldev: pointer to zl3073x device
* @index: output index
diff --git a/drivers/dpll/zl3073x/dpll.c b/drivers/dpll/zl3073x/dpll.c
index 9879d85d29af..78edc36b17fb 100644
--- a/drivers/dpll/zl3073x/dpll.c
+++ b/drivers/dpll/zl3073x/dpll.c
@@ -29,6 +29,7 @@
* @list: this DPLL pin list entry
* @dpll: DPLL the pin is registered to
* @dpll_pin: pointer to registered dpll_pin
+ * @tracker: tracking object for the acquired reference
* @label: package label
* @dir: pin direction
* @id: pin id
@@ -44,6 +45,7 @@ struct zl3073x_dpll_pin {
struct list_head list;
struct zl3073x_dpll *dpll;
struct dpll_pin *dpll_pin;
+ dpll_tracker tracker;
char label[8];
enum dpll_pin_direction dir;
u8 id;
@@ -100,6 +102,20 @@ zl3073x_dpll_pin_direction_get(const struct dpll_pin *dpll_pin, void *pin_priv,
return 0;
}
+static struct zl3073x_dpll_pin *
+zl3073x_dpll_pin_get_by_ref(struct zl3073x_dpll *zldpll, u8 ref_id)
+{
+ struct zl3073x_dpll_pin *pin;
+
+ list_for_each_entry(pin, &zldpll->pins, list) {
+ if (zl3073x_dpll_is_input_pin(pin) &&
+ zl3073x_input_pin_ref_get(pin->id) == ref_id)
+ return pin;
+ }
+
+ return NULL;
+}
+
static int
zl3073x_dpll_input_pin_esync_get(const struct dpll_pin *dpll_pin,
void *pin_priv,
@@ -900,46 +916,9 @@ zl3073x_dpll_output_pin_frequency_get(const struct dpll_pin *dpll_pin,
struct netlink_ext_ack *extack)
{
struct zl3073x_dpll *zldpll = dpll_priv;
- struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- const struct zl3073x_synth *synth;
- const struct zl3073x_out *out;
- u32 synth_freq;
- u8 out_id;
- out_id = zl3073x_output_pin_out_get(pin->id);
- out = zl3073x_out_state_get(zldev, out_id);
-
- /* Get attached synth frequency */
- synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(out));
- synth_freq = zl3073x_synth_freq_get(synth);
-
- switch (zl3073x_out_signal_format_get(out)) {
- case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
- case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
- /* In case of divided format we have to distiguish between
- * given output pin type.
- *
- * For P-pin the resulting frequency is computed as simple
- * division of synth frequency and output divisor.
- *
- * For N-pin we have to divide additionally by divisor stored
- * in esync_n_period output mailbox register that is used as
- * N-pin divisor for these modes.
- */
- *frequency = synth_freq / out->div;
-
- if (!zl3073x_dpll_is_p_pin(pin))
- *frequency = (u32)*frequency / out->esync_n_period;
-
- break;
- default:
- /* In other modes the resulting frequency is computed as
- * division of synth frequency and output divisor.
- */
- *frequency = synth_freq / out->div;
- break;
- }
+ *frequency = zl3073x_dev_output_pin_freq_get(zldpll->dev, pin->id);
return 0;
}
@@ -1039,10 +1018,8 @@ zl3073x_dpll_output_pin_phase_adjust_get(const struct dpll_pin *dpll_pin,
out_id = zl3073x_output_pin_out_get(pin->id);
out = zl3073x_out_state_get(zldev, out_id);
- /* Convert value to ps and reverse two's complement negation applied
- * during 'set'
- */
- *phase_adjust = -out->phase_comp * pin->phase_gran;
+ /* The value in the register is expressed in half synth clock cycles. */
+ *phase_adjust = out->phase_comp * pin->phase_gran;
return 0;
}
@@ -1064,10 +1041,8 @@ zl3073x_dpll_output_pin_phase_adjust_set(const struct dpll_pin *dpll_pin,
out_id = zl3073x_output_pin_out_get(pin->id);
out = *zl3073x_out_state_get(zldev, out_id);
- /* The value in the register is stored as two's complement negation
- * of requested value and expressed in half synth clock cycles.
- */
- out.phase_comp = -phase_adjust / pin->phase_gran;
+ /* The value in the register is expressed in half synth clock cycles. */
+ out.phase_comp = phase_adjust / pin->phase_gran;
/* Update output configuration from mailbox */
return zl3073x_out_state_set(zldev, out_id, &out);
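A worked example with hypothetical values. phase_gran is derived in prop.c as
PSEC_PER_SEC / (2 * synth_freq), so for a 500 MHz synth it is
10^12 / 10^9 = 1000 ps per half clock cycle:

        set: phase_adjust = 4000 ps  ->  phase_comp = 4000 / 1000 = 4 half cycles
        get: phase_comp   = 4        ->  phase_adjust = 4 * 1000  = 4000 ps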
@@ -1138,6 +1113,26 @@ zl3073x_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
}
static int
+zl3073x_dpll_supported_modes_get(const struct dpll_device *dpll,
+ void *dpll_priv, unsigned long *modes,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ /* We support switching between automatic and manual mode, except
+ * when the DPLL channel is configured to run in NCO mode. In that
+ * case, report manual mode, to which NCO is mapped, as the only
+ * supported one.
+ */
+ if (zldpll->refsel_mode != ZL_DPLL_MODE_REFSEL_MODE_NCO)
+ __set_bit(DPLL_MODE_AUTOMATIC, modes);
+
+ __set_bit(DPLL_MODE_MANUAL, modes);
+
+ return 0;
+}
+
+static int
zl3073x_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_mode *mode, struct netlink_ext_ack *extack)
{
@@ -1218,6 +1213,82 @@ zl3073x_dpll_phase_offset_avg_factor_set(const struct dpll_device *dpll,
}
static int
+zl3073x_dpll_mode_set(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode mode, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ u8 hw_mode, mode_refsel, ref;
+ int rc;
+
+ rc = zl3073x_dpll_selected_ref_get(zldpll, &ref);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "failed to get selected reference");
+ return rc;
+ }
+
+ if (mode == DPLL_MODE_MANUAL) {
+ /* We are switching from automatic to manual mode:
+ * - if we have a valid reference selected during auto mode then
+ * we will switch to forced reference lock mode and use this
+ * reference for selection
+ * - if NO valid reference is selected, we will switch to forced
+ * holdover mode or freerun mode, depending on the current
+ * lock status
+ */
+ if (ZL3073X_DPLL_REF_IS_VALID(ref))
+ hw_mode = ZL_DPLL_MODE_REFSEL_MODE_REFLOCK;
+ else if (zldpll->lock_status == DPLL_LOCK_STATUS_UNLOCKED)
+ hw_mode = ZL_DPLL_MODE_REFSEL_MODE_FREERUN;
+ else
+ hw_mode = ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER;
+ } else {
+ /* We are switching from manual to automatic mode:
+ * - if there is a valid reference selected then ensure that
+ * it remains selectable after the switch to automatic mode
+ * - switch to automatic mode
+ */
+ struct zl3073x_dpll_pin *pin;
+
+ pin = zl3073x_dpll_pin_get_by_ref(zldpll, ref);
+ if (pin && !pin->selectable) {
+ /* Restore pin priority in HW */
+ rc = zl3073x_dpll_ref_prio_set(pin, pin->prio);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "failed to restore pin priority");
+ return rc;
+ }
+
+ pin->selectable = true;
+ }
+
+ hw_mode = ZL_DPLL_MODE_REFSEL_MODE_AUTO;
+ }
+
+ /* Build mode_refsel value */
+ mode_refsel = FIELD_PREP(ZL_DPLL_MODE_REFSEL_MODE, hw_mode);
+
+ if (ZL3073X_DPLL_REF_IS_VALID(ref))
+ mode_refsel |= FIELD_PREP(ZL_DPLL_MODE_REFSEL_REF, ref);
+
+ /* Update dpll_mode_refsel register */
+ rc = zl3073x_write_u8(zldpll->dev, ZL_REG_DPLL_MODE_REFSEL(zldpll->id),
+ mode_refsel);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "failed to set reference selection mode");
+ return rc;
+ }
+
+ zldpll->refsel_mode = hw_mode;
+
+ if (ZL3073X_DPLL_REF_IS_VALID(ref))
+ zldpll->forced_ref = ref;
+
+ return 0;
+}
+
+static int
zl3073x_dpll_phase_offset_monitor_get(const struct dpll_device *dpll,
void *dpll_priv,
enum dpll_feature_state *state,
@@ -1276,10 +1347,12 @@ static const struct dpll_pin_ops zl3073x_dpll_output_pin_ops = {
static const struct dpll_device_ops zl3073x_dpll_device_ops = {
.lock_status_get = zl3073x_dpll_lock_status_get,
.mode_get = zl3073x_dpll_mode_get,
+ .mode_set = zl3073x_dpll_mode_set,
.phase_offset_avg_factor_get = zl3073x_dpll_phase_offset_avg_factor_get,
.phase_offset_avg_factor_set = zl3073x_dpll_phase_offset_avg_factor_set,
.phase_offset_monitor_get = zl3073x_dpll_phase_offset_monitor_get,
.phase_offset_monitor_set = zl3073x_dpll_phase_offset_monitor_set,
+ .supported_modes_get = zl3073x_dpll_supported_modes_get,
};
/**
@@ -1368,11 +1441,12 @@ zl3073x_dpll_pin_register(struct zl3073x_dpll_pin *pin, u32 index)
/* Create or get existing DPLL pin */
pin->dpll_pin = dpll_pin_get(zldpll->dev->clock_id, index, THIS_MODULE,
- &props->dpll_props);
+ &props->dpll_props, &pin->tracker);
if (IS_ERR(pin->dpll_pin)) {
rc = PTR_ERR(pin->dpll_pin);
goto err_pin_get;
}
+ dpll_pin_fwnode_set(pin->dpll_pin, props->fwnode);
if (zl3073x_dpll_is_input_pin(pin))
ops = &zl3073x_dpll_input_pin_ops;
@@ -1390,7 +1464,7 @@ zl3073x_dpll_pin_register(struct zl3073x_dpll_pin *pin, u32 index)
return 0;
err_register:
- dpll_pin_put(pin->dpll_pin);
+ dpll_pin_put(pin->dpll_pin, &pin->tracker);
err_prio_get:
pin->dpll_pin = NULL;
err_pin_get:
@@ -1421,7 +1495,7 @@ zl3073x_dpll_pin_unregister(struct zl3073x_dpll_pin *pin)
/* Unregister the pin */
dpll_pin_unregister(zldpll->dpll_dev, pin->dpll_pin, ops, pin);
- dpll_pin_put(pin->dpll_pin);
+ dpll_pin_put(pin->dpll_pin, &pin->tracker);
pin->dpll_pin = NULL;
}
@@ -1595,7 +1669,7 @@ zl3073x_dpll_device_register(struct zl3073x_dpll *zldpll)
dpll_mode_refsel);
zldpll->dpll_dev = dpll_device_get(zldev->clock_id, zldpll->id,
- THIS_MODULE);
+ THIS_MODULE, &zldpll->tracker);
if (IS_ERR(zldpll->dpll_dev)) {
rc = PTR_ERR(zldpll->dpll_dev);
zldpll->dpll_dev = NULL;
@@ -1607,7 +1681,7 @@ zl3073x_dpll_device_register(struct zl3073x_dpll *zldpll)
zl3073x_prop_dpll_type_get(zldev, zldpll->id),
&zl3073x_dpll_device_ops, zldpll);
if (rc) {
- dpll_device_put(zldpll->dpll_dev);
+ dpll_device_put(zldpll->dpll_dev, &zldpll->tracker);
zldpll->dpll_dev = NULL;
}
@@ -1630,7 +1704,7 @@ zl3073x_dpll_device_unregister(struct zl3073x_dpll *zldpll)
dpll_device_unregister(zldpll->dpll_dev, &zl3073x_dpll_device_ops,
zldpll);
- dpll_device_put(zldpll->dpll_dev);
+ dpll_device_put(zldpll->dpll_dev, &zldpll->tracker);
zldpll->dpll_dev = NULL;
}
diff --git a/drivers/dpll/zl3073x/dpll.h b/drivers/dpll/zl3073x/dpll.h
index e8c39b44b356..c65c798c3792 100644
--- a/drivers/dpll/zl3073x/dpll.h
+++ b/drivers/dpll/zl3073x/dpll.h
@@ -18,6 +18,7 @@
* @check_count: periodic check counter
* @phase_monitor: is phase offset monitor enabled
* @dpll_dev: pointer to registered DPLL device
+ * @tracker: tracking object for the acquired reference
* @lock_status: last saved DPLL lock status
* @pins: list of pins
* @change_work: device change notification work
@@ -31,6 +32,7 @@ struct zl3073x_dpll {
u8 check_count;
bool phase_monitor;
struct dpll_device *dpll_dev;
+ dpll_tracker tracker;
enum dpll_lock_status lock_status;
struct list_head pins;
struct work_struct change_work;
diff --git a/drivers/dpll/zl3073x/out.h b/drivers/dpll/zl3073x/out.h
index e8ea7a0e0f07..318f9bb8da3a 100644
--- a/drivers/dpll/zl3073x/out.h
+++ b/drivers/dpll/zl3073x/out.h
@@ -80,6 +80,23 @@ static inline bool zl3073x_out_is_enabled(const struct zl3073x_out *out)
}
/**
+ * zl3073x_out_is_ndiv - check if the given output is in N-div mode
+ * @out: pointer to out state
+ *
+ * Return: true if output is in N-div mode, false otherwise
+ */
+static inline bool zl3073x_out_is_ndiv(const struct zl3073x_out *out)
+{
+ switch (zl3073x_out_signal_format_get(out)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* zl3073x_out_synth_get - get synth connected to given output
* @out: pointer to out state
*
diff --git a/drivers/dpll/zl3073x/prop.c b/drivers/dpll/zl3073x/prop.c
index 4ed153087570..8523dc8c226e 100644
--- a/drivers/dpll/zl3073x/prop.c
+++ b/drivers/dpll/zl3073x/prop.c
@@ -193,9 +193,10 @@ struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
{
struct dpll_pin_frequency *ranges;
struct zl3073x_pin_props *props;
- int i, j, num_freqs, rc;
+ int i, j, num_freqs = 0, rc;
+ u64 *freqs = NULL;
const char *type;
- u64 *freqs;
+ u32 curr_freq;
props = kzalloc(sizeof(*props), GFP_KERNEL);
if (!props)
@@ -207,6 +208,7 @@ struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
props->dpll_props.capabilities =
DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ curr_freq = zl3073x_dev_ref_freq_get(zldev, index);
} else {
u8 out, synth;
u32 f;
@@ -220,6 +222,7 @@ struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
synth = zl3073x_dev_out_synth_get(zldev, out);
f = 2 * zl3073x_dev_synth_freq_get(zldev, synth);
props->dpll_props.phase_gran = f ? div_u64(PSEC_PER_SEC, f) : 1;
+ curr_freq = zl3073x_dev_output_pin_freq_get(zldev, index);
}
props->dpll_props.phase_range.min = S32_MIN;
@@ -230,7 +233,7 @@ struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
/* Get firmware node for the given pin */
rc = zl3073x_prop_pin_fwnode_get(zldev, props, dir, index);
if (rc)
- return props; /* Return if it does not exist */
+ goto skip_fwnode_props;
/* Look for label property and store the value as board label */
fwnode_property_read_string(props->fwnode, "label",
@@ -249,6 +252,8 @@ struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
props->dpll_props.type = DPLL_PIN_TYPE_INT_OSCILLATOR;
else if (!strcmp(type, "synce"))
props->dpll_props.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT;
+ else if (!strcmp(type, "mux"))
+ props->dpll_props.type = DPLL_PIN_TYPE_MUX;
else
dev_warn(zldev->dev,
"Unknown or unsupported pin type '%s'\n",
@@ -262,9 +267,10 @@ struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
/* Read supported frequencies property if it is specified */
num_freqs = fwnode_property_count_u64(props->fwnode,
"supported-frequencies-hz");
- if (num_freqs <= 0)
- /* Return if the property does not exist or number is 0 */
- return props;
+ if (num_freqs <= 0) {
+ num_freqs = 0;
+ goto skip_fwnode_props;
+ }
/* The firmware node specifies list of supported frequencies while
* DPLL core pin properties requires list of frequency ranges.
@@ -281,19 +287,25 @@ struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
"supported-frequencies-hz", freqs,
num_freqs);
- /* Allocate frequency ranges list and fill it */
- ranges = kcalloc(num_freqs, sizeof(*ranges), GFP_KERNEL);
+skip_fwnode_props:
+ /* Allocate frequency ranges list - extra slot for current frequency */
+ ranges = kcalloc(num_freqs + 1, sizeof(*ranges), GFP_KERNEL);
if (!ranges) {
rc = -ENOMEM;
goto err_alloc_ranges;
}
- /* Convert list of frequencies to list of frequency ranges but
- * filter-out frequencies that are not representable by device
+ /* Start with current frequency at index 0 */
+ ranges[0] = (struct dpll_pin_frequency)DPLL_PIN_FREQUENCY(curr_freq);
+
+ /* Add frequencies from firmware node, skipping current frequency
+ * and filtering out frequencies not representable by device
*/
- for (i = 0, j = 0; i < num_freqs; i++) {
+ for (i = 0, j = 1; i < num_freqs; i++) {
struct dpll_pin_frequency freq = DPLL_PIN_FREQUENCY(freqs[i]);
+ if (freqs[i] == curr_freq)
+ continue;
if (zl3073x_pin_check_freq(zldev, dir, index, freqs[i])) {
ranges[j] = freq;
j++;
diff --git a/drivers/infiniband/hw/bng_re/Makefile b/drivers/infiniband/hw/bng_re/Makefile
index c6aaaf853c77..17e9d5871d40 100644
--- a/drivers/infiniband/hw/bng_re/Makefile
+++ b/drivers/infiniband/hw/bng_re/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -I $(srctree)/drivers/net/ethernet/broadcom/bnge -I $(srctree)/drivers/infiniband/hw/bnxt_re
+ccflags-y := -I $(srctree)/drivers/net/ethernet/broadcom/bnge
obj-$(CONFIG_INFINIBAND_BNG_RE) += bng_re.o
diff --git a/drivers/infiniband/hw/bng_re/bng_fw.c b/drivers/infiniband/hw/bng_re/bng_fw.c
index 7d9539113cf5..01b3e1cbe719 100644
--- a/drivers/infiniband/hw/bng_re/bng_fw.c
+++ b/drivers/infiniband/hw/bng_re/bng_fw.c
@@ -2,7 +2,7 @@
// Copyright (c) 2025 Broadcom.
#include <linux/pci.h>
-#include "roce_hsi.h"
+#include "bng_roce_hsi.h"
#include "bng_res.h"
#include "bng_fw.h"
#include "bng_sp.h"
diff --git a/drivers/infiniband/hw/bng_re/bng_res.c b/drivers/infiniband/hw/bng_re/bng_res.c
index c50823758b53..f6e3528e7f4c 100644
--- a/drivers/infiniband/hw/bng_re/bng_res.c
+++ b/drivers/infiniband/hw/bng_re/bng_res.c
@@ -5,9 +5,9 @@
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
#include "bng_res.h"
-#include "roce_hsi.h"
+#include "bng_roce_hsi.h"
/* Stats */
void bng_re_free_stats_ctx_mem(struct pci_dev *pdev,
diff --git a/drivers/infiniband/hw/bng_re/bng_res.h b/drivers/infiniband/hw/bng_re/bng_res.h
index 9997f86d6a0e..2c4e9191ad1c 100644
--- a/drivers/infiniband/hw/bng_re/bng_res.h
+++ b/drivers/infiniband/hw/bng_re/bng_res.h
@@ -4,7 +4,7 @@
#ifndef __BNG_RES_H__
#define __BNG_RES_H__
-#include "roce_hsi.h"
+#include "bng_roce_hsi.h"
#define BNG_ROCE_FW_MAX_TIMEOUT 60
diff --git a/drivers/infiniband/hw/bng_re/bng_roce_hsi.h b/drivers/infiniband/hw/bng_re/bng_roce_hsi.h
new file mode 100644
index 000000000000..5ebd7ba90b7b
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/bng_roce_hsi.h
@@ -0,0 +1,6450 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2026 Broadcom */
+
+/* DO NOT MODIFY!!! This file is automatically generated. */
+
+#ifndef _BNG_RE_HSI_H_
+#define _BNG_RE_HSI_H_
+
+#include <linux/bnge/hsi.h>
+
+/* tx_doorbell (size:32b/4B) */
+struct tx_doorbell {
+ __le32 key_idx;
+ #define TX_DOORBELL_IDX_MASK 0xffffffUL
+ #define TX_DOORBELL_IDX_SFT 0
+ #define TX_DOORBELL_KEY_MASK 0xf0000000UL
+ #define TX_DOORBELL_KEY_SFT 28
+ #define TX_DOORBELL_KEY_TX (0x0UL << 28)
+ #define TX_DOORBELL_KEY_LAST TX_DOORBELL_KEY_TX
+};
+
+/* rx_doorbell (size:32b/4B) */
+struct rx_doorbell {
+ __le32 key_idx;
+ #define RX_DOORBELL_IDX_MASK 0xffffffUL
+ #define RX_DOORBELL_IDX_SFT 0
+ #define RX_DOORBELL_KEY_MASK 0xf0000000UL
+ #define RX_DOORBELL_KEY_SFT 28
+ #define RX_DOORBELL_KEY_RX (0x1UL << 28)
+ #define RX_DOORBELL_KEY_LAST RX_DOORBELL_KEY_RX
+};
+
+/* cmpl_doorbell (size:32b/4B) */
+struct cmpl_doorbell {
+ __le32 key_mask_valid_idx;
+ #define CMPL_DOORBELL_IDX_MASK 0xffffffUL
+ #define CMPL_DOORBELL_IDX_SFT 0
+ #define CMPL_DOORBELL_IDX_VALID 0x4000000UL
+ #define CMPL_DOORBELL_MASK 0x8000000UL
+ #define CMPL_DOORBELL_KEY_MASK 0xf0000000UL
+ #define CMPL_DOORBELL_KEY_SFT 28
+ #define CMPL_DOORBELL_KEY_CMPL (0x2UL << 28)
+ #define CMPL_DOORBELL_KEY_LAST CMPL_DOORBELL_KEY_CMPL
+};
+
+/* status_doorbell (size:32b/4B) */
+struct status_doorbell {
+ __le32 key_idx;
+ #define STATUS_DOORBELL_IDX_MASK 0xffffffUL
+ #define STATUS_DOORBELL_IDX_SFT 0
+ #define STATUS_DOORBELL_KEY_MASK 0xf0000000UL
+ #define STATUS_DOORBELL_KEY_SFT 28
+ #define STATUS_DOORBELL_KEY_STAT (0x3UL << 28)
+ #define STATUS_DOORBELL_KEY_LAST STATUS_DOORBELL_KEY_STAT
+};
+
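A hedged sketch of composing one of these doorbell words from the generated
masks; the producer index and the mapped doorbell address are hypothetical,
and writel() handles the little-endian conversion for MMIO:

        void __iomem *db_base;  /* hypothetical mapped doorbell BAR */
        u32 prod_idx = 42;      /* hypothetical producer index */
        u32 db;

        db = TX_DOORBELL_KEY_TX | (prod_idx & TX_DOORBELL_IDX_MASK);
        writel(db, db_base);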
+/* cmdq_init (size:128b/16B) */
+struct cmdq_init {
+ __le64 cmdq_pbl;
+ __le16 cmdq_size_cmdq_lvl;
+ #define CMDQ_INIT_CMDQ_LVL_MASK 0x3UL
+ #define CMDQ_INIT_CMDQ_LVL_SFT 0
+ #define CMDQ_INIT_CMDQ_SIZE_MASK 0xfffcUL
+ #define CMDQ_INIT_CMDQ_SIZE_SFT 2
+ __le16 creq_ring_id;
+ __le32 prod_idx;
+};
+
+/* cmdq_base (size:128b/16B) */
+struct cmdq_base {
+ u8 opcode;
+ #define CMDQ_BASE_OPCODE_CREATE_QP 0x1UL
+ #define CMDQ_BASE_OPCODE_DESTROY_QP 0x2UL
+ #define CMDQ_BASE_OPCODE_MODIFY_QP 0x3UL
+ #define CMDQ_BASE_OPCODE_QUERY_QP 0x4UL
+ #define CMDQ_BASE_OPCODE_CREATE_SRQ 0x5UL
+ #define CMDQ_BASE_OPCODE_DESTROY_SRQ 0x6UL
+ #define CMDQ_BASE_OPCODE_QUERY_SRQ 0x8UL
+ #define CMDQ_BASE_OPCODE_CREATE_CQ 0x9UL
+ #define CMDQ_BASE_OPCODE_DESTROY_CQ 0xaUL
+ #define CMDQ_BASE_OPCODE_RESIZE_CQ 0xcUL
+ #define CMDQ_BASE_OPCODE_ALLOCATE_MRW 0xdUL
+ #define CMDQ_BASE_OPCODE_DEALLOCATE_KEY 0xeUL
+ #define CMDQ_BASE_OPCODE_REGISTER_MR 0xfUL
+ #define CMDQ_BASE_OPCODE_DEREGISTER_MR 0x10UL
+ #define CMDQ_BASE_OPCODE_ADD_GID 0x11UL
+ #define CMDQ_BASE_OPCODE_DELETE_GID 0x12UL
+ #define CMDQ_BASE_OPCODE_MODIFY_GID 0x17UL
+ #define CMDQ_BASE_OPCODE_QUERY_GID 0x18UL
+ #define CMDQ_BASE_OPCODE_CREATE_QP1 0x13UL
+ #define CMDQ_BASE_OPCODE_DESTROY_QP1 0x14UL
+ #define CMDQ_BASE_OPCODE_CREATE_AH 0x15UL
+ #define CMDQ_BASE_OPCODE_DESTROY_AH 0x16UL
+ #define CMDQ_BASE_OPCODE_INITIALIZE_FW 0x80UL
+ #define CMDQ_BASE_OPCODE_DEINITIALIZE_FW 0x81UL
+ #define CMDQ_BASE_OPCODE_STOP_FUNC 0x82UL
+ #define CMDQ_BASE_OPCODE_QUERY_FUNC 0x83UL
+ #define CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES 0x84UL
+ #define CMDQ_BASE_OPCODE_READ_CONTEXT 0x85UL
+ #define CMDQ_BASE_OPCODE_VF_BACKCHANNEL_REQUEST 0x86UL
+ #define CMDQ_BASE_OPCODE_READ_VF_MEMORY 0x87UL
+ #define CMDQ_BASE_OPCODE_COMPLETE_VF_REQUEST 0x88UL
+ #define CMDQ_BASE_OPCODE_EXTEND_CONTEXT_ARRAY_DEPRECATED 0x89UL
+ #define CMDQ_BASE_OPCODE_MAP_TC_TO_COS 0x8aUL
+ #define CMDQ_BASE_OPCODE_QUERY_VERSION 0x8bUL
+ #define CMDQ_BASE_OPCODE_MODIFY_ROCE_CC 0x8cUL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_CC 0x8dUL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE 0x8fUL
+ #define CMDQ_BASE_OPCODE_MODIFY_CQ 0x90UL
+ #define CMDQ_BASE_OPCODE_QUERY_QP_EXTEND 0x91UL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
+ #define CMDQ_BASE_OPCODE_ORCHESTRATE_QID_MIGRATION 0x93UL
+ #define CMDQ_BASE_OPCODE_CREATE_QP_BATCH 0x94UL
+ #define CMDQ_BASE_OPCODE_DESTROY_QP_BATCH 0x95UL
+ #define CMDQ_BASE_OPCODE_ALLOCATE_ROCE_STATS_EXT_CTX 0x96UL
+ #define CMDQ_BASE_OPCODE_DEALLOCATE_ROCE_STATS_EXT_CTX 0x97UL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT_V2 0x98UL
+ #define CMDQ_BASE_OPCODE_PNO_STATS_CONFIG 0x99UL
+ #define CMDQ_BASE_OPCODE_PNO_DEBUG_TUNNEL_CONFIG 0x9aUL
+ #define CMDQ_BASE_OPCODE_SET_PNO_FABRIC_NEXTHOP_MAC 0x9bUL
+ #define CMDQ_BASE_OPCODE_PNO_PATH_STRPATH_CONFIG 0x9cUL
+ #define CMDQ_BASE_OPCODE_PNO_PATH_QUERY 0x9dUL
+ #define CMDQ_BASE_OPCODE_PNO_PATH_ACCESS_CONTROL 0x9eUL
+ #define CMDQ_BASE_OPCODE_QUERY_PNO_FABRIC_NEXTHOP_IP 0x9fUL
+ #define CMDQ_BASE_OPCODE_PNO_PATH_PLANE_CONFIG 0xa0UL
+ #define CMDQ_BASE_OPCODE_PNO_TUNNEL_CLOSE 0xa1UL
+ #define CMDQ_BASE_OPCODE_PNO_HOST_PROCESSING_DONE 0xa2UL
+ #define CMDQ_BASE_OPCODE_PNO_STATS_QPARAM 0xa3UL
+ #define CMDQ_BASE_OPCODE_PATH_PROBE_CFG 0xa4UL
+ #define CMDQ_BASE_OPCODE_PATH_PROBE_DISABLE 0xa5UL
+ #define CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG 0xa6UL
+ #define CMDQ_BASE_OPCODE_ROCE_CFG 0xa7UL
+ #define CMDQ_BASE_OPCODE_PNO_EV_MONITORING_CONFIG 0xa8UL
+ #define CMDQ_BASE_OPCODE_LAST CMDQ_BASE_OPCODE_PNO_EV_MONITORING_CONFIG
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
+/* creq_base (size:128b/16B) */
+struct creq_base {
+ u8 type;
+ #define CREQ_BASE_TYPE_MASK 0x3fUL
+ #define CREQ_BASE_TYPE_SFT 0
+ #define CREQ_BASE_TYPE_QP_EVENT 0x38UL
+ #define CREQ_BASE_TYPE_FUNC_EVENT 0x3aUL
+ #define CREQ_BASE_TYPE_LAST CREQ_BASE_TYPE_FUNC_EVENT
+ u8 reserved56[7];
+ u8 v;
+ #define CREQ_BASE_V 0x1UL
+ u8 event;
+ u8 reserved48[6];
+};
+
+/* roce_stats_ext_ctx (size:1920b/240B) */
+struct roce_stats_ext_ctx {
+ __le64 tx_atomic_req_pkts;
+ __le64 tx_read_req_pkts;
+ __le64 tx_read_res_pkts;
+ __le64 tx_write_req_pkts;
+ __le64 tx_rc_send_req_pkts;
+ __le64 tx_ud_send_req_pkts;
+ __le64 tx_cnp_pkts;
+ __le64 tx_roce_pkts;
+ __le64 tx_roce_bytes;
+ __le64 rx_out_of_buffer_pkts;
+ __le64 rx_out_of_sequence_pkts;
+ __le64 dup_req;
+ __le64 missing_resp;
+ __le64 seq_err_naks_rcvd;
+ __le64 rnr_naks_rcvd;
+ __le64 to_retransmits;
+ __le64 rx_atomic_req_pkts;
+ __le64 rx_read_req_pkts;
+ __le64 rx_read_res_pkts;
+ __le64 rx_write_req_pkts;
+ __le64 rx_rc_send_pkts;
+ __le64 rx_ud_send_pkts;
+ __le64 rx_dcn_payload_cut;
+ __le64 rx_ecn_marked_pkts;
+ __le64 rx_cnp_pkts;
+ __le64 rx_roce_pkts;
+ __le64 rx_roce_bytes;
+ __le64 rx_roce_good_pkts;
+ __le64 rx_roce_good_bytes;
+ __le64 rx_ack_pkts;
+};
+
+/* cmdq_query_version (size:128b/16B) */
+struct cmdq_query_version {
+ u8 opcode;
+ #define CMDQ_QUERY_VERSION_OPCODE_QUERY_VERSION 0x8bUL
+ #define CMDQ_QUERY_VERSION_OPCODE_LAST CMDQ_QUERY_VERSION_OPCODE_QUERY_VERSION
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
+/* creq_query_version_resp (size:128b/16B) */
+struct creq_query_version_resp {
+ u8 type;
+ #define CREQ_QUERY_VERSION_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_VERSION_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_VERSION_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_VERSION_RESP_TYPE_LAST CREQ_QUERY_VERSION_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ u8 fw_maj;
+ u8 fw_minor;
+ u8 fw_bld;
+ u8 fw_rsvd;
+ u8 v;
+ #define CREQ_QUERY_VERSION_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_VERSION_RESP_EVENT_QUERY_VERSION 0x8bUL
+ #define CREQ_QUERY_VERSION_RESP_EVENT_LAST CREQ_QUERY_VERSION_RESP_EVENT_QUERY_VERSION
+ __le16 reserved16;
+ u8 intf_maj;
+ u8 intf_minor;
+ u8 intf_bld;
+ u8 intf_rsvd;
+};
+
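A hedged sketch of filling the generated command header; whether cmd_size and
resp_size are expressed in 16-byte units, and how cookies and the response
DMA address are managed, follow the bnxt_re convention and are assumptions
here, not something this header defines:

        struct cmdq_query_version req = {};

        req.opcode = CMDQ_QUERY_VERSION_OPCODE_QUERY_VERSION;
        req.cmd_size = sizeof(req) / 16;        /* assumed 16B units */
        req.resp_size = sizeof(struct creq_query_version_resp) / 16;
        req.cookie = cpu_to_le16(cookie);       /* hypothetical sequence id */
        req.resp_addr = cpu_to_le64(resp_dma);  /* hypothetical DMA address */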
+/* cmdq_initialize_fw (size:1024b/128B) */
+struct cmdq_initialize_fw {
+ u8 opcode;
+ #define CMDQ_INITIALIZE_FW_OPCODE_INITIALIZE_FW 0x80UL
+ #define CMDQ_INITIALIZE_FW_OPCODE_LAST CMDQ_INITIALIZE_FW_OPCODE_INITIALIZE_FW
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_INITIALIZE_FW_FLAGS_MRAV_RESERVATION_SPLIT 0x1UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED 0x2UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_DRV_VERSION 0x4UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED 0x8UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT 0x10UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_DESTROY_CONTEXT_SB_SUPPORTED 0x20UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_DESTROY_UDCC_SESSION_DATA_SB_SUPPORTED 0x40UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED 0x80UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ u8 qpc_pg_size_qpc_lvl;
+ #define CMDQ_INITIALIZE_FW_QPC_LVL_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_QPC_LVL_SFT 0
+ #define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_0 0x0UL
+ #define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_1 0x1UL
+ #define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_2 0x2UL
+ #define CMDQ_INITIALIZE_FW_QPC_LVL_LAST CMDQ_INITIALIZE_FW_QPC_LVL_LVL_2
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT 4
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_LAST CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G
+ u8 mrw_pg_size_mrw_lvl;
+ #define CMDQ_INITIALIZE_FW_MRW_LVL_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_MRW_LVL_SFT 0
+ #define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_0 0x0UL
+ #define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_1 0x1UL
+ #define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_2 0x2UL
+ #define CMDQ_INITIALIZE_FW_MRW_LVL_LAST CMDQ_INITIALIZE_FW_MRW_LVL_LVL_2
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_SFT 4
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_LAST CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_1G
+ u8 srq_pg_size_srq_lvl;
+ #define CMDQ_INITIALIZE_FW_SRQ_LVL_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_SRQ_LVL_SFT 0
+ #define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_0 0x0UL
+ #define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_1 0x1UL
+ #define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_2 0x2UL
+ #define CMDQ_INITIALIZE_FW_SRQ_LVL_LAST CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_2
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_SFT 4
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_LAST CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_1G
+ u8 cq_pg_size_cq_lvl;
+ #define CMDQ_INITIALIZE_FW_CQ_LVL_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_CQ_LVL_SFT 0
+ #define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_0 0x0UL
+ #define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_1 0x1UL
+ #define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_2 0x2UL
+ #define CMDQ_INITIALIZE_FW_CQ_LVL_LAST CMDQ_INITIALIZE_FW_CQ_LVL_LVL_2
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_SFT 4
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_LAST CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_1G
+ u8 tqm_pg_size_tqm_lvl;
+ #define CMDQ_INITIALIZE_FW_TQM_LVL_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_TQM_LVL_SFT 0
+ #define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_0 0x0UL
+ #define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_1 0x1UL
+ #define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_2 0x2UL
+ #define CMDQ_INITIALIZE_FW_TQM_LVL_LAST CMDQ_INITIALIZE_FW_TQM_LVL_LVL_2
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_SFT 4
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_LAST CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_1G
+ u8 tim_pg_size_tim_lvl;
+ #define CMDQ_INITIALIZE_FW_TIM_LVL_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_TIM_LVL_SFT 0
+ #define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_0 0x0UL
+ #define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_1 0x1UL
+ #define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_2 0x2UL
+ #define CMDQ_INITIALIZE_FW_TIM_LVL_LAST CMDQ_INITIALIZE_FW_TIM_LVL_LVL_2
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_SFT 4
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_LAST CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G
+ __le16 log2_dbr_pg_size;
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
+ #define CMDQ_INITIALIZE_FW_RSVD_MASK 0xfff0UL
+ #define CMDQ_INITIALIZE_FW_RSVD_SFT 4
+ __le64 qpc_page_dir;
+ __le64 mrw_page_dir;
+ __le64 srq_page_dir;
+ __le64 cq_page_dir;
+ __le64 tqm_page_dir;
+ __le64 tim_page_dir;
+ __le32 number_of_qp;
+ __le32 number_of_mrw;
+ __le32 number_of_srq;
+ __le32 number_of_cq;
+ __le32 max_qp_per_vf;
+ __le32 max_mrw_per_vf;
+ __le32 max_srq_per_vf;
+ __le32 max_cq_per_vf;
+ __le32 max_gid_per_vf;
+ __le32 stat_ctx_id;
+ u8 drv_hsi_ver_maj;
+ u8 drv_hsi_ver_min;
+ u8 drv_hsi_ver_upd;
+ u8 unused40[5];
+ __le16 drv_build_ver_maj;
+ __le16 drv_build_ver_min;
+ __le16 drv_build_ver_upd;
+ __le16 drv_build_ver_patch;
+};
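+
+/*
+ * Example (illustrative sketch, not part of the generated interface):
+ * each *_pg_size_*_lvl byte above packs the PBL indirection level into
+ * bits [3:0] and the backing-store page size into bits [7:4], so the
+ * LVL_* and PG_SIZE_* codes can simply be OR'ed together. The helper
+ * name is hypothetical; a 2-level PBL of 2M pages for the CQ context
+ * backing store would be encoded as:
+ */
+static inline u8 bng_re_example_cq_pg_size_lvl(void)
+{
+ return CMDQ_INITIALIZE_FW_CQ_LVL_LVL_2 |
+        CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_2M;
+}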
+
+/* creq_initialize_fw_resp (size:128b/16B) */
+struct creq_initialize_fw_resp {
+ u8 type;
+ #define CREQ_INITIALIZE_FW_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_INITIALIZE_FW_RESP_TYPE_SFT 0
+ #define CREQ_INITIALIZE_FW_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_INITIALIZE_FW_RESP_TYPE_LAST CREQ_INITIALIZE_FW_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_INITIALIZE_FW_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_INITIALIZE_FW_RESP_EVENT_INITIALIZE_FW 0x80UL
+ #define CREQ_INITIALIZE_FW_RESP_EVENT_LAST CREQ_INITIALIZE_FW_RESP_EVENT_INITIALIZE_FW
+ u8 udcc_session_size;
+ u8 reserved40[5];
+};
+
+/* cmdq_deinitialize_fw (size:128b/16B) */
+struct cmdq_deinitialize_fw {
+ u8 opcode;
+ #define CMDQ_DEINITIALIZE_FW_OPCODE_DEINITIALIZE_FW 0x81UL
+ #define CMDQ_DEINITIALIZE_FW_OPCODE_LAST CMDQ_DEINITIALIZE_FW_OPCODE_DEINITIALIZE_FW
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
+/* creq_deinitialize_fw_resp (size:128b/16B) */
+struct creq_deinitialize_fw_resp {
+ u8 type;
+ #define CREQ_DEINITIALIZE_FW_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DEINITIALIZE_FW_RESP_TYPE_SFT 0
+ #define CREQ_DEINITIALIZE_FW_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DEINITIALIZE_FW_RESP_TYPE_LAST CREQ_DEINITIALIZE_FW_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_DEINITIALIZE_FW_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DEINITIALIZE_FW_RESP_EVENT_DEINITIALIZE_FW 0x81UL
+ #define CREQ_DEINITIALIZE_FW_RESP_EVENT_LAST CREQ_DEINITIALIZE_FW_RESP_EVENT_DEINITIALIZE_FW
+ u8 reserved48[6];
+};
+
+/* cmdq_create_qp (size:1152b/144B) */
+struct cmdq_create_qp {
+ u8 opcode;
+ #define CMDQ_CREATE_QP_OPCODE_CREATE_QP 0x1UL
+ #define CMDQ_CREATE_QP_OPCODE_LAST CMDQ_CREATE_QP_OPCODE_CREATE_QP
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 qp_handle;
+ __le32 qp_flags;
+ #define CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED 0x1UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION 0x2UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED 0x8UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED 0x10UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED 0x20UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_RESPONDER_UD_CQE_WITH_CFA 0x40UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED 0x80UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_EXPRESS_MODE_ENABLED 0x100UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_STEERING_TAG_VALID 0x200UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED 0x400UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_CTX_VALID 0x800UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_SCHQ_ID_VALID 0x1000UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_EROCE_VALID 0x2000UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_RQ_PBL_PG_SIZE_VALID 0x4000UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_SQ_PBL_PG_SIZE_VALID 0x8000UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_LAST CMDQ_CREATE_QP_QP_FLAGS_SQ_PBL_PG_SIZE_VALID
+ u8 type;
+ #define CMDQ_CREATE_QP_TYPE_RC 0x2UL
+ #define CMDQ_CREATE_QP_TYPE_UD 0x4UL
+ #define CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE 0x6UL
+ #define CMDQ_CREATE_QP_TYPE_GSI 0x7UL
+ #define CMDQ_CREATE_QP_TYPE_LAST CMDQ_CREATE_QP_TYPE_GSI
+ u8 sq_pg_size_sq_lvl;
+ #define CMDQ_CREATE_QP_SQ_LVL_MASK 0xfUL
+ #define CMDQ_CREATE_QP_SQ_LVL_SFT 0
+ #define CMDQ_CREATE_QP_SQ_LVL_LVL_0 0x0UL
+ #define CMDQ_CREATE_QP_SQ_LVL_LVL_1 0x1UL
+ #define CMDQ_CREATE_QP_SQ_LVL_LVL_2 0x2UL
+ #define CMDQ_CREATE_QP_SQ_LVL_LAST CMDQ_CREATE_QP_SQ_LVL_LVL_2
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_SFT 4
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_CREATE_QP_SQ_PG_SIZE_LAST CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G
+ u8 rq_pg_size_rq_lvl;
+ #define CMDQ_CREATE_QP_RQ_LVL_MASK 0xfUL
+ #define CMDQ_CREATE_QP_RQ_LVL_SFT 0
+ #define CMDQ_CREATE_QP_RQ_LVL_LVL_0 0x0UL
+ #define CMDQ_CREATE_QP_RQ_LVL_LVL_1 0x1UL
+ #define CMDQ_CREATE_QP_RQ_LVL_LVL_2 0x2UL
+ #define CMDQ_CREATE_QP_RQ_LVL_LAST CMDQ_CREATE_QP_RQ_LVL_LVL_2
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_SFT 4
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_CREATE_QP_RQ_PG_SIZE_LAST CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G
+ u8 unused_0;
+ __le32 dpi;
+ __le32 sq_size;
+ __le32 rq_size;
+ __le16 sq_fwo_sq_sge;
+ #define CMDQ_CREATE_QP_SQ_SGE_MASK 0xfUL
+ #define CMDQ_CREATE_QP_SQ_SGE_SFT 0
+ #define CMDQ_CREATE_QP_SQ_FWO_MASK 0xfff0UL
+ #define CMDQ_CREATE_QP_SQ_FWO_SFT 4
+ __le16 rq_fwo_rq_sge;
+ #define CMDQ_CREATE_QP_RQ_SGE_MASK 0xfUL
+ #define CMDQ_CREATE_QP_RQ_SGE_SFT 0
+ #define CMDQ_CREATE_QP_RQ_FWO_MASK 0xfff0UL
+ #define CMDQ_CREATE_QP_RQ_FWO_SFT 4
+ __le32 scq_cid;
+ __le32 rcq_cid;
+ __le32 srq_cid;
+ __le32 pd_id;
+ __le64 sq_pbl;
+ __le64 rq_pbl;
+ __le64 irrq_addr;
+ __le64 orrq_addr;
+ __le32 request_xid;
+ __le16 steering_tag;
+ __le16 sq_max_num_wqes;
+ __le32 ext_stats_ctx_id;
+ __le16 schq_id;
+ u8 sq_pbl_pg_size;
+ #define CMDQ_CREATE_QP_SQ_PBL_PG_SIZE_MASK 0xfUL
+ #define CMDQ_CREATE_QP_SQ_PBL_PG_SIZE_SFT 0
+ #define CMDQ_CREATE_QP_SQ_PBL_PG_SIZE_PG_4K 0x0UL
+ #define CMDQ_CREATE_QP_SQ_PBL_PG_SIZE_PG_8K 0x1UL
+ #define CMDQ_CREATE_QP_SQ_PBL_PG_SIZE_PG_64K 0x2UL
+ #define CMDQ_CREATE_QP_SQ_PBL_PG_SIZE_PG_2M 0x3UL
+ #define CMDQ_CREATE_QP_SQ_PBL_PG_SIZE_PG_8M 0x4UL
+ #define CMDQ_CREATE_QP_SQ_PBL_PG_SIZE_PG_1G 0x5UL
+ #define CMDQ_CREATE_QP_SQ_PBL_PG_SIZE_LAST CMDQ_CREATE_QP_SQ_PBL_PG_SIZE_PG_1G
+ u8 rq_pbl_pg_size;
+ #define CMDQ_CREATE_QP_RQ_PBL_PG_SIZE_MASK 0xfUL
+ #define CMDQ_CREATE_QP_RQ_PBL_PG_SIZE_SFT 0
+ #define CMDQ_CREATE_QP_RQ_PBL_PG_SIZE_PG_4K 0x0UL
+ #define CMDQ_CREATE_QP_RQ_PBL_PG_SIZE_PG_8K 0x1UL
+ #define CMDQ_CREATE_QP_RQ_PBL_PG_SIZE_PG_64K 0x2UL
+ #define CMDQ_CREATE_QP_RQ_PBL_PG_SIZE_PG_2M 0x3UL
+ #define CMDQ_CREATE_QP_RQ_PBL_PG_SIZE_PG_8M 0x4UL
+ #define CMDQ_CREATE_QP_RQ_PBL_PG_SIZE_PG_1G 0x5UL
+ #define CMDQ_CREATE_QP_RQ_PBL_PG_SIZE_LAST CMDQ_CREATE_QP_RQ_PBL_PG_SIZE_PG_1G
+ __le32 msn_iqp;
+ __le32 irrq_iqp;
+ __le32 orrq_iqp;
+ __le32 msn_size;
+ __le32 irrq_size;
+ __le32 orrq_size;
+ __le16 eroce;
+ #define CMDQ_CREATE_QP_EROCE_COS 0x1UL
+ #define CMDQ_CREATE_QP_EROCE_RESERVED_0_MASK 0xeUL
+ #define CMDQ_CREATE_QP_EROCE_RESERVED_0_SFT 1
+ #define CMDQ_CREATE_QP_EROCE_GRP_MASK 0xf0UL
+ #define CMDQ_CREATE_QP_EROCE_GRP_SFT 4
+ #define CMDQ_CREATE_QP_EROCE_CSIG_ENABLED 0x100UL
+ u8 reserved48[6];
+};
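+
+/*
+ * Example (hypothetical helper, not part of the generated interface):
+ * sq_fwo_sq_sge above carries the maximum SGEs per SQ WQE in bits [3:0]
+ * and the FWO (presumably the first-WQE offset) in bits [15:4]. A
+ * minimal sketch of the encoding, assuming cpu_to_le16() is in scope:
+ */
+static inline __le16 bng_re_example_sq_fwo_sge(u16 fwo, u8 nsge)
+{
+ return cpu_to_le16(((fwo << CMDQ_CREATE_QP_SQ_FWO_SFT) &
+                     CMDQ_CREATE_QP_SQ_FWO_MASK) |
+                    (nsge & CMDQ_CREATE_QP_SQ_SGE_MASK));
+}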
+
+/* creq_create_qp_resp (size:128b/16B) */
+struct creq_create_qp_resp {
+ u8 type;
+ #define CREQ_CREATE_QP_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_CREATE_QP_RESP_TYPE_SFT 0
+ #define CREQ_CREATE_QP_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_CREATE_QP_RESP_TYPE_LAST CREQ_CREATE_QP_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CREATE_QP_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_CREATE_QP_RESP_EVENT_CREATE_QP 0x1UL
+ #define CREQ_CREATE_QP_RESP_EVENT_LAST CREQ_CREATE_QP_RESP_EVENT_CREATE_QP
+ u8 optimized_transmit_enabled;
+ u8 context_size;
+ u8 reserved32[4];
+};
+
+/* cmdq_destroy_qp (size:192b/24B) */
+struct cmdq_destroy_qp {
+ u8 opcode;
+ #define CMDQ_DESTROY_QP_OPCODE_DESTROY_QP 0x2UL
+ #define CMDQ_DESTROY_QP_OPCODE_LAST CMDQ_DESTROY_QP_OPCODE_DESTROY_QP
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 qp_cid;
+ __le32 unused_0;
+};
+
+/* creq_destroy_qp_resp (size:128b/16B) */
+struct creq_destroy_qp_resp {
+ u8 type;
+ #define CREQ_DESTROY_QP_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DESTROY_QP_RESP_TYPE_SFT 0
+ #define CREQ_DESTROY_QP_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DESTROY_QP_RESP_TYPE_LAST CREQ_DESTROY_QP_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DESTROY_QP_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DESTROY_QP_RESP_EVENT_DESTROY_QP 0x2UL
+ #define CREQ_DESTROY_QP_RESP_EVENT_LAST CREQ_DESTROY_QP_RESP_EVENT_DESTROY_QP
+ __le16 udcc_session_id;
+ __le16 udcc_session_data_offset;
+ u8 flags;
+ #define CREQ_DESTROY_QP_RESP_FLAGS_UDCC_SESSION_DATA 0x1UL
+ #define CREQ_DESTROY_QP_RESP_FLAGS_UDCC_RTT_DATA 0x2UL
+ u8 udcc_session_data_size;
+};
+
+/* cmdq_modify_qp (size:1152b/144B) */
+struct cmdq_modify_qp {
+ u8 opcode;
+ #define CMDQ_MODIFY_QP_OPCODE_MODIFY_QP 0x3UL
+ #define CMDQ_MODIFY_QP_OPCODE_LAST CMDQ_MODIFY_QP_OPCODE_MODIFY_QP
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_MODIFY_QP_FLAGS_SRQ_USED 0x1UL
+ #define CMDQ_MODIFY_QP_FLAGS_EXCLUDE_QP_UDCC 0x2UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 qp_type;
+ #define CMDQ_MODIFY_QP_QP_TYPE_RC 0x2UL
+ #define CMDQ_MODIFY_QP_QP_TYPE_UD 0x4UL
+ #define CMDQ_MODIFY_QP_QP_TYPE_RAW_ETHERTYPE 0x6UL
+ #define CMDQ_MODIFY_QP_QP_TYPE_GSI 0x7UL
+ #define CMDQ_MODIFY_QP_QP_TYPE_LAST CMDQ_MODIFY_QP_QP_TYPE_GSI
+ __le64 resp_addr;
+ __le32 modify_mask;
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_STATE 0x1UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY 0x2UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS 0x4UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_PKEY 0x8UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_QKEY 0x10UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_DGID 0x20UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL 0x40UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX 0x80UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT 0x100UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS 0x200UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC 0x400UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_PINGPONG_PUSH_MODE 0x800UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU 0x1000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT 0x2000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT 0x4000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY 0x8000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN 0x10000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC 0x20000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER 0x40000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN 0x80000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC 0x100000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE 0x200000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE 0x400000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE 0x800000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE 0x1000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA 0x2000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID 0x4000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC 0x8000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID 0x10000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_ENABLE_CC 0x20000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_TOS_ECN 0x40000000UL
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP 0x80000000UL
+ __le32 qp_cid;
+ u8 network_type_en_sqd_async_notify_new_state;
+ #define CMDQ_MODIFY_QP_NEW_STATE_MASK 0xfUL
+ #define CMDQ_MODIFY_QP_NEW_STATE_SFT 0
+ #define CMDQ_MODIFY_QP_NEW_STATE_RESET 0x0UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_INIT 0x1UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_RTR 0x2UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_RTS 0x3UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_SQD 0x4UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_SQE 0x5UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_ERR 0x6UL
+ #define CMDQ_MODIFY_QP_NEW_STATE_LAST CMDQ_MODIFY_QP_NEW_STATE_ERR
+ #define CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY 0x10UL
+ #define CMDQ_MODIFY_QP_UNUSED1 0x20UL
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_MASK 0xc0UL
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_SFT 6
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1 (0x0UL << 6)
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4 (0x2UL << 6)
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6 (0x3UL << 6)
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_LAST CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6
+ u8 access;
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_MASK 0xffUL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_SFT 0
+ #define CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE 0x1UL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE 0x2UL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_READ 0x4UL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC 0x8UL
+ __le16 pkey;
+ __le32 qkey;
+ __le32 dgid[4];
+ __le32 flow_label;
+ __le16 sgid_index;
+ u8 hop_limit;
+ u8 traffic_class;
+ __le16 dest_mac[3];
+ u8 tos_dscp_tos_ecn;
+ #define CMDQ_MODIFY_QP_TOS_ECN_MASK 0x3UL
+ #define CMDQ_MODIFY_QP_TOS_ECN_SFT 0
+ #define CMDQ_MODIFY_QP_TOS_DSCP_MASK 0xfcUL
+ #define CMDQ_MODIFY_QP_TOS_DSCP_SFT 2
+ u8 path_mtu_pingpong_push_enable;
+ #define CMDQ_MODIFY_QP_PINGPONG_PUSH_ENABLE 0x1UL
+ #define CMDQ_MODIFY_QP_UNUSED3_MASK 0xeUL
+ #define CMDQ_MODIFY_QP_UNUSED3_SFT 1
+ #define CMDQ_MODIFY_QP_PATH_MTU_MASK 0xf0UL
+ #define CMDQ_MODIFY_QP_PATH_MTU_SFT 4
+ #define CMDQ_MODIFY_QP_PATH_MTU_MTU_256 (0x0UL << 4)
+ #define CMDQ_MODIFY_QP_PATH_MTU_MTU_512 (0x1UL << 4)
+ #define CMDQ_MODIFY_QP_PATH_MTU_MTU_1024 (0x2UL << 4)
+ #define CMDQ_MODIFY_QP_PATH_MTU_MTU_2048 (0x3UL << 4)
+ #define CMDQ_MODIFY_QP_PATH_MTU_MTU_4096 (0x4UL << 4)
+ #define CMDQ_MODIFY_QP_PATH_MTU_MTU_8192 (0x5UL << 4)
+ #define CMDQ_MODIFY_QP_PATH_MTU_LAST CMDQ_MODIFY_QP_PATH_MTU_MTU_8192
+ u8 timeout;
+ u8 retry_cnt;
+ u8 rnr_retry;
+ u8 min_rnr_timer;
+ __le32 rq_psn;
+ __le32 sq_psn;
+ u8 max_rd_atomic;
+ u8 max_dest_rd_atomic;
+ __le16 enable_cc;
+ #define CMDQ_MODIFY_QP_ENABLE_CC 0x1UL
+ #define CMDQ_MODIFY_QP_ENH_MODE_MASK 0x6UL
+ #define CMDQ_MODIFY_QP_ENH_MODE_SFT 1
+ #define CMDQ_MODIFY_QP_ENH_COS 0x8UL
+ #define CMDQ_MODIFY_QP_ENH_GRP_MASK 0xf0UL
+ #define CMDQ_MODIFY_QP_ENH_GRP_SFT 4
+ #define CMDQ_MODIFY_QP_UNUSED8_MASK 0xff00UL
+ #define CMDQ_MODIFY_QP_UNUSED8_SFT 8
+ __le32 sq_size;
+ __le32 rq_size;
+ __le16 sq_sge;
+ __le16 rq_sge;
+ __le32 max_inline_data;
+ __le32 dest_qp_id;
+ __le32 pingpong_push_dpi;
+ __le16 src_mac[3];
+ __le16 vlan_pcp_vlan_dei_vlan_id;
+ #define CMDQ_MODIFY_QP_VLAN_ID_MASK 0xfffUL
+ #define CMDQ_MODIFY_QP_VLAN_ID_SFT 0
+ #define CMDQ_MODIFY_QP_VLAN_DEI 0x1000UL
+ #define CMDQ_MODIFY_QP_VLAN_PCP_MASK 0xe000UL
+ #define CMDQ_MODIFY_QP_VLAN_PCP_SFT 13
+ __le64 irrq_addr;
+ __le64 orrq_addr;
+ __le32 ext_modify_mask;
+ #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_EXT_STATS_CTX 0x1UL
+ #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_SCHQ_ID_VALID 0x2UL
+ #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_UDP_SRC_PORT_VALID 0x4UL
+ #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_RATE_LIMIT_VALID 0x8UL
+ __le32 ext_stats_ctx_id;
+ __le16 schq_id;
+ __le16 udp_src_port;
+ __le32 rate_limit;
+};
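+
+/*
+ * Example (hypothetical, not part of the generated interface): a
+ * modify-QP command only applies the attributes whose bits are set in
+ * modify_mask. A minimal sketch of driving a QP to RTR with a 4096B
+ * path MTU; req is assumed to point at an otherwise initialized
+ * struct cmdq_modify_qp.
+ */
+static inline void bng_re_example_modify_to_rtr(struct cmdq_modify_qp *req)
+{
+ req->modify_mask = cpu_to_le32(CMDQ_MODIFY_QP_MODIFY_MASK_STATE |
+                                CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU);
+ req->network_type_en_sqd_async_notify_new_state =
+     CMDQ_MODIFY_QP_NEW_STATE_RTR;
+ req->path_mtu_pingpong_push_enable = CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
+}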
+
+/* creq_modify_qp_resp (size:128b/16B) */
+struct creq_modify_qp_resp {
+ u8 type;
+ #define CREQ_MODIFY_QP_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_MODIFY_QP_RESP_TYPE_SFT 0
+ #define CREQ_MODIFY_QP_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_MODIFY_QP_RESP_TYPE_LAST CREQ_MODIFY_QP_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_MODIFY_QP_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_MODIFY_QP_RESP_EVENT_MODIFY_QP 0x3UL
+ #define CREQ_MODIFY_QP_RESP_EVENT_LAST CREQ_MODIFY_QP_RESP_EVENT_MODIFY_QP
+ u8 pingpong_push_state_index_enabled;
+ #define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED 0x1UL
+ #define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_INDEX_MASK 0xeUL
+ #define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_INDEX_SFT 1
+ #define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_STATE 0x10UL
+ u8 shaper_allocation_status;
+ #define CREQ_MODIFY_QP_RESP_SHAPER_ALLOCATED 0x1UL
+ __le16 flags;
+ #define CREQ_MODIFY_QP_RESP_SESSION_ELIGIBLE 0x1UL
+ __le16 reserved16;
+};
+
+/* cmdq_query_qp (size:192b/24B) */
+struct cmdq_query_qp {
+ u8 opcode;
+ #define CMDQ_QUERY_QP_OPCODE_QUERY_QP 0x4UL
+ #define CMDQ_QUERY_QP_OPCODE_LAST CMDQ_QUERY_QP_OPCODE_QUERY_QP
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 qp_cid;
+ __le32 unused_0;
+};
+
+/* creq_query_qp_resp (size:128b/16B) */
+struct creq_query_qp_resp {
+ u8 type;
+ #define CREQ_QUERY_QP_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_QP_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_QP_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_QP_RESP_TYPE_LAST CREQ_QUERY_QP_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_QP_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_QP_RESP_EVENT_QUERY_QP 0x4UL
+ #define CREQ_QUERY_QP_RESP_EVENT_LAST CREQ_QUERY_QP_RESP_EVENT_QUERY_QP
+ u8 reserved48[6];
+};
+
+/* creq_query_qp_resp_sb (size:896b/112B) */
+struct creq_query_qp_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_QP_RESP_SB_OPCODE_QUERY_QP 0x4UL
+ #define CREQ_QUERY_QP_RESP_SB_OPCODE_LAST CREQ_QUERY_QP_RESP_SB_OPCODE_QUERY_QP
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ __le32 xid;
+ u8 en_sqd_async_notify_state;
+ #define CREQ_QUERY_QP_RESP_SB_STATE_MASK 0xfUL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_SFT 0
+ #define CREQ_QUERY_QP_RESP_SB_STATE_RESET 0x0UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_INIT 0x1UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_RTR 0x2UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_RTS 0x3UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_SQD 0x4UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_SQE 0x5UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_ERR 0x6UL
+ #define CREQ_QUERY_QP_RESP_SB_STATE_LAST CREQ_QUERY_QP_RESP_SB_STATE_ERR
+ #define CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY 0x10UL
+ #define CREQ_QUERY_QP_RESP_SB_UNUSED3_MASK 0xe0UL
+ #define CREQ_QUERY_QP_RESP_SB_UNUSED3_SFT 5
+ u8 access;
+ #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_MASK 0xffUL
+ #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_SFT 0
+ #define CREQ_QUERY_QP_RESP_SB_ACCESS_LOCAL_WRITE 0x1UL
+ #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_WRITE 0x2UL
+ #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_READ 0x4UL
+ #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC 0x8UL
+ __le16 pkey;
+ __le32 qkey;
+ __le16 udp_src_port;
+ __le16 reserved16;
+ __le32 dgid[4];
+ __le32 flow_label;
+ __le16 sgid_index;
+ u8 hop_limit;
+ u8 traffic_class;
+ __le16 dest_mac[3];
+ __le16 path_mtu_dest_vlan_id;
+ #define CREQ_QUERY_QP_RESP_SB_DEST_VLAN_ID_MASK 0xfffUL
+ #define CREQ_QUERY_QP_RESP_SB_DEST_VLAN_ID_SFT 0
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK 0xf000UL
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT 12
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_256 (0x0UL << 12)
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_512 (0x1UL << 12)
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_1024 (0x2UL << 12)
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_2048 (0x3UL << 12)
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_4096 (0x4UL << 12)
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_8192 (0x5UL << 12)
+ #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_LAST CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_8192
+ u8 timeout;
+ u8 retry_cnt;
+ u8 rnr_retry;
+ u8 min_rnr_timer;
+ __le32 rq_psn;
+ __le32 sq_psn;
+ u8 max_rd_atomic;
+ u8 max_dest_rd_atomic;
+ u8 tos_dscp_tos_ecn;
+ #define CREQ_QUERY_QP_RESP_SB_TOS_ECN_MASK 0x3UL
+ #define CREQ_QUERY_QP_RESP_SB_TOS_ECN_SFT 0
+ #define CREQ_QUERY_QP_RESP_SB_TOS_DSCP_MASK 0xfcUL
+ #define CREQ_QUERY_QP_RESP_SB_TOS_DSCP_SFT 2
+ u8 enable_cc;
+ #define CREQ_QUERY_QP_RESP_SB_ENABLE_CC 0x1UL
+ __le32 sq_size;
+ __le32 rq_size;
+ __le16 sq_sge;
+ __le16 rq_sge;
+ __le32 max_inline_data;
+ __le32 dest_qp_id;
+ __le16 port_id;
+ u8 unused_0;
+ u8 stat_collection_id;
+ __le16 src_mac[3];
+ __le16 vlan_pcp_vlan_dei_vlan_id;
+ #define CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK 0xfffUL
+ #define CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT 0
+ #define CREQ_QUERY_QP_RESP_SB_VLAN_DEI 0x1000UL
+ #define CREQ_QUERY_QP_RESP_SB_VLAN_PCP_MASK 0xe000UL
+ #define CREQ_QUERY_QP_RESP_SB_VLAN_PCP_SFT 13
+ __le32 rate_limit;
+ __le32 reserved32;
+};
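+
+/*
+ * Example (hypothetical helper): path_mtu_dest_vlan_id above combines
+ * the destination VLAN ID (bits [11:0]) with the path MTU code (bits
+ * [15:12]). A minimal sketch of unpacking the MTU code, assuming
+ * le16_to_cpu() is in scope:
+ */
+static inline u16 bng_re_example_sb_path_mtu(const struct creq_query_qp_resp_sb *sb)
+{
+ return (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
+         CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
+        CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
+}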
+
+/* cmdq_query_qp_extend (size:192b/24B) */
+struct cmdq_query_qp_extend {
+ u8 opcode;
+ #define CMDQ_QUERY_QP_EXTEND_OPCODE_QUERY_QP_EXTEND 0x91UL
+ #define CMDQ_QUERY_QP_EXTEND_OPCODE_LAST CMDQ_QUERY_QP_EXTEND_OPCODE_QUERY_QP_EXTEND
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 num_qps;
+ __le64 resp_addr;
+ __le32 function_id;
+ #define CMDQ_QUERY_QP_EXTEND_PF_NUM_MASK 0xffUL
+ #define CMDQ_QUERY_QP_EXTEND_PF_NUM_SFT 0
+ #define CMDQ_QUERY_QP_EXTEND_VF_NUM_MASK 0xffff00UL
+ #define CMDQ_QUERY_QP_EXTEND_VF_NUM_SFT 8
+ #define CMDQ_QUERY_QP_EXTEND_VF_VALID 0x1000000UL
+ __le32 current_index;
+};
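+
+/*
+ * Example (hypothetical helper, not part of the generated interface):
+ * function_id above packs the PF number into bits [7:0], the VF number
+ * into bits [23:8], and a VF-valid flag into bit 24:
+ */
+static inline __le32 bng_re_example_qpe_function_id(u8 pf, u16 vf, bool vf_valid)
+{
+ u32 val = pf;
+
+ val |= (vf << CMDQ_QUERY_QP_EXTEND_VF_NUM_SFT) &
+        CMDQ_QUERY_QP_EXTEND_VF_NUM_MASK;
+ if (vf_valid)
+     val |= CMDQ_QUERY_QP_EXTEND_VF_VALID;
+ return cpu_to_le32(val);
+}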
+
+/* creq_query_qp_extend_resp (size:128b/16B) */
+struct creq_query_qp_extend_resp {
+ u8 type;
+ #define CREQ_QUERY_QP_EXTEND_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_QP_EXTEND_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_QP_EXTEND_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_TYPE_LAST CREQ_QUERY_QP_EXTEND_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_QP_EXTEND_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_QP_EXTEND_RESP_EVENT_QUERY_QP_EXTEND 0x91UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_EVENT_LAST CREQ_QUERY_QP_EXTEND_RESP_EVENT_QUERY_QP_EXTEND
+ __le16 reserved16;
+ __le32 current_index;
+};
+
+/* creq_query_qp_extend_resp_sb (size:384b/48B) */
+struct creq_query_qp_extend_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_OPCODE_QUERY_QP_EXTEND 0x91UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_OPCODE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_OPCODE_QUERY_QP_EXTEND
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ __le32 xid;
+ u8 state;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_MASK 0xfUL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_SFT 0
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_RESET 0x0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_INIT 0x1UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_RTR 0x2UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_RTS 0x3UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_SQD 0x4UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_SQE 0x5UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_ERR 0x6UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_ERR
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_UNUSED4_MASK 0xf0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_UNUSED4_SFT 4
+ u8 reserved_8;
+ __le16 port_id;
+ __le32 qkey;
+ __le16 sgid_index;
+ u8 network_type;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV1 0x0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV2_IPV4 0x2UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV2_IPV6 0x3UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV2_IPV6
+ u8 unused_0;
+ __le32 dgid[4];
+ __le32 dest_qp_id;
+ u8 stat_collection_id;
+ u8 reserved2_8;
+ __le16 reserved_16;
+};
+
+/* creq_query_qp_extend_resp_sb_tlv (size:512b/64B) */
+struct creq_query_qp_extend_resp_sb_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ u8 total_size;
+ u8 reserved56[7];
+ u8 opcode;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_OPCODE_QUERY_QP_EXTEND 0x91UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_OPCODE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_OPCODE_QUERY_QP_EXTEND
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ __le32 xid;
+ u8 state;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_MASK 0xfUL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_SFT 0
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_RESET 0x0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_INIT 0x1UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_RTR 0x2UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_RTS 0x3UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_SQD 0x4UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_SQE 0x5UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_ERR 0x6UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_ERR
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_UNUSED4_MASK 0xf0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_UNUSED4_SFT 4
+ u8 reserved_8;
+ __le16 port_id;
+ __le32 qkey;
+ __le16 sgid_index;
+ u8 network_type;
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV1 0x0UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV2_IPV4 0x2UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV2_IPV6 0x3UL
+ #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV2_IPV6
+ u8 unused_0;
+ __le32 dgid[4];
+ __le32 dest_qp_id;
+ u8 stat_collection_id;
+ u8 reserved2_8;
+ __le16 reserved_16;
+};
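+
+/*
+ * Example (hypothetical helper): per the TLV_FLAGS_MORE_* codes above,
+ * a clear MORE bit marks the last TLV of a chained response, so a
+ * parser can walk the TLVs until this returns true:
+ */
+static inline bool bng_re_example_tlv_is_last(const struct creq_query_qp_extend_resp_sb_tlv *sb)
+{
+ return !(sb->tlv_flags & CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE);
+}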
+
+/* cmdq_create_srq (size:512b/64B) */
+struct cmdq_create_srq {
+ u8 opcode;
+ #define CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ 0x5UL
+ #define CMDQ_CREATE_SRQ_OPCODE_LAST CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_CREATE_SRQ_FLAGS_STEERING_TAG_VALID 0x1UL
+ #define CMDQ_CREATE_SRQ_FLAGS_PBL_PG_SIZE_VALID 0x2UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 srq_handle;
+ __le16 pg_size_lvl;
+ #define CMDQ_CREATE_SRQ_LVL_MASK 0x3UL
+ #define CMDQ_CREATE_SRQ_LVL_SFT 0
+ #define CMDQ_CREATE_SRQ_LVL_LVL_0 0x0UL
+ #define CMDQ_CREATE_SRQ_LVL_LVL_1 0x1UL
+ #define CMDQ_CREATE_SRQ_LVL_LVL_2 0x2UL
+ #define CMDQ_CREATE_SRQ_LVL_LAST CMDQ_CREATE_SRQ_LVL_LVL_2
+ #define CMDQ_CREATE_SRQ_PG_SIZE_MASK 0x1cUL
+ #define CMDQ_CREATE_SRQ_PG_SIZE_SFT 2
+ #define CMDQ_CREATE_SRQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define CMDQ_CREATE_SRQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define CMDQ_CREATE_SRQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define CMDQ_CREATE_SRQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define CMDQ_CREATE_SRQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define CMDQ_CREATE_SRQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define CMDQ_CREATE_SRQ_PG_SIZE_LAST CMDQ_CREATE_SRQ_PG_SIZE_PG_1G
+ #define CMDQ_CREATE_SRQ_UNUSED11_MASK 0xffe0UL
+ #define CMDQ_CREATE_SRQ_UNUSED11_SFT 5
+ __le16 eventq_id;
+ #define CMDQ_CREATE_SRQ_EVENTQ_ID_MASK 0xfffUL
+ #define CMDQ_CREATE_SRQ_EVENTQ_ID_SFT 0
+ #define CMDQ_CREATE_SRQ_UNUSED4_MASK 0xf000UL
+ #define CMDQ_CREATE_SRQ_UNUSED4_SFT 12
+ __le16 srq_size;
+ __le16 srq_fwo;
+ #define CMDQ_CREATE_SRQ_SRQ_FWO_MASK 0xfffUL
+ #define CMDQ_CREATE_SRQ_SRQ_FWO_SFT 0
+ #define CMDQ_CREATE_SRQ_SRQ_SGE_MASK 0xf000UL
+ #define CMDQ_CREATE_SRQ_SRQ_SGE_SFT 12
+ __le32 dpi;
+ __le32 pd_id;
+ __le64 pbl;
+ __le16 steering_tag;
+ u8 pbl_pg_size;
+ #define CMDQ_CREATE_SRQ_PBL_PG_SIZE_MASK 0x7UL
+ #define CMDQ_CREATE_SRQ_PBL_PG_SIZE_SFT 0
+ #define CMDQ_CREATE_SRQ_PBL_PG_SIZE_PG_4K 0x0UL
+ #define CMDQ_CREATE_SRQ_PBL_PG_SIZE_PG_8K 0x1UL
+ #define CMDQ_CREATE_SRQ_PBL_PG_SIZE_PG_64K 0x2UL
+ #define CMDQ_CREATE_SRQ_PBL_PG_SIZE_PG_2M 0x3UL
+ #define CMDQ_CREATE_SRQ_PBL_PG_SIZE_PG_8M 0x4UL
+ #define CMDQ_CREATE_SRQ_PBL_PG_SIZE_PG_1G 0x5UL
+ #define CMDQ_CREATE_SRQ_PBL_PG_SIZE_LAST CMDQ_CREATE_SRQ_PBL_PG_SIZE_PG_1G
+ u8 reserved40[5];
+ __le64 reserved64;
+};
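+
+/*
+ * Example (illustrative sketch): unlike the u8 layout in cmdq_create_qp,
+ * the SRQ pg_size_lvl field is an __le16 with a 2-bit PBL level in bits
+ * [1:0] and a 3-bit page-size code in bits [4:2]. The helper name is
+ * hypothetical; a single-level PBL of 64K pages would be:
+ */
+static inline __le16 bng_re_example_srq_pg_size_lvl(void)
+{
+ return cpu_to_le16(CMDQ_CREATE_SRQ_LVL_LVL_1 |
+                    CMDQ_CREATE_SRQ_PG_SIZE_PG_64K);
+}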
+
+/* creq_create_srq_resp (size:128b/16B) */
+struct creq_create_srq_resp {
+ u8 type;
+ #define CREQ_CREATE_SRQ_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_CREATE_SRQ_RESP_TYPE_SFT 0
+ #define CREQ_CREATE_SRQ_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_CREATE_SRQ_RESP_TYPE_LAST CREQ_CREATE_SRQ_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CREATE_SRQ_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_CREATE_SRQ_RESP_EVENT_CREATE_SRQ 0x5UL
+ #define CREQ_CREATE_SRQ_RESP_EVENT_LAST CREQ_CREATE_SRQ_RESP_EVENT_CREATE_SRQ
+ u8 context_size;
+ u8 reserved48[5];
+};
+
+/* cmdq_destroy_srq (size:192b/24B) */
+struct cmdq_destroy_srq {
+ u8 opcode;
+ #define CMDQ_DESTROY_SRQ_OPCODE_DESTROY_SRQ 0x6UL
+ #define CMDQ_DESTROY_SRQ_OPCODE_LAST CMDQ_DESTROY_SRQ_OPCODE_DESTROY_SRQ
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 srq_cid;
+ __le32 unused_0;
+};
+
+/* creq_destroy_srq_resp (size:128b/16B) */
+struct creq_destroy_srq_resp {
+ u8 type;
+ #define CREQ_DESTROY_SRQ_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DESTROY_SRQ_RESP_TYPE_SFT 0
+ #define CREQ_DESTROY_SRQ_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DESTROY_SRQ_RESP_TYPE_LAST CREQ_DESTROY_SRQ_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DESTROY_SRQ_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DESTROY_SRQ_RESP_EVENT_DESTROY_SRQ 0x6UL
+ #define CREQ_DESTROY_SRQ_RESP_EVENT_LAST CREQ_DESTROY_SRQ_RESP_EVENT_DESTROY_SRQ
+ __le16 enable_for_arm[3];
+ #define CREQ_DESTROY_SRQ_RESP_UNUSED0_MASK 0xffffUL
+ #define CREQ_DESTROY_SRQ_RESP_UNUSED0_SFT 0
+ #define CREQ_DESTROY_SRQ_RESP_ENABLE_FOR_ARM_MASK 0x30000UL
+ #define CREQ_DESTROY_SRQ_RESP_ENABLE_FOR_ARM_SFT 16
+};
+
+/* cmdq_query_srq (size:192b/24B) */
+struct cmdq_query_srq {
+ u8 opcode;
+ #define CMDQ_QUERY_SRQ_OPCODE_QUERY_SRQ 0x8UL
+ #define CMDQ_QUERY_SRQ_OPCODE_LAST CMDQ_QUERY_SRQ_OPCODE_QUERY_SRQ
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 srq_cid;
+ __le32 unused_0;
+};
+
+/* creq_query_srq_resp (size:128b/16B) */
+struct creq_query_srq_resp {
+ u8 type;
+ #define CREQ_QUERY_SRQ_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_SRQ_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_SRQ_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_SRQ_RESP_TYPE_LAST CREQ_QUERY_SRQ_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_SRQ_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_SRQ_RESP_EVENT_QUERY_SRQ 0x8UL
+ #define CREQ_QUERY_SRQ_RESP_EVENT_LAST CREQ_QUERY_SRQ_RESP_EVENT_QUERY_SRQ
+ u8 reserved48[6];
+};
+
+/* creq_query_srq_resp_sb (size:256b/32B) */
+struct creq_query_srq_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_SRQ_RESP_SB_OPCODE_QUERY_SRQ 0x8UL
+ #define CREQ_QUERY_SRQ_RESP_SB_OPCODE_LAST CREQ_QUERY_SRQ_RESP_SB_OPCODE_QUERY_SRQ
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ __le32 xid;
+ __le16 srq_limit;
+ __le16 reserved16;
+ __le32 data[4];
+};
+
+/* cmdq_create_cq (size:512b/64B) */
+struct cmdq_create_cq {
+ u8 opcode;
+ #define CMDQ_CREATE_CQ_OPCODE_CREATE_CQ 0x9UL
+ #define CMDQ_CREATE_CQ_OPCODE_LAST CMDQ_CREATE_CQ_OPCODE_CREATE_CQ
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_CREATE_CQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x1UL
+ #define CMDQ_CREATE_CQ_FLAGS_STEERING_TAG_VALID 0x2UL
+ #define CMDQ_CREATE_CQ_FLAGS_INFINITE_CQ_MODE 0x4UL
+ #define CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID 0x8UL
+ #define CMDQ_CREATE_CQ_FLAGS_PBL_PG_SIZE_VALID 0x10UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 cq_handle;
+ __le32 pg_size_lvl;
+ #define CMDQ_CREATE_CQ_LVL_MASK 0x3UL
+ #define CMDQ_CREATE_CQ_LVL_SFT 0
+ #define CMDQ_CREATE_CQ_LVL_LVL_0 0x0UL
+ #define CMDQ_CREATE_CQ_LVL_LVL_1 0x1UL
+ #define CMDQ_CREATE_CQ_LVL_LVL_2 0x2UL
+ #define CMDQ_CREATE_CQ_LVL_LAST CMDQ_CREATE_CQ_LVL_LVL_2
+ #define CMDQ_CREATE_CQ_PG_SIZE_MASK 0x1cUL
+ #define CMDQ_CREATE_CQ_PG_SIZE_SFT 2
+ #define CMDQ_CREATE_CQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define CMDQ_CREATE_CQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define CMDQ_CREATE_CQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define CMDQ_CREATE_CQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define CMDQ_CREATE_CQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define CMDQ_CREATE_CQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define CMDQ_CREATE_CQ_PG_SIZE_LAST CMDQ_CREATE_CQ_PG_SIZE_PG_1G
+ #define CMDQ_CREATE_CQ_UNUSED27_MASK 0xffffffe0UL
+ #define CMDQ_CREATE_CQ_UNUSED27_SFT 5
+ __le32 cq_fco_cnq_id;
+ #define CMDQ_CREATE_CQ_CNQ_ID_MASK 0xfffUL
+ #define CMDQ_CREATE_CQ_CNQ_ID_SFT 0
+ #define CMDQ_CREATE_CQ_CQ_FCO_MASK 0xfffff000UL
+ #define CMDQ_CREATE_CQ_CQ_FCO_SFT 12
+ __le32 dpi;
+ __le32 cq_size;
+ __le64 pbl;
+ __le16 steering_tag;
+ u8 pbl_pg_size;
+ #define CMDQ_CREATE_CQ_PBL_PG_SIZE_MASK 0x7UL
+ #define CMDQ_CREATE_CQ_PBL_PG_SIZE_SFT 0
+ #define CMDQ_CREATE_CQ_PBL_PG_SIZE_PG_4K 0x0UL
+ #define CMDQ_CREATE_CQ_PBL_PG_SIZE_PG_8K 0x1UL
+ #define CMDQ_CREATE_CQ_PBL_PG_SIZE_PG_64K 0x2UL
+ #define CMDQ_CREATE_CQ_PBL_PG_SIZE_PG_2M 0x3UL
+ #define CMDQ_CREATE_CQ_PBL_PG_SIZE_PG_8M 0x4UL
+ #define CMDQ_CREATE_CQ_PBL_PG_SIZE_PG_1G 0x5UL
+ #define CMDQ_CREATE_CQ_PBL_PG_SIZE_LAST CMDQ_CREATE_CQ_PBL_PG_SIZE_PG_1G
+ u8 reserved8_1;
+ __le32 coalescing;
+ #define CMDQ_CREATE_CQ_BUF_MAXTIME_MASK 0x1ffUL
+ #define CMDQ_CREATE_CQ_BUF_MAXTIME_SFT 0
+ #define CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK 0x3e00UL
+ #define CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT 9
+ #define CMDQ_CREATE_CQ_DURING_MAXBUF_MASK 0x7c000UL
+ #define CMDQ_CREATE_CQ_DURING_MAXBUF_SFT 14
+ #define CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE 0x80000UL
+ #define CMDQ_CREATE_CQ_UNUSED12_MASK 0xfff00000UL
+ #define CMDQ_CREATE_CQ_UNUSED12_SFT 20
+ __le64 reserved64;
+};
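+
+/*
+ * Example (hypothetical helper, not part of the generated interface):
+ * the coalescing word above packs buf_maxtime into bits [8:0],
+ * normal_maxbuf into bits [13:9] and during_maxbuf into bits [18:14];
+ * it is presumably only honored when CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID
+ * is set in flags. A minimal encoding sketch:
+ */
+static inline __le32 bng_re_example_cq_coalescing(u16 maxtime, u8 normal, u8 during)
+{
+ u32 val;
+
+ val = (maxtime << CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
+       CMDQ_CREATE_CQ_BUF_MAXTIME_MASK;
+ val |= (normal << CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) &
+        CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK;
+ val |= (during << CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) &
+        CMDQ_CREATE_CQ_DURING_MAXBUF_MASK;
+ return cpu_to_le32(val);
+}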
+
+/* creq_create_cq_resp (size:128b/16B) */
+struct creq_create_cq_resp {
+ u8 type;
+ #define CREQ_CREATE_CQ_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_CREATE_CQ_RESP_TYPE_SFT 0
+ #define CREQ_CREATE_CQ_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_CREATE_CQ_RESP_TYPE_LAST CREQ_CREATE_CQ_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CREATE_CQ_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_CREATE_CQ_RESP_EVENT_CREATE_CQ 0x9UL
+ #define CREQ_CREATE_CQ_RESP_EVENT_LAST CREQ_CREATE_CQ_RESP_EVENT_CREATE_CQ
+ u8 context_size;
+ u8 reserved48[5];
+};
+
+/* cmdq_destroy_cq (size:192b/24B) */
+struct cmdq_destroy_cq {
+ u8 opcode;
+ #define CMDQ_DESTROY_CQ_OPCODE_DESTROY_CQ 0xaUL
+ #define CMDQ_DESTROY_CQ_OPCODE_LAST CMDQ_DESTROY_CQ_OPCODE_DESTROY_CQ
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 cq_cid;
+ __le32 unused_0;
+};
+
+/* creq_destroy_cq_resp (size:128b/16B) */
+struct creq_destroy_cq_resp {
+ u8 type;
+ #define CREQ_DESTROY_CQ_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DESTROY_CQ_RESP_TYPE_SFT 0
+ #define CREQ_DESTROY_CQ_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DESTROY_CQ_RESP_TYPE_LAST CREQ_DESTROY_CQ_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DESTROY_CQ_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DESTROY_CQ_RESP_EVENT_DESTROY_CQ 0xaUL
+ #define CREQ_DESTROY_CQ_RESP_EVENT_LAST CREQ_DESTROY_CQ_RESP_EVENT_DESTROY_CQ
+ __le16 cq_arm_lvl;
+ #define CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_MASK 0x3UL
+ #define CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_SFT 0
+ __le16 total_cnq_events;
+ __le16 reserved16;
+};
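+
+/*
+ * Example (hypothetical helper): cq_arm_lvl above only uses bits [1:0];
+ * a minimal sketch of extracting the arm level reported at destroy time:
+ */
+static inline u16 bng_re_example_cq_arm_lvl(const struct creq_destroy_cq_resp *resp)
+{
+ return le16_to_cpu(resp->cq_arm_lvl) &
+        CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_MASK;
+}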
+
+/* cmdq_resize_cq (size:320b/40B) */
+struct cmdq_resize_cq {
+ u8 opcode;
+ #define CMDQ_RESIZE_CQ_OPCODE_RESIZE_CQ 0xcUL
+ #define CMDQ_RESIZE_CQ_OPCODE_LAST CMDQ_RESIZE_CQ_OPCODE_RESIZE_CQ
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_RESIZE_CQ_FLAGS_PBL_PG_SIZE_VALID 0x1UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 cq_cid;
+ __le32 new_cq_size_pg_size_lvl;
+ #define CMDQ_RESIZE_CQ_LVL_MASK 0x3UL
+ #define CMDQ_RESIZE_CQ_LVL_SFT 0
+ #define CMDQ_RESIZE_CQ_LVL_LVL_0 0x0UL
+ #define CMDQ_RESIZE_CQ_LVL_LVL_1 0x1UL
+ #define CMDQ_RESIZE_CQ_LVL_LVL_2 0x2UL
+ #define CMDQ_RESIZE_CQ_LVL_LAST CMDQ_RESIZE_CQ_LVL_LVL_2
+ #define CMDQ_RESIZE_CQ_PG_SIZE_MASK 0x1cUL
+ #define CMDQ_RESIZE_CQ_PG_SIZE_SFT 2
+ #define CMDQ_RESIZE_CQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define CMDQ_RESIZE_CQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define CMDQ_RESIZE_CQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define CMDQ_RESIZE_CQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define CMDQ_RESIZE_CQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define CMDQ_RESIZE_CQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define CMDQ_RESIZE_CQ_PG_SIZE_LAST CMDQ_RESIZE_CQ_PG_SIZE_PG_1G
+ #define CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK 0x1fffffe0UL
+ #define CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT 5
+ __le64 new_pbl;
+ __le32 new_cq_fco;
+ #define CMDQ_RESIZE_CQ_CQ_FCO_MASK 0xfffffUL
+ #define CMDQ_RESIZE_CQ_CQ_FCO_SFT 0
+ #define CMDQ_RESIZE_CQ_RSVD_MASK 0xfff00000UL
+ #define CMDQ_RESIZE_CQ_RSVD_SFT 20
+ u8 pbl_pg_size;
+ #define CMDQ_RESIZE_CQ_PBL_PG_SIZE_MASK 0x7UL
+ #define CMDQ_RESIZE_CQ_PBL_PG_SIZE_SFT 0
+ #define CMDQ_RESIZE_CQ_PBL_PG_SIZE_PG_4K 0x0UL
+ #define CMDQ_RESIZE_CQ_PBL_PG_SIZE_PG_8K 0x1UL
+ #define CMDQ_RESIZE_CQ_PBL_PG_SIZE_PG_64K 0x2UL
+ #define CMDQ_RESIZE_CQ_PBL_PG_SIZE_PG_2M 0x3UL
+ #define CMDQ_RESIZE_CQ_PBL_PG_SIZE_PG_8M 0x4UL
+ #define CMDQ_RESIZE_CQ_PBL_PG_SIZE_PG_1G 0x5UL
+ #define CMDQ_RESIZE_CQ_PBL_PG_SIZE_LAST CMDQ_RESIZE_CQ_PBL_PG_SIZE_PG_1G
+ u8 unused_0[3];
+};
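+
+/*
+ * Example (hypothetical helper, not part of the generated interface):
+ * new_cq_size_pg_size_lvl packs the PBL level into bits [1:0], the page
+ * size into bits [4:2] and the new CQE count into bits [28:5]. A sketch
+ * for a single-level PBL of 4K pages:
+ */
+static inline __le32 bng_re_example_resize_cq_size(u32 new_size)
+{
+ u32 val;
+
+ val = CMDQ_RESIZE_CQ_LVL_LVL_1 | CMDQ_RESIZE_CQ_PG_SIZE_PG_4K;
+ val |= (new_size << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
+        CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
+ return cpu_to_le32(val);
+}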
+
+/* creq_resize_cq_resp (size:128b/16B) */
+struct creq_resize_cq_resp {
+ u8 type;
+ #define CREQ_RESIZE_CQ_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_RESIZE_CQ_RESP_TYPE_SFT 0
+ #define CREQ_RESIZE_CQ_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_RESIZE_CQ_RESP_TYPE_LAST CREQ_RESIZE_CQ_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_RESIZE_CQ_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_RESIZE_CQ_RESP_EVENT_RESIZE_CQ 0xcUL
+ #define CREQ_RESIZE_CQ_RESP_EVENT_LAST CREQ_RESIZE_CQ_RESP_EVENT_RESIZE_CQ
+ u8 reserved48[6];
+};
+
+/* cmdq_allocate_mrw (size:256b/32B) */
+struct cmdq_allocate_mrw {
+ u8 opcode;
+ #define CMDQ_ALLOCATE_MRW_OPCODE_ALLOCATE_MRW 0xdUL
+ #define CMDQ_ALLOCATE_MRW_OPCODE_LAST CMDQ_ALLOCATE_MRW_OPCODE_ALLOCATE_MRW
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 mrw_handle;
+ u8 mrw_flags;
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MASK 0xfUL
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_SFT 0
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR 0x0UL
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR 0x1UL
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 0x2UL
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A 0x3UL
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B 0x4UL
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_LAST CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B
+ #define CMDQ_ALLOCATE_MRW_STEERING_TAG_VALID 0x10UL
+ #define CMDQ_ALLOCATE_MRW_UNUSED3_MASK 0xe0UL
+ #define CMDQ_ALLOCATE_MRW_UNUSED3_SFT 5
+ u8 access;
+ #define CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY 0x20UL
+ __le16 steering_tag;
+ __le32 pd_id;
+};
+
+/* creq_allocate_mrw_resp (size:128b/16B) */
+struct creq_allocate_mrw_resp {
+ u8 type;
+ #define CREQ_ALLOCATE_MRW_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_ALLOCATE_MRW_RESP_TYPE_SFT 0
+ #define CREQ_ALLOCATE_MRW_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_ALLOCATE_MRW_RESP_TYPE_LAST CREQ_ALLOCATE_MRW_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_ALLOCATE_MRW_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_ALLOCATE_MRW_RESP_EVENT_ALLOCATE_MRW 0xdUL
+ #define CREQ_ALLOCATE_MRW_RESP_EVENT_LAST CREQ_ALLOCATE_MRW_RESP_EVENT_ALLOCATE_MRW
+ u8 context_size;
+ u8 reserved48[5];
+};
+
+/* cmdq_deallocate_key (size:192b/24B) */
+struct cmdq_deallocate_key {
+ u8 opcode;
+ #define CMDQ_DEALLOCATE_KEY_OPCODE_DEALLOCATE_KEY 0xeUL
+ #define CMDQ_DEALLOCATE_KEY_OPCODE_LAST CMDQ_DEALLOCATE_KEY_OPCODE_DEALLOCATE_KEY
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ u8 mrw_flags;
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MASK 0xfUL
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_SFT 0
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MR 0x0UL
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_PMR 0x1UL
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE1 0x2UL
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2A 0x3UL
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2B 0x4UL
+ #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_LAST CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2B
+ #define CMDQ_DEALLOCATE_KEY_UNUSED4_MASK 0xf0UL
+ #define CMDQ_DEALLOCATE_KEY_UNUSED4_SFT 4
+ u8 unused24[3];
+ __le32 key;
+};
+
+/* creq_deallocate_key_resp (size:128b/16B) */
+struct creq_deallocate_key_resp {
+ u8 type;
+ #define CREQ_DEALLOCATE_KEY_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DEALLOCATE_KEY_RESP_TYPE_SFT 0
+ #define CREQ_DEALLOCATE_KEY_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DEALLOCATE_KEY_RESP_TYPE_LAST CREQ_DEALLOCATE_KEY_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DEALLOCATE_KEY_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DEALLOCATE_KEY_RESP_EVENT_DEALLOCATE_KEY 0xeUL
+ #define CREQ_DEALLOCATE_KEY_RESP_EVENT_LAST CREQ_DEALLOCATE_KEY_RESP_EVENT_DEALLOCATE_KEY
+ __le16 reserved16;
+ __le32 bound_window_info;
+};
+
+/* cmdq_register_mr (size:512b/64B) */
+struct cmdq_register_mr {
+ u8 opcode;
+ #define CMDQ_REGISTER_MR_OPCODE_REGISTER_MR 0xfUL
+ #define CMDQ_REGISTER_MR_OPCODE_LAST CMDQ_REGISTER_MR_OPCODE_REGISTER_MR
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR 0x1UL
+ #define CMDQ_REGISTER_MR_FLAGS_STEERING_TAG_VALID 0x2UL
+ #define CMDQ_REGISTER_MR_FLAGS_ENABLE_RO 0x4UL
+ #define CMDQ_REGISTER_MR_FLAGS_ENABLE_EROCE 0x8UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ u8 log2_pg_size_lvl;
+ #define CMDQ_REGISTER_MR_LVL_MASK 0x3UL
+ #define CMDQ_REGISTER_MR_LVL_SFT 0
+ #define CMDQ_REGISTER_MR_LVL_LVL_0 0x0UL
+ #define CMDQ_REGISTER_MR_LVL_LVL_1 0x1UL
+ #define CMDQ_REGISTER_MR_LVL_LVL_2 0x2UL
+ #define CMDQ_REGISTER_MR_LVL_LAST CMDQ_REGISTER_MR_LVL_LVL_2
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK 0x7cUL
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT 2
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4K (0xcUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_8K (0xdUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_64K (0x10UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_256K (0x12UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1M (0x14UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_2M (0x15UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4M (0x16UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G (0x1eUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_LAST CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G
+ #define CMDQ_REGISTER_MR_UNUSED1 0x80UL
+ u8 access;
+ #define CMDQ_REGISTER_MR_ACCESS_LOCAL_WRITE 0x1UL
+ #define CMDQ_REGISTER_MR_ACCESS_REMOTE_READ 0x2UL
+ #define CMDQ_REGISTER_MR_ACCESS_REMOTE_WRITE 0x4UL
+ #define CMDQ_REGISTER_MR_ACCESS_REMOTE_ATOMIC 0x8UL
+ #define CMDQ_REGISTER_MR_ACCESS_MW_BIND 0x10UL
+ #define CMDQ_REGISTER_MR_ACCESS_ZERO_BASED 0x20UL
+ __le16 log2_pbl_pg_size;
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK 0x1fUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT 0
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K 0xcUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K 0xdUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K 0x10UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K 0x12UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M 0x14UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M 0x15UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M 0x16UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G 0x1eUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_LAST CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G
+ #define CMDQ_REGISTER_MR_UNUSED11_MASK 0xffe0UL
+ #define CMDQ_REGISTER_MR_UNUSED11_SFT 5
+ __le32 key;
+ __le64 pbl;
+ __le64 va;
+ __le64 mr_size;
+ __le16 steering_tag;
+ u8 reserved48[6];
+ __le64 reserved64;
+};
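+
+/*
+ * Example (illustrative note plus hypothetical helper): unlike the
+ * enumerated PG_SIZE_* codes elsewhere in this file, the LOG2_PG_SIZE
+ * codes above are the log2 of the page size in bytes (0xc = 12 -> 4KB,
+ * 0x15 = 21 -> 2MB, 0x1e = 30 -> 1GB). Packing a 1-level PBL of 2M
+ * pages into log2_pg_size_lvl:
+ */
+static inline u8 bng_re_example_mr_pg_size_lvl(void)
+{
+ return CMDQ_REGISTER_MR_LVL_LVL_1 |
+        CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_2M;
+}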
+
+/* creq_register_mr_resp (size:128b/16B) */
+struct creq_register_mr_resp {
+ u8 type;
+ #define CREQ_REGISTER_MR_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_REGISTER_MR_RESP_TYPE_SFT 0
+ #define CREQ_REGISTER_MR_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_REGISTER_MR_RESP_TYPE_LAST CREQ_REGISTER_MR_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_REGISTER_MR_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_REGISTER_MR_RESP_EVENT_REGISTER_MR 0xfUL
+ #define CREQ_REGISTER_MR_RESP_EVENT_LAST CREQ_REGISTER_MR_RESP_EVENT_REGISTER_MR
+ u8 context_size;
+ u8 reserved48[5];
+};
+
+/* cmdq_deregister_mr (size:192b/24B) */
+struct cmdq_deregister_mr {
+ u8 opcode;
+ #define CMDQ_DEREGISTER_MR_OPCODE_DEREGISTER_MR 0x10UL
+ #define CMDQ_DEREGISTER_MR_OPCODE_LAST CMDQ_DEREGISTER_MR_OPCODE_DEREGISTER_MR
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_DEREGISTER_MR_FLAGS_ENABLE_EROCE 0x1UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 lkey;
+ __le32 unused_0;
+};
+
+/* creq_deregister_mr_resp (size:128b/16B) */
+struct creq_deregister_mr_resp {
+ u8 type;
+ #define CREQ_DEREGISTER_MR_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DEREGISTER_MR_RESP_TYPE_SFT 0
+ #define CREQ_DEREGISTER_MR_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DEREGISTER_MR_RESP_TYPE_LAST CREQ_DEREGISTER_MR_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DEREGISTER_MR_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DEREGISTER_MR_RESP_EVENT_DEREGISTER_MR 0x10UL
+ #define CREQ_DEREGISTER_MR_RESP_EVENT_LAST CREQ_DEREGISTER_MR_RESP_EVENT_DEREGISTER_MR
+ __le16 reserved16;
+ __le32 bound_windows;
+};
+
+/* cmdq_add_gid (size:384b/48B) */
+struct cmdq_add_gid {
+ u8 opcode;
+ #define CMDQ_ADD_GID_OPCODE_ADD_GID 0x11UL
+ #define CMDQ_ADD_GID_OPCODE_LAST CMDQ_ADD_GID_OPCODE_ADD_GID
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 gid[4];
+ __le16 src_mac[3];
+ __le16 vlan;
+ #define CMDQ_ADD_GID_VLAN_VLAN_EN_TPID_VLAN_ID_MASK 0xffffUL
+ #define CMDQ_ADD_GID_VLAN_VLAN_EN_TPID_VLAN_ID_SFT 0
+ #define CMDQ_ADD_GID_VLAN_VLAN_ID_MASK 0xfffUL
+ #define CMDQ_ADD_GID_VLAN_VLAN_ID_SFT 0
+ #define CMDQ_ADD_GID_VLAN_TPID_MASK 0x7000UL
+ #define CMDQ_ADD_GID_VLAN_TPID_SFT 12
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_88A8 (0x0UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_8100 (0x1UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_9100 (0x2UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_9200 (0x3UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_9300 (0x4UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG1 (0x5UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG2 (0x6UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG3 (0x7UL << 12)
+ #define CMDQ_ADD_GID_VLAN_TPID_LAST CMDQ_ADD_GID_VLAN_TPID_TPID_CFG3
+ #define CMDQ_ADD_GID_VLAN_VLAN_EN 0x8000UL
+ __le16 ipid;
+ __le16 stats_ctx;
+ #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID_STATS_CTX_ID_MASK 0xffffUL
+ #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID_STATS_CTX_ID_SFT 0
+ #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_ID_MASK 0x7fffUL
+ #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_ID_SFT 0
+ #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID 0x8000UL
+ __le16 host_gid_index;
+ __le16 unused_0;
+};
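+
+/*
+ * Example (hypothetical helper): the vlan word above packs the VLAN ID
+ * into bits [11:0], a TPID selector into bits [14:12] and an enable
+ * flag into bit 15. Tagging a GID entry with the standard 0x8100 TPID:
+ */
+static inline __le16 bng_re_example_gid_vlan(u16 vlan_id)
+{
+ return cpu_to_le16(CMDQ_ADD_GID_VLAN_VLAN_EN |
+                    CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
+                    (vlan_id & CMDQ_ADD_GID_VLAN_VLAN_ID_MASK));
+}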
+
+/* creq_add_gid_resp (size:128b/16B) */
+struct creq_add_gid_resp {
+ u8 type;
+ #define CREQ_ADD_GID_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_ADD_GID_RESP_TYPE_SFT 0
+ #define CREQ_ADD_GID_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_ADD_GID_RESP_TYPE_LAST CREQ_ADD_GID_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_ADD_GID_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_ADD_GID_RESP_EVENT_ADD_GID 0x11UL
+ #define CREQ_ADD_GID_RESP_EVENT_LAST CREQ_ADD_GID_RESP_EVENT_ADD_GID
+ u8 reserved48[6];
+};
+
+/* cmdq_delete_gid (size:192b/24B) */
+struct cmdq_delete_gid {
+ u8 opcode;
+ #define CMDQ_DELETE_GID_OPCODE_DELETE_GID 0x12UL
+ #define CMDQ_DELETE_GID_OPCODE_LAST CMDQ_DELETE_GID_OPCODE_DELETE_GID
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le16 gid_index;
+ __le16 host_gid_index;
+ u8 unused_0[4];
+};
+
+/* creq_delete_gid_resp (size:128b/16B) */
+struct creq_delete_gid_resp {
+ u8 type;
+ #define CREQ_DELETE_GID_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DELETE_GID_RESP_TYPE_SFT 0
+ #define CREQ_DELETE_GID_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DELETE_GID_RESP_TYPE_LAST CREQ_DELETE_GID_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DELETE_GID_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DELETE_GID_RESP_EVENT_DELETE_GID 0x12UL
+ #define CREQ_DELETE_GID_RESP_EVENT_LAST CREQ_DELETE_GID_RESP_EVENT_DELETE_GID
+ u8 reserved48[6];
+};
+
+/* cmdq_modify_gid (size:384b/48B) */
+struct cmdq_modify_gid {
+ u8 opcode;
+ #define CMDQ_MODIFY_GID_OPCODE_MODIFY_GID 0x17UL
+ #define CMDQ_MODIFY_GID_OPCODE_LAST CMDQ_MODIFY_GID_OPCODE_MODIFY_GID
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 gid[4];
+ __le16 src_mac[3];
+ __le16 vlan;
+ #define CMDQ_MODIFY_GID_VLAN_VLAN_ID_MASK 0xfffUL
+ #define CMDQ_MODIFY_GID_VLAN_VLAN_ID_SFT 0
+ #define CMDQ_MODIFY_GID_VLAN_TPID_MASK 0x7000UL
+ #define CMDQ_MODIFY_GID_VLAN_TPID_SFT 12
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_88A8 (0x0UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_8100 (0x1UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9100 (0x2UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9200 (0x3UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9300 (0x4UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG1 (0x5UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG2 (0x6UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG3 (0x7UL << 12)
+ #define CMDQ_MODIFY_GID_VLAN_TPID_LAST CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG3
+ #define CMDQ_MODIFY_GID_VLAN_VLAN_EN 0x8000UL
+ __le16 ipid;
+ __le16 gid_index;
+ __le16 stats_ctx;
+ #define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_ID_MASK 0x7fffUL
+ #define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_ID_SFT 0
+ #define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_VALID 0x8000UL
+ __le16 host_gid_index;
+};
+
+/* creq_modify_gid_resp (size:128b/16B) */
+struct creq_modify_gid_resp {
+ u8 type;
+ #define CREQ_MODIFY_GID_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_MODIFY_GID_RESP_TYPE_SFT 0
+ #define CREQ_MODIFY_GID_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_MODIFY_GID_RESP_TYPE_LAST CREQ_MODIFY_GID_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_MODIFY_GID_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_MODIFY_GID_RESP_EVENT_ADD_GID 0x11UL
+ #define CREQ_MODIFY_GID_RESP_EVENT_LAST CREQ_MODIFY_GID_RESP_EVENT_ADD_GID
+ u8 reserved48[6];
+};
+
+/* cmdq_query_gid (size:192b/24B) */
+struct cmdq_query_gid {
+ u8 opcode;
+ #define CMDQ_QUERY_GID_OPCODE_QUERY_GID 0x18UL
+ #define CMDQ_QUERY_GID_OPCODE_LAST CMDQ_QUERY_GID_OPCODE_QUERY_GID
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le16 gid_index;
+ u8 unused16[6];
+};
+
+/* creq_query_gid_resp (size:128b/16B) */
+struct creq_query_gid_resp {
+ u8 type;
+ #define CREQ_QUERY_GID_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_GID_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_GID_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_GID_RESP_TYPE_LAST CREQ_QUERY_GID_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_GID_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_GID_RESP_EVENT_QUERY_GID 0x18UL
+ #define CREQ_QUERY_GID_RESP_EVENT_LAST CREQ_QUERY_GID_RESP_EVENT_QUERY_GID
+ u8 reserved48[6];
+};
+
+/* creq_query_gid_resp_sb (size:320b/40B) */
+struct creq_query_gid_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_GID_RESP_SB_OPCODE_QUERY_GID 0x18UL
+ #define CREQ_QUERY_GID_RESP_SB_OPCODE_LAST CREQ_QUERY_GID_RESP_SB_OPCODE_QUERY_GID
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ __le32 gid[4];
+ __le16 src_mac[3];
+ __le16 vlan;
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN_TPID_VLAN_ID_MASK 0xffffUL
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN_TPID_VLAN_ID_SFT 0
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_ID_MASK 0xfffUL
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_ID_SFT 0
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_MASK 0x7000UL
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_SFT 12
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_88A8 (0x0UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_8100 (0x1UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9100 (0x2UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9200 (0x3UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9300 (0x4UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG1 (0x5UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG2 (0x6UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG3 (0x7UL << 12)
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_LAST CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG3
+ #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN 0x8000UL
+ __le16 ipid;
+ __le16 gid_index;
+ __le32 unused_0;
+};
+
+/* cmdq_create_qp1 (size:640b/80B) */
+struct cmdq_create_qp1 {
+ u8 opcode;
+ #define CMDQ_CREATE_QP1_OPCODE_CREATE_QP1 0x13UL
+ #define CMDQ_CREATE_QP1_OPCODE_LAST CMDQ_CREATE_QP1_OPCODE_CREATE_QP1
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 qp_handle;
+ __le32 qp_flags;
+ #define CMDQ_CREATE_QP1_QP_FLAGS_SRQ_USED 0x1UL
+ #define CMDQ_CREATE_QP1_QP_FLAGS_FORCE_COMPLETION 0x2UL
+ #define CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL
+ #define CMDQ_CREATE_QP1_QP_FLAGS_LAST CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE
+ u8 type;
+ #define CMDQ_CREATE_QP1_TYPE_GSI 0x1UL
+ #define CMDQ_CREATE_QP1_TYPE_LAST CMDQ_CREATE_QP1_TYPE_GSI
+ u8 sq_pg_size_sq_lvl;
+ #define CMDQ_CREATE_QP1_SQ_LVL_MASK 0xfUL
+ #define CMDQ_CREATE_QP1_SQ_LVL_SFT 0
+ #define CMDQ_CREATE_QP1_SQ_LVL_LVL_0 0x0UL
+ #define CMDQ_CREATE_QP1_SQ_LVL_LVL_1 0x1UL
+ #define CMDQ_CREATE_QP1_SQ_LVL_LVL_2 0x2UL
+ #define CMDQ_CREATE_QP1_SQ_LVL_LAST CMDQ_CREATE_QP1_SQ_LVL_LVL_2
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT 4
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_CREATE_QP1_SQ_PG_SIZE_LAST CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G
+ u8 rq_pg_size_rq_lvl;
+ #define CMDQ_CREATE_QP1_RQ_LVL_MASK 0xfUL
+ #define CMDQ_CREATE_QP1_RQ_LVL_SFT 0
+ #define CMDQ_CREATE_QP1_RQ_LVL_LVL_0 0x0UL
+ #define CMDQ_CREATE_QP1_RQ_LVL_LVL_1 0x1UL
+ #define CMDQ_CREATE_QP1_RQ_LVL_LVL_2 0x2UL
+ #define CMDQ_CREATE_QP1_RQ_LVL_LAST CMDQ_CREATE_QP1_RQ_LVL_LVL_2
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_MASK 0xf0UL
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT 4
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define CMDQ_CREATE_QP1_RQ_PG_SIZE_LAST CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G
+ u8 unused_0;
+ __le32 dpi;
+ __le32 sq_size;
+ __le32 rq_size;
+ __le16 sq_fwo_sq_sge;
+ #define CMDQ_CREATE_QP1_SQ_SGE_MASK 0xfUL
+ #define CMDQ_CREATE_QP1_SQ_SGE_SFT 0
+ #define CMDQ_CREATE_QP1_SQ_FWO_MASK 0xfff0UL
+ #define CMDQ_CREATE_QP1_SQ_FWO_SFT 4
+ __le16 rq_fwo_rq_sge;
+ #define CMDQ_CREATE_QP1_RQ_SGE_MASK 0xfUL
+ #define CMDQ_CREATE_QP1_RQ_SGE_SFT 0
+ #define CMDQ_CREATE_QP1_RQ_FWO_MASK 0xfff0UL
+ #define CMDQ_CREATE_QP1_RQ_FWO_SFT 4
+ __le32 scq_cid;
+ __le32 rcq_cid;
+ __le32 srq_cid;
+ __le32 pd_id;
+ __le64 sq_pbl;
+ __le64 rq_pbl;
+};
+
+/* creq_create_qp1_resp (size:128b/16B) */
+struct creq_create_qp1_resp {
+ u8 type;
+ #define CREQ_CREATE_QP1_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_CREATE_QP1_RESP_TYPE_SFT 0
+ #define CREQ_CREATE_QP1_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_CREATE_QP1_RESP_TYPE_LAST CREQ_CREATE_QP1_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CREATE_QP1_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_CREATE_QP1_RESP_EVENT_CREATE_QP1 0x13UL
+ #define CREQ_CREATE_QP1_RESP_EVENT_LAST CREQ_CREATE_QP1_RESP_EVENT_CREATE_QP1
+ u8 reserved48[6];
+};
+
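Every creq_*_resp in this file shares this 16-byte shape: `v` carries the valid/phase bit, `status` is the firmware completion code (0 on success), `event` echoes the opcode, and `xid` returns the id of the object just created. A hedged sketch of consuming this one (the helper name is invented for illustration; a real driver also tracks the expected phase of the `v` bit across queue wrap-around):

	static int bng_re_poll_create_qp1(struct creq_create_qp1_resp *resp,
					  u32 *qp1_id)
	{
		if (!(resp->v & CREQ_CREATE_QP1_RESP_V))
			return -EAGAIN;		/* entry not valid yet */
		if (resp->status)
			return -EIO;		/* firmware rejected the command */
		*qp1_id = le32_to_cpu(resp->xid);
		return 0;
	}
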
+/* cmdq_destroy_qp1 (size:192b/24B) */
+struct cmdq_destroy_qp1 {
+ u8 opcode;
+ #define CMDQ_DESTROY_QP1_OPCODE_DESTROY_QP1 0x14UL
+ #define CMDQ_DESTROY_QP1_OPCODE_LAST CMDQ_DESTROY_QP1_OPCODE_DESTROY_QP1
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 qp1_cid;
+ __le32 unused_0;
+};
+
+/* creq_destroy_qp1_resp (size:128b/16B) */
+struct creq_destroy_qp1_resp {
+ u8 type;
+ #define CREQ_DESTROY_QP1_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DESTROY_QP1_RESP_TYPE_SFT 0
+ #define CREQ_DESTROY_QP1_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DESTROY_QP1_RESP_TYPE_LAST CREQ_DESTROY_QP1_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DESTROY_QP1_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DESTROY_QP1_RESP_EVENT_DESTROY_QP1 0x14UL
+ #define CREQ_DESTROY_QP1_RESP_EVENT_LAST CREQ_DESTROY_QP1_RESP_EVENT_DESTROY_QP1
+ u8 reserved48[6];
+};
+
+/* cmdq_create_ah (size:512b/64B) */
+struct cmdq_create_ah {
+ u8 opcode;
+ #define CMDQ_CREATE_AH_OPCODE_CREATE_AH 0x15UL
+ #define CMDQ_CREATE_AH_OPCODE_LAST CMDQ_CREATE_AH_OPCODE_CREATE_AH
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 ah_handle;
+ __le32 dgid[4];
+ u8 type;
+ #define CMDQ_CREATE_AH_TYPE_V1 0x0UL
+ #define CMDQ_CREATE_AH_TYPE_V2IPV4 0x2UL
+ #define CMDQ_CREATE_AH_TYPE_V2IPV6 0x3UL
+ #define CMDQ_CREATE_AH_TYPE_LAST CMDQ_CREATE_AH_TYPE_V2IPV6
+ u8 hop_limit;
+ __le16 sgid_index;
+ __le32 dest_vlan_id_flow_label;
+ #define CMDQ_CREATE_AH_FLOW_LABEL_MASK 0xfffffUL
+ #define CMDQ_CREATE_AH_FLOW_LABEL_SFT 0
+ #define CMDQ_CREATE_AH_DEST_VLAN_ID_MASK 0xfff00000UL
+ #define CMDQ_CREATE_AH_DEST_VLAN_ID_SFT 20
+ __le32 pd_id;
+ __le32 unused_0;
+ __le16 dest_mac[3];
+ u8 traffic_class;
+ u8 enable_cc;
+ #define CMDQ_CREATE_AH_ENABLE_CC 0x1UL
+};
+
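dest_vlan_id_flow_label packs two unrelated fields into one little-endian dword: the 20-bit flow label in bits 19:0 and the 12-bit destination VLAN id in bits 31:20. A sketch of filling it, with `flow_label` and `vlan_id` standing in for caller-supplied host-order values:

	req.dest_vlan_id_flow_label =
		cpu_to_le32((flow_label & CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
			    ((vlan_id << CMDQ_CREATE_AH_DEST_VLAN_ID_SFT) &
			     CMDQ_CREATE_AH_DEST_VLAN_ID_MASK));
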
+/* creq_create_ah_resp (size:128b/16B) */
+struct creq_create_ah_resp {
+ u8 type;
+ #define CREQ_CREATE_AH_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_CREATE_AH_RESP_TYPE_SFT 0
+ #define CREQ_CREATE_AH_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_CREATE_AH_RESP_TYPE_LAST CREQ_CREATE_AH_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CREATE_AH_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_CREATE_AH_RESP_EVENT_CREATE_AH 0x15UL
+ #define CREQ_CREATE_AH_RESP_EVENT_LAST CREQ_CREATE_AH_RESP_EVENT_CREATE_AH
+ u8 reserved48[6];
+};
+
+/* cmdq_destroy_ah (size:192b/24B) */
+struct cmdq_destroy_ah {
+ u8 opcode;
+ #define CMDQ_DESTROY_AH_OPCODE_DESTROY_AH 0x16UL
+ #define CMDQ_DESTROY_AH_OPCODE_LAST CMDQ_DESTROY_AH_OPCODE_DESTROY_AH
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 ah_cid;
+ __le32 unused_0;
+};
+
+/* creq_destroy_ah_resp (size:128b/16B) */
+struct creq_destroy_ah_resp {
+ u8 type;
+ #define CREQ_DESTROY_AH_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DESTROY_AH_RESP_TYPE_SFT 0
+ #define CREQ_DESTROY_AH_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DESTROY_AH_RESP_TYPE_LAST CREQ_DESTROY_AH_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 xid;
+ u8 v;
+ #define CREQ_DESTROY_AH_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DESTROY_AH_RESP_EVENT_DESTROY_AH 0x16UL
+ #define CREQ_DESTROY_AH_RESP_EVENT_LAST CREQ_DESTROY_AH_RESP_EVENT_DESTROY_AH
+ u8 reserved48[6];
+};
+
+/* cmdq_query_roce_stats (size:192b/24B) */
+struct cmdq_query_roce_stats {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CMDQ_QUERY_ROCE_STATS_OPCODE_LAST CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_QUERY_ROCE_STATS_FLAGS_COLLECTION_ID 0x1UL
+ #define CMDQ_QUERY_ROCE_STATS_FLAGS_FUNCTION_ID 0x2UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 collection_id;
+ __le64 resp_addr;
+ __le32 function_id;
+ #define CMDQ_QUERY_ROCE_STATS_PF_NUM_MASK 0xffUL
+ #define CMDQ_QUERY_ROCE_STATS_PF_NUM_SFT 0
+ #define CMDQ_QUERY_ROCE_STATS_VF_NUM_MASK 0xffff00UL
+ #define CMDQ_QUERY_ROCE_STATS_VF_NUM_SFT 8
+ #define CMDQ_QUERY_ROCE_STATS_VF_VALID 0x1000000UL
+ __le32 reserved32;
+};
+
+/* creq_query_roce_stats_resp (size:128b/16B) */
+struct creq_query_roce_stats_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_LAST CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_STATS_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_LAST CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_stats_resp_sb (size:3072b/384B) */
+struct creq_query_roce_stats_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_LAST CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 rsvd;
+ __le32 num_counters;
+ __le32 rsvd1;
+ __le64 to_retransmits;
+ __le64 seq_err_naks_rcvd;
+ __le64 max_retry_exceeded;
+ __le64 rnr_naks_rcvd;
+ __le64 missing_resp;
+ __le64 unrecoverable_err;
+ __le64 bad_resp_err;
+ __le64 local_qp_op_err;
+ __le64 local_protection_err;
+ __le64 mem_mgmt_op_err;
+ __le64 remote_invalid_req_err;
+ __le64 remote_access_err;
+ __le64 remote_op_err;
+ __le64 dup_req;
+ __le64 res_exceed_max;
+ __le64 res_length_mismatch;
+ __le64 res_exceeds_wqe;
+ __le64 res_opcode_err;
+ __le64 res_rx_invalid_rkey;
+ __le64 res_rx_domain_err;
+ __le64 res_rx_no_perm;
+ __le64 res_rx_range_err;
+ __le64 res_tx_invalid_rkey;
+ __le64 res_tx_domain_err;
+ __le64 res_tx_no_perm;
+ __le64 res_tx_range_err;
+ __le64 res_irrq_oflow;
+ __le64 res_unsup_opcode;
+ __le64 res_unaligned_atomic;
+ __le64 res_rem_inv_err;
+ __le64 res_mem_error;
+ __le64 res_srq_err;
+ __le64 res_cmp_err;
+ __le64 res_invalid_dup_rkey;
+ __le64 res_wqe_format_err;
+ __le64 res_cq_load_err;
+ __le64 res_srq_load_err;
+ __le64 res_tx_pci_err;
+ __le64 res_rx_pci_err;
+ __le64 res_oos_drop_count;
+ __le64 active_qp_count_p0;
+ __le64 active_qp_count_p1;
+ __le64 active_qp_count_p2;
+ __le64 active_qp_count_p3;
+ __le64 xp_sq_overflow_err;
+ __le64 xp_rq_overflow_error;
+};
+
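This side buffer is DMA-ed by firmware to the resp_addr supplied in the command, and every counter is a free-running little-endian 64-bit value; num_counters appears to bound how many of the trailing __le64 slots the running firmware actually implements, which lets older firmware coexist with a newer structure layout. Reading a counter is then a single byte-swap, e.g. with `sb` pointing at the returned buffer:

	u64 to_retrans = le64_to_cpu(sb->to_retransmits);
	u64 rnr_naks   = le64_to_cpu(sb->rnr_naks_rcvd);
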
+/* cmdq_query_roce_stats_ext (size:192b/24B) */
+struct cmdq_query_roce_stats_ext {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS 0x92UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_LAST CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_COLLECTION_ID 0x1UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID 0x2UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 collection_id;
+ __le64 resp_addr;
+ __le32 function_id;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_MASK 0xffUL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_SFT 0
+ #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_MASK 0xffff00UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT 8
+ #define CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID 0x1000000UL
+ __le32 reserved32;
+};
+
+/* creq_query_roce_stats_ext_resp (size:128b/16B) */
+struct creq_query_roce_stats_ext_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_LAST CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT 0x92UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_LAST CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_stats_ext_resp_sb (size:2368b/296B) */
+struct creq_query_roce_stats_ext_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_LAST CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 rsvd;
+ __le64 rx_ack_pkts;
+ __le64 tx_atomic_req_pkts;
+ __le64 tx_read_req_pkts;
+ __le64 tx_read_res_pkts;
+ __le64 tx_write_req_pkts;
+ __le64 tx_send_req_pkts;
+ __le64 tx_roce_pkts;
+ __le64 tx_roce_bytes;
+ __le64 rx_atomic_req_pkts;
+ __le64 rx_read_req_pkts;
+ __le64 rx_read_res_pkts;
+ __le64 rx_write_req_pkts;
+ __le64 rx_send_req_pkts;
+ __le64 rx_roce_pkts;
+ __le64 rx_roce_bytes;
+ __le64 rx_roce_good_pkts;
+ __le64 rx_roce_good_bytes;
+ __le64 rx_out_of_buffer_pkts;
+ __le64 rx_out_of_sequence_pkts;
+ __le64 tx_cnp_pkts;
+ __le64 rx_cnp_pkts;
+ __le64 rx_ecn_marked_pkts;
+ __le64 tx_cnp_bytes;
+ __le64 rx_cnp_bytes;
+ __le64 seq_err_naks_rcvd;
+ __le64 rnr_naks_rcvd;
+ __le64 missing_resp;
+ __le64 to_retransmit;
+ __le64 dup_req;
+ __le64 rx_dcn_payload_cut;
+ __le64 te_bypassed;
+ __le64 tx_dcn_cnp;
+ __le64 rx_dcn_cnp;
+ __le64 rx_payload_cut;
+ __le64 rx_payload_cut_ignored;
+ __le64 rx_dcn_cnp_ignored;
+};
+
+/* cmdq_query_func (size:128b/16B) */
+struct cmdq_query_func {
+ u8 opcode;
+ #define CMDQ_QUERY_FUNC_OPCODE_QUERY_FUNC 0x83UL
+ #define CMDQ_QUERY_FUNC_OPCODE_LAST CMDQ_QUERY_FUNC_OPCODE_QUERY_FUNC
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
+/* creq_query_func_resp (size:128b/16B) */
+struct creq_query_func_resp {
+ u8 type;
+ #define CREQ_QUERY_FUNC_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_FUNC_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_FUNC_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_FUNC_RESP_TYPE_LAST CREQ_QUERY_FUNC_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_FUNC_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_FUNC_RESP_EVENT_QUERY_FUNC 0x83UL
+ #define CREQ_QUERY_FUNC_RESP_EVENT_LAST CREQ_QUERY_FUNC_RESP_EVENT_QUERY_FUNC
+ u8 reserved48[6];
+};
+
+/* creq_query_func_resp_sb (size:1728b/216B) */
+struct creq_query_func_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_FUNC_RESP_SB_OPCODE_QUERY_FUNC 0x83UL
+ #define CREQ_QUERY_FUNC_RESP_SB_OPCODE_LAST CREQ_QUERY_FUNC_RESP_SB_OPCODE_QUERY_FUNC
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 max_mr_size;
+ __le32 max_qp;
+ __le16 max_qp_wr;
+ __le16 dev_cap_flags;
+ #define CREQ_QUERY_FUNC_RESP_SB_RESIZE_QP 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_MASK 0xeUL
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_SFT 1
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN0 (0x0UL << 1)
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1 (0x1UL << 1)
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1_EXT (0x2UL << 1)
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN2 (0x3UL << 1)
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN2_EXT (0x4UL << 1)
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_LAST CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN2_EXT
+ #define CREQ_QUERY_FUNC_RESP_SB_EXT_STATS 0x10UL
+ #define CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC 0x20UL
+ #define CREQ_QUERY_FUNC_RESP_SB_OPTIMIZED_TRANSMIT_ENABLED 0x40UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CQE_V2 0x80UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PINGPONG_PUSH_MODE 0x100UL
+ #define CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED 0x200UL
+ #define CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED 0x400UL
+ #define CREQ_QUERY_FUNC_RESP_SB_LINK_AGGR_SUPPORTED 0x800UL
+ #define CREQ_QUERY_FUNC_RESP_SB_LINK_AGGR_SUPPORTED_VALID 0x1000UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PSEUDO_STATIC_QP_ALLOC_SUPPORTED 0x2000UL
+ #define CREQ_QUERY_FUNC_RESP_SB_EXPRESS_MODE_SUPPORTED 0x4000UL
+ #define CREQ_QUERY_FUNC_RESP_SB_INTERNAL_QUEUE_MEMORY 0x8000UL
+ __le32 max_cq;
+ __le32 max_cqe;
+ __le32 max_pd;
+ u8 max_sge;
+ u8 max_srq_sge;
+ u8 max_qp_rd_atom;
+ u8 max_qp_init_rd_atom;
+ __le32 max_mr;
+ __le32 max_mw;
+ __le32 max_raw_eth_qp;
+ __le32 max_ah;
+ __le32 max_fmr;
+ __le32 max_srq_wr;
+ __le32 max_pkeys;
+ __le32 max_inline_data;
+ u8 max_map_per_fmr;
+ u8 l2_db_space_size;
+ __le16 max_srq;
+ __le32 max_gid;
+ __le32 tqm_alloc_reqs[12];
+ __le32 max_dpi;
+ u8 max_sge_var_wqe;
+ u8 dev_cap_ext_flags;
+ #define CREQ_QUERY_FUNC_RESP_SB_ATOMIC_OPS_NOT_SUPPORTED 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_DRV_VERSION_RGTR_SUPPORTED 0x2UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CREATE_QP_BATCH_SUPPORTED 0x4UL
+ #define CREQ_QUERY_FUNC_RESP_SB_DESTROY_QP_BATCH_SUPPORTED 0x8UL
+ #define CREQ_QUERY_FUNC_RESP_SB_ROCE_STATS_EXT_CTX_SUPPORTED 0x10UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CREATE_SRQ_SGE_SUPPORTED 0x20UL
+ #define CREQ_QUERY_FUNC_RESP_SB_FIXED_SIZE_WQE_DISABLED 0x40UL
+ #define CREQ_QUERY_FUNC_RESP_SB_DCN_SUPPORTED 0x80UL
+ __le16 max_inline_data_var_wqe;
+ __le32 start_qid;
+ u8 max_msn_table_size;
+ u8 dev_cap_ext_flags_1;
+ #define CREQ_QUERY_FUNC_RESP_SB_PBL_PAGE_SIZE_SUPPORTED 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_INFINITE_RETRY_TO_RETX_SUPPORTED 0x2UL
+ __le16 dev_cap_ext_flags_2;
+ #define CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CHANGE_UDP_SRC_PORT_WQE_SUPPORTED 0x2UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED 0x4UL
+ #define CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED 0x8UL
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK 0x30UL
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_SFT 4
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_PSN_TABLE (0x0UL << 4)
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE (0x1UL << 4)
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+ #define CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED 0x40UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CHANGE_UDP_SRC_PORT_SUPPORTED 0x80UL
+ #define CREQ_QUERY_FUNC_RESP_SB_DESTROY_CONTEXT_SB_SUPPORTED 0x100UL
+ #define CREQ_QUERY_FUNC_RESP_SB_DEFAULT_ROCE_CC_PARAMS_SUPPORTED 0x200UL
+ #define CREQ_QUERY_FUNC_RESP_SB_MODIFY_QP_RATE_LIMIT_SUPPORTED 0x400UL
+ #define CREQ_QUERY_FUNC_RESP_SB_DESTROY_UDCC_SESSION_SB_SUPPORTED 0x800UL
+ #define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CQ_OVERFLOW_DETECTION_ENABLED 0x2000UL
+ #define CREQ_QUERY_FUNC_RESP_SB_ICRC_CHECK_DISABLE_SUPPORTED 0x4000UL
+ #define CREQ_QUERY_FUNC_RESP_SB_FORCE_MIRROR_ENABLE_SUPPORTED 0x8000UL
+ __le16 max_xp_qp_size;
+ __le16 create_qp_batch_size;
+ __le16 destroy_qp_batch_size;
+ __le16 max_srq_ext;
+ __le16 roce_cc_tlv_en_flags;
+ #define CREQ_QUERY_FUNC_RESP_SB_ROCE_CC_GEN2_TLV_EN 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_ROCE_CC_GEN1_EXT_TLV_EN 0x2UL
+ #define CREQ_QUERY_FUNC_RESP_SB_ROCE_CC_GEN2_EXT_TLV_EN 0x4UL
+ __le16 pno_flags;
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_PNO_ENABLED 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_PNO_ENABLED_ON_PF 0x2UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_PNO_EXT_ENABLED 0x4UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_PNO_EXT_ENABLED_ON_PF 0x8UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_PSP_ENABLED 0x10UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_PSP_ENABLED_ON_PF 0x20UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_DYNAMIC_TUNNELS_ENABLED 0x40UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_PER_TUNNEL_EV_MONITORING_SUPPORTED 0x80UL
+ u8 pno_tnl_dest_grp_auto;
+ u8 pno_max_tnl_per_endpoint;
+ u8 pno_cc_algo;
+ u8 pno_pf_num;
+ __le32 pno_max_endpoints;
+ u8 eroce_spec_dscp[2];
+ u8 eroce_pull_dscp[2];
+ u8 eroce_retx_dscp[2];
+ u8 eroce_rts_dscp[2];
+ u8 eroce_rerouted_dscp[2];
+ u8 eroce_trim_dscp;
+ u8 eroce_trim_last_hop_dscp;
+ u8 eroce_control_dscp;
+ u8 reserved24[3];
+ __le16 pno_num_debug_tunnels;
+ u8 pno_num_cos;
+ u8 reserved40[5];
+ __le32 pno_flags_ext;
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_EXT_PATH_UPDATE_RTS 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_EXT_PATH_EVENT 0x2UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_EXT_PATH_EXP_ARRAY 0x4UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_EXT_PATH_GEN_ARRAY 0x8UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_EXT_PATH_EXP_ARRAY_RANGE 0x10UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_EXT_PATH_PROBE 0x20UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_EXT_PATH_EVENT_PRECISE_CNT 0x40UL
+ #define CREQ_QUERY_FUNC_RESP_SB_PNO_FLAGS_EXT_PATH_SPRAYING_DISABLE 0x80UL
+ __le16 max_mpr;
+ u8 max_paths_per_path_ctx;
+ u8 max_plane;
+ u8 max_paths_per_plane;
+ u8 max_strpath_tiers;
+ u8 pno_telemetry_type;
+ u8 reserved9_8b;
+ __le32 max_path_ctx_strpath;
+ __le32 max_path_ctx_srv6;
+ __le32 rate_limit_max;
+ __le32 rate_limit_min;
+};
+
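dev_cap_flags and the dev_cap_ext_flags* words are plain capability bitmaps, so feature probing reduces to a byte-swap plus a bit test; the multi-bit CC_GENERATION field inside dev_cap_flags needs its _MASK/_SFT pair instead. For example, with `sb` pointing at the returned side buffer:

	u16 caps = le16_to_cpu(sb->dev_cap_flags);
	bool ext_stats = caps & CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
	u16 cc_gen = (caps & CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_MASK) >>
		     CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_SFT;
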
+/* cmdq_set_func_resources (size:448b/56B) */
+struct cmdq_set_func_resources {
+ u8 opcode;
+ #define CMDQ_SET_FUNC_RESOURCES_OPCODE_SET_FUNC_RESOURCES 0x84UL
+ #define CMDQ_SET_FUNC_RESOURCES_OPCODE_LAST CMDQ_SET_FUNC_RESOURCES_OPCODE_SET_FUNC_RESOURCES
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_SET_FUNC_RESOURCES_FLAGS_MRAV_RESERVATION_SPLIT 0x1UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 number_of_qp;
+ __le32 number_of_mrw;
+ __le32 number_of_srq;
+ __le32 number_of_cq;
+ __le32 max_qp_per_vf;
+ __le32 max_mrw_per_vf;
+ __le32 max_srq_per_vf;
+ __le32 max_cq_per_vf;
+ __le32 max_gid_per_vf;
+ __le32 stat_ctx_id;
+};
+
+/* creq_set_func_resources_resp (size:128b/16B) */
+struct creq_set_func_resources_resp {
+ u8 type;
+ #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_SFT 0
+ #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_LAST CREQ_SET_FUNC_RESOURCES_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_SET_FUNC_RESOURCES_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_SET_FUNC_RESOURCES_RESP_EVENT_SET_FUNC_RESOURCES 0x84UL
+ #define CREQ_SET_FUNC_RESOURCES_RESP_EVENT_LAST \
+ CREQ_SET_FUNC_RESOURCES_RESP_EVENT_SET_FUNC_RESOURCES
+ u8 reserved48[6];
+};
+
+/* cmdq_read_context (size:192b/24B) */
+struct cmdq_read_context {
+ u8 opcode;
+ #define CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT 0x85UL
+ #define CMDQ_READ_CONTEXT_OPCODE_LAST CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 xid;
+ u8 type;
+ #define CMDQ_READ_CONTEXT_TYPE_QPC 0x0UL
+ #define CMDQ_READ_CONTEXT_TYPE_CQ 0x1UL
+ #define CMDQ_READ_CONTEXT_TYPE_MRW 0x2UL
+ #define CMDQ_READ_CONTEXT_TYPE_SRQ 0x3UL
+ #define CMDQ_READ_CONTEXT_TYPE_LAST CMDQ_READ_CONTEXT_TYPE_SRQ
+ u8 unused_0[3];
+};
+
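cmdq_read_context is a debug-style command: `type` selects which on-chip context table to read and `xid` indexes into it, with the raw context image returned through the usual resp_addr side buffer. Requesting a QP context snapshot might look like this (illustrative; `qp_id` is the caller's QP id and fields not shown stay zeroed):

	struct cmdq_read_context req = {};

	req.opcode = CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT;
	req.type = CMDQ_READ_CONTEXT_TYPE_QPC;
	req.xid = cpu_to_le32(qp_id);
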
+/* creq_read_context (size:128b/16B) */
+struct creq_read_context {
+ u8 type;
+ #define CREQ_READ_CONTEXT_TYPE_MASK 0x3fUL
+ #define CREQ_READ_CONTEXT_TYPE_SFT 0
+ #define CREQ_READ_CONTEXT_TYPE_QP_EVENT 0x38UL
+ #define CREQ_READ_CONTEXT_TYPE_LAST CREQ_READ_CONTEXT_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_READ_CONTEXT_V 0x1UL
+ u8 event;
+ #define CREQ_READ_CONTEXT_EVENT_READ_CONTEXT 0x85UL
+ #define CREQ_READ_CONTEXT_EVENT_LAST CREQ_READ_CONTEXT_EVENT_READ_CONTEXT
+ __le16 reserved16;
+ __le32 reserved_32;
+};
+
+/* cmdq_map_tc_to_cos (size:192b/24B) */
+struct cmdq_map_tc_to_cos {
+ u8 opcode;
+ #define CMDQ_MAP_TC_TO_COS_OPCODE_MAP_TC_TO_COS 0x8aUL
+ #define CMDQ_MAP_TC_TO_COS_OPCODE_LAST CMDQ_MAP_TC_TO_COS_OPCODE_MAP_TC_TO_COS
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le16 cos0;
+ #define CMDQ_MAP_TC_TO_COS_COS0_NO_CHANGE 0xffffUL
+ #define CMDQ_MAP_TC_TO_COS_COS0_LAST CMDQ_MAP_TC_TO_COS_COS0_NO_CHANGE
+ __le16 cos1;
+ #define CMDQ_MAP_TC_TO_COS_COS1_DISABLE 0x8000UL
+ #define CMDQ_MAP_TC_TO_COS_COS1_NO_CHANGE 0xffffUL
+ #define CMDQ_MAP_TC_TO_COS_COS1_LAST CMDQ_MAP_TC_TO_COS_COS1_NO_CHANGE
+ __le32 unused_0;
+};
+
+/* creq_map_tc_to_cos_resp (size:128b/16B) */
+struct creq_map_tc_to_cos_resp {
+ u8 type;
+ #define CREQ_MAP_TC_TO_COS_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_MAP_TC_TO_COS_RESP_TYPE_SFT 0
+ #define CREQ_MAP_TC_TO_COS_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_MAP_TC_TO_COS_RESP_TYPE_LAST CREQ_MAP_TC_TO_COS_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_MAP_TC_TO_COS_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_MAP_TC_TO_COS_RESP_EVENT_MAP_TC_TO_COS 0x8aUL
+ #define CREQ_MAP_TC_TO_COS_RESP_EVENT_LAST CREQ_MAP_TC_TO_COS_RESP_EVENT_MAP_TC_TO_COS
+ u8 reserved48[6];
+};
+
+/* cmdq_query_roce_cc (size:128b/16B) */
+struct cmdq_query_roce_cc {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_CC_OPCODE_QUERY_ROCE_CC 0x8dUL
+ #define CMDQ_QUERY_ROCE_CC_OPCODE_LAST CMDQ_QUERY_ROCE_CC_OPCODE_QUERY_ROCE_CC
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
+/* creq_query_roce_cc_resp (size:128b/16B) */
+struct creq_query_roce_cc_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_CC_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_CC_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_CC_RESP_TYPE_LAST CREQ_QUERY_ROCE_CC_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_CC_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_CC_RESP_EVENT_QUERY_ROCE_CC 0x8dUL
+ #define CREQ_QUERY_ROCE_CC_RESP_EVENT_LAST CREQ_QUERY_ROCE_CC_RESP_EVENT_QUERY_ROCE_CC
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_cc_resp_sb (size:256b/32B) */
+struct creq_query_roce_cc_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_OPCODE_QUERY_ROCE_CC 0x8dUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_OPCODE_LAST CREQ_QUERY_ROCE_CC_RESP_SB_OPCODE_QUERY_ROCE_CC
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ u8 enable_cc;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_UNUSED7_MASK 0xfeUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_UNUSED7_SFT 1
+ u8 tos_dscp_tos_ecn;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK 0x3UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK 0xfcUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT 2
+ u8 g;
+ u8 num_phases_per_state;
+ __le16 init_cr;
+ __le16 init_tr;
+ u8 alt_vlan_pcp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_VLAN_PCP_MASK 0x7UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_VLAN_PCP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD1_MASK 0xf8UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD1_SFT 3
+ u8 alt_tos_dscp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_TOS_DSCP_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_TOS_DSCP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD4_MASK 0xc0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD4_SFT 6
+ u8 cc_mode;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_DCTCP 0x0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_PROBABILISTIC 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_LAST CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_PROBABILISTIC
+ u8 tx_queue;
+ __le16 rtt;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RTT_MASK 0x3fffUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RTT_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD5_MASK 0xc000UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD5_SFT 14
+ __le16 tcp_cp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TCP_CP_MASK 0x3ffUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TCP_CP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD6_MASK 0xfc00UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD6_SFT 10
+ __le16 inactivity_th;
+ u8 pkts_per_phase;
+ u8 time_per_phase;
+ __le32 reserved32;
+};
+
+/* creq_query_roce_cc_resp_sb_tlv (size:384b/48B) */
+struct creq_query_roce_cc_resp_sb_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ u8 total_size;
+ u8 reserved56[7];
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_OPCODE_QUERY_ROCE_CC 0x8dUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_OPCODE_LAST CREQ_QUERY_ROCE_CC_RESP_SB_TLV_OPCODE_QUERY_ROCE_CC
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ u8 enable_cc;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ENABLE_CC 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_UNUSED7_MASK 0xfeUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_UNUSED7_SFT 1
+ u8 tos_dscp_tos_ecn;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_ECN_MASK 0x3UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_ECN_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_DSCP_MASK 0xfcUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_DSCP_SFT 2
+ u8 g;
+ u8 num_phases_per_state;
+ __le16 init_cr;
+ __le16 init_tr;
+ u8 alt_vlan_pcp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_VLAN_PCP_MASK 0x7UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_VLAN_PCP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD1_MASK 0xf8UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD1_SFT 3
+ u8 alt_tos_dscp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_TOS_DSCP_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_TOS_DSCP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD4_MASK 0xc0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD4_SFT 6
+ u8 cc_mode;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_DCTCP 0x0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_PROBABILISTIC 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_LAST CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_PROBABILISTIC
+ u8 tx_queue;
+ __le16 rtt;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RTT_MASK 0x3fffUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RTT_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD5_MASK 0xc000UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD5_SFT 14
+ __le16 tcp_cp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TCP_CP_MASK 0x3ffUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TCP_CP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD6_MASK 0xfc00UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD6_SFT 10
+ __le16 inactivity_th;
+ u8 pkts_per_phase;
+ u8 time_per_phase;
+ __le32 reserved32;
+};
+
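All *_tlv structures in this file open with the same 8-byte TLV header (cmd_discr, tlv_flags, tlv_type, length), and the MORE bit in tlv_flags chains blocks together: NOT_LAST means another TLV follows, which is how the gen1/gen2 extension blocks below are appended behind this base block. A hedged walker, assuming `length` covers the whole TLV including its header (struct and function names invented for illustration):

	struct bng_tlv_hdr {			/* mirrors the leading fields above */
		__le16 cmd_discr;
		u8     reserved_8b;
		u8     tlv_flags;
		__le16 tlv_type;
		__le16 length;
	};

	static void bng_re_walk_tlvs(void *buf)
	{
		struct bng_tlv_hdr *tlv = buf;

		for (;;) {
			/* ...dispatch on le16_to_cpu(tlv->tlv_type) here... */
			if (!(tlv->tlv_flags &
			      CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE))
				break;		/* MORE_LAST: chain ends here */
			tlv = (void *)tlv + le16_to_cpu(tlv->length);
		}
	}
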
+/* creq_query_roce_cc_gen1_resp_sb_tlv (size:704b/88B) */
+struct creq_query_roce_cc_gen1_resp_sb_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ __le64 reserved64;
+ __le16 inactivity_th_hi;
+ __le16 min_time_between_cnps;
+ __le16 init_cp;
+ u8 tr_update_mode;
+ u8 tr_update_cycles;
+ u8 fr_num_rtts;
+ u8 ai_rate_increase;
+ __le16 reduction_relax_rtts_th;
+ __le16 additional_relax_cr_th;
+ __le16 cr_min_th;
+ u8 bw_avg_weight;
+ u8 actual_cr_factor;
+ __le16 max_cp_cr_th;
+ u8 cp_bias_en;
+ u8 cp_bias;
+ u8 cnp_ecn;
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_NOT_ECT 0x0UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_ECT_1 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_ECT_0 0x2UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_LAST CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_ECT_0
+ u8 rtt_jitter_en;
+ __le16 link_bytes_per_usec;
+ __le16 reset_cc_cr_th;
+ u8 cr_width;
+ u8 quota_period_min;
+ u8 quota_period_max;
+ u8 quota_period_abs_max;
+ __le16 tr_lower_bound;
+ u8 cr_prob_factor;
+ u8 tr_prob_factor;
+ __le16 fairness_cr_th;
+ u8 red_div;
+ u8 cnp_ratio_th;
+ __le16 exp_ai_rtts;
+ u8 exp_ai_cr_cp_ratio;
+ u8 use_rate_table;
+ __le16 cp_exp_update_th;
+ __le16 high_exp_ai_rtts_th1;
+ __le16 high_exp_ai_rtts_th2;
+ __le16 actual_cr_cong_free_rtts_th;
+ __le16 severe_cong_cr_th1;
+ __le16 severe_cong_cr_th2;
+ __le32 link64B_per_rtt;
+ u8 cc_ack_bytes;
+ u8 reduce_init_en;
+ __le16 reduce_init_cong_free_rtts_th;
+ u8 random_no_red_en;
+ u8 actual_cr_shift_correction_en;
+ u8 quota_period_adjust_en;
+ u8 reserved[5];
+};
+
+/* creq_query_roce_cc_gen2_resp_sb_tlv (size:512b/64B) */
+struct creq_query_roce_cc_gen2_resp_sb_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ __le64 reserved64;
+ __le16 dcn_qlevel_tbl_thr[8];
+ __le32 dcn_qlevel_tbl_act[8];
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_CR_MASK 0x3fffUL
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_CR_SFT 0
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_INC_CNP 0x4000UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_UPD_IMM 0x8000UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_TR_MASK 0x3fff0000UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_TR_SFT 16
+};
+
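Each dcn_qlevel_tbl_act[i] entry pairs with dcn_qlevel_tbl_thr[i] and encodes the full congestion response for one queue-depth threshold: a 14-bit current rate, a 14-bit target rate, and the INC_CNP/UPD_IMM action flags. Decoding one entry, with `sb` pointing at the returned TLV:

	u32 act = le32_to_cpu(sb->dcn_qlevel_tbl_act[i]);
	u16 cr = (act & CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_CR_MASK) >>
		 CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_CR_SFT;
	u16 tr = (act & CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_TR_MASK) >>
		 CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_TR_SFT;
	bool send_cnp = act & CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_INC_CNP;
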
+/* creq_query_roce_cc_gen1_ext_resp_sb_tlv (size:896b/112B) */
+struct creq_query_roce_cc_gen1_ext_resp_sb_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ __le64 reserved64;
+ __le16 rnd_no_red_mult;
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_RND_NO_RED_MULT_MASK 0x3ffUL
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_RND_NO_RED_MULT_SFT 0
+ __le16 no_red_offset;
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_NO_RED_OFFSET_MASK 0x7ffUL
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_NO_RED_OFFSET_SFT 0
+ __le16 reduce2_init_cong_free_rtts_th;
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_REDUCE2_INIT_CONG_FREE_RTTS_TH_MASK 0x3ffUL
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_REDUCE2_INIT_CONG_FREE_RTTS_TH_SFT 0
+ u8 reduce2_init_en;
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_REDUCE2_INIT_EN 0x1UL
+ u8 period_adjust_count;
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_PERIOD_ADJUST_COUNT_MASK 0xffUL
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_PERIOD_ADJUST_COUNT_SFT 0
+ __le16 current_rate_threshold_1;
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_CURRENT_RATE_THRESHOLD_1_MASK 0x3fffUL
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_CURRENT_RATE_THRESHOLD_1_SFT 0
+ __le16 current_rate_threshold_2;
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_CURRENT_RATE_THRESHOLD_2_MASK 0x3fffUL
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_CURRENT_RATE_THRESHOLD_2_SFT 0
+ __le32 reserved32;
+ __le64 reserved64_1;
+ u8 rate_table_quota_period[24];
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_RATE_TABLE_QUOTA_PERIOD_MASK 0xffUL
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_RATE_TABLE_QUOTA_PERIOD_SFT 0
+ __le16 rate_table_byte_quota[24];
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_RATE_TABLE_BYTE_QUOTA_MASK 0xffffUL
+ #define CREQ_QUERY_ROCE_CC_GEN1_EXT_RESP_SB_TLV_RATE_TABLE_BYTE_QUOTA_SFT 0
+};
+
+/* creq_query_roce_cc_gen2_ext_resp_sb_tlv (size:256b/32B) */
+struct creq_query_roce_cc_gen2_ext_resp_sb_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ __le64 reserved64;
+ __le16 cr2bw_64b_ratio;
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_CR2BW_64B_RATIO_MASK 0x3ffUL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_CR2BW_64B_RATIO_SFT 0
+ u8 sr2_cc_first_cnp_en;
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_SR2_CC_FIRST_CNP_EN 0x1UL
+ u8 sr2_cc_actual_cr_en;
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_SR2_CC_ACTUAL_CR_EN 0x1UL
+ __le16 retx_cp;
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_RETX_CP_MASK 0x3ffUL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_RETX_CP_SFT 0
+ __le16 retx_cr;
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_RETX_CR_MASK 0x3fffUL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_RETX_CR_SFT 0
+ __le16 retx_tr;
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_RETX_TR_MASK 0x3fffUL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_RETX_TR_SFT 0
+ u8 hw_retx_cc_reset_en;
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_ACK_TIMEOUT_EN 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_RX_NAK_EN 0x2UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_RX_RNR_NAK_EN 0x4UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_MISSING_RESPONSE_EN 0x8UL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_DUPLICATE_READ_EN 0x10UL
+ u8 reserved8;
+ __le16 hw_retx_reset_cc_cr_th;
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_HW_RETX_RESET_CC_CR_TH_MASK 0x3fffUL
+ #define CREQ_QUERY_ROCE_CC_GEN2_EXT_RESP_SB_TLV_HW_RETX_RESET_CC_CR_TH_SFT 0
+ __le16 reserved16;
+};
+
+/* cmdq_modify_roce_cc (size:448b/56B) */
+struct cmdq_modify_roce_cc {
+ u8 opcode;
+ #define CMDQ_MODIFY_ROCE_CC_OPCODE_MODIFY_ROCE_CC 0x8cUL
+ #define CMDQ_MODIFY_ROCE_CC_OPCODE_LAST CMDQ_MODIFY_ROCE_CC_OPCODE_MODIFY_ROCE_CC
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 modify_mask;
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_G 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_NUMPHASEPERSTATE 0x4UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_CR 0x8UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_TR 0x10UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN 0x20UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP 0x40UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP 0x80UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP 0x100UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_RTT 0x200UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE 0x400UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP 0x800UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE 0x1000UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP 0x2000UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE 0x4000UL
+ #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_PKTS_PER_PHASE 0x8000UL
+ u8 enable_cc;
+ #define CMDQ_MODIFY_ROCE_CC_ENABLE_CC 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_RSVD1_MASK 0xfeUL
+ #define CMDQ_MODIFY_ROCE_CC_RSVD1_SFT 1
+ u8 g;
+ u8 num_phases_per_state;
+ u8 pkts_per_phase;
+ __le16 init_cr;
+ __le16 init_tr;
+ u8 tos_dscp_tos_ecn;
+ #define CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK 0x3UL
+ #define CMDQ_MODIFY_ROCE_CC_TOS_ECN_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_TOS_DSCP_MASK 0xfcUL
+ #define CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT 2
+ u8 alt_vlan_pcp;
+ #define CMDQ_MODIFY_ROCE_CC_ALT_VLAN_PCP_MASK 0x7UL
+ #define CMDQ_MODIFY_ROCE_CC_ALT_VLAN_PCP_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_RSVD3_MASK 0xf8UL
+ #define CMDQ_MODIFY_ROCE_CC_RSVD3_SFT 3
+ __le16 alt_tos_dscp;
+ #define CMDQ_MODIFY_ROCE_CC_ALT_TOS_DSCP_MASK 0x3fUL
+ #define CMDQ_MODIFY_ROCE_CC_ALT_TOS_DSCP_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_RSVD4_MASK 0xffc0UL
+ #define CMDQ_MODIFY_ROCE_CC_RSVD4_SFT 6
+ __le16 rtt;
+ #define CMDQ_MODIFY_ROCE_CC_RTT_MASK 0x3fffUL
+ #define CMDQ_MODIFY_ROCE_CC_RTT_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_RSVD5_MASK 0xc000UL
+ #define CMDQ_MODIFY_ROCE_CC_RSVD5_SFT 14
+ __le16 tcp_cp;
+ #define CMDQ_MODIFY_ROCE_CC_TCP_CP_MASK 0x3ffUL
+ #define CMDQ_MODIFY_ROCE_CC_TCP_CP_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_RSVD6_MASK 0xfc00UL
+ #define CMDQ_MODIFY_ROCE_CC_RSVD6_SFT 10
+ u8 cc_mode;
+ #define CMDQ_MODIFY_ROCE_CC_CC_MODE_DCTCP_CC_MODE 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_CC_MODE_LAST CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE
+ u8 tx_queue;
+ __le16 inactivity_th;
+ u8 time_per_phase;
+ u8 reserved8_1;
+ __le16 reserved16;
+ __le32 reserved32;
+ __le64 reserved64;
+};
+
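modify_mask acts as a per-field write enable: firmware applies only the fields whose bit is set and leaves the rest of the congestion-control state untouched, so a targeted retune never has to read-modify-write the whole block. Changing just the ECN/DSCP marking, for instance (`ecn` and `dscp` are caller-chosen host-order values):

	req.modify_mask = cpu_to_le32(CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN |
				      CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP);
	req.tos_dscp_tos_ecn = (ecn & CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK) |
			       ((dscp << CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT) &
				CMDQ_MODIFY_ROCE_CC_TOS_DSCP_MASK);
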
+/* cmdq_modify_roce_cc_tlv (size:640b/80B) */
+struct cmdq_modify_roce_cc_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_LAST CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ u8 total_size;
+ u8 reserved56[7];
+ u8 opcode;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_OPCODE_MODIFY_ROCE_CC 0x8cUL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_OPCODE_LAST CMDQ_MODIFY_ROCE_CC_TLV_OPCODE_MODIFY_ROCE_CC
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 modify_mask;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_ENABLE_CC 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_G 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_NUMPHASEPERSTATE 0x4UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_INIT_CR 0x8UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_INIT_TR 0x10UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TOS_ECN 0x20UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TOS_DSCP 0x40UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_ALT_VLAN_PCP 0x80UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_ALT_TOS_DSCP 0x100UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_RTT 0x200UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_CC_MODE 0x400UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TCP_CP 0x800UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TX_QUEUE 0x1000UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_INACTIVITY_CP 0x2000UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TIME_PER_PHASE 0x4000UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_PKTS_PER_PHASE 0x8000UL
+ u8 enable_cc;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_ENABLE_CC 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD1_MASK 0xfeUL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD1_SFT 1
+ u8 g;
+ u8 num_phases_per_state;
+ u8 pkts_per_phase;
+ __le16 init_cr;
+ __le16 init_tr;
+ u8 tos_dscp_tos_ecn;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_ECN_MASK 0x3UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_ECN_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_DSCP_MASK 0xfcUL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_DSCP_SFT 2
+ u8 alt_vlan_pcp;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_VLAN_PCP_MASK 0x7UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_VLAN_PCP_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD3_MASK 0xf8UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD3_SFT 3
+ __le16 alt_tos_dscp;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_TOS_DSCP_MASK 0x3fUL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_TOS_DSCP_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD4_MASK 0xffc0UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD4_SFT 6
+ __le16 rtt;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RTT_MASK 0x3fffUL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RTT_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD5_MASK 0xc000UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD5_SFT 14
+ __le16 tcp_cp;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TCP_CP_MASK 0x3ffUL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_TCP_CP_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD6_MASK 0xfc00UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD6_SFT 10
+ u8 cc_mode;
+ #define CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_DCTCP_CC_MODE 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_PROBABILISTIC_CC_MODE 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_LAST CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_PROBABILISTIC_CC_MODE
+ u8 tx_queue;
+ __le16 inactivity_th;
+ u8 time_per_phase;
+ u8 reserved8_1;
+ __le16 reserved16;
+ __le32 reserved32;
+ __le64 reserved64;
+ __le64 reservedtlvpad;
+};
+
+/* cmdq_modify_roce_cc_gen1_tlv (size:768b/96B) */
+struct cmdq_modify_roce_cc_gen1_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_LAST CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ __le64 reserved64;
+ __le64 modify_mask;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_MIN_TIME_BETWEEN_CNPS 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_INIT_CP 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_UPDATE_MODE 0x4UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_UPDATE_CYCLES 0x8UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_FR_NUM_RTTS 0x10UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_AI_RATE_INCREASE 0x20UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCTION_RELAX_RTTS_TH 0x40UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ADDITIONAL_RELAX_CR_TH 0x80UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_MIN_TH 0x100UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_BW_AVG_WEIGHT 0x200UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_FACTOR 0x400UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_MAX_CP_CR_TH 0x800UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_BIAS_EN 0x1000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_BIAS 0x2000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CNP_ECN 0x4000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RTT_JITTER_EN 0x8000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_LINK_BYTES_PER_USEC 0x10000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RESET_CC_CR_TH 0x20000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_WIDTH 0x40000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_MIN 0x80000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_MAX 0x100000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_ABS_MAX 0x200000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_LOWER_BOUND 0x400000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_PROB_FACTOR 0x800000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_PROB_FACTOR 0x1000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_FAIRNESS_CR_TH 0x2000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RED_DIV 0x4000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CNP_RATIO_TH 0x8000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_EXP_AI_RTTS 0x10000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_EXP_AI_CR_CP_RATIO 0x20000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_EXP_UPDATE_TH 0x40000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_HIGH_EXP_AI_RTTS_TH1 0x80000000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_HIGH_EXP_AI_RTTS_TH2 0x100000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_USE_RATE_TABLE 0x200000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_LINK64B_PER_RTT 0x400000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_CONG_FREE_RTTS_TH 0x800000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_SEVERE_CONG_CR_TH1 0x1000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_SEVERE_CONG_CR_TH2 0x2000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CC_ACK_BYTES 0x4000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCE_INIT_EN 0x8000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCE_INIT_CONG_FREE_RTTS_TH 0x10000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RANDOM_NO_RED_EN 0x20000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_SHIFT_CORRECTION_EN 0x40000000000ULL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_ADJUST_EN 0x80000000000ULL
+ __le16 inactivity_th_hi;
+ __le16 min_time_between_cnps;
+ __le16 init_cp;
+ u8 tr_update_mode;
+ u8 tr_update_cycles;
+ u8 fr_num_rtts;
+ u8 ai_rate_increase;
+ __le16 reduction_relax_rtts_th;
+ __le16 additional_relax_cr_th;
+ __le16 cr_min_th;
+ u8 bw_avg_weight;
+ u8 actual_cr_factor;
+ __le16 max_cp_cr_th;
+ u8 cp_bias_en;
+ u8 cp_bias;
+ u8 cnp_ecn;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_NOT_ECT 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_ECT_1 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_ECT_0 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_LAST CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_ECT_0
+ u8 rtt_jitter_en;
+ __le16 link_bytes_per_usec;
+ __le16 reset_cc_cr_th;
+ u8 cr_width;
+ u8 quota_period_min;
+ u8 quota_period_max;
+ u8 quota_period_abs_max;
+ __le16 tr_lower_bound;
+ u8 cr_prob_factor;
+ u8 tr_prob_factor;
+ __le16 fairness_cr_th;
+ u8 red_div;
+ u8 cnp_ratio_th;
+ __le16 exp_ai_rtts;
+ u8 exp_ai_cr_cp_ratio;
+ u8 use_rate_table;
+ __le16 cp_exp_update_th;
+ __le16 high_exp_ai_rtts_th1;
+ __le16 high_exp_ai_rtts_th2;
+ __le16 actual_cr_cong_free_rtts_th;
+ __le16 severe_cong_cr_th1;
+ __le16 severe_cong_cr_th2;
+ __le32 link64B_per_rtt;
+ u8 cc_ack_bytes;
+ u8 reduce_init_en;
+ __le16 reduce_init_cong_free_rtts_th;
+ u8 random_no_red_en;
+ u8 actual_cr_shift_correction_en;
+ u8 quota_period_adjust_en;
+ u8 reserved[5];
+};
+
+/* cmdq_modify_roce_cc_gen2_tlv (size:256b/32B) */
+struct cmdq_modify_roce_cc_gen2_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED_LAST CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ __le64 reserved64;
+ __le64 modify_mask;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_IDX 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_THR 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_CR 0x4UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_INC_CNP 0x8UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_UPD_IMM 0x10UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_TR 0x20UL
+ u8 dcn_qlevel_tbl_idx;
+ u8 reserved8;
+ __le16 dcn_qlevel_tbl_thr;
+ __le32 dcn_qlevel_tbl_act;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_CR_MASK 0x3fffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_CR_SFT 0
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_INC_CNP 0x4000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_UPD_IMM 0x8000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_TR_MASK 0x3fff0000UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_TR_SFT 16
+};
+
+/* cmdq_modify_roce_cc_gen1_ext_tlv (size:384b/48B) */
+struct cmdq_modify_roce_cc_gen1_ext_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_TLV_FLAGS_REQUIRED_LAST CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ __le64 reserved64;
+ __le64 modify_mask;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_MODIFY_MASK_RND_NO_RED_MULT 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_MODIFY_MASK_NO_RED_OFFSET 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_MODIFY_MASK_REDUCE2_INIT_CONG_FREE_RTTS_TH 0x4UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_MODIFY_MASK_REDUCE2_INIT_EN 0x8UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_MODIFY_MASK_PERIOD_ADJUST_COUNT 0x10UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_MODIFY_MASK_CURRENT_RATE_THRESHOLD_1 0x20UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_MODIFY_MASK_CURRENT_RATE_THRESHOLD_2 0x40UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_MODIFY_MASK_RATE_TABLE_IDX 0x80UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_MODIFY_MASK_RATE_TABLE_QUOTA_PERIOD 0x100UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_MODIFY_MASK_RATE_TABLE_BYTE_QUOTA 0x200UL
+ __le16 rnd_no_red_mult;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_RND_NO_RED_MULT_MASK 0x3ffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_RND_NO_RED_MULT_SFT 0
+ __le16 no_red_offset;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_NO_RED_OFFSET_MASK 0x7ffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_NO_RED_OFFSET_SFT 0
+ __le16 reduce2_init_cong_free_rtts_th;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_REDUCE2_INIT_CONG_FREE_RTTS_TH_MASK 0x3ffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_REDUCE2_INIT_CONG_FREE_RTTS_TH_SFT 0
+ u8 reduce2_init_en;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_REDUCE2_INIT_EN 0x1UL
+ u8 period_adjust_count;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_PERIOD_ADJUST_COUNT_MASK 0xffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_PERIOD_ADJUST_COUNT_SFT 0
+ __le16 current_rate_threshold_1;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_CURRENT_RATE_THRESHOLD_1_MASK 0x3fffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_CURRENT_RATE_THRESHOLD_1_SFT 0
+ __le16 current_rate_threshold_2;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_CURRENT_RATE_THRESHOLD_2_MASK 0x3fffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_CURRENT_RATE_THRESHOLD_2_SFT 0
+ u8 rate_table_idx;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_RATE_TABLE_IDX_MASK 0xffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_RATE_TABLE_IDX_SFT 0
+ u8 rate_table_quota_period;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_RATE_TABLE_QUOTA_PERIOD_MASK 0xffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_RATE_TABLE_QUOTA_PERIOD_SFT 0
+ __le16 rate_table_byte_quota;
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_RATE_TABLE_BYTE_QUOTA_MASK 0xffffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN1_EXT_TLV_RATE_TABLE_BYTE_QUOTA_SFT 0
+ __le64 reserved64_1;
+};
+
+/* cmdq_modify_roce_cc_gen2_ext_tlv (size:384b/48B) */
+struct cmdq_modify_roce_cc_gen2_ext_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_TLV_FLAGS_REQUIRED_LAST CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ __le64 reserved64;
+ __le64 modify_mask;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_MODIFY_MASK_CR2BW_64B_RATIO 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_MODIFY_MASK_SR2_CC_FIRST_CNP_EN 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_MODIFY_MASK_SR2_CC_ACTUAL_CR_EN 0x4UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_MODIFY_MASK_RETX_CP 0x8UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_MODIFY_MASK_RETX_CR 0x10UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_MODIFY_MASK_RETX_TR 0x20UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_MODIFY_MASK_HW_RETX_CC_RESET_EN 0x40UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_MODIFY_MASK_HW_RETX_RESET_CC_CR_TH 0x80UL
+ __le64 reserved64_1;
+ __le16 cr2bw_64b_ratio;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_CR2BW_64B_RATIO_MASK 0x3ffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_CR2BW_64B_RATIO_SFT 0
+ u8 sr2_cc_first_cnp_en;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_SR2_CC_FIRST_CNP_EN 0x1UL
+ u8 sr2_cc_actual_cr_en;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_SR2_CC_ACTUAL_CR_EN 0x1UL
+ __le16 retx_cp;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_RETX_CP_MASK 0x3ffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_RETX_CP_SFT 0
+ __le16 retx_cr;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_RETX_CR_MASK 0x3fffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_RETX_CR_SFT 0
+ __le16 retx_tr;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_RETX_TR_MASK 0x3fffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_RETX_TR_SFT 0
+ u8 hw_retx_cc_reset_en;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_ACK_TIMEOUT_EN 0x1UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_RX_NAK_EN 0x2UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_RX_RNR_NAK_EN 0x4UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_MISSING_RESPONSE_EN 0x8UL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_DUPLICATE_READ_EN 0x10UL
+ u8 reserved8;
+ __le16 hw_retx_reset_cc_cr_th;
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_HW_RETX_RESET_CC_CR_TH_MASK 0x3fffUL
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_EXT_TLV_HW_RETX_RESET_CC_CR_TH_SFT 0
+ __le16 reserved16;
+};
+
+/* creq_modify_roce_cc_resp (size:128b/16B) */
+struct creq_modify_roce_cc_resp {
+ u8 type;
+ #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_SFT 0
+ #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_LAST CREQ_MODIFY_ROCE_CC_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_MODIFY_ROCE_CC_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_MODIFY_ROCE_CC_RESP_EVENT_MODIFY_ROCE_CC 0x8cUL
+ #define CREQ_MODIFY_ROCE_CC_RESP_EVENT_LAST CREQ_MODIFY_ROCE_CC_RESP_EVENT_MODIFY_ROCE_CC
+ u8 reserved48[6];
+};
+
+/* cmdq_set_link_aggr_mode_cc (size:320b/40B) */
+struct cmdq_set_link_aggr_mode_cc {
+ u8 opcode;
+ #define CMDQ_SET_LINK_AGGR_MODE_OPCODE_SET_LINK_AGGR_MODE 0x8fUL
+ #define CMDQ_SET_LINK_AGGR_MODE_OPCODE_LAST CMDQ_SET_LINK_AGGR_MODE_OPCODE_SET_LINK_AGGR_MODE
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 modify_mask;
+ #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_EN 0x1UL
+ #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_ACTIVE_PORT_MAP 0x2UL
+ #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_MEMBER_PORT_MAP 0x4UL
+ #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_MODE 0x8UL
+ #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_STAT_CTX_ID 0x10UL
+ u8 aggr_enable;
+ #define CMDQ_SET_LINK_AGGR_MODE_AGGR_ENABLE 0x1UL
+ #define CMDQ_SET_LINK_AGGR_MODE_RSVD1_MASK 0xfeUL
+ #define CMDQ_SET_LINK_AGGR_MODE_RSVD1_SFT 1
+ u8 active_port_map;
+ #define CMDQ_SET_LINK_AGGR_MODE_ACTIVE_PORT_MAP_MASK 0xfUL
+ #define CMDQ_SET_LINK_AGGR_MODE_ACTIVE_PORT_MAP_SFT 0
+ #define CMDQ_SET_LINK_AGGR_MODE_RSVD2_MASK 0xf0UL
+ #define CMDQ_SET_LINK_AGGR_MODE_RSVD2_SFT 4
+ u8 member_port_map;
+ u8 link_aggr_mode;
+ #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_ACTIVE_ACTIVE 0x1UL
+ #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_ACTIVE_BACKUP 0x2UL
+ #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_BALANCE_XOR 0x3UL
+ #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_802_3_AD 0x4UL
+ #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_LAST CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_802_3_AD
+ __le16 stat_ctx_id[4];
+ __le64 rsvd1;
+};
+
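+/*
+ * Illustrative sketch (hypothetical values, not part of the generated
+ * interface): configuring active-backup aggregation over ports 0 and 1
+ * would set the mode and both port maps, with modify_mask selecting the
+ * fields to apply:
+ *
+ *	req.modify_mask = cpu_to_le32(
+ *		CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_EN |
+ *		CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_ACTIVE_PORT_MAP |
+ *		CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_MEMBER_PORT_MAP |
+ *		CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_MODE);
+ *	req.aggr_enable = CMDQ_SET_LINK_AGGR_MODE_AGGR_ENABLE;
+ *	req.member_port_map = 0x3;	(ports 0 and 1 are members)
+ *	req.active_port_map = 0x1;	(port 0 carries traffic)
+ *	req.link_aggr_mode = CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_ACTIVE_BACKUP;
+ */
+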
+/* creq_set_link_aggr_mode_resources_resp (size:128b/16B) */
+struct creq_set_link_aggr_mode_resources_resp {
+ u8 type;
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_SFT 0
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_LAST CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_EVENT_SET_LINK_AGGR_MODE 0x8fUL
+ #define CREQ_SET_LINK_AGGR_MODE_RESP_EVENT_LAST CREQ_SET_LINK_AGGR_MODE_RESP_EVENT_SET_LINK_AGGR_MODE
+ u8 reserved48[6];
+};
+
+/* cmdq_allocate_roce_stats_ext_ctx (size:256b/32B) */
+struct cmdq_allocate_roce_stats_ext_ctx {
+ u8 opcode;
+ #define CMDQ_ALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_ALLOCATE_ROCE_STATS_EXT_CTX 0x96UL
+ #define CMDQ_ALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_LAST CMDQ_ALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_ALLOCATE_ROCE_STATS_EXT_CTX
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_ALLOCATE_ROCE_STATS_EXT_CTX_FLAGS_PER_FUNC 0x1UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ __le16 steering_tag;
+ __le16 reserved16;
+};
+
+/* creq_allocate_roce_stats_ext_ctx_resp (size:128b/16B) */
+struct creq_allocate_roce_stats_ext_ctx_resp {
+ u8 type;
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_SFT 0
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_LAST CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 roce_stats_ext_xid;
+ u8 v;
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_ALLOCATE_ROCE_STATS_EXT_CTX 0x96UL
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_LAST CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_ALLOCATE_ROCE_STATS_EXT_CTX
+ u8 reserved48[6];
+};
+
+/* cmdq_deallocate_roce_stats_ext_ctx (size:256b/32B) */
+struct cmdq_deallocate_roce_stats_ext_ctx {
+ u8 opcode;
+ #define CMDQ_DEALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_DEALLOCATE_ROCE_STATS_EXT_CTX 0x97UL
+ #define CMDQ_DEALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_LAST CMDQ_DEALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_DEALLOCATE_ROCE_STATS_EXT_CTX
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 roce_stats_ext_xid;
+ __le32 reserved32;
+ __le64 reserved64;
+};
+
+/* creq_deallocate_roce_stats_ext_ctx_resp (size:128b/16B) */
+struct creq_deallocate_roce_stats_ext_ctx_resp {
+ u8 type;
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_SFT 0
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_LAST CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 roce_stats_ext_xid;
+ u8 v;
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_DEALLOCATE_ROCE_STATS_EXT_CTX 0x97UL
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_LAST CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_DEALLOCATE_ROCE_STATS_EXT_CTX
+ u8 reserved48[6];
+};
+
+/* cmdq_query_roce_stats_ext_v2 (size:256b/32B) */
+struct cmdq_query_roce_stats_ext_v2 {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_V2_OPCODE_QUERY_ROCE_STATS_EXT_V2 0x98UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_V2_OPCODE_LAST CMDQ_QUERY_ROCE_STATS_EXT_V2_OPCODE_QUERY_ROCE_STATS_EXT_V2
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 roce_stats_ext_xid;
+ __le32 reserved32;
+ __le64 reserved64;
+};
+
+/* creq_query_roce_stats_ext_v2_resp (size:128b/16B) */
+struct creq_query_roce_stats_ext_v2_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_LAST CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_EVENT_QUERY_ROCE_STATS_EXT_V2 0x98UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_EVENT_LAST CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_EVENT_QUERY_ROCE_STATS_EXT_V2
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_stats_ext_v2_resp_sb (size:2304b/288B) */
+struct creq_query_roce_stats_ext_v2_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT_V2 0x98UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_SB_OPCODE_LAST CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT_V2
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_SB_TS_VALID 0x1UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_SB_RSVD_MASK 0xfffeUL
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_SB_RSVD_SFT 1
+ u8 resp_size;
+ u8 offset;
+ __le64 timestamp;
+ __le32 rsvd[8];
+ __le64 tx_atomic_req_pkts;
+ __le64 tx_read_req_pkts;
+ __le64 tx_read_res_pkts;
+ __le64 tx_write_req_pkts;
+ __le64 tx_rc_send_req_pkts;
+ __le64 tx_ud_send_req_pkts;
+ __le64 tx_cnp_pkts;
+ __le64 tx_roce_pkts;
+ __le64 tx_roce_bytes;
+ __le64 rx_out_of_buffer_pkts;
+ __le64 rx_out_of_sequence_pkts;
+ __le64 dup_req;
+ __le64 missing_resp;
+ __le64 seq_err_naks_rcvd;
+ __le64 rnr_naks_rcvd;
+ __le64 to_retransmits;
+ __le64 rx_atomic_req_pkts;
+ __le64 rx_read_req_pkts;
+ __le64 rx_read_res_pkts;
+ __le64 rx_write_req_pkts;
+ __le64 rx_rc_send_pkts;
+ __le64 rx_ud_send_pkts;
+ __le64 rx_dcn_payload_cut;
+ __le64 rx_ecn_marked_pkts;
+ __le64 rx_cnp_pkts;
+ __le64 rx_roce_pkts;
+ __le64 rx_roce_bytes;
+ __le64 rx_roce_good_pkts;
+ __le64 rx_roce_good_bytes;
+ __le64 rx_ack_pkts;
+};
+
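+/*
+ * Illustrative sketch (hypothetical helper, not part of the generated
+ * interface): the stats block reaches host memory in little-endian
+ * form, so each counter is read back through le64_to_cpu(), e.g.:
+ */
+static inline u64
+creq_roce_stats_v2_tx_pkts(const struct creq_query_roce_stats_ext_v2_resp_sb *sb)
+{
+	return le64_to_cpu(sb->tx_roce_pkts);
+}
+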
+/* cmdq_roce_mirror_cfg (size:192b/24B) */
+struct cmdq_roce_mirror_cfg {
+ u8 opcode;
+ #define CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG 0xa6UL
+ #define CMDQ_ROCE_MIRROR_CFG_OPCODE_LAST CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ u8 mirror_flags;
+ #define CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE 0x1UL
+ u8 rsvd[7];
+};
+
+/* creq_roce_mirror_cfg_resp (size:128b/16B) */
+struct creq_roce_mirror_cfg_resp {
+ u8 type;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_SFT 0
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_LAST CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG 0xa6UL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_LAST CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG
+ u8 reserved48[6];
+};
+
+/* cmdq_roce_cfg (size:192b/24B) */
+struct cmdq_roce_cfg {
+ u8 opcode;
+ #define CMDQ_ROCE_CFG_OPCODE_ROCE_CFG 0xa7UL
+ #define CMDQ_ROCE_CFG_OPCODE_LAST CMDQ_ROCE_CFG_OPCODE_ROCE_CFG
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 feat_cfg;
+ #define CMDQ_ROCE_CFG_FEAT_CFG_ICRC_CHECK_DISABLE 0x1UL
+ #define CMDQ_ROCE_CFG_FEAT_CFG_FORCE_MIRROR_ENABLE 0x2UL
+ #define CMDQ_ROCE_CFG_FEAT_CFG_RSVD_MASK 0xfffffffcUL
+ #define CMDQ_ROCE_CFG_FEAT_CFG_RSVD_SFT 2
+ __le32 feat_enables;
+ #define CMDQ_ROCE_CFG_FEAT_ENABLES_ICRC_CHECK_DISABLE 0x1UL
+ #define CMDQ_ROCE_CFG_FEAT_ENABLES_FORCE_MIRROR_ENABLE 0x2UL
+ #define CMDQ_ROCE_CFG_FEAT_ENABLES_RSVD_MASK 0xfffffffcUL
+ #define CMDQ_ROCE_CFG_FEAT_ENABLES_RSVD_SFT 2
+};
+
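+/*
+ * Illustrative sketch (semantics assumed, not stated by the header):
+ * feat_enables plausibly selects which feature bits are being modified
+ * while feat_cfg carries the requested values, so disabling ICRC checks
+ * without touching the mirror setting would look like:
+ *
+ *	req.feat_enables =
+ *		cpu_to_le32(CMDQ_ROCE_CFG_FEAT_ENABLES_ICRC_CHECK_DISABLE);
+ *	req.feat_cfg =
+ *		cpu_to_le32(CMDQ_ROCE_CFG_FEAT_CFG_ICRC_CHECK_DISABLE);
+ */
+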
+/* creq_roce_cfg_resp (size:128b/16B) */
+struct creq_roce_cfg_resp {
+ u8 type;
+ #define CREQ_ROCE_CFG_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_ROCE_CFG_RESP_TYPE_SFT 0
+ #define CREQ_ROCE_CFG_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_ROCE_CFG_RESP_TYPE_LAST CREQ_ROCE_CFG_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved04;
+ u8 v;
+ #define CREQ_ROCE_CFG_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_ROCE_CFG_RESP_EVENT_ROCE_CFG 0xa7UL
+ #define CREQ_ROCE_CFG_RESP_EVENT_LAST CREQ_ROCE_CFG_RESP_EVENT_ROCE_CFG
+ __le16 reserved0A;
+ __le32 feat_cfg_cur;
+ #define CREQ_ROCE_CFG_RESP_ICRC_CHECK_DISABLED 0x1UL
+ #define CREQ_ROCE_CFG_RESP_FORCE_MIRROR_ENABLE 0x2UL
+ #define CREQ_ROCE_CFG_RESP_RSVD_MASK 0xfffffffcUL
+ #define CREQ_ROCE_CFG_RESP_RSVD_SFT 2
+};
+
+/* creq_func_event (size:128b/16B) */
+struct creq_func_event {
+ u8 type;
+ #define CREQ_FUNC_EVENT_TYPE_MASK 0x3fUL
+ #define CREQ_FUNC_EVENT_TYPE_SFT 0
+ #define CREQ_FUNC_EVENT_TYPE_FUNC_EVENT 0x3aUL
+ #define CREQ_FUNC_EVENT_TYPE_LAST CREQ_FUNC_EVENT_TYPE_FUNC_EVENT
+ u8 reserved56[7];
+ u8 v;
+ #define CREQ_FUNC_EVENT_V 0x1UL
+ u8 event;
+ #define CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR 0x1UL
+ #define CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR 0x2UL
+ #define CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR 0x3UL
+ #define CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR 0x4UL
+ #define CREQ_FUNC_EVENT_EVENT_CQ_ERROR 0x5UL
+ #define CREQ_FUNC_EVENT_EVENT_TQM_ERROR 0x6UL
+ #define CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR 0x7UL
+ #define CREQ_FUNC_EVENT_EVENT_CFCS_ERROR 0x8UL
+ #define CREQ_FUNC_EVENT_EVENT_CFCC_ERROR 0x9UL
+ #define CREQ_FUNC_EVENT_EVENT_CFCM_ERROR 0xaUL
+ #define CREQ_FUNC_EVENT_EVENT_TIM_ERROR 0xbUL
+ #define CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST 0x80UL
+ #define CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED 0x81UL
+ #define CREQ_FUNC_EVENT_EVENT_LAST CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED
+ u8 reserved48[6];
+};
+
+/* creq_qp_event (size:128b/16B) */
+struct creq_qp_event {
+ u8 type;
+ #define CREQ_QP_EVENT_TYPE_MASK 0x3fUL
+ #define CREQ_QP_EVENT_TYPE_SFT 0
+ #define CREQ_QP_EVENT_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QP_EVENT_TYPE_LAST CREQ_QP_EVENT_TYPE_QP_EVENT
+ u8 status;
+ #define CREQ_QP_EVENT_STATUS_SUCCESS 0x0UL
+ #define CREQ_QP_EVENT_STATUS_FAIL 0x1UL
+ #define CREQ_QP_EVENT_STATUS_RESOURCES 0x2UL
+ #define CREQ_QP_EVENT_STATUS_INVALID_CMD 0x3UL
+ #define CREQ_QP_EVENT_STATUS_NOT_IMPLEMENTED 0x4UL
+ #define CREQ_QP_EVENT_STATUS_INVALID_PARAMETER 0x5UL
+ #define CREQ_QP_EVENT_STATUS_HARDWARE_ERROR 0x6UL
+ #define CREQ_QP_EVENT_STATUS_INTERNAL_ERROR 0x7UL
+ #define CREQ_QP_EVENT_STATUS_LAST CREQ_QP_EVENT_STATUS_INTERNAL_ERROR
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_QP_EVENT_V 0x1UL
+ u8 event;
+ #define CREQ_QP_EVENT_EVENT_CREATE_QP 0x1UL
+ #define CREQ_QP_EVENT_EVENT_DESTROY_QP 0x2UL
+ #define CREQ_QP_EVENT_EVENT_MODIFY_QP 0x3UL
+ #define CREQ_QP_EVENT_EVENT_QUERY_QP 0x4UL
+ #define CREQ_QP_EVENT_EVENT_CREATE_SRQ 0x5UL
+ #define CREQ_QP_EVENT_EVENT_DESTROY_SRQ 0x6UL
+ #define CREQ_QP_EVENT_EVENT_QUERY_SRQ 0x8UL
+ #define CREQ_QP_EVENT_EVENT_CREATE_CQ 0x9UL
+ #define CREQ_QP_EVENT_EVENT_DESTROY_CQ 0xaUL
+ #define CREQ_QP_EVENT_EVENT_RESIZE_CQ 0xcUL
+ #define CREQ_QP_EVENT_EVENT_ALLOCATE_MRW 0xdUL
+ #define CREQ_QP_EVENT_EVENT_DEALLOCATE_KEY 0xeUL
+ #define CREQ_QP_EVENT_EVENT_REGISTER_MR 0xfUL
+ #define CREQ_QP_EVENT_EVENT_DEREGISTER_MR 0x10UL
+ #define CREQ_QP_EVENT_EVENT_ADD_GID 0x11UL
+ #define CREQ_QP_EVENT_EVENT_DELETE_GID 0x12UL
+ #define CREQ_QP_EVENT_EVENT_MODIFY_GID 0x17UL
+ #define CREQ_QP_EVENT_EVENT_QUERY_GID 0x18UL
+ #define CREQ_QP_EVENT_EVENT_CREATE_QP1 0x13UL
+ #define CREQ_QP_EVENT_EVENT_DESTROY_QP1 0x14UL
+ #define CREQ_QP_EVENT_EVENT_CREATE_AH 0x15UL
+ #define CREQ_QP_EVENT_EVENT_DESTROY_AH 0x16UL
+ #define CREQ_QP_EVENT_EVENT_INITIALIZE_FW 0x80UL
+ #define CREQ_QP_EVENT_EVENT_DEINITIALIZE_FW 0x81UL
+ #define CREQ_QP_EVENT_EVENT_STOP_FUNC 0x82UL
+ #define CREQ_QP_EVENT_EVENT_QUERY_FUNC 0x83UL
+ #define CREQ_QP_EVENT_EVENT_SET_FUNC_RESOURCES 0x84UL
+ #define CREQ_QP_EVENT_EVENT_READ_CONTEXT 0x85UL
+ #define CREQ_QP_EVENT_EVENT_MAP_TC_TO_COS 0x8aUL
+ #define CREQ_QP_EVENT_EVENT_QUERY_VERSION 0x8bUL
+ #define CREQ_QP_EVENT_EVENT_MODIFY_CC 0x8cUL
+ #define CREQ_QP_EVENT_EVENT_QUERY_CC 0x8dUL
+ #define CREQ_QP_EVENT_EVENT_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QP_EVENT_EVENT_SET_LINK_AGGR_MODE 0x8fUL
+ #define CREQ_QP_EVENT_EVENT_QUERY_QP_EXTEND 0x91UL
+ #define CREQ_QP_EVENT_EVENT_PNO_STATS_CONFIG 0x99UL
+ #define CREQ_QP_EVENT_EVENT_PNO_DEBUG_TUNNEL_CONFIG 0x9aUL
+ #define CREQ_QP_EVENT_EVENT_SET_PNO_FABRIC_NEXTHOP_MAC 0x9bUL
+ #define CREQ_QP_EVENT_EVENT_PNO_PATH_STRPATH_CONFIG 0x9cUL
+ #define CREQ_QP_EVENT_EVENT_PNO_PATH_QUERY 0x9dUL
+ #define CREQ_QP_EVENT_EVENT_PNO_PATH_ACCESS_CONTROL 0x9eUL
+ #define CREQ_QP_EVENT_EVENT_QUERY_PNO_FABRIC_NEXTHOP_IP 0x9fUL
+ #define CREQ_QP_EVENT_EVENT_PNO_PATH_PLANE_CONFIG 0xa0UL
+ #define CREQ_QP_EVENT_EVENT_PNO_TUNNEL_CLOSE 0xa1UL
+ #define CREQ_QP_EVENT_EVENT_ROCE_MIRROR_CFG 0xa6UL
+ #define CREQ_QP_EVENT_EVENT_ROCE_CFG 0xa7UL
+ #define CREQ_QP_EVENT_EVENT_PNO_EV_MONITORING_CONFIG 0xa8UL
+ #define CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION 0xc0UL
+ #define CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION 0xc1UL
+ #define CREQ_QP_EVENT_EVENT_LAST CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION
+ u8 reserved48[6];
+};
+
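+/*
+ * Illustrative sketch (hypothetical helper, not part of the generated
+ * interface): a CREQ consumer masks out the entry type before switching
+ * on the command-completion event code:
+ */
+static inline bool creq_is_qp_event(const struct creq_qp_event *creq)
+{
+	return (creq->type & CREQ_QP_EVENT_TYPE_MASK) ==
+	       CREQ_QP_EVENT_TYPE_QP_EVENT;
+}
+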
+/* creq_qp_error_notification (size:128b/16B) */
+struct creq_qp_error_notification {
+ u8 type;
+ #define CREQ_QP_ERROR_NOTIFICATION_TYPE_MASK 0x3fUL
+ #define CREQ_QP_ERROR_NOTIFICATION_TYPE_SFT 0
+ #define CREQ_QP_ERROR_NOTIFICATION_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QP_ERROR_NOTIFICATION_TYPE_LAST CREQ_QP_ERROR_NOTIFICATION_TYPE_QP_EVENT
+ u8 status;
+ u8 req_slow_path_state;
+ u8 req_err_state_reason;
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_NO_ERROR 0x0UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR 0x1UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT 0x2UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT 0x3UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1 0x4UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2 0x5UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3 0x6UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4 0x7UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR 0x8UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR 0x9UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH 0xaUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP 0xbUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND 0xcUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG 0xdUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE 0xeUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR 0xfUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR 0x10UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR 0x11UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR 0x12UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR 0x13UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR 0x14UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR 0x15UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR 0x16UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR 0x17UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR 0x18UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR 0x19UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR 0x1aUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR 0x1bUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR 0x1cUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SQ_OVERFLOW 0x1dUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_LAST CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SQ_OVERFLOW
+ __le32 xid;
+ u8 v;
+ #define CREQ_QP_ERROR_NOTIFICATION_V 0x1UL
+ u8 event;
+ #define CREQ_QP_ERROR_NOTIFICATION_EVENT_QP_ERROR_NOTIFICATION 0xc0UL
+ #define CREQ_QP_ERROR_NOTIFICATION_EVENT_LAST CREQ_QP_ERROR_NOTIFICATION_EVENT_QP_ERROR_NOTIFICATION
+ u8 res_slow_path_state;
+ u8 res_err_state_reason;
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_NO_ERROR 0x0UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX 0x1UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH 0x2UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE 0x3UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR 0x4UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT 0x5UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY 0x6UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR 0x7UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION 0x8UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR 0x9UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY 0xaUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR 0xbUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION 0xcUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR 0xdUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW 0xeUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE 0xfUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC 0x10UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE 0x11UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR 0x12UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR 0x13UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR 0x14UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY 0x15UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR 0x16UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR 0x17UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR 0x18UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR 0x19UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR 0x1bUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR 0x1cUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND 0x1dUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RQ_OVERFLOW 0x1eUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_LAST CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RQ_OVERFLOW
+ __le16 sq_cons_idx;
+ __le16 rq_cons_idx;
+};
+
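+/*
+ * Illustrative sketch (not part of the generated interface): the error
+ * reasons above are plain byte enums, so decoding is a direct compare;
+ * handle_sq_overflow() is a hypothetical consumer:
+ *
+ *	if (err->req_err_state_reason ==
+ *	    CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SQ_OVERFLOW)
+ *		handle_sq_overflow(le32_to_cpu(err->xid));
+ */
+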
+/* creq_cq_error_notification (size:128b/16B) */
+struct creq_cq_error_notification {
+ u8 type;
+ #define CREQ_CQ_ERROR_NOTIFICATION_TYPE_MASK 0x3fUL
+ #define CREQ_CQ_ERROR_NOTIFICATION_TYPE_SFT 0
+ #define CREQ_CQ_ERROR_NOTIFICATION_TYPE_CQ_EVENT 0x38UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_TYPE_LAST CREQ_CQ_ERROR_NOTIFICATION_TYPE_CQ_EVENT
+ u8 status;
+ u8 cq_err_reason;
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR 0x1UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR 0x2UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR 0x3UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR 0x4UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR 0x5UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR 0x6UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_LAST CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR
+ u8 reserved8;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CQ_ERROR_NOTIFICATION_V 0x1UL
+ u8 event;
+ #define CREQ_CQ_ERROR_NOTIFICATION_EVENT_CQ_ERROR_NOTIFICATION 0xc1UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_EVENT_LAST CREQ_CQ_ERROR_NOTIFICATION_EVENT_CQ_ERROR_NOTIFICATION
+ u8 reserved48[6];
+};
+
+/* sq_base (size:64b/8B) */
+struct sq_base {
+ u8 wqe_type;
+ #define SQ_BASE_WQE_TYPE_SEND 0x0UL
+ #define SQ_BASE_WQE_TYPE_SEND_W_IMMEAD 0x1UL
+ #define SQ_BASE_WQE_TYPE_SEND_W_INVALID 0x2UL
+ #define SQ_BASE_WQE_TYPE_WRITE_WQE 0x4UL
+ #define SQ_BASE_WQE_TYPE_WRITE_W_IMMEAD 0x5UL
+ #define SQ_BASE_WQE_TYPE_READ_WQE 0x6UL
+ #define SQ_BASE_WQE_TYPE_ATOMIC_CS 0x8UL
+ #define SQ_BASE_WQE_TYPE_ATOMIC_FA 0xbUL
+ #define SQ_BASE_WQE_TYPE_LOCAL_INVALID 0xcUL
+ #define SQ_BASE_WQE_TYPE_FR_PMR 0xdUL
+ #define SQ_BASE_WQE_TYPE_BIND 0xeUL
+ #define SQ_BASE_WQE_TYPE_FR_PPMR 0xfUL
+ #define SQ_BASE_WQE_TYPE_SEND_V3 0x10UL
+ #define SQ_BASE_WQE_TYPE_SEND_W_IMMED_V3 0x11UL
+ #define SQ_BASE_WQE_TYPE_SEND_W_INVALID_V3 0x12UL
+ #define SQ_BASE_WQE_TYPE_UDSEND_V3 0x13UL
+ #define SQ_BASE_WQE_TYPE_UDSEND_W_IMMED_V3 0x14UL
+ #define SQ_BASE_WQE_TYPE_WRITE_WQE_V3 0x15UL
+ #define SQ_BASE_WQE_TYPE_WRITE_W_IMMED_V3 0x16UL
+ #define SQ_BASE_WQE_TYPE_READ_WQE_V3 0x17UL
+ #define SQ_BASE_WQE_TYPE_ATOMIC_CS_V3 0x18UL
+ #define SQ_BASE_WQE_TYPE_ATOMIC_FA_V3 0x19UL
+ #define SQ_BASE_WQE_TYPE_LOCAL_INVALID_V3 0x1aUL
+ #define SQ_BASE_WQE_TYPE_FR_PMR_V3 0x1bUL
+ #define SQ_BASE_WQE_TYPE_BIND_V3 0x1cUL
+ #define SQ_BASE_WQE_TYPE_RAWQP1SEND_V3 0x1dUL
+ #define SQ_BASE_WQE_TYPE_CHANGE_UDPSRCPORT_V3 0x1eUL
+ #define SQ_BASE_WQE_TYPE_LAST SQ_BASE_WQE_TYPE_CHANGE_UDPSRCPORT_V3
+ u8 unused_0[7];
+};
+
+/* sq_sge (size:128b/16B) */
+struct sq_sge {
+ __le64 va_or_pa;
+ __le32 l_key;
+ __le32 size;
+};
+
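+/*
+ * Illustrative sketch (hypothetical helper, not part of the generated
+ * interface): a gather/scatter element is filled by converting each
+ * field to little-endian:
+ */
+static inline void sq_sge_fill(struct sq_sge *sge, u64 va, u32 l_key, u32 size)
+{
+	sge->va_or_pa = cpu_to_le64(va);
+	sge->l_key = cpu_to_le32(l_key);
+	sge->size = cpu_to_le32(size);
+}
+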
+/* sq_psn_search (size:64b/8B) */
+struct sq_psn_search {
+ __le32 opcode_start_psn;
+ #define SQ_PSN_SEARCH_START_PSN_MASK 0xffffffUL
+ #define SQ_PSN_SEARCH_START_PSN_SFT 0
+ #define SQ_PSN_SEARCH_OPCODE_MASK 0xff000000UL
+ #define SQ_PSN_SEARCH_OPCODE_SFT 24
+ __le32 flags_next_psn;
+ #define SQ_PSN_SEARCH_NEXT_PSN_MASK 0xffffffUL
+ #define SQ_PSN_SEARCH_NEXT_PSN_SFT 0
+ #define SQ_PSN_SEARCH_FLAGS_MASK 0xff000000UL
+ #define SQ_PSN_SEARCH_FLAGS_SFT 24
+};
+
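+/*
+ * Illustrative sketch (hypothetical helper, not part of the generated
+ * interface): opcode and start PSN share a single dword, packed with
+ * the shift/mask pairs above:
+ */
+static inline __le32 sq_psn_search_pack(u8 opcode, u32 start_psn)
+{
+	return cpu_to_le32(((u32)opcode << SQ_PSN_SEARCH_OPCODE_SFT) |
+			   (start_psn & SQ_PSN_SEARCH_START_PSN_MASK));
+}
+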
+/* sq_psn_search_ext (size:128b/16B) */
+struct sq_psn_search_ext {
+ __le32 opcode_start_psn;
+ #define SQ_PSN_SEARCH_EXT_START_PSN_MASK 0xffffffUL
+ #define SQ_PSN_SEARCH_EXT_START_PSN_SFT 0
+ #define SQ_PSN_SEARCH_EXT_OPCODE_MASK 0xff000000UL
+ #define SQ_PSN_SEARCH_EXT_OPCODE_SFT 24
+ __le32 flags_next_psn;
+ #define SQ_PSN_SEARCH_EXT_NEXT_PSN_MASK 0xffffffUL
+ #define SQ_PSN_SEARCH_EXT_NEXT_PSN_SFT 0
+ #define SQ_PSN_SEARCH_EXT_FLAGS_MASK 0xff000000UL
+ #define SQ_PSN_SEARCH_EXT_FLAGS_SFT 24
+ __le16 start_slot_idx;
+ __le16 reserved16;
+ __le32 reserved32;
+};
+
+/* sq_msn_search (size:64b/8B) */
+struct sq_msn_search {
+ __le64 start_idx_next_psn_start_psn;
+ #define SQ_MSN_SEARCH_START_PSN_MASK 0xffffffUL
+ #define SQ_MSN_SEARCH_START_PSN_SFT 0
+ #define SQ_MSN_SEARCH_NEXT_PSN_MASK 0xffffff000000ULL
+ #define SQ_MSN_SEARCH_NEXT_PSN_SFT 24
+ #define SQ_MSN_SEARCH_START_IDX_MASK 0xffff000000000000ULL
+ #define SQ_MSN_SEARCH_START_IDX_SFT 48
+};
+
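+/*
+ * Illustrative sketch (hypothetical helper, not part of the generated
+ * interface): an MSN table entry packs the start PSN, next PSN and SQ
+ * start slot index into one quadword:
+ */
+static inline __le64 sq_msn_search_pack(u32 start_psn, u32 next_psn,
+					u16 start_idx)
+{
+	return cpu_to_le64(((u64)start_idx << SQ_MSN_SEARCH_START_IDX_SFT) |
+			   (((u64)next_psn << SQ_MSN_SEARCH_NEXT_PSN_SFT) &
+			    SQ_MSN_SEARCH_NEXT_PSN_MASK) |
+			   (start_psn & SQ_MSN_SEARCH_START_PSN_MASK));
+}
+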
+/* sq_send (size:1024b/128B) */
+struct sq_send {
+ u8 wqe_type;
+ #define SQ_SEND_WQE_TYPE_SEND 0x0UL
+ #define SQ_SEND_WQE_TYPE_SEND_W_IMMEAD 0x1UL
+ #define SQ_SEND_WQE_TYPE_SEND_W_INVALID 0x2UL
+ #define SQ_SEND_WQE_TYPE_LAST SQ_SEND_WQE_TYPE_SEND_W_INVALID
+ u8 flags;
+ #define SQ_SEND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_SEND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_SEND_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_SEND_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_SEND_FLAGS_UC_FENCE 0x4UL
+ #define SQ_SEND_FLAGS_SE 0x8UL
+ #define SQ_SEND_FLAGS_INLINE 0x10UL
+ #define SQ_SEND_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_SEND_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ u8 reserved8_1;
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 q_key;
+ __le32 dst_qp;
+ #define SQ_SEND_DST_QP_MASK 0xffffffUL
+ #define SQ_SEND_DST_QP_SFT 0
+ __le32 avid;
+ #define SQ_SEND_AVID_MASK 0xfffffUL
+ #define SQ_SEND_AVID_SFT 0
+ __le32 reserved32;
+ __le32 timestamp;
+ #define SQ_SEND_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_SEND_TIMESTAMP_SFT 0
+ __le32 data[24];
+};
+
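+/*
+ * Illustrative sketch (hypothetical values, not part of the generated
+ * interface): a signaled, solicited send with immediate data combines
+ * the type code with flag bits, where imm_data is a caller-supplied
+ * value:
+ *
+ *	wqe->wqe_type = SQ_SEND_WQE_TYPE_SEND_W_IMMEAD;
+ *	wqe->flags = SQ_SEND_FLAGS_SIGNAL_COMP | SQ_SEND_FLAGS_SE;
+ *	wqe->inv_key_or_imm_data = cpu_to_le32(imm_data);
+ */
+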
+/* sq_send_hdr (size:256b/32B) */
+struct sq_send_hdr {
+ u8 wqe_type;
+ #define SQ_SEND_HDR_WQE_TYPE_SEND 0x0UL
+ #define SQ_SEND_HDR_WQE_TYPE_SEND_W_IMMEAD 0x1UL
+ #define SQ_SEND_HDR_WQE_TYPE_SEND_W_INVALID 0x2UL
+ #define SQ_SEND_HDR_WQE_TYPE_LAST SQ_SEND_HDR_WQE_TYPE_SEND_W_INVALID
+ u8 flags;
+ #define SQ_SEND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_SEND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_SEND_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_SEND_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_SEND_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_SEND_HDR_FLAGS_SE 0x8UL
+ #define SQ_SEND_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_SEND_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_SEND_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ u8 reserved8_1;
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 q_key;
+ __le32 dst_qp;
+ #define SQ_SEND_HDR_DST_QP_MASK 0xffffffUL
+ #define SQ_SEND_HDR_DST_QP_SFT 0
+ __le32 avid;
+ #define SQ_SEND_HDR_AVID_MASK 0xfffffUL
+ #define SQ_SEND_HDR_AVID_SFT 0
+ __le32 reserved32;
+ __le32 timestamp;
+ #define SQ_SEND_HDR_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_SEND_HDR_TIMESTAMP_SFT 0
+};
+
+/* sq_send_raweth_qp1 (size:1024b/128B) */
+struct sq_send_raweth_qp1 {
+ u8 wqe_type;
+ #define SQ_SEND_RAWETH_QP1_WQE_TYPE_SEND 0x0UL
+ #define SQ_SEND_RAWETH_QP1_WQE_TYPE_LAST SQ_SEND_RAWETH_QP1_WQE_TYPE_SEND
+ u8 flags;
+ #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_SEND_RAWETH_QP1_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_UC_FENCE 0x4UL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_SE 0x8UL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE 0x10UL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_SEND_RAWETH_QP1_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ u8 reserved8;
+ __le16 lflags;
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_TCP_UDP_CHKSUM 0x1UL
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM 0x2UL
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_NOCRC 0x4UL
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_STAMP 0x8UL
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_T_IP_CHKSUM 0x10UL
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC 0x100UL
+ #define SQ_SEND_RAWETH_QP1_LFLAGS_FCOE_CRC 0x200UL
+ __le16 cfa_action;
+ __le32 length;
+ __le32 reserved32_1;
+ __le32 cfa_meta;
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK 0xfffUL
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT 0
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_DE 0x1000UL
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_MASK 0xe000UL
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_SFT 13
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_MASK 0x70000UL
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_SFT 16
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_LAST SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPIDCFG
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_RESERVED_MASK 0xff80000UL
+ #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_RESERVED_SFT 19
+ #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_MASK 0xf0000000UL
+ #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_SFT 28
+ #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_NONE (0x0UL << 28)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_VLAN_TAG (0x1UL << 28)
+ #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_LAST SQ_SEND_RAWETH_QP1_CFA_META_KEY_VLAN_TAG
+ __le32 reserved32_2;
+ __le32 reserved32_3;
+ __le32 timestamp;
+ #define SQ_SEND_RAWETH_QP1_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_SEND_RAWETH_QP1_TIMESTAMP_SFT 0
+ __le32 data[24];
+};
+
+/* sq_send_raweth_qp1_hdr (size:256b/32B) */
+struct sq_send_raweth_qp1_hdr {
+ u8 wqe_type;
+ #define SQ_SEND_RAWETH_QP1_HDR_WQE_TYPE_SEND 0x0UL
+ #define SQ_SEND_RAWETH_QP1_HDR_WQE_TYPE_LAST SQ_SEND_RAWETH_QP1_HDR_WQE_TYPE_SEND
+ u8 flags;
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_SE 0x8UL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ u8 reserved8;
+ __le16 lflags;
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_TCP_UDP_CHKSUM 0x1UL
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_IP_CHKSUM 0x2UL
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_NOCRC 0x4UL
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_STAMP 0x8UL
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_T_IP_CHKSUM 0x10UL
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_ROCE_CRC 0x100UL
+ #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_FCOE_CRC 0x200UL
+ __le16 cfa_action;
+ __le32 length;
+ __le32 reserved32_1;
+ __le32 cfa_meta;
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_VID_MASK 0xfffUL
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_VID_SFT 0
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_DE 0x1000UL
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_PRI_MASK 0xe000UL
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_PRI_SFT 13
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_MASK 0x70000UL
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_SFT 16
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_LAST SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPIDCFG
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_RESERVED_MASK 0xff80000UL
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_RESERVED_SFT 19
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_MASK 0xf0000000UL
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_SFT 28
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_NONE (0x0UL << 28)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_VLAN_TAG (0x1UL << 28)
+ #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_LAST SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_VLAN_TAG
+ __le32 reserved32_2;
+ __le32 reserved32_3;
+ __le32 timestamp;
+ #define SQ_SEND_RAWETH_QP1_HDR_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_SEND_RAWETH_QP1_HDR_TIMESTAMP_SFT 0
+};
+
+/* sq_rdma (size:1024b/128B) */
+struct sq_rdma {
+ u8 wqe_type;
+ #define SQ_RDMA_WQE_TYPE_WRITE_WQE 0x4UL
+ #define SQ_RDMA_WQE_TYPE_WRITE_W_IMMEAD 0x5UL
+ #define SQ_RDMA_WQE_TYPE_READ_WQE 0x6UL
+ #define SQ_RDMA_WQE_TYPE_LAST SQ_RDMA_WQE_TYPE_READ_WQE
+ u8 flags;
+ #define SQ_RDMA_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_RDMA_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_RDMA_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_RDMA_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_RDMA_FLAGS_UC_FENCE 0x4UL
+ #define SQ_RDMA_FLAGS_SE 0x8UL
+ #define SQ_RDMA_FLAGS_INLINE 0x10UL
+ #define SQ_RDMA_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_RDMA_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 imm_data;
+ __le32 length;
+ __le32 reserved32_1;
+ __le64 remote_va;
+ __le32 remote_key;
+ __le32 timestamp;
+ #define SQ_RDMA_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_RDMA_TIMESTAMP_SFT 0
+ __le32 data[24];
+};
+
+/* sq_rdma_hdr (size:256b/32B) */
+struct sq_rdma_hdr {
+ u8 wqe_type;
+ #define SQ_RDMA_HDR_WQE_TYPE_WRITE_WQE 0x4UL
+ #define SQ_RDMA_HDR_WQE_TYPE_WRITE_W_IMMEAD 0x5UL
+ #define SQ_RDMA_HDR_WQE_TYPE_READ_WQE 0x6UL
+ #define SQ_RDMA_HDR_WQE_TYPE_LAST SQ_RDMA_HDR_WQE_TYPE_READ_WQE
+ u8 flags;
+ #define SQ_RDMA_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_RDMA_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_RDMA_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_RDMA_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_RDMA_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_RDMA_HDR_FLAGS_SE 0x8UL
+ #define SQ_RDMA_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_RDMA_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_RDMA_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 imm_data;
+ __le32 length;
+ __le32 reserved32_1;
+ __le64 remote_va;
+ __le32 remote_key;
+ __le32 timestamp;
+ #define SQ_RDMA_HDR_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_RDMA_HDR_TIMESTAMP_SFT 0
+};
+
+/* sq_atomic (size:1024b/128B) */
+struct sq_atomic {
+ u8 wqe_type;
+ #define SQ_ATOMIC_WQE_TYPE_ATOMIC_CS 0x8UL
+ #define SQ_ATOMIC_WQE_TYPE_ATOMIC_FA 0xbUL
+ #define SQ_ATOMIC_WQE_TYPE_LAST SQ_ATOMIC_WQE_TYPE_ATOMIC_FA
+ u8 flags;
+ #define SQ_ATOMIC_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_ATOMIC_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_ATOMIC_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_ATOMIC_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_ATOMIC_FLAGS_UC_FENCE 0x4UL
+ #define SQ_ATOMIC_FLAGS_SE 0x8UL
+ #define SQ_ATOMIC_FLAGS_INLINE 0x10UL
+ #define SQ_ATOMIC_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_ATOMIC_FLAGS_DEBUG_TRACE 0x40UL
+ __le16 reserved16;
+ __le32 remote_key;
+ __le64 remote_va;
+ __le64 swap_data;
+ __le64 cmp_data;
+ __le32 data[24];
+};
+
+/* sq_atomic_hdr (size:256b/32B) */
+struct sq_atomic_hdr {
+ u8 wqe_type;
+ #define SQ_ATOMIC_HDR_WQE_TYPE_ATOMIC_CS 0x8UL
+ #define SQ_ATOMIC_HDR_WQE_TYPE_ATOMIC_FA 0xbUL
+ #define SQ_ATOMIC_HDR_WQE_TYPE_LAST SQ_ATOMIC_HDR_WQE_TYPE_ATOMIC_FA
+ u8 flags;
+ #define SQ_ATOMIC_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_ATOMIC_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_ATOMIC_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_ATOMIC_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_ATOMIC_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_ATOMIC_HDR_FLAGS_SE 0x8UL
+ #define SQ_ATOMIC_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_ATOMIC_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_ATOMIC_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ __le16 reserved16;
+ __le32 remote_key;
+ __le64 remote_va;
+ __le64 swap_data;
+ __le64 cmp_data;
+};
+
+/* sq_localinvalidate (size:1024b/128B) */
+struct sq_localinvalidate {
+ u8 wqe_type;
+ #define SQ_LOCALINVALIDATE_WQE_TYPE_LOCAL_INVALID 0xcUL
+ #define SQ_LOCALINVALIDATE_WQE_TYPE_LAST SQ_LOCALINVALIDATE_WQE_TYPE_LOCAL_INVALID
+ u8 flags;
+ #define SQ_LOCALINVALIDATE_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_LOCALINVALIDATE_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_LOCALINVALIDATE_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_LOCALINVALIDATE_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_LOCALINVALIDATE_FLAGS_UC_FENCE 0x4UL
+ #define SQ_LOCALINVALIDATE_FLAGS_SE 0x8UL
+ #define SQ_LOCALINVALIDATE_FLAGS_INLINE 0x10UL
+ #define SQ_LOCALINVALIDATE_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_LOCALINVALIDATE_FLAGS_DEBUG_TRACE 0x40UL
+ __le16 reserved16;
+ __le32 inv_l_key;
+ __le64 reserved64;
+ u8 reserved128[16];
+ __le32 data[24];
+};
+
+/* sq_localinvalidate_hdr (size:256b/32B) */
+struct sq_localinvalidate_hdr {
+ u8 wqe_type;
+ #define SQ_LOCALINVALIDATE_HDR_WQE_TYPE_LOCAL_INVALID 0xcUL
+ #define SQ_LOCALINVALIDATE_HDR_WQE_TYPE_LAST SQ_LOCALINVALIDATE_HDR_WQE_TYPE_LOCAL_INVALID
+ u8 flags;
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_SE 0x8UL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_LOCALINVALIDATE_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ __le16 reserved16;
+ __le32 inv_l_key;
+ __le64 reserved64;
+ u8 reserved128[16];
+};
+
+/* sq_fr_pmr (size:1024b/128B) */
+struct sq_fr_pmr {
+ u8 wqe_type;
+ #define SQ_FR_PMR_WQE_TYPE_FR_PMR 0xdUL
+ #define SQ_FR_PMR_WQE_TYPE_LAST SQ_FR_PMR_WQE_TYPE_FR_PMR
+ u8 flags;
+ #define SQ_FR_PMR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_FR_PMR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_FR_PMR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_FR_PMR_FLAGS_SE 0x8UL
+ #define SQ_FR_PMR_FLAGS_INLINE 0x10UL
+ #define SQ_FR_PMR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_FR_PMR_FLAGS_DEBUG_TRACE 0x40UL
+ u8 access_cntl;
+ #define SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+ #define SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ 0x2UL
+ #define SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+ #define SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+ #define SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND 0x10UL
+ u8 zero_based_page_size_log;
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_MASK 0x1fUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_SFT 0
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4K 0x0UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8K 0x1UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_16K 0x2UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_32K 0x3UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64K 0x4UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_128K 0x5UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256K 0x6UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_512K 0x7UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1M 0x8UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2M 0x9UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4M 0xaUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8M 0xbUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_16M 0xcUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_32M 0xdUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64M 0xeUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_128M 0xfUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256M 0x10UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_512M 0x11UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1G 0x12UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2G 0x13UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4G 0x14UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8G 0x15UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_16G 0x16UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_32G 0x17UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64G 0x18UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_128G 0x19UL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL
+ #define SQ_FR_PMR_PAGE_SIZE_LOG_LAST SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8T
+ #define SQ_FR_PMR_ZERO_BASED 0x20UL
+ __le32 l_key;
+ u8 length[5];
+ u8 reserved8_1;
+ u8 reserved8_2;
+ u8 numlevels_pbl_page_size_log;
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK 0x1fUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT 0
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4K 0x0UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8K 0x1UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_16K 0x2UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_32K 0x3UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64K 0x4UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_128K 0x5UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256K 0x6UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_512K 0x7UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1M 0x8UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2M 0x9UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4M 0xaUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8M 0xbUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_16M 0xcUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_32M 0xdUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64M 0xeUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_128M 0xfUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256M 0x10UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_512M 0x11UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1G 0x12UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2G 0x13UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4G 0x14UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8G 0x15UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_16G 0x16UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_32G 0x17UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64G 0x18UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_128G 0x19UL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL
+ #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8T
+ #define SQ_FR_PMR_NUMLEVELS_MASK 0xc0UL
+ #define SQ_FR_PMR_NUMLEVELS_SFT 6
+ #define SQ_FR_PMR_NUMLEVELS_PHYSICAL (0x0UL << 6)
+ #define SQ_FR_PMR_NUMLEVELS_LAYER1 (0x1UL << 6)
+ #define SQ_FR_PMR_NUMLEVELS_LAYER2 (0x2UL << 6)
+ #define SQ_FR_PMR_NUMLEVELS_LAST SQ_FR_PMR_NUMLEVELS_LAYER2
+ __le64 pblptr;
+ __le64 va;
+ __le32 data[24];
+};
+
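+/*
+ * Illustrative sketch (hypothetical helper; byte order assumed from the
+ * surrounding little-endian fields): length[] holds a 40-bit MR length,
+ * and the PGSZ_* codes above encode log2(page_size / 4K), i.e.
+ * page_size = 4K << code (0x0 = 4K ... 0x1f = 8T):
+ */
+static inline void sq_fr_pmr_set_length(struct sq_fr_pmr *wqe, u64 length)
+{
+	int i;
+
+	for (i = 0; i < 5; i++)
+		wqe->length[i] = (length >> (i * 8)) & 0xff;
+}
+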
+/* sq_fr_pmr_hdr (size:256b/32B) */
+struct sq_fr_pmr_hdr {
+ u8 wqe_type;
+ #define SQ_FR_PMR_HDR_WQE_TYPE_FR_PMR 0xdUL
+ #define SQ_FR_PMR_HDR_WQE_TYPE_LAST SQ_FR_PMR_HDR_WQE_TYPE_FR_PMR
+ u8 flags;
+ #define SQ_FR_PMR_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_FR_PMR_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_FR_PMR_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_FR_PMR_HDR_FLAGS_SE 0x8UL
+ #define SQ_FR_PMR_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_FR_PMR_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_FR_PMR_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ u8 access_cntl;
+ #define SQ_FR_PMR_HDR_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+ #define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_READ 0x2UL
+ #define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+ #define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+ #define SQ_FR_PMR_HDR_ACCESS_CNTL_WINDOW_BIND 0x10UL
+ u8 zero_based_page_size_log;
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_MASK 0x1fUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_SFT 0
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4K 0x0UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8K 0x1UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16K 0x2UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32K 0x3UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64K 0x4UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128K 0x5UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256K 0x6UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512K 0x7UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1M 0x8UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2M 0x9UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4M 0xaUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8M 0xbUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16M 0xcUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32M 0xdUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64M 0xeUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128M 0xfUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256M 0x10UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512M 0x11UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1G 0x12UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2G 0x13UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4G 0x14UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8G 0x15UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16G 0x16UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32G 0x17UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64G 0x18UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128G 0x19UL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL
+ #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8T
+ #define SQ_FR_PMR_HDR_ZERO_BASED 0x20UL
+ __le32 l_key;
+ u8 length[5];
+ u8 reserved8_1;
+ u8 reserved8_2;
+ u8 numlevels_pbl_page_size_log;
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_MASK 0x1fUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_SFT 0
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4K 0x0UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8K 0x1UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16K 0x2UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32K 0x3UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64K 0x4UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128K 0x5UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256K 0x6UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512K 0x7UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1M 0x8UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2M 0x9UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4M 0xaUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8M 0xbUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16M 0xcUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32M 0xdUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64M 0xeUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128M 0xfUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256M 0x10UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512M 0x11UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1G 0x12UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2G 0x13UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4G 0x14UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8G 0x15UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16G 0x16UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32G 0x17UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64G 0x18UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128G 0x19UL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL
+ #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8T
+ #define SQ_FR_PMR_HDR_NUMLEVELS_MASK 0xc0UL
+ #define SQ_FR_PMR_HDR_NUMLEVELS_SFT 6
+ #define SQ_FR_PMR_HDR_NUMLEVELS_PHYSICAL (0x0UL << 6)
+ #define SQ_FR_PMR_HDR_NUMLEVELS_LAYER1 (0x1UL << 6)
+ #define SQ_FR_PMR_HDR_NUMLEVELS_LAYER2 (0x2UL << 6)
+ #define SQ_FR_PMR_HDR_NUMLEVELS_LAST SQ_FR_PMR_HDR_NUMLEVELS_LAYER2
+ __le64 pblptr;
+ __le64 va;
+};
+
+/* sq_bind (size:1024b/128B) */
+struct sq_bind {
+ u8 wqe_type;
+ #define SQ_BIND_WQE_TYPE_BIND 0xeUL
+ #define SQ_BIND_WQE_TYPE_LAST SQ_BIND_WQE_TYPE_BIND
+ u8 flags;
+ #define SQ_BIND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_BIND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_BIND_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_BIND_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_BIND_FLAGS_UC_FENCE 0x4UL
+ #define SQ_BIND_FLAGS_SE 0x8UL
+ #define SQ_BIND_FLAGS_INLINE 0x10UL
+ #define SQ_BIND_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_BIND_FLAGS_DEBUG_TRACE 0x40UL
+ u8 access_cntl;
+ #define SQ_BIND_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_MASK 0xffUL
+ #define SQ_BIND_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_SFT 0
+ #define SQ_BIND_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+ #define SQ_BIND_ACCESS_CNTL_REMOTE_READ 0x2UL
+ #define SQ_BIND_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+ #define SQ_BIND_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+ #define SQ_BIND_ACCESS_CNTL_WINDOW_BIND 0x10UL
+ u8 reserved8_1;
+ u8 mw_type_zero_based;
+ #define SQ_BIND_ZERO_BASED 0x1UL
+ #define SQ_BIND_MW_TYPE 0x2UL
+ #define SQ_BIND_MW_TYPE_TYPE1 (0x0UL << 1)
+ #define SQ_BIND_MW_TYPE_TYPE2 (0x1UL << 1)
+ #define SQ_BIND_MW_TYPE_LAST SQ_BIND_MW_TYPE_TYPE2
+ u8 reserved8_2;
+ __le16 reserved16;
+ __le32 parent_l_key;
+ __le32 l_key;
+ __le64 va;
+ u8 length[5];
+ u8 reserved24[3];
+ __le32 data[24];
+};
+
+/* sq_bind_hdr (size:256b/32B) */
+struct sq_bind_hdr {
+ u8 wqe_type;
+ #define SQ_BIND_HDR_WQE_TYPE_BIND 0xeUL
+ #define SQ_BIND_HDR_WQE_TYPE_LAST SQ_BIND_HDR_WQE_TYPE_BIND
+ u8 flags;
+ #define SQ_BIND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+ #define SQ_BIND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+ #define SQ_BIND_HDR_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_BIND_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_BIND_HDR_FLAGS_UC_FENCE 0x4UL
+ #define SQ_BIND_HDR_FLAGS_SE 0x8UL
+ #define SQ_BIND_HDR_FLAGS_INLINE 0x10UL
+ #define SQ_BIND_HDR_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_BIND_HDR_FLAGS_DEBUG_TRACE 0x40UL
+ u8 access_cntl;
+ #define SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_MASK 0xffUL
+ #define SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_SFT 0
+ #define SQ_BIND_HDR_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+ #define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_READ 0x2UL
+ #define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+ #define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+ #define SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND 0x10UL
+ u8 reserved8_1;
+ u8 mw_type_zero_based;
+ #define SQ_BIND_HDR_ZERO_BASED 0x1UL
+ #define SQ_BIND_HDR_MW_TYPE 0x2UL
+ #define SQ_BIND_HDR_MW_TYPE_TYPE1 (0x0UL << 1)
+ #define SQ_BIND_HDR_MW_TYPE_TYPE2 (0x1UL << 1)
+ #define SQ_BIND_HDR_MW_TYPE_LAST SQ_BIND_HDR_MW_TYPE_TYPE2
+ u8 reserved8_2;
+ __le16 reserved16;
+ __le32 parent_l_key;
+ __le32 l_key;
+ __le64 va;
+ u8 length[5];
+ u8 reserved24[3];
+};
+
+/* sq_msn_search_v3 (size:128b/16B) */
+struct sq_msn_search_v3 {
+ __le64 idx_psn;
+ #define SQ_MSN_SEARCH_V3_START_PSN_MASK 0xffffffUL
+ #define SQ_MSN_SEARCH_V3_START_PSN_SFT 0
+ #define SQ_MSN_SEARCH_V3_NEXT_PSN_MASK 0xffffff000000ULL
+ #define SQ_MSN_SEARCH_V3_NEXT_PSN_SFT 24
+ #define SQ_MSN_SEARCH_V3_START_IDX_MASK 0xffff000000000000ULL
+ #define SQ_MSN_SEARCH_V3_START_IDX_SFT 48
+ __le32 wqe_opaque;
+ u8 wqe_size;
+ u8 signal;
+ #define SQ_MSN_SEARCH_V3_SGNLD 0x1UL
+ #define SQ_MSN_SEARCH_V3_PREV_SGNLD_LOCAL_MEM_WQE 0x2UL
+ __le16 reserved;
+};
+
+/* sq_send_v3 (size:1024b/128B) */
+struct sq_send_v3 {
+ u8 wqe_type;
+ #define SQ_SEND_V3_WQE_TYPE_SEND_V3 0x10UL
+ #define SQ_SEND_V3_WQE_TYPE_SEND_W_IMMED_V3 0x11UL
+ #define SQ_SEND_V3_WQE_TYPE_SEND_W_INVALID_V3 0x12UL
+ #define SQ_SEND_V3_WQE_TYPE_LAST SQ_SEND_V3_WQE_TYPE_SEND_W_INVALID_V3
+ u8 flags;
+ #define SQ_SEND_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_SEND_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_SEND_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_SEND_V3_FLAGS_SE 0x8UL
+ #define SQ_SEND_V3_FLAGS_INLINE 0x10UL
+ #define SQ_SEND_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_SEND_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_SEND_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_SEND_V3_WQE_SIZE_SFT 0
+ u8 inline_length;
+ #define SQ_SEND_V3_INLINE_LENGTH_MASK 0xfUL
+ #define SQ_SEND_V3_INLINE_LENGTH_SFT 0
+ __le32 opaque;
+ __le32 inv_key_or_imm_data;
+ __le32 timestamp;
+ #define SQ_SEND_V3_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_SEND_V3_TIMESTAMP_SFT 0
+ __le32 data[28];
+};
+
+/* sq_send_hdr_v3 (size:128b/16B) */
+struct sq_send_hdr_v3 {
+ u8 wqe_type;
+ #define SQ_SEND_HDR_V3_WQE_TYPE_SEND_V3 0x10UL
+ #define SQ_SEND_HDR_V3_WQE_TYPE_SEND_W_IMMED_V3 0x11UL
+ #define SQ_SEND_HDR_V3_WQE_TYPE_SEND_W_INVALID_V3 0x12UL
+ #define SQ_SEND_HDR_V3_WQE_TYPE_LAST SQ_SEND_HDR_V3_WQE_TYPE_SEND_W_INVALID_V3
+ u8 flags;
+ #define SQ_SEND_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_SEND_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_SEND_HDR_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_SEND_HDR_V3_FLAGS_SE 0x8UL
+ #define SQ_SEND_HDR_V3_FLAGS_INLINE 0x10UL
+ #define SQ_SEND_HDR_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_SEND_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_SEND_HDR_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_SEND_HDR_V3_WQE_SIZE_SFT 0
+ u8 inline_length;
+ #define SQ_SEND_HDR_V3_INLINE_LENGTH_MASK 0xfUL
+ #define SQ_SEND_HDR_V3_INLINE_LENGTH_SFT 0
+ __le32 opaque;
+ __le32 inv_key_or_imm_data;
+ __le32 timestamp;
+ #define SQ_SEND_HDR_V3_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_SEND_HDR_V3_TIMESTAMP_SFT 0
+};
+
+/* sq_rawqp1send_v3 (size:1024b/128B) */
+struct sq_rawqp1send_v3 {
+ u8 wqe_type;
+ #define SQ_RAWQP1SEND_V3_WQE_TYPE_RAWQP1SEND_V3 0x1dUL
+ #define SQ_RAWQP1SEND_V3_WQE_TYPE_LAST SQ_RAWQP1SEND_V3_WQE_TYPE_RAWQP1SEND_V3
+ u8 flags;
+ #define SQ_RAWQP1SEND_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_RAWQP1SEND_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_RAWQP1SEND_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_RAWQP1SEND_V3_FLAGS_SE 0x8UL
+ #define SQ_RAWQP1SEND_V3_FLAGS_INLINE 0x10UL
+ #define SQ_RAWQP1SEND_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_RAWQP1SEND_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_RAWQP1SEND_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_RAWQP1SEND_V3_WQE_SIZE_SFT 0
+ u8 inline_length;
+ #define SQ_RAWQP1SEND_V3_INLINE_LENGTH_MASK 0xfUL
+ #define SQ_RAWQP1SEND_V3_INLINE_LENGTH_SFT 0
+ __le32 opaque;
+ __le16 lflags;
+ #define SQ_RAWQP1SEND_V3_LFLAGS_TCP_UDP_CHKSUM 0x1UL
+ #define SQ_RAWQP1SEND_V3_LFLAGS_IP_CHKSUM 0x2UL
+ #define SQ_RAWQP1SEND_V3_LFLAGS_NOCRC 0x4UL
+ #define SQ_RAWQP1SEND_V3_LFLAGS_T_IP_CHKSUM 0x10UL
+ #define SQ_RAWQP1SEND_V3_LFLAGS_OT_IP_CHKSUM 0x20UL
+ #define SQ_RAWQP1SEND_V3_LFLAGS_ROCE_CRC 0x100UL
+ #define SQ_RAWQP1SEND_V3_LFLAGS_FCOE_CRC 0x200UL
+ __le16 cfa_action;
+ __le16 cfa_action_high;
+ #define SQ_RAWQP1SEND_V3_CFA_ACTION_HIGH_MASK 0x3ffUL
+ #define SQ_RAWQP1SEND_V3_CFA_ACTION_HIGH_SFT 0
+ __le16 reserved_2;
+ __le32 cfa_meta;
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_VID_MASK 0xfffUL
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_VID_SFT 0
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_DE 0x1000UL
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_PRI_MASK 0xe000UL
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_PRI_SFT 13
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_MASK 0x70000UL
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_SFT 16
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16)
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16)
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16)
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16)
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16)
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16)
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_LAST SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPIDCFG
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_RESERVED_MASK 0xff80000UL
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_RESERVED_SFT 19
+ #define SQ_RAWQP1SEND_V3_CFA_META_KEY_MASK 0xf0000000UL
+ #define SQ_RAWQP1SEND_V3_CFA_META_KEY_SFT 28
+ #define SQ_RAWQP1SEND_V3_CFA_META_KEY_NONE (0x0UL << 28)
+ #define SQ_RAWQP1SEND_V3_CFA_META_KEY_VLAN_TAG (0x1UL << 28)
+ #define SQ_RAWQP1SEND_V3_CFA_META_KEY_LAST SQ_RAWQP1SEND_V3_CFA_META_KEY_VLAN_TAG
+ __le32 timestamp;
+ #define SQ_RAWQP1SEND_V3_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_RAWQP1SEND_V3_TIMESTAMP_SFT 0
+ __le64 reserved_3;
+ __le32 data[24];
+};
+
+/* sq_rawqp1send_hdr_v3 (size:256b/32B) */
+struct sq_rawqp1send_hdr_v3 {
+ u8 wqe_type;
+ #define SQ_RAWQP1SEND_HDR_V3_WQE_TYPE_RAWQP1SEND_V3 0x1dUL
+ #define SQ_RAWQP1SEND_HDR_V3_WQE_TYPE_LAST SQ_RAWQP1SEND_HDR_V3_WQE_TYPE_RAWQP1SEND_V3
+ u8 flags;
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_SE 0x8UL
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_INLINE 0x10UL
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_RAWQP1SEND_HDR_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_RAWQP1SEND_HDR_V3_WQE_SIZE_SFT 0
+ u8 inline_length;
+ #define SQ_RAWQP1SEND_HDR_V3_INLINE_LENGTH_MASK 0xfUL
+ #define SQ_RAWQP1SEND_HDR_V3_INLINE_LENGTH_SFT 0
+ __le32 opaque;
+ __le16 lflags;
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_TCP_UDP_CHKSUM 0x1UL
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_IP_CHKSUM 0x2UL
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_NOCRC 0x4UL
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_T_IP_CHKSUM 0x10UL
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_OT_IP_CHKSUM 0x20UL
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_ROCE_CRC 0x100UL
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_FCOE_CRC 0x200UL
+ __le16 cfa_action;
+ __le16 cfa_action_high;
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_ACTION_HIGH_MASK 0x3ffUL
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_ACTION_HIGH_SFT 0
+ __le16 reserved_2;
+ __le32 cfa_meta;
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_VID_MASK 0xfffUL
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_VID_SFT 0
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_DE 0x1000UL
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_PRI_MASK 0xe000UL
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_PRI_SFT 13
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_MASK 0x70000UL
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_SFT 16
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_LAST SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPIDCFG
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_RESERVED_MASK 0xff80000UL
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_RESERVED_SFT 19
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_MASK 0xf0000000UL
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_SFT 28
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_NONE (0x0UL << 28)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_VLAN_TAG (0x1UL << 28)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_LAST SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_VLAN_TAG
+ __le32 timestamp;
+ #define SQ_RAWQP1SEND_HDR_V3_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_RAWQP1SEND_HDR_V3_TIMESTAMP_SFT 0
+ __le64 reserved_3;
+};
+
+/* sq_udsend_v3 (size:1024b/128B) */
+struct sq_udsend_v3 {
+ u8 wqe_type;
+ #define SQ_UDSEND_V3_WQE_TYPE_UDSEND_V3 0x13UL
+ #define SQ_UDSEND_V3_WQE_TYPE_UDSEND_W_IMMED_V3 0x14UL
+ #define SQ_UDSEND_V3_WQE_TYPE_LAST SQ_UDSEND_V3_WQE_TYPE_UDSEND_W_IMMED_V3
+ u8 flags;
+ #define SQ_UDSEND_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_UDSEND_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_UDSEND_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_UDSEND_V3_FLAGS_SE 0x8UL
+ #define SQ_UDSEND_V3_FLAGS_INLINE 0x10UL
+ #define SQ_UDSEND_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_UDSEND_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_UDSEND_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_UDSEND_V3_WQE_SIZE_SFT 0
+ u8 inline_length;
+ #define SQ_UDSEND_V3_INLINE_LENGTH_MASK 0xfUL
+ #define SQ_UDSEND_V3_INLINE_LENGTH_SFT 0
+ __le32 opaque;
+ __le32 imm_data;
+ __le32 q_key;
+ __le32 dst_qp;
+ #define SQ_UDSEND_V3_DST_QP_MASK 0xffffffUL
+ #define SQ_UDSEND_V3_DST_QP_SFT 0
+ __le32 avid;
+ #define SQ_UDSEND_V3_AVID_MASK 0xfffffUL
+ #define SQ_UDSEND_V3_AVID_SFT 0
+ __le32 reserved2;
+ __le32 timestamp;
+ #define SQ_UDSEND_V3_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_UDSEND_V3_TIMESTAMP_SFT 0
+ __le32 data[24];
+};
+
+/* sq_udsend_hdr_v3 (size:256b/32B) */
+struct sq_udsend_hdr_v3 {
+ u8 wqe_type;
+ #define SQ_UDSEND_HDR_V3_WQE_TYPE_UDSEND_V3 0x13UL
+ #define SQ_UDSEND_HDR_V3_WQE_TYPE_UDSEND_W_IMMED_V3 0x14UL
+ #define SQ_UDSEND_HDR_V3_WQE_TYPE_LAST SQ_UDSEND_HDR_V3_WQE_TYPE_UDSEND_W_IMMED_V3
+ u8 flags;
+ #define SQ_UDSEND_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_UDSEND_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_UDSEND_HDR_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_UDSEND_HDR_V3_FLAGS_SE 0x8UL
+ #define SQ_UDSEND_HDR_V3_FLAGS_INLINE 0x10UL
+ #define SQ_UDSEND_HDR_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_UDSEND_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_UDSEND_HDR_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_UDSEND_HDR_V3_WQE_SIZE_SFT 0
+ u8 inline_length;
+ #define SQ_UDSEND_HDR_V3_INLINE_LENGTH_MASK 0xfUL
+ #define SQ_UDSEND_HDR_V3_INLINE_LENGTH_SFT 0
+ __le32 opaque;
+ __le32 imm_data;
+ __le32 q_key;
+ __le32 dst_qp;
+ #define SQ_UDSEND_HDR_V3_DST_QP_MASK 0xffffffUL
+ #define SQ_UDSEND_HDR_V3_DST_QP_SFT 0
+ __le32 avid;
+ #define SQ_UDSEND_HDR_V3_AVID_MASK 0xfffffUL
+ #define SQ_UDSEND_HDR_V3_AVID_SFT 0
+ __le32 reserved2;
+ __le32 timestamp;
+ #define SQ_UDSEND_HDR_V3_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_UDSEND_HDR_V3_TIMESTAMP_SFT 0
+};
+
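+/*
+ * Illustrative sketch, not part of the generated interface: a UD send
+ * addresses its peer through a masked 24-bit destination QP number and a
+ * 20-bit address-handle index (avid).  Hypothetical encoder:
+ */
+static inline void sq_udsend_hdr_v3_set_dst(struct sq_udsend_hdr_v3 *hdr,
+                                            u32 dst_qp, u32 avid)
+{
+ hdr->dst_qp = cpu_to_le32(dst_qp & SQ_UDSEND_HDR_V3_DST_QP_MASK);
+ hdr->avid = cpu_to_le32(avid & SQ_UDSEND_HDR_V3_AVID_MASK);
+}
+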
+/* sq_rdma_v3 (size:1024b/128B) */
+struct sq_rdma_v3 {
+ u8 wqe_type;
+ #define SQ_RDMA_V3_WQE_TYPE_WRITE_WQE_V3 0x15UL
+ #define SQ_RDMA_V3_WQE_TYPE_WRITE_W_IMMED_V3 0x16UL
+ #define SQ_RDMA_V3_WQE_TYPE_READ_WQE_V3 0x17UL
+ #define SQ_RDMA_V3_WQE_TYPE_LAST SQ_RDMA_V3_WQE_TYPE_READ_WQE_V3
+ u8 flags;
+ #define SQ_RDMA_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_RDMA_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_RDMA_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_RDMA_V3_FLAGS_SE 0x8UL
+ #define SQ_RDMA_V3_FLAGS_INLINE 0x10UL
+ #define SQ_RDMA_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_RDMA_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_RDMA_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_RDMA_V3_WQE_SIZE_SFT 0
+ u8 inline_length;
+ #define SQ_RDMA_V3_INLINE_LENGTH_MASK 0xfUL
+ #define SQ_RDMA_V3_INLINE_LENGTH_SFT 0
+ __le32 opaque;
+ __le32 imm_data;
+ __le32 reserved2;
+ __le64 remote_va;
+ __le32 remote_key;
+ __le32 timestamp;
+ #define SQ_RDMA_V3_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_RDMA_V3_TIMESTAMP_SFT 0
+ __le32 data[24];
+};
+
+/* sq_rdma_hdr_v3 (size:256b/32B) */
+struct sq_rdma_hdr_v3 {
+ u8 wqe_type;
+ #define SQ_RDMA_HDR_V3_WQE_TYPE_WRITE_WQE_V3 0x15UL
+ #define SQ_RDMA_HDR_V3_WQE_TYPE_WRITE_W_IMMED_V3 0x16UL
+ #define SQ_RDMA_HDR_V3_WQE_TYPE_READ_WQE_V3 0x17UL
+ #define SQ_RDMA_HDR_V3_WQE_TYPE_LAST SQ_RDMA_HDR_V3_WQE_TYPE_READ_WQE_V3
+ u8 flags;
+ #define SQ_RDMA_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_RDMA_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_RDMA_HDR_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_RDMA_HDR_V3_FLAGS_SE 0x8UL
+ #define SQ_RDMA_HDR_V3_FLAGS_INLINE 0x10UL
+ #define SQ_RDMA_HDR_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_RDMA_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_RDMA_HDR_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_RDMA_HDR_V3_WQE_SIZE_SFT 0
+ u8 inline_length;
+ #define SQ_RDMA_HDR_V3_INLINE_LENGTH_MASK 0xfUL
+ #define SQ_RDMA_HDR_V3_INLINE_LENGTH_SFT 0
+ __le32 opaque;
+ __le32 imm_data;
+ __le32 reserved2;
+ __le64 remote_va;
+ __le32 remote_key;
+ __le32 timestamp;
+ #define SQ_RDMA_HDR_V3_TIMESTAMP_MASK 0xffffffUL
+ #define SQ_RDMA_HDR_V3_TIMESTAMP_SFT 0
+};
+
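+/*
+ * Illustrative sketch, not part of the generated interface: RDMA READ and
+ * WRITE WQEs name the remote buffer with a 64-bit virtual address and an
+ * R_Key.  Hypothetical fill of those fields:
+ */
+static inline void sq_rdma_hdr_v3_set_remote(struct sq_rdma_hdr_v3 *hdr,
+                                             u64 va, u32 rkey)
+{
+ hdr->remote_va = cpu_to_le64(va);
+ hdr->remote_key = cpu_to_le32(rkey);
+}
+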
+/* sq_atomic_v3 (size:448b/56B) */
+struct sq_atomic_v3 {
+ u8 wqe_type;
+ #define SQ_ATOMIC_V3_WQE_TYPE_ATOMIC_CS_V3 0x18UL
+ #define SQ_ATOMIC_V3_WQE_TYPE_ATOMIC_FA_V3 0x19UL
+ #define SQ_ATOMIC_V3_WQE_TYPE_LAST SQ_ATOMIC_V3_WQE_TYPE_ATOMIC_FA_V3
+ u8 flags;
+ #define SQ_ATOMIC_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_ATOMIC_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_ATOMIC_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_ATOMIC_V3_FLAGS_SE 0x8UL
+ #define SQ_ATOMIC_V3_FLAGS_INLINE 0x10UL
+ #define SQ_ATOMIC_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_ATOMIC_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_ATOMIC_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_ATOMIC_V3_WQE_SIZE_SFT 0
+ u8 reserved1;
+ __le32 opaque;
+ __le32 remote_key;
+ __le32 reserved2;
+ __le64 remote_va;
+ __le64 swap_data;
+ __le64 cmp_data;
+ __le64 va_or_pa;
+ __le32 l_key;
+ __le32 size;
+};
+
+/* sq_atomic_hdr_v3 (size:320b/40B) */
+struct sq_atomic_hdr_v3 {
+ u8 wqe_type;
+ #define SQ_ATOMIC_HDR_V3_WQE_TYPE_ATOMIC_CS_V3 0x18UL
+ #define SQ_ATOMIC_HDR_V3_WQE_TYPE_ATOMIC_FA_V3 0x19UL
+ #define SQ_ATOMIC_HDR_V3_WQE_TYPE_LAST SQ_ATOMIC_HDR_V3_WQE_TYPE_ATOMIC_FA_V3
+ u8 flags;
+ #define SQ_ATOMIC_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_ATOMIC_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_ATOMIC_HDR_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_ATOMIC_HDR_V3_FLAGS_SE 0x8UL
+ #define SQ_ATOMIC_HDR_V3_FLAGS_INLINE 0x10UL
+ #define SQ_ATOMIC_HDR_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_ATOMIC_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_ATOMIC_HDR_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_ATOMIC_HDR_V3_WQE_SIZE_SFT 0
+ u8 reserved1;
+ __le32 opaque;
+ __le32 remote_key;
+ __le32 reserved2;
+ __le64 remote_va;
+ __le64 swap_data;
+ __le64 cmp_data;
+};
+
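+/*
+ * Illustrative sketch, not part of the generated interface: for an atomic
+ * compare-and-swap the compare operand goes in cmp_data and the swap
+ * operand in swap_data (for fetch-and-add, swap_data presumably carries
+ * the add operand, since the layout is shared).  Hypothetical CS fill:
+ */
+static inline void sq_atomic_hdr_v3_set_cs(struct sq_atomic_hdr_v3 *hdr,
+                                           u64 cmp, u64 swap)
+{
+ hdr->wqe_type = SQ_ATOMIC_HDR_V3_WQE_TYPE_ATOMIC_CS_V3;
+ hdr->cmp_data = cpu_to_le64(cmp);
+ hdr->swap_data = cpu_to_le64(swap);
+}
+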
+/* sq_localinvalidate_v3 (size:128b/16B) */
+struct sq_localinvalidate_v3 {
+ u8 wqe_type;
+ #define SQ_LOCALINVALIDATE_V3_WQE_TYPE_LOCAL_INVALID_V3 0x1aUL
+ #define SQ_LOCALINVALIDATE_V3_WQE_TYPE_LAST SQ_LOCALINVALIDATE_V3_WQE_TYPE_LOCAL_INVALID_V3
+ u8 flags;
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_SE 0x8UL
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_INLINE 0x10UL
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_LOCALINVALIDATE_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_LOCALINVALIDATE_V3_WQE_SIZE_SFT 0
+ u8 reserved1;
+ __le32 opaque;
+ __le32 inv_l_key;
+ __le32 reserved2;
+};
+
+/* sq_localinvalidate_hdr_v3 (size:128b/16B) */
+struct sq_localinvalidate_hdr_v3 {
+ u8 wqe_type;
+ #define SQ_LOCALINVALIDATE_HDR_V3_WQE_TYPE_LOCAL_INVALID_V3 0x1aUL
+ #define SQ_LOCALINVALIDATE_HDR_V3_WQE_TYPE_LAST SQ_LOCALINVALIDATE_HDR_V3_WQE_TYPE_LOCAL_INVALID_V3
+ u8 flags;
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_SE 0x8UL
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_INLINE 0x10UL
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_LOCALINVALIDATE_HDR_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_LOCALINVALIDATE_HDR_V3_WQE_SIZE_SFT 0
+ u8 reserved1;
+ __le32 opaque;
+ __le32 inv_l_key;
+ __le32 reserved2;
+};
+
+/* sq_fr_pmr_v3 (size:320b/40B) */
+struct sq_fr_pmr_v3 {
+ u8 wqe_type;
+ #define SQ_FR_PMR_V3_WQE_TYPE_FR_PMR_V3 0x1bUL
+ #define SQ_FR_PMR_V3_WQE_TYPE_LAST SQ_FR_PMR_V3_WQE_TYPE_FR_PMR_V3
+ u8 flags;
+ #define SQ_FR_PMR_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_FR_PMR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_FR_PMR_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_FR_PMR_V3_FLAGS_SE 0x8UL
+ #define SQ_FR_PMR_V3_FLAGS_INLINE 0x10UL
+ #define SQ_FR_PMR_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_FR_PMR_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size_zero_based;
+ #define SQ_FR_PMR_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_FR_PMR_V3_WQE_SIZE_SFT 0
+ #define SQ_FR_PMR_V3_ZERO_BASED 0x40UL
+ u8 access_cntl;
+ #define SQ_FR_PMR_V3_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+ #define SQ_FR_PMR_V3_ACCESS_CNTL_REMOTE_READ 0x2UL
+ #define SQ_FR_PMR_V3_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+ #define SQ_FR_PMR_V3_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+ #define SQ_FR_PMR_V3_ACCESS_CNTL_WINDOW_BIND 0x10UL
+ __le32 opaque;
+ __le32 l_key;
+ __le16 page_size_log;
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_MASK 0x1fUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_SFT 0
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_4K 0x0UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8K 0x1UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_16K 0x2UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_32K 0x3UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_64K 0x4UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_128K 0x5UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_256K 0x6UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_512K 0x7UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_1M 0x8UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_2M 0x9UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_4M 0xaUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8M 0xbUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_16M 0xcUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_32M 0xdUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_64M 0xeUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_128M 0xfUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_256M 0x10UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_512M 0x11UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_1G 0x12UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_2G 0x13UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_4G 0x14UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8G 0x15UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_16G 0x16UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_32G 0x17UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_64G 0x18UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_128G 0x19UL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_LAST SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8T
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_MASK 0x3e0UL
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_SFT 5
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4K (0x0UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8K (0x1UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16K (0x2UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32K (0x3UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64K (0x4UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128K (0x5UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256K (0x6UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512K (0x7UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1M (0x8UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2M (0x9UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4M (0xaUL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8M (0xbUL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16M (0xcUL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32M (0xdUL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64M (0xeUL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128M (0xfUL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256M (0x10UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512M (0x11UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1G (0x12UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2G (0x13UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4G (0x14UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8G (0x15UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16G (0x16UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32G (0x17UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64G (0x18UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128G (0x19UL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256G (0x1aUL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512G (0x1bUL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1T (0x1cUL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2T (0x1dUL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4T (0x1eUL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8T (0x1fUL << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8T
+ #define SQ_FR_PMR_V3_NUMLEVELS_MASK 0xc00UL
+ #define SQ_FR_PMR_V3_NUMLEVELS_SFT 10
+ #define SQ_FR_PMR_V3_NUMLEVELS_PHYSICAL (0x0UL << 10)
+ #define SQ_FR_PMR_V3_NUMLEVELS_LAYER1 (0x1UL << 10)
+ #define SQ_FR_PMR_V3_NUMLEVELS_LAYER2 (0x2UL << 10)
+ #define SQ_FR_PMR_V3_NUMLEVELS_LAST SQ_FR_PMR_V3_NUMLEVELS_LAYER2
+ __le16 reserved;
+ __le64 va;
+ __le64 length;
+ __le64 pbl_ptr;
+};
+
+/* sq_fr_pmr_hdr_v3 (size:320b/40B) */
+struct sq_fr_pmr_hdr_v3 {
+ u8 wqe_type;
+ #define SQ_FR_PMR_HDR_V3_WQE_TYPE_FR_PMR_V3 0x1bUL
+ #define SQ_FR_PMR_HDR_V3_WQE_TYPE_LAST SQ_FR_PMR_HDR_V3_WQE_TYPE_FR_PMR_V3
+ u8 flags;
+ #define SQ_FR_PMR_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_FR_PMR_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_FR_PMR_HDR_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_FR_PMR_HDR_V3_FLAGS_SE 0x8UL
+ #define SQ_FR_PMR_HDR_V3_FLAGS_INLINE 0x10UL
+ #define SQ_FR_PMR_HDR_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_FR_PMR_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size_zero_based;
+ #define SQ_FR_PMR_HDR_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_FR_PMR_HDR_V3_WQE_SIZE_SFT 0
+ #define SQ_FR_PMR_HDR_V3_ZERO_BASED 0x40UL
+ u8 access_cntl;
+ #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+ #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_REMOTE_READ 0x2UL
+ #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+ #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+ #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_WINDOW_BIND 0x10UL
+ __le32 opaque;
+ __le32 l_key;
+ __le16 page_size_log;
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_MASK 0x1fUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_SFT 0
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_4K 0x0UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8K 0x1UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_16K 0x2UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_32K 0x3UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_64K 0x4UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_128K 0x5UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_256K 0x6UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_512K 0x7UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_1M 0x8UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_2M 0x9UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_4M 0xaUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8M 0xbUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_16M 0xcUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_32M 0xdUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_64M 0xeUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_128M 0xfUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_256M 0x10UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_512M 0x11UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_1G 0x12UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_2G 0x13UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_4G 0x14UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8G 0x15UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_16G 0x16UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_32G 0x17UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_64G 0x18UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_128G 0x19UL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8T
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_MASK 0x3e0UL
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_SFT 5
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4K (0x0UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8K (0x1UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16K (0x2UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32K (0x3UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64K (0x4UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128K (0x5UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256K (0x6UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512K (0x7UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1M (0x8UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2M (0x9UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4M (0xaUL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8M (0xbUL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16M (0xcUL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32M (0xdUL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64M (0xeUL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128M (0xfUL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256M (0x10UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512M (0x11UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1G (0x12UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2G (0x13UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4G (0x14UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8G (0x15UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16G (0x16UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32G (0x17UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64G (0x18UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128G (0x19UL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256G (0x1aUL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512G (0x1bUL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1T (0x1cUL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2T (0x1dUL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4T (0x1eUL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8T (0x1fUL << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8T
+ #define SQ_FR_PMR_HDR_V3_NUMLEVELS_MASK 0xc00UL
+ #define SQ_FR_PMR_HDR_V3_NUMLEVELS_SFT 10
+ #define SQ_FR_PMR_HDR_V3_NUMLEVELS_PHYSICAL (0x0UL << 10)
+ #define SQ_FR_PMR_HDR_V3_NUMLEVELS_LAYER1 (0x1UL << 10)
+ #define SQ_FR_PMR_HDR_V3_NUMLEVELS_LAYER2 (0x2UL << 10)
+ #define SQ_FR_PMR_HDR_V3_NUMLEVELS_LAST SQ_FR_PMR_HDR_V3_NUMLEVELS_LAYER2
+ __le16 reserved;
+ __le64 va;
+ __le64 length;
+ __le64 pbl_ptr;
+};
+
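+/*
+ * Illustrative note, not part of the generated interface: the PAGE_SIZE_LOG
+ * values above are log2(page size) - 12 (PGSZ_4K = 0x0 ... PGSZ_8T = 0x1f),
+ * and the same encoding repeats at shift 5 for the PBL page size.
+ * Hypothetical encoder from a page size in bytes:
+ */
+static inline u16 sq_fr_pmr_v3_encode_pg_size(u64 pg_size)
+{
+ return (ilog2(pg_size) - 12) & SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_MASK;
+}
+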
+/* sq_bind_v3 (size:256b/32B) */
+struct sq_bind_v3 {
+ u8 wqe_type;
+ #define SQ_BIND_V3_WQE_TYPE_BIND_V3 0x1cUL
+ #define SQ_BIND_V3_WQE_TYPE_LAST SQ_BIND_V3_WQE_TYPE_BIND_V3
+ u8 flags;
+ #define SQ_BIND_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_BIND_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_BIND_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_BIND_V3_FLAGS_SE 0x8UL
+ #define SQ_BIND_V3_FLAGS_INLINE 0x10UL
+ #define SQ_BIND_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_BIND_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size_zero_based_mw_type;
+ #define SQ_BIND_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_BIND_V3_WQE_SIZE_SFT 0
+ #define SQ_BIND_V3_ZERO_BASED 0x40UL
+ #define SQ_BIND_V3_MW_TYPE 0x80UL
+ #define SQ_BIND_V3__TYPE1 (0x0UL << 7)
+ #define SQ_BIND_V3__TYPE2 (0x1UL << 7)
+ #define SQ_BIND_V3__LAST SQ_BIND_V3__TYPE2
+ u8 access_cntl;
+ #define SQ_BIND_V3_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+ #define SQ_BIND_V3_ACCESS_CNTL_REMOTE_READ 0x2UL
+ #define SQ_BIND_V3_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+ #define SQ_BIND_V3_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+ #define SQ_BIND_V3_ACCESS_CNTL_WINDOW_BIND 0x10UL
+ __le32 opaque;
+ __le32 parent_l_key;
+ __le32 l_key;
+ __le64 va;
+ __le64 length;
+};
+
+/* sq_bind_hdr_v3 (size:256b/32B) */
+struct sq_bind_hdr_v3 {
+ u8 wqe_type;
+ #define SQ_BIND_HDR_V3_WQE_TYPE_BIND_V3 0x1cUL
+ #define SQ_BIND_HDR_V3_WQE_TYPE_LAST SQ_BIND_HDR_V3_WQE_TYPE_BIND_V3
+ u8 flags;
+ #define SQ_BIND_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_BIND_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_BIND_HDR_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_BIND_HDR_V3_FLAGS_SE 0x8UL
+ #define SQ_BIND_HDR_V3_FLAGS_INLINE 0x10UL
+ #define SQ_BIND_HDR_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_BIND_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size_zero_based_mw_type;
+ #define SQ_BIND_HDR_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_BIND_HDR_V3_WQE_SIZE_SFT 0
+ #define SQ_BIND_HDR_V3_ZERO_BASED 0x40UL
+ #define SQ_BIND_HDR_V3_MW_TYPE 0x80UL
+ #define SQ_BIND_HDR_V3__TYPE1 (0x0UL << 7)
+ #define SQ_BIND_HDR_V3__TYPE2 (0x1UL << 7)
+ #define SQ_BIND_HDR_V3__LAST SQ_BIND_HDR_V3__TYPE2
+ u8 access_cntl;
+ #define SQ_BIND_HDR_V3_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+ #define SQ_BIND_HDR_V3_ACCESS_CNTL_REMOTE_READ 0x2UL
+ #define SQ_BIND_HDR_V3_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+ #define SQ_BIND_HDR_V3_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+ #define SQ_BIND_HDR_V3_ACCESS_CNTL_WINDOW_BIND 0x10UL
+ __le32 opaque;
+ __le32 parent_l_key;
+ __le32 l_key;
+ __le64 va;
+ __le64 length;
+};
+
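+/*
+ * Illustrative note, not part of the generated interface: in the packed
+ * wqe_size_zero_based_mw_type byte, bit 6 requests a zero-based window and
+ * bit 7 selects the memory-window type (0 = type 1, 1 = type 2).
+ * Hypothetical type-2 bind setup:
+ */
+static inline void sq_bind_hdr_v3_set_type2(struct sq_bind_hdr_v3 *hdr)
+{
+ hdr->wqe_size_zero_based_mw_type |= SQ_BIND_HDR_V3_MW_TYPE;
+}
+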
+/* sq_change_udpsrcport_v3 (size:128b/16B) */
+struct sq_change_udpsrcport_v3 {
+ u8 wqe_type;
+ #define SQ_CHANGE_UDPSRCPORT_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3 0x1eUL
+ #define SQ_CHANGE_UDPSRCPORT_V3_WQE_TYPE_LAST SQ_CHANGE_UDPSRCPORT_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3
+ u8 flags;
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_SE 0x8UL
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_INLINE 0x10UL
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_CHANGE_UDPSRCPORT_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_CHANGE_UDPSRCPORT_V3_WQE_SIZE_SFT 0
+ u8 reserved_1;
+ __le32 opaque;
+ __le16 udp_src_port;
+ __le16 reserved_2;
+ __le32 reserved_3;
+};
+
+/* sq_change_udpsrcport_hdr_v3 (size:128b/16B) */
+struct sq_change_udpsrcport_hdr_v3 {
+ u8 wqe_type;
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3 0x1eUL
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_TYPE_LAST SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3
+ u8 flags;
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_UC_FENCE 0x4UL
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_SE 0x8UL
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_INLINE 0x10UL
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_WQE_TS_EN 0x20UL
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL
+ u8 wqe_size;
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_SIZE_MASK 0x3fUL
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_SIZE_SFT 0
+ u8 reserved_1;
+ __le32 opaque;
+ __le16 udp_src_port;
+ __le16 reserved_2;
+ __le32 reserved_3;
+};
+
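+/*
+ * Illustrative sketch, not part of the generated interface: this WQE asks
+ * the device to switch the RoCEv2 UDP source port of the connection,
+ * e.g. to influence ECMP path selection.  Hypothetical fill:
+ */
+static inline void
+sq_change_udpsrcport_hdr_v3_set(struct sq_change_udpsrcport_hdr_v3 *hdr,
+                                u16 port)
+{
+ hdr->wqe_type = SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3;
+ hdr->udp_src_port = cpu_to_le16(port);
+}
+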
+/* rq_wqe (size:1024b/128B) */
+struct rq_wqe {
+ u8 wqe_type;
+ #define RQ_WQE_WQE_TYPE_RCV 0x80UL
+ #define RQ_WQE_WQE_TYPE_LAST RQ_WQE_WQE_TYPE_RCV
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 reserved32;
+ __le32 wr_id[2];
+ #define RQ_WQE_WR_ID_MASK 0xfffffUL
+ #define RQ_WQE_WR_ID_SFT 0
+ u8 reserved128[16];
+ __le32 data[24];
+};
+
+/* rq_wqe_hdr (size:256b/32B) */
+struct rq_wqe_hdr {
+ u8 wqe_type;
+ #define RQ_WQE_HDR_WQE_TYPE_RCV 0x80UL
+ #define RQ_WQE_HDR_WQE_TYPE_LAST RQ_WQE_HDR_WQE_TYPE_RCV
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 reserved32;
+ __le32 wr_id[2];
+ #define RQ_WQE_HDR_WR_ID_MASK 0xfffffUL
+ #define RQ_WQE_HDR_WR_ID_SFT 0
+ u8 reserved128[16];
+};
+
+/* rq_wqe_v3 (size:4096b/512B) */
+struct rq_wqe_v3 {
+ u8 wqe_type;
+ #define RQ_WQE_V3_WQE_TYPE_RCV_V3 0x90UL
+ #define RQ_WQE_V3_WQE_TYPE_LAST RQ_WQE_V3_WQE_TYPE_RCV_V3
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved1;
+ __le32 opaque;
+ __le64 reserved2;
+ __le32 data[124];
+};
+
+/* rq_wqe_hdr_v3 (size:128b/16B) */
+struct rq_wqe_hdr_v3 {
+ u8 wqe_type;
+ #define RQ_WQE_HDR_V3_WQE_TYPE_RCV_V3 0x90UL
+ #define RQ_WQE_HDR_V3_WQE_TYPE_LAST RQ_WQE_HDR_V3_WQE_TYPE_RCV_V3
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved1;
+ __le32 opaque;
+ __le64 reserved2;
+};
+
+/* cq_base (size:256b/32B) */
+struct cq_base {
+ __le64 reserved64_1;
+ __le64 reserved64_2;
+ __le64 reserved64_3;
+ u8 cqe_type_toggle;
+ #define CQ_BASE_TOGGLE 0x1UL
+ #define CQ_BASE_CQE_TYPE_MASK 0x1eUL
+ #define CQ_BASE_CQE_TYPE_SFT 1
+ #define CQ_BASE_CQE_TYPE_REQ (0x0UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RC (0x1UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_UD (0x2UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_UD_CFA (0x4UL << 1)
+ #define CQ_BASE_CQE_TYPE_REQ_V3 (0x8UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RC_V3 (0x9UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_UD_V3 (0xaUL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1_V3 (0xbUL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_UD_CFA_V3 (0xcUL << 1)
+ #define CQ_BASE_CQE_TYPE_NO_OP (0xdUL << 1)
+ #define CQ_BASE_CQE_TYPE_TERMINAL (0xeUL << 1)
+ #define CQ_BASE_CQE_TYPE_CUT_OFF (0xfUL << 1)
+ #define CQ_BASE_CQE_TYPE_LAST CQ_BASE_CQE_TYPE_CUT_OFF
+ u8 status;
+ #define CQ_BASE_STATUS_OK 0x0UL
+ #define CQ_BASE_STATUS_BAD_RESPONSE_ERR 0x1UL
+ #define CQ_BASE_STATUS_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_BASE_STATUS_HW_LOCAL_LENGTH_ERR 0x3UL
+ #define CQ_BASE_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_BASE_STATUS_LOCAL_PROTECTION_ERR 0x5UL
+ #define CQ_BASE_STATUS_LOCAL_ACCESS_ERROR 0x6UL
+ #define CQ_BASE_STATUS_MEMORY_MGT_OPERATION_ERR 0x7UL
+ #define CQ_BASE_STATUS_REMOTE_INVALID_REQUEST_ERR 0x8UL
+ #define CQ_BASE_STATUS_REMOTE_ACCESS_ERR 0x9UL
+ #define CQ_BASE_STATUS_REMOTE_OPERATION_ERR 0xaUL
+ #define CQ_BASE_STATUS_RNR_NAK_RETRY_CNT_ERR 0xbUL
+ #define CQ_BASE_STATUS_TRANSPORT_RETRY_CNT_ERR 0xcUL
+ #define CQ_BASE_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL
+ #define CQ_BASE_STATUS_HW_FLUSH_ERR 0xeUL
+ #define CQ_BASE_STATUS_OVERFLOW_ERR 0xfUL
+ #define CQ_BASE_STATUS_LAST CQ_BASE_STATUS_OVERFLOW_ERR
+ __le16 reserved16;
+ __le32 opaque;
+};
+
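+/*
+ * Illustrative sketch, not part of the generated interface: the low bit of
+ * cqe_type_toggle looks like the usual phase/valid bit -- a CQE would
+ * belong to software when its toggle matches the consumer's current
+ * phase -- and the type field then selects the concrete CQE layout.
+ * Hypothetical helpers:
+ */
+static inline bool cq_base_sw_owned(const struct cq_base *cqe, u8 phase)
+{
+ return (cqe->cqe_type_toggle & CQ_BASE_TOGGLE) == phase;
+}
+
+static inline u8 cq_base_type(const struct cq_base *cqe)
+{
+ return (cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) >>
+        CQ_BASE_CQE_TYPE_SFT;
+}
+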
+/* cq_req (size:256b/32B) */
+struct cq_req {
+ __le64 qp_handle;
+ __le16 sq_cons_idx;
+ __le16 reserved16_1;
+ __le32 reserved32_2;
+ __le64 reserved64;
+ u8 cqe_type_toggle;
+ #define CQ_REQ_TOGGLE 0x1UL
+ #define CQ_REQ_CQE_TYPE_MASK 0x1eUL
+ #define CQ_REQ_CQE_TYPE_SFT 1
+ #define CQ_REQ_CQE_TYPE_REQ (0x0UL << 1)
+ #define CQ_REQ_CQE_TYPE_LAST CQ_REQ_CQE_TYPE_REQ
+ #define CQ_REQ_PUSH 0x20UL
+ u8 status;
+ #define CQ_REQ_STATUS_OK 0x0UL
+ #define CQ_REQ_STATUS_BAD_RESPONSE_ERR 0x1UL
+ #define CQ_REQ_STATUS_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR 0x3UL
+ #define CQ_REQ_STATUS_LOCAL_PROTECTION_ERR 0x4UL
+ #define CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR 0x6UL
+ #define CQ_REQ_STATUS_REMOTE_ACCESS_ERR 0x7UL
+ #define CQ_REQ_STATUS_REMOTE_OPERATION_ERR 0x8UL
+ #define CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR 0x9UL
+ #define CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR 0xaUL
+ #define CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR 0xbUL
+ #define CQ_REQ_STATUS_LAST CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR
+ __le16 reserved16_2;
+ __le32 reserved32_1;
+};
+
+/* cq_res_rc (size:256b/32B) */
+struct cq_res_rc {
+ __le32 length;
+ __le32 imm_data_or_inv_r_key;
+ __le64 qp_handle;
+ __le64 mr_handle;
+ u8 cqe_type_toggle;
+ #define CQ_RES_RC_TOGGLE 0x1UL
+ #define CQ_RES_RC_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_RC_CQE_TYPE_SFT 1
+ #define CQ_RES_RC_CQE_TYPE_RES_RC (0x1UL << 1)
+ #define CQ_RES_RC_CQE_TYPE_LAST CQ_RES_RC_CQE_TYPE_RES_RC
+ u8 status;
+ #define CQ_RES_RC_STATUS_OK 0x0UL
+ #define CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR 0x6UL
+ #define CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_RC_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_RC_STATUS_LAST CQ_RES_RC_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_RC_FLAGS_SRQ 0x1UL
+ #define CQ_RES_RC_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_RC_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_RC_FLAGS_SRQ_LAST CQ_RES_RC_FLAGS_SRQ_SRQ
+ #define CQ_RES_RC_FLAGS_IMM 0x2UL
+ #define CQ_RES_RC_FLAGS_INV 0x4UL
+ #define CQ_RES_RC_FLAGS_RDMA 0x8UL
+ #define CQ_RES_RC_FLAGS_RDMA_SEND (0x0UL << 3)
+ #define CQ_RES_RC_FLAGS_RDMA_RDMA_WRITE (0x1UL << 3)
+ #define CQ_RES_RC_FLAGS_RDMA_LAST CQ_RES_RC_FLAGS_RDMA_RDMA_WRITE
+ __le32 srq_or_rq_wr_id;
+ #define CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_RC_SRQ_OR_RQ_WR_ID_SFT 0
+};
+
+/* cq_res_ud (size:256b/32B) */
+struct cq_res_ud {
+ __le16 length;
+ #define CQ_RES_UD_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_UD_LENGTH_SFT 0
+ __le16 cfa_metadata;
+ #define CQ_RES_UD_CFA_METADATA_VID_MASK 0xfffUL
+ #define CQ_RES_UD_CFA_METADATA_VID_SFT 0
+ #define CQ_RES_UD_CFA_METADATA_DE 0x1000UL
+ #define CQ_RES_UD_CFA_METADATA_PRI_MASK 0xe000UL
+ #define CQ_RES_UD_CFA_METADATA_PRI_SFT 13
+ __le32 imm_data;
+ __le64 qp_handle;
+ __le16 src_mac[3];
+ __le16 src_qp_low;
+ u8 cqe_type_toggle;
+ #define CQ_RES_UD_TOGGLE 0x1UL
+ #define CQ_RES_UD_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_UD_CQE_TYPE_SFT 1
+ #define CQ_RES_UD_CQE_TYPE_RES_UD (0x2UL << 1)
+ #define CQ_RES_UD_CQE_TYPE_LAST CQ_RES_UD_CQE_TYPE_RES_UD
+ u8 status;
+ #define CQ_RES_UD_STATUS_OK 0x0UL
+ #define CQ_RES_UD_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_UD_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_UD_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_UD_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_UD_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_UD_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_UD_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_UD_STATUS_LAST CQ_RES_UD_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_UD_FLAGS_SRQ 0x1UL
+ #define CQ_RES_UD_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_UD_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_UD_FLAGS_SRQ_LAST CQ_RES_UD_FLAGS_SRQ_SRQ
+ #define CQ_RES_UD_FLAGS_IMM 0x2UL
+ #define CQ_RES_UD_FLAGS_UNUSED_MASK 0xcUL
+ #define CQ_RES_UD_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK 0x30UL
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT 4
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6
+ #define CQ_RES_UD_FLAGS_META_FORMAT_MASK 0x3c0UL
+ #define CQ_RES_UD_FLAGS_META_FORMAT_SFT 6
+ #define CQ_RES_UD_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_VLAN (0x1UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_LAST CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET
+ #define CQ_RES_UD_FLAGS_EXT_META_FORMAT_MASK 0xc00UL
+ #define CQ_RES_UD_FLAGS_EXT_META_FORMAT_SFT 10
+ __le32 src_qp_high_srq_or_rq_wr_id;
+ #define CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_UD_SRQ_OR_RQ_WR_ID_SFT 0
+ #define CQ_RES_UD_SRC_QP_HIGH_MASK 0xff000000UL
+ #define CQ_RES_UD_SRC_QP_HIGH_SFT 24
+};
+
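+/*
+ * Illustrative sketch, not part of the generated interface: the 24-bit
+ * source QP number of a UD completion is split across src_qp_low
+ * (bits 0..15) and SRC_QP_HIGH (bits 16..23).  Hypothetical reassembly:
+ */
+static inline u32 cq_res_ud_src_qp(const struct cq_res_ud *cqe)
+{
+ u32 high = (le32_to_cpu(cqe->src_qp_high_srq_or_rq_wr_id) &
+             CQ_RES_UD_SRC_QP_HIGH_MASK) >> CQ_RES_UD_SRC_QP_HIGH_SFT;
+
+ return (high << 16) | le16_to_cpu(cqe->src_qp_low);
+}
+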
+/* cq_res_ud_v2 (size:256b/32B) */
+struct cq_res_ud_v2 {
+ __le16 length;
+ #define CQ_RES_UD_V2_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_UD_V2_LENGTH_SFT 0
+ __le16 cfa_metadata0;
+ #define CQ_RES_UD_V2_CFA_METADATA0_VID_MASK 0xfffUL
+ #define CQ_RES_UD_V2_CFA_METADATA0_VID_SFT 0
+ #define CQ_RES_UD_V2_CFA_METADATA0_DE 0x1000UL
+ #define CQ_RES_UD_V2_CFA_METADATA0_PRI_MASK 0xe000UL
+ #define CQ_RES_UD_V2_CFA_METADATA0_PRI_SFT 13
+ __le32 imm_data;
+ __le64 qp_handle;
+ __le16 src_mac[3];
+ __le16 src_qp_low;
+ u8 cqe_type_toggle;
+ #define CQ_RES_UD_V2_TOGGLE 0x1UL
+ #define CQ_RES_UD_V2_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_UD_V2_CQE_TYPE_SFT 1
+ #define CQ_RES_UD_V2_CQE_TYPE_RES_UD (0x2UL << 1)
+ #define CQ_RES_UD_V2_CQE_TYPE_LAST CQ_RES_UD_V2_CQE_TYPE_RES_UD
+ u8 status;
+ #define CQ_RES_UD_V2_STATUS_OK 0x0UL
+ #define CQ_RES_UD_V2_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_UD_V2_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_UD_V2_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_UD_V2_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_UD_V2_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_UD_V2_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_UD_V2_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_UD_V2_STATUS_LAST CQ_RES_UD_V2_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_UD_V2_FLAGS_SRQ 0x1UL
+ #define CQ_RES_UD_V2_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_UD_V2_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_UD_V2_FLAGS_SRQ_LAST CQ_RES_UD_V2_FLAGS_SRQ_SRQ
+ #define CQ_RES_UD_V2_FLAGS_IMM 0x2UL
+ #define CQ_RES_UD_V2_FLAGS_UNUSED_MASK 0xcUL
+ #define CQ_RES_UD_V2_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_MASK 0x30UL
+ #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_SFT 4
+ #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+ #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+ #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+ #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V2IPV6
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_MASK 0x3c0UL
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_SFT 6
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_ACT_REC_PTR (0x1UL << 6)
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+ #define CQ_RES_UD_V2_FLAGS_META_FORMAT_LAST CQ_RES_UD_V2_FLAGS_META_FORMAT_HDR_OFFSET
+ __le32 src_qp_high_srq_or_rq_wr_id;
+ #define CQ_RES_UD_V2_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_UD_V2_SRQ_OR_RQ_WR_ID_SFT 0
+ #define CQ_RES_UD_V2_CFA_METADATA1_MASK 0xf00000UL
+ #define CQ_RES_UD_V2_CFA_METADATA1_SFT 20
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_MASK 0x700000UL
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_SFT 20
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 20)
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID8100 (0x1UL << 20)
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID9100 (0x2UL << 20)
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID9200 (0x3UL << 20)
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID9300 (0x4UL << 20)
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 20)
+ #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_LAST CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPIDCFG
+ #define CQ_RES_UD_V2_CFA_METADATA1_VALID 0x800000UL
+ #define CQ_RES_UD_V2_SRC_QP_HIGH_MASK 0xff000000UL
+ #define CQ_RES_UD_V2_SRC_QP_HIGH_SFT 24
+};
+
+/* cq_res_ud_cfa (size:256b/32B) */
+struct cq_res_ud_cfa {
+ __le16 length;
+ #define CQ_RES_UD_CFA_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_UD_CFA_LENGTH_SFT 0
+ __le16 cfa_code;
+ __le32 imm_data;
+ __le32 qid;
+ #define CQ_RES_UD_CFA_QID_MASK 0xfffffUL
+ #define CQ_RES_UD_CFA_QID_SFT 0
+ __le32 cfa_metadata;
+ #define CQ_RES_UD_CFA_CFA_METADATA_VID_MASK 0xfffUL
+ #define CQ_RES_UD_CFA_CFA_METADATA_VID_SFT 0
+ #define CQ_RES_UD_CFA_CFA_METADATA_DE 0x1000UL
+ #define CQ_RES_UD_CFA_CFA_METADATA_PRI_MASK 0xe000UL
+ #define CQ_RES_UD_CFA_CFA_METADATA_PRI_SFT 13
+ #define CQ_RES_UD_CFA_CFA_METADATA_TPID_MASK 0xffff0000UL
+ #define CQ_RES_UD_CFA_CFA_METADATA_TPID_SFT 16
+ __le16 src_mac[3];
+ __le16 src_qp_low;
+ u8 cqe_type_toggle;
+ #define CQ_RES_UD_CFA_TOGGLE 0x1UL
+ #define CQ_RES_UD_CFA_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_UD_CFA_CQE_TYPE_SFT 1
+ #define CQ_RES_UD_CFA_CQE_TYPE_RES_UD_CFA (0x4UL << 1)
+ #define CQ_RES_UD_CFA_CQE_TYPE_LAST CQ_RES_UD_CFA_CQE_TYPE_RES_UD_CFA
+ u8 status;
+ #define CQ_RES_UD_CFA_STATUS_OK 0x0UL
+ #define CQ_RES_UD_CFA_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_UD_CFA_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_UD_CFA_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_UD_CFA_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_UD_CFA_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_UD_CFA_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_UD_CFA_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_UD_CFA_STATUS_LAST CQ_RES_UD_CFA_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_UD_CFA_FLAGS_SRQ 0x1UL
+ #define CQ_RES_UD_CFA_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_UD_CFA_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_UD_CFA_FLAGS_SRQ_LAST CQ_RES_UD_CFA_FLAGS_SRQ_SRQ
+ #define CQ_RES_UD_CFA_FLAGS_IMM 0x2UL
+ #define CQ_RES_UD_CFA_FLAGS_UNUSED_MASK 0xcUL
+ #define CQ_RES_UD_CFA_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_MASK 0x30UL
+ #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_SFT 4
+ #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+ #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+ #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+ #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V2IPV6
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_MASK 0x3c0UL
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_SFT 6
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_VLAN (0x1UL << 6)
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+ #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_LAST CQ_RES_UD_CFA_FLAGS_META_FORMAT_HDR_OFFSET
+ #define CQ_RES_UD_CFA_FLAGS_EXT_META_FORMAT_MASK 0xc00UL
+ #define CQ_RES_UD_CFA_FLAGS_EXT_META_FORMAT_SFT 10
+ __le32 src_qp_high_srq_or_rq_wr_id;
+ #define CQ_RES_UD_CFA_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_UD_CFA_SRQ_OR_RQ_WR_ID_SFT 0
+ #define CQ_RES_UD_CFA_SRC_QP_HIGH_MASK 0xff000000UL
+ #define CQ_RES_UD_CFA_SRC_QP_HIGH_SFT 24
+};
+
+/* cq_res_ud_cfa_v2 (size:256b/32B) */
+struct cq_res_ud_cfa_v2 {
+ __le16 length;
+ #define CQ_RES_UD_CFA_V2_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_UD_CFA_V2_LENGTH_SFT 0
+ __le16 cfa_metadata0;
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA0_VID_MASK 0xfffUL
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA0_VID_SFT 0
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA0_DE 0x1000UL
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA0_PRI_MASK 0xe000UL
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA0_PRI_SFT 13
+ __le32 imm_data;
+ __le32 qid;
+ #define CQ_RES_UD_CFA_V2_QID_MASK 0xfffffUL
+ #define CQ_RES_UD_CFA_V2_QID_SFT 0
+ __le32 cfa_metadata2;
+ __le16 src_mac[3];
+ __le16 src_qp_low;
+ u8 cqe_type_toggle;
+ #define CQ_RES_UD_CFA_V2_TOGGLE 0x1UL
+ #define CQ_RES_UD_CFA_V2_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_UD_CFA_V2_CQE_TYPE_SFT 1
+ #define CQ_RES_UD_CFA_V2_CQE_TYPE_RES_UD_CFA (0x4UL << 1)
+ #define CQ_RES_UD_CFA_V2_CQE_TYPE_LAST CQ_RES_UD_CFA_V2_CQE_TYPE_RES_UD_CFA
+ u8 status;
+ #define CQ_RES_UD_CFA_V2_STATUS_OK 0x0UL
+ #define CQ_RES_UD_CFA_V2_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_UD_CFA_V2_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_UD_CFA_V2_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_UD_CFA_V2_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_UD_CFA_V2_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_UD_CFA_V2_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_UD_CFA_V2_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_UD_CFA_V2_STATUS_LAST CQ_RES_UD_CFA_V2_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_UD_CFA_V2_FLAGS_SRQ 0x1UL
+ #define CQ_RES_UD_CFA_V2_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_UD_CFA_V2_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_UD_CFA_V2_FLAGS_SRQ_LAST CQ_RES_UD_CFA_V2_FLAGS_SRQ_SRQ
+ #define CQ_RES_UD_CFA_V2_FLAGS_IMM 0x2UL
+ #define CQ_RES_UD_CFA_V2_FLAGS_UNUSED_MASK 0xcUL
+ #define CQ_RES_UD_CFA_V2_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_MASK 0x30UL
+ #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_SFT 4
+ #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+ #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+ #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+ #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V2IPV6
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_MASK 0x3c0UL
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_SFT 6
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_ACT_REC_PTR (0x1UL << 6)
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+ #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_LAST CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_HDR_OFFSET
+ __le32 src_qp_high_srq_or_rq_wr_id;
+ #define CQ_RES_UD_CFA_V2_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_UD_CFA_V2_SRQ_OR_RQ_WR_ID_SFT 0
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_MASK 0xf00000UL
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_SFT 20
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_MASK 0x700000UL
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_SFT 20
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 20)
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID8100 (0x1UL << 20)
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID9100 (0x2UL << 20)
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID9200 (0x3UL << 20)
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID9300 (0x4UL << 20)
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 20)
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_LAST CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPIDCFG
+ #define CQ_RES_UD_CFA_V2_CFA_METADATA1_VALID 0x800000UL
+ #define CQ_RES_UD_CFA_V2_SRC_QP_HIGH_MASK 0xff000000UL
+ #define CQ_RES_UD_CFA_V2_SRC_QP_HIGH_SFT 24
+};
+
+/* cq_res_raweth_qp1 (size:256b/32B) */
+struct cq_res_raweth_qp1 {
+ __le16 length;
+ #define CQ_RES_RAWETH_QP1_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_RAWETH_QP1_LENGTH_SFT 0
+ __le16 raweth_qp1_flags;
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_MASK 0x3ffUL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_SFT 0
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ERROR 0x1UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_MASK 0x3c0UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_SFT 6
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_NOT_KNOWN (0x0UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_IP (0x1UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_TCP (0x2UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_UDP (0x3UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_FCOE (0x4UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE (0x5UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ICMP (0x7UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP (0x9UL << 6)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_LAST CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP
+ __le16 raweth_qp1_errors;
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_IP_CS_ERROR 0x10UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_L4_CS_ERROR 0x20UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_IP_CS_ERROR 0x40UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_L4_CS_ERROR 0x80UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_CRC_ERROR 0x100UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_MASK 0xe00UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_SFT 9
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6UL << 9)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_MASK 0xf000UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_SFT 12
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8UL << 12)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
+ __le16 raweth_qp1_cfa_code;
+ __le64 qp_handle;
+ __le32 raweth_qp1_flags2;
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC 0x1UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC 0x2UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_T_IP_CS_CALC 0x4UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_T_L4_CS_CALC 0x8UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_MASK 0xf0UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_NONE (0x0UL << 4)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN (0x1UL << 4)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_TUNNEL_ID (0x2UL << 4)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_CHDR_DATA (0x3UL << 4)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET (0x4UL << 4)
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_LAST CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE 0x100UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_CALC 0x200UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_EXT_META_FORMAT_MASK 0xc00UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_EXT_META_FORMAT_SFT 10
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_MASK 0xffff0000UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_SFT 16
+ __le32 raweth_qp1_metadata;
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_DE_VID_MASK 0xffffUL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_DE_VID_SFT 0
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK 0xfffUL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_SFT 0
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_DE 0x1000UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK 0xe000UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT 13
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK 0xffff0000UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT 16
+ u8 cqe_type_toggle;
+ #define CQ_RES_RAWETH_QP1_TOGGLE 0x1UL
+ #define CQ_RES_RAWETH_QP1_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_RAWETH_QP1_CQE_TYPE_SFT 1
+ #define CQ_RES_RAWETH_QP1_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1)
+ #define CQ_RES_RAWETH_QP1_CQE_TYPE_LAST CQ_RES_RAWETH_QP1_CQE_TYPE_RES_RAWETH_QP1
+ u8 status;
+ #define CQ_RES_RAWETH_QP1_STATUS_OK 0x0UL
+ #define CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_RAWETH_QP1_STATUS_LAST CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_RAWETH_QP1_FLAGS_SRQ 0x1UL
+ #define CQ_RES_RAWETH_QP1_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_RAWETH_QP1_FLAGS_SRQ_LAST CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ
+ __le32 raweth_qp1_payload_offset_srq_or_rq_wr_id;
+ #define CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_SFT 0
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_MASK 0xff000000UL
+ #define CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_SFT 24
+};
+
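+/*
+ * Illustrative sketch, not part of the generated interface: raw QP1
+ * completions pack the payload offset into the top byte of the dword that
+ * also carries the SRQ/RQ WR ID.  Hypothetical extraction:
+ */
+static inline u8
+cq_res_raweth_qp1_payload_offset(const struct cq_res_raweth_qp1 *cqe)
+{
+ return (le32_to_cpu(cqe->raweth_qp1_payload_offset_srq_or_rq_wr_id) &
+         CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_MASK) >>
+        CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_SFT;
+}
+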
+/* cq_res_raweth_qp1_v2 (size:256b/32B) */
+struct cq_res_raweth_qp1_v2 {
+ __le16 length;
+ #define CQ_RES_RAWETH_QP1_V2_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_RAWETH_QP1_V2_LENGTH_SFT 0
+ __le16 raweth_qp1_flags;
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_MASK 0x3ffUL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_SFT 0
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ERROR 0x1UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_MASK 0x3c0UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_SFT 6
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_NOT_KNOWN (0x0UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_IP (0x1UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_TCP (0x2UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_UDP (0x3UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_FCOE (0x4UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_ROCE (0x5UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_ICMP (0x7UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP (0x9UL << 6)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_LAST CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP
+ __le16 raweth_qp1_errors;
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_IP_CS_ERROR 0x10UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_L4_CS_ERROR 0x20UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_IP_CS_ERROR 0x40UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_L4_CS_ERROR 0x80UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_CRC_ERROR 0x100UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_MASK 0xe00UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_SFT 9
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6UL << 9)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_MASK 0xf000UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_SFT 12
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8UL << 12)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
+ __le16 cfa_metadata0;
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_VID_MASK 0xfffUL
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_VID_SFT 0
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_DE 0x1000UL
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_PRI_MASK 0xe000UL
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_PRI_SFT 13
+ __le64 qp_handle;
+ __le32 raweth_qp1_flags2;
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_CS_ALL_OK_MODE 0x8UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_MASK 0xf0UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_NONE (0x0UL << 4)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_ACT_REC_PTR (0x1UL << 4)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_TUNNEL_ID (0x2UL << 4)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_CHDR_DATA (0x3UL << 4)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET (0x4UL << 4)
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_LAST CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_IP_TYPE 0x100UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_CALC 0x200UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_CS_OK_MASK 0xfc00UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_CS_OK_SFT 10
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_MASK 0xffff0000UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_SFT 16
+ __le32 cfa_metadata2;
+ u8 cqe_type_toggle;
+ #define CQ_RES_RAWETH_QP1_V2_TOGGLE 0x1UL
+ #define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_SFT 1
+ #define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1)
+ #define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_LAST CQ_RES_RAWETH_QP1_V2_CQE_TYPE_RES_RAWETH_QP1
+ u8 status;
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_OK 0x0UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_HW_FLUSH_ERR 0x8UL
+ #define CQ_RES_RAWETH_QP1_V2_STATUS_LAST CQ_RES_RAWETH_QP1_V2_STATUS_HW_FLUSH_ERR
+ __le16 flags;
+ #define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ 0x1UL
+ #define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_LAST CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_SRQ
+ __le32 raweth_qp1_payload_offset_srq_or_rq_wr_id;
+ #define CQ_RES_RAWETH_QP1_V2_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+ #define CQ_RES_RAWETH_QP1_V2_SRQ_OR_RQ_WR_ID_SFT 0
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_MASK 0xf00000UL
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_SFT 20
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_MASK 0x700000UL
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_SFT 20
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 20)
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID8100 (0x1UL << 20)
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID9100 (0x2UL << 20)
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID9200 (0x3UL << 20)
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID9300 (0x4UL << 20)
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 20)
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_LAST CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPIDCFG
+ #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_VALID 0x800000UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_PAYLOAD_OFFSET_MASK 0xff000000UL
+ #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_PAYLOAD_OFFSET_SFT 24
+};
+
+/* cq_terminal (size:256b/32B) */
+struct cq_terminal {
+ __le64 qp_handle;
+ __le16 sq_cons_idx;
+ __le16 rq_cons_idx;
+ __le32 reserved32_1;
+ __le64 reserved64_3;
+ u8 cqe_type_toggle;
+ #define CQ_TERMINAL_TOGGLE 0x1UL
+ #define CQ_TERMINAL_CQE_TYPE_MASK 0x1eUL
+ #define CQ_TERMINAL_CQE_TYPE_SFT 1
+ #define CQ_TERMINAL_CQE_TYPE_TERMINAL (0xeUL << 1)
+ #define CQ_TERMINAL_CQE_TYPE_LAST CQ_TERMINAL_CQE_TYPE_TERMINAL
+ u8 status;
+ #define CQ_TERMINAL_STATUS_OK 0x0UL
+ #define CQ_TERMINAL_STATUS_LAST CQ_TERMINAL_STATUS_OK
+ __le16 reserved16;
+ __le32 reserved32_2;
+};
+
+/* cq_cutoff (size:256b/32B) */
+struct cq_cutoff {
+ __le64 reserved64_1;
+ __le64 reserved64_2;
+ __le64 reserved64_3;
+ u8 cqe_type_toggle;
+ #define CQ_CUTOFF_TOGGLE 0x1UL
+ #define CQ_CUTOFF_CQE_TYPE_MASK 0x1eUL
+ #define CQ_CUTOFF_CQE_TYPE_SFT 1
+ #define CQ_CUTOFF_CQE_TYPE_CUT_OFF (0xfUL << 1)
+ #define CQ_CUTOFF_CQE_TYPE_LAST CQ_CUTOFF_CQE_TYPE_CUT_OFF
+ #define CQ_CUTOFF_RESIZE_TOGGLE_MASK 0x60UL
+ #define CQ_CUTOFF_RESIZE_TOGGLE_SFT 5
+ u8 status;
+ #define CQ_CUTOFF_STATUS_OK 0x0UL
+ #define CQ_CUTOFF_STATUS_LAST CQ_CUTOFF_STATUS_OK
+ __le16 reserved16;
+ __le32 reserved32;
+};
+
+/* cq_req_v3 (size:256b/32B) */
+struct cq_req_v3 {
+ __le64 qp_handle;
+ __le16 sq_cons_idx;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le64 reserved3;
+ u8 cqe_type_toggle;
+ #define CQ_REQ_V3_TOGGLE 0x1UL
+ #define CQ_REQ_V3_CQE_TYPE_MASK 0x1eUL
+ #define CQ_REQ_V3_CQE_TYPE_SFT 1
+ #define CQ_REQ_V3_CQE_TYPE_REQ_V3 (0x8UL << 1)
+ #define CQ_REQ_V3_CQE_TYPE_LAST CQ_REQ_V3_CQE_TYPE_REQ_V3
+ #define CQ_REQ_V3_PUSH 0x20UL
+ u8 status;
+ #define CQ_REQ_V3_STATUS_OK 0x0UL
+ #define CQ_REQ_V3_STATUS_BAD_RESPONSE_ERR 0x1UL
+ #define CQ_REQ_V3_STATUS_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_REQ_V3_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_REQ_V3_STATUS_LOCAL_PROTECTION_ERR 0x5UL
+ #define CQ_REQ_V3_STATUS_MEMORY_MGT_OPERATION_ERR 0x7UL
+ #define CQ_REQ_V3_STATUS_REMOTE_INVALID_REQUEST_ERR 0x8UL
+ #define CQ_REQ_V3_STATUS_REMOTE_ACCESS_ERR 0x9UL
+ #define CQ_REQ_V3_STATUS_REMOTE_OPERATION_ERR 0xaUL
+ #define CQ_REQ_V3_STATUS_RNR_NAK_RETRY_CNT_ERR 0xbUL
+ #define CQ_REQ_V3_STATUS_TRANSPORT_RETRY_CNT_ERR 0xcUL
+ #define CQ_REQ_V3_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL
+ #define CQ_REQ_V3_STATUS_OVERFLOW_ERR 0xfUL
+ #define CQ_REQ_V3_STATUS_LAST CQ_REQ_V3_STATUS_OVERFLOW_ERR
+ __le16 reserved4;
+ __le32 opaque;
+};
+
+/* cq_res_rc_v3 (size:256b/32B) */
+struct cq_res_rc_v3 {
+ __le32 length;
+ __le32 imm_data_or_inv_r_key;
+ __le64 qp_handle;
+ __le64 mr_handle;
+ u8 cqe_type_toggle;
+ #define CQ_RES_RC_V3_TOGGLE 0x1UL
+ #define CQ_RES_RC_V3_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_RC_V3_CQE_TYPE_SFT 1
+ #define CQ_RES_RC_V3_CQE_TYPE_RES_RC_V3 (0x9UL << 1)
+ #define CQ_RES_RC_V3_CQE_TYPE_LAST CQ_RES_RC_V3_CQE_TYPE_RES_RC_V3
+ u8 status;
+ #define CQ_RES_RC_V3_STATUS_OK 0x0UL
+ #define CQ_RES_RC_V3_STATUS_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_RES_RC_V3_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_RC_V3_STATUS_LOCAL_PROTECTION_ERR 0x5UL
+ #define CQ_RES_RC_V3_STATUS_LOCAL_ACCESS_ERROR 0x6UL
+ #define CQ_RES_RC_V3_STATUS_REMOTE_INVALID_REQUEST_ERR 0x8UL
+ #define CQ_RES_RC_V3_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL
+ #define CQ_RES_RC_V3_STATUS_HW_FLUSH_ERR 0xeUL
+ #define CQ_RES_RC_V3_STATUS_OVERFLOW_ERR 0xfUL
+ #define CQ_RES_RC_V3_STATUS_LAST CQ_RES_RC_V3_STATUS_OVERFLOW_ERR
+ __le16 flags;
+ #define CQ_RES_RC_V3_FLAGS_SRQ 0x1UL
+ #define CQ_RES_RC_V3_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_RC_V3_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_RC_V3_FLAGS_SRQ_LAST CQ_RES_RC_V3_FLAGS_SRQ_SRQ
+ #define CQ_RES_RC_V3_FLAGS_IMM 0x2UL
+ #define CQ_RES_RC_V3_FLAGS_INV 0x4UL
+ #define CQ_RES_RC_V3_FLAGS_RDMA 0x8UL
+ #define CQ_RES_RC_V3_FLAGS_RDMA_SEND (0x0UL << 3)
+ #define CQ_RES_RC_V3_FLAGS_RDMA_RDMA_WRITE (0x1UL << 3)
+ #define CQ_RES_RC_V3_FLAGS_RDMA_LAST CQ_RES_RC_V3_FLAGS_RDMA_RDMA_WRITE
+ __le32 opaque;
+};
+
+/* cq_res_ud_v3 (size:256b/32B) */
+struct cq_res_ud_v3 {
+ __le16 length;
+ #define CQ_RES_UD_V3_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_UD_V3_LENGTH_SFT 0
+ u8 reserved1;
+ u8 src_qp_high;
+ __le32 imm_data;
+ __le64 qp_handle;
+ __le16 src_mac[3];
+ __le16 src_qp_low;
+ u8 cqe_type_toggle;
+ #define CQ_RES_UD_V3_TOGGLE 0x1UL
+ #define CQ_RES_UD_V3_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_UD_V3_CQE_TYPE_SFT 1
+ #define CQ_RES_UD_V3_CQE_TYPE_RES_UD_V3 (0xaUL << 1)
+ #define CQ_RES_UD_V3_CQE_TYPE_LAST CQ_RES_UD_V3_CQE_TYPE_RES_UD_V3
+ u8 status;
+ #define CQ_RES_UD_V3_STATUS_OK 0x0UL
+ #define CQ_RES_UD_V3_STATUS_HW_LOCAL_LENGTH_ERR 0x3UL
+ #define CQ_RES_UD_V3_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_UD_V3_STATUS_LOCAL_PROTECTION_ERR 0x5UL
+ #define CQ_RES_UD_V3_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL
+ #define CQ_RES_UD_V3_STATUS_HW_FLUSH_ERR 0xeUL
+ #define CQ_RES_UD_V3_STATUS_OVERFLOW_ERR 0xfUL
+ #define CQ_RES_UD_V3_STATUS_LAST CQ_RES_UD_V3_STATUS_OVERFLOW_ERR
+ __le16 flags;
+ #define CQ_RES_UD_V3_FLAGS_SRQ 0x1UL
+ #define CQ_RES_UD_V3_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_UD_V3_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_UD_V3_FLAGS_SRQ_LAST CQ_RES_UD_V3_FLAGS_SRQ_SRQ
+ #define CQ_RES_UD_V3_FLAGS_IMM 0x2UL
+ #define CQ_RES_UD_V3_FLAGS_UNUSED_MASK 0xcUL
+ #define CQ_RES_UD_V3_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_MASK 0x30UL
+ #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_SFT 4
+ #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+ #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+ #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+ #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_V2IPV6
+ __le32 opaque;
+};
+
+/* cq_res_raweth_qp1_v3 (size:256b/32B) */
+struct cq_res_raweth_qp1_v3 {
+ __le16 length;
+ #define CQ_RES_RAWETH_QP1_V3_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_RAWETH_QP1_V3_LENGTH_SFT 0
+ __le16 raweth_qp1_flags_cfa_metadata1;
+ #define CQ_RES_RAWETH_QP1_V3_ERROR 0x1UL
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_MASK 0x3c0UL
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_SFT 6
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_NOT_KNOWN (0x0UL << 6)
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_IP (0x1UL << 6)
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_TCP (0x2UL << 6)
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_UDP (0x3UL << 6)
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_FCOE (0x4UL << 6)
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_ROCE (0x5UL << 6)
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_ICMP (0x7UL << 6)
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 6)
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_PTP_W_TIMESTAMP (0x9UL << 6)
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_LAST CQ_RES_RAWETH_QP1_V3_ITYPE_PTP_W_TIMESTAMP
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_MASK 0xf000UL
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_SFT 12
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_TPID_SEL_MASK 0x7000UL
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_TPID_SEL_SFT 12
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_VALID 0x8000UL
+ __le16 raweth_qp1_errors;
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_IP_CS_ERROR 0x10UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_L4_CS_ERROR 0x20UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_IP_CS_ERROR 0x40UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_L4_CS_ERROR 0x80UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_CRC_ERROR 0x100UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_MASK 0xe00UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_SFT 9
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x3UL << 9)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x4UL << 9)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x5UL << 9)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_TOTAL_ERROR (0x6UL << 9)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_TOTAL_ERROR
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_MASK 0xf000UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_SFT 12
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7UL << 12)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8UL << 12)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_SUPAR_CRC (0x9UL << 12)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_SUPAR_CRC
+ __le16 cfa_metadata0;
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_VID_MASK 0xfffUL
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_VID_SFT 0
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_DE 0x1000UL
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_PRI_MASK 0xe000UL
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_PRI_SFT 13
+ __le64 qp_handle;
+ __le32 raweth_qp1_flags2;
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_IP_CS_CALC 0x1UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_L4_CS_CALC 0x2UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_CS_CALC 0x4UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_L4_CS_CALC 0x8UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_MASK 0xf0UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_NONE (0x0UL << 4)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_ACT_REC_PTR (0x1UL << 4)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_TUNNEL_ID (0x2UL << 4)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_CHDR_DATA (0x3UL << 4)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET (0x4UL << 4)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_VNIC_ID (0x5UL << 4)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_LAST CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_VNIC_ID
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_IP_TYPE 0x100UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_CALC 0x200UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE 0x400UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE_IPV4 (0x0UL << 10)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE_IPV6 (0x1UL << 10)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE_LAST CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE_IPV6
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_MASK 0xffff0000UL
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_SFT 16
+ __le32 cfa_metadata2;
+ u8 cqe_type_toggle;
+ #define CQ_RES_RAWETH_QP1_V3_TOGGLE 0x1UL
+ #define CQ_RES_RAWETH_QP1_V3_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_RAWETH_QP1_V3_CQE_TYPE_SFT 1
+ #define CQ_RES_RAWETH_QP1_V3_CQE_TYPE_RES_RAWETH_QP1_V3 (0xbUL << 1)
+ #define CQ_RES_RAWETH_QP1_V3_CQE_TYPE_LAST CQ_RES_RAWETH_QP1_V3_CQE_TYPE_RES_RAWETH_QP1_V3
+ u8 status;
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_OK 0x0UL
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_HW_LOCAL_LENGTH_ERR 0x3UL
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_LOCAL_PROTECTION_ERR 0x5UL
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_HW_FLUSH_ERR 0xeUL
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_OVERFLOW_ERR 0xfUL
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_LAST CQ_RES_RAWETH_QP1_V3_STATUS_OVERFLOW_ERR
+ u8 flags;
+ #define CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ 0x1UL
+ #define CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ_LAST CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ_SRQ
+ u8 raweth_qp1_payload_offset;
+ __le32 opaque;
+};
+
+/* cq_res_ud_cfa_v3 (size:256b/32B) */
+struct cq_res_ud_cfa_v3 {
+ __le16 length;
+ #define CQ_RES_UD_CFA_V3_LENGTH_MASK 0x3fffUL
+ #define CQ_RES_UD_CFA_V3_LENGTH_SFT 0
+ __le16 cfa_metadata0;
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA0_VID_MASK 0xfffUL
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA0_VID_SFT 0
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA0_DE 0x1000UL
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA0_PRI_MASK 0xe000UL
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA0_PRI_SFT 13
+ __le32 imm_data;
+ __le32 qid_cfa_metadata1_src_qp_high;
+ #define CQ_RES_UD_CFA_V3_QID_MASK 0x7ffUL
+ #define CQ_RES_UD_CFA_V3_QID_SFT 0
+ #define CQ_RES_UD_CFA_V3_UNUSED_MASK 0xff800UL
+ #define CQ_RES_UD_CFA_V3_UNUSED_SFT 11
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA1_MASK 0xf00000UL
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA1_SFT 20
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA1_TPID_SEL_MASK 0x700000UL
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA1_TPID_SEL_SFT 20
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA1_VALID 0x800000UL
+ #define CQ_RES_UD_CFA_V3_SRC_QP_HIGH_MASK 0xff000000UL
+ #define CQ_RES_UD_CFA_V3_SRC_QP_HIGH_SFT 24
+ __le32 cfa_metadata2;
+ __le16 src_mac[3];
+ __le16 src_qp_low;
+ u8 cqe_type_toggle;
+ #define CQ_RES_UD_CFA_V3_TOGGLE 0x1UL
+ #define CQ_RES_UD_CFA_V3_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_UD_CFA_V3_CQE_TYPE_SFT 1
+ #define CQ_RES_UD_CFA_V3_CQE_TYPE_RES_UD_CFA_V3 (0xcUL << 1)
+ #define CQ_RES_UD_CFA_V3_CQE_TYPE_LAST CQ_RES_UD_CFA_V3_CQE_TYPE_RES_UD_CFA_V3
+ u8 status;
+ #define CQ_RES_UD_CFA_V3_STATUS_OK 0x0UL
+ #define CQ_RES_UD_CFA_V3_STATUS_HW_LOCAL_LENGTH_ERR 0x3UL
+ #define CQ_RES_UD_CFA_V3_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_RES_UD_CFA_V3_STATUS_LOCAL_PROTECTION_ERR 0x5UL
+ #define CQ_RES_UD_CFA_V3_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL
+ #define CQ_RES_UD_CFA_V3_STATUS_HW_FLUSH_ERR 0xeUL
+ #define CQ_RES_UD_CFA_V3_STATUS_OVERFLOW_ERR 0xfUL
+ #define CQ_RES_UD_CFA_V3_STATUS_LAST CQ_RES_UD_CFA_V3_STATUS_OVERFLOW_ERR
+ __le16 flags;
+ #define CQ_RES_UD_CFA_V3_FLAGS_SRQ 0x1UL
+ #define CQ_RES_UD_CFA_V3_FLAGS_SRQ_RQ 0x0UL
+ #define CQ_RES_UD_CFA_V3_FLAGS_SRQ_SRQ 0x1UL
+ #define CQ_RES_UD_CFA_V3_FLAGS_SRQ_LAST CQ_RES_UD_CFA_V3_FLAGS_SRQ_SRQ
+ #define CQ_RES_UD_CFA_V3_FLAGS_IMM 0x2UL
+ #define CQ_RES_UD_CFA_V3_FLAGS_UNUSED_MASK 0xcUL
+ #define CQ_RES_UD_CFA_V3_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_MASK 0x30UL
+ #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_SFT 4
+ #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+ #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+ #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+ #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_V2IPV6
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_MASK 0x3c0UL
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_SFT 6
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_ACT_REC_PTR (0x1UL << 6)
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_VNIC_ID (0x5UL << 6)
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_LAST CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_VNIC_ID
+ __le32 opaque;
+};
+
+/* nq_base (size:128b/16B) */
+struct nq_base {
+ __le16 info10_type;
+ #define NQ_BASE_TYPE_MASK 0x3fUL
+ #define NQ_BASE_TYPE_SFT 0
+ #define NQ_BASE_TYPE_CQ_NOTIFICATION 0x30UL
+ #define NQ_BASE_TYPE_SRQ_EVENT 0x32UL
+ #define NQ_BASE_TYPE_DBQ_EVENT 0x34UL
+ #define NQ_BASE_TYPE_QP_EVENT 0x38UL
+ #define NQ_BASE_TYPE_FUNC_EVENT 0x3aUL
+ #define NQ_BASE_TYPE_NQ_REASSIGN 0x3cUL
+ #define NQ_BASE_TYPE_LAST NQ_BASE_TYPE_NQ_REASSIGN
+ #define NQ_BASE_INFO10_MASK 0xffc0UL
+ #define NQ_BASE_INFO10_SFT 6
+ __le16 info16;
+ __le32 info32;
+ __le32 info63_v[2];
+ #define NQ_BASE_V 0x1UL
+ #define NQ_BASE_INFO63_MASK 0xfffffffeUL
+ #define NQ_BASE_INFO63_SFT 1
+};
+
+/* nq_cn (size:128b/16B) */
+struct nq_cn {
+ __le16 type;
+ #define NQ_CN_TYPE_MASK 0x3fUL
+ #define NQ_CN_TYPE_SFT 0
+ #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
+ #define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
+ #define NQ_CN_TOGGLE_MASK 0xc0UL
+ #define NQ_CN_TOGGLE_SFT 6
+ __le16 reserved16;
+ __le32 cq_handle_low;
+ __le32 v;
+ #define NQ_CN_V 0x1UL
+ __le32 cq_handle_high;
+};
+
+/* nq_srq_event (size:128b/16B) */
+struct nq_srq_event {
+ u8 type;
+ #define NQ_SRQ_EVENT_TYPE_MASK 0x3fUL
+ #define NQ_SRQ_EVENT_TYPE_SFT 0
+ #define NQ_SRQ_EVENT_TYPE_SRQ_EVENT 0x32UL
+ #define NQ_SRQ_EVENT_TYPE_LAST NQ_SRQ_EVENT_TYPE_SRQ_EVENT
+ #define NQ_SRQ_EVENT_TOGGLE_MASK 0xc0UL
+ #define NQ_SRQ_EVENT_TOGGLE_SFT 6
+ u8 event;
+ #define NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT 0x1UL
+ #define NQ_SRQ_EVENT_EVENT_LAST NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT
+ __le16 reserved16;
+ __le32 srq_handle_low;
+ __le32 v;
+ #define NQ_SRQ_EVENT_V 0x1UL
+ __le32 srq_handle_high;
+};
+
+/* nq_dbq_event (size:128b/16B) */
+struct nq_dbq_event {
+ u8 type;
+ #define NQ_DBQ_EVENT_TYPE_MASK 0x3fUL
+ #define NQ_DBQ_EVENT_TYPE_SFT 0
+ #define NQ_DBQ_EVENT_TYPE_DBQ_EVENT 0x34UL
+ #define NQ_DBQ_EVENT_TYPE_LAST NQ_DBQ_EVENT_TYPE_DBQ_EVENT
+ u8 event;
+ #define NQ_DBQ_EVENT_EVENT_DBQ_THRESHOLD_EVENT 0x1UL
+ #define NQ_DBQ_EVENT_EVENT_LAST NQ_DBQ_EVENT_EVENT_DBQ_THRESHOLD_EVENT
+ __le16 db_pfid;
+ #define NQ_DBQ_EVENT_DB_PFID_MASK 0xfUL
+ #define NQ_DBQ_EVENT_DB_PFID_SFT 0
+ __le32 db_dpi;
+ #define NQ_DBQ_EVENT_DB_DPI_MASK 0xfffffUL
+ #define NQ_DBQ_EVENT_DB_DPI_SFT 0
+ __le32 v;
+ #define NQ_DBQ_EVENT_V 0x1UL
+ __le32 db_type_db_xid;
+ #define NQ_DBQ_EVENT_DB_XID_MASK 0xfffffUL
+ #define NQ_DBQ_EVENT_DB_XID_SFT 0
+ #define NQ_DBQ_EVENT_DB_TYPE_MASK 0xf0000000UL
+ #define NQ_DBQ_EVENT_DB_TYPE_SFT 28
+};
+
+/* nq_reassign (size:128b/16B) */
+struct nq_reassign {
+ __le16 type;
+ #define NQ_REASSIGN_TYPE_MASK 0x3fUL
+ #define NQ_REASSIGN_TYPE_SFT 0
+ #define NQ_REASSIGN_TYPE_NQ_REASSIGN 0x3cUL
+ #define NQ_REASSIGN_TYPE_LAST NQ_REASSIGN_TYPE_NQ_REASSIGN
+ __le16 reserved16;
+ __le32 cq_handle_low;
+ __le32 v;
+ #define NQ_REASSIGN_V 0x1UL
+ __le32 cq_handle_high;
+};
+
+/* xrrq_irrq (size:256b/32B) */
+struct xrrq_irrq {
+ __le16 credits_type;
+ #define XRRQ_IRRQ_TYPE 0x1UL
+ #define XRRQ_IRRQ_TYPE_READ_REQ 0x0UL
+ #define XRRQ_IRRQ_TYPE_ATOMIC_REQ 0x1UL
+ #define XRRQ_IRRQ_TYPE_LAST XRRQ_IRRQ_TYPE_ATOMIC_REQ
+ #define XRRQ_IRRQ_CREDITS_MASK 0xf800UL
+ #define XRRQ_IRRQ_CREDITS_SFT 11
+ __le16 reserved16;
+ __le32 reserved32;
+ __le32 psn;
+ #define XRRQ_IRRQ_PSN_MASK 0xffffffUL
+ #define XRRQ_IRRQ_PSN_SFT 0
+ __le32 msn;
+ #define XRRQ_IRRQ_MSN_MASK 0xffffffUL
+ #define XRRQ_IRRQ_MSN_SFT 0
+ __le64 va_or_atomic_result;
+ __le32 rdma_r_key;
+ __le32 length;
+};
+
+/* xrrq_orrq (size:256b/32B) */
+struct xrrq_orrq {
+ __le16 num_sges_type;
+ #define XRRQ_ORRQ_TYPE 0x1UL
+ #define XRRQ_ORRQ_TYPE_READ_REQ 0x0UL
+ #define XRRQ_ORRQ_TYPE_ATOMIC_REQ 0x1UL
+ #define XRRQ_ORRQ_TYPE_LAST XRRQ_ORRQ_TYPE_ATOMIC_REQ
+ #define XRRQ_ORRQ_NUM_SGES_MASK 0xf800UL
+ #define XRRQ_ORRQ_NUM_SGES_SFT 11
+ __le16 reserved16;
+ __le32 length;
+ __le32 psn;
+ #define XRRQ_ORRQ_PSN_MASK 0xffffffUL
+ #define XRRQ_ORRQ_PSN_SFT 0
+ __le32 end_psn;
+ #define XRRQ_ORRQ_END_PSN_MASK 0xffffffUL
+ #define XRRQ_ORRQ_END_PSN_SFT 0
+ __le64 first_sge_phy_or_sing_sge_va;
+ __le32 single_sge_l_key;
+ __le32 single_sge_size;
+};
+
+/* ptu_pte (size:64b/8B) */
+struct ptu_pte {
+ __le32 page_next_to_last_last_valid[2];
+ #define PTU_PTE_VALID 0x1UL
+ #define PTU_PTE_LAST 0x2UL
+ #define PTU_PTE_NEXT_TO_LAST 0x4UL
+ #define PTU_PTE_UNUSED_MASK 0xff8UL
+ #define PTU_PTE_UNUSED_SFT 3
+ #define PTU_PTE_PAGE_MASK 0xfffffffffffff000ULL
+ #define PTU_PTE_PAGE_SFT 12
+};
+
+/* ptu_pde (size:64b/8B) */
+struct ptu_pde {
+ __le32 page_valid[2];
+ #define PTU_PDE_VALID 0x1UL
+ #define PTU_PDE_UNUSED_MASK 0xffeUL
+ #define PTU_PDE_UNUSED_SFT 1
+ #define PTU_PDE_PAGE_MASK 0xfffffffffffff000ULL
+ #define PTU_PDE_PAGE_SFT 12
+};
+
+#endif /* _BNG_RE_HSI_H_ */
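
Note: the MASK/SFT define pairs throughout this header follow the usual HSI convention: single-bit flags are tested directly, while multi-bit fields are masked in place and either compared against the pre-shifted enum values or shifted down via the matching *_SFT. A minimal decode sketch using only defines from this header; the helper names are hypothetical:

/* Check whether a 32B CQE is a terminal CQE, and read its toggle bit. */
static inline bool bng_re_is_terminal_cqe(const struct cq_terminal *cqe)
{
	return (cqe->cqe_type_toggle & CQ_TERMINAL_CQE_TYPE_MASK) ==
	       CQ_TERMINAL_CQE_TYPE_TERMINAL;
}

static inline bool bng_re_cqe_toggle(const struct cq_terminal *cqe)
{
	return cqe->cqe_type_toggle & CQ_TERMINAL_TOGGLE;
}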
diff --git a/drivers/infiniband/hw/bng_re/bng_tlv.h b/drivers/infiniband/hw/bng_re/bng_tlv.h
index 278f4922962d..f74ffc4575c7 100644
--- a/drivers/infiniband/hw/bng_re/bng_tlv.h
+++ b/drivers/infiniband/hw/bng_re/bng_tlv.h
@@ -3,7 +3,7 @@
#ifndef __BNG_TLV_H__
#define __BNG_TLV_H__
-#include "roce_hsi.h"
+#include "bng_roce_hsi.h"
struct roce_tlv {
struct tlv tlv;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 73003ad25ee8..ee882456319d 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -595,10 +595,10 @@ int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id)
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_CFG);
req.flags = cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE);
- req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_RAW_QP_ID |
+ req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_QP_ID |
VNIC_CFG_REQ_ENABLES_MRU);
req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
- req.raw_qp_id = cpu_to_le32(qp_id);
+ req.qp_id = cpu_to_le32(qp_id);
req.mru = cpu_to_le16(rdev->netdev->mtu + VLAN_ETH_HLEN);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 40284bbb45d6..947faacd75bb 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -511,7 +511,7 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_XDR;
break;
- case MLX5E_PROT_MASK(MLX5E_1600TAUI_8_1600TBASE_CR8_KR8):
+ case MLX5E_PROT_MASK(MLX5E_1600GAUI_8_1600GBASE_CR8_KR8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_XDR;
break;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 300afc27c561..4a504b7c4293 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1831,8 +1831,7 @@ static int ipoib_hwtstamp_get(struct net_device *dev,
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (!priv->rn_ops->ndo_hwtstamp_get)
- /* legacy */
- return dev_eth_ioctl(dev, config->ifr, SIOCGHWTSTAMP);
+ return -EOPNOTSUPP;
return priv->rn_ops->ndo_hwtstamp_get(dev, config);
}
@@ -1844,8 +1843,7 @@ static int ipoib_hwtstamp_set(struct net_device *dev,
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (!priv->rn_ops->ndo_hwtstamp_set)
- /* legacy */
- return dev_eth_ioctl(dev, config->ifr, SIOCSHWTSTAMP);
+ return -EOPNOTSUPP;
return priv->rn_ops->ndo_hwtstamp_set(dev, config, extack);
}
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 10799772494a..6e5bdc2f0cc8 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -232,6 +232,15 @@ static void sdio_bus_remove(struct device *dev)
pm_runtime_put_sync(dev);
}
+static void sdio_bus_shutdown(struct device *dev)
+{
+ struct sdio_driver *drv = to_sdio_driver(dev->driver);
+ struct sdio_func *func = dev_to_sdio_func(dev);
+
+ if (dev->driver && drv->shutdown)
+ drv->shutdown(func);
+}
+
static const struct dev_pm_ops sdio_bus_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
SET_RUNTIME_PM_OPS(
@@ -248,6 +257,7 @@ static const struct bus_type sdio_bus_type = {
.uevent = sdio_bus_uevent,
.probe = sdio_bus_probe,
.remove = sdio_bus_remove,
+ .shutdown = sdio_bus_shutdown,
.pm = &sdio_bus_pm_ops,
};
@@ -261,6 +271,14 @@ void sdio_unregister_bus(void)
bus_unregister(&sdio_bus_type);
}
+static void sdio_legacy_shutdown(struct sdio_func *func)
+{
+ struct device *dev = &func->dev;
+ struct device_driver *driver = dev->driver;
+
+ driver->shutdown(dev);
+}
+
/**
* __sdio_register_driver - register a function driver
* @drv: SDIO function driver
@@ -272,6 +290,13 @@ int __sdio_register_driver(struct sdio_driver *drv, struct module *owner)
drv->drv.bus = &sdio_bus_type;
drv->drv.owner = owner;
+ /*
+ * Drivers that still set the generic drv.shutdown callback need
+ * updating to the SDIO-level shutdown op. driver_register() already
+ * warns about this, so don't add another warning here.
+ */
+ if (!drv->shutdown && drv->drv.shutdown)
+ drv->shutdown = sdio_legacy_shutdown;
+
return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__sdio_register_driver);
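
With the plumbing above in place, an SDIO function driver opts in by filling the new sdio_driver::shutdown member instead of the generic drv.shutdown. A minimal sketch, assuming the API added by this patch; the driver name, device IDs, and callbacks are hypothetical:

#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/module.h>

static const struct sdio_device_id mydrv_ids[] = {
	{ SDIO_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
	{ }
};
MODULE_DEVICE_TABLE(sdio, mydrv_ids);

static int mydrv_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
	return 0;
}

static void mydrv_remove(struct sdio_func *func)
{
}

static void mydrv_shutdown(struct sdio_func *func)
{
	/* Quiesce the function before reboot/power-off, e.g. mask card
	 * interrupts and stop outstanding I/O.
	 */
}

static struct sdio_driver mydrv_driver = {
	.name		= "mydrv",
	.id_table	= mydrv_ids,
	.probe		= mydrv_probe,
	.remove		= mydrv_remove,
	.shutdown	= mydrv_shutdown,	/* invoked via sdio_bus_shutdown() */
};
module_sdio_driver(mydrv_driver);
MODULE_LICENSE("GPL");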
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ac12eaf11755..7e9becad91df 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -341,6 +341,7 @@ config NETCONSOLE_DYNAMIC
bool "Dynamic reconfiguration of logging targets"
depends on NETCONSOLE && SYSFS && CONFIGFS_FS && \
!(NETCONSOLE=y && CONFIGFS_FS=m)
+ select PRINTK_EXECUTION_CTX
help
This option enables the ability to dynamically reconfigure target
parameters (interface, IP addresses, port numbers, MAC addresses)
@@ -516,8 +517,6 @@ source "drivers/net/ethernet/Kconfig"
source "drivers/net/fddi/Kconfig"
-source "drivers/net/hippi/Kconfig"
-
source "drivers/net/ipa/Kconfig"
source "drivers/net/phy/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 73bc63ecd65f..5b01215f6829 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -55,7 +55,6 @@ obj-y += dsa/
endif
obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
-obj-$(CONFIG_HIPPI) += hippi/
obj-$(CONFIG_HAMRADIO) += hamradio/
obj-$(CONFIG_QCOM_IPA) += ipa/
obj-$(CONFIG_PLIP) += plip/
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 0472bcdff130..19e411b2e3a7 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -618,17 +618,4 @@ static struct pci_driver com20020pci_driver = {
.remove = com20020pci_remove,
};
-static int __init com20020pci_init(void)
-{
- if (BUGLVL(D_NORMAL))
- pr_info("%s\n", "COM20020 PCI support");
- return pci_register_driver(&com20020pci_driver);
-}
-
-static void __exit com20020pci_cleanup(void)
-{
- pci_unregister_driver(&com20020pci_driver);
-}
-
-module_init(com20020pci_init)
-module_exit(com20020pci_cleanup)
+module_pci_driver(com20020pci_driver);
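
For reference, module_pci_driver() expands (via module_driver()) to essentially the boilerplate deleted above, minus the BUGLVL banner; roughly:

static int __init com20020pci_driver_init(void)
{
	return pci_register_driver(&com20020pci_driver);
}
module_init(com20020pci_driver_init);

static void __exit com20020pci_driver_exit(void)
{
	pci_unregister_driver(&com20020pci_driver);
}
module_exit(com20020pci_driver_exit);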
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index a0053e3992a3..b8526805ffac 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -401,19 +401,3 @@ EXPORT_SYMBOL(com20020_netdev_ops);
MODULE_DESCRIPTION("ARCnet COM20020 chipset core driver");
MODULE_LICENSE("GPL");
-
-#ifdef MODULE
-
-static int __init com20020_module_init(void)
-{
- if (BUGLVL(D_NORMAL))
- pr_info("%s\n", "COM20020 chipset support (by David Woodhouse et al.)");
- return 0;
-}
-
-static void __exit com20020_module_exit(void)
-{
-}
-module_init(com20020_module_init);
-module_exit(com20020_module_exit);
-#endif /* MODULE */
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 1a8de2bf8655..af7f74cfdc08 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -72,6 +72,7 @@ enum ad_link_speed_type {
AD_LINK_SPEED_40000MBPS,
AD_LINK_SPEED_50000MBPS,
AD_LINK_SPEED_56000MBPS,
+ AD_LINK_SPEED_80000MBPS,
AD_LINK_SPEED_100000MBPS,
AD_LINK_SPEED_200000MBPS,
AD_LINK_SPEED_400000MBPS,
@@ -297,6 +298,7 @@ static inline int __check_agg_selection_timer(struct port *port)
* %AD_LINK_SPEED_40000MBPS
* %AD_LINK_SPEED_50000MBPS
* %AD_LINK_SPEED_56000MBPS
+ * %AD_LINK_SPEED_80000MBPS
* %AD_LINK_SPEED_100000MBPS
* %AD_LINK_SPEED_200000MBPS
* %AD_LINK_SPEED_400000MBPS
@@ -365,6 +367,10 @@ static u16 __get_link_speed(struct port *port)
speed = AD_LINK_SPEED_56000MBPS;
break;
+ case SPEED_80000:
+ speed = AD_LINK_SPEED_80000MBPS;
+ break;
+
case SPEED_100000:
speed = AD_LINK_SPEED_100000MBPS;
break;
@@ -816,6 +822,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
case AD_LINK_SPEED_56000MBPS:
bandwidth = nports * 56000;
break;
+ case AD_LINK_SPEED_80000MBPS:
+ bandwidth = nports * 80000;
+ break;
case AD_LINK_SPEED_100000MBPS:
bandwidth = nports * 100000;
break;
@@ -1008,11 +1017,8 @@ static void ad_cond_set_peer_notif(struct port *port)
{
struct bonding *bond = port->slave->bond;
- if (bond->params.broadcast_neighbor && rtnl_trylock()) {
- bond->send_peer_notif = bond->params.num_peer_notif *
- max(1, bond->params.peer_notif_delay);
- rtnl_unlock();
- }
+ if (bond->params.broadcast_neighbor)
+ bond_peer_notify_work_rearm(bond, 0);
}
/**
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 45bd2bb102ff..78cff904cdc3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -791,26 +791,29 @@ static int bond_update_speed_duplex(struct slave *slave)
struct ethtool_link_ksettings ecmd;
int res;
- slave->speed = SPEED_UNKNOWN;
- slave->duplex = DUPLEX_UNKNOWN;
-
res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
if (res < 0)
- return 1;
+ goto speed_duplex_unknown;
if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
- return 1;
+ goto speed_duplex_unknown;
switch (ecmd.base.duplex) {
case DUPLEX_FULL:
case DUPLEX_HALF:
break;
default:
- return 1;
+ goto speed_duplex_unknown;
}
slave->speed = ecmd.base.speed;
slave->duplex = ecmd.base.duplex;
return 0;
+
+speed_duplex_unknown:
+ slave->speed = SPEED_UNKNOWN;
+ slave->duplex = DUPLEX_UNKNOWN;
+
+ return 1;
}
const char *bond_slave_link_status(s8 link)
@@ -1195,6 +1198,49 @@ static bool bond_should_notify_peers(struct bonding *bond)
return true;
}
+/* Schedule a send_peer_notif update from contexts where RTNL may be held. */
+void bond_peer_notify_work_rearm(struct bonding *bond, unsigned long delay)
+{
+ queue_delayed_work(bond->wq, &bond->peer_notify_work, delay);
+}
+
+/* Reset the send_peer_notif counter. Caller must hold RTNL. */
+static void bond_peer_notify_reset(struct bonding *bond)
+{
+ WRITE_ONCE(bond->send_peer_notif,
+ bond->params.num_peer_notif *
+ max(1, bond->params.peer_notif_delay));
+}
+
+static void bond_peer_notify_handler(struct work_struct *work)
+{
+ struct bonding *bond = container_of(work, struct bonding,
+ peer_notify_work.work);
+
+ if (!rtnl_trylock()) {
+ bond_peer_notify_work_rearm(bond, 1);
+ return;
+ }
+
+ bond_peer_notify_reset(bond);
+
+ rtnl_unlock();
+}
+
+/* Post a peer notify event if one is due. Caller must hold RTNL. */
+static void bond_peer_notify_may_events(struct bonding *bond, bool force)
+{
+ bool notified = false;
+
+ if (bond_should_notify_peers(bond)) {
+ notified = true;
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
+ }
+
+ if (notified || force)
+ bond->send_peer_notif--;
+}
+
/**
* bond_change_active_slave - change the active slave into the specified one
* @bond: our bonding struct
@@ -1270,8 +1316,6 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
BOND_SLAVE_NOTIFY_NOW);
if (new_active) {
- bool should_notify_peers = false;
-
bond_set_slave_active_flags(new_active,
BOND_SLAVE_NOTIFY_NOW);
@@ -1279,19 +1323,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
bond_do_fail_over_mac(bond, new_active,
old_active);
- if (netif_running(bond->dev)) {
- bond->send_peer_notif =
- bond->params.num_peer_notif *
- max(1, bond->params.peer_notif_delay);
- should_notify_peers =
- bond_should_notify_peers(bond);
- }
-
call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
- if (should_notify_peers) {
- bond->send_peer_notif--;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
- bond->dev);
+
+ if (netif_running(bond->dev)) {
+ bond_peer_notify_reset(bond);
+ bond_peer_notify_may_events(bond, false);
}
}
}
@@ -2790,11 +2826,10 @@ static void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
- bool should_notify_peers;
- bool commit;
- unsigned long delay;
- struct slave *slave;
struct list_head *iter;
+ struct slave *slave;
+ unsigned long delay;
+ bool commit;
delay = msecs_to_jiffies(bond->params.miimon);
@@ -2803,12 +2838,11 @@ static void bond_mii_monitor(struct work_struct *work)
rcu_read_lock();
- should_notify_peers = bond_should_notify_peers(bond);
commit = !!bond_miimon_inspect(bond);
rcu_read_unlock();
- if (commit || bond->send_peer_notif) {
+ if (commit || READ_ONCE(bond->send_peer_notif)) {
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
delay = 1;
@@ -2823,12 +2857,8 @@ static void bond_mii_monitor(struct work_struct *work)
bond_miimon_commit(bond);
}
- if (bond->send_peer_notif) {
- bond->send_peer_notif--;
- if (should_notify_peers)
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
- bond->dev);
- }
+ if (bond->send_peer_notif)
+ bond_peer_notify_may_events(bond, true);
rtnl_unlock(); /* might sleep, hold no other locks */
}
@@ -3741,8 +3771,7 @@ check_state:
static void bond_activebackup_arp_mon(struct bonding *bond)
{
- bool should_notify_peers = false;
- bool should_notify_rtnl = false;
+ bool should_notify_rtnl;
int delta_in_ticks;
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
@@ -3752,15 +3781,12 @@ static void bond_activebackup_arp_mon(struct bonding *bond)
rcu_read_lock();
- should_notify_peers = bond_should_notify_peers(bond);
-
if (bond_ab_arp_inspect(bond)) {
rcu_read_unlock();
/* Race avoidance with bond_close flush of workqueue */
if (!rtnl_trylock()) {
delta_in_ticks = 1;
- should_notify_peers = false;
goto re_arm;
}
@@ -3773,19 +3799,15 @@ static void bond_activebackup_arp_mon(struct bonding *bond)
should_notify_rtnl = bond_ab_arp_probe(bond);
rcu_read_unlock();
-re_arm:
- if (bond->params.arp_interval)
- queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
+ if (READ_ONCE(bond->send_peer_notif) || should_notify_rtnl) {
+ if (!rtnl_trylock()) {
+ delta_in_ticks = 1;
+ goto re_arm;
+ }
- if (should_notify_peers || should_notify_rtnl) {
- if (!rtnl_trylock())
- return;
+ if (bond->send_peer_notif)
+ bond_peer_notify_may_events(bond, true);
- if (should_notify_peers) {
- bond->send_peer_notif--;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
- bond->dev);
- }
if (should_notify_rtnl) {
bond_slave_state_notify(bond);
bond_slave_link_notify(bond);
@@ -3793,6 +3815,10 @@ re_arm:
rtnl_unlock();
}
+
+re_arm:
+ if (bond->params.arp_interval)
+ queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
}
static void bond_arp_monitor(struct work_struct *work)
@@ -4222,6 +4248,10 @@ static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp)
void bond_work_init_all(struct bonding *bond)
{
+ /* bond_close() (ndo_stop) flushes these works while holding RTNL.
+ * The work handlers must therefore use rtnl_trylock() and re-arm
+ * rather than block on RTNL, to avoid deadlock.
+ */
INIT_DELAYED_WORK(&bond->mcast_work,
bond_resend_igmp_join_requests_delayed);
INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
@@ -4229,6 +4259,7 @@ void bond_work_init_all(struct bonding *bond)
INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
+ INIT_DELAYED_WORK(&bond->peer_notify_work, bond_peer_notify_handler);
}
void bond_work_cancel_all(struct bonding *bond)
@@ -4239,6 +4270,7 @@ void bond_work_cancel_all(struct bonding *bond)
cancel_delayed_work_sync(&bond->ad_work);
cancel_delayed_work_sync(&bond->mcast_work);
cancel_delayed_work_sync(&bond->slave_arr_work);
+ cancel_delayed_work_sync(&bond->peer_notify_work);
}
static int bond_open(struct net_device *bond_dev)
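
The peer-notify rework above is an instance of the usual trylock-and-rearm idiom for delayed work that can race with an RTNL-holding canceller. In outline (a sketch with hypothetical names, not the bonding code itself):

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct workqueue_struct *wq;
	struct delayed_work dwork;
};

static void my_work_handler(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, dwork.work);

	if (!rtnl_trylock()) {
		/* The canceller may hold RTNL while waiting for this work
		 * to finish, so blocking on rtnl_lock() here could
		 * deadlock; re-arm and retry shortly instead.
		 */
		queue_delayed_work(ctx->wq, &ctx->dwork, 1);
		return;
	}

	/* ... RTNL-protected update ... */

	rtnl_unlock();
}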
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index c398ac42eae9..b90890030751 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -284,6 +284,7 @@ static void ser_release(struct work_struct *work)
{
struct list_head list;
struct ser_device *ser, *tmp;
+ struct tty_struct *tty;
spin_lock(&ser_lock);
list_replace_init(&ser_release_list, &list);
@@ -292,9 +293,11 @@ static void ser_release(struct work_struct *work)
if (!list_empty(&list)) {
rtnl_lock();
list_for_each_entry_safe(ser, tmp, &list, node) {
+ tty = ser->tty;
dev_close(ser->dev);
unregister_netdevice(ser->dev);
debugfs_deinit(ser);
+ tty_kref_put(tty);
}
rtnl_unlock();
}
@@ -355,8 +358,6 @@ static void ldisc_close(struct tty_struct *tty)
{
struct ser_device *ser = tty->disc_data;
- tty_kref_put(ser->tty);
-
spin_lock(&ser_lock);
list_move(&ser->node, &ser_release_list);
spin_unlock(&ser_lock);
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 3ebd4f779b9b..95fcdc1026f8 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -6,6 +6,7 @@
#include <linux/can/dev.h>
#include <linux/module.h>
+#include <net/can.h>
#define MOD_DESC "CAN device driver interface"
@@ -48,6 +49,7 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx, unsigned int frame_len)
{
struct can_priv *priv = netdev_priv(dev);
+ struct can_skb_ext *csx;
if (idx >= priv->echo_skb_max) {
netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
@@ -74,7 +76,9 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
skb->dev = dev;
/* save frame_len to reuse it when transmission is completed */
- can_skb_prv(skb)->frame_len = frame_len;
+ csx = can_skb_ext_find(skb);
+ if (csx)
+ csx->can_framelen = frame_len;
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -111,7 +115,7 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx,
* length is supported on both CAN and CANFD frames.
*/
struct sk_buff *skb = priv->echo_skb[idx];
- struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
+ struct can_skb_ext *csx;
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
skb_tstamp_tx(skb, skb_hwtstamps(skb));
@@ -119,8 +123,13 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx,
/* get the real payload length for netdev statistics */
*len_ptr = can_skb_get_data_len(skb);
- if (frame_len_ptr)
- *frame_len_ptr = can_skb_priv->frame_len;
+ if (frame_len_ptr) {
+ csx = can_skb_ext_find(skb);
+ if (csx)
+ *frame_len_ptr = csx->can_framelen;
+ else
+ *frame_len_ptr = 0;
+ }
priv->echo_skb[idx] = NULL;
@@ -180,10 +189,15 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx,
if (priv->echo_skb[idx]) {
struct sk_buff *skb = priv->echo_skb[idx];
- struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
-
- if (frame_len_ptr)
- *frame_len_ptr = can_skb_priv->frame_len;
+ struct can_skb_ext *csx;
+
+ if (frame_len_ptr) {
+ csx = can_skb_ext_find(skb);
+ if (csx)
+ *frame_len_ptr = csx->can_framelen;
+ else
+ *frame_len_ptr = 0;
+ }
dev_kfree_skb_any(skb);
priv->echo_skb[idx] = NULL;
@@ -192,38 +206,39 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx,
EXPORT_SYMBOL_GPL(can_free_echo_skb);
/* fill common values for CAN sk_buffs */
-static void init_can_skb_reserve(struct sk_buff *skb)
+static void init_can_skb(struct sk_buff *skb)
{
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->skbcnt = 0;
}
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
+ struct can_skb_ext *csx;
- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
- sizeof(struct can_frame));
- if (unlikely(!skb)) {
- *cf = NULL;
+ skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
+ if (unlikely(!skb))
+ goto out_error_cc;
- return NULL;
+ csx = can_skb_ext_add(skb);
+ if (!csx) {
+ kfree_skb(skb);
+ goto out_error_cc;
}
skb->protocol = htons(ETH_P_CAN);
- init_can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = dev->ifindex;
+ init_can_skb(skb);
+ csx->can_iif = dev->ifindex;
*cf = skb_put_zero(skb, sizeof(struct can_frame));
return skb;
+
+out_error_cc:
+ *cf = NULL;
+
+ return NULL;
}
EXPORT_SYMBOL_GPL(alloc_can_skb);
@@ -231,18 +246,21 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
struct canfd_frame **cfd)
{
struct sk_buff *skb;
+ struct can_skb_ext *csx;
- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
- sizeof(struct canfd_frame));
- if (unlikely(!skb)) {
- *cfd = NULL;
+ skb = netdev_alloc_skb(dev, sizeof(struct canfd_frame));
+ if (unlikely(!skb))
+ goto out_error_fd;
- return NULL;
+ csx = can_skb_ext_add(skb);
+ if (!csx) {
+ kfree_skb(skb);
+ goto out_error_fd;
}
skb->protocol = htons(ETH_P_CANFD);
- init_can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = dev->ifindex;
+ init_can_skb(skb);
+ csx->can_iif = dev->ifindex;
*cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
@@ -250,6 +268,11 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
(*cfd)->flags = CANFD_FDF;
return skb;
+
+out_error_fd:
+ *cfd = NULL;
+
+ return NULL;
}
EXPORT_SYMBOL_GPL(alloc_canfd_skb);
@@ -258,18 +281,24 @@ struct sk_buff *alloc_canxl_skb(struct net_device *dev,
unsigned int data_len)
{
struct sk_buff *skb;
+ struct can_skb_ext *csx;
if (data_len < CANXL_MIN_DLEN || data_len > CANXL_MAX_DLEN)
- goto out_error;
+ goto out_error_xl;
- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
- CANXL_HDR_SIZE + data_len);
+ skb = netdev_alloc_skb(dev, CANXL_HDR_SIZE + data_len);
if (unlikely(!skb))
- goto out_error;
+ goto out_error_xl;
+
+ csx = can_skb_ext_add(skb);
+ if (!csx) {
+ kfree_skb(skb);
+ goto out_error_xl;
+ }
skb->protocol = htons(ETH_P_CANXL);
- init_can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = dev->ifindex;
+ init_can_skb(skb);
+ csx->can_iif = dev->ifindex;
*cxl = skb_put_zero(skb, CANXL_HDR_SIZE + data_len);
@@ -279,7 +308,7 @@ struct sk_buff *alloc_canxl_skb(struct net_device *dev,
return skb;
-out_error:
+out_error_xl:
*cxl = NULL;
return NULL;
@@ -302,18 +331,20 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
EXPORT_SYMBOL_GPL(alloc_can_err_skb);
/* Check for outgoing skbs that have not been created by the CAN subsystem */
-static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
+static bool can_skb_init_valid(struct net_device *dev, struct sk_buff *skb)
{
- /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
- if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
- return false;
+ struct can_skb_ext *csx = can_skb_ext_find(skb);
/* af_packet does not apply CAN skb specific settings */
- if (skb->ip_summed == CHECKSUM_NONE) {
- /* init headroom */
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
+ if (skb->ip_summed == CHECKSUM_NONE || !csx) {
+ /* init CAN skb content */
+ if (!csx) {
+ csx = can_skb_ext_add(skb);
+ if (!csx)
+ return false;
+ }
+ csx->can_iif = dev->ifindex;
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* perform proper loopback on capable devices */
@@ -361,7 +392,7 @@ bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
goto inval_skb;
}
- if (!can_skb_headroom_valid(dev, skb))
+ if (!can_skb_init_valid(dev, skb))
goto inval_skb;
return false;
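
For driver authors tracking this conversion: per-skb CAN metadata now travels in an skb extension rather than in fixed headroom, and the extension can be absent after an allocation failure, in which case __can_get_echo_skb() above reports a frame length of 0. A TX-completion sketch under those assumptions (my_tx_done() is hypothetical; can_get_echo_skb() is the existing wrapper around __can_get_echo_skb()):

static void my_tx_done(struct net_device *dev, unsigned int idx)
{
	unsigned int frame_len = 0;
	unsigned int len;

	/* The frame length is resolved from the skb extension internally;
	 * frame_len stays 0 when no extension was attached at TX time.
	 */
	len = can_get_echo_skb(dev, idx, &frame_len);

	netdev_completed_queue(dev, 1, frame_len);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;
}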
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 7895e1fdea1c..eaf8cac78038 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -472,6 +472,7 @@ struct rcar_canfd_global {
unsigned long channels_mask; /* Enabled channels mask */
bool extclk; /* CANFD or Ext clock */
bool fdmode; /* CAN FD or Classical CAN only mode */
+ bool fd_only_mode; /* FD-Only mode for CAN-FD */
struct reset_control *rstc1;
struct reset_control *rstc2;
const struct rcar_canfd_hw_info *info;
@@ -669,6 +670,23 @@ static const struct rcar_canfd_hw_info r9a09g047_hw_info = {
.external_clk = 0,
};
+static const struct rcar_canfd_hw_info r9a09g077_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen4_tdc_const,
+ .regs = &rcar_gen4_regs,
+ .sh = &rcar_gen4_shift_data,
+ .rnc_field_width = 16,
+ .max_aflpn = 15,
+ .max_cftml = 31,
+ .max_channels = 2,
+ .postdiv = 1,
+ .multi_channel_irqs = 1,
+ .ch_interface_mode = 1,
+ .shared_can_regs = 1,
+ .external_clk = 1,
+};
+
/* Helper functions */
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
{
@@ -829,12 +847,20 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
RCANFD_GEN4_FDCFG_FDOE);
rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg,
RCANFD_GEN4_FDCFG_CLOE);
+ } else if (gpriv->fd_only_mode) {
+ rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_CLOE);
+ rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_FDOE);
} else {
rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
RCANFD_GEN4_FDCFG_FDOE);
rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
RCANFD_GEN4_FDCFG_CLOE);
}
+ } else if (gpriv->fd_only_mode) {
+ rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_FDOE);
}
}
@@ -2140,6 +2166,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->fdmode = fdmode;
gpriv->info = info;
+ if (of_property_read_bool(dev->of_node, "renesas,fd-only"))
+ gpriv->fd_only_mode = true; /* FD-Only mode for CAN-FD */
+
gpriv->rstc1 = devm_reset_control_get_optional_exclusive(dev, "rstp_n");
if (IS_ERR(gpriv->rstc1))
return dev_err_probe(dev, PTR_ERR(gpriv->rstc1),
@@ -2239,7 +2268,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gpriv);
dev_info(dev, "global operational state (%s clk, %s mode)\n",
gpriv->extclk ? "ext" : "canfd",
- gpriv->fdmode ? "fd" : "classical");
+ gpriv->fdmode ? (gpriv->fd_only_mode ? "fd-only" : "fd") : "classical");
return 0;
fail_channel:
@@ -2333,6 +2362,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
{ .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info },
{ .compatible = "renesas,r9a09g047-canfd", .data = &r9a09g047_hw_info },
+ { .compatible = "renesas,r9a09g077-canfd", .data = &r9a09g077_hw_info },
{ .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info },
{ .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info },
{ .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info },
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index a8fa0d6516b9..3cdb583ee3e5 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -407,18 +407,22 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
{
struct sja1000_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
+ enum can_state state, rx_state, tx_state;
struct can_frame *cf;
struct sk_buff *skb;
- enum can_state state = priv->can.state;
- enum can_state rx_state, tx_state;
- unsigned int rxerr, txerr;
+ struct can_berr_counter bec;
uint8_t ecc, alc;
int ret = 0;
skb = alloc_can_err_skb(dev, &cf);
- txerr = priv->read_reg(priv, SJA1000_TXERR);
- rxerr = priv->read_reg(priv, SJA1000_RXERR);
+ sja1000_get_berr_counter(dev, &bec);
+ can_state_get_by_berr_counter(dev, &bec, &tx_state, &rx_state);
+
+ if (status & SR_BS)
+ rx_state = CAN_STATE_BUS_OFF;
+
+ state = max(tx_state, rx_state);
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
@@ -441,22 +445,10 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
if (priv->flags & SJA1000_QUIRK_RESET_ON_OVERRUN)
ret = IRQ_WAKE_THREAD;
}
-
- if (isrc & IRQ_EI) {
- /* error warning interrupt */
- netdev_dbg(dev, "error warning interrupt\n");
-
- if (status & SR_BS)
- state = CAN_STATE_BUS_OFF;
- else if (status & SR_ES)
- state = CAN_STATE_ERROR_WARNING;
- else
- state = CAN_STATE_ERROR_ACTIVE;
- }
if (state != CAN_STATE_BUS_OFF && skb) {
cf->can_id |= CAN_ERR_CNT;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
}
if (isrc & IRQ_BEI) {
/* bus error interrupt */
@@ -494,15 +486,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
stats->rx_errors++;
}
}
- if (isrc & IRQ_EPI) {
- /* error passive interrupt */
- netdev_dbg(dev, "error passive interrupt\n");
-
- if (state == CAN_STATE_ERROR_PASSIVE)
- state = CAN_STATE_ERROR_WARNING;
- else
- state = CAN_STATE_ERROR_PASSIVE;
- }
if (isrc & IRQ_ALI) {
/* arbitration lost interrupt */
netdev_dbg(dev, "arbitration lost interrupt\n");
@@ -515,9 +498,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
}
if (state != priv->can.state) {
- tx_state = txerr >= rxerr ? state : 0;
- rx_state = txerr <= rxerr ? state : 0;
-
can_change_state(dev, cf, tx_state, rx_state);
if(state == CAN_STATE_BUS_OFF)
@@ -725,19 +705,3 @@ void unregister_sja1000dev(struct net_device *dev)
unregister_candev(dev);
}
EXPORT_SYMBOL_GPL(unregister_sja1000dev);
-
-static __init int sja1000_init(void)
-{
- printk(KERN_INFO "%s CAN netdevice driver\n", DRV_NAME);
-
- return 0;
-}
-
-module_init(sja1000_init);
-
-static __exit void sja1000_exit(void)
-{
- printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
-}
-
-module_exit(sja1000_exit);
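
The rewritten error path above derives the channel state from the error counters instead of decoding IRQ_EI/IRQ_EPI. The thresholds such a helper applies come from ISO 11898-1; the following is an illustrative sketch, not the actual can_state_get_by_berr_counter() implementation:

#include <linux/can/dev.h>

static enum can_state sja1000_berr_to_state(u16 cnt)
{
	if (cnt >= 128)		/* error passive at REC/TEC >= 128 */
		return CAN_STATE_ERROR_PASSIVE;
	if (cnt >= 96)		/* error warning at REC/TEC >= 96 */
		return CAN_STATE_ERROR_WARNING;
	return CAN_STATE_ERROR_ACTIVE;
}

/* The channel state is max(tx_state, rx_state); bus-off is still taken
 * from the status register (SR_BS) in sja1000_err() above, since the
 * TX error counter is not meaningful once the controller is bus-off.
 */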
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index f14c6f02b662..e882250180ef 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -21,6 +21,7 @@
#include <linux/can/vxcan.h>
#include <linux/can/can-ml.h>
#include <linux/slab.h>
+#include <net/can.h>
#include <net/rtnetlink.h>
#define DRV_NAME "vxcan"
@@ -39,6 +40,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
struct vxcan_priv *priv = netdev_priv(dev);
struct net_device *peer;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
+ struct can_skb_ext *csx;
struct sk_buff *skb;
unsigned int len;
@@ -63,8 +65,19 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
goto out_unlock;
}
+ /* The cloned skb references oskb's skb extension with an elevated
+ * refcount. skb_ext_add() unshares it into a private copy, which is
+ * needed to start with a fresh can_gw_hops counter in the peer's
+ * (possibly different) network namespace.
+ */
+ csx = skb_ext_add(skb, SKB_EXT_CAN);
+ if (!csx) {
+ kfree_skb(skb);
+ goto out_unlock;
+ }
+
/* reset CAN GW hop counter */
- skb->csum_start = 0;
+ csx->can_gw_hops = 0;
skb->pkt_type = PACKET_BROADCAST;
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 7eb301fd987d..39fb8ead16b5 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -74,6 +74,8 @@ source "drivers/net/dsa/microchip/Kconfig"
source "drivers/net/dsa/mv88e6xxx/Kconfig"
+source "drivers/net/dsa/mxl862xx/Kconfig"
+
source "drivers/net/dsa/ocelot/Kconfig"
source "drivers/net/dsa/qca/Kconfig"
@@ -158,6 +160,7 @@ config NET_DSA_VITESSE_VSC73XX_PLATFORM
config NET_DSA_YT921X
tristate "Motorcomm YT9215 ethernet switch chip support"
select NET_DSA_TAG_YT921X
+ select NET_IEEE8021Q_HELPERS if DCB
help
This enables support for the Motorcomm YT9215 ethernet switch
chip.
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 16de4ba3fa38..f5a463b87ec2 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -20,6 +20,7 @@ obj-y += hirschmann/
obj-y += lantiq/
obj-y += microchip/
obj-y += mv88e6xxx/
+obj-y += mxl862xx/
obj-y += ocelot/
obj-y += qca/
obj-y += realtek/
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 4a416f2717ba..b41254b3ac42 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -395,6 +395,12 @@ static struct mdio_driver dsa_loop_drv = {
.shutdown = dsa_loop_drv_shutdown,
};
+static int dsa_loop_bus_match(struct device *dev,
+ const struct device_driver *drv)
+{
+ return drv == &dsa_loop_drv.mdiodrv.driver;
+}
+
static void dsa_loop_phydevs_unregister(void)
{
for (int i = 0; i < NUM_FIXED_PHYS; i++) {
@@ -428,7 +434,7 @@ static int __init dsa_loop_create_switch_mdiodev(void)
if (IS_ERR(switch_mdiodev))
goto out;
- strscpy(switch_mdiodev->modalias, "dsa-loop");
+ switch_mdiodev->bus_match = dsa_loop_bus_match;
switch_mdiodev->dev.platform_data = &dsa_loop_pdata;
ret = mdio_device_register(switch_mdiodev);
diff --git a/drivers/net/dsa/lantiq/Kconfig b/drivers/net/dsa/lantiq/Kconfig
index 4a9771be5d58..98efeef2661b 100644
--- a/drivers/net/dsa/lantiq/Kconfig
+++ b/drivers/net/dsa/lantiq/Kconfig
@@ -15,10 +15,13 @@ config NET_DSA_MXL_GSW1XX
tristate "MaxLinear GSW1xx Ethernet switch support"
select NET_DSA_TAG_MXL_GSW1XX
select NET_DSA_LANTIQ_COMMON
+ select PHY_COMMON_PROPS
help
- This enables support for the MaxLinear GSW1xx family of 1GE switches
+ This enables support for the Intel/MaxLinear GSW1xx family of 1GE
+ switches.
GSW120 4 port, 2 PHYs, RGMII & SGMII/2500Base-X
GSW125 4 port, 2 PHYs, RGMII & SGMII/2500Base-X, industrial temperature
GSW140 6 port, 4 PHYs, RGMII & SGMII/2500Base-X
GSW141 6 port, 4 PHYs, RGMII & SGMII
GSW145 6 port, 4 PHYs, RGMII & SGMII/2500Base-X, industrial temperature
+ GSW150 7 port, 5 PHYs, 1x GMII/RGMII, 1x RGMII
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.c b/drivers/net/dsa/lantiq/lantiq_gswip.c
index b094001a7c80..4d699d8c16f9 100644
--- a/drivers/net/dsa/lantiq/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.c
@@ -33,8 +33,7 @@ static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
switch (port) {
- case 0:
- case 1:
+ case 0 ... 1:
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_MII,
config->supported_interfaces);
@@ -44,9 +43,7 @@ static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
config->supported_interfaces);
break;
- case 2:
- case 3:
- case 4:
+ case 2 ... 4:
case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
@@ -75,10 +72,7 @@ static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
config->supported_interfaces);
break;
- case 1:
- case 2:
- case 3:
- case 4:
+ case 1 ... 4:
case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
@@ -463,10 +457,22 @@ static void gswip_shutdown(struct platform_device *pdev)
}
static const struct gswip_hw_info gswip_xrx200 = {
- .max_ports = 7,
+ .max_ports = GSWIP_MAX_PORTS,
.allowed_cpu_ports = BIT(6),
- .mii_ports = BIT(0) | BIT(1) | BIT(5),
- .mii_port_reg_offset = 0,
+ .mii_cfg = {
+ [0] = GSWIP_MII_CFGp(0),
+ [1] = GSWIP_MII_CFGp(1),
+ [2 ... 4] = -1,
+ [5] = GSWIP_MII_CFGp(5),
+ [6] = -1,
+ },
+ .mii_pcdu = {
+ [0] = GSWIP_MII_PCDU0,
+ [1] = GSWIP_MII_PCDU1,
+ [2 ... 4] = -1,
+ [5] = GSWIP_MII_PCDU5,
+ [6] = -1,
+ },
.phylink_get_caps = gswip_xrx200_phylink_get_caps,
.pce_microcode = &gswip_pce_microcode,
.pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
@@ -474,10 +480,20 @@ static const struct gswip_hw_info gswip_xrx200 = {
};
static const struct gswip_hw_info gswip_xrx300 = {
- .max_ports = 7,
+ .max_ports = GSWIP_MAX_PORTS,
.allowed_cpu_ports = BIT(6),
- .mii_ports = BIT(0) | BIT(5),
- .mii_port_reg_offset = 0,
+ .mii_cfg = {
+ [0] = GSWIP_MII_CFGp(0),
+ [1 ... 4] = -1,
+ [5] = GSWIP_MII_CFGp(5),
+ [6] = -1,
+ },
+ .mii_pcdu = {
+ [0] = GSWIP_MII_PCDU0,
+ [1 ... 4] = -1,
+ [5] = GSWIP_MII_PCDU5,
+ [6] = -1,
+ },
.phylink_get_caps = gswip_xrx300_phylink_get_caps,
.pce_microcode = &gswip_pce_microcode,
.pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
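Both tables use -1 as a "no such register" sentinel and store the register offset directly, replacing the old mii_ports bitmask plus mii_port_reg_offset arithmetic. A condensed restatement of the lookup done by gswip_mii_mask_cfg() below, under that convention:

	static void gswip_mii_set(struct gswip_priv *priv, int port,
				  u32 mask, u32 set)
	{
		s16 reg = priv->hw_info->mii_cfg[port];

		if (reg == -1)	/* port has no MII_CFG register */
			return;

		regmap_write_bits(priv->mii, reg, mask, set);
	}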
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.h b/drivers/net/dsa/lantiq/lantiq_gswip.h
index 2e0f2afbadbb..bc3686faad0d 100644
--- a/drivers/net/dsa/lantiq/lantiq_gswip.h
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.h
@@ -243,6 +243,8 @@
#define GSWIP_VLAN_UNAWARE_PVID 0
+#define GSWIP_MAX_PORTS 7
+
struct gswip_pce_microcode {
u16 val_3;
u16 val_2;
@@ -253,8 +255,8 @@ struct gswip_pce_microcode {
struct gswip_hw_info {
int max_ports;
unsigned int allowed_cpu_ports;
- unsigned int mii_ports;
- int mii_port_reg_offset;
+ s16 mii_cfg[GSWIP_MAX_PORTS];
+ s16 mii_pcdu[GSWIP_MAX_PORTS];
bool supports_2500m;
const struct gswip_pce_microcode (*pce_microcode)[];
size_t pce_microcode_size;
@@ -263,6 +265,7 @@ struct gswip_hw_info {
struct phylink_config *config);
struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
phy_interface_t interface);
+ int (*port_setup)(struct dsa_switch *ds, int port);
};
struct gswip_gphy_fw {
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip_common.c b/drivers/net/dsa/lantiq/lantiq_gswip_common.c
index e790f2ef7588..0e8eedf64d3a 100644
--- a/drivers/net/dsa/lantiq/lantiq_gswip_common.c
+++ b/drivers/net/dsa/lantiq/lantiq_gswip_common.c
@@ -118,15 +118,11 @@ static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 mask, u32 set,
int port)
{
- int reg_port;
-
/* MII_CFG register only exists for MII ports */
- if (!(priv->hw_info->mii_ports & BIT(port)))
+ if (priv->hw_info->mii_cfg[port] == -1)
return;
- reg_port = port + priv->hw_info->mii_port_reg_offset;
-
- regmap_write_bits(priv->mii, GSWIP_MII_CFGp(reg_port), mask,
+ regmap_write_bits(priv->mii, priv->hw_info->mii_cfg[port], mask,
set);
}
@@ -425,6 +421,12 @@ static int gswip_port_setup(struct dsa_switch *ds, int port)
struct gswip_priv *priv = ds->priv;
int err;
+ if (priv->hw_info->port_setup) {
+ err = priv->hw_info->port_setup(ds, port);
+ if (err)
+ return err;
+ }
+
if (!dsa_is_cpu_port(ds, port)) {
err = gswip_add_single_port_br(priv, port, true);
if (err)
@@ -604,28 +606,13 @@ static void gswip_mii_delay_setup(struct gswip_priv *priv, struct dsa_port *dp,
u32 tx_delay = GSWIP_MII_PCDU_TXDLY_DEFAULT;
u32 rx_delay = GSWIP_MII_PCDU_RXDLY_DEFAULT;
struct device_node *port_dn = dp->dn;
- u16 mii_pcdu_reg;
/* As MII_PCDU registers only exist for MII ports, silently return
* unless the port is an MII port
*/
- if (!(priv->hw_info->mii_ports & BIT(dp->index)))
+ if (priv->hw_info->mii_pcdu[dp->index] == -1)
return;
- switch (dp->index + priv->hw_info->mii_port_reg_offset) {
- case 0:
- mii_pcdu_reg = GSWIP_MII_PCDU0;
- break;
- case 1:
- mii_pcdu_reg = GSWIP_MII_PCDU1;
- break;
- case 5:
- mii_pcdu_reg = GSWIP_MII_PCDU5;
- break;
- default:
- return;
- }
-
/* legacy code to set default delays according to the interface mode */
switch (interface) {
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -646,7 +633,7 @@ static void gswip_mii_delay_setup(struct gswip_priv *priv, struct dsa_port *dp,
of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
- regmap_write_bits(priv->mii, mii_pcdu_reg,
+ regmap_write_bits(priv->mii, priv->hw_info->mii_pcdu[dp->index],
GSWIP_MII_PCDU_TXDLY_MASK |
GSWIP_MII_PCDU_RXDLY_MASK,
GSWIP_MII_PCDU_TXDLY(tx_delay) |
diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.c b/drivers/net/dsa/lantiq/mxl-gsw1xx.c
index f8ff8a604bf5..a1104b2f92a9 100644
--- a/drivers/net/dsa/lantiq/mxl-gsw1xx.c
+++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.c
@@ -15,6 +15,8 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
+#include <linux/phy/phy-common-props.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>
#include <net/dsa.h>
@@ -229,11 +231,17 @@ static int gsw1xx_pcs_phy_xaui_write(struct gsw1xx_priv *priv, u16 addr,
1000, 100000);
}
-static int gsw1xx_pcs_reset(struct gsw1xx_priv *priv)
+static int gsw1xx_pcs_reset(struct gsw1xx_priv *priv, phy_interface_t interface)
{
+ struct dsa_port *sgmii_port;
+ unsigned int pol;
int ret;
u16 val;
+ sgmii_port = dsa_to_port(priv->gswip.ds, GSW1XX_SGMII_PORT);
+ if (!sgmii_port)
+ return -EINVAL;
+
/* Assert and deassert SGMII shell reset */
ret = regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
GSW1XX_RST_REQ_SGMII_SHELL);
@@ -260,15 +268,20 @@ static int gsw1xx_pcs_reset(struct gsw1xx_priv *priv)
FIELD_PREP(GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT,
GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT_DEF);
+ ret = phy_get_manual_rx_polarity(of_fwnode_handle(sgmii_port->dn),
+ phy_modes(interface), &pol);
+ if (ret)
+ return ret;
+
/* RX lane seems to be inverted internally, so bit
* GSW1XX_SGMII_PHY_RX0_CFG2_INVERT needs to be set for normal
- * (ie. non-inverted) operation.
- *
- * TODO: Take care of inverted RX pair once generic property is
- * available
+	 * (ie. non-inverted) operation matching the chip's external pins as
+ * described in datasheets dated 2023-11-08, ie. pin B20 (RX0_P) being
+ * the positive signal and pin B21 (RX0_M) being the negative signal of
+ * the differential input pair.
*/
-
- val |= GSW1XX_SGMII_PHY_RX0_CFG2_INVERT;
+ if (pol == PHY_POL_NORMAL)
+ val |= GSW1XX_SGMII_PHY_RX0_CFG2_INVERT;
ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_RX0_CFG2, val);
if (ret < 0)
@@ -277,9 +290,13 @@ static int gsw1xx_pcs_reset(struct gsw1xx_priv *priv)
val = FIELD_PREP(GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL,
GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL_DEF);
- /* TODO: Take care of inverted TX pair once generic property is
- * available
- */
+ ret = phy_get_manual_tx_polarity(of_fwnode_handle(sgmii_port->dn),
+ phy_modes(interface), &pol);
+ if (ret)
+ return ret;
+
+ if (pol == PHY_POL_INVERT)
+ val |= GSW1XX_SGMII_PHY_TX0_CFG3_INVERT;
ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_TX0_CFG3, val);
if (ret < 0)
@@ -336,7 +353,7 @@ static int gsw1xx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
priv->tbi_interface = PHY_INTERFACE_MODE_NA;
if (!reconf)
- ret = gsw1xx_pcs_reset(priv);
+ ret = gsw1xx_pcs_reset(priv, interface);
if (ret)
return ret;
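The polarity handling above composes two inversions: the SerDes RX lane is flipped internally, so the register bit must be the complement of the board wiring, while TX follows the wiring directly. A condensed restatement of the RX rule (register and bit names from the hunks above, helper name hypothetical):

	static u16 gsw1xx_rx0_invert_bit(unsigned int board_pol)
	{
		/* board normal -> set INVERT to cancel the internal flip;
		 * board inverted -> leave it clear, the two flips cancel
		 */
		return board_pol == PHY_POL_NORMAL ?
		       GSW1XX_SGMII_PHY_RX0_CFG2_INVERT : 0;
	}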
@@ -502,6 +519,14 @@ static const struct phylink_pcs_ops gsw1xx_pcs_ops = {
.pcs_link_up = gsw1xx_pcs_link_up,
};
+static void gsw1xx_phylink_get_lpi_caps(struct phylink_config *config)
+{
+ config->lpi_capabilities = MAC_100FD | MAC_1000FD;
+ config->lpi_timer_default = 20;
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
+}
+
static void gsw1xx_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
@@ -511,14 +536,12 @@ static void gsw1xx_phylink_get_caps(struct dsa_switch *ds, int port,
MAC_10 | MAC_100 | MAC_1000;
switch (port) {
- case 0:
- case 1:
- case 2:
- case 3:
+ case 0 ... 3: /* built-in PHYs */
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
- case 4: /* port 4: SGMII */
+
+ case 4: /* SGMII */
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
@@ -529,17 +552,40 @@ static void gsw1xx_phylink_get_caps(struct dsa_switch *ds, int port,
config->mac_capabilities |= MAC_2500FD;
}
return; /* no support for EEE on SGMII port */
- case 5: /* port 5: RGMII or RMII */
+
+ case 5: /* RGMII or RMII */
__set_bit(PHY_INTERFACE_MODE_RMII,
config->supported_interfaces);
phy_interface_set_rgmii(config->supported_interfaces);
break;
}
- config->lpi_capabilities = MAC_100FD | MAC_1000FD;
- config->lpi_timer_default = 20;
- memcpy(config->lpi_interfaces, config->supported_interfaces,
- sizeof(config->lpi_interfaces));
+ gsw1xx_phylink_get_lpi_caps(config);
+}
+
+static void gsw150_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+
+ switch (port) {
+ case 0 ... 4: /* built-in PHYs */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+
+ case 5: /* GMII or RGMII */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ fallthrough;
+
+ case 6: /* RGMII */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+ }
+
+ gsw1xx_phylink_get_lpi_caps(config);
}
static struct phylink_pcs *gsw1xx_phylink_mac_select_pcs(struct phylink_config *config,
@@ -559,6 +605,43 @@ static struct phylink_pcs *gsw1xx_phylink_mac_select_pcs(struct phylink_config *
}
}
+static int gsw1xx_rmii_slew_rate(const struct device_node *np, struct gsw1xx_priv *priv,
+ const char *prop, u16 mask)
+{
+ u32 rate;
+ int ret;
+
+ ret = of_property_read_u32(np, prop, &rate);
+ /* Optional property */
+ if (ret == -EINVAL)
+ return 0;
+ if (ret < 0 || rate > 1) {
+ dev_err(&priv->mdio_dev->dev, "Invalid %s value\n", prop);
+ return (ret < 0) ? ret : -EINVAL;
+ }
+
+ return regmap_update_bits(priv->shell, GSW1XX_SHELL_RGMII_SLEW_CFG, mask, mask * rate);
+}
+
+static int gsw1xx_port_setup(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct device_node *np = dp->dn;
+ struct gsw1xx_priv *gsw1xx_priv;
+ struct gswip_priv *gswip_priv;
+
+ if (dp->index != GSW1XX_MII_PORT)
+ return 0;
+
+ gswip_priv = ds->priv;
+ gsw1xx_priv = container_of(gswip_priv, struct gsw1xx_priv, gswip);
+
+ return gsw1xx_rmii_slew_rate(np, gsw1xx_priv,
+ "maxlinear,slew-rate-txc", RGMII_SLEW_CFG_DRV_TXC) ?:
+ gsw1xx_rmii_slew_rate(np, gsw1xx_priv,
+ "maxlinear,slew-rate-txd", RGMII_SLEW_CFG_DRV_TXD);
+}
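Two idioms above are worth spelling out: mask * rate maps a 0/1 property value onto "clear" or "set" of the whole mask in a single regmap_update_bits() call, and the GNU a ?: b chaining returns the first non-zero (error) value, or 0 if every call succeeds. A generic sketch of both, with hypothetical names:

	static int set_bool_bit(struct regmap *map, unsigned int reg,
				unsigned int mask, u32 val01)
	{
		/* val01 is 0 or 1, so mask * val01 is 0 or mask */
		return regmap_update_bits(map, reg, mask, mask * val01);
	}

	/* first failing call short-circuits, as in gsw1xx_port_setup() */
	return set_bool_bit(map, REG_A, BIT(2), a) ?:
	       set_bool_bit(map, REG_B, BIT(3), b);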
+
static struct regmap *gsw1xx_regmap_init(struct gsw1xx_priv *priv,
const char *name,
unsigned int reg_base,
@@ -579,11 +662,35 @@ static struct regmap *gsw1xx_regmap_init(struct gsw1xx_priv *priv,
priv, &config);
}
+static int gsw1xx_serdes_pcs_init(struct gsw1xx_priv *priv)
+{
+ /* do nothing if the chip doesn't have a SerDes PCS */
+ if (!priv->gswip.hw_info->mac_select_pcs)
+ return 0;
+
+ priv->pcs.ops = &gsw1xx_pcs_ops;
+ priv->pcs.poll = true;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ priv->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ priv->pcs.supported_interfaces);
+ if (priv->gswip.hw_info->supports_2500m)
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ priv->pcs.supported_interfaces);
+ priv->tbi_interface = PHY_INTERFACE_MODE_NA;
+
+ /* assert SGMII reset to power down SGMII unit */
+ return regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+}
+
static int gsw1xx_probe(struct mdio_device *mdiodev)
{
struct device *dev = &mdiodev->dev;
struct gsw1xx_priv *priv;
- u32 version;
+ u32 version, val;
+ u8 shellver;
+ u16 pnum;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -631,20 +738,28 @@ static int gsw1xx_probe(struct mdio_device *mdiodev)
if (IS_ERR(priv->shell))
return PTR_ERR(priv->shell);
- priv->pcs.ops = &gsw1xx_pcs_ops;
- priv->pcs.poll = true;
- __set_bit(PHY_INTERFACE_MODE_SGMII,
- priv->pcs.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_1000BASEX,
- priv->pcs.supported_interfaces);
- if (priv->gswip.hw_info->supports_2500m)
- __set_bit(PHY_INTERFACE_MODE_2500BASEX,
- priv->pcs.supported_interfaces);
- priv->tbi_interface = PHY_INTERFACE_MODE_NA;
+ ret = regmap_read(priv->shell, GSW1XX_SHELL_MANU_ID, &val);
+ if (ret < 0)
+ return ret;
- /* assert SGMII reset to power down SGMII unit */
- ret = regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
- GSW1XX_RST_REQ_SGMII_SHELL);
+ /* validate chip ID */
+ if (FIELD_GET(GSW1XX_SHELL_MANU_ID_FIX1, val) != 1)
+ return -ENODEV;
+
+ if (FIELD_GET(GSW1XX_SHELL_MANU_ID_MANID, val) !=
+ GSW1XX_SHELL_MANU_ID_MANID_VAL)
+ return -ENODEV;
+
+ pnum = FIELD_GET(GSW1XX_SHELL_MANU_ID_PNUML, val);
+
+ ret = regmap_read(priv->shell, GSW1XX_SHELL_PNUM_ID, &val);
+ if (ret < 0)
+ return ret;
+
+ pnum |= FIELD_GET(GSW1XX_SHELL_PNUM_ID_PNUMM, val) << 4;
+ shellver = FIELD_GET(GSW1XX_SHELL_PNUM_ID_VER, val);
+
+ ret = gsw1xx_serdes_pcs_init(priv);
if (ret < 0)
return ret;
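The 16-bit part number is assembled from both ID words: bits 15:12 of MANU_ID carry the low nibble (PNUML) and bits 11:0 of PNUM_ID carry the upper twelve bits (PNUMM). A worked example with hypothetical register values:

	/* MANU_ID = 0x5713 -> FIX1 = 1, MANID = 0x389, PNUML = 0x5
	 * PNUM_ID = 0x1042 -> VER  = 1, PNUMM = 0x042
	 * pnum = (0x042 << 4) | 0x5 = 0x425, logged as "0x425 v1.1"
	 */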
@@ -664,6 +779,8 @@ static int gsw1xx_probe(struct mdio_device *mdiodev)
if (ret)
return ret;
+ dev_info(dev, "standalone switch part number 0x%x v1.%u\n", pnum, shellver);
+
dev_set_drvdata(dev, &priv->gswip);
return 0;
@@ -702,11 +819,20 @@ static void gsw1xx_shutdown(struct mdio_device *mdiodev)
static const struct gswip_hw_info gsw12x_data = {
.max_ports = GSW1XX_PORTS,
.allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT),
- .mii_ports = BIT(GSW1XX_MII_PORT),
- .mii_port_reg_offset = -GSW1XX_MII_PORT,
+ .mii_cfg = {
+ [0 ... GSW1XX_MII_PORT - 1] = -1,
+ [GSW1XX_MII_PORT] = GSWIP_MII_CFGp(0),
+ [GSW1XX_MII_PORT + 1 ... GSWIP_MAX_PORTS - 1] = -1,
+ },
+ .mii_pcdu = {
+ [0 ... GSW1XX_MII_PORT - 1] = -1,
+ [GSW1XX_MII_PORT] = GSWIP_MII_PCDU0,
+ [GSW1XX_MII_PORT + 1 ... GSWIP_MAX_PORTS - 1] = -1,
+ },
.mac_select_pcs = gsw1xx_phylink_mac_select_pcs,
.phylink_get_caps = &gsw1xx_phylink_get_caps,
.supports_2500m = true,
+ .port_setup = gsw1xx_port_setup,
.pce_microcode = &gsw1xx_pce_microcode,
.pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
.tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
@@ -715,11 +841,20 @@ static const struct gswip_hw_info gsw12x_data = {
static const struct gswip_hw_info gsw140_data = {
.max_ports = GSW1XX_PORTS,
.allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT),
- .mii_ports = BIT(GSW1XX_MII_PORT),
- .mii_port_reg_offset = -GSW1XX_MII_PORT,
+ .mii_cfg = {
+ [0 ... GSW1XX_MII_PORT - 1] = -1,
+ [GSW1XX_MII_PORT] = GSWIP_MII_CFGp(0),
+ [GSW1XX_MII_PORT + 1 ... GSWIP_MAX_PORTS - 1] = -1,
+ },
+ .mii_pcdu = {
+ [0 ... GSW1XX_MII_PORT - 1] = -1,
+ [GSW1XX_MII_PORT] = GSWIP_MII_PCDU0,
+ [GSW1XX_MII_PORT + 1 ... GSWIP_MAX_PORTS - 1] = -1,
+ },
.mac_select_pcs = gsw1xx_phylink_mac_select_pcs,
.phylink_get_caps = &gsw1xx_phylink_get_caps,
.supports_2500m = true,
+ .port_setup = gsw1xx_port_setup,
.pce_microcode = &gsw1xx_pce_microcode,
.pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
.tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
@@ -728,10 +863,44 @@ static const struct gswip_hw_info gsw140_data = {
static const struct gswip_hw_info gsw141_data = {
.max_ports = GSW1XX_PORTS,
.allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT),
- .mii_ports = BIT(GSW1XX_MII_PORT),
- .mii_port_reg_offset = -GSW1XX_MII_PORT,
+ .mii_cfg = {
+ [0 ... GSW1XX_MII_PORT - 1] = -1,
+ [GSW1XX_MII_PORT] = GSWIP_MII_CFGp(0),
+ [GSW1XX_MII_PORT + 1 ... GSWIP_MAX_PORTS - 1] = -1,
+ },
+ .mii_pcdu = {
+ [0 ... GSW1XX_MII_PORT - 1] = -1,
+ [GSW1XX_MII_PORT] = GSWIP_MII_PCDU0,
+ [GSW1XX_MII_PORT + 1 ... GSWIP_MAX_PORTS - 1] = -1,
+ },
.mac_select_pcs = gsw1xx_phylink_mac_select_pcs,
.phylink_get_caps = gsw1xx_phylink_get_caps,
+ .port_setup = gsw1xx_port_setup,
+ .pce_microcode = &gsw1xx_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
+};
+
+static const struct gswip_hw_info gsw150_data = {
+ .max_ports = GSW150_PORTS,
+ .allowed_cpu_ports = BIT(5) | BIT(6),
+ .mii_cfg = {
+ [0 ... 4] = -1,
+ [5] = 0,
+ [6] = 10,
+ },
+ .mii_pcdu = {
+ [0 ... 4] = -1,
+ [5] = 1,
+ [6] = 11,
+ },
+ .phylink_get_caps = gsw150_phylink_get_caps,
+ /* There is only a single RGMII_SLEW_CFG register in GSW150 and it is
+ * unknown if RGMII slew configuration affects both RGMII ports
+ * or only port 5. Use .port_setup which assumes it affects port 5
+ * for now.
+ */
+ .port_setup = gsw1xx_port_setup,
.pce_microcode = &gsw1xx_pce_microcode,
.pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
.tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
@@ -742,6 +911,8 @@ static const struct gswip_hw_info gsw141_data = {
* GSW145 is the industrial temperature version of GSW140.
*/
static const struct of_device_id gsw1xx_of_match[] = {
+ { .compatible = "intel,gsw150", .data = &gsw150_data },
+ { .compatible = "lantiq,peb7084", .data = &gsw150_data },
{ .compatible = "maxlinear,gsw120", .data = &gsw12x_data },
{ .compatible = "maxlinear,gsw125", .data = &gsw12x_data },
{ .compatible = "maxlinear,gsw140", .data = &gsw140_data },
@@ -765,5 +936,5 @@ static struct mdio_driver gsw1xx_driver = {
mdio_module_driver(gsw1xx_driver);
MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
-MODULE_DESCRIPTION("Driver for MaxLinear GSW1xx ethernet switch");
+MODULE_DESCRIPTION("Driver for Intel/MaxLinear GSW1xx Ethernet switch");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.h b/drivers/net/dsa/lantiq/mxl-gsw1xx.h
index 38e03c048a26..caa8f1008587 100644
--- a/drivers/net/dsa/lantiq/mxl-gsw1xx.h
+++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.h
@@ -10,6 +10,8 @@
#include <linux/bitfield.h>
#define GSW1XX_PORTS 6
+#define GSW150_PORTS 7
+
/* Port used for RGMII or optional RMII */
#define GSW1XX_MII_PORT 5
/* Port used for SGMII */
@@ -108,8 +110,19 @@
#define GSW1XX_SHELL_BASE 0xfa00
#define GSW1XX_SHELL_RST_REQ 0x01
#define GSW1XX_RST_REQ_SGMII_SHELL BIT(5)
+#define GSW1XX_SHELL_MANU_ID 0x10
+#define GSW1XX_SHELL_MANU_ID_PNUML GENMASK(15, 12)
+#define GSW1XX_SHELL_MANU_ID_MANID GENMASK(11, 1)
+#define GSW1XX_SHELL_MANU_ID_MANID_VAL 0x389
+#define GSW1XX_SHELL_MANU_ID_FIX1 BIT(0)
+#define GSW1XX_SHELL_PNUM_ID 0x11
+#define GSW1XX_SHELL_PNUM_ID_VER GENMASK(15, 12)
+#define GSW1XX_SHELL_PNUM_ID_PNUMM GENMASK(11, 0)
+
/* RGMII PAD Slew Control Register */
#define GSW1XX_SHELL_RGMII_SLEW_CFG 0x78
+#define RGMII_SLEW_CFG_DRV_TXC BIT(2)
+#define RGMII_SLEW_CFG_DRV_TXD BIT(3)
#define RGMII_SLEW_CFG_RX_2_5_V BIT(4)
#define RGMII_SLEW_CFG_TX_2_5_V BIT(5)
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 0c10351fe5eb..e5fa1f5fc09b 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -569,6 +569,12 @@ static const u16 ksz8463_regs[] = {
[S_START_CTRL] = 0x01,
[S_BROADCAST_CTRL] = 0x06,
[S_MULTICAST_CTRL] = 0x04,
+ [PTP_CLK_CTRL] = 0x0600,
+ [PTP_RTC_NANOSEC] = 0x0604,
+ [PTP_RTC_SEC] = 0x0608,
+ [PTP_RTC_SUB_NANOSEC] = 0x060C,
+ [PTP_SUBNANOSEC_RATE] = 0x0610,
+ [PTP_MSG_CONF1] = 0x0620,
};
static const u32 ksz8463_masks[] = {
@@ -803,6 +809,12 @@ static const u16 ksz9477_regs[] = {
[REG_SW_PME_CTRL] = 0x0006,
[REG_PORT_PME_STATUS] = 0x0013,
[REG_PORT_PME_CTRL] = 0x0017,
+ [PTP_CLK_CTRL] = 0x0500,
+ [PTP_RTC_SUB_NANOSEC] = 0x0502,
+ [PTP_RTC_NANOSEC] = 0x0504,
+ [PTP_RTC_SEC] = 0x0508,
+ [PTP_SUBNANOSEC_RATE] = 0x050C,
+ [PTP_MSG_CONF1] = 0x0514,
};
static const u32 ksz9477_masks[] = {
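Moving the PTP offsets into the enum-indexed regs tables lets the shared PTP code address both register maps uniformly: KSZ8463 keeps its PTP block at 0x06xx, KSZ9477 at 0x05xx. The resulting access pattern, as used throughout ksz_ptp.c below:

	const u16 *regs = dev->info->regs;

	/* same call on either chip; the per-chip table supplies the offset */
	ret = ksz_rmw16(dev, regs[PTP_CLK_CTRL], PTP_CLK_ENABLE,
			PTP_CLK_ENABLE);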
@@ -2905,7 +2917,6 @@ static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
int ret, n;
kirq->dev = dev;
- kirq->masked = ~0;
kirq->domain = irq_domain_create_simple(dev_fwnode(dev->dev), kirq->nirqs, 0,
&ksz_irq_domain_ops, kirq);
@@ -2935,6 +2946,7 @@ static int ksz_girq_setup(struct ksz_device *dev)
girq->nirqs = dev->info->port_cnt;
girq->reg_mask = REG_SW_PORT_INT_MASK__1;
girq->reg_status = REG_SW_PORT_INT_STATUS__1;
+ girq->masked = ~0;
snprintf(girq->name, sizeof(girq->name), "global_port_irq");
girq->irq_num = dev->irq;
@@ -2949,6 +2961,7 @@ static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
pirq->nirqs = dev->info->port_nirqs;
pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK);
pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS);
+ pirq->masked = ~0;
snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index c65188cd3c0a..929aff4c55de 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -108,6 +108,7 @@ struct ksz_irq {
int irq_num;
char name[16];
struct ksz_device *dev;
+ u16 irq0_offset;
};
struct ksz_ptp_irq {
@@ -270,6 +271,12 @@ enum ksz_regs {
REG_SW_PME_CTRL,
REG_PORT_PME_STATUS,
REG_PORT_PME_CTRL,
+ PTP_CLK_CTRL,
+ PTP_RTC_NANOSEC,
+ PTP_RTC_SEC,
+ PTP_RTC_SUB_NANOSEC,
+ PTP_SUBNANOSEC_RATE,
+ PTP_MSG_CONF1,
};
enum ksz_masks {
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index 997e4a76d0a6..4a2cc57a628f 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -263,6 +263,7 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
{
struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds);
struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ const u16 *regs = dev->info->regs;
struct ksz_port *prt;
struct dsa_port *dp;
bool tag_en = false;
@@ -283,7 +284,7 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
tagger_data->hwtstamp_set_state(dev->ds, tag_en);
- return ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_ENABLE,
+ return ksz_rmw16(dev, regs[PTP_MSG_CONF1], PTP_ENABLE,
tag_en ? PTP_ENABLE : 0);
}
@@ -335,6 +336,7 @@ static int ksz_set_hwtstamp_config(struct ksz_device *dev,
struct ksz_port *prt,
struct kernel_hwtstamp_config *config)
{
+ const u16 *regs = dev->info->regs;
int ret;
if (config->flags)
@@ -353,7 +355,7 @@ static int ksz_set_hwtstamp_config(struct ksz_device *dev,
prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
prt->hwts_tx_en = true;
- ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, PTP_1STEP);
+ ret = ksz_rmw16(dev, regs[PTP_MSG_CONF1], PTP_1STEP, PTP_1STEP);
if (ret)
return ret;
@@ -367,7 +369,7 @@ static int ksz_set_hwtstamp_config(struct ksz_device *dev,
prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true;
prt->hwts_tx_en = true;
- ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, 0);
+ ret = ksz_rmw16(dev, regs[PTP_MSG_CONF1], PTP_1STEP, 0);
if (ret)
return ret;
@@ -585,25 +587,26 @@ void ksz_port_deferred_xmit(struct kthread_work *work)
static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts)
{
+ const u16 *regs = dev->info->regs;
u32 nanoseconds;
u32 seconds;
u8 phase;
int ret;
/* Copy current PTP clock into shadow registers and read */
- ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_READ_TIME, PTP_READ_TIME);
+ ret = ksz_rmw16(dev, regs[PTP_CLK_CTRL], PTP_READ_TIME, PTP_READ_TIME);
if (ret)
return ret;
- ret = ksz_read8(dev, REG_PTP_RTC_SUB_NANOSEC__2, &phase);
+ ret = ksz_read8(dev, regs[PTP_RTC_SUB_NANOSEC], &phase);
if (ret)
return ret;
- ret = ksz_read32(dev, REG_PTP_RTC_NANOSEC, &nanoseconds);
+ ret = ksz_read32(dev, regs[PTP_RTC_NANOSEC], &nanoseconds);
if (ret)
return ret;
- ret = ksz_read32(dev, REG_PTP_RTC_SEC, &seconds);
+ ret = ksz_read32(dev, regs[PTP_RTC_SEC], &seconds);
if (ret)
return ret;
@@ -676,24 +679,25 @@ static int ksz_ptp_settime(struct ptp_clock_info *ptp,
{
struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ const u16 *regs = dev->info->regs;
int ret;
mutex_lock(&ptp_data->lock);
/* Write to shadow registers and Load PTP clock */
- ret = ksz_write16(dev, REG_PTP_RTC_SUB_NANOSEC__2, PTP_RTC_0NS);
+ ret = ksz_write16(dev, regs[PTP_RTC_SUB_NANOSEC], PTP_RTC_0NS);
if (ret)
goto unlock;
- ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, ts->tv_nsec);
+ ret = ksz_write32(dev, regs[PTP_RTC_NANOSEC], ts->tv_nsec);
if (ret)
goto unlock;
- ret = ksz_write32(dev, REG_PTP_RTC_SEC, ts->tv_sec);
+ ret = ksz_write32(dev, regs[PTP_RTC_SEC], ts->tv_sec);
if (ret)
goto unlock;
- ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_LOAD_TIME, PTP_LOAD_TIME);
+ ret = ksz_rmw16(dev, regs[PTP_CLK_CTRL], PTP_LOAD_TIME, PTP_LOAD_TIME);
if (ret)
goto unlock;
@@ -723,6 +727,7 @@ static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ const u16 *regs = dev->info->regs;
u64 base, adj;
bool negative;
u32 data32;
@@ -739,16 +744,16 @@ static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
if (!negative)
data32 |= PTP_RATE_DIR;
- ret = ksz_write32(dev, REG_PTP_SUBNANOSEC_RATE, data32);
+ ret = ksz_write32(dev, regs[PTP_SUBNANOSEC_RATE], data32);
if (ret)
goto unlock;
- ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE,
+ ret = ksz_rmw16(dev, regs[PTP_CLK_CTRL], PTP_CLK_ADJ_ENABLE,
PTP_CLK_ADJ_ENABLE);
if (ret)
goto unlock;
} else {
- ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, 0);
+ ret = ksz_rmw16(dev, regs[PTP_CLK_CTRL], PTP_CLK_ADJ_ENABLE, 0);
if (ret)
goto unlock;
}
@@ -763,6 +768,7 @@ static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
struct timespec64 delta64 = ns_to_timespec64(delta);
+ const u16 *regs = dev->info->regs;
s32 sec, nsec;
u16 data16;
int ret;
@@ -774,15 +780,15 @@ static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
*/
sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec);
- ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, abs(nsec));
+ ret = ksz_write32(dev, regs[PTP_RTC_NANOSEC], abs(nsec));
if (ret)
goto unlock;
- ret = ksz_write32(dev, REG_PTP_RTC_SEC, abs(sec));
+ ret = ksz_write32(dev, regs[PTP_RTC_SEC], abs(sec));
if (ret)
goto unlock;
- ret = ksz_read16(dev, REG_PTP_CLK_CTRL, &data16);
+ ret = ksz_read16(dev, regs[PTP_CLK_CTRL], &data16);
if (ret)
goto unlock;
@@ -794,7 +800,7 @@ static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
else
data16 |= PTP_STEP_DIR;
- ret = ksz_write16(dev, REG_PTP_CLK_CTRL, data16);
+ ret = ksz_write16(dev, regs[PTP_CLK_CTRL], data16);
if (ret)
goto unlock;
@@ -882,9 +888,10 @@ out:
static int ksz_ptp_start_clock(struct ksz_device *dev)
{
struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ const u16 *regs = dev->info->regs;
int ret;
- ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ENABLE, PTP_CLK_ENABLE);
+ ret = ksz_rmw16(dev, regs[PTP_CLK_CTRL], PTP_CLK_ENABLE, PTP_CLK_ENABLE);
if (ret)
return ret;
@@ -897,6 +904,7 @@ static int ksz_ptp_start_clock(struct ksz_device *dev)
int ksz_ptp_clock_register(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
struct ksz_ptp_data *ptp_data;
int ret;
u8 i;
@@ -936,7 +944,7 @@ int ksz_ptp_clock_register(struct dsa_switch *ds)
/* Currently only P2P mode is supported. When 802_1AS bit is set, it
* forwards all PTP packets to host port and none to other ports.
*/
- ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_TC_P2P | PTP_802_1AS,
+ ret = ksz_rmw16(dev, regs[PTP_MSG_CONF1], PTP_TC_P2P | PTP_802_1AS,
PTP_TC_P2P | PTP_802_1AS);
if (ret)
return ret;
@@ -959,6 +967,11 @@ void ksz_ptp_clock_unregister(struct dsa_switch *ds)
ptp_clock_unregister(ptp_data->clock);
}
+static int ksz_read_ts(struct ksz_port *port, u16 reg, u32 *ts)
+{
+ return ksz_read32(port->ksz_dev, reg, ts);
+}
+
static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
{
struct ksz_ptp_irq *ptpmsg_irq = dev_id;
@@ -972,7 +985,7 @@ static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
dev = port->ksz_dev;
if (ptpmsg_irq->ts_en) {
- ret = ksz_read32(dev, ptpmsg_irq->ts_reg, &tstamp_raw);
+ ret = ksz_read_ts(port, ptpmsg_irq->ts_reg, &tstamp_raw);
if (ret)
return IRQ_NONE;
@@ -1008,7 +1021,7 @@ static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id)
return IRQ_NONE;
for (n = 0; n < ptpirq->nirqs; ++n) {
- if (data & BIT(n + KSZ_PTP_INT_START)) {
+ if (data & BIT(n + ptpirq->irq0_offset)) {
sub_irq = irq_find_mapping(ptpirq->domain, n);
handle_nested_irq(sub_irq);
++nhandled;
@@ -1023,14 +1036,14 @@ static void ksz_ptp_irq_mask(struct irq_data *d)
{
struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
- kirq->masked &= ~BIT(d->hwirq + KSZ_PTP_INT_START);
+ kirq->masked &= ~BIT(d->hwirq + kirq->irq0_offset);
}
static void ksz_ptp_irq_unmask(struct irq_data *d)
{
struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
- kirq->masked |= BIT(d->hwirq + KSZ_PTP_INT_START);
+ kirq->masked |= BIT(d->hwirq + kirq->irq0_offset);
}
static void ksz_ptp_irq_bus_lock(struct irq_data *d)
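With the bit base stored in the new irq0_offset field, one pair of mask/unmask helpers serves both interrupt flavours; only the offset differs per controller. A condensed sketch of the mapping, assuming the setup values from the hunks in this patch:

	/* hwirq n maps to mask/status bit (n + irq0_offset):
	 *   global/port irq controllers: irq0_offset = 0
	 *   PTP message irq controller:  irq0_offset = KSZ_PTP_INT_START
	 */
	kirq->masked |= BIT(d->hwirq + kirq->irq0_offset);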
@@ -1126,6 +1139,8 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2);
ptpirq->reg_status = ops->get_port_addr(p,
REG_PTP_PORT_TX_INT_STATUS__2);
+ ptpirq->irq0_offset = KSZ_PTP_INT_START;
+
snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p);
init_completion(&port->tstamp_msg_comp);
diff --git a/drivers/net/dsa/microchip/ksz_ptp_reg.h b/drivers/net/dsa/microchip/ksz_ptp_reg.h
index d71e85510cda..eab9aecb7fa8 100644
--- a/drivers/net/dsa/microchip/ksz_ptp_reg.h
+++ b/drivers/net/dsa/microchip/ksz_ptp_reg.h
@@ -15,8 +15,7 @@
#define LED_SRC_PTP_GPIO_2 BIT(2)
/* 5 - PTP Clock */
-#define REG_PTP_CLK_CTRL 0x0500
-
+/* REG_PTP_CLK_CTRL */
#define PTP_STEP_ADJ BIT(6)
#define PTP_STEP_DIR BIT(5)
#define PTP_READ_TIME BIT(4)
@@ -25,17 +24,11 @@
#define PTP_CLK_ENABLE BIT(1)
#define PTP_CLK_RESET BIT(0)
-#define REG_PTP_RTC_SUB_NANOSEC__2 0x0502
-
+/* REG_PTP_RTC_SUB_NANOSEC */
#define PTP_RTC_SUB_NANOSEC_M 0x0007
#define PTP_RTC_0NS 0x00
-#define REG_PTP_RTC_NANOSEC 0x0504
-
-#define REG_PTP_RTC_SEC 0x0508
-
-#define REG_PTP_SUBNANOSEC_RATE 0x050C
-
+/* REG_PTP_SUBNANOSEC_RATE */
#define PTP_SUBNANOSEC_M 0x3FFFFFFF
#define PTP_RATE_DIR BIT(31)
#define PTP_TMP_RATE_ENABLE BIT(30)
@@ -46,8 +39,7 @@
#define REG_PTP_RATE_DURATION_H 0x0510
#define REG_PTP_RATE_DURATION_L 0x0512
-#define REG_PTP_MSG_CONF1 0x0514
-
+/* REG_PTP_MSG_CONF1 */
#define PTP_802_1AS BIT(7)
#define PTP_ENABLE BIT(6)
#define PTP_ETH_ENABLE BIT(5)
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
index 0286a6cecb6f..11ea924a9f35 100644
--- a/drivers/net/dsa/mt7530-mdio.c
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -113,8 +113,8 @@ mt7531_create_sgmii(struct mt7530_priv *priv)
ret = PTR_ERR(regmap);
break;
}
- pcs = mtk_pcs_lynxi_create(priv->dev, regmap,
- MT7531_PHYA_CTRL_SIGNAL3, 0);
+ pcs = mtk_pcs_lynxi_create(priv->dev, NULL, regmap,
+ MT7531_PHYA_CTRL_SIGNAL3);
if (!pcs) {
ret = -ENXIO;
break;
diff --git a/drivers/net/dsa/mxl862xx/Kconfig b/drivers/net/dsa/mxl862xx/Kconfig
new file mode 100644
index 000000000000..4db7bab21a71
--- /dev/null
+++ b/drivers/net/dsa/mxl862xx/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_MXL862
+ tristate "MaxLinear MxL862xx"
+ depends on NET_DSA
+ select MAXLINEAR_GPHY
+ select NET_DSA_TAG_MXL_862XX
+ help
+ This enables support for the MaxLinear MxL862xx switch family.
+	  These switches have two 10GE SerDes interfaces, one of which is
+	  typically used as the CPU port.
+ - MxL86282 has eight 2.5 Gigabit PHYs
+ - MxL86252 has five 2.5 Gigabit PHYs
diff --git a/drivers/net/dsa/mxl862xx/Makefile b/drivers/net/dsa/mxl862xx/Makefile
new file mode 100644
index 000000000000..d23dd3cd511d
--- /dev/null
+++ b/drivers/net/dsa/mxl862xx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_MXL862) += mxl862xx_dsa.o
+mxl862xx_dsa-y := mxl862xx.o mxl862xx-host.o
diff --git a/drivers/net/dsa/mxl862xx/mxl862xx-api.h b/drivers/net/dsa/mxl862xx/mxl862xx-api.h
new file mode 100644
index 000000000000..a9f599dbca25
--- /dev/null
+++ b/drivers/net/dsa/mxl862xx/mxl862xx-api.h
@@ -0,0 +1,675 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __MXL862XX_API_H
+#define __MXL862XX_API_H
+
+#include <linux/if_ether.h>
+
+/**
+ * struct mdio_relay_data - relayed access to the switch's internal MDIO bus
+ * @data: data to be read or written
+ * @phy: PHY index
+ * @mmd: MMD device
+ * @reg: register index
+ */
+struct mdio_relay_data {
+ __le16 data;
+ u8 phy;
+ u8 mmd;
+ __le16 reg;
+} __packed;
+
+/**
+ * struct mxl862xx_register_mod - Register access parameter to directly
+ * modify internal registers
+ * @addr: Register address offset for modification
+ * @data: Value to write to the register address
+ * @mask: Mask of bits to be modified (1 to modify, 0 to ignore)
+ *
+ * Used for direct register modification operations.
+ */
+struct mxl862xx_register_mod {
+ __le16 addr;
+ __le16 data;
+ __le16 mask;
+} __packed;
+
+/**
+ * enum mxl862xx_mac_clear_type - MAC table clear type
+ * @MXL862XX_MAC_CLEAR_PHY_PORT: clear dynamic entries based on port_id
+ * @MXL862XX_MAC_CLEAR_DYNAMIC: clear all dynamic entries
+ */
+enum mxl862xx_mac_clear_type {
+ MXL862XX_MAC_CLEAR_PHY_PORT = 0,
+ MXL862XX_MAC_CLEAR_DYNAMIC,
+};
+
+/**
+ * struct mxl862xx_mac_table_clear - MAC table clear
+ * @type: see &enum mxl862xx_mac_clear_type
+ * @port_id: physical port id
+ */
+struct mxl862xx_mac_table_clear {
+ u8 type;
+ u8 port_id;
+} __packed;
+
+/**
+ * enum mxl862xx_age_timer - Aging Timer Value.
+ * @MXL862XX_AGETIMER_1_SEC: 1 second aging time
+ * @MXL862XX_AGETIMER_10_SEC: 10 seconds aging time
+ * @MXL862XX_AGETIMER_300_SEC: 300 seconds aging time
+ * @MXL862XX_AGETIMER_1_HOUR: 1 hour aging time
+ * @MXL862XX_AGETIMER_1_DAY: 24 hours aging time
+ * @MXL862XX_AGETIMER_CUSTOM: Custom aging time in seconds
+ */
+enum mxl862xx_age_timer {
+ MXL862XX_AGETIMER_1_SEC = 1,
+ MXL862XX_AGETIMER_10_SEC,
+ MXL862XX_AGETIMER_300_SEC,
+ MXL862XX_AGETIMER_1_HOUR,
+ MXL862XX_AGETIMER_1_DAY,
+ MXL862XX_AGETIMER_CUSTOM,
+};
+
+/**
+ * struct mxl862xx_bridge_alloc - Bridge Allocation
+ * @bridge_id: If the bridge allocation is successful, a valid ID will be
+ * returned in this field. Otherwise, INVALID_HANDLE is
+ * returned. For bridge free, this field should contain a
+ * valid ID returned by the bridge allocation. ID 0 is not
+ *	       used for historical reasons.
+ *
+ * Used by MXL862XX_BRIDGE_ALLOC and MXL862XX_BRIDGE_FREE.
+ */
+struct mxl862xx_bridge_alloc {
+ __le16 bridge_id;
+};
+
+/**
+ * enum mxl862xx_bridge_config_mask - Bridge configuration mask
+ * @MXL862XX_BRIDGE_CONFIG_MASK_MAC_LEARNING_LIMIT:
+ * Mask for mac_learning_limit_enable and mac_learning_limit.
+ * @MXL862XX_BRIDGE_CONFIG_MASK_MAC_LEARNED_COUNT:
+ * Mask for mac_learning_count
+ * @MXL862XX_BRIDGE_CONFIG_MASK_MAC_DISCARD_COUNT:
+ * Mask for learning_discard_event
+ * @MXL862XX_BRIDGE_CONFIG_MASK_SUB_METER:
+ * Mask for sub_metering_enable and traffic_sub_meter_id
+ * @MXL862XX_BRIDGE_CONFIG_MASK_FORWARDING_MODE:
+ * Mask for forward_broadcast, forward_unknown_multicast_ip,
+ * forward_unknown_multicast_non_ip and forward_unknown_unicast.
+ * @MXL862XX_BRIDGE_CONFIG_MASK_ALL: Enable all
+ * @MXL862XX_BRIDGE_CONFIG_MASK_FORCE: Bypass any check for debug purpose
+ */
+enum mxl862xx_bridge_config_mask {
+ MXL862XX_BRIDGE_CONFIG_MASK_MAC_LEARNING_LIMIT = BIT(0),
+ MXL862XX_BRIDGE_CONFIG_MASK_MAC_LEARNED_COUNT = BIT(1),
+ MXL862XX_BRIDGE_CONFIG_MASK_MAC_DISCARD_COUNT = BIT(2),
+ MXL862XX_BRIDGE_CONFIG_MASK_SUB_METER = BIT(3),
+ MXL862XX_BRIDGE_CONFIG_MASK_FORWARDING_MODE = BIT(4),
+ MXL862XX_BRIDGE_CONFIG_MASK_ALL = 0x7FFFFFFF,
+ MXL862XX_BRIDGE_CONFIG_MASK_FORCE = BIT(31)
+};
+
+/**
+ * enum mxl862xx_bridge_port_egress_meter - Meters for egress traffic type
+ * @MXL862XX_BRIDGE_PORT_EGRESS_METER_BROADCAST:
+ * Index of broadcast traffic meter
+ * @MXL862XX_BRIDGE_PORT_EGRESS_METER_MULTICAST:
+ * Index of known multicast traffic meter
+ * @MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_MC_IP:
+ * Index of unknown multicast IP traffic meter
+ * @MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_MC_NON_IP:
+ * Index of unknown multicast non-IP traffic meter
+ * @MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_UC:
+ * Index of unknown unicast traffic meter
+ * @MXL862XX_BRIDGE_PORT_EGRESS_METER_OTHERS:
+ * Index of traffic meter for other types
+ * @MXL862XX_BRIDGE_PORT_EGRESS_METER_MAX: Number of index
+ */
+enum mxl862xx_bridge_port_egress_meter {
+ MXL862XX_BRIDGE_PORT_EGRESS_METER_BROADCAST = 0,
+ MXL862XX_BRIDGE_PORT_EGRESS_METER_MULTICAST,
+ MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_MC_IP,
+ MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_MC_NON_IP,
+ MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_UC,
+ MXL862XX_BRIDGE_PORT_EGRESS_METER_OTHERS,
+ MXL862XX_BRIDGE_PORT_EGRESS_METER_MAX,
+};
+
+/**
+ * enum mxl862xx_bridge_forward_mode - Bridge forwarding type of packet
+ * @MXL862XX_BRIDGE_FORWARD_FLOOD: Packet is flooded to port members of
+ * ingress bridge port
+ * @MXL862XX_BRIDGE_FORWARD_DISCARD: Packet is discarded
+ */
+enum mxl862xx_bridge_forward_mode {
+ MXL862XX_BRIDGE_FORWARD_FLOOD = 0,
+ MXL862XX_BRIDGE_FORWARD_DISCARD,
+};
+
+/**
+ * struct mxl862xx_bridge_config - Bridge Configuration
+ * @bridge_id: Bridge ID (FID)
+ * @mask: See &enum mxl862xx_bridge_config_mask
+ * @mac_learning_limit_enable: Enable MAC learning limitation
+ * @mac_learning_limit: Max number of MAC addresses that can be learned in
+ * this bridge (all bridge ports)
+ * @mac_learning_count: Number of MAC addresses learned from this bridge
+ * @learning_discard_event: Number of learning discard events due to
+ * hardware resource not available
+ * @sub_metering_enable: Apply traffic metering per traffic type (such as
+ *			 broadcast, multicast, unknown unicast, etc.)
+ * @traffic_sub_meter_id: Meter used by the bridge process for a specific
+ *			  traffic type (such as broadcast, multicast,
+ *			  unknown unicast, etc.)
+ * @forward_broadcast: Forwarding mode of broadcast traffic. See
+ * &enum mxl862xx_bridge_forward_mode
+ * @forward_unknown_multicast_ip: Forwarding mode of unknown multicast IP
+ * traffic.
+ * See &enum mxl862xx_bridge_forward_mode
+ * @forward_unknown_multicast_non_ip: Forwarding mode of unknown multicast
+ * non-IP traffic.
+ * See &enum mxl862xx_bridge_forward_mode
+ * @forward_unknown_unicast: Forwarding mode of unknown unicast traffic. See
+ * &enum mxl862xx_bridge_forward_mode
+ */
+struct mxl862xx_bridge_config {
+ __le16 bridge_id;
+ __le32 mask; /* enum mxl862xx_bridge_config_mask */
+ u8 mac_learning_limit_enable;
+ __le16 mac_learning_limit;
+ __le16 mac_learning_count;
+ __le32 learning_discard_event;
+ u8 sub_metering_enable[MXL862XX_BRIDGE_PORT_EGRESS_METER_MAX];
+ __le16 traffic_sub_meter_id[MXL862XX_BRIDGE_PORT_EGRESS_METER_MAX];
+ __le32 forward_broadcast; /* enum mxl862xx_bridge_forward_mode */
+ __le32 forward_unknown_multicast_ip; /* enum mxl862xx_bridge_forward_mode */
+ __le32 forward_unknown_multicast_non_ip; /* enum mxl862xx_bridge_forward_mode */
+ __le32 forward_unknown_unicast; /* enum mxl862xx_bridge_forward_mode */
+} __packed;
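Fields in these config structures only take effect when the corresponding bit in @mask is set, so partial updates do not clobber unrelated state. A hedged sketch of a forwarding-mode-only update (fid is a hypothetical caller variable):

	struct mxl862xx_bridge_config cfg = {
		.bridge_id = cpu_to_le16(fid),
		/* only the forwarding-mode fields below are applied */
		.mask = cpu_to_le32(MXL862XX_BRIDGE_CONFIG_MASK_FORWARDING_MODE),
		.forward_broadcast = cpu_to_le32(MXL862XX_BRIDGE_FORWARD_FLOOD),
		.forward_unknown_multicast_ip =
			cpu_to_le32(MXL862XX_BRIDGE_FORWARD_FLOOD),
		.forward_unknown_multicast_non_ip =
			cpu_to_le32(MXL862XX_BRIDGE_FORWARD_FLOOD),
		.forward_unknown_unicast =
			cpu_to_le32(MXL862XX_BRIDGE_FORWARD_DISCARD),
	};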
+
+/**
+ * struct mxl862xx_bridge_port_alloc - Bridge Port Allocation
+ * @bridge_port_id: If the bridge port allocation is successful, a valid ID
+ * will be returned in this field. Otherwise, INVALID_HANDLE
+ * is returned. For bridge port free, this field should
+ * contain a valid ID returned by the bridge port allocation.
+ *
+ * Used by MXL862XX_BRIDGE_PORT_ALLOC and MXL862XX_BRIDGE_PORT_FREE.
+ */
+struct mxl862xx_bridge_port_alloc {
+ __le16 bridge_port_id;
+};
+
+/**
+ * enum mxl862xx_bridge_port_config_mask - Bridge Port configuration mask
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_BRIDGE_ID:
+ * Mask for bridge_id
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_INGRESS_VLAN:
+ * Mask for ingress_extended_vlan_enable,
+ * ingress_extended_vlan_block_id and ingress_extended_vlan_block_size
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_VLAN:
+ * Mask for egress_extended_vlan_enable, egress_extended_vlan_block_id
+ * and egress_extended_vlan_block_size
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_INGRESS_MARKING:
+ * Mask for ingress_marking_mode
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_REMARKING:
+ * Mask for egress_remarking_mode
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_INGRESS_METER:
+ * Mask for ingress_metering_enable and ingress_traffic_meter_id
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_SUB_METER:
+ * Mask for egress_sub_metering_enable and egress_traffic_sub_meter_id
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_CTP_MAPPING:
+ * Mask for dest_logical_port_id, pmapper_enable, dest_sub_if_id_group,
+ * pmapper_mapping_mode, pmapper_id_valid and pmapper
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_BRIDGE_PORT_MAP:
+ * Mask for bridge_port_map
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_MC_DEST_IP_LOOKUP:
+ * Mask for mc_dest_ip_lookup_disable
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_MC_SRC_IP_LOOKUP:
+ * Mask for mc_src_ip_lookup_enable
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_MC_DEST_MAC_LOOKUP:
+ * Mask for dest_mac_lookup_disable
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_MC_SRC_MAC_LEARNING:
+ * Mask for src_mac_learning_disable
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_MAC_SPOOFING:
+ * Mask for mac_spoofing_detect_enable
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_PORT_LOCK:
+ * Mask for port_lock_enable
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_MAC_LEARNING_LIMIT:
+ * Mask for mac_learning_limit_enable and mac_learning_limit
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_MAC_LEARNED_COUNT:
+ * Mask for mac_learning_count
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_INGRESS_VLAN_FILTER:
+ * Mask for ingress_vlan_filter_enable, ingress_vlan_filter_block_id
+ * and ingress_vlan_filter_block_size
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_VLAN_FILTER1:
+ * Mask for bypass_egress_vlan_filter1, egress_vlan_filter1enable,
+ * egress_vlan_filter1block_id and egress_vlan_filter1block_size
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_VLAN_FILTER2:
+ * Mask for egress_vlan_filter2enable, egress_vlan_filter2block_id and
+ * egress_vlan_filter2block_size
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_VLAN_BASED_MAC_LEARNING:
+ * Mask for vlan_tag_selection, vlan_src_mac_priority_enable,
+ * vlan_src_mac_dei_enable, vlan_src_mac_vid_enable,
+ * vlan_dst_mac_priority_enable, vlan_dst_mac_dei_enable and
+ * vlan_dst_mac_vid_enable
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_VLAN_BASED_MULTICAST_LOOKUP:
+ * Mask for vlan_multicast_priority_enable,
+ * vlan_multicast_dei_enable and vlan_multicast_vid_enable
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_LOOP_VIOLATION_COUNTER:
+ * Mask for loop_violation_count
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_ALL: Enable all
+ * @MXL862XX_BRIDGE_PORT_CONFIG_MASK_FORCE: Bypass any check for debug purpose
+ */
+enum mxl862xx_bridge_port_config_mask {
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_BRIDGE_ID = BIT(0),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_INGRESS_VLAN = BIT(1),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_VLAN = BIT(2),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_INGRESS_MARKING = BIT(3),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_REMARKING = BIT(4),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_INGRESS_METER = BIT(5),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_SUB_METER = BIT(6),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_CTP_MAPPING = BIT(7),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_BRIDGE_PORT_MAP = BIT(8),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_MC_DEST_IP_LOOKUP = BIT(9),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_MC_SRC_IP_LOOKUP = BIT(10),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_MC_DEST_MAC_LOOKUP = BIT(11),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_MC_SRC_MAC_LEARNING = BIT(12),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_MAC_SPOOFING = BIT(13),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_PORT_LOCK = BIT(14),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_MAC_LEARNING_LIMIT = BIT(15),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_MAC_LEARNED_COUNT = BIT(16),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_INGRESS_VLAN_FILTER = BIT(17),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_VLAN_FILTER1 = BIT(18),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_VLAN_FILTER2 = BIT(19),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_VLAN_BASED_MAC_LEARNING = BIT(20),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_VLAN_BASED_MULTICAST_LOOKUP = BIT(21),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_LOOP_VIOLATION_COUNTER = BIT(22),
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_ALL = 0x7FFFFFFF,
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_FORCE = BIT(31)
+};
+
+/**
+ * enum mxl862xx_color_marking_mode - Color Marking Mode
+ * @MXL862XX_MARKING_ALL_GREEN: mark packets (except critical) to green
+ * @MXL862XX_MARKING_INTERNAL_MARKING: do not change color and priority
+ * @MXL862XX_MARKING_DEI: DEI mark mode
+ * @MXL862XX_MARKING_PCP_8P0D: PCP 8P0D mark mode
+ * @MXL862XX_MARKING_PCP_7P1D: PCP 7P1D mark mode
+ * @MXL862XX_MARKING_PCP_6P2D: PCP 6P2D mark mode
+ * @MXL862XX_MARKING_PCP_5P3D: PCP 5P3D mark mode
+ * @MXL862XX_MARKING_DSCP_AF: DSCP AF class
+ */
+enum mxl862xx_color_marking_mode {
+ MXL862XX_MARKING_ALL_GREEN = 0,
+ MXL862XX_MARKING_INTERNAL_MARKING,
+ MXL862XX_MARKING_DEI,
+ MXL862XX_MARKING_PCP_8P0D,
+ MXL862XX_MARKING_PCP_7P1D,
+ MXL862XX_MARKING_PCP_6P2D,
+ MXL862XX_MARKING_PCP_5P3D,
+ MXL862XX_MARKING_DSCP_AF,
+};
+
+/**
+ * enum mxl862xx_color_remarking_mode - Color Remarking Mode
+ * @MXL862XX_REMARKING_NONE: values from last process stage
+ * @MXL862XX_REMARKING_DEI: DEI mark mode
+ * @MXL862XX_REMARKING_PCP_8P0D: PCP 8P0D mark mode
+ * @MXL862XX_REMARKING_PCP_7P1D: PCP 7P1D mark mode
+ * @MXL862XX_REMARKING_PCP_6P2D: PCP 6P2D mark mode
+ * @MXL862XX_REMARKING_PCP_5P3D: PCP 5P3D mark mode
+ * @MXL862XX_REMARKING_DSCP_AF: DSCP AF class
+ */
+enum mxl862xx_color_remarking_mode {
+ MXL862XX_REMARKING_NONE = 0,
+ MXL862XX_REMARKING_DEI = 2,
+ MXL862XX_REMARKING_PCP_8P0D,
+ MXL862XX_REMARKING_PCP_7P1D,
+ MXL862XX_REMARKING_PCP_6P2D,
+ MXL862XX_REMARKING_PCP_5P3D,
+ MXL862XX_REMARKING_DSCP_AF,
+};
+
+/**
+ * enum mxl862xx_pmapper_mapping_mode - P-mapper Mapping Mode
+ * @MXL862XX_PMAPPER_MAPPING_PCP: Use PCP for VLAN tagged packets to derive
+ * sub interface ID group
+ * @MXL862XX_PMAPPER_MAPPING_LAG: Use LAG Index for Pmapper access
+ * regardless of IP and VLAN packet
+ * @MXL862XX_PMAPPER_MAPPING_DSCP: Use DSCP for VLAN tagged IP packets to
+ * derive sub interface ID group
+ */
+enum mxl862xx_pmapper_mapping_mode {
+ MXL862XX_PMAPPER_MAPPING_PCP = 0,
+ MXL862XX_PMAPPER_MAPPING_LAG,
+ MXL862XX_PMAPPER_MAPPING_DSCP,
+};
+
+/**
+ * struct mxl862xx_pmapper - P-mapper Configuration
+ * @pmapper_id: Index of P-mapper (0-31)
+ * @dest_sub_if_id_group: Sub interface ID group. Entry 0 is for non-IP and
+ * non-VLAN tagged packets.
+ * Entries 1-8 are PCP mapping entries for VLAN tagged
+ * packets.
+ * Entries 9-72 are DSCP or LAG mapping entries.
+ *
+ * Used by CTP port config and bridge port config. In case of LAG, it is
+ * the user's responsibility to provide the mapped entries in the given
+ * P-mapper table. In other modes the entries are auto-mapped from the
+ * input packet.
+ */
+struct mxl862xx_pmapper {
+ __le16 pmapper_id;
+ u8 dest_sub_if_id_group[73];
+} __packed;
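The 73 entries encode three ranges as documented above. A sketch of the indexing, with a hypothetical helper name:

	/* dest_sub_if_id_group[] layout:
	 *   [0]     non-IP, non-VLAN tagged packets
	 *   [1..8]  PCP 0..7 of VLAN tagged packets
	 *   [9..72] DSCP 0..63 (or LAG) mapping entries
	 */
	static u8 pmapper_dscp_group(const struct mxl862xx_pmapper *p, u8 dscp)
	{
		return p->dest_sub_if_id_group[9 + (dscp & 0x3f)];
	}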
+
+/**
+ * struct mxl862xx_bridge_port_config - Bridge Port Configuration
+ * @bridge_port_id: Bridge Port ID allocated by bridge port allocation
+ * @mask: See &enum mxl862xx_bridge_port_config_mask
+ * @bridge_id: Bridge ID (FID) to which this bridge port is associated
+ * @ingress_extended_vlan_enable: Enable extended VLAN processing for
+ * ingress traffic
+ * @ingress_extended_vlan_block_id: Extended VLAN block allocated for
+ * ingress traffic
+ * @ingress_extended_vlan_block_size: Extended VLAN block size for ingress
+ * traffic
+ * @egress_extended_vlan_enable: Enable extended VLAN processing for egress
+ * traffic
+ * @egress_extended_vlan_block_id: Extended VLAN block allocated for egress
+ * traffic
+ * @egress_extended_vlan_block_size: Extended VLAN block size for egress
+ * traffic
+ * @ingress_marking_mode: Ingress color marking mode. See
+ * &enum mxl862xx_color_marking_mode
+ * @egress_remarking_mode: Color remarking for egress traffic. See
+ * &enum mxl862xx_color_remarking_mode
+ * @ingress_metering_enable: Traffic metering on ingress traffic applies
+ * @ingress_traffic_meter_id: Meter for ingress Bridge Port process
+ * @egress_sub_metering_enable: Traffic metering on various types of egress
+ * traffic
+ * @egress_traffic_sub_meter_id: Meter for egress Bridge Port process with
+ * specific type
+ * @dest_logical_port_id: Destination logical port
+ * @pmapper_enable: Enable P-mapper
+ * @dest_sub_if_id_group: Destination sub interface ID group when
+ * pmapper_enable is false
+ * @pmapper_mapping_mode: P-mapper mapping mode. See
+ * &enum mxl862xx_pmapper_mapping_mode
+ * @pmapper_id_valid: When true, P-mapper is re-used; when false,
+ * allocation is handled by API
+ * @pmapper: P-mapper configuration used when pmapper_enable is true
+ * @bridge_port_map: Port map defining broadcast domain. Each bit
+ * represents one bridge port. Bridge port ID is
+ * index * 16 + bit offset.
+ * @mc_dest_ip_lookup_disable: Disable multicast IP destination table
+ * lookup
+ * @mc_src_ip_lookup_enable: Enable multicast IP source table lookup
+ * @dest_mac_lookup_disable: Disable destination MAC lookup; packet treated
+ * as unknown
+ * @src_mac_learning_disable: Disable source MAC address learning
+ * @mac_spoofing_detect_enable: Enable MAC spoofing detection
+ * @port_lock_enable: Enable port locking
+ * @mac_learning_limit_enable: Enable MAC learning limitation
+ * @mac_learning_limit: Maximum number of MAC addresses that can be learned
+ * from this bridge port
+ * @loop_violation_count: Number of loop violation events from this bridge
+ * port
+ * @mac_learning_count: Number of MAC addresses learned from this bridge
+ * port
+ * @ingress_vlan_filter_enable: Enable ingress VLAN filter
+ * @ingress_vlan_filter_block_id: VLAN filter block of ingress traffic
+ * @ingress_vlan_filter_block_size: VLAN filter block size for ingress
+ * traffic
+ * @bypass_egress_vlan_filter1: For ingress traffic, bypass VLAN filter 1
+ * at egress bridge port processing
+ * @egress_vlan_filter1enable: Enable egress VLAN filter 1
+ * @egress_vlan_filter1block_id: VLAN filter block 1 of egress traffic
+ * @egress_vlan_filter1block_size: VLAN filter block 1 size
+ * @egress_vlan_filter2enable: Enable egress VLAN filter 2
+ * @egress_vlan_filter2block_id: VLAN filter block 2 of egress traffic
+ * @egress_vlan_filter2block_size: VLAN filter block 2 size
+ * @vlan_tag_selection: VLAN tag selection for MAC address/multicast
+ * learning, lookup and filtering.
+ * 0 - Intermediate outer VLAN tag is used.
+ * 1 - Original outer VLAN tag is used.
+ * @vlan_src_mac_priority_enable: Enable VLAN Priority field for source MAC
+ * learning and filtering
+ * @vlan_src_mac_dei_enable: Enable VLAN DEI/CFI field for source MAC
+ * learning and filtering
+ * @vlan_src_mac_vid_enable: Enable VLAN ID field for source MAC learning
+ * and filtering
+ * @vlan_dst_mac_priority_enable: Enable VLAN Priority field for destination
+ * MAC lookup and filtering
+ * @vlan_dst_mac_dei_enable: Enable VLAN CFI/DEI field for destination MAC
+ * lookup and filtering
+ * @vlan_dst_mac_vid_enable: Enable VLAN ID field for destination MAC lookup
+ * and filtering
+ * @vlan_multicast_priority_enable: Enable VLAN Priority field for IP
+ * multicast lookup
+ * @vlan_multicast_dei_enable: Enable VLAN CFI/DEI field for IP multicast
+ * lookup
+ * @vlan_multicast_vid_enable: Enable VLAN ID field for IP multicast lookup
+ */
+struct mxl862xx_bridge_port_config {
+ __le16 bridge_port_id;
+ __le32 mask; /* enum mxl862xx_bridge_port_config_mask */
+ __le16 bridge_id;
+ u8 ingress_extended_vlan_enable;
+ __le16 ingress_extended_vlan_block_id;
+ __le16 ingress_extended_vlan_block_size;
+ u8 egress_extended_vlan_enable;
+ __le16 egress_extended_vlan_block_id;
+ __le16 egress_extended_vlan_block_size;
+ __le32 ingress_marking_mode; /* enum mxl862xx_color_marking_mode */
+ __le32 egress_remarking_mode; /* enum mxl862xx_color_remarking_mode */
+ u8 ingress_metering_enable;
+ __le16 ingress_traffic_meter_id;
+ u8 egress_sub_metering_enable[MXL862XX_BRIDGE_PORT_EGRESS_METER_MAX];
+ __le16 egress_traffic_sub_meter_id[MXL862XX_BRIDGE_PORT_EGRESS_METER_MAX];
+ u8 dest_logical_port_id;
+ u8 pmapper_enable;
+ __le16 dest_sub_if_id_group;
+ __le32 pmapper_mapping_mode; /* enum mxl862xx_pmapper_mapping_mode */
+ u8 pmapper_id_valid;
+ struct mxl862xx_pmapper pmapper;
+ __le16 bridge_port_map[8];
+ u8 mc_dest_ip_lookup_disable;
+ u8 mc_src_ip_lookup_enable;
+ u8 dest_mac_lookup_disable;
+ u8 src_mac_learning_disable;
+ u8 mac_spoofing_detect_enable;
+ u8 port_lock_enable;
+ u8 mac_learning_limit_enable;
+ __le16 mac_learning_limit;
+ __le16 loop_violation_count;
+ __le16 mac_learning_count;
+ u8 ingress_vlan_filter_enable;
+ __le16 ingress_vlan_filter_block_id;
+ __le16 ingress_vlan_filter_block_size;
+ u8 bypass_egress_vlan_filter1;
+ u8 egress_vlan_filter1enable;
+ __le16 egress_vlan_filter1block_id;
+ __le16 egress_vlan_filter1block_size;
+ u8 egress_vlan_filter2enable;
+ __le16 egress_vlan_filter2block_id;
+ __le16 egress_vlan_filter2block_size;
+ u8 vlan_tag_selection;
+ u8 vlan_src_mac_priority_enable;
+ u8 vlan_src_mac_dei_enable;
+ u8 vlan_src_mac_vid_enable;
+ u8 vlan_dst_mac_priority_enable;
+ u8 vlan_dst_mac_dei_enable;
+ u8 vlan_dst_mac_vid_enable;
+ u8 vlan_multicast_priority_enable;
+ u8 vlan_multicast_dei_enable;
+ u8 vlan_multicast_vid_enable;
+} __packed;
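bridge_port_map encodes up to 128 bridge ports as a bitmap of eight little-endian 16-bit words; bridge port N sits in word N / 16 at bit N % 16, as the kernel-doc above states. A sketch of setting a member bit (helper name hypothetical):

	static void bridge_port_map_set(__le16 map[8], unsigned int id)
	{
		__le16 *w = &map[id / 16];

		*w = cpu_to_le16(le16_to_cpu(*w) | BIT(id % 16));
	}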
+
+/**
+ * struct mxl862xx_cfg - Global Switch configuration Attributes
+ * @mac_table_age_timer: See &enum mxl862xx_age_timer
+ * @age_timer: Custom MAC table aging timer in seconds
+ * @max_packet_len: Maximum Ethernet packet length
+ * @learning_limit_action: Action taken on packets once the automatic MAC
+ *			   address table learning limit is reached
+ * @mac_locking_action: Accept or discard MAC port locking violation
+ * packets
+ * @mac_spoofing_action: Accept or discard MAC spoofing and port MAC locking
+ * violation packets
+ * @pause_mac_mode_src: Pause frame MAC source address mode
+ * @pause_mac_src: Pause frame MAC source address
+ */
+struct mxl862xx_cfg {
+ __le32 mac_table_age_timer; /* enum mxl862xx_age_timer */
+ __le32 age_timer;
+ __le16 max_packet_len;
+ u8 learning_limit_action;
+ u8 mac_locking_action;
+ u8 mac_spoofing_action;
+ u8 pause_mac_mode_src;
+ u8 pause_mac_src[ETH_ALEN];
+} __packed;
+
+/**
+ * enum mxl862xx_ss_sp_tag_mask - Special tag valid field indicator bits
+ * @MXL862XX_SS_SP_TAG_MASK_RX: valid RX special tag mode
+ * @MXL862XX_SS_SP_TAG_MASK_TX: valid TX special tag mode
+ * @MXL862XX_SS_SP_TAG_MASK_RX_PEN: valid RX special tag info over preamble
+ * @MXL862XX_SS_SP_TAG_MASK_TX_PEN: valid TX special tag info over preamble
+ */
+enum mxl862xx_ss_sp_tag_mask {
+ MXL862XX_SS_SP_TAG_MASK_RX = BIT(0),
+ MXL862XX_SS_SP_TAG_MASK_TX = BIT(1),
+ MXL862XX_SS_SP_TAG_MASK_RX_PEN = BIT(2),
+ MXL862XX_SS_SP_TAG_MASK_TX_PEN = BIT(3),
+};
+
+/**
+ * enum mxl862xx_ss_sp_tag_rx - RX special tag mode
+ * @MXL862XX_SS_SP_TAG_RX_NO_TAG_NO_INSERT: packet does NOT have special
+ * tag and special tag is NOT inserted
+ * @MXL862XX_SS_SP_TAG_RX_NO_TAG_INSERT: packet does NOT have special tag
+ * and special tag is inserted
+ * @MXL862XX_SS_SP_TAG_RX_TAG_NO_INSERT: packet has special tag and special
+ * tag is NOT inserted
+ */
+enum mxl862xx_ss_sp_tag_rx {
+ MXL862XX_SS_SP_TAG_RX_NO_TAG_NO_INSERT = 0,
+ MXL862XX_SS_SP_TAG_RX_NO_TAG_INSERT = 1,
+ MXL862XX_SS_SP_TAG_RX_TAG_NO_INSERT = 2,
+};
+
+/**
+ * enum mxl862xx_ss_sp_tag_tx - TX special tag mode
+ * @MXL862XX_SS_SP_TAG_TX_NO_TAG_NO_REMOVE: packet does NOT have special
+ * tag and special tag is NOT removed
+ * @MXL862XX_SS_SP_TAG_TX_TAG_REPLACE: packet has special tag and special
+ * tag is replaced
+ * @MXL862XX_SS_SP_TAG_TX_TAG_NO_REMOVE: packet has special tag and special
+ * tag is NOT removed
+ * @MXL862XX_SS_SP_TAG_TX_TAG_REMOVE: packet has special tag and special
+ * tag is removed
+ */
+enum mxl862xx_ss_sp_tag_tx {
+ MXL862XX_SS_SP_TAG_TX_NO_TAG_NO_REMOVE = 0,
+ MXL862XX_SS_SP_TAG_TX_TAG_REPLACE = 1,
+ MXL862XX_SS_SP_TAG_TX_TAG_NO_REMOVE = 2,
+ MXL862XX_SS_SP_TAG_TX_TAG_REMOVE = 3,
+};
+
+/**
+ * enum mxl862xx_ss_sp_tag_rx_pen - RX special tag info over preamble
+ * @MXL862XX_SS_SP_TAG_RX_PEN_ALL_0: special tag bytes 2 to 7 are inserted
+ *                                   as all 0
+ * @MXL862XX_SS_SP_TAG_RX_PEN_BYTE_5_IS_16: special tag byte 5 is 16, other
+ *                                          bytes from 2 to 7 are 0
+ * @MXL862XX_SS_SP_TAG_RX_PEN_BYTE_5_FROM_PREAMBLE: special tag byte 5 is
+ *                                                  taken from the preamble
+ *                                                  field, others are 0
+ * @MXL862XX_SS_SP_TAG_RX_PEN_BYTE_2_TO_7_FROM_PREAMBLE: special tag bytes 2
+ *                                                       to 7 are taken from
+ *                                                       the preamble field
+ */
+enum mxl862xx_ss_sp_tag_rx_pen {
+ MXL862XX_SS_SP_TAG_RX_PEN_ALL_0 = 0,
+ MXL862XX_SS_SP_TAG_RX_PEN_BYTE_5_IS_16 = 1,
+ MXL862XX_SS_SP_TAG_RX_PEN_BYTE_5_FROM_PREAMBLE = 2,
+ MXL862XX_SS_SP_TAG_RX_PEN_BYTE_2_TO_7_FROM_PREAMBLE = 3,
+};
+
+/**
+ * struct mxl862xx_ss_sp_tag - Special tag port settings
+ * @pid: port ID (1~16)
+ * @mask: See &enum mxl862xx_ss_sp_tag_mask
+ * @rx: See &enum mxl862xx_ss_sp_tag_rx
+ * @tx: See &enum mxl862xx_ss_sp_tag_tx
+ * @rx_pen: See &enum mxl862xx_ss_sp_tag_rx_pen
+ * @tx_pen: TX special tag info over preamble
+ * 0 - disabled
+ * 1 - enabled
+ */
+struct mxl862xx_ss_sp_tag {
+ u8 pid;
+ u8 mask; /* enum mxl862xx_ss_sp_tag_mask */
+ u8 rx; /* enum mxl862xx_ss_sp_tag_rx */
+ u8 tx; /* enum mxl862xx_ss_sp_tag_tx */
+ u8 rx_pen; /* enum mxl862xx_ss_sp_tag_rx_pen */
+ u8 tx_pen; /* boolean */
+} __packed;
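+
+/* Usage sketch (illustrative): a CPU port exchanging DSA special tags
+ * with the host would typically be described as below, mirroring
+ * mxl862xx_configure_sp_tag_proto() in mxl862xx.c:
+ *
+ *   struct mxl862xx_ss_sp_tag tag = {
+ *       .pid = port,
+ *       .mask = MXL862XX_SS_SP_TAG_MASK_RX | MXL862XX_SS_SP_TAG_MASK_TX,
+ *       .rx = MXL862XX_SS_SP_TAG_RX_TAG_NO_INSERT,
+ *       .tx = MXL862XX_SS_SP_TAG_TX_TAG_NO_REMOVE,
+ *   };
+ */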
+
+/**
+ * enum mxl862xx_logical_port_mode - Logical port mode
+ * @MXL862XX_LOGICAL_PORT_8BIT_WLAN: WLAN with 8-bit station ID
+ * @MXL862XX_LOGICAL_PORT_9BIT_WLAN: WLAN with 9-bit station ID
+ * @MXL862XX_LOGICAL_PORT_ETHERNET: Ethernet port
+ * @MXL862XX_LOGICAL_PORT_OTHER: Others
+ */
+enum mxl862xx_logical_port_mode {
+ MXL862XX_LOGICAL_PORT_8BIT_WLAN = 0,
+ MXL862XX_LOGICAL_PORT_9BIT_WLAN,
+ MXL862XX_LOGICAL_PORT_ETHERNET,
+ MXL862XX_LOGICAL_PORT_OTHER = 0xFF,
+};
+
+/**
+ * struct mxl862xx_ctp_port_assignment - CTP Port Assignment/association
+ * with logical port
+ * @logical_port_id: Logical port ID. The valid range is hardware-dependent
+ * @first_ctp_port_id: First CTP (Connectivity Termination Port) ID mapped
+ *                     to the above logical port ID
+ * @number_of_ctp_port: Total number of CTP ports mapped to the above
+ *                      logical port ID
+ * @mode: Logical port mode to define sub interface ID format. See
+ * &enum mxl862xx_logical_port_mode
+ * @bridge_port_id: Bridge Port ID (not FID). For allocation, each CTP
+ * allocated is mapped to the Bridge Port given by this field.
+ * The Bridge Port will be configured to use first CTP as
+ * egress CTP.
+ */
+struct mxl862xx_ctp_port_assignment {
+ u8 logical_port_id;
+ __le16 first_ctp_port_id;
+ __le16 number_of_ctp_port;
+ __le32 mode; /* enum mxl862xx_logical_port_mode */
+ __le16 bridge_port_id;
+} __packed;
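+
+/* Usage sketch (illustrative): mxl862xx_port_setup() in mxl862xx.c maps
+ * each user port to a single CTP port, e.g.:
+ *
+ *   struct mxl862xx_ctp_port_assignment ctp_assign = {
+ *       .logical_port_id = port,
+ *       .first_ctp_port_id = cpu_to_le16(port),
+ *       .number_of_ctp_port = cpu_to_le16(1),
+ *       .mode = cpu_to_le32(MXL862XX_LOGICAL_PORT_ETHERNET),
+ *   };
+ */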
+
+/**
+ * struct mxl862xx_sys_fw_image_version - Firmware version information
+ * @iv_major: firmware major version
+ * @iv_minor: firmware minor version
+ * @iv_revision: firmware revision
+ * @iv_build_num: firmware build number
+ */
+struct mxl862xx_sys_fw_image_version {
+ u8 iv_major;
+ u8 iv_minor;
+ __le16 iv_revision;
+ __le32 iv_build_num;
+} __packed;
+
+#endif /* __MXL862XX_API_H */
diff --git a/drivers/net/dsa/mxl862xx/mxl862xx-cmd.h b/drivers/net/dsa/mxl862xx/mxl862xx-cmd.h
new file mode 100644
index 000000000000..f6852ade64e7
--- /dev/null
+++ b/drivers/net/dsa/mxl862xx/mxl862xx-cmd.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __MXL862XX_CMD_H
+#define __MXL862XX_CMD_H
+
+#define MXL862XX_MMD_DEV 30
+#define MXL862XX_MMD_REG_CTRL 0
+#define MXL862XX_MMD_REG_LEN_RET 1
+#define MXL862XX_MMD_REG_DATA_FIRST 2
+#define MXL862XX_MMD_REG_DATA_LAST 95
+#define MXL862XX_MMD_REG_DATA_MAX_SIZE \
+ (MXL862XX_MMD_REG_DATA_LAST - MXL862XX_MMD_REG_DATA_FIRST + 1)
+
+#define MXL862XX_COMMON_MAGIC 0x100
+#define MXL862XX_BRDG_MAGIC 0x300
+#define MXL862XX_BRDGPORT_MAGIC 0x400
+#define MXL862XX_CTP_MAGIC 0x500
+#define MXL862XX_SWMAC_MAGIC 0xa00
+#define MXL862XX_SS_MAGIC 0x1600
+#define GPY_GPY2XX_MAGIC 0x1800
+#define SYS_MISC_MAGIC 0x1900
+
+#define MXL862XX_COMMON_CFGGET (MXL862XX_COMMON_MAGIC + 0x9)
+#define MXL862XX_COMMON_REGISTERMOD (MXL862XX_COMMON_MAGIC + 0x11)
+
+#define MXL862XX_BRIDGE_ALLOC (MXL862XX_BRDG_MAGIC + 0x1)
+#define MXL862XX_BRIDGE_CONFIGSET (MXL862XX_BRDG_MAGIC + 0x2)
+#define MXL862XX_BRIDGE_CONFIGGET (MXL862XX_BRDG_MAGIC + 0x3)
+#define MXL862XX_BRIDGE_FREE (MXL862XX_BRDG_MAGIC + 0x4)
+
+#define MXL862XX_BRIDGEPORT_ALLOC (MXL862XX_BRDGPORT_MAGIC + 0x1)
+#define MXL862XX_BRIDGEPORT_CONFIGSET (MXL862XX_BRDGPORT_MAGIC + 0x2)
+#define MXL862XX_BRIDGEPORT_CONFIGGET (MXL862XX_BRDGPORT_MAGIC + 0x3)
+#define MXL862XX_BRIDGEPORT_FREE (MXL862XX_BRDGPORT_MAGIC + 0x4)
+
+#define MXL862XX_CTP_PORTASSIGNMENTSET (MXL862XX_CTP_MAGIC + 0x3)
+
+#define MXL862XX_MAC_TABLECLEARCOND (MXL862XX_SWMAC_MAGIC + 0x8)
+
+#define MXL862XX_SS_SPTAG_SET (MXL862XX_SS_MAGIC + 0x02)
+
+#define INT_GPHY_READ (GPY_GPY2XX_MAGIC + 0x01)
+#define INT_GPHY_WRITE (GPY_GPY2XX_MAGIC + 0x02)
+
+#define SYS_MISC_FW_VERSION (SYS_MISC_MAGIC + 0x02)
+
+#define MMD_API_MAXIMUM_ID 0x7fff
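+
+/* Command ID encoding sketch (illustrative): each ID is a per-block magic
+ * plus a small function offset, e.g.
+ *
+ *   MXL862XX_BRIDGE_ALLOC == MXL862XX_BRDG_MAGIC + 0x1 == 0x301
+ *
+ * All IDs stay below MMD_API_MAXIMUM_ID (0x7fff), so bit 15 remains free
+ * for the busy flag (CTRL_BUSY_MASK in mxl862xx-host.c) that the host
+ * transport ORs into the control register.
+ */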
+
+#endif /* __MXL862XX_CMD_H */
diff --git a/drivers/net/dsa/mxl862xx/mxl862xx-host.c b/drivers/net/dsa/mxl862xx/mxl862xx-host.c
new file mode 100644
index 000000000000..8c55497a0ce8
--- /dev/null
+++ b/drivers/net/dsa/mxl862xx/mxl862xx-host.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Based upon the MaxLinear SDK driver
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2025 John Crispin <john@phrozen.org>
+ * Copyright (C) 2024 MaxLinear Inc.
+ */
+
+#include <linux/bits.h>
+#include <linux/iopoll.h>
+#include <linux/limits.h>
+#include <net/dsa.h>
+#include "mxl862xx.h"
+#include "mxl862xx-host.h"
+
+#define CTRL_BUSY_MASK BIT(15)
+
+#define MXL862XX_MMD_REG_CTRL 0
+#define MXL862XX_MMD_REG_LEN_RET 1
+#define MXL862XX_MMD_REG_DATA_FIRST 2
+#define MXL862XX_MMD_REG_DATA_LAST 95
+#define MXL862XX_MMD_REG_DATA_MAX_SIZE \
+ (MXL862XX_MMD_REG_DATA_LAST - MXL862XX_MMD_REG_DATA_FIRST + 1)
+
+#define MMD_API_SET_DATA_0 2
+#define MMD_API_GET_DATA_0 5
+#define MMD_API_RST_DATA 8
+
+#define MXL862XX_SWITCH_RESET 0x9907
+
+static int mxl862xx_reg_read(struct mxl862xx_priv *priv, u32 addr)
+{
+ return __mdiodev_c45_read(priv->mdiodev, MDIO_MMD_VEND1, addr);
+}
+
+static int mxl862xx_reg_write(struct mxl862xx_priv *priv, u32 addr, u16 data)
+{
+ return __mdiodev_c45_write(priv->mdiodev, MDIO_MMD_VEND1, addr, data);
+}
+
+static int mxl862xx_ctrl_read(struct mxl862xx_priv *priv)
+{
+ return mxl862xx_reg_read(priv, MXL862XX_MMD_REG_CTRL);
+}
+
+static int mxl862xx_busy_wait(struct mxl862xx_priv *priv)
+{
+ int val;
+
+ return readx_poll_timeout(mxl862xx_ctrl_read, priv, val,
+ !(val & CTRL_BUSY_MASK), 15, 500000);
+}
+
+static int mxl862xx_set_data(struct mxl862xx_priv *priv, u16 words)
+{
+ int ret;
+ u16 cmd;
+
+ ret = mxl862xx_reg_write(priv, MXL862XX_MMD_REG_LEN_RET,
+ MXL862XX_MMD_REG_DATA_MAX_SIZE * sizeof(u16));
+ if (ret < 0)
+ return ret;
+
+ cmd = words / MXL862XX_MMD_REG_DATA_MAX_SIZE - 1;
+ if (cmd >= 2)
+ return -EINVAL;
+
+ cmd += MMD_API_SET_DATA_0;
+ ret = mxl862xx_reg_write(priv, MXL862XX_MMD_REG_CTRL,
+ cmd | CTRL_BUSY_MASK);
+ if (ret < 0)
+ return ret;
+
+ return mxl862xx_busy_wait(priv);
+}
+
+static int mxl862xx_get_data(struct mxl862xx_priv *priv, u16 words)
+{
+ int ret;
+ u16 cmd;
+
+ ret = mxl862xx_reg_write(priv, MXL862XX_MMD_REG_LEN_RET,
+ MXL862XX_MMD_REG_DATA_MAX_SIZE * sizeof(u16));
+ if (ret < 0)
+ return ret;
+
+ cmd = words / MXL862XX_MMD_REG_DATA_MAX_SIZE;
+ if (cmd < 1 || cmd > 2)
+ return -EINVAL;
+
+ cmd += MMD_API_GET_DATA_0;
+ ret = mxl862xx_reg_write(priv, MXL862XX_MMD_REG_CTRL,
+ cmd | CTRL_BUSY_MASK);
+ if (ret < 0)
+ return ret;
+
+ return mxl862xx_busy_wait(priv);
+}
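+
+/* Paging sketch (hypothetical payload size): the data window holds only
+ * MXL862XX_MMD_REG_DATA_MAX_SIZE (94) 16-bit words, so a 150-word payload
+ * is transferred in two batches: words 0..93 fill the window, then
+ * mxl862xx_set_data(priv, 94) commits them with MMD_API_SET_DATA_0
+ * (94 / 94 - 1 == 0) before words 94..149 are written and the final
+ * command is issued.
+ */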
+
+static int mxl862xx_firmware_return(int ret)
+{
+ /* Only 16-bit values are valid. */
+ if (WARN_ON(ret & GENMASK(31, 16)))
+ return -EINVAL;
+
+ /* Interpret value as signed 16-bit integer. */
+ return (s16)ret;
+}
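+
+/* Worked example (hypothetical register value): a LEN_RET readback of
+ * 0xfffb is sign-extended to (s16)0xfffb == -5, i.e. EIO in the Zephyr
+ * errno numbering; mxl862xx_send_cmd() below logs it and returns -EIO.
+ */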
+
+static int mxl862xx_send_cmd(struct mxl862xx_priv *priv, u16 cmd, u16 size,
+ bool quiet)
+{
+ int ret;
+
+ ret = mxl862xx_reg_write(priv, MXL862XX_MMD_REG_LEN_RET, size);
+ if (ret)
+ return ret;
+
+ ret = mxl862xx_reg_write(priv, MXL862XX_MMD_REG_CTRL,
+ cmd | CTRL_BUSY_MASK);
+ if (ret)
+ return ret;
+
+ ret = mxl862xx_busy_wait(priv);
+ if (ret)
+ return ret;
+
+ ret = mxl862xx_reg_read(priv, MXL862XX_MMD_REG_LEN_RET);
+ if (ret < 0)
+ return ret;
+
+ /* handle errors returned by the firmware as -EIO
+ * The firmware is based on Zephyr OS and uses the errors as
+ * defined in errno.h of Zephyr OS. See
+ * https://github.com/zephyrproject-rtos/zephyr/blob/v3.7.0/lib/libc/minimal/include/errno.h
+ */
+ ret = mxl862xx_firmware_return(ret);
+ if (ret < 0) {
+ if (!quiet)
+ dev_err(&priv->mdiodev->dev,
+ "CMD %04x returned error %d\n", cmd, ret);
+ return -EIO;
+ }
+
+ return ret;
+}
+
+int mxl862xx_api_wrap(struct mxl862xx_priv *priv, u16 cmd, void *_data,
+ u16 size, bool read, bool quiet)
+{
+ __le16 *data = _data;
+ int ret, cmd_ret;
+ u16 max, i;
+
+ dev_dbg(&priv->mdiodev->dev, "CMD %04x DATA %*ph\n", cmd, size, data);
+
+ mutex_lock_nested(&priv->mdiodev->bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ max = (size + 1) / 2;
+
+ ret = mxl862xx_busy_wait(priv);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < max; i++) {
+ u16 off = i % MXL862XX_MMD_REG_DATA_MAX_SIZE;
+
+ if (i && off == 0) {
+ /* Commit the current window once every
+ * MXL862XX_MMD_REG_DATA_MAX_SIZE words have been written.
+ */
+ ret = mxl862xx_set_data(priv, i);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = mxl862xx_reg_write(priv, MXL862XX_MMD_REG_DATA_FIRST + off,
+ le16_to_cpu(data[i]));
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = mxl862xx_send_cmd(priv, cmd, size, quiet);
+ if (ret < 0 || !read)
+ goto out;
+
+ /* store result of mxl862xx_send_cmd() */
+ cmd_ret = ret;
+
+ for (i = 0; i < max; i++) {
+ u16 off = i % MXL862XX_MMD_REG_DATA_MAX_SIZE;
+
+ if (i && off == 0) {
+ /* Fetch the next batch of data once every
+ * MXL862XX_MMD_REG_DATA_MAX_SIZE words have been read.
+ */
+ ret = mxl862xx_get_data(priv, i);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = mxl862xx_reg_read(priv, MXL862XX_MMD_REG_DATA_FIRST + off);
+ if (ret < 0)
+ goto out;
+
+ if ((i * 2 + 1) == size) {
+ /* Handle the last byte specially if the size is not
+ * word aligned, to avoid writing beyond the allocated
+ * data structure.
+ */
+ *(u8 *)&data[i] = ret & 0xff;
+ } else {
+ data[i] = cpu_to_le16((u16)ret);
+ }
+ }
+
+ /* on success return the result of the mxl862xx_send_cmd() */
+ ret = cmd_ret;
+
+ dev_dbg(&priv->mdiodev->dev, "RET %d DATA %*ph\n", ret, size, data);
+
+out:
+ mutex_unlock(&priv->mdiodev->bus->mdio_lock);
+
+ return ret;
+}
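+
+/* Usage sketch (illustrative, error handling elided): callers marshal a
+ * little-endian command structure through this wrapper, e.g. reading the
+ * firmware version as mxl862xx_wait_ready() in mxl862xx.c does:
+ *
+ *   struct mxl862xx_sys_fw_image_version ver = {};
+ *   int ret = mxl862xx_api_wrap(priv, SYS_MISC_FW_VERSION, &ver,
+ *                               sizeof(ver), true, false);
+ */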
+
+int mxl862xx_reset(struct mxl862xx_priv *priv)
+{
+ int ret;
+
+ mutex_lock_nested(&priv->mdiodev->bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ /* Software reset */
+ ret = mxl862xx_reg_write(priv, MXL862XX_MMD_REG_LEN_RET, 0);
+ if (ret)
+ goto out;
+
+ ret = mxl862xx_reg_write(priv, MXL862XX_MMD_REG_CTRL, MXL862XX_SWITCH_RESET);
+out:
+ mutex_unlock(&priv->mdiodev->bus->mdio_lock);
+
+ return ret;
+}
diff --git a/drivers/net/dsa/mxl862xx/mxl862xx-host.h b/drivers/net/dsa/mxl862xx/mxl862xx-host.h
new file mode 100644
index 000000000000..7cc496f6be5c
--- /dev/null
+++ b/drivers/net/dsa/mxl862xx/mxl862xx-host.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __MXL862XX_HOST_H
+#define __MXL862XX_HOST_H
+
+#include "mxl862xx.h"
+
+int mxl862xx_api_wrap(struct mxl862xx_priv *priv, u16 cmd, void *data, u16 size,
+ bool read, bool quiet);
+int mxl862xx_reset(struct mxl862xx_priv *priv);
+
+#endif /* __MXL862XX_HOST_H */
diff --git a/drivers/net/dsa/mxl862xx/mxl862xx.c b/drivers/net/dsa/mxl862xx/mxl862xx.c
new file mode 100644
index 000000000000..b1e2094b5816
--- /dev/null
+++ b/drivers/net/dsa/mxl862xx/mxl862xx.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MaxLinear MxL862xx switch family
+ *
+ * Copyright (C) 2024 MaxLinear Inc.
+ * Copyright (C) 2025 John Crispin <john@phrozen.org>
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <net/dsa.h>
+
+#include "mxl862xx.h"
+#include "mxl862xx-api.h"
+#include "mxl862xx-cmd.h"
+#include "mxl862xx-host.h"
+
+#define MXL862XX_API_WRITE(dev, cmd, data) \
+ mxl862xx_api_wrap(dev, cmd, &(data), sizeof((data)), false, false)
+#define MXL862XX_API_READ(dev, cmd, data) \
+ mxl862xx_api_wrap(dev, cmd, &(data), sizeof((data)), true, false)
+#define MXL862XX_API_READ_QUIET(dev, cmd, data) \
+ mxl862xx_api_wrap(dev, cmd, &(data), sizeof((data)), true, true)
+
+#define MXL862XX_SDMA_PCTRLP(p) (0xbc0 + ((p) * 0x6))
+#define MXL862XX_SDMA_PCTRL_EN BIT(0)
+
+#define MXL862XX_FDMA_PCTRLP(p) (0xa80 + ((p) * 0x6))
+#define MXL862XX_FDMA_PCTRL_EN BIT(0)
+
+#define MXL862XX_READY_TIMEOUT_MS 10000
+#define MXL862XX_READY_POLL_MS 100
+
+static enum dsa_tag_protocol mxl862xx_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_MXL862;
+}
+
+/* PHY access via firmware relay */
+static int mxl862xx_phy_read_mmd(struct mxl862xx_priv *priv, int port,
+ int devadd, int reg)
+{
+ struct mdio_relay_data param = {
+ .phy = port,
+ .mmd = devadd,
+ .reg = cpu_to_le16(reg),
+ };
+ int ret;
+
+ ret = MXL862XX_API_READ(priv, INT_GPHY_READ, param);
+ if (ret)
+ return ret;
+
+ return le16_to_cpu(param.data);
+}
+
+static int mxl862xx_phy_write_mmd(struct mxl862xx_priv *priv, int port,
+ int devadd, int reg, u16 data)
+{
+ struct mdio_relay_data param = {
+ .phy = port,
+ .mmd = devadd,
+ .reg = cpu_to_le16(reg),
+ .data = cpu_to_le16(data),
+ };
+
+ return MXL862XX_API_WRITE(priv, INT_GPHY_WRITE, param);
+}
+
+static int mxl862xx_phy_read_mii_bus(struct mii_bus *bus, int port, int regnum)
+{
+ return mxl862xx_phy_read_mmd(bus->priv, port, 0, regnum);
+}
+
+static int mxl862xx_phy_write_mii_bus(struct mii_bus *bus, int port,
+ int regnum, u16 val)
+{
+ return mxl862xx_phy_write_mmd(bus->priv, port, 0, regnum, val);
+}
+
+static int mxl862xx_phy_read_c45_mii_bus(struct mii_bus *bus, int port,
+ int devadd, int regnum)
+{
+ return mxl862xx_phy_read_mmd(bus->priv, port, devadd, regnum);
+}
+
+static int mxl862xx_phy_write_c45_mii_bus(struct mii_bus *bus, int port,
+ int devadd, int regnum, u16 val)
+{
+ return mxl862xx_phy_write_mmd(bus->priv, port, devadd, regnum, val);
+}
+
+static int mxl862xx_wait_ready(struct dsa_switch *ds)
+{
+ struct mxl862xx_sys_fw_image_version ver = {};
+ unsigned long start = jiffies, timeout;
+ struct mxl862xx_priv *priv = ds->priv;
+ struct mxl862xx_cfg cfg = {};
+ int ret;
+
+ timeout = start + msecs_to_jiffies(MXL862XX_READY_TIMEOUT_MS);
+ msleep(2000); /* it always takes at least 2 seconds */
+ do {
+ ret = MXL862XX_API_READ_QUIET(priv, SYS_MISC_FW_VERSION, ver);
+ if (ret || !ver.iv_major)
+ goto not_ready_yet;
+
+ /* being able to perform CFGGET indicates that
+ * the firmware is ready
+ */
+ ret = MXL862XX_API_READ_QUIET(priv,
+ MXL862XX_COMMON_CFGGET,
+ cfg);
+ if (ret)
+ goto not_ready_yet;
+
+ dev_info(ds->dev, "switch ready after %ums, firmware %u.%u.%u (build %u)\n",
+ jiffies_to_msecs(jiffies - start),
+ ver.iv_major, ver.iv_minor,
+ le16_to_cpu(ver.iv_revision),
+ le32_to_cpu(ver.iv_build_num));
+ return 0;
+
+not_ready_yet:
+ msleep(MXL862XX_READY_POLL_MS);
+ } while (time_before(jiffies, timeout));
+
+ dev_err(ds->dev, "switch not responding after reset\n");
+ return -ETIMEDOUT;
+}
+
+static int mxl862xx_setup_mdio(struct dsa_switch *ds)
+{
+ struct mxl862xx_priv *priv = ds->priv;
+ struct device *dev = ds->dev;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->priv = priv;
+ ds->user_mii_bus = bus;
+ bus->name = KBUILD_MODNAME "-mii";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
+ bus->read_c45 = mxl862xx_phy_read_c45_mii_bus;
+ bus->write_c45 = mxl862xx_phy_write_c45_mii_bus;
+ bus->read = mxl862xx_phy_read_mii_bus;
+ bus->write = mxl862xx_phy_write_mii_bus;
+ bus->parent = dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+
+ mdio_np = of_get_child_by_name(dev->of_node, "mdio");
+ if (!mdio_np)
+ return -ENODEV;
+
+ ret = devm_of_mdiobus_register(dev, bus, mdio_np);
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
+static int mxl862xx_setup(struct dsa_switch *ds)
+{
+ struct mxl862xx_priv *priv = ds->priv;
+ int ret;
+
+ ret = mxl862xx_reset(priv);
+ if (ret)
+ return ret;
+
+ ret = mxl862xx_wait_ready(ds);
+ if (ret)
+ return ret;
+
+ return mxl862xx_setup_mdio(ds);
+}
+
+static int mxl862xx_port_state(struct dsa_switch *ds, int port, bool enable)
+{
+ struct mxl862xx_register_mod sdma = {
+ .addr = cpu_to_le16(MXL862XX_SDMA_PCTRLP(port)),
+ .data = cpu_to_le16(enable ? MXL862XX_SDMA_PCTRL_EN : 0),
+ .mask = cpu_to_le16(MXL862XX_SDMA_PCTRL_EN),
+ };
+ struct mxl862xx_register_mod fdma = {
+ .addr = cpu_to_le16(MXL862XX_FDMA_PCTRLP(port)),
+ .data = cpu_to_le16(enable ? MXL862XX_FDMA_PCTRL_EN : 0),
+ .mask = cpu_to_le16(MXL862XX_FDMA_PCTRL_EN),
+ };
+ int ret;
+
+ ret = MXL862XX_API_WRITE(ds->priv, MXL862XX_COMMON_REGISTERMOD, sdma);
+ if (ret)
+ return ret;
+
+ return MXL862XX_API_WRITE(ds->priv, MXL862XX_COMMON_REGISTERMOD, fdma);
+}
+
+static int mxl862xx_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ return mxl862xx_port_state(ds, port, true);
+}
+
+static void mxl862xx_port_disable(struct dsa_switch *ds, int port)
+{
+ if (mxl862xx_port_state(ds, port, false))
+ dev_err(ds->dev, "failed to disable port %d\n", port);
+}
+
+static void mxl862xx_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct mxl862xx_mac_table_clear param = {
+ .type = MXL862XX_MAC_CLEAR_PHY_PORT,
+ .port_id = port,
+ };
+
+ if (MXL862XX_API_WRITE(ds->priv, MXL862XX_MAC_TABLECLEARCOND, param))
+ dev_err(ds->dev, "failed to clear fdb on port %d\n", port);
+}
+
+static int mxl862xx_configure_ctp_port(struct dsa_switch *ds, int port,
+ u16 first_ctp_port_id,
+ u16 number_of_ctp_ports)
+{
+ struct mxl862xx_ctp_port_assignment ctp_assign = {
+ .logical_port_id = port,
+ .first_ctp_port_id = cpu_to_le16(first_ctp_port_id),
+ .number_of_ctp_port = cpu_to_le16(number_of_ctp_ports),
+ .mode = cpu_to_le32(MXL862XX_LOGICAL_PORT_ETHERNET),
+ };
+
+ return MXL862XX_API_WRITE(ds->priv, MXL862XX_CTP_PORTASSIGNMENTSET,
+ ctp_assign);
+}
+
+static int mxl862xx_configure_sp_tag_proto(struct dsa_switch *ds, int port,
+ bool enable)
+{
+ struct mxl862xx_ss_sp_tag tag = {
+ .pid = port,
+ .mask = MXL862XX_SS_SP_TAG_MASK_RX | MXL862XX_SS_SP_TAG_MASK_TX,
+ .rx = enable ? MXL862XX_SS_SP_TAG_RX_TAG_NO_INSERT :
+ MXL862XX_SS_SP_TAG_RX_NO_TAG_INSERT,
+ .tx = enable ? MXL862XX_SS_SP_TAG_TX_TAG_NO_REMOVE :
+ MXL862XX_SS_SP_TAG_TX_TAG_REMOVE,
+ };
+
+ return MXL862XX_API_WRITE(ds->priv, MXL862XX_SS_SPTAG_SET, tag);
+}
+
+static int mxl862xx_setup_cpu_bridge(struct dsa_switch *ds, int port)
+{
+ struct mxl862xx_bridge_port_config br_port_cfg = {};
+ struct mxl862xx_priv *priv = ds->priv;
+ u16 bridge_port_map = 0;
+ struct dsa_port *dp;
+
+ /* CPU port bridge setup */
+ br_port_cfg.mask = cpu_to_le32(MXL862XX_BRIDGE_PORT_CONFIG_MASK_BRIDGE_PORT_MAP |
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_MC_SRC_MAC_LEARNING |
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_VLAN_BASED_MAC_LEARNING);
+
+ br_port_cfg.bridge_port_id = cpu_to_le16(port);
+ br_port_cfg.src_mac_learning_disable = false;
+ br_port_cfg.vlan_src_mac_vid_enable = true;
+ br_port_cfg.vlan_dst_mac_vid_enable = true;
+
+ /* include all assigned user ports in the CPU portmap */
+ dsa_switch_for_each_user_port(dp, ds) {
+ /* it's safe to rely on cpu_dp being valid for user ports */
+ if (dp->cpu_dp->index != port)
+ continue;
+
+ bridge_port_map |= BIT(dp->index);
+ }
+ br_port_cfg.bridge_port_map[0] |= cpu_to_le16(bridge_port_map);
+
+ return MXL862XX_API_WRITE(priv, MXL862XX_BRIDGEPORT_CONFIGSET, br_port_cfg);
+}
+
+static int mxl862xx_add_single_port_bridge(struct dsa_switch *ds, int port)
+{
+ struct mxl862xx_bridge_port_config br_port_cfg = {};
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct mxl862xx_bridge_alloc br_alloc = {};
+ int ret;
+
+ ret = MXL862XX_API_READ(ds->priv, MXL862XX_BRIDGE_ALLOC, br_alloc);
+ if (ret) {
+ dev_err(ds->dev, "failed to allocate a bridge for port %d\n", port);
+ return ret;
+ }
+
+ br_port_cfg.bridge_id = br_alloc.bridge_id;
+ br_port_cfg.bridge_port_id = cpu_to_le16(port);
+ br_port_cfg.mask = cpu_to_le32(MXL862XX_BRIDGE_PORT_CONFIG_MASK_BRIDGE_ID |
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_BRIDGE_PORT_MAP |
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_MC_SRC_MAC_LEARNING |
+ MXL862XX_BRIDGE_PORT_CONFIG_MASK_VLAN_BASED_MAC_LEARNING);
+ br_port_cfg.src_mac_learning_disable = true;
+ br_port_cfg.vlan_src_mac_vid_enable = false;
+ br_port_cfg.vlan_dst_mac_vid_enable = false;
+ /* As this function is only called for user ports it is safe to rely on
+ * cpu_dp being valid
+ */
+ br_port_cfg.bridge_port_map[0] = cpu_to_le16(BIT(dp->cpu_dp->index));
+
+ return MXL862XX_API_WRITE(ds->priv, MXL862XX_BRIDGEPORT_CONFIGSET, br_port_cfg);
+}
+
+static int mxl862xx_port_setup(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ bool is_cpu_port = dsa_port_is_cpu(dp);
+ int ret;
+
+ /* disable port and flush MAC entries */
+ ret = mxl862xx_port_state(ds, port, false);
+ if (ret)
+ return ret;
+
+ mxl862xx_port_fast_age(ds, port);
+
+ /* skip setup for unused and DSA ports */
+ if (dsa_port_is_unused(dp) || dsa_port_is_dsa(dp))
+ return 0;
+
+ /* configure tag protocol */
+ ret = mxl862xx_configure_sp_tag_proto(ds, port, is_cpu_port);
+ if (ret)
+ return ret;
+
+ /* assign CTP port IDs */
+ ret = mxl862xx_configure_ctp_port(ds, port, port,
+ is_cpu_port ? 32 - port : 1);
+ if (ret)
+ return ret;
+
+ if (is_cpu_port)
+ /* assign user ports to CPU port bridge */
+ return mxl862xx_setup_cpu_bridge(ds, port);
+
+ /* setup single-port bridge for user ports */
+ return mxl862xx_add_single_port_bridge(ds, port);
+}
+
+static void mxl862xx_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+}
+
+static const struct dsa_switch_ops mxl862xx_switch_ops = {
+ .get_tag_protocol = mxl862xx_get_tag_protocol,
+ .setup = mxl862xx_setup,
+ .port_setup = mxl862xx_port_setup,
+ .phylink_get_caps = mxl862xx_phylink_get_caps,
+ .port_enable = mxl862xx_port_enable,
+ .port_disable = mxl862xx_port_disable,
+ .port_fast_age = mxl862xx_port_fast_age,
+};
+
+static void mxl862xx_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void mxl862xx_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+}
+
+static void mxl862xx_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+}
+
+static const struct phylink_mac_ops mxl862xx_phylink_mac_ops = {
+ .mac_config = mxl862xx_phylink_mac_config,
+ .mac_link_down = mxl862xx_phylink_mac_link_down,
+ .mac_link_up = mxl862xx_phylink_mac_link_up,
+};
+
+static int mxl862xx_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct mxl862xx_priv *priv;
+ struct dsa_switch *ds;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mdiodev = mdiodev;
+
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ priv->ds = ds;
+ ds->dev = dev;
+ ds->priv = priv;
+ ds->ops = &mxl862xx_switch_ops;
+ ds->phylink_mac_ops = &mxl862xx_phylink_mac_ops;
+ ds->num_ports = MXL862XX_MAX_PORTS;
+
+ dev_set_drvdata(dev, ds);
+
+ return dsa_register_switch(ds);
+}
+
+static void mxl862xx_remove(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_unregister_switch(ds);
+}
+
+static void mxl862xx_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static const struct of_device_id mxl862xx_of_match[] = {
+ { .compatible = "maxlinear,mxl86282" },
+ { .compatible = "maxlinear,mxl86252" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxl862xx_of_match);
+
+static struct mdio_driver mxl862xx_driver = {
+ .probe = mxl862xx_probe,
+ .remove = mxl862xx_remove,
+ .shutdown = mxl862xx_shutdown,
+ .mdiodrv.driver = {
+ .name = "mxl862xx",
+ .of_match_table = mxl862xx_of_match,
+ },
+};
+
+mdio_module_driver(mxl862xx_driver);
+
+MODULE_DESCRIPTION("Driver for MaxLinear MxL862xx switch family");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mxl862xx/mxl862xx.h b/drivers/net/dsa/mxl862xx/mxl862xx.h
new file mode 100644
index 000000000000..bfeb436942d5
--- /dev/null
+++ b/drivers/net/dsa/mxl862xx/mxl862xx.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __MXL862XX_H
+#define __MXL862XX_H
+
+#include <linux/mdio.h>
+#include <net/dsa.h>
+
+#define MXL862XX_MAX_PORTS 17
+
+struct mxl862xx_priv {
+ struct dsa_switch *ds;
+ struct mdio_device *mdiodev;
+};
+
+#endif /* __MXL862XX_H */
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 9e5ede932b42..5d34eb82e639 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -2003,11 +2003,11 @@ static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
}
static int felix_port_policer_add(struct dsa_switch *ds, int port,
- struct dsa_mall_policer_tc_entry *policer)
+ const struct flow_action_police *policer)
{
struct ocelot *ocelot = ds->priv;
struct ocelot_policer pol = {
- .rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
+ .rate = div_u64(policer->rate_bytes_ps, 1000) * 8,
.burst = policer->burst,
};
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index aa2145cf29a6..71a817c07a90 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1260,7 +1260,9 @@ static int sja1105_set_port_speed(struct sja1105_private *priv, int port,
int speed_mbps)
{
struct sja1105_mac_config_entry *mac;
+ struct device *dev = priv->ds->dev;
u64 speed;
+ int rc;
/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
* tables. On E/T, MAC reconfig tables are not readable, only writable.
@@ -1305,26 +1307,6 @@ static int sja1105_set_port_speed(struct sja1105_private *priv, int port,
*/
mac[port].speed = speed;
- return 0;
-}
-
-/* Write the MAC Configuration Table entry and, if necessary, the CGU settings,
- * after a link speedchange for this port.
- */
-static int sja1105_set_port_config(struct sja1105_private *priv, int port)
-{
- struct sja1105_mac_config_entry *mac;
- struct device *dev = priv->ds->dev;
- int rc;
-
- /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
- * tables. On E/T, MAC reconfig tables are not readable, only writable.
- * We have to *know* what the MAC looks like. For the sake of keeping
- * the code common, we'll use the static configuration tables as a
- * reasonable approximation for both E/T and P/Q/R/S.
- */
- mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
-
/* Write to the dynamic reconfiguration tables */
rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
&mac[port], true);
@@ -1380,9 +1362,7 @@ static void sja1105_mac_link_up(struct phylink_config *config,
struct sja1105_private *priv = dp->ds->priv;
int port = dp->index;
- if (!sja1105_set_port_speed(priv, port, speed))
- sja1105_set_port_config(priv, port);
-
+ sja1105_set_port_speed(priv, port, speed);
sja1105_inhibit_tx(priv, BIT(port), false);
}
@@ -2280,14 +2260,12 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
{
struct ptp_system_timestamp ptp_sts_before;
struct ptp_system_timestamp ptp_sts_after;
- u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0};
- u64 mac_speed[SJA1105_MAX_NUM_PORTS];
struct sja1105_mac_config_entry *mac;
struct dsa_switch *ds = priv->ds;
+ struct dsa_port *dp;
s64 t1, t2, t3, t4;
- s64 t12, t34;
- int rc, i;
- s64 now;
+ s64 t12, t34, now;
+ int rc;
mutex_lock(&priv->fdb_lock);
mutex_lock(&priv->mgmt_lock);
@@ -2299,13 +2277,9 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
* switch wants to see in the static config in order to allow us to
* change it through the dynamic interface later.
*/
- for (i = 0; i < ds->num_ports; i++) {
- mac_speed[i] = mac[i].speed;
- mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
-
- if (priv->pcs[i])
- bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i,
- MDIO_MMD_VEND2, MDIO_CTRL1);
+ dsa_switch_for_each_available_port(dp, ds) {
+ phylink_replay_link_begin(dp->pl);
+ mac[dp->index].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
}
/* No PTP operations can run right now */
@@ -2359,44 +2333,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
goto out;
}
- for (i = 0; i < ds->num_ports; i++) {
- struct phylink_pcs *pcs = priv->pcs[i];
- unsigned int neg_mode;
-
- mac[i].speed = mac_speed[i];
- rc = sja1105_set_port_config(priv, i);
- if (rc < 0)
- goto out;
-
- if (!pcs)
- continue;
-
- if (bmcr[i] & BMCR_ANENABLE)
- neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
- else
- neg_mode = PHYLINK_PCS_NEG_OUTBAND;
-
- rc = pcs->ops->pcs_config(pcs, neg_mode, priv->phy_mode[i],
- NULL, true);
- if (rc < 0)
- goto out;
-
- if (neg_mode == PHYLINK_PCS_NEG_OUTBAND) {
- int speed = SPEED_UNKNOWN;
-
- if (priv->phy_mode[i] == PHY_INTERFACE_MODE_2500BASEX)
- speed = SPEED_2500;
- else if (bmcr[i] & BMCR_SPEED1000)
- speed = SPEED_1000;
- else if (bmcr[i] & BMCR_SPEED100)
- speed = SPEED_100;
- else
- speed = SPEED_10;
-
- pcs->ops->pcs_link_up(pcs, neg_mode, priv->phy_mode[i],
- speed, DUPLEX_FULL);
- }
- }
+ dsa_switch_for_each_available_port(dp, ds)
+ phylink_replay_link_end(dp->pl);
rc = sja1105_reload_cbs(priv);
if (rc < 0)
@@ -2903,7 +2841,7 @@ static void sja1105_mirror_del(struct dsa_switch *ds, int port,
}
static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
- struct dsa_mall_policer_tc_entry *policer)
+ const struct flow_action_police *policer)
{
struct sja1105_l2_policing_entry *policing;
struct sja1105_private *priv = ds->priv;
@@ -2914,7 +2852,7 @@ static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
* the value of RATE bytes divided by 64, up to a maximum of SMAX
* bytes.
*/
- policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
+ policing[port].rate = div_u64(512 * policer->rate_bytes_ps,
1000000);
policing[port].smax = policer->burst;
diff --git a/drivers/net/dsa/yt921x.c b/drivers/net/dsa/yt921x.c
index 7b8c1549a0fb..904613f4694a 100644
--- a/drivers/net/dsa/yt921x.c
+++ b/drivers/net/dsa/yt921x.c
@@ -8,6 +8,7 @@
* Copyright (c) 2025 David Yang
*/
+#include <linux/dcbnl.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
@@ -18,8 +19,11 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/sort.h>
#include <net/dsa.h>
+#include <net/dscp.h>
+#include <net/ieee8021q.h>
#include "yt921x.h"
@@ -1118,6 +1122,188 @@ yt921x_dsa_port_mirror_add(struct dsa_switch *ds, int port,
return res;
}
+static int yt921x_lag_hash(struct yt921x_priv *priv, u32 ctrl, bool unique_lag,
+ struct netlink_ext_ack *extack)
+{
+ u32 val;
+ int res;
+
+ /* The hash mode is global, so the same hash mode must be used by both
+ * possible LAGs. If this is the only configured LAG, any hash mode can
+ * be set; otherwise, changing the hash mode requires removing all LAGs
+ * and configuring the new mode with the last one.
+ */
+ if (unique_lag) {
+ res = yt921x_reg_write(priv, YT921X_LAG_HASH, ctrl);
+ if (res)
+ return res;
+ } else {
+ res = yt921x_reg_read(priv, YT921X_LAG_HASH, &val);
+ if (res)
+ return res;
+
+ if (val != ctrl) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mismatched Hash Mode across different lags is not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int yt921x_lag_set(struct yt921x_priv *priv, u8 index, u16 ports_mask)
+{
+ unsigned long targets_mask = ports_mask;
+ unsigned int cnt;
+ u32 ctrl;
+ int port;
+ int res;
+
+ cnt = 0;
+ for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
+ ctrl = YT921X_LAG_MEMBER_PORT(port);
+ res = yt921x_reg_write(priv, YT921X_LAG_MEMBERnm(index, cnt),
+ ctrl);
+ if (res)
+ return res;
+
+ cnt++;
+ }
+
+ ctrl = YT921X_LAG_GROUP_PORTS(ports_mask) |
+ YT921X_LAG_GROUP_MEMBER_NUM(cnt);
+ return yt921x_reg_write(priv, YT921X_LAG_GROUPn(index), ctrl);
+}
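+
+/* Worked example (hypothetical port mask): ports_mask = BIT(3) | BIT(4)
+ * programs member slots 0 and 1 of the group with ports 3 and 4, then
+ * writes YT921X_LAG_GROUPn(index) with the full port mask and a member
+ * count of 2.
+ */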
+
+static int
+yt921x_dsa_port_lag_leave(struct dsa_switch *ds, int port, struct dsa_lag lag)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct dsa_port *dp;
+ u32 ctrl;
+ int res;
+
+ if (!lag.id)
+ return -EINVAL;
+
+ ctrl = 0;
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
+ ctrl |= BIT(dp->index);
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_lag_set(priv, lag.id - 1, ctrl);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_lag_check(struct dsa_switch *ds, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ unsigned int members;
+ struct dsa_port *dp;
+
+ if (!lag.id)
+ return -EINVAL;
+
+ members = 0;
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
+ /* Includes the port joining the LAG */
+ members++;
+
+ if (members > YT921X_LAG_PORT_NUM) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 4 LAG ports");
+ return -EOPNOTSUPP;
+ }
+
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
+ return -EOPNOTSUPP;
+ }
+
+ if (info->hash_type != NETDEV_LAG_HASH_L2 &&
+ info->hash_type != NETDEV_LAG_HASH_L23 &&
+ info->hash_type != NETDEV_LAG_HASH_L34) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload L2 or L2+L3 or L3+L4 TX hash");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+yt921x_dsa_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct dsa_port *dp;
+ bool unique_lag;
+ unsigned int i;
+ u32 ctrl;
+ int res;
+
+ res = yt921x_dsa_port_lag_check(ds, lag, info, extack);
+ if (res)
+ return res;
+
+ ctrl = 0;
+ switch (info->hash_type) {
+ case NETDEV_LAG_HASH_L34:
+ ctrl |= YT921X_LAG_HASH_IP_DST;
+ ctrl |= YT921X_LAG_HASH_IP_SRC;
+ ctrl |= YT921X_LAG_HASH_IP_PROTO;
+
+ ctrl |= YT921X_LAG_HASH_L4_DPORT;
+ ctrl |= YT921X_LAG_HASH_L4_SPORT;
+ break;
+ case NETDEV_LAG_HASH_L23:
+ ctrl |= YT921X_LAG_HASH_MAC_DA;
+ ctrl |= YT921X_LAG_HASH_MAC_SA;
+
+ ctrl |= YT921X_LAG_HASH_IP_DST;
+ ctrl |= YT921X_LAG_HASH_IP_SRC;
+ ctrl |= YT921X_LAG_HASH_IP_PROTO;
+ break;
+ case NETDEV_LAG_HASH_L2:
+ ctrl |= YT921X_LAG_HASH_MAC_DA;
+ ctrl |= YT921X_LAG_HASH_MAC_SA;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are the only configured LAG */
+ unique_lag = true;
+ dsa_lags_foreach_id(i, ds->dst)
+ if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
+ unique_lag = false;
+ break;
+ }
+
+ mutex_lock(&priv->reg_lock);
+ do {
+ res = yt921x_lag_hash(priv, ctrl, unique_lag, extack);
+ if (res)
+ break;
+
+ ctrl = 0;
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
+ ctrl |= BIT(dp->index);
+ res = yt921x_lag_set(priv, lag.id - 1, ctrl);
+ } while (0);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
static int yt921x_fdb_wait(struct yt921x_priv *priv, u32 *valp)
{
struct device *dev = to_device(priv);
@@ -1588,6 +1774,21 @@ yt921x_dsa_port_mdb_add(struct dsa_switch *ds, int port,
}
static int
+yt921x_vlan_aware_set(struct yt921x_priv *priv, int port, bool vlan_aware)
+{
+ u32 ctrl;
+
+ /* Abuse SVLAN for PCP parsing without polluting the FDB - it just
+ * works despite YT921X_VLAN_CTRL_SVLAN_EN never being set.
+ */
+ if (!vlan_aware)
+ ctrl = YT921X_PORT_IGR_TPIDn_STAG(0);
+ else
+ ctrl = YT921X_PORT_IGR_TPIDn_CTAG(0);
+ return yt921x_reg_write(priv, YT921X_PORTn_IGR_TPID(port), ctrl);
+}
+
+static int
yt921x_port_set_pvid(struct yt921x_priv *priv, int port, u16 vid)
{
u32 mask;
@@ -1636,14 +1837,7 @@ yt921x_vlan_filtering(struct yt921x_priv *priv, int port, bool vlan_filtering)
if (res)
return res;
- /* Turn on / off VLAN awareness */
- mask = YT921X_PORT_IGR_TPIDn_CTAG_M;
- if (!vlan_filtering)
- ctrl = 0;
- else
- ctrl = YT921X_PORT_IGR_TPIDn_CTAG(0);
- res = yt921x_reg_update_bits(priv, YT921X_PORTn_IGR_TPID(port),
- mask, ctrl);
+ res = yt921x_vlan_aware_set(priv, port, vlan_filtering);
if (res)
return res;
@@ -1840,8 +2034,7 @@ static int yt921x_userport_standalone(struct yt921x_priv *priv, int port)
return res;
/* Turn off VLAN awareness */
- mask = YT921X_PORT_IGR_TPIDn_CTAG_M;
- res = yt921x_reg_clear_bits(priv, YT921X_PORTn_IGR_TPID(port), mask);
+ res = yt921x_vlan_aware_set(priv, port, false);
if (res)
return res;
@@ -2210,6 +2403,122 @@ yt921x_dsa_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
port, res);
}
+static int __maybe_unused
+yt921x_dsa_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u32 val;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_read(priv, YT921X_PORTn_QOS(port), &val);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+
+ return FIELD_GET(YT921X_PORT_QOS_PRIO_M, val);
+}
+
+static int __maybe_unused
+yt921x_dsa_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ if (prio >= YT921X_PRIO_NUM)
+ return -EINVAL;
+
+ mutex_lock(&priv->reg_lock);
+ mask = YT921X_PORT_QOS_PRIO_M | YT921X_PORT_QOS_PRIO_EN;
+ ctrl = YT921X_PORT_QOS_PRIO(prio) | YT921X_PORT_QOS_PRIO_EN;
+ res = yt921x_reg_update_bits(priv, YT921X_PORTn_QOS(port), mask, ctrl);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
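+/* Sort {selector, order} pairs by descending order value: the most trusted
+ * selector comes first and disabled entries (order 0) sink to the end,
+ * where the caller's loop stops.
+ */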
+static int __maybe_unused appprios_cmp(const void *a, const void *b)
+{
+ return ((const u8 *)b)[1] - ((const u8 *)a)[1];
+}
+
+static int __maybe_unused
+yt921x_dsa_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel,
+ int *nselp)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u8 appprios[2][2] = {};
+ int nsel;
+ u32 val;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_read(priv, YT921X_PORTn_PRIO_ORD(port), &val);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+
+ appprios[0][0] = IEEE_8021QAZ_APP_SEL_DSCP;
+ appprios[0][1] = (val >> (3 * YT921X_APP_SEL_DSCP)) & 7;
+ appprios[1][0] = DCB_APP_SEL_PCP;
+ appprios[1][1] = (val >> (3 * YT921X_APP_SEL_CVLAN_PCP)) & 7;
+ sort(appprios, ARRAY_SIZE(appprios), sizeof(appprios[0]), appprios_cmp,
+ NULL);
+
+ nsel = 0;
+ for (int i = 0; i < ARRAY_SIZE(appprios) && appprios[i][1]; i++) {
+ sel[nsel] = appprios[i][0];
+ nsel++;
+ }
+ *nselp = nsel;
+
+ return 0;
+}
+
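+/* Worked example (hypothetical selector list): with
+ * sel = {IEEE_8021QAZ_APP_SEL_DSCP, DCB_APP_SEL_PCP}, the DSCP selector
+ * gets the highest priority order (7) and both PCP selectors get order 6,
+ * which yt921x_dsa_port_get_apptrust() above reads back in the same
+ * relative order.
+ */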
+static int __maybe_unused
+yt921x_dsa_port_set_apptrust(struct dsa_switch *ds, int port, const u8 *sel,
+ int nsel)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ u32 ctrl;
+ int res;
+
+ if (nsel > YT921X_APP_SEL_NUM)
+ return -EINVAL;
+
+ ctrl = 0;
+ for (int i = 0; i < nsel; i++) {
+ switch (sel[i]) {
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ ctrl |= YT921X_PORT_PRIO_ORD_APPm(YT921X_APP_SEL_DSCP,
+ 7 - i);
+ break;
+ case DCB_APP_SEL_PCP:
+ ctrl |= YT921X_PORT_PRIO_ORD_APPm(YT921X_APP_SEL_CVLAN_PCP,
+ 7 - i);
+ ctrl |= YT921X_PORT_PRIO_ORD_APPm(YT921X_APP_SEL_SVLAN_PCP,
+ 7 - i);
+ break;
+ default:
+ dev_err(dev,
+ "Invalid apptrust selector (at %d-th). Supported: dscp, pcp\n",
+ i + 1);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_write(priv, YT921X_PORTn_PRIO_ORD(port), ctrl);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
static int yt921x_port_down(struct yt921x_priv *priv, int port)
{
u32 mask;
@@ -2534,6 +2843,13 @@ static int yt921x_port_setup(struct yt921x_priv *priv, int port)
if (res)
return res;
+ /* Clear prio order (even if DCB is not enabled) to avoid unsolicited
+ * priorities
+ */
+ res = yt921x_reg_write(priv, YT921X_PORTn_PRIO_ORD(port), 0);
+ if (res)
+ return res;
+
if (dsa_is_cpu_port(ds, port)) {
/* Egress of CPU port is supposed to be completely controlled
* via tagging, so set to oneway isolated (drop all packets
@@ -2577,6 +2893,66 @@ static int yt921x_dsa_port_setup(struct dsa_switch *ds, int port)
return res;
}
+/* Not "port" - DSCP mapping is global */
+static int __maybe_unused
+yt921x_dsa_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u32 val;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_read(priv, YT921X_IPM_DSCPn(dscp), &val);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+
+ return FIELD_GET(YT921X_IPM_PRIO_M, val);
+}
+
+static int __maybe_unused
+yt921x_dsa_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u32 val;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ /* During a "dcb app replace" command, the new app table entry will be
+ * added first, then the old one will be deleted. But the hardware only
+ * supports one QoS class per DSCP value (duh), so if we blindly delete
+ * the app table entry for this DSCP value, we end up deleting the
+ * entry with the new priority. Avoid that by checking whether user
+ * space wants to delete the priority which is currently configured, or
+ * something else which is no longer current.
+ */
+ res = yt921x_reg_read(priv, YT921X_IPM_DSCPn(dscp), &val);
+ if (!res && FIELD_GET(YT921X_IPM_PRIO_M, val) == prio)
+ res = yt921x_reg_write(priv, YT921X_IPM_DSCPn(dscp),
+ YT921X_IPM_PRIO(IEEE8021Q_TT_BK));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int __maybe_unused
+yt921x_dsa_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ if (prio >= YT921X_PRIO_NUM)
+ return -EINVAL;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_write(priv, YT921X_IPM_DSCPn(dscp),
+ YT921X_IPM_PRIO(prio));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
static int yt921x_edata_wait(struct yt921x_priv *priv, u32 *valp)
{
u32 val = YT921X_EDATA_DATA_IDLE;
@@ -2716,7 +3092,7 @@ static int yt921x_chip_reset(struct yt921x_priv *priv)
return 0;
}
-static int yt921x_chip_setup(struct yt921x_priv *priv)
+static int yt921x_chip_setup_dsa(struct yt921x_priv *priv)
{
struct dsa_switch *ds = &priv->ds;
unsigned long cpu_ports_mask;
@@ -2734,16 +3110,6 @@ static int yt921x_chip_setup(struct yt921x_priv *priv)
if (res)
return res;
- /* Enable and clear MIB */
- res = yt921x_reg_set_bits(priv, YT921X_FUNC, YT921X_FUNC_MIB);
- if (res)
- return res;
-
- ctrl = YT921X_MIB_CTRL_CLEAN | YT921X_MIB_CTRL_ALL_PORT;
- res = yt921x_reg_write(priv, YT921X_MIB_CTRL, ctrl);
- if (res)
- return res;
-
/* Setup software switch */
ctrl = YT921X_CPU_COPY_TO_EXT_CPU;
res = yt921x_reg_write(priv, YT921X_CPU_COPY, ctrl);
@@ -2796,6 +3162,76 @@ static int yt921x_chip_setup(struct yt921x_priv *priv)
if (res)
return res;
+ return 0;
+}
+
+static int __maybe_unused yt921x_chip_setup_qos(struct yt921x_priv *priv)
+{
+ u32 ctrl;
+ int res;
+
+ /* DSCP to internal priorities */
+ for (u8 dscp = 0; dscp < DSCP_MAX; dscp++) {
+ int prio = ietf_dscp_to_ieee8021q_tt(dscp);
+
+ if (prio < 0)
+ return prio;
+
+ res = yt921x_reg_write(priv, YT921X_IPM_DSCPn(dscp),
+ YT921X_IPM_PRIO(prio));
+ if (res)
+ return res;
+ }
+
+ /* 802.1Q QoS to internal priorities */
+ for (u8 pcp = 0; pcp < 8; pcp++)
+ for (u8 dei = 0; dei < 2; dei++) {
+ ctrl = YT921X_IPM_PRIO(pcp);
+ if (dei)
+ /* "Red" almost means drop, so it's not that
+ * useful. Note that tc police does not support
+ * Three-Color very well
+ */
+ ctrl |= YT921X_IPM_COLOR_YELLOW;
+
+ for (u8 svlan = 0; svlan < 2; svlan++) {
+ u32 reg = YT921X_IPM_PCPn(svlan, dei, pcp);
+
+ res = yt921x_reg_write(priv, reg, ctrl);
+ if (res)
+ return res;
+ }
+ }
+
+ return 0;
+}
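+
+/* Indexing sketch (illustrative): YT921X_IPM_PCPn(svlan, dei, pcp)
+ * linearizes the three loop variables above, e.g. the SVLAN entry for
+ * PCP 5 with DEI 0 lands at 0x180100 + 4 * (16 * 1 + 8 * 0 + 5) ==
+ * 0x180154.
+ */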
+
+static int yt921x_chip_setup(struct yt921x_priv *priv)
+{
+ u32 ctrl;
+ int res;
+
+ ctrl = YT921X_FUNC_MIB;
+ res = yt921x_reg_set_bits(priv, YT921X_FUNC, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_chip_setup_dsa(priv);
+ if (res)
+ return res;
+
+#if IS_ENABLED(CONFIG_DCB)
+ res = yt921x_chip_setup_qos(priv);
+ if (res)
+ return res;
+#endif
+
+ /* Clear MIB */
+ ctrl = YT921X_MIB_CTRL_CLEAN | YT921X_MIB_CTRL_ALL_PORT;
+ res = yt921x_reg_write(priv, YT921X_MIB_CTRL, ctrl);
+ if (res)
+ return res;
+
/* Miscellaneous */
res = yt921x_reg_set_bits(priv, YT921X_SENSOR, YT921X_SENSOR_TEMP);
if (res)
@@ -2881,6 +3317,9 @@ static const struct dsa_switch_ops yt921x_dsa_switch_ops = {
/* mirror */
.port_mirror_del = yt921x_dsa_port_mirror_del,
.port_mirror_add = yt921x_dsa_port_mirror_add,
+ /* lag */
+ .port_lag_leave = yt921x_dsa_port_lag_leave,
+ .port_lag_join = yt921x_dsa_port_lag_join,
/* fdb */
.port_fdb_dump = yt921x_dsa_port_fdb_dump,
.port_fast_age = yt921x_dsa_port_fast_age,
@@ -2902,10 +3341,23 @@ static const struct dsa_switch_ops yt921x_dsa_switch_ops = {
.port_mst_state_set = yt921x_dsa_port_mst_state_set,
.vlan_msti_set = yt921x_dsa_vlan_msti_set,
.port_stp_state_set = yt921x_dsa_port_stp_state_set,
+#if IS_ENABLED(CONFIG_DCB)
+ /* dcb */
+ .port_get_default_prio = yt921x_dsa_port_get_default_prio,
+ .port_set_default_prio = yt921x_dsa_port_set_default_prio,
+ .port_get_apptrust = yt921x_dsa_port_get_apptrust,
+ .port_set_apptrust = yt921x_dsa_port_set_apptrust,
+#endif
/* port */
.get_tag_protocol = yt921x_dsa_get_tag_protocol,
.phylink_get_caps = yt921x_dsa_phylink_get_caps,
.port_setup = yt921x_dsa_port_setup,
+#if IS_ENABLED(CONFIG_DCB)
+ /* dscp */
+ .port_get_dscp_prio = yt921x_dsa_port_get_dscp_prio,
+ .port_del_dscp_prio = yt921x_dsa_port_del_dscp_prio,
+ .port_add_dscp_prio = yt921x_dsa_port_add_dscp_prio,
+#endif
/* chip */
.setup = yt921x_dsa_setup,
};
@@ -2972,11 +3424,13 @@ static int yt921x_mdio_probe(struct mdio_device *mdiodev)
ds = &priv->ds;
ds->dev = dev;
ds->assisted_learning_on_cpu_port = true;
+ ds->dscp_prio_mapping_is_global = true;
ds->priv = priv;
ds->ops = &yt921x_dsa_switch_ops;
ds->ageing_time_min = 1 * 5000;
ds->ageing_time_max = U16_MAX * 5000;
ds->phylink_mac_ops = &yt921x_phylink_mac_ops;
+ ds->num_lag_ids = YT921X_LAG_NUM;
ds->num_ports = YT921X_PORT_NUM;
mdiodev_set_drvdata(mdiodev, priv);
diff --git a/drivers/net/dsa/yt921x.h b/drivers/net/dsa/yt921x.h
index 61bb0ab3b09a..3f129b8d403f 100644
--- a/drivers/net/dsa/yt921x.h
+++ b/drivers/net/dsa/yt921x.h
@@ -269,6 +269,38 @@
#define YT921X_TPID_EGRn(x) (0x100300 + 4 * (x)) /* [0, 3] */
#define YT921X_TPID_EGR_TPID_M GENMASK(15, 0)
+#define YT921X_IPM_DSCPn(n) (0x180000 + 4 * (n)) /* Internal Priority Map */
+#define YT921X_IPM_PCPn(map, dei, pcp) (0x180100 + 4 * (16 * (map) + 8 * (dei) + (pcp)))
+#define YT921X_IPM_PRIO_M GENMASK(4, 2)
+#define YT921X_IPM_PRIO(x) FIELD_PREP(YT921X_IPM_PRIO_M, (x))
+#define YT921X_IPM_COLOR_M GENMASK(1, 0)
+#define YT921X_IPM_COLOR(x) FIELD_PREP(YT921X_IPM_COLOR_M, (x))
+#define YT921X_IPM_COLOR_GREEN YT921X_IPM_COLOR(0)
+#define YT921X_IPM_COLOR_YELLOW YT921X_IPM_COLOR(1)
+#define YT921X_IPM_COLOR_RED YT921X_IPM_COLOR(2)
+#define YT921X_PORTn_QOS(port) (0x180180 + 4 * (port))
+#define YT921X_PORT_QOS_CVLAN_PRIO_MAP_ID BIT(5)
+#define YT921X_PORT_QOS_SVLAN_PRIO_MAP_ID BIT(4)
+#define YT921X_PORT_QOS_PRIO_M GENMASK(3, 1)
+#define YT921X_PORT_QOS_PRIO(x) FIELD_PREP(YT921X_PORT_QOS_PRIO_M, (x))
+#define YT921X_PORT_QOS_PRIO_EN BIT(0)
+#define YT921X_PORTn_PRIO_ORD(port) (0x180200 + 4 * (port))
+#define YT921X_PORT_PRIO_ORD_APPm_M(m) GENMASK(3 * (m) + 2, 3 * (m))
+#define YT921X_PORT_PRIO_ORD_APPm(m, x) ((x) << (3 * (m))) /* 0: disabled, except PORT_QOS_PRIO */
+
+enum yt921x_app_selector {
+ YT921X_APP_SEL_MAC_SA,
+ YT921X_APP_SEL_MAC_DA,
+ YT921X_APP_SEL_VID,
+ YT921X_APP_SEL_ACL,
+ YT921X_APP_SEL_DSCP,
+ YT921X_APP_SEL_CVLAN_PCP,
+ YT921X_APP_SEL_SVLAN_PCP,
+ /* The physical port, i.e. YT921X_PORT_QOS_PRIO */
+ YT921X_APP_SEL_PORT,
+ YT921X_APP_SEL_NUM
+};
+
#define YT921X_VLAN_IGR_FILTER 0x180280
#define YT921X_VLAN_IGR_FILTER_PORTn_BYPASS_IGMP(port) BIT((port) + 11)
#define YT921X_VLAN_IGR_FILTER_PORTn(port) BIT(port)
@@ -337,7 +369,7 @@
#define YT921X_FDB_OUT0 0x1804b0
#define YT921X_FDB_IO0_ADDR_HI4_M GENMASK(31, 0)
#define YT921X_FDB_OUT1 0x1804b4
-#define YT921X_FDB_IO1_EGR_INT_PRI_EN BIT(31)
+#define YT921X_FDB_IO1_EGR_PRIO_EN BIT(31)
#define YT921X_FDB_IO1_STATUS_M GENMASK(30, 28)
#define YT921X_FDB_IO1_STATUS(x) FIELD_PREP(YT921X_FDB_IO1_STATUS_M, (x))
#define YT921X_FDB_IO1_STATUS_INVALID YT921X_FDB_IO1_STATUS(0)
@@ -356,9 +388,9 @@
#define YT921X_FDB_IO2_EGR_PORTS(x) FIELD_PREP(YT921X_FDB_IO2_EGR_PORTS_M, (x))
#define YT921X_FDB_IO2_EGR_DROP BIT(17)
#define YT921X_FDB_IO2_COPY_TO_CPU BIT(16)
-#define YT921X_FDB_IO2_IGR_INT_PRI_EN BIT(15)
-#define YT921X_FDB_IO2_INT_PRI_M GENMASK(14, 12)
-#define YT921X_FDB_IO2_INT_PRI(x) FIELD_PREP(YT921X_FDB_IO2_INT_PRI_M, (x))
+#define YT921X_FDB_IO2_IGR_PRIO_EN BIT(15)
+#define YT921X_FDB_IO2_PRIO_M GENMASK(14, 12)
+#define YT921X_FDB_IO2_PRIO(x) FIELD_PREP(YT921X_FDB_IO2_PRIO_M, (x))
#define YT921X_FDB_IO2_NEW_VID_M GENMASK(11, 0)
#define YT921X_FDB_IO2_NEW_VID(x) FIELD_PREP(YT921X_FDB_IO2_NEW_VID_M, (x))
#define YT921X_FILTER_UNK_UCAST 0x180508
@@ -370,6 +402,14 @@
#define YT921X_FILTER_PORTn(port) BIT(port)
#define YT921X_VLAN_EGR_FILTER 0x180598
#define YT921X_VLAN_EGR_FILTER_PORTn(port) BIT(port)
+#define YT921X_LAG_GROUPn(n) (0x1805a8 + 4 * (n))
+#define YT921X_LAG_GROUP_PORTS_M GENMASK(13, 3)
+#define YT921X_LAG_GROUP_PORTS(x) FIELD_PREP(YT921X_LAG_GROUP_PORTS_M, (x))
+#define YT921X_LAG_GROUP_MEMBER_NUM_M GENMASK(2, 0)
+#define YT921X_LAG_GROUP_MEMBER_NUM(x) FIELD_PREP(YT921X_LAG_GROUP_MEMBER_NUM_M, (x))
+#define YT921X_LAG_MEMBERnm(n, m) (0x1805b0 + 4 * (4 * (n) + (m)))
+#define YT921X_LAG_MEMBER_PORT_M GENMASK(3, 0)
+#define YT921X_LAG_MEMBER_PORT(x) FIELD_PREP(YT921X_LAG_MEMBER_PORT_M, (x))
#define YT921X_CPU_COPY 0x180690
#define YT921X_CPU_COPY_FORCE_INT_PORT BIT(2)
#define YT921X_CPU_COPY_TO_INT_CPU BIT(1)
@@ -398,8 +438,9 @@
#define YT921X_VLAN_CTRL_FID_M GENMASK_ULL(34, 23)
#define YT921X_VLAN_CTRL_FID(x) FIELD_PREP(YT921X_VLAN_CTRL_FID_M, (x))
#define YT921X_VLAN_CTRL_LEARN_DIS BIT_ULL(22)
-#define YT921X_VLAN_CTRL_INT_PRI_EN BIT_ULL(21)
-#define YT921X_VLAN_CTRL_INT_PRI_M GENMASK_ULL(20, 18)
+#define YT921X_VLAN_CTRL_PRIO_EN BIT_ULL(21)
+#define YT921X_VLAN_CTRL_PRIO_M GENMASK_ULL(20, 18)
+#define YT921X_VLAN_CTRL_PRIO(x) FIELD_PREP(YT921X_VLAN_CTRL_PRIO_M, (x))
#define YT921X_VLAN_CTRL_PORTS_M GENMASK_ULL(17, 7)
#define YT921X_VLAN_CTRL_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_PORTS_M, (x))
#define YT921X_VLAN_CTRL_PORTn(port) BIT_ULL((port) + 7)
@@ -414,16 +455,25 @@
#define YT921X_PORT_IGR_TPIDn_STAG(x) BIT((x) + 4)
#define YT921X_PORT_IGR_TPIDn_CTAG_M GENMASK(3, 0)
#define YT921X_PORT_IGR_TPIDn_CTAG(x) BIT(x)
+#define YT921X_LAG_HASH 0x210090
+#define YT921X_LAG_HASH_L4_SPORT BIT(7)
+#define YT921X_LAG_HASH_L4_DPORT BIT(6)
+#define YT921X_LAG_HASH_IP_PROTO BIT(5)
+#define YT921X_LAG_HASH_IP_SRC BIT(4)
+#define YT921X_LAG_HASH_IP_DST BIT(3)
+#define YT921X_LAG_HASH_MAC_SA BIT(2)
+#define YT921X_LAG_HASH_MAC_DA BIT(1)
+#define YT921X_LAG_HASH_SRC_PORT BIT(0)
#define YT921X_PORTn_VLAN_CTRL(port) (0x230010 + 4 * (port))
-#define YT921X_PORT_VLAN_CTRL_SVLAN_PRI_EN BIT(31)
-#define YT921X_PORT_VLAN_CTRL_CVLAN_PRI_EN BIT(30)
+#define YT921X_PORT_VLAN_CTRL_SVLAN_PRIO_EN BIT(31)
+#define YT921X_PORT_VLAN_CTRL_CVLAN_PRIO_EN BIT(30)
#define YT921X_PORT_VLAN_CTRL_SVID_M GENMASK(29, 18)
#define YT921X_PORT_VLAN_CTRL_SVID(x) FIELD_PREP(YT921X_PORT_VLAN_CTRL_SVID_M, (x))
#define YT921X_PORT_VLAN_CTRL_CVID_M GENMASK(17, 6)
#define YT921X_PORT_VLAN_CTRL_CVID(x) FIELD_PREP(YT921X_PORT_VLAN_CTRL_CVID_M, (x))
-#define YT921X_PORT_VLAN_CTRL_SVLAN_PRI_M GENMASK(5, 3)
-#define YT921X_PORT_VLAN_CTRL_CVLAN_PRI_M GENMASK(2, 0)
+#define YT921X_PORT_VLAN_CTRL_SVLAN_PRIO_M GENMASK(5, 3)
+#define YT921X_PORT_VLAN_CTRL_CVLAN_PRIO_M GENMASK(2, 0)
#define YT921X_PORTn_VLAN_CTRL1(port) (0x230080 + 4 * (port))
#define YT921X_PORT_VLAN_CTRL1_VLAN_RANGE_EN BIT(8)
#define YT921X_PORT_VLAN_CTRL1_VLAN_RANGE_PROFILE_ID_M GENMASK(7, 4)
@@ -458,6 +508,11 @@ enum yt921x_fdb_entry_status {
#define YT921X_MSTI_NUM 16
+#define YT921X_LAG_NUM 2
+#define YT921X_LAG_PORT_NUM 4
+
+#define YT921X_PRIO_NUM 8
+
#define YT9215_MAJOR 0x9002
#define YT9218_MAJOR 0x9001
diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c
index c5636245f1ca..8e4354568f04 100644
--- a/drivers/net/ethernet/8390/8390.c
+++ b/drivers/net/ethernet/8390/8390.c
@@ -86,19 +86,5 @@ void NS8390_init(struct net_device *dev, int startp)
}
EXPORT_SYMBOL(NS8390_init);
-#if defined(MODULE)
-
-static int __init ns8390_module_init(void)
-{
- return 0;
-}
-
-static void __exit ns8390_module_exit(void)
-{
-}
-
-module_init(ns8390_module_init);
-module_exit(ns8390_module_exit);
-#endif /* MODULE */
MODULE_DESCRIPTION("National Semiconductor 8390 core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
index 6d429b11e9c6..a0bfc8e34f79 100644
--- a/drivers/net/ethernet/8390/8390p.c
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -91,16 +91,5 @@ void NS8390p_init(struct net_device *dev, int startp)
}
EXPORT_SYMBOL(NS8390p_init);
-static int __init NS8390p_init_module(void)
-{
- return 0;
-}
-
-static void __exit NS8390p_cleanup_module(void)
-{
-}
-
-module_init(NS8390p_init_module);
-module_exit(NS8390p_cleanup_module);
MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 4a1b368ca7e6..aa7103e7f47f 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -55,18 +55,6 @@ source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
source "drivers/net/ethernet/cortina/Kconfig"
source "drivers/net/ethernet/davicom/Kconfig"
-
-config DNET
- tristate "Dave ethernet support (DNET)"
- depends on HAS_IOMEM
- select PHYLIB
- help
- The Dave ethernet interface (DNET) is found on Qong Board FPGA.
- Say Y to include support for the DNET chip.
-
- To compile this driver as a module, choose M here: the module
- will be called dnet.
-
source "drivers/net/ethernet/dec/Kconfig"
source "drivers/net/ethernet/dlink/Kconfig"
source "drivers/net/ethernet/emulex/Kconfig"
@@ -143,7 +131,6 @@ config FEALNX
source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/natsemi/Kconfig"
-source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 2e18df8ca8ec..6615a67a63d5 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -34,7 +34,6 @@ obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
obj-$(CONFIG_NET_VENDOR_CORTINA) += cortina/
obj-$(CONFIG_CX_ECAT) += ec_bhf.o
obj-$(CONFIG_DM9000) += davicom/
-obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_NET_VENDOR_DEC) += dec/
obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
@@ -69,7 +68,6 @@ obj-$(CONFIG_NET_VENDOR_MUCSE) += mucse/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
-obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
obj-$(CONFIG_NET_VENDOR_NI) += ni/
obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 71a2397edf2b..1b4e37d000b9 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -123,7 +123,7 @@ enum adin1110_chips_id {
struct adin1110_cfg {
enum adin1110_chips_id id;
- char name[MDIO_NAME_SIZE];
+ const char *name;
u32 phy_ids[PHY_MAX_ADDR];
u32 ports_nr;
u32 phy_id_val;
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 315d97036ac1..62bcbbbe2a95 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -108,11 +108,11 @@ static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
u32 vip_port;
switch (port->id) {
- case 3:
+ case AIROHA_GDM3_IDX:
/* FIXME: handle XSI_PCIE1_PORT */
vip_port = XSI_PCIE0_VIP_PORT_MASK;
break;
- case 4:
+ case AIROHA_GDM4_IDX:
/* FIXME: handle XSI_USB_PORT */
vip_port = XSI_ETH_VIP_PORT_MASK;
break;
@@ -514,8 +514,8 @@ static int airoha_fe_init(struct airoha_eth *eth)
FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
- airoha_fe_set(eth, REG_GDM_FWD_CFG(3), GDM_PAD_EN_MASK);
- airoha_fe_set(eth, REG_GDM_FWD_CFG(4), GDM_PAD_EN_MASK);
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(AIROHA_GDM3_IDX), GDM_PAD_EN_MASK);
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(AIROHA_GDM4_IDX), GDM_PAD_EN_MASK);
airoha_fe_crsn_qsel_init(eth);
@@ -1690,27 +1690,29 @@ static int airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
/* Forward the traffic to the proper GDM port */
pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
: FE_PSE_PORT_GDM4;
- airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port);
- airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC_MASK);
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(AIROHA_GDM2_IDX),
+ pse_port);
+ airoha_fe_clear(eth, REG_GDM_FWD_CFG(AIROHA_GDM2_IDX),
+ GDM_STRIP_CRC_MASK);
/* Enable GDM2 loopback */
- airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff);
- airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
+ airoha_fe_wr(eth, REG_GDM_TXCHN_EN(AIROHA_GDM2_IDX), 0xffffffff);
+ airoha_fe_wr(eth, REG_GDM_RXCHN_EN(AIROHA_GDM2_IDX), 0xffff);
chan = port->id == AIROHA_GDM3_IDX ? airoha_is_7581(eth) ? 4 : 3 : 0;
- airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
+ airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(AIROHA_GDM2_IDX),
LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
FIELD_PREP(LPBK_CHAN_MASK, chan) |
LBK_GAP_MODE_MASK | LBK_LEN_MODE_MASK |
LBK_CHAN_MODE_MASK | LPBK_EN_MASK);
- airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(AIROHA_GDM2_IDX),
GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
/* Disable VIP and IFC for GDM2 */
- airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2));
- airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2));
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(AIROHA_GDM2_IDX));
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(AIROHA_GDM2_IDX));
/* XXX: handle XSI_USB_PORT and XSI_PCIE1_PORT */
nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
@@ -1746,8 +1748,8 @@ static int airoha_dev_init(struct net_device *dev)
airoha_set_macaddr(port, dev->dev_addr);
switch (port->id) {
- case 3:
- case 4:
+ case AIROHA_GDM3_IDX:
+ case AIROHA_GDM4_IDX:
/* If GDM2 is active we can't enable loopback */
if (!eth->ports[1]) {
int err;
@@ -1757,7 +1759,7 @@ static int airoha_dev_init(struct net_device *dev)
return err;
}
fallthrough;
- case 2:
+ case AIROHA_GDM2_IDX:
if (airoha_ppe_is_enabled(eth, 1)) {
/* For PPE2 always use secondary cpu port. */
fe_cpu_port = FE_PSE_PORT_CDM2;
@@ -2803,6 +2805,7 @@ static const struct ethtool_ops airoha_ethtool_ops = {
.get_drvinfo = airoha_ethtool_get_drvinfo,
.get_eth_mac_stats = airoha_ethtool_get_mac_stats,
.get_rmon_stats = airoha_ethtool_get_rmon_stats,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
@@ -3101,14 +3104,14 @@ static const char * const en7581_xsi_rsts_names[] = {
static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, int nbq)
{
switch (port->id) {
- case 3:
+ case AIROHA_GDM3_IDX:
/* 7581 SoC supports PCIe serdes on GDM3 port */
if (nbq == 4)
return HSGMII_LAN_7581_PCIE0_SRCPORT;
if (nbq == 5)
return HSGMII_LAN_7581_PCIE1_SRCPORT;
break;
- case 4:
+ case AIROHA_GDM4_IDX:
/* 7581 SoC supports eth and usb serdes on GDM4 port */
if (!nbq)
return HSGMII_LAN_7581_ETH_SRCPORT;
@@ -3132,12 +3135,12 @@ static const char * const an7583_xsi_rsts_names[] = {
static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq)
{
switch (port->id) {
- case 3:
+ case AIROHA_GDM3_IDX:
/* 7583 SoC supports eth serdes on GDM3 port */
if (!nbq)
return HSGMII_LAN_7583_ETH_SRCPORT;
break;
- case 4:
+ case AIROHA_GDM4_IDX:
/* 7583 SoC supports PCIe and USB serdes on GDM4 port */
if (!nbq)
return HSGMII_LAN_7583_PCIE_SRCPORT;
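The hunks above replace bare GDM port numbers with named indices. Their assumed definitions, with values inferred from the literals they replace (the authoritative definitions live in airoha_eth.h):

#define AIROHA_GDM2_IDX		2
#define AIROHA_GDM3_IDX		3
#define AIROHA_GDM4_IDX		4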
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index fbbc58133364..20e602d61e61 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -21,7 +21,7 @@
#define AIROHA_MAX_NUM_IRQ_BANKS 4
#define AIROHA_MAX_DSA_PORTS 7
#define AIROHA_MAX_NUM_RSTS 3
-#define AIROHA_MAX_MTU 9216
+#define AIROHA_MAX_MTU 9220
#define AIROHA_MAX_PACKET_SIZE 2048
#define AIROHA_NUM_QOS_CHANNELS 4
#define AIROHA_NUM_QOS_QUEUES 8
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index 68b7f9684dc7..89f22f3f47dc 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -16,6 +16,8 @@
#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
+#define NPU_EN7581_7996_FIRMWARE_DATA "airoha/en7581_MT7996_npu_data.bin"
+#define NPU_EN7581_7996_FIRMWARE_RV32 "airoha/en7581_MT7996_npu_rv32.bin"
#define NPU_AN7583_FIRMWARE_DATA "airoha/an7583_npu_data.bin"
#define NPU_AN7583_FIRMWARE_RV32 "airoha/an7583_npu_rv32.bin"
#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000
@@ -195,18 +197,18 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
}
static int airoha_npu_load_firmware(struct device *dev, void __iomem *addr,
- const struct airoha_npu_fw *fw_info)
+ const char *fw_name, int fw_max_size)
{
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, fw_info->name, dev);
+ ret = request_firmware(&fw, fw_name, dev);
if (ret)
return ret == -ENOENT ? -EPROBE_DEFER : ret;
- if (fw->size > fw_info->max_size) {
+ if (fw->size > fw_max_size) {
dev_err(dev, "%s: fw size too overlimit (%zu)\n",
- fw_info->name, fw->size);
+ fw_name, fw->size);
ret = -E2BIG;
goto out;
}
@@ -218,6 +220,28 @@ out:
return ret;
}
+static int
+airoha_npu_load_firmware_from_dts(struct device *dev, void __iomem *addr,
+ void __iomem *base)
+{
+ const char *fw_names[2];
+ int ret;
+
+ ret = of_property_read_string_array(dev->of_node, "firmware-name",
+ fw_names, ARRAY_SIZE(fw_names));
+ if (ret != ARRAY_SIZE(fw_names))
+ return -EINVAL;
+
+ ret = airoha_npu_load_firmware(dev, addr, fw_names[0],
+ NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
+ if (ret)
+ return ret;
+
+ return airoha_npu_load_firmware(dev, base + REG_NPU_LOCAL_SRAM,
+ fw_names[1],
+ NPU_EN7581_FIRMWARE_DATA_MAX_SIZE);
+}
+
static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
struct resource *res)
{
@@ -233,14 +257,22 @@ static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
if (IS_ERR(addr))
return PTR_ERR(addr);
+ /* Try to load firmware images using the firmware names provided via
+ * dts if available.
+ */
+ if (of_find_property(dev->of_node, "firmware-name", NULL))
+ return airoha_npu_load_firmware_from_dts(dev, addr, base);
+
/* Load rv32 npu firmware */
- ret = airoha_npu_load_firmware(dev, addr, &soc->fw_rv32);
+ ret = airoha_npu_load_firmware(dev, addr, soc->fw_rv32.name,
+ soc->fw_rv32.max_size);
if (ret)
return ret;
/* Load data npu firmware */
return airoha_npu_load_firmware(dev, base + REG_NPU_LOCAL_SRAM,
- &soc->fw_data);
+ soc->fw_data.name,
+ soc->fw_data.max_size);
}
static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
@@ -519,6 +551,14 @@ static int airoha_npu_wlan_init_memory(struct airoha_npu *npu)
if (err)
return err;
+ if (of_property_match_string(npu->dev->of_node, "memory-region-names",
+ "ba") >= 0) {
+ cmd = WLAN_FUNC_SET_WAIT_DRAM_BA_NODE_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "ba", cmd);
+ if (err)
+ return err;
+ }
+
cmd = WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU;
return airoha_npu_wlan_msg_send(npu, 0, cmd, &val, sizeof(val),
GFP_KERNEL);
@@ -657,6 +697,7 @@ static int airoha_npu_probe(struct platform_device *pdev)
struct resource res;
void __iomem *base;
int i, irq, err;
+ u32 val;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -750,6 +791,11 @@ static int airoha_npu_probe(struct platform_device *pdev)
regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
msleep(100);
+ if (!airoha_npu_wlan_msg_get(npu, 0, WLAN_FUNC_GET_WAIT_NPU_VERSION,
+ &val, sizeof(val), GFP_KERNEL))
+ dev_info(dev, "NPU fw version: %0d.%d\n",
+ (val >> 16) & 0xffff, val & 0xffff);
+
platform_set_drvdata(pdev, npu);
return 0;
@@ -776,6 +822,8 @@ module_platform_driver(airoha_npu_driver);
MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA);
MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32);
+MODULE_FIRMWARE(NPU_EN7581_7996_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_EN7581_7996_FIRMWARE_RV32);
MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_DATA);
MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_RV32);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
index 82071d0e5f7f..f5bb2d9a61be 100644
--- a/drivers/net/ethernet/alacritech/slic.h
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -284,7 +284,7 @@
#define SLIC_INC_STATS_COUNTER(st, counter) \
do { \
u64_stats_update_begin(&(st)->syncp); \
- (st)->counter++; \
+ u64_stats_inc(&(st)->counter); \
u64_stats_update_end(&(st)->syncp); \
} while (0)
@@ -293,7 +293,7 @@ do { \
unsigned int start; \
do { \
start = u64_stats_fetch_begin(&(st)->syncp); \
- newst = (st)->counter; \
+ newst = u64_stats_read(&(st)->counter); \
} while (u64_stats_fetch_retry(&(st)->syncp, start)); \
}
@@ -407,34 +407,34 @@ struct slic_oasis_eeprom {
};
struct slic_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_mcasts;
- u64 rx_errors;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_mcasts;
+ u64_stats_t rx_errors;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
/* HW STATS */
- u64 rx_buff_miss;
- u64 tx_dropped;
- u64 irq_errs;
+ u64_stats_t rx_buff_miss;
+ u64_stats_t tx_dropped;
+ u64_stats_t irq_errs;
/* transport layer */
- u64 rx_tpcsum;
- u64 rx_tpoflow;
- u64 rx_tphlen;
+ u64_stats_t rx_tpcsum;
+ u64_stats_t rx_tpoflow;
+ u64_stats_t rx_tphlen;
/* ip layer */
- u64 rx_ipcsum;
- u64 rx_iplen;
- u64 rx_iphlen;
+ u64_stats_t rx_ipcsum;
+ u64_stats_t rx_iplen;
+ u64_stats_t rx_iphlen;
/* link layer */
- u64 rx_early;
- u64 rx_buffoflow;
- u64 rx_lcode;
- u64 rx_drbl;
- u64 rx_crc;
- u64 rx_oflow802;
- u64 rx_uflow802;
+ u64_stats_t rx_early;
+ u64_stats_t rx_buffoflow;
+ u64_stats_t rx_lcode;
+ u64_stats_t rx_drbl;
+ u64_stats_t rx_crc;
+ u64_stats_t rx_oflow802;
+ u64_stats_t rx_uflow802;
/* oasis only */
- u64 tx_carrier;
+ u64_stats_t tx_carrier;
struct u64_stats_sync syncp;
};
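Converting the counters to u64_stats_t pairs them with the u64_stats_* accessors, which degrade to plain loads/stores on 64-bit and take the seqcount only where 32-bit architectures need it. Generic sketch of the read/write pattern (illustrative names, not the driver's):

#include <linux/u64_stats_sync.h>

struct ex_stats {
	u64_stats_t packets;
	struct u64_stats_sync syncp;
};

static void ex_inc(struct ex_stats *st)
{
	u64_stats_update_begin(&st->syncp);
	u64_stats_inc(&st->packets);
	u64_stats_update_end(&st->syncp);
}

static u64 ex_read(const struct ex_stats *st)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&st->syncp);
		val = u64_stats_read(&st->packets);
	} while (u64_stats_fetch_retry(&st->syncp, start));

	return val;
}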
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index f62851708d4f..7488fb6ace0b 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -378,8 +378,8 @@ static void slic_xmit_complete(struct slic_device *sdev)
smp_wmb();
u64_stats_update_begin(&sdev->stats.syncp);
- sdev->stats.tx_bytes += bytes;
- sdev->stats.tx_packets += frames;
+ u64_stats_add(&sdev->stats.tx_bytes, bytes);
+ u64_stats_add(&sdev->stats.tx_packets, frames);
u64_stats_update_end(&sdev->stats.syncp);
netif_tx_lock(dev);
@@ -615,8 +615,8 @@ static void slic_handle_receive(struct slic_device *sdev, unsigned int todo,
}
u64_stats_update_begin(&sdev->stats.syncp);
- sdev->stats.rx_bytes += bytes;
- sdev->stats.rx_packets += frames;
+ u64_stats_add(&sdev->stats.rx_bytes, bytes);
+ u64_stats_add(&sdev->stats.rx_packets, frames);
u64_stats_update_end(&sdev->stats.syncp);
slic_refill_rx_queue(sdev, GFP_ATOMIC);
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index fe3479b84a1f..2455d6dddc26 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -835,27 +835,11 @@ static int ena_set_rxfh_fields(struct net_device *netdev,
return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}
-static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
- u32 *rules)
+static u32 ena_get_rx_ring_count(struct net_device *netdev)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- int rc = 0;
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = adapter->num_io_queues;
- rc = 0;
- break;
- case ETHTOOL_GRXCLSRLCNT:
- case ETHTOOL_GRXCLSRULE:
- case ETHTOOL_GRXCLSRLALL:
- default:
- netif_err(adapter, drv, netdev,
- "Command parameter %d is not supported\n", info->cmd);
- rc = -EOPNOTSUPP;
- }
-
- return rc;
+ return adapter->num_io_queues;
}
static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
@@ -1096,7 +1080,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_sset_count = ena_get_sset_count,
.get_strings = ena_get_ethtool_strings,
.get_ethtool_stats = ena_get_ethtool_stats,
- .get_rxnfc = ena_get_rxnfc,
+ .get_rx_ring_count = ena_get_rx_ring_count,
.get_rxfh_indir_size = ena_get_rxfh_indir_size,
.get_rxfh_key_size = ena_get_rxfh_key_size,
.get_rxfh = ena_get_rxfh,
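This is the first of several conversions in this series (xgbe and atlantic follow the same shape): a .get_rxnfc handler that only answered ETHTOOL_GRXRINGS is replaced by the dedicated .get_rx_ring_count callback, and the boilerplate switch disappears. Minimal sketch with hypothetical driver names:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_adapter {
	u32 num_rx_rings;
};

static u32 foo_get_rx_ring_count(struct net_device *netdev)
{
	struct foo_adapter *adapter = netdev_priv(netdev);

	return adapter->num_rx_rings;	/* previously rxnfc->data */
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_rx_ring_count	= foo_get_rx_ring_count,
	/* .get_rxnfc is kept only where classification rules remain */
};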
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index d54dca3074eb..45e8d698781c 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -165,7 +165,7 @@ config AMD_XGBE
select CRC32
select PHYLIB
select AMD_XGBE_HAVE_ECC if X86
- select NET_SELFTESTS
+ imply NET_SELFTESTS
help
This driver supports the AMD 10GbE Ethernet device found on an
AMD SoC.
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 8d05a0c5f2d5..e6d56fcdc1dd 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -813,7 +813,7 @@ static int lance_open(struct net_device *dev)
if (lp->dma_irq >= 0) {
unsigned long flags;
- if (request_irq(lp->dma_irq, lance_dma_merr_int, IRQF_ONESHOT,
+ if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
"lance error", dev)) {
free_irq(dev->irq, dev);
printk("%s: Can't get DMA IRQ %d\n", dev->name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 62b01de93db4..711f295eb777 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -598,6 +598,7 @@
#define MMC_RXVLANFRAMES_GB_LO 0x0998
#define MMC_RXVLANFRAMES_GB_HI 0x099c
#define MMC_RXWATCHDOGERROR 0x09a0
+#define MMC_RXALIGNMENTERROR 0x09bc
/* MMC register entry bit positions and sizes */
#define MMC_CR_CR_INDEX 0
@@ -658,6 +659,8 @@
#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
+#define MMC_RISR_RXALIGNMENTERROR_INDEX 27
+#define MMC_RISR_RXALIGNMENTERROR_WIDTH 1
#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index b646ae575e6a..c04a9c76bd40 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2794,6 +2794,7 @@ static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
case MMC_RXUNDERSIZE_G:
case MMC_RXOVERSIZE_G:
case MMC_RXWATCHDOGERROR:
+ case MMC_RXALIGNMENTERROR:
read_hi = false;
break;
@@ -2997,6 +2998,10 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
stats->rxwatchdogerror +=
xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXALIGNMENTERROR))
+ stats->rxalignmenterror +=
+ xgbe_mmc_read(pdata, MMC_RXALIGNMENTERROR);
}
static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
@@ -3129,6 +3134,9 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
stats->rxwatchdogerror +=
xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+ stats->rxalignmenterror +=
+ xgbe_mmc_read(pdata, MMC_RXALIGNMENTERROR);
+
/* Un-freeze counters */
XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b5a60a048896..62bb4b8a68e1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1838,6 +1838,7 @@ static void xgbe_get_stats64(struct net_device *netdev,
s->rx_length_errors = pstats->rxlengtherror;
s->rx_crc_errors = pstats->rxcrcerror;
s->rx_over_errors = pstats->rxfifooverflow;
+ s->rx_frame_errors = pstats->rxalignmenterror;
s->tx_packets = pstats->txframecount_gb;
s->tx_bytes = pstats->txoctetcount_gb;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 0d19b09497a0..a9f4fcc4daae 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -362,13 +362,16 @@ static int xgbe_set_coalesce(struct net_device *netdev,
/* Check the bounds of values for Rx */
if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
- netdev_err(netdev, "rx-usec is limited to %d usecs\n",
- hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "rx-usec is limited to %d usecs",
+ hw_if->riwt_to_usec(pdata,
+ XGMAC_MAX_DMA_RIWT));
return -EINVAL;
}
if (rx_frames > pdata->rx_desc_count) {
- netdev_err(netdev, "rx-frames is limited to %d frames\n",
- pdata->rx_desc_count);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "rx-frames is limited to %d frames",
+ pdata->rx_desc_count);
return -EINVAL;
}
@@ -377,8 +380,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
/* Check the bounds of values for Tx */
if (!tx_usecs) {
- NL_SET_ERR_MSG_FMT_MOD(extack,
- "tx-usecs must not be 0");
+ NL_SET_ERR_MSG_MOD(extack, "tx-usecs must not be 0");
return -EINVAL;
}
if (tx_usecs > XGMAC_MAX_COAL_TX_TICK) {
@@ -387,8 +389,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
if (tx_frames > pdata->tx_desc_count) {
- netdev_err(netdev, "tx-frames is limited to %d frames\n",
- pdata->tx_desc_count);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx-frames is limited to %d frames",
+ pdata->tx_desc_count);
return -EINVAL;
}
@@ -414,20 +417,11 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return 0;
}
-static int xgbe_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+static u32 xgbe_get_rx_ring_count(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- switch (rxnfc->cmd) {
- case ETHTOOL_GRXRINGS:
- rxnfc->data = pdata->rx_ring_count;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
+ return pdata->rx_ring_count;
}
static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
@@ -474,7 +468,7 @@ static int xgbe_set_rxfh(struct net_device *netdev,
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP) {
- netdev_err(netdev, "unsupported hash function\n");
+ NL_SET_ERR_MSG_MOD(extack, "unsupported hash function");
return -EOPNOTSUPP;
}
@@ -561,37 +555,39 @@ static int xgbe_set_ringparam(struct net_device *netdev,
unsigned int rx, tx;
if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) {
- netdev_err(netdev, "unsupported ring parameter\n");
+ NL_SET_ERR_MSG_MOD(extack, "unsupported ring parameter");
return -EINVAL;
}
if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) ||
(ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) {
- netdev_err(netdev,
- "rx ring parameter must be between %u and %u\n",
- XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "rx ring parameter must be between %u and %u",
+ XGBE_RX_DESC_CNT_MIN,
+ XGBE_RX_DESC_CNT_MAX);
return -EINVAL;
}
if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) ||
(ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) {
- netdev_err(netdev,
- "tx ring parameter must be between %u and %u\n",
- XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx ring parameter must be between %u and %u",
+ XGBE_TX_DESC_CNT_MIN,
+ XGBE_TX_DESC_CNT_MAX);
return -EINVAL;
}
rx = __rounddown_pow_of_two(ringparam->rx_pending);
if (rx != ringparam->rx_pending)
- netdev_notice(netdev,
- "rx ring parameter rounded to power of two: %u\n",
- rx);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "rx ring parameter rounded to power of two: %u",
+ rx);
tx = __rounddown_pow_of_two(ringparam->tx_pending);
if (tx != ringparam->tx_pending)
- netdev_notice(netdev,
- "tx ring parameter rounded to power of two: %u\n",
- tx);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx ring parameter rounded to power of two: %u",
+ tx);
if ((rx == pdata->rx_desc_count) &&
(tx == pdata->tx_desc_count))
@@ -752,7 +748,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_strings = xgbe_get_strings,
.get_ethtool_stats = xgbe_get_ethtool_stats,
.get_sset_count = xgbe_get_sset_count,
- .get_rxnfc = xgbe_get_rxnfc,
+ .get_rx_ring_count = xgbe_get_rx_ring_count,
.get_rxfh_key_size = xgbe_get_rxfh_key_size,
.get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
.get_rxfh = xgbe_get_rxfh,
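The netdev_err() to NL_SET_ERR_MSG*() conversions above route validation failures through the netlink extended ack, so the message reaches the ethtool(8) caller instead of only the kernel log. Sketch of the idiom under the same op signature (the bound value is illustrative):

#include <linux/ethtool.h>
#include <linux/netlink.h>

static int ex_set_ringparam(struct net_device *dev,
			    struct ethtool_ringparam *ring,
			    struct kernel_ethtool_ringparam *kernel_ring,
			    struct netlink_ext_ack *extack)
{
	if (ring->rx_pending > 4096) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "rx ring parameter must be at most %u",
				       4096);
		return -EINVAL;
	}

	return 0;
}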
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 03ef0f548483..1269b8ce9249 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -659,6 +659,7 @@ struct xgbe_mmc_stats {
u64 rxfifooverflow;
u64 rxvlanframes_gb;
u64 rxwatchdogerror;
+ u64 rxalignmenterror;
};
struct xgbe_ext_stats {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 6fef47ba0a59..a6e1826dd5d7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -500,20 +500,25 @@ static int aq_ethtool_set_rss(struct net_device *netdev,
return err;
}
+static u32 aq_ethtool_get_rx_ring_count(struct net_device *ndev)
+{
+ struct aq_nic_cfg_s *cfg;
+ struct aq_nic_s *aq_nic;
+
+ aq_nic = netdev_priv(ndev);
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ return cfg->vecs;
+}
+
static int aq_ethtool_get_rxnfc(struct net_device *ndev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg;
int err = 0;
- cfg = aq_nic_get_cfg(aq_nic);
-
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = cfg->vecs;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = aq_get_rxnfc_count_all_rules(aq_nic);
break;
@@ -1072,6 +1077,7 @@ const struct ethtool_ops aq_ethtool_ops = {
.set_rxfh = aq_ethtool_set_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
.set_rxnfc = aq_ethtool_set_rxnfc,
+ .get_rx_ring_count = aq_ethtool_get_rx_ring_count,
.get_msglevel = aq_get_msg_level,
.set_msglevel = aq_set_msg_level,
.get_sset_count = aq_ethtool_get_sset_count,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index b24eaa5283fa..ef9447810071 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -701,9 +701,6 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
} else if (l4proto == IPPROTO_UDP) {
dx_buff->is_gso_udp = 1U;
dx_buff->len_l4 = sizeof(struct udphdr);
- /* UDP GSO Hardware does not replace packet length. */
- udp_hdr(skb)->len = htons(dx_buff->mss +
- dx_buff->len_l4);
} else {
WARN_ONCE(true, "Bad GSO mode");
goto exit;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 014340f33345..aa6d8606849f 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -1081,15 +1081,10 @@ static irqreturn_t bcmasp_isr_wol(int irq, void *data)
struct bcmasp_priv *priv = data;
u32 status;
- /* No L3 IRQ, so we good */
- if (priv->wol_irq <= 0)
- goto irq_handled;
-
status = wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_STATUS) &
~wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_MASK_STATUS);
wakeup_intr2_core_wl(priv, status, ASP_WAKEUP_INTR2_CLEAR);
-irq_handled:
pm_wakeup_event(&priv->pdev->dev, 0);
return IRQ_HANDLED;
}
@@ -1322,6 +1317,8 @@ static int bcmasp_probe(struct platform_device *pdev)
bcmasp_core_init_filters(priv);
+ bcmasp_init_wol(priv);
+
ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
if (!ports_node) {
dev_warn(dev, "No ports found\n");
@@ -1333,16 +1330,14 @@ static int bcmasp_probe(struct platform_device *pdev)
intf = bcmasp_interface_create(priv, intf_node, i);
if (!intf) {
dev_err(dev, "Cannot create eth interface %d\n", i);
- bcmasp_remove_intfs(priv);
- ret = -ENOMEM;
- goto of_put_exit;
+ of_node_put(ports_node);
+ ret = -EINVAL;
+ goto err_cleanup;
}
list_add_tail(&intf->list, &priv->intfs);
i++;
}
-
- /* Check and enable WoL */
- bcmasp_init_wol(priv);
+ of_node_put(ports_node);
/* Drop the clock reference count now and let ndo_open()/ndo_close()
* manage it for us from now on.
@@ -1357,19 +1352,20 @@ static int bcmasp_probe(struct platform_device *pdev)
list_for_each_entry(intf, &priv->intfs, list) {
ret = register_netdev(intf->ndev);
if (ret) {
- netdev_err(intf->ndev,
- "failed to register net_device: %d\n", ret);
- bcmasp_wol_irq_destroy(priv);
- bcmasp_remove_intfs(priv);
- goto of_put_exit;
+ dev_err(dev, "failed to register net_device: %d\n", ret);
+ goto err_cleanup;
}
count++;
}
dev_info(dev, "Initialized %d port(s)\n", count);
-of_put_exit:
- of_node_put(ports_node);
+ return ret;
+
+err_cleanup:
+ bcmasp_wol_irq_destroy(priv);
+ bcmasp_remove_intfs(priv);
+
return ret;
}
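The probe rework above funnels every failure through one err_cleanup label that undoes WoL setup and tears down any interfaces created so far. The underlying idiom, as a sketch with illustrative names:

#include <linux/platform_device.h>

/* Hypothetical helpers standing in for the driver's real setup steps. */
static int ex_wol_init(struct platform_device *pdev) { return 0; }
static void ex_wol_exit(struct platform_device *pdev) { }
static int ex_create_interfaces(struct platform_device *pdev) { return 0; }
static void ex_destroy_interfaces(struct platform_device *pdev) { }

static int ex_probe(struct platform_device *pdev)
{
	int ret;

	ret = ex_wol_init(pdev);
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = ex_create_interfaces(pdev);
	if (ret)
		goto err_cleanup;

	return 0;

err_cleanup:
	/* undo in reverse order; teardown tolerates partial creation */
	ex_destroy_interfaces(pdev);
	ex_wol_exit(pdev);
	return ret;
}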
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index e238507be40a..29cd87335ec8 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -268,13 +268,6 @@ struct bcmasp_mib_counters {
u32 tx_timeout_cnt;
};
-struct bcmasp_intf_ops {
- unsigned long (*rx_desc_read)(struct bcmasp_intf *intf);
- void (*rx_buffer_write)(struct bcmasp_intf *intf, dma_addr_t addr);
- void (*rx_desc_write)(struct bcmasp_intf *intf, dma_addr_t addr);
- unsigned long (*tx_read)(struct bcmasp_intf *intf);
- void (*tx_write)(struct bcmasp_intf *intf, dma_addr_t addr);
-};
struct bcmasp_priv;
@@ -286,7 +279,6 @@ struct bcmasp_intf {
/* ASP Ch */
int channel;
int port;
- const struct bcmasp_intf_ops *ops;
/* Used for splitting shared resources */
int index;
@@ -407,34 +399,6 @@ struct bcmasp_priv {
struct mutex net_lock;
};
-static inline unsigned long bcmasp_intf_rx_desc_read(struct bcmasp_intf *intf)
-{
- return intf->ops->rx_desc_read(intf);
-}
-
-static inline void bcmasp_intf_rx_buffer_write(struct bcmasp_intf *intf,
- dma_addr_t addr)
-{
- intf->ops->rx_buffer_write(intf, addr);
-}
-
-static inline void bcmasp_intf_rx_desc_write(struct bcmasp_intf *intf,
- dma_addr_t addr)
-{
- intf->ops->rx_desc_write(intf, addr);
-}
-
-static inline unsigned long bcmasp_intf_tx_read(struct bcmasp_intf *intf)
-{
- return intf->ops->tx_read(intf);
-}
-
-static inline void bcmasp_intf_tx_write(struct bcmasp_intf *intf,
- dma_addr_t addr)
-{
- intf->ops->tx_write(intf, addr);
-}
-
#define __BCMASP_IO_MACRO(name, m) \
static inline u32 name##_rl(struct bcmasp_intf *intf, u32 off) \
{ \
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index ceb6c11431dd..d0a480430a95 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -231,39 +231,6 @@ help:
return skb;
}
-static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
-{
- return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
-}
-
-static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
-{
- rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
-}
-
-static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
-{
- rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
-}
-
-static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
-{
- return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
-}
-
-static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
-{
- tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
-}
-
-static const struct bcmasp_intf_ops bcmasp_intf_ops = {
- .rx_desc_read = bcmasp_rx_edpkt_dma_rq,
- .rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
- .rx_desc_write = bcmasp_rx_edpkt_dma_wq,
- .tx_read = bcmasp_tx_spb_dma_rq,
- .tx_write = bcmasp_tx_spb_dma_wq,
-};
-
static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcmasp_intf *intf = netdev_priv(dev);
@@ -368,7 +335,7 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
- bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
@@ -449,7 +416,7 @@ static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
struct bcmasp_desc *desc;
dma_addr_t mapping;
- read = bcmasp_intf_tx_read(intf);
+ read = tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
while (intf->tx_spb_dma_read != read) {
txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
mapping = dma_unmap_addr(txcb, dma_addr);
@@ -519,7 +486,7 @@ static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
u64 flags;
u32 len;
- valid = bcmasp_intf_rx_desc_read(intf) + 1;
+ valid = rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID) + 1;
if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
valid = intf->rx_edpkt_dma_addr;
@@ -591,8 +558,8 @@ static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
u64_stats_update_end(&stats->syncp);
next:
- bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
- desc->size));
+ rx_edpkt_cfg_wq(intf, (DESC_ADDR(desc->buf) + desc->size),
+ RX_EDPKT_RING_BUFFER_READ);
processed++;
intf->rx_edpkt_dma_read =
@@ -603,7 +570,7 @@ next:
DESC_RING_COUNT);
}
- bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_read, RX_EDPKT_DMA_READ);
if (processed < budget && napi_complete_done(&intf->rx_napi, processed))
bcmasp_enable_rx_irq(intf, 1);
@@ -1271,7 +1238,6 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
}
SET_NETDEV_DEV(ndev, dev);
- intf->ops = &bcmasp_intf_ops;
ndev->netdev_ops = &bcmasp_netdev_ops;
ndev->ethtool_ops = &bcmasp_ethtool_ops;
intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
@@ -1336,10 +1302,8 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
umac_enable_set(intf, UMC_CMD_RX_EN, 1);
- if (intf->parent->wol_irq > 0) {
- wakeup_intr2_core_wl(intf->parent, 0xffffffff,
- ASP_WAKEUP_INTR2_MASK_CLEAR);
- }
+ wakeup_intr2_core_wl(intf->parent, 0xffffffff,
+ ASP_WAKEUP_INTR2_MASK_CLEAR);
if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
intf->parent->eee_fixup)
@@ -1392,10 +1356,8 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
reg &= ~UMC_MPD_CTRL_MPD_EN;
umac_wl(intf, reg, UMC_MPD_CTRL);
- if (intf->parent->wol_irq > 0) {
- wakeup_intr2_core_wl(intf->parent, 0xffffffff,
- ASP_WAKEUP_INTR2_MASK_SET);
- }
+ wakeup_intr2_core_wl(intf->parent, 0xffffffff,
+ ASP_WAKEUP_INTR2_MASK_SET);
}
int bcmasp_interface_resume(struct bcmasp_intf *intf)
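With only one backend left, the bcmasp_intf_ops indirection bought nothing, and every ring access paid for an indirect branch; the hunks above inline the register helpers directly. Shape of the change in one call site (names from the driver):

/*
 * before: indirect call through the ops table
 *	bcmasp_intf_tx_write(intf, addr);	// -> intf->ops->tx_write()
 *
 * after: direct register-write helper
 *	tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
 */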
diff --git a/drivers/net/ethernet/broadcom/bnge/Makefile b/drivers/net/ethernet/broadcom/bnge/Makefile
index ea6596854e5c..fa604ee20264 100644
--- a/drivers/net/ethernet/broadcom/bnge/Makefile
+++ b/drivers/net/ethernet/broadcom/bnge/Makefile
@@ -10,4 +10,5 @@ bng_en-y := bnge_core.o \
bnge_resc.o \
bnge_netdev.o \
bnge_ethtool.o \
- bnge_auxr.o
+ bnge_auxr.o \
+ bnge_txrx.o
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
index 32fc16a37d02..f376913aa321 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -8,7 +8,7 @@
#define DRV_SUMMARY "Broadcom ThorUltra NIC Ethernet Driver"
#include <linux/etherdevice.h>
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
#include "bnge_rmem.h"
#include "bnge_resc.h"
#include "bnge_auxr.h"
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
index d64592b64e17..5f4cb4991964 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
@@ -14,7 +14,7 @@
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
#include "bnge.h"
#include "bnge_hwrm.h"
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
index a987afebd64d..f3a984d4d5f1 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
@@ -221,7 +221,7 @@ static int bnge_devlink_info_get(struct devlink *devlink,
DEVLINK_INFO_VERSION_GENERIC_FW, buf);
if (rc) {
NL_SET_ERR_MSG_MOD(extack,
- "Failed to set roce firmware version");
+ "Failed to set firmware version");
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h b/drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h
new file mode 100644
index 000000000000..49828dc05514
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h
@@ -0,0 +1,446 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_HW_DEF_H_
+#define _BNGE_HW_DEF_H_
+
+#define TX_BD_FLAGS_TCP_UDP_CHKSUM BIT(0)
+#define TX_BD_FLAGS_IP_CKSUM BIT(1)
+#define TX_BD_FLAGS_NO_CRC BIT(2)
+#define TX_BD_FLAGS_STAMP BIT(3)
+#define TX_BD_FLAGS_T_IP_CHKSUM BIT(4)
+#define TX_BD_FLAGS_LSO BIT(5)
+#define TX_BD_FLAGS_IPID_FMT BIT(6)
+#define TX_BD_FLAGS_T_IPID BIT(7)
+#define TX_BD_HSIZE GENMASK(23, 16)
+#define TX_BD_HSIZE_SHIFT 16
+
+#define TX_BD_CFA_ACTION GENMASK(31, 16)
+#define TX_BD_CFA_ACTION_SHIFT 16
+
+#define TX_BD_CFA_META_MASK 0xfffffff
+#define TX_BD_CFA_META_VID_MASK 0xfff
+#define TX_BD_CFA_META_PRI_MASK GENMASK(15, 12)
+#define TX_BD_CFA_META_PRI_SHIFT 12
+#define TX_BD_CFA_META_TPID_MASK GENMASK(17, 16)
+#define TX_BD_CFA_META_TPID_SHIFT 16
+#define TX_BD_CFA_META_KEY GENMASK(31, 28)
+#define TX_BD_CFA_META_KEY_SHIFT 28
+#define TX_BD_CFA_META_KEY_VLAN BIT(28)
+
+struct tx_bd_ext {
+ __le32 tx_bd_hsize_lflags;
+ __le32 tx_bd_mss;
+ __le32 tx_bd_cfa_action;
+ __le32 tx_bd_cfa_meta;
+};
+
+#define TX_CMP_SQ_CONS_IDX(txcmp) \
+ (le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK)
+
+#define RX_CMP_CMP_TYPE GENMASK(5, 0)
+#define RX_CMP_FLAGS_ERROR BIT(6)
+#define RX_CMP_FLAGS_PLACEMENT GENMASK(9, 7)
+#define RX_CMP_FLAGS_RSS_VALID BIT(10)
+#define RX_CMP_FLAGS_PKT_METADATA_PRESENT BIT(11)
+#define RX_CMP_FLAGS_ITYPES_SHIFT 12
+#define RX_CMP_FLAGS_ITYPES_MASK 0xf000
+#define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
+#define RX_CMP_FLAGS_ITYPE_IP (1 << 12)
+#define RX_CMP_FLAGS_ITYPE_TCP (2 << 12)
+#define RX_CMP_FLAGS_ITYPE_UDP (3 << 12)
+#define RX_CMP_FLAGS_ITYPE_FCOE (4 << 12)
+#define RX_CMP_FLAGS_ITYPE_ROCE (5 << 12)
+#define RX_CMP_FLAGS_ITYPE_PTP_WO_TS (8 << 12)
+#define RX_CMP_FLAGS_ITYPE_PTP_W_TS (9 << 12)
+#define RX_CMP_LEN GENMASK(31, 16)
+#define RX_CMP_LEN_SHIFT 16
+
+#define RX_CMP_V1 BIT(0)
+#define RX_CMP_AGG_BUFS GENMASK(5, 1)
+#define RX_CMP_AGG_BUFS_SHIFT 1
+#define RX_CMP_RSS_HASH_TYPE GENMASK(15, 9)
+#define RX_CMP_RSS_HASH_TYPE_SHIFT 9
+#define RX_CMP_V3_RSS_EXT_OP_LEGACY GENMASK(15, 12)
+#define RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT 12
+#define RX_CMP_V3_RSS_EXT_OP_NEW GENMASK(11, 8)
+#define RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT 8
+#define RX_CMP_PAYLOAD_OFFSET GENMASK(23, 16)
+#define RX_CMP_PAYLOAD_OFFSET_SHIFT 16
+#define RX_CMP_SUB_NS_TS GENMASK(19, 16)
+#define RX_CMP_SUB_NS_TS_SHIFT 16
+#define RX_CMP_METADATA1 GENMASK(31, 28)
+#define RX_CMP_METADATA1_SHIFT 28
+#define RX_CMP_METADATA1_TPID_SEL GENMASK(30, 28)
+#define RX_CMP_METADATA1_TPID_8021Q BIT(28)
+#define RX_CMP_METADATA1_TPID_8021AD (0x0 << 28)
+#define RX_CMP_METADATA1_VALID BIT(31)
+
+struct rx_cmp {
+ __le32 rx_cmp_len_flags_type;
+ u32 rx_cmp_opaque;
+ __le32 rx_cmp_misc_v1;
+ __le32 rx_cmp_rss_hash;
+};
+
+#define RX_CMP_FLAGS2_IP_CS_CALC BIT(0)
+#define RX_CMP_FLAGS2_L4_CS_CALC BIT(1)
+#define RX_CMP_FLAGS2_T_IP_CS_CALC BIT(2)
+#define RX_CMP_FLAGS2_T_L4_CS_CALC BIT(3)
+#define RX_CMP_FLAGS2_META_FORMAT_VLAN BIT(4)
+
+#define RX_CMP_FLAGS2_METADATA_TCI_MASK GENMASK(15, 0)
+#define RX_CMP_FLAGS2_METADATA_VID_MASK GENMASK(11, 0)
+#define RX_CMP_FLAGS2_METADATA_TPID_MASK GENMASK(31, 16)
+#define RX_CMP_FLAGS2_METADATA_TPID_SFT 16
+
+#define RX_CMP_V BIT(0)
+#define RX_CMPL_ERRORS_MASK GENMASK(15, 1)
+#define RX_CMPL_ERRORS_SFT 1
+#define RX_CMPL_ERRORS_BUFFER_ERROR_MASK GENMASK(3, 1)
+#define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+#define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1 << 1)
+#define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+#define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+#define RX_CMPL_ERRORS_IP_CS_ERROR BIT(4)
+#define RX_CMPL_ERRORS_L4_CS_ERROR BIT(5)
+#define RX_CMPL_ERRORS_T_IP_CS_ERROR BIT(6)
+#define RX_CMPL_ERRORS_T_L4_CS_ERROR BIT(7)
+#define RX_CMPL_ERRORS_CRC_ERROR BIT(8)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_MASK GENMASK(11, 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (0x0 << 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1 << 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2 << 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3 << 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4 << 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5 << 9)
+#define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6 << 9)
+#define RX_CMPL_ERRORS_PKT_ERROR_MASK GENMASK(15, 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR (0x0 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
+#define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8 << 12)
+
+#define RX_CMPL_CFA_CODE_MASK GENMASK(31, 16)
+#define RX_CMPL_CFA_CODE_SFT 16
+#define RX_CMPL_METADATA0_TCI_MASK GENMASK(31, 16)
+#define RX_CMPL_METADATA0_VID_MASK GENMASK(27, 16)
+#define RX_CMPL_METADATA0_SFT 16
+
+struct rx_cmp_ext {
+ __le32 rx_cmp_flags2;
+ __le32 rx_cmp_meta_data;
+ __le32 rx_cmp_cfa_code_errors_v2;
+ __le32 rx_cmp_timestamp;
+};
+
+#define RX_AGG_CMP_TYPE GENMASK(5, 0)
+#define RX_AGG_CMP_LEN GENMASK(31, 16)
+#define RX_AGG_CMP_LEN_SHIFT 16
+#define RX_AGG_CMP_V BIT(0)
+#define RX_AGG_CMP_AGG_ID GENMASK(25, 16)
+#define RX_AGG_CMP_AGG_ID_SHIFT 16
+
+struct rx_agg_cmp {
+ __le32 rx_agg_cmp_len_flags_type;
+ u32 rx_agg_cmp_opaque;
+ __le32 rx_agg_cmp_v;
+ __le32 rx_agg_cmp_unused;
+};
+
+#define RX_CMP_L2_ERRORS \
+ cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
+
+#define RX_CMP_L4_CS_BITS \
+ (cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
+
+#define RX_CMP_L4_CS_ERR_BITS \
+ (cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
+
+#define RX_CMP_L4_CS_OK(rxcmp1) \
+ (((rxcmp1)->rx_cmp_flags2 & RX_CMP_L4_CS_BITS) && \
+ !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+#define RX_CMP_METADATA0_TCI(rxcmp1) \
+ ((le32_to_cpu((rxcmp1)->rx_cmp_cfa_code_errors_v2) & \
+ RX_CMPL_METADATA0_TCI_MASK) >> RX_CMPL_METADATA0_SFT)
+
+#define RX_CMP_ENCAP(rxcmp1) \
+ ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \
+ RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
+
+#define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & \
+ RX_CMP_V3_RSS_EXT_OP_LEGACY) >> RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT)
+
+#define RX_CMP_V3_HASH_TYPE_NEW(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_NEW) >>\
+ RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT)
+
+#define RX_CMP_V3_HASH_TYPE(bd, rxcmp) \
+ (((bd)->rss_cap & BNGE_RSS_CAP_RSS_TCAM) ? \
+ RX_CMP_V3_HASH_TYPE_NEW(rxcmp) : \
+ RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp))
+
+#define EXT_OP_INNER_4 0x0
+#define EXT_OP_OUTER_4 0x2
+#define EXT_OP_INNFL_3 0x8
+#define EXT_OP_OUTFL_3 0xa
+
+#define RX_CMP_VLAN_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_misc_v1 & cpu_to_le32(RX_CMP_METADATA1_VALID))
+
+#define RX_CMP_VLAN_TPID_SEL(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_METADATA1_TPID_SEL)
+
+#define RSS_PROFILE_ID_MASK GENMASK(4, 0)
+
+#define RX_CMP_HASH_TYPE(rxcmp) \
+ (((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
+ RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
+#define RX_CMP_HASH_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+
+#define TPA_AGG_AGG_ID(rx_agg) \
+ ((le32_to_cpu((rx_agg)->rx_agg_cmp_v) & \
+ RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT)
+
+#define RX_TPA_START_CMP_TYPE GENMASK(5, 0)
+#define RX_TPA_START_CMP_FLAGS GENMASK(15, 6)
+#define RX_TPA_START_CMP_FLAGS_SHIFT 6
+#define RX_TPA_START_CMP_FLAGS_ERROR BIT(6)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT GENMASK(9, 7)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO BIT(7)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
+#define RX_TPA_START_CMP_FLAGS_RSS_VALID BIT(10)
+#define RX_TPA_START_CMP_FLAGS_TIMESTAMP BIT(11)
+#define RX_TPA_START_CMP_FLAGS_ITYPES GENMASK(15, 12)
+#define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12
+#define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
+#define RX_TPA_START_CMP_LEN GENMASK(31, 16)
+#define RX_TPA_START_CMP_LEN_SHIFT 16
+#define RX_TPA_START_CMP_V1 BIT(0)
+#define RX_TPA_START_CMP_RSS_HASH_TYPE GENMASK(15, 9)
+#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
+#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE GENMASK(15, 7)
+#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7
+#define RX_TPA_START_CMP_AGG_ID GENMASK(25, 16)
+#define RX_TPA_START_CMP_AGG_ID_SHIFT 16
+#define RX_TPA_START_CMP_METADATA1 GENMASK(31, 28)
+#define RX_TPA_START_CMP_METADATA1_SHIFT 28
+#define RX_TPA_START_METADATA1_TPID_SEL GENMASK(30, 28)
+#define RX_TPA_START_METADATA1_TPID_8021Q BIT(28)
+#define RX_TPA_START_METADATA1_TPID_8021AD (0x0 << 28)
+#define RX_TPA_START_METADATA1_VALID BIT(31)
+
+struct rx_tpa_start_cmp {
+ __le32 rx_tpa_start_cmp_len_flags_type;
+ u32 rx_tpa_start_cmp_opaque;
+ __le32 rx_tpa_start_cmp_misc_v1;
+ __le32 rx_tpa_start_cmp_rss_hash;
+};
+
+#define TPA_START_HASH_VALID(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
+
+#define TPA_START_HASH_TYPE(rx_tpa_start) \
+ (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
+ RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
+#define TPA_START_V3_HASH_TYPE(rx_tpa_start) \
+ (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_V3_RSS_HASH_TYPE) >> \
+ RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
+#define TPA_START_AGG_ID(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+
+#define TPA_START_ERROR(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
+
+#define TPA_START_VLAN_VALID(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_misc_v1 & \
+ cpu_to_le32(RX_TPA_START_METADATA1_VALID))
+
+#define TPA_START_VLAN_TPID_SEL(rx_tpa_start) \
+ (le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_METADATA1_TPID_SEL)
+
+#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC BIT(0)
+#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC BIT(1)
+#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC BIT(2)
+#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC BIT(3)
+#define RX_TPA_START_CMP_FLAGS2_IP_TYPE BIT(8)
+#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID BIT(9)
+#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT GENMASK(11, 10)
+#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10
+#define RX_TPA_START_CMP_V3_FLAGS2_T_IP_TYPE BIT(10)
+#define RX_TPA_START_CMP_V3_FLAGS2_AGG_GRO BIT(11)
+#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL GENMASK(31, 16)
+#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16
+#define RX_TPA_START_CMP_V2 BIT(0)
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK GENMASK(3, 1)
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT 1
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
+#define RX_TPA_START_CMP_CFA_CODE GENMASK(31, 16)
+#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
+#define RX_TPA_START_CMP_METADATA0_TCI_MASK GENMASK(31, 16)
+#define RX_TPA_START_CMP_METADATA0_VID_MASK GENMASK(27, 16)
+#define RX_TPA_START_CMP_METADATA0_SFT 16
+
+struct rx_tpa_start_cmp_ext {
+ __le32 rx_tpa_start_cmp_flags2;
+ __le32 rx_tpa_start_cmp_metadata;
+ __le32 rx_tpa_start_cmp_cfa_code_v2;
+ __le32 rx_tpa_start_cmp_hdr_info;
+};
+
+#define TPA_START_CFA_CODE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
+
+#define TPA_START_IS_IPV6(rx_tpa_start) \
+ (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
+
+#define TPA_START_ERROR_CODE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \
+ RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
+
+#define TPA_START_METADATA0_TCI(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_METADATA0_TCI_MASK) >> \
+ RX_TPA_START_CMP_METADATA0_SFT)
+
+#define RX_TPA_END_CMP_TYPE GENMASK(5, 0)
+#define RX_TPA_END_CMP_FLAGS GENMASK(15, 6)
+#define RX_TPA_END_CMP_FLAGS_SHIFT 6
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT GENMASK(9, 7)
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT 7
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO BIT(7)
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
+#define RX_TPA_END_CMP_FLAGS_RSS_VALID BIT(10)
+#define RX_TPA_END_CMP_FLAGS_ITYPES GENMASK(15, 12)
+#define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT 12
+#define RX_TPA_END_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
+#define RX_TPA_END_CMP_LEN GENMASK(31, 16)
+#define RX_TPA_END_CMP_LEN_SHIFT 16
+#define RX_TPA_END_CMP_V1 BIT(0)
+#define RX_TPA_END_CMP_TPA_SEGS GENMASK(15, 8)
+#define RX_TPA_END_CMP_TPA_SEGS_SHIFT 8
+#define RX_TPA_END_CMP_AGG_ID GENMASK(25, 16)
+#define RX_TPA_END_CMP_AGG_ID_SHIFT 16
+#define RX_TPA_END_GRO_TS BIT(31)
+
+struct rx_tpa_end_cmp {
+ __le32 rx_tpa_end_cmp_len_flags_type;
+ u32 rx_tpa_end_cmp_opaque;
+ __le32 rx_tpa_end_cmp_misc_v1;
+ __le32 rx_tpa_end_cmp_tsdelta;
+};
+
+#define TPA_END_AGG_ID(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+
+#define TPA_END_TPA_SEGS(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
+
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO \
+ cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO & \
+ RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
+
+#define TPA_END_GRO(rx_tpa_end) \
+ ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type & \
+ RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
+
+#define TPA_END_GRO_TS(rx_tpa_end) \
+ (!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & \
+ cpu_to_le32(RX_TPA_END_GRO_TS)))
+
+#define RX_TPA_END_CMP_TPA_DUP_ACKS GENMASK(3, 0)
+#define RX_TPA_END_CMP_PAYLOAD_OFFSET GENMASK(23, 16)
+#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
+#define RX_TPA_END_CMP_AGG_BUFS GENMASK(31, 24)
+#define RX_TPA_END_CMP_AGG_BUFS_SHIFT 24
+#define RX_TPA_END_CMP_TPA_SEG_LEN GENMASK(15, 0)
+#define RX_TPA_END_CMP_V2 BIT(0)
+#define RX_TPA_END_CMP_ERRORS GENMASK(2, 1)
+#define RX_TPA_END_CMPL_ERRORS_SHIFT 1
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR (0x4 << 1)
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
+
+struct rx_tpa_end_cmp_ext {
+ __le32 rx_tpa_end_cmp_dup_acks;
+ __le32 rx_tpa_end_cmp_seg_len;
+ __le32 rx_tpa_end_cmp_errors_v2;
+ u32 rx_tpa_end_cmp_start_opaque;
+};
+
+#define TPA_END_ERRORS(rx_tpa_end_ext) \
+ ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \
+ cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+
+#define TPA_END_PAYLOAD_OFF(rx_tpa_end_ext) \
+ ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >> \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT)
+
+#define TPA_END_AGG_BUFS(rx_tpa_end_ext) \
+ ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
+ RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT)
+
+#define EVENT_DATA1_RESET_NOTIFY_FATAL(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
+
+#define EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION)
+
+#define EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2) \
+ ((data2) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK)
+
+#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \
+ (!!((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC))
+
+#define EVENT_DATA1_RECOVERY_ENABLED(data1) \
+ (!!((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED))
+
+#define BNGE_EVENT_ERROR_REPORT_TYPE(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT)
+
+#define BNGE_EVENT_INVALID_SIGNAL_DATA(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT)
+#endif /* _BNGE_HW_DEF_H_ */
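The header above describes completion-ring records purely as GENMASK()/BIT() fields plus accessor macros. A hedged sketch of how a consumer decodes one RX completion with them (the function and pr_debug() usage are illustrative, not from this series):

static void ex_decode_rx_cmp(const struct rx_cmp *rxcmp,
			     const struct rx_cmp_ext *rxcmp1)
{
	u32 len = (le32_to_cpu(rxcmp->rx_cmp_len_flags_type) &
		   RX_CMP_LEN) >> RX_CMP_LEN_SHIFT;

	if (RX_CMP_HASH_VALID(rxcmp))
		/* 5-bit RSS profile id, already masked by the macro */
		pr_debug("len %u, rss profile %u\n", len,
			 (u32)RX_CMP_HASH_TYPE(rxcmp));

	if (RX_CMP_L4_CS_OK(rxcmp1))
		pr_debug("L4 checksum verified by hardware\n");
}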
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
index 6df629761d95..2ed9c92c8c30 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
@@ -4,7 +4,7 @@
#ifndef _BNGE_HWRM_H_
#define _BNGE_HWRM_H_
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
enum bnge_hwrm_ctx_flags {
BNGE_HWRM_INTERNAL_CTX_OWNED = BIT(0),
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
index 198f49b40dbf..84c90a957719 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
@@ -5,7 +5,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
#include <linux/if_vlan.h>
#include <net/netdev_queues.h>
@@ -1183,3 +1183,68 @@ int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx)
req->async_event_cr = cpu_to_le16(idx);
return bnge_hwrm_req_send(bd, req);
}
+
+#define BNGE_DFLT_TUNL_TPA_BMAP \
+ (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
+ VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
+ VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
+
+static void bnge_hwrm_vnic_update_tunl_tpa(struct bnge_dev *bd,
+ struct hwrm_vnic_tpa_cfg_input *req)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+ u32 tunl_tpa_bmap = BNGE_DFLT_TUNL_TPA_BMAP;
+
+ if (!(bd->fw_cap & BNGE_FW_CAP_VNIC_TUNNEL_TPA))
+ return;
+
+ if (bn->vxlan_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
+ if (bn->vxlan_gpe_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
+ if (bn->nge_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
+
+ req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
+ req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
+}
+
+int bnge_hwrm_vnic_set_tpa(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ u32 tpa_flags)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+ struct hwrm_vnic_tpa_cfg_input *req;
+ int rc;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ return 0;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_TPA_CFG);
+ if (rc)
+ return rc;
+
+ if (tpa_flags) {
+ u32 flags;
+
+ flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
+ VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
+ VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
+ VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
+ VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
+ if (tpa_flags & BNGE_NET_EN_GRO)
+ flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
+
+ req->flags = cpu_to_le32(flags);
+ req->enables =
+ cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
+ VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
+ VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
+ req->max_agg_segs = cpu_to_le16(MAX_TPA_SEGS);
+ req->max_aggs = cpu_to_le16(bn->max_tpa);
+ req->min_agg_len = cpu_to_le32(512);
+ bnge_hwrm_vnic_update_tunl_tpa(bd, req);
+ }
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+ return bnge_hwrm_req_send(bd, req);
+}
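Callers toggle aggregation through the flags argument: zero clears TPA on the VNIC, non-zero enables it, with BNGE_NET_EN_GRO selecting GRO-compatible aggregation on top of the base flags. Hedged usage sketch (assumes the driver context above; the wrapper is hypothetical):

static int ex_toggle_tpa(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
			 bool enable)
{
	/* 0 disables TPA entirely; BNGE_NET_EN_GRO requests GRO mode */
	return bnge_hwrm_vnic_set_tpa(bd, vnic,
				      enable ? BNGE_NET_EN_GRO : 0);
}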
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
index 042f28e84a05..38b046237feb 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
@@ -55,4 +55,6 @@ int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
struct bnge_ring_struct *ring,
u32 ring_type, u32 map_index);
int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx);
+int bnge_hwrm_vnic_set_tpa(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ u32 tpa_flags);
#endif /* _BNGE_HWRM_LIB_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
index 832eeb960bd2..6ab317f1c16e 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
@@ -10,9 +10,12 @@
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <net/netdev_lock.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <net/ip.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
@@ -20,6 +24,7 @@
#include "bnge_hwrm_lib.h"
#include "bnge_ethtool.h"
#include "bnge_rmem.h"
+#include "bnge_txrx.h"
#define BNGE_RING_TO_TC_OFF(bd, tx) \
((tx) % (bd)->tx_nr_rings_per_tc)
@@ -372,11 +377,37 @@ static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
}
}
+static void bnge_free_one_tpa_info_data(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ int i;
+
+ for (i = 0; i < bn->max_tpa; i++) {
+ struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[i];
+ u8 *data = tpa_info->data;
+
+ if (!data)
+ continue;
+
+ tpa_info->data = NULL;
+ page_pool_free_va(rxr->head_pool, data, false);
+ }
+}
+
static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
struct bnge_rx_ring_info *rxr)
{
+ struct bnge_tpa_idx_map *map;
+
+ if (rxr->rx_tpa)
+ bnge_free_one_tpa_info_data(bn, rxr);
+
bnge_free_one_rx_ring_bufs(bn, rxr);
bnge_free_one_agg_ring_bufs(bn, rxr);
+
+ map = rxr->rx_tpa_idx_map;
+ if (map)
+ memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
}
static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
@@ -391,9 +422,118 @@ static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
}
+static void bnge_free_tx_skbs(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ u16 max_idx;
+ int i;
+
+ max_idx = bn->tx_nr_pages * TX_DESC_CNT;
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ int j;
+
+ if (!txr->tx_buf_ring)
+ continue;
+
+ for (j = 0; j < max_idx;) {
+ struct bnge_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+ struct sk_buff *skb;
+ int k, last;
+
+ skb = tx_buf->skb;
+ if (!skb) {
+ j++;
+ continue;
+ }
+
+ tx_buf->skb = NULL;
+
+ dma_unmap_single(bd->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+
+ last = tx_buf->nr_frags;
+ j += 2;
+ for (k = 0; k < last; k++, j++) {
+ int ring_idx = j & bn->tx_ring_mask;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
+
+ tx_buf = &txr->tx_buf_ring[ring_idx];
+ dma_unmap_page(bd->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ }
+ dev_kfree_skb(skb);
+ }
+ netdev_tx_reset_queue(netdev_get_tx_queue(bd->netdev, i));
+ }
+}
+
static void bnge_free_all_rings_bufs(struct bnge_net *bn)
{
bnge_free_rx_ring_pair_bufs(bn);
+ bnge_free_tx_skbs(bn);
+}
+
+static void bnge_free_tpa_info(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+
+ kfree(rxr->rx_tpa_idx_map);
+ rxr->rx_tpa_idx_map = NULL;
+ if (rxr->rx_tpa) {
+ for (j = 0; j < bn->max_tpa; j++) {
+ kfree(rxr->rx_tpa[j].agg_arr);
+ rxr->rx_tpa[j].agg_arr = NULL;
+ }
+ }
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+ }
+}
+
+static int bnge_alloc_tpa_info(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ if (!bd->max_tpa_v2)
+ return 0;
+
+ bn->max_tpa = max_t(u16, bd->max_tpa_v2, MAX_TPA);
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+
+ rxr->rx_tpa = kcalloc(bn->max_tpa, sizeof(struct bnge_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ goto err_free_tpa_info;
+
+ for (j = 0; j < bn->max_tpa; j++) {
+ struct rx_agg_cmp *agg;
+
+ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+ if (!agg)
+ goto err_free_tpa_info;
+ rxr->rx_tpa[j].agg_arr = agg;
+ }
+ rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa_idx_map)
+ goto err_free_tpa_info;
+ }
+ return 0;
+
+err_free_tpa_info:
+ bnge_free_tpa_info(bn);
+ return -ENOMEM;
}
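Note the error-handling shape above: every failure path jumps to a single label that calls bnge_free_tpa_info(), which is written to tolerate partially populated state (NULL entries are skipped and freed pointers are cleared). A compilable sketch of that shared-teardown idiom follows; the demo_* names are hypothetical and stand in for the rx_tpa allocations.

	/* Sketch of the shared-teardown idiom: the free routine tolerates
	 * partially built state, so all error paths can share one label.
	 * The demo_* names are hypothetical, not driver code.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct demo {
		int *a;
		int *b;
	};

	static void demo_free(struct demo *d)
	{
		free(d->a);		/* free(NULL) is a no-op */
		d->a = NULL;
		free(d->b);
		d->b = NULL;
	}

	static int demo_alloc(struct demo *d)
	{
		d->a = calloc(4, sizeof(*d->a));
		if (!d->a)
			goto err_free;
		d->b = calloc(4, sizeof(*d->b));
		if (!d->b)
			goto err_free;
		return 0;

	err_free:
		demo_free(d);
		return -1;
	}

	int main(void)
	{
		struct demo d = { 0 };

		printf("alloc: %d\n", demo_alloc(&d));
		demo_free(&d);
		return 0;
	}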
static void bnge_free_rx_rings(struct bnge_net *bn)
@@ -401,6 +541,7 @@ static void bnge_free_rx_rings(struct bnge_net *bn)
struct bnge_dev *bd = bn->bd;
int i;
+ bnge_free_tpa_info(bn);
for (i = 0; i < bd->rx_nr_rings; i++) {
struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
struct bnge_ring_struct *ring;
@@ -525,6 +666,12 @@ static int bnge_alloc_rx_rings(struct bnge_net *bn)
goto err_free_rx_rings;
}
}
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA) {
+ rc = bnge_alloc_tpa_info(bn);
+ if (rc)
+ goto err_free_rx_rings;
+ }
return rc;
err_free_rx_rings:
@@ -856,6 +1003,13 @@ u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr)
return txr->tx_cpr->ring_struct.fw_ring_id;
}
+static void bnge_db_nq_arm(struct bnge_net *bn,
+ struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_ARM |
+ DB_RING_IDX(db, idx), db->doorbell);
+}
+
static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
{
bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK |
@@ -878,12 +1032,6 @@ static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
return nqr->ring_struct.map_idx;
}
-static irqreturn_t bnge_msix(int irq, void *dev_instance)
-{
- /* NAPI scheduling to be added in a future patch */
- return IRQ_HANDLED;
-}
-
static void bnge_init_nq_tree(struct bnge_net *bn)
{
struct bnge_dev *bd = bn->bd;
@@ -925,9 +1073,9 @@ static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
return netmem;
}
-static u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
- struct bnge_rx_ring_info *rxr,
- gfp_t gfp)
+u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr,
+ gfp_t gfp)
{
unsigned int offset;
struct page *page;
@@ -941,9 +1089,8 @@ static u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
return page_address(page) + offset;
}
-static int bnge_alloc_rx_data(struct bnge_net *bn,
- struct bnge_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+int bnge_alloc_rx_data(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
struct rx_bd *rxbd;
@@ -995,7 +1142,7 @@ static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
return 0;
}
-static u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
+u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
{
u16 next, max = rxr->rx_agg_bmap_size;
@@ -1005,9 +1152,9 @@ static u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
return next;
}
-static int bnge_alloc_rx_netmem(struct bnge_net *bn,
- struct bnge_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+int bnge_alloc_rx_netmem(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct bnge_sw_rx_agg_bd *rx_agg_buf;
u16 sw_prod = rxr->rx_sw_agg_prod;
@@ -1070,6 +1217,30 @@ err_free_one_agg_ring_bufs:
return -ENOMEM;
}
+static int bnge_alloc_one_tpa_info_data(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ dma_addr_t mapping;
+ u8 *data;
+ int i;
+
+ for (i = 0; i < bn->max_tpa; i++) {
+ data = __bnge_alloc_rx_frag(bn, &mapping, rxr,
+ GFP_KERNEL);
+ if (!data)
+ goto err_free_tpa_info_data;
+
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bn->rx_offset;
+ rxr->rx_tpa[i].mapping = mapping;
+ }
+ return 0;
+
+err_free_tpa_info_data:
+ bnge_free_one_tpa_info_data(bn, rxr);
+ return -ENOMEM;
+}
+
static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
{
struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
@@ -1084,8 +1255,17 @@ static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
if (rc)
goto err_free_one_rx_ring_bufs;
}
+
+ if (rxr->rx_tpa) {
+ rc = bnge_alloc_one_tpa_info_data(bn, rxr);
+ if (rc)
+ goto err_free_one_agg_ring_bufs;
+ }
+
return 0;
+err_free_one_agg_ring_bufs:
+ bnge_free_one_agg_ring_bufs(bn, rxr);
err_free_one_rx_ring_bufs:
bnge_free_one_rx_ring_bufs(bn, rxr);
return rc;
@@ -1755,6 +1935,85 @@ skip_uc:
return rc;
}
+static void bnge_disable_int(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID)
+ bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
+ }
+}
+
+static void bnge_disable_int_sync(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ bnge_disable_int(bn);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+
+ synchronize_irq(bd->irq_tbl[map_idx].vector);
+ }
+}
+
+static void bnge_enable_int(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+
+ nqr = &bnapi->nq_ring;
+ bnge_db_nq_arm(bn, &nqr->nq_db, nqr->nq_raw_cons);
+ }
+}
+
+static void bnge_disable_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (test_and_set_bit(BNGE_STATE_NAPI_DISABLED, &bn->state))
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ napi_disable_locked(&bnapi->napi);
+ }
+}
+
+static void bnge_enable_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ clear_bit(BNGE_STATE_NAPI_DISABLED, &bn->state);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ bnapi->in_reset = false;
+ bnapi->tx_fault = 0;
+
+ napi_enable_locked(&bnapi->napi);
+ }
+}
+
static void bnge_hwrm_vnic_free(struct bnge_net *bn)
{
int i;
@@ -1886,6 +2145,12 @@ static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path)
bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path);
}
+ /* The completion rings are about to be freed. Once they are
+ * gone, the IRQ doorbells will no longer work, so disable
+ * interrupts here.
+ */
+ bnge_disable_int_sync(bn);
+
for (i = 0; i < bd->nq_nr_rings; i++) {
struct bnge_napi *bnapi = bn->bnapi[i];
struct bnge_nq_ring_info *nqr;
@@ -2015,6 +2280,27 @@ err_free_irq:
return rc;
}
+static int bnge_set_tpa(struct bnge_net *bn, bool set_tpa)
+{
+ u32 tpa_flags = 0;
+ int rc, i;
+
+ if (set_tpa)
+ tpa_flags = bn->priv_flags & BNGE_NET_EN_TPA;
+ else if (BNGE_NO_FW_ACCESS(bn->bd))
+ return 0;
+ for (i = 0; i < bn->nr_vnics; i++) {
+ rc = bnge_hwrm_vnic_set_tpa(bn->bd, &bn->vnic_info[i],
+ tpa_flags);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic set tpa failure for vnic %d: rc %x\n",
+ i, rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
static int bnge_init_chip(struct bnge_net *bn)
{
struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
@@ -2049,6 +2335,12 @@ static int bnge_init_chip(struct bnge_net *bn)
if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA)
bnge_hwrm_update_rss_hash_cfg(bn);
+ if (bn->priv_flags & BNGE_NET_EN_TPA) {
+ rc = bnge_set_tpa(bn, true);
+ if (rc)
+ goto err_out;
+ }
+
/* Filter for default vnic 0 */
rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr);
if (rc) {
@@ -2085,16 +2377,6 @@ err_out:
return rc;
}
-static int bnge_napi_poll(struct napi_struct *napi, int budget)
-{
- int work_done = 0;
-
- /* defer NAPI implementation to next patch series */
- napi_complete_done(napi, work_done);
-
- return work_done;
-}
-
static void bnge_init_napi(struct bnge_net *bn)
{
struct bnge_dev *bd = bn->bd;
@@ -2161,6 +2443,42 @@ err_free_rx_ring_pair_bufs:
return rc;
}
+static void bnge_tx_disable(struct bnge_net *bn)
+{
+ struct bnge_tx_ring_info *txr;
+ int i;
+
+ if (bn->tx_ring) {
+ for (i = 0; i < bn->bd->tx_nr_rings; i++) {
+ txr = &bn->tx_ring[i];
+ WRITE_ONCE(txr->dev_state, BNGE_DEV_STATE_CLOSING);
+ }
+ }
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
+
+ if (!bn->netdev)
+ return;
+ /* Drop carrier first to prevent TX timeout */
+ netif_carrier_off(bn->netdev);
+ /* Stop all TX queues */
+ netif_tx_disable(bn->netdev);
+}
+
+static void bnge_tx_enable(struct bnge_net *bn)
+{
+ struct bnge_tx_ring_info *txr;
+ int i;
+
+ for (i = 0; i < bn->bd->tx_nr_rings; i++) {
+ txr = &bn->tx_ring[i];
+ WRITE_ONCE(txr->dev_state, 0);
+ }
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
+ netif_tx_wake_all_queues(bn->netdev);
+}
+
static int bnge_open_core(struct bnge_net *bn)
{
struct bnge_dev *bd = bn->bd;
@@ -2192,7 +2510,14 @@ static int bnge_open_core(struct bnge_net *bn)
netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc);
goto err_free_irq;
}
+
+ bnge_enable_napi(bn);
+
set_bit(BNGE_STATE_OPEN, &bd->state);
+
+ bnge_enable_int(bn);
+
+ bnge_tx_enable(bn);
return 0;
err_free_irq:
@@ -2203,13 +2528,6 @@ err_del_napi:
return rc;
}
-static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
-}
-
static int bnge_open(struct net_device *dev)
{
struct bnge_net *bn = netdev_priv(dev);
@@ -2224,8 +2542,7 @@ static int bnge_open(struct net_device *dev)
static int bnge_shutdown_nic(struct bnge_net *bn)
{
- /* TODO: close_path = 0 until we make NAPI functional */
- bnge_hwrm_resource_free(bn, 0);
+ bnge_hwrm_resource_free(bn, 1);
return 0;
}
@@ -2233,8 +2550,11 @@ static void bnge_close_core(struct bnge_net *bn)
{
struct bnge_dev *bd = bn->bd;
+ bnge_tx_disable(bn);
+
clear_bit(BNGE_STATE_OPEN, &bd->state);
bnge_shutdown_nic(bn);
+ bnge_disable_napi(bn);
bnge_free_all_rings_bufs(bn);
bnge_free_irq(bn);
bnge_del_napi(bn);
@@ -2255,6 +2575,7 @@ static const struct net_device_ops bnge_netdev_ops = {
.ndo_open = bnge_open,
.ndo_stop = bnge_close,
.ndo_start_xmit = bnge_start_xmit,
+ .ndo_features_check = bnge_features_check,
};
static void bnge_init_mac_addr(struct bnge_dev *bd)
@@ -2295,7 +2616,6 @@ void bnge_set_ring_params(struct bnge_dev *bd)
rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- bn->rx_copy_thresh = BNGE_RX_COPY_THRESH;
ring_size = bn->rx_ring_size;
bn->rx_agg_ring_size = 0;
bn->rx_agg_nr_pages = 0;
@@ -2334,7 +2654,10 @@ void bnge_set_ring_params(struct bnge_dev *bd)
bn->rx_agg_ring_size = agg_ring_size;
bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;
- rx_size = SKB_DATA_ALIGN(BNGE_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_size = max3(BNGE_DEFAULT_RX_COPYBREAK,
+ bn->rx_copybreak,
+ bn->netdev->cfg_pending->hds_thresh);
+ rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
@@ -2367,6 +2690,17 @@ void bnge_set_ring_params(struct bnge_dev *bd)
bn->cp_ring_mask = bn->cp_bit - 1;
}
+static void bnge_init_ring_params(struct bnge_net *bn)
+{
+ u32 rx_size;
+
+ bn->rx_copybreak = BNGE_DEFAULT_RX_COPYBREAK;
+ /* Try to fit 4 chunks into a 4k page */
+ rx_size = SZ_1K -
+ NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bn->netdev->cfg->hds_thresh = max(BNGE_DEFAULT_RX_COPYBREAK, rx_size);
+}
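To make the "fit 4 chunks into a 4k page" comment concrete: each head buffer pays NET_SKB_PAD plus the aligned struct skb_shared_info on top of its payload, so budgeting SZ_1K per chunk leaves hds_thresh at roughly 1024 minus that overhead. The sketch below uses typical x86_64 values for those constants; the real numbers depend on the kernel configuration, so treat them as assumptions.

	/* Illustrative sizing for the "4 chunks per 4k page" comment.
	 * NET_SKB_PAD and the aligned skb_shared_info size are assumed
	 * typical x86_64 values, not taken from this driver.
	 */
	#include <stdio.h>

	int main(void)
	{
		int chunk = 1024;	/* SZ_1K */
		int net_skb_pad = 64;	/* assumed */
		int shinfo = 320;	/* SKB_DATA_ALIGN(sizeof(shared_info)), assumed */
		int hds_thresh = chunk - net_skb_pad - shinfo;

		printf("hds_thresh ~= %d, 4 chunks = %d per 4k page\n",
		       hds_thresh, 4 * chunk);
		return 0;
	}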
+
int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
{
struct net_device *netdev;
@@ -2456,6 +2790,7 @@ int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
bn->rx_dir = DMA_FROM_DEVICE;
bnge_set_tpa_flags(bd);
+ bnge_init_ring_params(bn);
bnge_set_ring_params(bd);
bnge_init_l2_fltr_tbl(bn);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
index fb3b961536ba..70f1a7c24814 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
@@ -4,10 +4,11 @@
#ifndef _BNGE_NETDEV_H_
#define _BNGE_NETDEV_H_
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/refcount.h>
#include "bnge_db.h"
+#include "bnge_hw_def.h"
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -76,6 +77,7 @@ struct tx_cmp {
#define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL
#define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL
#define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define CMPL_BA_TY_HWRM_ASY_EVT CMPL_BASE_TYPE_HWRM_ASYNC_EVENT
#define TX_CMP_FLAGS_ERROR (1 << 6)
#define TX_CMP_FLAGS_PUSH (1 << 7)
u32 tx_cmp_opaque;
@@ -135,7 +137,8 @@ struct bnge_ring_grp_info {
u16 nq_fw_ring_id;
};
-#define BNGE_RX_COPY_THRESH 256
+#define BNGE_DEFAULT_RX_COPYBREAK 256
+#define BNGE_MAX_RX_COPYBREAK 1024
#define BNGE_HW_FEATURE_VLAN_ALL_RX \
(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
@@ -150,6 +153,45 @@ enum {
#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
+#define BNGE_NO_FW_ACCESS(bd) (pci_channel_offline((bd)->pdev))
+
+#define MAX_TPA 256
+#define MAX_TPA_MASK (MAX_TPA - 1)
+#define MAX_TPA_SEGS 0x3f
+
+#define BNGE_TPA_INNER_L3_OFF(hdr_info) \
+ (((hdr_info) >> 18) & 0x1ff)
+
+#define BNGE_TPA_INNER_L2_OFF(hdr_info) \
+ (((hdr_info) >> 9) & 0x1ff)
+
+#define BNGE_TPA_OUTER_L3_OFF(hdr_info) \
+ ((hdr_info) & 0x1ff)
+
+struct bnge_tpa_idx_map {
+ u16 agg_id_tbl[1024];
+ DECLARE_BITMAP(agg_idx_bmap, MAX_TPA);
+};
+
+struct bnge_tpa_info {
+ void *data;
+ u8 *data_ptr;
+ dma_addr_t mapping;
+ u16 len;
+ unsigned short gso_type;
+ u32 flags2;
+ u32 metadata;
+ enum pkt_hash_types hash_type;
+ u32 rss_hash;
+ u32 hdr_info;
+
+ u16 cfa_code; /* cfa_code in TPA start compl */
+ u8 agg_count;
+ bool vlan_valid;
+ bool cfa_code_valid;
+ struct rx_agg_cmp *agg_arr;
+};
+
/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
* BD because the first TX BD is always a long BD.
*/
@@ -172,10 +214,16 @@ enum {
#define RING_RX_AGG(bn, idx) ((idx) & (bn)->rx_agg_ring_mask)
#define NEXT_RX_AGG(idx) ((idx) + 1)
+#define BNGE_NQ_HDL_IDX_MASK 0x00ffffff
+#define BNGE_NQ_HDL_TYPE_MASK 0xff000000
#define BNGE_NQ_HDL_TYPE_SHIFT 24
#define BNGE_NQ_HDL_TYPE_RX 0x00
#define BNGE_NQ_HDL_TYPE_TX 0x01
+#define BNGE_NQ_HDL_IDX(hdl) ((hdl) & BNGE_NQ_HDL_IDX_MASK)
+#define BNGE_NQ_HDL_TYPE(hdl) (((hdl) & BNGE_NQ_HDL_TYPE_MASK) >> \
+ BNGE_NQ_HDL_TYPE_SHIFT)
+
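The handle packs the completion-ring type into the top byte and the ring index into the low 24 bits; BNGE_SET_NQ_HDL further down in this header builds the value and the two macros above take it apart in the NAPI poller. A standalone round-trip check with the same masks (plain C, not driver code):

	/* Round-trip check for the NQ handle packing above. */
	#include <assert.h>
	#include <stdio.h>

	#define IDX_MASK   0x00ffffffu
	#define TYPE_MASK  0xff000000u
	#define TYPE_SHIFT 24

	int main(void)
	{
		unsigned int hdl = (0x01u << TYPE_SHIFT) | (42u & IDX_MASK);

		assert((hdl & IDX_MASK) == 42);
		assert(((hdl & TYPE_MASK) >> TYPE_SHIFT) == 0x01);
		printf("hdl=0x%08x type=%u idx=%u\n", hdl,
		       (hdl & TYPE_MASK) >> TYPE_SHIFT, hdl & IDX_MASK);
		return 0;
	}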
struct bnge_net {
struct bnge_dev *bd;
struct net_device *netdev;
@@ -186,7 +234,7 @@ struct bnge_net {
u32 rx_buf_size;
u32 rx_buf_use_size; /* usable size */
u32 rx_agg_ring_size;
- u32 rx_copy_thresh;
+ u32 rx_copybreak;
u32 rx_ring_mask;
u32 rx_agg_ring_mask;
u16 rx_nr_pages;
@@ -231,6 +279,15 @@ struct bnge_net {
u8 rss_hash_key_updated:1;
int rsscos_nr_ctxs;
u32 stats_coal_ticks;
+
+ unsigned long state;
+#define BNGE_STATE_NAPI_DISABLED 0
+
+ u32 msg_enable;
+ u16 max_tpa;
+ __be16 vxlan_port;
+ __be16 nge_port;
+ __be16 vxlan_gpe_port;
};
#define BNGE_DEFAULT_RX_RING_SIZE 511
@@ -277,9 +334,25 @@ void bnge_set_ring_params(struct bnge_dev *bd);
txr = (iter < BNGE_MAX_TXR_PER_NAPI - 1) ? \
(bnapi)->tx_ring[++iter] : NULL)
+#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
+ ((db)->db_epoch_shift))
+
+#define DB_TOGGLE(tgl) ((tgl) << DBR_TOGGLE_SFT)
+
+#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
+ DB_EPOCH(db, idx))
+
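DB_RING_IDX folds an epoch bit into the doorbell value: the low bits carry the masked ring index, while DB_EPOCH exposes the first counter bit above the ring mask, shifted to wherever the doorbell format expects it, so the hardware can distinguish successive wraps of the ring. A compilable demo with assumed mask and shift values (the real ones live in struct bnge_db_info):

	/* Demo of the epoch encoding: low bits are the masked ring index,
	 * the next counter bit above the mask is shifted up as the epoch.
	 * The mask/shift values here are assumptions for illustration.
	 */
	#include <stdio.h>

	#define RING_MASK   0x7fu	/* 128-entry ring (assumed) */
	#define EPOCH_MASK  0x80u	/* first bit above the ring mask */
	#define EPOCH_SHIFT 17		/* moves bit 7 to bit 24 (assumed) */

	static unsigned int db_ring_idx(unsigned int idx)
	{
		return (idx & RING_MASK) | ((idx & EPOCH_MASK) << EPOCH_SHIFT);
	}

	int main(void)
	{
		/* same ring slot 5, epoch flipping on every wrap */
		printf("0x%08x\n", db_ring_idx(5));		/* epoch 0 */
		printf("0x%08x\n", db_ring_idx(128 + 5));	/* epoch 1 */
		printf("0x%08x\n", db_ring_idx(256 + 5));	/* epoch 0 again */
		return 0;
	}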
#define BNGE_SET_NQ_HDL(cpr) \
(((cpr)->cp_ring_type << BNGE_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)
+#define BNGE_DB_NQ(bd, db, idx) \
+ bnge_writeq(bd, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
+ (db)->doorbell)
+
+#define BNGE_DB_NQ_ARM(bd, db, idx) \
+ bnge_writeq(bd, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
+ DB_RING_IDX(db, idx), (db)->doorbell)
+
struct bnge_stats_mem {
u64 *sw_stats;
u64 *hw_masks;
@@ -288,6 +361,25 @@ struct bnge_stats_mem {
int len;
};
+struct nqe_cn {
+ __le16 type;
+ #define NQ_CN_TYPE_MASK 0x3fUL
+ #define NQ_CN_TYPE_SFT 0
+ #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
+ #define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
+ #define NQ_CN_TOGGLE_MASK 0xc0UL
+ #define NQ_CN_TOGGLE_SFT 6
+ __le16 reserved16;
+ __le32 cq_handle_low;
+ __le32 v;
+ #define NQ_CN_V 0x1UL
+ __le32 cq_handle_high;
+};
+
+#define NQE_CN_TYPE(type) ((type) & NQ_CN_TYPE_MASK)
+#define NQE_CN_TOGGLE(type) (((type) & NQ_CN_TOGGLE_MASK) >> \
+ NQ_CN_TOGGLE_SFT)
+
struct bnge_cp_ring_info {
struct bnge_napi *bnapi;
dma_addr_t *desc_mapping;
@@ -297,6 +389,10 @@ struct bnge_cp_ring_info {
u8 cp_idx;
u32 cp_raw_cons;
struct bnge_db_info cp_db;
+ bool had_work_done;
+ bool has_more_work;
+ bool had_nqe_notify;
+ u8 toggle;
};
struct bnge_nq_ring_info {
@@ -309,8 +405,9 @@ struct bnge_nq_ring_info {
struct bnge_stats_mem stats;
u32 hw_stats_ctx_id;
+ bool has_more_work;
- int cp_ring_count;
+ u16 cp_ring_count;
struct bnge_cp_ring_info *cp_ring_arr;
};
@@ -336,6 +433,9 @@ struct bnge_rx_ring_info {
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+ struct bnge_tpa_info *rx_tpa;
+ struct bnge_tpa_idx_map *rx_tpa_idx_map;
+
struct bnge_ring_struct rx_ring_struct;
struct bnge_ring_struct rx_agg_ring_struct;
struct page_pool *page_pool;
@@ -373,6 +473,14 @@ struct bnge_napi {
struct bnge_nq_ring_info nq_ring;
struct bnge_rx_ring_info *rx_ring;
struct bnge_tx_ring_info *tx_ring[BNGE_MAX_TXR_PER_NAPI];
+ u8 events;
+#define BNGE_RX_EVENT 1
+#define BNGE_AGG_EVENT 2
+#define BNGE_TX_EVENT 4
+#define BNGE_REDIRECT_EVENT 8
+#define BNGE_TX_CMP_EVENT 0x10
+ bool in_reset;
+ bool tx_fault;
};
#define INVALID_STATS_CTX_ID -1
@@ -451,4 +559,11 @@ struct bnge_l2_filter {
u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr);
u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr);
void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+int bnge_alloc_rx_data(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp);
+u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx);
+u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr, gfp_t gfp);
+int bnge_alloc_rx_netmem(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp);
#endif /* _BNGE_NETDEV_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
index 79f5ce2e5d08..ee97be440c33 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
@@ -9,7 +9,7 @@
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
-#include <linux/bnxt/hsi.h>
+#include <linux/bnge/hsi.h>
#include "bnge.h"
#include "bnge_hwrm_lib.h"
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_txrx.c b/drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
new file mode 100644
index 000000000000..a2616f037557
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
@@ -0,0 +1,1642 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/gro.h>
+#include <linux/skbuff.h>
+#include <net/page_pool/helpers.h>
+#include <linux/if_vlan.h>
+#include <net/udp_tunnel.h>
+#include <net/dst_metadata.h>
+#include <net/netdev_queues.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_netdev.h"
+#include "bnge_rmem.h"
+#include "bnge_txrx.h"
+
+irqreturn_t bnge_msix(int irq, void *dev_instance)
+{
+ struct bnge_napi *bnapi = dev_instance;
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_net *bn;
+ u32 cons;
+
+ bn = bnapi->bn;
+ nqr = &bnapi->nq_ring;
+ cons = RING_CMP(bn, nqr->nq_raw_cons);
+
+ prefetch(&nqr->desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+ napi_schedule(&bnapi->napi);
+ return IRQ_HANDLED;
+}
+
+static struct rx_agg_cmp *bnge_get_tpa_agg(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 agg_id, u16 curr)
+{
+ struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
+
+ return &tpa_info->agg_arr[curr];
+}
+
+static struct rx_agg_cmp *bnge_get_agg(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u16 cp_cons, u16 curr)
+{
+ struct rx_agg_cmp *agg;
+
+ cp_cons = RING_CMP(bn, ADV_RAW_CMP(cp_cons, curr));
+ agg = (struct rx_agg_cmp *)
+ &cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ return agg;
+}
+
+static void bnge_reuse_rx_agg_bufs(struct bnge_cp_ring_info *cpr, u16 idx,
+ u16 start, u32 agg_bufs, bool tpa)
+{
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct bnge_net *bn = bnapi->bn;
+ struct bnge_rx_ring_info *rxr;
+ u16 prod, sw_prod;
+ u32 i;
+
+ rxr = bnapi->rx_ring;
+ sw_prod = rxr->rx_sw_agg_prod;
+ prod = rxr->rx_agg_prod;
+
+ for (i = 0; i < agg_bufs; i++) {
+ struct bnge_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_agg_cmp *agg;
+ struct rx_bd *prod_bd;
+ netmem_ref netmem;
+ u16 cons;
+
+ if (tpa)
+ agg = bnge_get_tpa_agg(bn, rxr, idx, start + i);
+ else
+ agg = bnge_get_agg(bn, cpr, idx, start + i);
+ cons = agg->rx_agg_cmp_opaque;
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ prod_rx_buf = &rxr->rx_agg_buf_ring[sw_prod];
+ cons_rx_buf = &rxr->rx_agg_buf_ring[cons];
+
+ /* It is possible for sw_prod to be equal to cons, so
+ * set cons_rx_buf->netmem to 0 first.
+ */
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+ prod_rx_buf->netmem = netmem;
+ prod_rx_buf->offset = cons_rx_buf->offset;
+
+ prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+ prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)]
+ [RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
+ prod_bd->rx_bd_opaque = sw_prod;
+
+ prod = NEXT_RX_AGG(prod);
+ sw_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));
+ }
+ rxr->rx_agg_prod = prod;
+ rxr->rx_sw_agg_prod = sw_prod;
+}
+
+static int bnge_agg_bufs_valid(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u8 agg_bufs, u32 *raw_cons)
+{
+ struct rx_agg_cmp *agg;
+ u16 last;
+
+ *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
+ last = RING_CMP(bn, *raw_cons);
+ agg = (struct rx_agg_cmp *)
+ &cpr->desc_ring[CP_RING(last)][CP_IDX(last)];
+ return RX_AGG_CMP_VALID(bn, agg, *raw_cons);
+}
+
+static int bnge_discard_rx(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
+ u32 *raw_cons, void *cmp)
+{
+ u32 tmp_raw_cons = *raw_cons;
+ struct rx_cmp *rxcmp = cmp;
+ u8 cmp_type, agg_bufs = 0;
+
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
+ RX_CMP_AGG_BUFS) >>
+ RX_CMP_AGG_BUFS_SHIFT;
+ } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+ return 0;
+ }
+
+ if (agg_bufs) {
+ if (!bnge_agg_bufs_valid(bn, cpr, agg_bufs, &tmp_raw_cons))
+ return -EBUSY;
+ }
+ *raw_cons = tmp_raw_cons;
+ return 0;
+}
+
+static u32 __bnge_rx_agg_netmems(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct sk_buff *skb)
+{
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct skb_shared_info *shinfo;
+ struct bnge_rx_ring_info *rxr;
+ u32 i, total_frag_len = 0;
+ u16 prod;
+
+ rxr = bnapi->rx_ring;
+ prod = rxr->rx_agg_prod;
+ shinfo = skb_shinfo(skb);
+
+ for (i = 0; i < agg_bufs; i++) {
+ struct bnge_sw_rx_agg_bd *cons_rx_buf;
+ struct rx_agg_cmp *agg;
+ u16 cons, frag_len;
+ netmem_ref netmem;
+
+ if (tpa)
+ agg = bnge_get_tpa_agg(bn, rxr, idx, i);
+ else
+ agg = bnge_get_agg(bn, cpr, idx, i);
+ cons = agg->rx_agg_cmp_opaque;
+ frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
+ RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
+
+ cons_rx_buf = &rxr->rx_agg_buf_ring[cons];
+ skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len, BNGE_RX_PAGE_SIZE);
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ /* It is possible for bnge_alloc_rx_netmem() to allocate
+ * a sw_prod index that equals the cons index, so we
+ * need to clear the cons entry now.
+ */
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+
+ if (bnge_alloc_rx_netmem(bn, rxr, prod, GFP_ATOMIC) != 0) {
+ skb->len -= frag_len;
+ skb->data_len -= frag_len;
+ skb->truesize -= BNGE_RX_PAGE_SIZE;
+
+ --shinfo->nr_frags;
+ cons_rx_buf->netmem = netmem;
+
+ /* Update prod since some netmems may already have been
+ * allocated.
+ */
+ rxr->rx_agg_prod = prod;
+ bnge_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
+ return 0;
+ }
+
+ page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
+ BNGE_RX_PAGE_SIZE);
+
+ total_frag_len += frag_len;
+ prod = NEXT_RX_AGG(prod);
+ }
+ rxr->rx_agg_prod = prod;
+ return total_frag_len;
+}
+
+static struct sk_buff *bnge_rx_agg_netmems_skb(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ u32 total_frag_len;
+
+ total_frag_len = __bnge_rx_agg_netmems(bn, cpr, idx, agg_bufs,
+ tpa, skb);
+ if (!total_frag_len) {
+ skb_mark_for_recycle(skb);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ return skb;
+}
+
+static void bnge_sched_reset_rxr(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ if (!rxr->bnapi->in_reset) {
+ rxr->bnapi->in_reset = true;
+
+ /* TODO: Initiate reset task */
+ }
+ rxr->rx_next_cons = 0xffff;
+}
+
+static void bnge_sched_reset_txr(struct bnge_net *bn,
+ struct bnge_tx_ring_info *txr,
+ u16 curr)
+{
+ struct bnge_napi *bnapi = txr->bnapi;
+
+ if (bnapi->tx_fault)
+ return;
+
+ netdev_err(bn->netdev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)\n",
+ txr->txq_index, txr->tx_hw_cons,
+ txr->tx_cons, txr->tx_prod, curr);
+ WARN_ON_ONCE(1);
+ bnapi->tx_fault = 1;
+ /* TODO: Initiate reset task */
+}
+
+static u16 bnge_tpa_alloc_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
+{
+ struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+ u16 idx = agg_id & MAX_TPA_MASK;
+
+ if (test_bit(idx, map->agg_idx_bmap)) {
+ idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA);
+ if (idx >= MAX_TPA)
+ return INVALID_HW_RING_ID;
+ }
+ __set_bit(idx, map->agg_idx_bmap);
+ map->agg_id_tbl[agg_id] = idx;
+ return idx;
+}
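Hardware agg IDs may exceed MAX_TPA, so the allocator above first tries the low bits of the ID as the slot and only falls back to the first free bit on a collision, recording the mapping in agg_id_tbl so later TPA_AGG and TPA_END completions can find the slot again. A standalone model of that policy follows; the bitmap helpers are simplified and this is not driver code.

	/* Standalone model of the agg-ID slot allocator: prefer the low
	 * bits of the hardware ID, else the first free slot; remember
	 * the mapping for later lookups. Illustrative only.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_TPA      256
	#define MAX_TPA_MASK (MAX_TPA - 1)

	static uint8_t  bmap[MAX_TPA];
	static uint16_t id_tbl[1024];

	static int alloc_agg_idx(uint16_t agg_id)
	{
		int idx = agg_id & MAX_TPA_MASK;

		if (bmap[idx]) {		/* preferred slot taken */
			for (idx = 0; idx < MAX_TPA && bmap[idx]; idx++)
				;
			if (idx == MAX_TPA)
				return -1;	/* all slots busy */
		}
		bmap[idx] = 1;
		id_tbl[agg_id] = idx;
		return idx;
	}

	int main(void)
	{
		printf("id 300 -> slot %d\n", alloc_agg_idx(300)); /* 300 & 255 = 44 */
		printf("id 44  -> slot %d\n", alloc_agg_idx(44));  /* collision -> 0 */
		printf("lookup 300 -> %u\n", id_tbl[300]);
		return 0;
	}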
+
+static void bnge_free_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
+{
+ struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+ __clear_bit(idx, map->agg_idx_bmap);
+}
+
+static u16 bnge_lookup_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
+{
+ struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+ return map->agg_id_tbl[agg_id];
+}
+
+static void bnge_tpa_metadata(struct bnge_tpa_info *tpa_info,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ tpa_info->cfa_code_valid = 1;
+ tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
+ tpa_info->vlan_valid = 0;
+ if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+ tpa_info->vlan_valid = 1;
+ tpa_info->metadata =
+ le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+ }
+}
+
+static void bnge_tpa_metadata_v2(struct bnge_tpa_info *tpa_info,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ tpa_info->vlan_valid = 0;
+ if (TPA_START_VLAN_VALID(tpa_start)) {
+ u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
+ u32 vlan_proto = ETH_P_8021Q;
+
+ tpa_info->vlan_valid = 1;
+ if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
+ vlan_proto = ETH_P_8021AD;
+ tpa_info->metadata = vlan_proto << 16 |
+ TPA_START_METADATA0_TCI(tpa_start1);
+ }
+}
+
+static void bnge_tpa_start(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
+ u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ struct bnge_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnge_tpa_info *tpa_info;
+ u16 cons, prod, agg_id;
+ struct rx_bd *prod_bd;
+ dma_addr_t mapping;
+
+ agg_id = TPA_START_AGG_ID(tpa_start);
+ agg_id = bnge_tpa_alloc_agg_idx(rxr, agg_id);
+ if (unlikely(agg_id == INVALID_HW_RING_ID)) {
+ netdev_warn(bn->netdev, "Unable to allocate agg ID for ring %d, agg 0x%lx\n",
+ rxr->bnapi->index, TPA_START_AGG_ID(tpa_start));
+ bnge_sched_reset_rxr(bn, rxr);
+ return;
+ }
+ cons = tpa_start->rx_tpa_start_cmp_opaque;
+ prod = rxr->rx_prod;
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
+ tpa_info = &rxr->rx_tpa[agg_id];
+
+ if (unlikely(cons != rxr->rx_next_cons ||
+ TPA_START_ERROR(tpa_start))) {
+ netdev_warn(bn->netdev, "TPA cons %x, expected cons %x, error code %lx\n",
+ cons, rxr->rx_next_cons,
+ TPA_START_ERROR_CODE(tpa_start1));
+ bnge_sched_reset_rxr(bn, rxr);
+ return;
+ }
+ prod_rx_buf->data = tpa_info->data;
+ prod_rx_buf->data_ptr = tpa_info->data_ptr;
+
+ mapping = tpa_info->mapping;
+ prod_rx_buf->mapping = mapping;
+
+ prod_bd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ tpa_info->data = cons_rx_buf->data;
+ tpa_info->data_ptr = cons_rx_buf->data_ptr;
+ cons_rx_buf->data = NULL;
+ tpa_info->mapping = cons_rx_buf->mapping;
+
+ tpa_info->len =
+ le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
+ RX_TPA_START_CMP_LEN_SHIFT;
+ if (likely(TPA_START_HASH_VALID(tpa_start))) {
+ tpa_info->hash_type = PKT_HASH_TYPE_L4;
+ if (TPA_START_IS_IPV6(tpa_start1))
+ tpa_info->gso_type = SKB_GSO_TCPV6;
+ else
+ tpa_info->gso_type = SKB_GSO_TCPV4;
+ tpa_info->rss_hash =
+ le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
+ } else {
+ tpa_info->hash_type = PKT_HASH_TYPE_NONE;
+ tpa_info->gso_type = 0;
+ netif_warn(bn, rx_err, bn->netdev, "TPA packet without valid hash\n");
+ }
+ tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
+ tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
+ bnge_tpa_metadata(tpa_info, tpa_start, tpa_start1);
+ else
+ bnge_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
+ tpa_info->agg_count = 0;
+
+ rxr->rx_prod = NEXT_RX(prod);
+ cons = RING_RX(bn, NEXT_RX(cons));
+ rxr->rx_next_cons = RING_RX(bn, NEXT_RX(cons));
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+ bnge_reuse_rx_data(rxr, cons, cons_rx_buf->data);
+ rxr->rx_prod = NEXT_RX(rxr->rx_prod);
+ cons_rx_buf->data = NULL;
+}
+
+static void bnge_abort_tpa(struct bnge_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
+{
+ if (agg_bufs)
+ bnge_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
+}
+
+static void bnge_tpa_agg(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
+ struct rx_agg_cmp *rx_agg)
+{
+ u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
+ struct bnge_tpa_info *tpa_info;
+
+ agg_id = bnge_lookup_agg_idx(rxr, agg_id);
+ tpa_info = &rxr->rx_tpa[agg_id];
+
+ if (unlikely(tpa_info->agg_count >= MAX_SKB_FRAGS)) {
+ netdev_warn(bn->netdev,
+ "TPA completion count %d exceeds limit for ring %d\n",
+ tpa_info->agg_count, rxr->bnapi->index);
+
+ bnge_sched_reset_rxr(bn, rxr);
+ return;
+ }
+
+ tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
+}
+
+void bnge_reuse_rx_data(struct bnge_rx_ring_info *rxr, u16 cons, void *data)
+{
+ struct bnge_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnge_net *bn = rxr->bnapi->bn;
+ struct rx_bd *cons_bd, *prod_bd;
+ u16 prod = rxr->rx_prod;
+
+ prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+ prod_rx_buf->data = data;
+ prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
+
+ prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+ prod_bd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[RX_RING(bn, cons)][RX_IDX(cons)];
+
+ prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
+}
+
+static void bnge_deliver_skb(struct bnge_net *bn, struct bnge_napi *bnapi,
+ struct sk_buff *skb)
+{
+ skb_mark_for_recycle(skb);
+ skb_record_rx_queue(skb, bnapi->index);
+ napi_gro_receive(&bnapi->napi, skb);
+}
+
+static struct sk_buff *bnge_copy_skb(struct bnge_napi *bnapi, u8 *data,
+ unsigned int len, dma_addr_t mapping)
+{
+ struct bnge_net *bn = bnapi->bn;
+ struct bnge_dev *bd = bn->bd;
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(&bnapi->napi, len);
+ if (!skb)
+ return NULL;
+
+ dma_sync_single_for_cpu(bd->dev, mapping, len, bn->rx_dir);
+
+ memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
+ len + NET_IP_ALIGN);
+
+ dma_sync_single_for_device(bd->dev, mapping, len, bn->rx_dir);
+
+ skb_put(skb, len);
+
+ return skb;
+}
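bnge_copy_skb() backs the copybreak path used by both bnge_tpa_end() and bnge_rx_pkt() below: frames no longer than rx_copybreak are copied into a fresh skb so the original DMA buffer can be reposted in place, while larger frames take ownership of the buffer via napi_build_skb() and force a replacement allocation. A toy model of that trade-off, with hypothetical names:

	/* Toy model of the copybreak decision: copy small frames and
	 * keep the RX buffer, hand large frames the buffer itself.
	 * All names are hypothetical; this is not the driver's path.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define COPYBREAK 256

	static char *rx_frame(char *dma_buf, size_t len, int *buf_consumed)
	{
		char *pkt;

		if (len <= COPYBREAK) {
			pkt = malloc(len);
			if (pkt)
				memcpy(pkt, dma_buf, len);
			*buf_consumed = 0;	/* buffer is reposted as-is */
		} else {
			pkt = dma_buf;		/* ownership moves to the packet */
			*buf_consumed = 1;	/* a new buffer must be allocated */
		}
		return pkt;
	}

	int main(void)
	{
		char *buf = calloc(1, 2048);
		int consumed;
		char *pkt;

		pkt = rx_frame(buf, 60, &consumed);
		printf("60B frame consumed buffer: %d\n", consumed);
		free(pkt);			/* frees the copy, not buf */
		pkt = rx_frame(buf, 1500, &consumed);
		printf("1500B frame consumed buffer: %d\n", consumed);
		free(pkt);			/* pkt aliases buf here */
		return 0;
	}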
+
+#ifdef CONFIG_INET
+static void bnge_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
+{
+ struct udphdr *uh = NULL;
+
+ if (ip_proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+}
+
+static struct sk_buff *bnge_gro_func(struct bnge_tpa_info *tpa_info,
+ int payload_off, int tcp_ts,
+ struct sk_buff *skb)
+{
+ u16 outer_ip_off, inner_ip_off, inner_mac_off;
+ u32 hdr_info = tpa_info->hdr_info;
+ int iphdr_len, nw_off;
+
+ inner_ip_off = BNGE_TPA_INNER_L3_OFF(hdr_info);
+ inner_mac_off = BNGE_TPA_INNER_L2_OFF(hdr_info);
+ outer_ip_off = BNGE_TPA_OUTER_L3_OFF(hdr_info);
+
+ nw_off = inner_ip_off - ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
+ sizeof(struct ipv6hdr) : sizeof(struct iphdr);
+ skb_set_transport_header(skb, nw_off + iphdr_len);
+
+ if (inner_mac_off) { /* tunnel */
+ __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
+ ETH_HLEN - 2));
+
+ bnge_gro_tunnel(skb, proto);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *bnge_gro_skb(struct bnge_net *bn,
+ struct bnge_tpa_info *tpa_info,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ struct sk_buff *skb)
+{
+ int payload_off;
+ u16 segs;
+
+ segs = TPA_END_TPA_SEGS(tpa_end);
+ if (segs == 1)
+ return skb;
+
+ NAPI_GRO_CB(skb)->count = segs;
+ skb_shinfo(skb)->gso_size =
+ le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+ skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+ payload_off = TPA_END_PAYLOAD_OFF(tpa_end1);
+ skb = bnge_gro_func(tpa_info, payload_off,
+ TPA_END_GRO_TS(tpa_end), skb);
+ if (likely(skb))
+ tcp_gro_complete(skb);
+
+ return skb;
+}
+#endif
+
+static struct sk_buff *bnge_tpa_end(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u32 *raw_cons,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ u8 *event)
+{
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct net_device *dev = bn->netdev;
+ struct bnge_tpa_info *tpa_info;
+ struct bnge_rx_ring_info *rxr;
+ u8 *data_ptr, agg_bufs;
+ struct sk_buff *skb;
+ u16 idx = 0, agg_id;
+ dma_addr_t mapping;
+ unsigned int len;
+ void *data;
+
+ if (unlikely(bnapi->in_reset)) {
+ int rc = bnge_discard_rx(bn, cpr, raw_cons, tpa_end);
+
+ if (rc < 0)
+ return ERR_PTR(-EBUSY);
+ return NULL;
+ }
+
+ rxr = bnapi->rx_ring;
+ agg_id = TPA_END_AGG_ID(tpa_end);
+ agg_id = bnge_lookup_agg_idx(rxr, agg_id);
+ agg_bufs = TPA_END_AGG_BUFS(tpa_end1);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ if (unlikely(agg_bufs != tpa_info->agg_count)) {
+ netdev_warn(bn->netdev, "TPA end agg_buf %d != expected agg_bufs %d\n",
+ agg_bufs, tpa_info->agg_count);
+ agg_bufs = tpa_info->agg_count;
+ }
+ tpa_info->agg_count = 0;
+ *event |= BNGE_AGG_EVENT;
+ bnge_free_agg_idx(rxr, agg_id);
+ idx = agg_id;
+ data = tpa_info->data;
+ data_ptr = tpa_info->data_ptr;
+ prefetch(data_ptr);
+ len = tpa_info->len;
+ mapping = tpa_info->mapping;
+
+ if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
+ bnge_abort_tpa(cpr, idx, agg_bufs);
+ if (agg_bufs > MAX_SKB_FRAGS)
+ netdev_warn(bn->netdev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+ agg_bufs, (int)MAX_SKB_FRAGS);
+ return NULL;
+ }
+
+ if (len <= bn->rx_copybreak) {
+ skb = bnge_copy_skb(bnapi, data_ptr, len, mapping);
+ if (!skb) {
+ bnge_abort_tpa(cpr, idx, agg_bufs);
+ return NULL;
+ }
+ } else {
+ dma_addr_t new_mapping;
+ u8 *new_data;
+
+ new_data = __bnge_alloc_rx_frag(bn, &new_mapping, rxr,
+ GFP_ATOMIC);
+ if (!new_data) {
+ bnge_abort_tpa(cpr, idx, agg_bufs);
+ return NULL;
+ }
+
+ tpa_info->data = new_data;
+ tpa_info->data_ptr = new_data + bn->rx_offset;
+ tpa_info->mapping = new_mapping;
+
+ skb = napi_build_skb(data, bn->rx_buf_size);
+ dma_sync_single_for_cpu(bn->bd->dev, mapping,
+ bn->rx_buf_use_size, bn->rx_dir);
+
+ if (!skb) {
+ page_pool_free_va(rxr->head_pool, data, true);
+ bnge_abort_tpa(cpr, idx, agg_bufs);
+ return NULL;
+ }
+ skb_mark_for_recycle(skb);
+ skb_reserve(skb, bn->rx_offset);
+ skb_put(skb, len);
+ }
+
+ if (agg_bufs) {
+ skb = bnge_rx_agg_netmems_skb(bn, cpr, skb, idx, agg_bufs,
+ true);
+ /* Page reuse already handled by bnge_rx_agg_netmems_skb(). */
+ if (!skb)
+ return NULL;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
+ skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
+
+ if (tpa_info->vlan_valid &&
+ (dev->features & BNGE_HW_FEATURE_VLAN_ALL_RX)) {
+ __be16 vlan_proto = htons(tpa_info->metadata >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT);
+ u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
+
+ if (eth_type_vlan(vlan_proto)) {
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ } else {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ }
+
+ skb_checksum_none_assert(skb);
+ if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level =
+ (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
+ }
+
+#ifdef CONFIG_INET
+ if (bn->priv_flags & BNGE_NET_EN_GRO)
+ skb = bnge_gro_skb(bn, tpa_info, tpa_end, tpa_end1, skb);
+#endif
+
+ return skb;
+}
+
+static enum pkt_hash_types bnge_rss_ext_op(struct bnge_net *bn,
+ struct rx_cmp *rxcmp)
+{
+ u8 ext_op = RX_CMP_V3_HASH_TYPE(bn->bd, rxcmp);
+
+ switch (ext_op) {
+ case EXT_OP_INNER_4:
+ case EXT_OP_OUTER_4:
+ case EXT_OP_INNFL_3:
+ case EXT_OP_OUTFL_3:
+ return PKT_HASH_TYPE_L4;
+ default:
+ return PKT_HASH_TYPE_L3;
+ }
+}
+
+static struct sk_buff *bnge_rx_vlan(struct sk_buff *skb, u8 cmp_type,
+ struct rx_cmp *rxcmp,
+ struct rx_cmp_ext *rxcmp1)
+{
+ __be16 vlan_proto;
+ u16 vtag;
+
+ if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ __le32 flags2 = rxcmp1->rx_cmp_flags2;
+ u32 meta_data;
+
+ if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
+ return skb;
+
+ meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
+ vlan_proto =
+ htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
+ if (eth_type_vlan(vlan_proto))
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ else
+ goto vlan_err;
+ } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ if (RX_CMP_VLAN_VALID(rxcmp)) {
+ u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
+
+ if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
+ vlan_proto = htons(ETH_P_8021Q);
+ else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
+ vlan_proto = htons(ETH_P_8021AD);
+ else
+ goto vlan_err;
+ vtag = RX_CMP_METADATA0_TCI(rxcmp1);
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ }
+ }
+ return skb;
+
+vlan_err:
+ skb_mark_for_recycle(skb);
+ dev_kfree_skb(skb);
+ return NULL;
+}
+
+static struct sk_buff *bnge_rx_skb(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr, u16 cons,
+ void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int len)
+{
+ struct bnge_dev *bd = bn->bd;
+ u16 prod = rxr->rx_prod;
+ struct sk_buff *skb;
+ int err;
+
+ err = bnge_alloc_rx_data(bn, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnge_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+
+ dma_sync_single_for_cpu(bd->dev, dma_addr, len, bn->rx_dir);
+ skb = napi_build_skb(data, bn->rx_buf_size);
+ if (!skb) {
+ page_pool_free_va(rxr->head_pool, data, true);
+ return NULL;
+ }
+
+ skb_mark_for_recycle(skb);
+ skb_reserve(skb, bn->rx_offset);
+ skb_put(skb, len);
+ return skb;
+}
+
+/* returns the following:
+ * 1 - 1 packet successfully received
+ * 0 - successful TPA_START, packet not completed yet
+ * -EBUSY - completion ring does not have all the agg buffers yet
+ * -ENOMEM - packet aborted due to out of memory
+ * -EIO - packet aborted due to hw error indicated in BD
+ */
+static int bnge_rx_pkt(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
+ u32 *raw_cons, u8 *event)
+{
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct net_device *dev = bn->netdev;
+ struct bnge_rx_ring_info *rxr;
+ u32 tmp_raw_cons, flags, misc;
+ struct bnge_sw_rx_bd *rx_buf;
+ struct rx_cmp_ext *rxcmp1;
+ u16 cons, prod, cp_cons;
+ u8 *data_ptr, cmp_type;
+ struct rx_cmp *rxcmp;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ unsigned int len;
+ u8 agg_bufs;
+ void *data;
+ int rc = 0;
+
+ rxr = bnapi->rx_ring;
+
+ tmp_raw_cons = *raw_cons;
+ cp_cons = RING_CMP(bn, tmp_raw_cons);
+ rxcmp = (struct rx_cmp *)
+ &cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
+ bnge_tpa_agg(bn, rxr, (struct rx_agg_cmp *)rxcmp);
+ goto next_rx_no_prod_no_len;
+ }
+
+ tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+ cp_cons = RING_CMP(bn, tmp_raw_cons);
+ rxcmp1 = (struct rx_cmp_ext *)
+ &cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!RX_CMP_VALID(bn, rxcmp1, tmp_raw_cons))
+ return -EBUSY;
+
+ /* The validity check of the entry must be done before
+ * reading any further.
+ */
+ dma_rmb();
+ prod = rxr->rx_prod;
+
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
+ cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
+ bnge_tpa_start(bn, rxr, cmp_type,
+ (struct rx_tpa_start_cmp *)rxcmp,
+ (struct rx_tpa_start_cmp_ext *)rxcmp1);
+
+ *event |= BNGE_RX_EVENT;
+ goto next_rx_no_prod_no_len;
+
+ } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+ skb = bnge_tpa_end(bn, cpr, &tmp_raw_cons,
+ (struct rx_tpa_end_cmp *)rxcmp,
+ (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
+ if (IS_ERR(skb))
+ return -EBUSY;
+
+ rc = -ENOMEM;
+ if (likely(skb)) {
+ bnge_deliver_skb(bn, bnapi, skb);
+ rc = 1;
+ }
+ *event |= BNGE_RX_EVENT;
+ goto next_rx_no_prod_no_len;
+ }
+
+ cons = rxcmp->rx_cmp_opaque;
+ if (unlikely(cons != rxr->rx_next_cons)) {
+ int rc1 = bnge_discard_rx(bn, cpr, &tmp_raw_cons, rxcmp);
+
+ /* 0xffff is a forced error, don't print it */
+ if (rxr->rx_next_cons != 0xffff)
+ netdev_warn(bn->netdev, "RX cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
+ bnge_sched_reset_rxr(bn, rxr);
+ if (rc1)
+ return rc1;
+ goto next_rx_no_prod_no_len;
+ }
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data;
+ data_ptr = rx_buf->data_ptr;
+ prefetch(data_ptr);
+
+ misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+ agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
+
+ if (agg_bufs) {
+ if (!bnge_agg_bufs_valid(bn, cpr, agg_bufs, &tmp_raw_cons))
+ return -EBUSY;
+
+ cp_cons = NEXT_CMP(bn, cp_cons);
+ *event |= BNGE_AGG_EVENT;
+ }
+ *event |= BNGE_RX_EVENT;
+
+ rx_buf->data = NULL;
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+ bnge_reuse_rx_data(rxr, cons, data);
+ if (agg_bufs)
+ bnge_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
+ false);
+ rc = -EIO;
+ goto next_rx_no_len;
+ }
+
+ flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
+ len = flags >> RX_CMP_LEN_SHIFT;
+ dma_addr = rx_buf->mapping;
+
+ if (len <= bn->rx_copybreak) {
+ skb = bnge_copy_skb(bnapi, data_ptr, len, dma_addr);
+ bnge_reuse_rx_data(rxr, cons, data);
+ } else {
+ skb = bnge_rx_skb(bn, rxr, cons, data, data_ptr, dma_addr, len);
+ }
+
+ if (!skb) {
+ if (agg_bufs)
+ bnge_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+ agg_bufs, false);
+ goto oom_next_rx;
+ }
+
+ if (agg_bufs) {
+ skb = bnge_rx_agg_netmems_skb(bn, cpr, skb, cp_cons,
+ agg_bufs, false);
+ if (!skb)
+ goto oom_next_rx;
+ }
+
+ if (RX_CMP_HASH_VALID(rxcmp)) {
+ enum pkt_hash_types type;
+
+ if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ type = bnge_rss_ext_op(bn, rxcmp);
+ } else {
+ u32 itypes = RX_CMP_ITYPES(rxcmp);
+
+ if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
+ itypes == RX_CMP_FLAGS_ITYPE_UDP)
+ type = PKT_HASH_TYPE_L4;
+ else
+ type = PKT_HASH_TYPE_L3;
+ }
+ skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (skb->dev->features & BNGE_HW_FEATURE_VLAN_ALL_RX) {
+ skb = bnge_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
+ if (!skb)
+ goto next_rx;
+ }
+
+ skb_checksum_none_assert(skb);
+ if (RX_CMP_L4_CS_OK(rxcmp1)) {
+ if (dev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+ }
+ }
+
+ bnge_deliver_skb(bn, bnapi, skb);
+ rc = 1;
+
+next_rx:
+ /* Update Stats */
+next_rx_no_len:
+ rxr->rx_prod = NEXT_RX(prod);
+ rxr->rx_next_cons = RING_RX(bn, NEXT_RX(cons));
+
+next_rx_no_prod_no_len:
+ *raw_cons = tmp_raw_cons;
+ return rc;
+
+oom_next_rx:
+ rc = -ENOMEM;
+ goto next_rx;
+}
+
+/* In netpoll mode, if we are using a combined completion ring, we need to
+ * discard the rx packets and recycle the buffers.
+ */
+static int bnge_force_rx_discard(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u32 *raw_cons, u8 *event)
+{
+ u32 tmp_raw_cons = *raw_cons;
+ struct rx_cmp_ext *rxcmp1;
+ struct rx_cmp *rxcmp;
+ u16 cp_cons;
+ u8 cmp_type;
+ int rc;
+
+ cp_cons = RING_CMP(bn, tmp_raw_cons);
+ rxcmp = (struct rx_cmp *)
+ &cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+ cp_cons = RING_CMP(bn, tmp_raw_cons);
+ rxcmp1 = (struct rx_cmp_ext *)
+ &cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!RX_CMP_VALID(bn, rxcmp1, tmp_raw_cons))
+ return -EBUSY;
+
+ /* The validity check of the entry must be done before
+ * reading any further.
+ */
+ dma_rmb();
+ cmp_type = RX_CMP_TYPE(rxcmp);
+ if (cmp_type == CMP_TYPE_RX_L2_CMP ||
+ cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+ cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+ } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+ struct rx_tpa_end_cmp_ext *tpa_end1;
+
+ tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
+ tpa_end1->rx_tpa_end_cmp_errors_v2 |=
+ cpu_to_le32(RX_TPA_END_CMP_ERRORS);
+ }
+ rc = bnge_rx_pkt(bn, cpr, raw_cons, event);
+ return rc;
+}
+
+static void __bnge_tx_int(struct bnge_net *bn, struct bnge_tx_ring_info *txr,
+ int budget)
+{
+ u16 hw_cons = txr->tx_hw_cons;
+ struct bnge_dev *bd = bn->bd;
+ unsigned int tx_bytes = 0;
+ unsigned int tx_pkts = 0;
+ struct netdev_queue *txq;
+ u16 cons = txr->tx_cons;
+ skb_frag_t *frag;
+
+ txq = netdev_get_tx_queue(bn->netdev, txr->txq_index);
+
+ while (SW_TX_RING(bn, cons) != hw_cons) {
+ struct bnge_sw_tx_bd *tx_buf;
+ struct sk_buff *skb;
+ int j, last;
+
+ tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, cons)];
+ skb = tx_buf->skb;
+ if (unlikely(!skb)) {
+ bnge_sched_reset_txr(bn, txr, cons);
+ return;
+ }
+
+ cons = NEXT_TX(cons);
+ tx_pkts++;
+ tx_bytes += skb->len;
+ tx_buf->skb = NULL;
+
+ dma_unmap_single(bd->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), DMA_TO_DEVICE);
+ last = tx_buf->nr_frags;
+
+ for (j = 0; j < last; j++) {
+ frag = &skb_shinfo(skb)->frags[j];
+ cons = NEXT_TX(cons);
+ tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, cons)];
+ netmem_dma_unmap_page_attrs(bd->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
+ }
+
+ cons = NEXT_TX(cons);
+
+ napi_consume_skb(skb, budget);
+ }
+
+ WRITE_ONCE(txr->tx_cons, cons);
+
+ __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
+ bnge_tx_avail(bn, txr), bn->tx_wake_thresh,
+ (READ_ONCE(txr->dev_state) ==
+ BNGE_DEV_STATE_CLOSING));
+}
+
+static void bnge_tx_int(struct bnge_net *bn, struct bnge_napi *bnapi,
+ int budget)
+{
+ struct bnge_tx_ring_info *txr;
+ int i;
+
+ bnge_for_each_napi_tx(i, bnapi, txr) {
+ if (txr->tx_hw_cons != SW_TX_RING(bn, txr->tx_cons))
+ __bnge_tx_int(bn, txr, budget);
+ }
+
+ bnapi->events &= ~BNGE_TX_CMP_EVENT;
+}
+
+static void __bnge_poll_work_done(struct bnge_net *bn, struct bnge_napi *bnapi,
+ int budget)
+{
+ struct bnge_rx_ring_info *rxr = bnapi->rx_ring;
+
+ if ((bnapi->events & BNGE_TX_CMP_EVENT) && !bnapi->tx_fault)
+ bnge_tx_int(bn, bnapi, budget);
+
+ if ((bnapi->events & BNGE_RX_EVENT)) {
+ bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
+ bnapi->events &= ~BNGE_RX_EVENT;
+ }
+
+ if (bnapi->events & BNGE_AGG_EVENT) {
+ bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnapi->events &= ~BNGE_AGG_EVENT;
+ }
+}
+
+static void
+bnge_hwrm_update_token(struct bnge_dev *bd, u16 seq_id,
+ enum bnge_hwrm_wait_state state)
+{
+ struct bnge_hwrm_wait_token *token;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(token, &bd->hwrm_pending_list, node) {
+ if (token->seq_id == seq_id) {
+ WRITE_ONCE(token->state, state);
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+ dev_err(bd->dev, "Invalid hwrm seq id %d\n", seq_id);
+}
+
+static int bnge_hwrm_handler(struct bnge_dev *bd, struct tx_cmp *txcmp)
+{
+ struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
+ u16 cmpl_type = TX_CMP_TYPE(txcmp), seq_id;
+
+ switch (cmpl_type) {
+ case CMPL_BASE_TYPE_HWRM_DONE:
+ seq_id = le16_to_cpu(h_cmpl->sequence_id);
+ bnge_hwrm_update_token(bd, seq_id, BNGE_HWRM_COMPLETE);
+ break;
+
+ case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int __bnge_poll_work(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
+ int budget)
+{
+ struct bnge_napi *bnapi = cpr->bnapi;
+ u32 raw_cons = cpr->cp_raw_cons;
+ struct tx_cmp *txcmp;
+ int rx_pkts = 0;
+ u8 event = 0;
+ u32 cons;
+
+ cpr->has_more_work = 0;
+ cpr->had_work_done = 1;
+ while (1) {
+ u8 cmp_type;
+ int rc;
+
+ cons = RING_CMP(bn, raw_cons);
+ txcmp = &cpr->desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!TX_CMP_VALID(bn, txcmp, raw_cons))
+ break;
+
+ /* The validity check of the entry must be done before
+ * reading any further.
+ */
+ dma_rmb();
+ cmp_type = TX_CMP_TYPE(txcmp);
+ if (cmp_type == CMP_TYPE_TX_L2_CMP ||
+ cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
+ u32 opaque = txcmp->tx_cmp_opaque;
+ struct bnge_tx_ring_info *txr;
+ u16 tx_freed;
+
+ txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
+ event |= BNGE_TX_CMP_EVENT;
+ if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
+ txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
+ else
+ txr->tx_hw_cons = TX_OPAQUE_PROD(bn, opaque);
+ tx_freed = ((txr->tx_hw_cons - txr->tx_cons) &
+ bn->tx_ring_mask);
+ /* Return the full budget so NAPI will poll again and finish TX work. */
+ if (unlikely(tx_freed >= bn->tx_wake_thresh)) {
+ rx_pkts = budget;
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ if (budget)
+ cpr->has_more_work = 1;
+ break;
+ }
+ } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
+ cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
+ if (likely(budget))
+ rc = bnge_rx_pkt(bn, cpr, &raw_cons, &event);
+ else
+ rc = bnge_force_rx_discard(bn, cpr, &raw_cons,
+ &event);
+ if (likely(rc >= 0))
+ rx_pkts += rc;
+ /* Increment rx_pkts when rc is -ENOMEM to count towards
+ * the NAPI budget. Otherwise, we may potentially loop
+ * here forever if we consistently cannot allocate
+ * buffers.
+ */
+ else if (rc == -ENOMEM && budget)
+ rx_pkts++;
+ else if (rc == -EBUSY) /* partial completion */
+ break;
+ } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
+ cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
+ cmp_type == CMPL_BA_TY_HWRM_ASY_EVT)) {
+ bnge_hwrm_handler(bn->bd, txcmp);
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+
+ if (rx_pkts && rx_pkts == budget) {
+ cpr->has_more_work = 1;
+ break;
+ }
+ }
+
+ cpr->cp_raw_cons = raw_cons;
+ bnapi->events |= event;
+ return rx_pkts;
+}
+
+static void __bnge_poll_cqs_done(struct bnge_net *bn, struct bnge_napi *bnapi,
+ u64 dbr_type, int budget)
+{
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+ int i;
+
+ for (i = 0; i < nqr->cp_ring_count; i++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[i];
+ struct bnge_db_info *db;
+
+ if (cpr->had_work_done) {
+ u32 tgl = 0;
+
+ if (dbr_type == DBR_TYPE_CQ_ARMALL) {
+ cpr->had_nqe_notify = 0;
+ tgl = cpr->toggle;
+ }
+ db = &cpr->cp_db;
+ bnge_writeq(bn->bd,
+ db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
+ DB_RING_IDX(db, cpr->cp_raw_cons),
+ db->doorbell);
+ cpr->had_work_done = 0;
+ }
+ }
+ __bnge_poll_work_done(bn, bnapi, budget);
+}
+
+static int __bnge_poll_cqs(struct bnge_net *bn, struct bnge_napi *bnapi,
+ int budget)
+{
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+ int i, work_done = 0;
+
+ for (i = 0; i < nqr->cp_ring_count; i++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[i];
+
+ if (cpr->had_nqe_notify) {
+ work_done += __bnge_poll_work(bn, cpr,
+ budget - work_done);
+ nqr->has_more_work |= cpr->has_more_work;
+ }
+ }
+ return work_done;
+}
+
+int bnge_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct bnge_napi *bnapi = container_of(napi, struct bnge_napi, napi);
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+ u32 raw_cons = nqr->nq_raw_cons;
+ struct bnge_net *bn = bnapi->bn;
+ struct bnge_dev *bd = bn->bd;
+ struct nqe_cn *nqcmp;
+ int work_done = 0;
+ u32 cons;
+
+ if (nqr->has_more_work) {
+ nqr->has_more_work = 0;
+ work_done = __bnge_poll_cqs(bn, bnapi, budget);
+ }
+
+ while (1) {
+ u16 type;
+
+ cons = RING_CMP(bn, raw_cons);
+ nqcmp = &nqr->desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!NQ_CMP_VALID(bn, nqcmp, raw_cons)) {
+ if (nqr->has_more_work)
+ break;
+
+ __bnge_poll_cqs_done(bn, bnapi, DBR_TYPE_CQ_ARMALL,
+ budget);
+ nqr->nq_raw_cons = raw_cons;
+ if (napi_complete_done(napi, work_done))
+ BNGE_DB_NQ_ARM(bd, &nqr->nq_db,
+ nqr->nq_raw_cons);
+ goto poll_done;
+ }
+
+ /* The validity check of the entry must be done before
+ * reading any further.
+ */
+ dma_rmb();
+
+ type = le16_to_cpu(nqcmp->type);
+ if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
+ u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
+ u32 cq_type = BNGE_NQ_HDL_TYPE(idx);
+ struct bnge_cp_ring_info *cpr;
+
+ /* No more budget for RX work */
+ if (budget && work_done >= budget &&
+ cq_type == BNGE_NQ_HDL_TYPE_RX)
+ break;
+
+ idx = BNGE_NQ_HDL_IDX(idx);
+ cpr = &nqr->cp_ring_arr[idx];
+ cpr->had_nqe_notify = 1;
+ cpr->toggle = NQE_CN_TOGGLE(type);
+ work_done += __bnge_poll_work(bn, cpr,
+ budget - work_done);
+ nqr->has_more_work |= cpr->has_more_work;
+ } else {
+ bnge_hwrm_handler(bn->bd, (struct tx_cmp *)nqcmp);
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ }
+
+ __bnge_poll_cqs_done(bn, bnapi, DBR_TYPE_CQ, budget);
+ if (raw_cons != nqr->nq_raw_cons) {
+ nqr->nq_raw_cons = raw_cons;
+ BNGE_DB_NQ(bd, &nqr->nq_db, raw_cons);
+ }
+poll_done:
+ return work_done;
+}
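The poll loop above completes the two-level scheme: bnge_msix() only schedules NAPI, the poller drains notification-queue entries, each of which names a completion ring through the packed NQ handle, polls that ring, and finally re-arms with the toggle echoed back from the NQE. A standalone decode of the NQE type/toggle fields using the masks from struct nqe_cn (plain C check, not driver code):

	/* Decode of the NQE type/toggle fields used by the poll loop.
	 * Masks copied from struct nqe_cn; plain C check.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define NQ_CN_TYPE_MASK   0x3fu
	#define NQ_CN_TOGGLE_MASK 0xc0u
	#define NQ_CN_TOGGLE_SFT  6
	#define NQ_CN_TYPE_CQ_NOTIFICATION 0x30u

	int main(void)
	{
		uint16_t type = 0x30 | (1 << NQ_CN_TOGGLE_SFT); /* CQ notify, toggle 1 */

		printf("type=0x%02x toggle=%u is_cq=%d\n",
		       type & NQ_CN_TYPE_MASK,
		       (type & NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT,
		       (type & NQ_CN_TYPE_MASK) == NQ_CN_TYPE_CQ_NOTIFICATION);
		return 0;
	}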
+
+static u16 bnge_xmit_get_cfa_action(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+
+ if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
+ return 0;
+
+ return md_dst->u.port_info.port_id;
+}
+
+static const u16 bnge_lhint_arr[] = {
+ TX_BD_FLAGS_LHINT_512_AND_SMALLER,
+ TX_BD_FLAGS_LHINT_512_TO_1023,
+ TX_BD_FLAGS_LHINT_1024_TO_2047,
+ TX_BD_FLAGS_LHINT_1024_TO_2047,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+};
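The table is indexed with length >> 9, that is, in 512-byte buckets: index 0 covers frames up to 511 bytes, index 1 the 512 to 1023 range, indexes 2 and 3 the 1024 to 2047 range, and every longer frame (through the roughly 9.5 KiB maximum implied by the 19 entries) takes the largest hint. A quick standalone check of the bucketing, with shortened labels:

	/* Quick check of the length-hint bucketing: index = length >> 9.
	 * Labels shortened; the bounds mirror the array above.
	 */
	#include <stdio.h>

	static const char *lhint[] = {
		"<=511", "512-1023", "1024-2047", "1024-2047",
		/* indexes 4..18 all map to "2048+" in the real table */
	};

	int main(void)
	{
		unsigned int lens[] = { 60, 511, 512, 1500, 2048, 9000 };

		for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
			unsigned int idx = lens[i] >> 9;

			printf("len %5u -> index %2u (%s)\n", lens[i], idx,
			       idx < 4 ? lhint[idx] : "2048+");
		}
		return 0;
	}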
+
+static void bnge_txr_db_kick(struct bnge_net *bn, struct bnge_tx_ring_info *txr,
+ u16 prod)
+{
+ /* Sync BD data before updating doorbell */
+ wmb();
+ bnge_db_write(bn->bd, &txr->tx_db, prod);
+ txr->kick_pending = 0;
+}
+
+static u32 bnge_get_gso_hdr_len(struct sk_buff *skb)
+{
+ bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
+ u32 hdr_len;
+
+ if (skb->encapsulation) {
+ if (udp_gso)
+ hdr_len = skb_inner_transport_offset(skb) +
+ sizeof(struct udphdr);
+ else
+ hdr_len = skb_inner_tcp_all_headers(skb);
+ } else if (udp_gso) {
+ hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
+ } else {
+ hdr_len = skb_tcp_all_headers(skb);
+ }
+
+ return hdr_len;
+}
+
+netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u32 len, free_size, vlan_tag_flags, cfa_action, flags;
+ struct bnge_net *bn = netdev_priv(dev);
+ struct bnge_tx_ring_info *txr;
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_sw_tx_bd *tx_buf;
+ struct tx_bd *txbd, *txbd0;
+ struct netdev_queue *txq;
+ struct tx_bd_ext *txbd1;
+ u16 prod, last_frag;
+ unsigned int length;
+ dma_addr_t mapping;
+ __le32 lflags = 0;
+ skb_frag_t *frag;
+ int i;
+
+ i = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, i);
+ txr = &bn->tx_ring[bn->tx_ring_map[i]];
+ prod = txr->tx_prod;
+
+ free_size = bnge_tx_avail(bn, txr);
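+ /* Worst case: one long BD for the head, one extension BD, plus
+ * one BD per fragment.
+ */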
+ if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
+ /* We must have raced with NAPI cleanup */
+ if (net_ratelimit() && txr->kick_pending)
+ netif_warn(bn, tx_err, dev,
+ "bnge: ring busy w/ flush pending!\n");
+ if (!netif_txq_try_stop(txq, bnge_tx_avail(bn, txr),
+ bn->tx_wake_thresh))
+ return NETDEV_TX_BUSY;
+ }
+
+ last_frag = skb_shinfo(skb)->nr_frags;
+
+ txbd = &txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)];
+
+ tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)];
+ tx_buf->skb = skb;
+ tx_buf->nr_frags = last_frag;
+
+ vlan_tag_flags = 0;
+ cfa_action = bnge_xmit_get_cfa_action(skb);
+ if (skb_vlan_tag_present(skb)) {
+ vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
+ skb_vlan_tag_get(skb);
+ /* Currently supports 802.1Q and 802.1AD VLAN offloads;
+ * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
+ */
+ if (skb->vlan_proto == htons(ETH_P_8021Q))
+ vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
+ }
+
+ if (unlikely(skb->no_fcs))
+ lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
+
+ if (eth_skb_pad(skb))
+ goto tx_kick_pending;
+
+ len = skb_headlen(skb);
+
+ mapping = dma_map_single(bd->dev, skb->data, len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(bd->dev, mapping)))
+ goto tx_free;
+
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+ TX_BD_CNT(last_frag + 2);
+
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bn, txr, prod, 2 + last_frag);
+
+ prod = NEXT_TX(prod);
+ txbd1 = (struct tx_bd_ext *)
+ &txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)];
+
+ if (skb_is_gso(skb)) {
+ u32 hdr_len = bnge_get_gso_hdr_len(skb);
+
+ lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | TX_BD_FLAGS_T_IPID |
+ (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
+ length = skb_shinfo(skb)->gso_size;
+ txbd1->tx_bd_mss = cpu_to_le32(length);
+ length += hdr_len;
+ } else {
+ length = skb->len;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ lflags |= cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+ txbd1->tx_bd_mss = 0;
+ }
+ }
+
+ flags |= bnge_lhint_arr[length >> 9];
+
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ txbd1->tx_bd_hsize_lflags = lflags;
+ txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+ txbd1->tx_bd_cfa_action =
+ cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
+ txbd0 = txbd;
+ for (i = 0; i < last_frag; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+
+ prod = NEXT_TX(prod);
+ txbd = &txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)];
+
+ len = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(bd->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(bd->dev, mapping)))
+ goto tx_dma_error;
+
+ tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)];
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
+ mapping, mapping);
+
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ flags = len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ }
+
+ flags &= ~TX_BD_LEN;
+ txbd->tx_bd_len_flags_type =
+ cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
+ TX_BD_FLAGS_PACKET_END);
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ prod = NEXT_TX(prod);
+ WRITE_ONCE(txr->tx_prod, prod);
+
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
+ bnge_txr_db_kick(bn, txr, prod);
+ } else {
+ if (free_size >= bn->tx_wake_thresh)
+ txbd0->tx_bd_len_flags_type |=
+ cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
+ txr->kick_pending = 1;
+ }
+
+ if (unlikely(bnge_tx_avail(bn, txr) <= MAX_SKB_FRAGS + 1)) {
+ if (netdev_xmit_more()) {
+ txbd0->tx_bd_len_flags_type &=
+ cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
+ bnge_txr_db_kick(bn, txr, prod);
+ }
+
+ netif_txq_try_stop(txq, bnge_tx_avail(bn, txr),
+ bn->tx_wake_thresh);
+ }
+ return NETDEV_TX_OK;
+
+tx_dma_error:
+ last_frag = i;
+
+ /* start back at beginning and unmap skb */
+ prod = txr->tx_prod;
+ tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)];
+ dma_unmap_single(bd->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), DMA_TO_DEVICE);
+ prod = NEXT_TX(prod);
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < last_frag; i++) {
+ prod = NEXT_TX(prod);
+ tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)];
+ frag = &skb_shinfo(skb)->frags[i];
+ netmem_dma_unmap_page_attrs(bd->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
+ }
+
+tx_free:
+ dev_kfree_skb_any(skb);
+
+tx_kick_pending:
+ if (txr->kick_pending)
+ bnge_txr_db_kick(bn, txr, txr->tx_prod);
+ txr->tx_buf_ring[SW_TX_RING(bn, txr->tx_prod)].skb = NULL;
+ dev_core_stats_tx_dropped_inc(dev);
+ return NETDEV_TX_OK;
+}
+
+netdev_features_t bnge_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ u32 len;
+
+ features = vlan_features_check(skb, features);
+#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
+ if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS)
+ features &= ~NETIF_F_SG;
+#endif
+
+ if (skb_is_gso(skb))
+ len = bnge_get_gso_hdr_len(skb) + skb_shinfo(skb)->gso_size;
+ else
+ len = skb->len;
+
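+ /* Lengths beyond what the LHINT table can express are punted
+ * back to software GSO/checksum.
+ */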
+ len >>= 9;
+ if (unlikely(len >= ARRAY_SIZE(bnge_lhint_arr)))
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_txrx.h b/drivers/net/ethernet/broadcom/bnge/bnge_txrx.h
new file mode 100644
index 000000000000..bd0aa6c221a4
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_txrx.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_TXRX_H_
+#define _BNGE_TXRX_H_
+
+#include <linux/bnge/hsi.h>
+#include "bnge_netdev.h"
+
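+/* Free TX descriptors: ring size minus BDs posted but not yet completed */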
+static inline u32 bnge_tx_avail(struct bnge_net *bn,
+ const struct bnge_tx_ring_info *txr)
+{
+ u32 used = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
+
+ return bn->tx_ring_size - (used & bn->tx_ring_mask);
+}
+
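+/* 64-bit doorbell write. On 32-bit hosts the two 32-bit halves must be
+ * written under db_lock so doorbells from different rings cannot
+ * interleave.
+ */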
+static inline void bnge_writeq_relaxed(struct bnge_dev *bd, u64 val,
+ void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+ spin_lock(&bd->db_lock);
+ lo_hi_writeq_relaxed(val, addr);
+ spin_unlock(&bd->db_lock);
+#else
+ writeq_relaxed(val, addr);
+#endif
+}
+
+/* For TX and RX ring doorbells with no ordering guarantee */
+static inline void bnge_db_write_relaxed(struct bnge_net *bn,
+ struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq_relaxed(bn->bd, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
+}
+
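+/* tx_bd_opaque layout: producer index in bits 15:0, BD count in bits
+ * 23:16, NAPI ring index in bits 31:24.
+ */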
+#define TX_OPAQUE_IDX_MASK 0x0000ffff
+#define TX_OPAQUE_BDS_MASK 0x00ff0000
+#define TX_OPAQUE_BDS_SHIFT 16
+#define TX_OPAQUE_RING_MASK 0xff000000
+#define TX_OPAQUE_RING_SHIFT 24
+
+#define SET_TX_OPAQUE(bn, txr, idx, bds) \
+ (((txr)->tx_napi_idx << TX_OPAQUE_RING_SHIFT) | \
+ ((bds) << TX_OPAQUE_BDS_SHIFT) | ((idx) & (bn)->tx_ring_mask))
+
+#define TX_OPAQUE_IDX(opq) ((opq) & TX_OPAQUE_IDX_MASK)
+#define TX_OPAQUE_RING(opq) (((opq) & TX_OPAQUE_RING_MASK) >> \
+ TX_OPAQUE_RING_SHIFT)
+#define TX_OPAQUE_BDS(opq) (((opq) & TX_OPAQUE_BDS_MASK) >> \
+ TX_OPAQUE_BDS_SHIFT)
+#define TX_OPAQUE_PROD(bn, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
+ (bn)->tx_ring_mask)
+#define TX_BD_CNT(n) (((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT)
+
+#define TX_MAX_BD_CNT 32
+
+#define TX_MAX_FRAGS (TX_MAX_BD_CNT - 2)
+
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS frags plus the head
+ * buffer. We need one extra BD because the first TX BD is always a
+ * long BD.
+ */
+#define BNGE_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
+
+#define RX_RING(bn, x) (((x) & (bn)->rx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define RX_AGG_RING(bn, x) (((x) & (bn)->rx_agg_ring_mask) >> \
+ (BNGE_PAGE_SHIFT - 4))
+#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(bn, x) (((x) & (bn)->tx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNGE_PAGE_SHIFT - 4))
+#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
+
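+/* A completion entry is valid when its V bit matches the expected phase
+ * for this pass over the ring; the cp_bit of the raw consumer index
+ * flips on every ring wrap.
+ */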
+#define TX_CMP_VALID(bn, txcmp, raw_cons) \
+ (!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) == \
+ !((raw_cons) & (bn)->cp_bit))
+
+#define RX_CMP_VALID(bn, rxcmp1, raw_cons) \
+ (!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
+ !((raw_cons) & (bn)->cp_bit))
+
+#define RX_AGG_CMP_VALID(bn, agg, raw_cons) \
+ (!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \
+ !((raw_cons) & (bn)->cp_bit))
+
+#define NQ_CMP_VALID(bn, nqcmp, raw_cons) \
+ (!!((nqcmp)->v & cpu_to_le32(NQ_CN_V)) == !((raw_cons) & (bn)->cp_bit))
+
+#define TX_CMP_TYPE(txcmp) \
+ (le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
+
+#define RX_CMP_TYPE(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
+
+#define RING_RX(bn, idx) ((idx) & (bn)->rx_ring_mask)
+#define NEXT_RX(idx) ((idx) + 1)
+
+#define RING_RX_AGG(bn, idx) ((idx) & (bn)->rx_agg_ring_mask)
+#define NEXT_RX_AGG(idx) ((idx) + 1)
+
+#define SW_TX_RING(bn, idx) ((idx) & (bn)->tx_ring_mask)
+#define NEXT_TX(idx) ((idx) + 1)
+
+#define ADV_RAW_CMP(idx, n) ((idx) + (n))
+#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
+#define RING_CMP(bn, idx) ((idx) & (bn)->cp_ring_mask)
+#define NEXT_CMP(bn, idx) RING_CMP(bn, ADV_RAW_CMP(idx, 1))
+
+#define RX_CMP_ITYPES(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_FLAGS_ITYPES_MASK)
+
+#define RX_CMP_CFA_CODE(rxcmpl1) \
+ ((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \
+ RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT)
+
+irqreturn_t bnge_msix(int irq, void *dev_instance);
+netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void bnge_reuse_rx_data(struct bnge_rx_ring_info *rxr, u16 cons, void *data);
+int bnge_napi_poll(struct napi_struct *napi, int budget);
+netdev_features_t bnge_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
+#endif /* _BNGE_TXRX_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8419d1eb4035..fb45e1dd1dd7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -517,9 +517,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto tx_free;
-
length = skb->len;
len = skb_headlen(skb);
last_frag = skb_shinfo(skb)->nr_frags;
@@ -905,7 +902,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
{
- return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
+ return rxr->need_head_pool || rxr->rx_page_size < PAGE_SIZE;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -915,9 +912,9 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
{
struct page *page;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ if (rxr->rx_page_size < PAGE_SIZE) {
page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
- BNXT_RX_PAGE_SIZE);
+ rxr->rx_page_size);
} else {
page = page_pool_dev_alloc_pages(rxr->page_pool);
*offset = 0;
@@ -936,8 +933,9 @@ static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
{
netmem_ref netmem;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
- netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp);
+ if (rxr->rx_page_size < PAGE_SIZE) {
+ netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
+ rxr->rx_page_size, gfp);
} else {
netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
*offset = 0;
@@ -1155,9 +1153,9 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
bp->rx_dir);
- skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
+ skb = napi_build_skb(data_ptr - bp->rx_offset, rxr->rx_page_size);
if (!skb) {
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
@@ -1189,7 +1187,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
bp->rx_dir);
if (unlikely(!payload))
@@ -1203,7 +1201,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
skb_mark_for_recycle(skb);
off = (void *)data_ptr - page_address(page);
- skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
+ skb_add_rx_frag(skb, 0, page, off, len, rxr->rx_page_size);
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
payload + NET_IP_ALIGN);
@@ -1288,7 +1286,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
if (skb) {
skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
cons_rx_buf->offset,
- frag_len, BNXT_RX_PAGE_SIZE);
+ frag_len, rxr->rx_page_size);
} else {
skb_frag_t *frag = &shinfo->frags[i];
@@ -1313,7 +1311,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
if (skb) {
skb->len -= frag_len;
skb->data_len -= frag_len;
- skb->truesize -= BNXT_RX_PAGE_SIZE;
+ skb->truesize -= rxr->rx_page_size;
}
--shinfo->nr_frags;
@@ -1328,7 +1326,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
}
page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
- BNXT_RX_PAGE_SIZE);
+ rxr->rx_page_size);
total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
@@ -1803,7 +1801,8 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
struct bnxt_tpa_info *tpa_info,
struct rx_tpa_end_cmp *tpa_end,
struct rx_tpa_end_cmp_ext *tpa_end1,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct bnxt_rx_sw_stats *rx_stats)
{
#ifdef CONFIG_INET
int payload_off;
@@ -1813,6 +1812,9 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
if (segs == 1)
return skb;
+ rx_stats->rx_hw_gro_packets++;
+ rx_stats->rx_hw_gro_wire_packets += segs;
+
NAPI_GRO_CB(skb)->count = segs;
skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
@@ -1986,7 +1988,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (gro)
- skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
+ skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb,
+ &cpr->sw_stats->rx);
return skb;
}
@@ -2290,8 +2293,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!skb)
goto oom_next_rx;
} else {
- skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
- rxr->page_pool, &xdp);
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr, &xdp);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
@@ -3825,23 +3827,40 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
}
}
+static int bnxt_rx_agg_ring_fill_level(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ /* The user may have chosen a larger than default rx_page_size.
+ * We keep the ring sizes uniform and also want a uniform amount
+ * of memory consumed per ring, so cap how much of the ring we fill.
+ */
+ int fill_level = bp->rx_agg_ring_size;
+
+ if (rxr->rx_page_size > BNXT_RX_PAGE_SIZE)
+ fill_level /= rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
+
+ return fill_level;
+}
+
static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
int numa_node)
{
- const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
+ unsigned int agg_size_fac = rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
struct page_pool_params pp = { 0 };
struct page_pool *pool;
- pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
+ pp.pool_size = bnxt_rx_agg_ring_fill_level(bp, rxr) / agg_size_fac;
if (BNXT_RX_PAGE_MODE(bp))
pp.pool_size += bp->rx_ring_size / rx_size_fac;
+
+ pp.order = get_order(rxr->rx_page_size);
pp.nid = numa_node;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
- pp.max_len = PAGE_SIZE;
+ pp.max_len = PAGE_SIZE << pp.order;
pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
PP_FLAG_ALLOW_UNREADABLE_NETMEM;
pp.queue_idx = rxr->bnapi->index;
@@ -3852,7 +3871,10 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
rxr->page_pool = pool;
rxr->need_head_pool = page_pool_is_unreadable(pool);
+ rxr->need_head_pool |= !!pp.order;
if (bnxt_separate_head_pool(rxr)) {
+ pp.order = 0;
+ pp.max_len = PAGE_SIZE;
pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pool = page_pool_create(&pp);
@@ -4306,6 +4328,7 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct netdev_queue_config qcfg;
struct bnxt_ring_mem_info *rmem;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
@@ -4328,6 +4351,9 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
if (!rxr)
goto skip_rx;
+ netdev_queue_config(bp->dev, i, &qcfg);
+ rxr->rx_page_size = qcfg.rx_page_size;
+
ring = &rxr->rx_ring_struct;
rmem = &ring->ring_mem;
rmem->nr_pages = bp->rx_nr_pages;
@@ -4405,11 +4431,13 @@ static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
int ring_nr)
{
+ int fill_level, i;
u32 prod;
- int i;
+
+ fill_level = bnxt_rx_agg_ring_fill_level(bp, rxr);
prod = rxr->rx_agg_prod;
- for (i = 0; i < bp->rx_agg_ring_size; i++) {
+ for (i = 0; i < fill_level; i++) {
if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
ring_nr, i, bp->rx_agg_ring_size);
@@ -4487,7 +4515,7 @@ static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
ring = &rxr->rx_agg_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
- type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ type = ((u32)rxr->rx_page_size << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD;
/* On P7, setting EOP will cause the chip to disable
@@ -6567,6 +6595,9 @@ int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (!rx_rings)
return 0;
+ if (bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX)
+ return BNXT_RSS_TABLE_MAX_TBL_P5;
+
return bnxt_calc_nr_ring_pages(rx_rings - 1,
BNXT_RSS_TABLE_ENTRIES_P5);
}
@@ -7065,6 +7096,7 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
struct hwrm_ring_alloc_input *req,
+ struct bnxt_rx_ring_info *rxr,
struct bnxt_ring_struct *ring)
{
struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
@@ -7074,7 +7106,7 @@ static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
if (ring_type == HWRM_RING_ALLOC_AGG) {
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
- req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ req->rx_buf_size = cpu_to_le16(rxr->rx_page_size);
enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
} else {
req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
@@ -7088,6 +7120,7 @@ static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
}
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
struct bnxt_ring_struct *ring,
u32 ring_type, u32 map_index)
{
@@ -7144,7 +7177,8 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
cpu_to_le32(bp->rx_ring_mask + 1) :
cpu_to_le32(bp->rx_agg_ring_mask + 1);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
+ bnxt_set_rx_ring_params_p5(bp, ring_type, req,
+ rxr, ring);
break;
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -7292,7 +7326,7 @@ static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
u32 map_idx = bnapi->index;
int rc;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
if (rc)
return rc;
@@ -7312,7 +7346,7 @@ static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
int rc;
map_idx = grp_idx + bp->rx_nr_rings;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
if (rc)
return rc;
@@ -7336,7 +7370,7 @@ static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
ring = &cpr->cp_ring_struct;
ring->handle = BNXT_SET_NQ_HDL(cpr);
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
if (rc)
return rc;
bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
@@ -7351,7 +7385,7 @@ static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
const u32 type = HWRM_RING_ALLOC_TX;
int rc;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, tx_idx);
if (rc)
return rc;
bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
@@ -7377,7 +7411,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
vector = bp->irq_tbl[map_idx].vector;
disable_irq_nosync(vector);
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
if (rc) {
enable_irq(vector);
goto err_out;
@@ -7917,13 +7951,28 @@ static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
return 1;
}
+static void bnxt_get_total_resources(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ hwr->cp = bnxt_nq_rings_in_use(bp);
+ hwr->cp_p5 = 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr->cp_p5 = bnxt_cp_rings_in_use(bp);
+ hwr->tx = bp->tx_nr_rings;
+ hwr->rx = bp->rx_nr_rings;
+ hwr->grp = hwr->rx;
+ hwr->vnic = bnxt_get_total_vnics(bp, hwr->rx);
+ hwr->rss_ctx = bnxt_get_total_rss_ctxs(bp, hwr);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ hwr->rx <<= 1;
+ hwr->stat = bnxt_get_func_stat_ctxs(bp);
+}
+
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int cp = bnxt_cp_rings_in_use(bp);
- int nq = bnxt_nq_rings_in_use(bp);
- int rx = bp->rx_nr_rings, stat;
- int vnic, grp = rx;
+ struct bnxt_hw_rings hwr;
+
+ bnxt_get_total_resources(bp, &hwr);
/* Old firmware does not need RX ring reservations but we still
* need to setup a default RSS map when needed. With new firmware
@@ -7933,25 +7982,27 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
if (!BNXT_NEW_RM(bp))
bnxt_check_rss_tbl_no_rmgr(bp);
- if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
- bp->hwrm_spec_code >= 0x10601)
+ if (hw_resc->resv_tx_rings != hwr.tx && bp->hwrm_spec_code >= 0x10601)
return true;
if (!BNXT_NEW_RM(bp))
return false;
- vnic = bnxt_get_total_vnics(bp, rx);
-
- if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx <<= 1;
- stat = bnxt_get_func_stat_ctxs(bp);
- if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
- hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
- (hw_resc->resv_hw_ring_grps != grp &&
+ if (hw_resc->resv_rx_rings != hwr.rx ||
+ hw_resc->resv_vnics != hwr.vnic ||
+ hw_resc->resv_stat_ctxs != hwr.stat ||
+ hw_resc->resv_rsscos_ctxs != hwr.rss_ctx ||
+ (hw_resc->resv_hw_ring_grps != hwr.grp &&
!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
return true;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ if (hw_resc->resv_cp_rings != hwr.cp_p5)
+ return true;
+ } else if (hw_resc->resv_cp_rings != hwr.cp) {
+ return true;
+ }
if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
- hw_resc->resv_irqs != nq)
+ hw_resc->resv_irqs != hwr.cp)
return true;
return false;
}
@@ -8077,6 +8128,11 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
bp->rx_nr_rings = rx_rings;
bp->cp_nr_rings = hwr.cp;
+ /* Fall back if we cannot reserve enough HW RSS contexts */
+ if ((bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX) &&
+ hwr.rss_ctx < bnxt_get_total_rss_ctxs(bp, &hwr))
+ bp->rss_cap &= ~BNXT_RSS_CAP_LARGE_RSS_CTX;
+
if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
@@ -9567,6 +9623,10 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ if (hw_resc->max_rsscos_ctxs >=
+ hw_resc->max_vnics * BNXT_LARGE_RSS_TO_VNIC_RATIO)
+ bp->rss_cap |= BNXT_RSS_CAP_LARGE_RSS_CTX;
+
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 max_msix = le16_to_cpu(resp->max_msix);
@@ -9700,6 +9760,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_PTP_PTM;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
@@ -11901,6 +11963,26 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info)
}
}
+static char *bnxt_link_down_reason(struct bnxt_link_info *link_info)
+{
+ u8 reason = link_info->link_down_reason;
+
+ /* Multiple bits can be set; report only the highest-priority
+ * one.
+ */
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF)
+ return "(Remote fault)";
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION)
+ return "(OTP Speed limit violation)";
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED)
+ return "(Cable removed)";
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT)
+ return "(Module fault)";
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST)
+ return "(BMC request down)";
+ return "";
+}
+
void bnxt_report_link(struct bnxt *bp)
{
if (BNXT_LINK_IS_UP(bp)) {
@@ -11958,8 +12040,10 @@ void bnxt_report_link(struct bnxt *bp)
(fec & BNXT_FEC_AUTONEG) ? "on" : "off",
bnxt_report_fec(&bp->link_info));
} else {
+ char *str = bnxt_link_down_reason(&bp->link_info);
+
netif_carrier_off(bp->dev);
- netdev_err(bp->dev, "NIC Link is Down\n");
+ netdev_err(bp->dev, "NIC Link is Down %s\n", str);
}
}
@@ -12159,6 +12243,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->phy_addr = resp->eee_config_phy_addr &
PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
link_info->module_status = resp->module_status;
+ link_info->link_down_reason = resp->link_down_reason;
if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
struct ethtool_keee *eee = &bp->eee;
@@ -13426,6 +13511,8 @@ static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
stats->rx_total_ring_discards +=
BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
+ stats->rx_total_hw_gro_packets += sw_stats->rx.rx_hw_gro_packets;
+ stats->rx_total_hw_gro_wire_packets += sw_stats->rx.rx_hw_gro_wire_packets;
stats->tx_total_resets += sw_stats->tx.tx_resets;
stats->tx_total_ring_discards +=
BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
@@ -13815,7 +13902,6 @@ static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
u8 **nextp)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
- struct hop_jumbo_hdr *jhdr;
int hdr_count = 0;
u8 *nexthdr;
int start;
@@ -13844,24 +13930,7 @@ static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
if (hdrlen > 64)
return false;
- /* The ext header may be a hop-by-hop header inserted for
- * big TCP purposes. This will be removed before sending
- * from NIC, so do not count it.
- */
- if (*nexthdr == NEXTHDR_HOP) {
- if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
- goto increment_hdr;
-
- jhdr = (struct hop_jumbo_hdr *)hp;
- if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
- jhdr->nexthdr != IPPROTO_TCP)
- goto increment_hdr;
-
- goto next_hdr;
- }
-increment_hdr:
hdr_count++;
-next_hdr:
nexthdr = &hp->nexthdr;
start += hdrlen;
}
@@ -15865,6 +15934,8 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
+ stats->hw_gro_packets = cpr->sw_stats->rx.rx_hw_gro_packets;
+ stats->hw_gro_wire_packets = cpr->sw_stats->rx.rx_hw_gro_wire_packets;
}
static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
@@ -15900,6 +15971,8 @@ static void bnxt_get_base_stats(struct net_device *dev,
rx->packets = bp->net_stats_prev.rx_packets;
rx->bytes = bp->net_stats_prev.rx_bytes;
rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
+ rx->hw_gro_packets = bp->ring_err_stats_prev.rx_total_hw_gro_packets;
+ rx->hw_gro_wire_packets = bp->ring_err_stats_prev.rx_total_hw_gro_wire_packets;
tx->packets = bp->net_stats_prev.tx_packets;
tx->bytes = bp->net_stats_prev.tx_bytes;
@@ -15911,7 +15984,36 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
.get_base_stats = bnxt_get_base_stats,
};
-static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+static void bnxt_queue_default_qcfg(struct net_device *dev,
+ struct netdev_queue_config *qcfg)
+{
+ qcfg->rx_page_size = BNXT_RX_PAGE_SIZE;
+}
+
+static int bnxt_validate_qcfg(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ /* Older chips need MSS calculation, so a non-default rx_page_size is not supported */
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ qcfg->rx_page_size != BNXT_RX_PAGE_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(qcfg->rx_page_size))
+ return -ERANGE;
+
+ if (qcfg->rx_page_size < BNXT_RX_PAGE_SIZE ||
+ qcfg->rx_page_size > BNXT_MAX_RX_PAGE_SIZE)
+ return -ERANGE;
+
+ return 0;
+}
+
+static int bnxt_queue_mem_alloc(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *qmem, int idx)
{
struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt *bp = netdev_priv(dev);
@@ -15932,6 +16034,7 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
clone->rx_sw_agg_prod = 0;
clone->rx_next_cons = 0;
clone->need_head_pool = false;
+ clone->rx_page_size = qcfg->rx_page_size;
rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
if (rc)
@@ -16058,6 +16161,8 @@ static void bnxt_copy_rx_ring(struct bnxt *bp,
src_ring = &src->rx_agg_ring_struct;
src_rmem = &src_ring->ring_mem;
+ dst->rx_page_size = src->rx_page_size;
+
WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
WARN_ON(dst_rmem->page_size != src_rmem->page_size);
WARN_ON(dst_rmem->flags != src_rmem->flags);
@@ -16077,7 +16182,9 @@ static void bnxt_copy_rx_ring(struct bnxt *bp,
dst->rx_agg_bmap = src->rx_agg_bmap;
}
-static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+static int bnxt_queue_start(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *qmem, int idx)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr, *clone;
@@ -16210,6 +16317,13 @@ static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
.ndo_queue_mem_free = bnxt_queue_mem_free,
.ndo_queue_start = bnxt_queue_start,
.ndo_queue_stop = bnxt_queue_stop,
+ .ndo_default_qcfg = bnxt_queue_default_qcfg,
+ .ndo_validate_qcfg = bnxt_validate_qcfg,
+ .supported_params = QCFG_RX_PAGE_SIZE,
+};
+
+static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops_unsupp = {
+ .ndo_default_qcfg = bnxt_queue_default_qcfg,
};
static void bnxt_remove_one(struct pci_dev *pdev)
@@ -16864,9 +16978,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+
+ dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops_unsupp;
if (BNXT_SUPPORTS_QUEUE_API(bp))
dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
- dev->request_ops_lock = true;
dev->netmem_tx = true;
rc = register_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f88e7769a838..9a41b9e0423c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -760,6 +760,7 @@ struct nqe_cn {
#endif
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+#define BNXT_MAX_RX_PAGE_SIZE BIT(15)
#define BNXT_MAX_MTU 9500
@@ -1105,6 +1106,7 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
+ u32 rx_page_size;
bool need_head_pool;
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
@@ -1124,8 +1126,11 @@ struct bnxt_rx_sw_stats {
u64 rx_l4_csum_errors;
u64 rx_resets;
u64 rx_buf_errors;
+ /* end of ethtool -S stats */
u64 rx_oom_discards;
u64 rx_netpoll_discards;
+ u64 rx_hw_gro_packets;
+ u64 rx_hw_gro_wire_packets;
};
struct bnxt_tx_sw_stats {
@@ -1152,6 +1157,9 @@ struct bnxt_total_ring_err_stats {
u64 tx_total_resets;
u64 tx_total_ring_discards;
u64 total_missed_irqs;
+ /* end of ethtool -S stats */
+ u64 rx_total_hw_gro_packets;
+ u64 rx_total_hw_gro_wire_packets;
};
struct bnxt_stats_mem {
@@ -1367,6 +1375,8 @@ struct bnxt_hw_resc {
u32 max_rx_wm_flows;
};
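+/* Use the large RSS table only when FW exposes at least this many RSS
+ * contexts per VNIC.
+ */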
+#define BNXT_LARGE_RSS_TO_VNIC_RATIO 7
+
#if defined(CONFIG_BNXT_SRIOV)
struct bnxt_vf_info {
u16 fw_fid;
@@ -1551,6 +1561,7 @@ struct bnxt_link_info {
#define BNXT_LINK_STATE_DOWN 1
#define BNXT_LINK_STATE_UP 2
#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP)
+ u8 link_down_reason;
u8 active_lanes;
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
@@ -2410,6 +2421,7 @@ struct bnxt {
#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8)
#define BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP BIT(9)
+#define BNXT_RSS_CAP_LARGE_RSS_CTX BIT(10)
u8 rss_hash_key[HW_HASH_KEY_SIZE];
u8 rss_hash_key_valid:1;
@@ -2516,6 +2528,7 @@ struct bnxt {
#define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
#define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42)
#define BNXT_FW_CAP_MIRROR_ON_ROCE BIT_ULL(43)
+ #define BNXT_FW_CAP_PTP_PTM BIT_ULL(44)
u32 fw_dbg_cap;
@@ -2701,6 +2714,7 @@ struct bnxt {
#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8)
#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
+#define BNXT_PHY_FL_FDRSTATS (PORT_PHY_QCAPS_RESP_FLAGS2_FDRSTAT_CMD_SUPPORTED << 8)
/* copied from flags in hwrm_port_mac_qcaps_output */
u8 mac_flags;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 068e191ede19..53a83b6680c4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1346,16 +1346,17 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
struct bnxt_l2_filter *l2_fltr;
struct bnxt_flow_masks *fmasks;
struct flow_keys *fkeys;
- u32 idx, ring;
+ u32 idx;
int rc;
- u8 vf;
if (!bp->vnic_info)
return -EAGAIN;
- vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
- ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
- if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
+ if (fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT))
+ return -EOPNOTSUPP;
+
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+ ethtool_get_flow_spec_ring_vf(fs->ring_cookie))
return -EOPNOTSUPP;
if (flow_type == IP_USER_FLOW) {
@@ -1481,7 +1482,7 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
if (fs->ring_cookie == RX_CLS_FLOW_DISC)
new_fltr->base.flags |= BNXT_ACT_DROP;
else
- new_fltr->base.rxq = ring;
+ new_fltr->base.rxq = ethtool_get_flow_spec_ring(fs->ring_cookie);
__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
if (!rc) {
@@ -3216,6 +3217,56 @@ static int bnxt_get_fecparam(struct net_device *dev,
return 0;
}
+static const struct ethtool_fec_hist_range bnxt_fec_ranges[] = {
+ { 0, 0},
+ { 1, 1},
+ { 2, 2},
+ { 3, 3},
+ { 4, 4},
+ { 5, 5},
+ { 6, 6},
+ { 7, 7},
+ { 8, 8},
+ { 9, 9},
+ { 10, 10},
+ { 11, 11},
+ { 12, 12},
+ { 13, 13},
+ { 14, 14},
+ { 15, 15},
+ { 0, 0},
+};
+
+static void bnxt_hwrm_port_phy_fdrstat(struct bnxt *bp,
+ struct ethtool_fec_hist *hist)
+{
+ struct ethtool_fec_hist_value *values = hist->values;
+ struct hwrm_port_phy_fdrstat_output *resp;
+ struct hwrm_port_phy_fdrstat_input *req;
+ int rc, i;
+
+ if (!(bp->phy_flags & BNXT_PHY_FL_FDRSTATS))
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_FDRSTAT);
+ if (rc)
+ return;
+
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->ops = cpu_to_le16(PORT_PHY_FDRSTAT_REQ_OPS_COUNTER);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ hist->ranges = bnxt_fec_ranges;
+ for (i = 0; i <= 15; i++) {
+ __le64 sum = resp->accumulated_codewords_err_s[i];
+
+ values[i].sum = le64_to_cpu(sum);
+ }
+ }
+ hwrm_req_drop(bp, req);
+}
+
static void bnxt_get_fec_stats(struct net_device *dev,
struct ethtool_fec_stats *fec_stats,
struct ethtool_fec_hist *hist)
@@ -3237,6 +3288,7 @@ static void bnxt_get_fec_stats(struct net_device *dev,
*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
fec_stats->uncorrectable_blocks.total =
*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
+ bnxt_hwrm_port_phy_fdrstat(bp, hist);
}
static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
@@ -3381,6 +3433,40 @@ static u32 bnxt_get_link(struct net_device *dev)
return BNXT_LINK_IS_UP(bp);
}
+static int bnxt_get_link_ext_state(struct net_device *dev,
+ struct ethtool_link_ext_state_info *info)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u8 reason;
+
+ if (BNXT_LINK_IS_UP(bp))
+ return -ENODATA;
+
+ reason = bp->link_info.link_down_reason;
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF) {
+ info->link_ext_state = ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE;
+ info->link_training = ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT;
+ return 0;
+ }
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED) {
+ info->link_ext_state = ETHTOOL_LINK_EXT_STATE_NO_CABLE;
+ return 0;
+ }
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION) {
+ info->link_ext_state = ETHTOOL_LINK_EXT_STATE_OTP_SPEED_VIOLATION;
+ return 0;
+ }
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT) {
+ info->link_ext_state = ETHTOOL_LINK_EXT_STATE_MODULE;
+ return 0;
+ }
+ if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST) {
+ info->link_ext_state = ETHTOOL_LINK_EXT_STATE_BMC_REQUEST_DOWN;
+ return 0;
+ }
+ return -ENODATA;
+}
+
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
{
@@ -3797,9 +3883,25 @@ static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
#define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
+static int bnxt_hwrm_nvm_defrag(struct bnxt *bp)
+{
+ struct hwrm_nvm_defrag_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_DEFRAG);
+ if (rc)
+ return rc;
+ req->flags = cpu_to_le32(NVM_DEFRAG_REQ_FLAGS_DEFRAG);
+ hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
+
+ return hwrm_req_send(bp, req);
+}
+
static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
struct netlink_ext_ack *extack)
{
+ struct bnxt *bp = netdev_priv(dev);
+ bool retry = false;
u32 item_len;
int rc;
@@ -3812,9 +3914,19 @@ static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
}
if (fw_size > item_len) {
- rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
- BNX_DIR_ORDINAL_FIRST, 0, 1,
- round_up(fw_size, 4096), NULL, 0);
+ do {
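+ /* On -ENOSPC, defragment the NVM once and retry the write */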
+ rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, 0, 1,
+ round_up(fw_size, 4096), NULL,
+ 0);
+
+ if (rc == -ENOSPC) {
+ if (retry || bnxt_hwrm_nvm_defrag(bp))
+ break;
+ retry = true;
+ }
+ } while (rc == -ENOSPC);
+
if (rc) {
BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
return rc;
@@ -5634,6 +5746,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_eeprom = bnxt_get_eeprom,
.set_eeprom = bnxt_set_eeprom,
.get_link = bnxt_get_link,
+ .get_link_ext_state = bnxt_get_link_ext_state,
.get_link_ext_stats = bnxt_get_link_ext_stats,
.get_eee = bnxt_get_eee,
.set_eee = bnxt_set_eee,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index a8a74f07bb54..ad89c5fa9b40 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -882,6 +882,51 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
}
}
+#ifdef CONFIG_X86
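+/* PCIe PTM cross-timestamp: firmware samples the PHC and the host
+ * (ART-derived) system time together; convert the PHC reading to ns
+ * and return the system timestamp for get_device_system_crosststamp()
+ * to correlate.
+ */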
+static int bnxt_phc_get_syncdevicetime(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct bnxt_ptp_cfg *ptp = (struct bnxt_ptp_cfg *)ctx;
+ struct hwrm_func_ptp_ts_query_output *resp;
+ struct hwrm_func_ptp_ts_query_input *req;
+ struct bnxt *bp = ptp->bp;
+ u64 ptm_local_ts;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_TS_QUERY);
+ if (rc)
+ return rc;
+ req->flags = cpu_to_le32(FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc) {
+ hwrm_req_drop(bp, req);
+ return rc;
+ }
+ ptm_local_ts = le64_to_cpu(resp->ptm_local_ts);
+ *device = ns_to_ktime(bnxt_timecounter_cyc2time(ptp, ptm_local_ts));
+ /* ptm_system_ts is 64-bit */
+ system->cycles = le64_to_cpu(resp->ptm_system_ts);
+ system->cs_id = CSID_X86_ART;
+ system->use_nsecs = true;
+
+ hwrm_req_drop(bp, req);
+
+ return 0;
+}
+
+static int bnxt_ptp_getcrosststamp(struct ptp_clock_info *ptp_info,
+ struct system_device_crosststamp *xtstamp)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+
+ return get_device_system_crosststamp(bnxt_phc_get_syncdevicetime,
+ ptp, NULL, xtstamp);
+}
+#endif /* CONFIG_X86 */
+
static const struct ptp_clock_info bnxt_ptp_caps = {
.owner = THIS_MODULE,
.name = "bnxt clock",
@@ -1094,6 +1139,12 @@ int bnxt_ptp_init(struct bnxt *bp)
if (bnxt_ptp_pps_init(bp))
netdev_err(bp->dev, "1pps not initialized, continuing without 1pps support\n");
}
+#ifdef CONFIG_X86
+ if ((bp->fw_cap & BNXT_FW_CAP_PTP_PTM) && pcie_ptm_enabled(bp->pdev) &&
+ boot_cpu_has(X86_FEATURE_ART))
+ ptp->ptp_info.getcrosststamp = bnxt_ptp_getcrosststamp;
+#endif /* CONFIG_X86 */
+
ptp->ptp_clock = ptp_clock_register(&ptp->ptp_info, &bp->pdev->dev);
if (IS_ERR(ptp->ptp_clock)) {
int err = PTR_ERR(ptp->ptp_clock);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c94a391b1ba5..85cbeb35681c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -183,7 +183,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 cons, u8 *data_ptr, unsigned int len,
struct xdp_buff *xdp)
{
- u32 buflen = BNXT_RX_PAGE_SIZE;
+ u32 buflen = rxr->rx_page_size;
struct bnxt_sw_rx_bd *rx_buf;
struct pci_dev *pdev;
dma_addr_t mapping;
@@ -460,7 +460,7 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
- struct page_pool *pool, struct xdp_buff *xdp)
+ struct bnxt_rx_ring_info *rxr, struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -468,7 +468,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
return NULL;
xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * num_frags,
+ rxr->rx_page_size * num_frags,
xdp_buff_get_skb_flags(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 220285e190fc..8933a0dec09a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -32,6 +32,6 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
- u8 num_frags, struct page_pool *pool,
+ u8 num_frags, struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
#endif
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6511ecd5856b..43cd013bb70e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -705,14 +705,12 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);
- /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
- * cleared the pipeline and control registers.
- */
- macb_init_buffers(bp);
-
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue->tx_head = 0;
+ queue->tx_tail = 0;
queue_writel(queue, IER,
bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+ }
}
macb_or_gem_writel(bp, NCFGR, ctrl);
@@ -2954,6 +2952,7 @@ static int macb_open(struct net_device *dev)
}
bp->macbgem_ops.mog_init_rings(bp);
+ macb_init_buffers(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_enable(&queue->napi_rx);
@@ -3850,6 +3849,13 @@ static int gem_get_all_flow_entries(struct net_device *netdev,
return 0;
}
+static u32 gem_get_rx_ring_count(struct net_device *netdev)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ return bp->num_queues;
+}
+
static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -3857,9 +3863,6 @@ static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = bp->num_queues;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->rx_fs_list.count;
break;
@@ -3941,6 +3944,7 @@ static const struct ethtool_ops gem_ethtool_ops = {
.set_ringparam = macb_set_ringparam,
.get_rxnfc = gem_get_rxnfc,
.set_rxnfc = gem_set_rxnfc,
+ .get_rx_ring_count = gem_get_rx_ring_count,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -4811,7 +4815,9 @@ static int at91ether_close(struct net_device *dev)
at91ether_stop(lp);
- return pm_runtime_put(&lp->pdev->dev);
+ pm_runtime_put(&lp->pdev->dev);
+
+ return 0;
}
/* Transmit packet */
@@ -5431,9 +5437,9 @@ static const struct macb_config default_gem_config = {
static int macb_probe(struct platform_device *pdev)
{
- const struct macb_config *macb_config = &default_gem_config;
- struct device_node *np = pdev->dev.of_node;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
+ struct device_node *np = pdev->dev.of_node;
+ const struct macb_config *macb_config;
struct clk *tsu_clk = NULL;
phy_interface_t interface;
struct net_device *dev;
@@ -5449,13 +5455,9 @@ static int macb_probe(struct platform_device *pdev)
if (IS_ERR(mem))
return PTR_ERR(mem);
- if (np) {
- const struct of_device_id *match;
-
- match = of_match_node(macb_dt_ids, np);
- if (match && match->data)
- macb_config = match->data;
- }
+ macb_config = of_device_get_match_data(&pdev->dev);
+ if (!macb_config)
+ macb_config = &default_gem_config;
err = macb_config->clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
if (err)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 23326235d4ab..faf8f7e86520 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1784,6 +1784,13 @@ static int cxgb4_get_rxfh_fields(struct net_device *dev,
return 0;
}
+static u32 get_rx_ring_count(struct net_device *dev)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ return pi->nqsets;
+}
+
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -1793,9 +1800,6 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
int ret = 0;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = pi->nqsets;
- return 0;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt =
adap->ethtool_filters->port[pi->port_id].in_use;
@@ -2200,6 +2204,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_regs = get_regs,
.get_rxnfc = get_rxnfc,
.set_rxnfc = set_rxnfc,
+ .get_rx_ring_count = get_rx_ring_count,
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh = get_rss_table,
.set_rxfh = set_rss_table,
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index a50f5dad34d5..471613899ec0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -573,6 +573,13 @@ static int enic_get_rx_flow_hash(struct net_device *dev,
return 0;
}
+static u32 enic_get_rx_ring_count(struct net_device *dev)
+{
+ struct enic *enic = netdev_priv(dev);
+
+ return enic->rq_count;
+}
+
static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -580,9 +587,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = enic->rq_count;
- break;
case ETHTOOL_GRXCLSRLCNT:
spin_lock_bh(&enic->rfs_h.lock);
cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
@@ -689,6 +693,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
.get_rxnfc = enic_get_rxnfc,
+ .get_rx_ring_count = enic_get_rx_ring_count,
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 846d58c769ea..69bfb8265d57 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -279,18 +279,15 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
card_idx++;
- printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
- dev->name, np->name, dev->dev_addr, irq);
+ netdev_info(dev, "%s, %pM, IRQ %d", np->name, dev->dev_addr, irq);
if (tx_coalesce > 1)
- printk(KERN_INFO "tx_coalesce:\t%d packets\n",
- tx_coalesce);
- if (np->coalesce)
- printk(KERN_INFO
- "rx_coalesce:\t%d packets\n"
- "rx_timeout: \t%d ns\n",
- np->rx_coalesce, np->rx_timeout*640);
+ netdev_dbg(dev, "tx_coalesce:\t%d packets", tx_coalesce);
+ if (np->coalesce) {
+ netdev_dbg(dev, "rx_coalesce:\t%d packets", np->rx_coalesce);
+ netdev_dbg(dev, "rx_timeout: \t%d ns", np->rx_timeout * 640);
+ }
if (np->vlan)
- printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
+ netdev_dbg(dev, "vlan(id):\t%d", np->vlan);
return 0;
err_out_unmap_rx:
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
deleted file mode 100644
index 0de3cd660ec8..000000000000
--- a/drivers/net/ethernet/dnet.c
+++ /dev/null
@@ -1,877 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Dave DNET Ethernet Controller driver
- *
- * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
- * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
- */
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-
-#include "dnet.h"
-
-#undef DEBUG
-
-/* function for reading internal MAC register */
-static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
-{
- u16 data_read;
-
- /* issue a read */
- dnet_writel(bp, reg, MACREG_ADDR);
-
- /* since a read/write op to the MAC is very slow,
- * we must wait before reading the data */
- ndelay(500);
-
- /* read data read from the MAC register */
- data_read = dnet_readl(bp, MACREG_DATA);
-
- /* all done */
- return data_read;
-}
-
-/* function for writing internal MAC register */
-static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
-{
- /* load data to write */
- dnet_writel(bp, val, MACREG_DATA);
-
- /* issue a write */
- dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);
-
- /* since a read/write op to the MAC is very slow,
- * we must wait before exiting */
- ndelay(500);
-}
-
-static void __dnet_set_hwaddr(struct dnet *bp)
-{
- u16 tmp;
-
- tmp = be16_to_cpup((const __be16 *)bp->dev->dev_addr);
- dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
- tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 2));
- dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
- tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 4));
- dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
-}
-
-static void dnet_get_hwaddr(struct dnet *bp)
-{
- u16 tmp;
- u8 addr[6];
-
- /*
- * from MAC docs:
- * "Note that the MAC address is stored in the registers in Hexadecimal
- * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
- * would require writing 0xAC (octet 0) to address 0x0B (high byte of
- * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
- * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
- * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of
- * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of
- * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of
- * Mac_addr[15:0]).
- */
- tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
- *((__be16 *)addr) = cpu_to_be16(tmp);
- tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
- *((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
- tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
- *((__be16 *)(addr + 4)) = cpu_to_be16(tmp);
-
- if (is_valid_ether_addr(addr))
- eth_hw_addr_set(bp->dev, addr);
-}
-
-static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
- struct dnet *bp = bus->priv;
- u16 value;
-
- while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
- & DNET_INTERNAL_GMII_MNG_CMD_FIN))
- cpu_relax();
-
- /* only 5 bits allowed for phy-addr and reg_offset */
- mii_id &= 0x1f;
- regnum &= 0x1f;
-
- /* prepare reg_value for a read */
- value = (mii_id << 8);
- value |= regnum;
-
- /* write control word */
- dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);
-
- /* wait for end of transfer */
- while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
- & DNET_INTERNAL_GMII_MNG_CMD_FIN))
- cpu_relax();
-
- value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);
-
- pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);
-
- return value;
-}
-
-static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
-{
- struct dnet *bp = bus->priv;
- u16 tmp;
-
- pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);
-
- while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
- & DNET_INTERNAL_GMII_MNG_CMD_FIN))
- cpu_relax();
-
- /* prepare for a write operation */
- tmp = (1 << 13);
-
- /* only 5 bits allowed for phy-addr and reg_offset */
- mii_id &= 0x1f;
- regnum &= 0x1f;
-
- /* only 16 bits on data */
- value &= 0xffff;
-
- /* prepare reg_value for a write */
- tmp |= (mii_id << 8);
- tmp |= regnum;
-
- /* write data to write first */
- dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);
-
- /* write control word */
- dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);
-
- while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
- & DNET_INTERNAL_GMII_MNG_CMD_FIN))
- cpu_relax();
-
- return 0;
-}
-
-static void dnet_handle_link_change(struct net_device *dev)
-{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- unsigned long flags;
- u32 mode_reg, ctl_reg;
-
- int status_change = 0;
-
- spin_lock_irqsave(&bp->lock, flags);
-
- mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
- ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);
-
- if (phydev->link) {
- if (bp->duplex != phydev->duplex) {
- if (phydev->duplex)
- ctl_reg &=
- ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
- else
- ctl_reg |=
- DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;
-
- bp->duplex = phydev->duplex;
- status_change = 1;
- }
-
- if (bp->speed != phydev->speed) {
- status_change = 1;
- switch (phydev->speed) {
- case 1000:
- mode_reg |= DNET_INTERNAL_MODE_GBITEN;
- break;
- case 100:
- case 10:
- mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
- break;
- default:
- printk(KERN_WARNING
- "%s: Ack! Speed (%d) is not "
- "10/100/1000!\n", dev->name,
- phydev->speed);
- break;
- }
- bp->speed = phydev->speed;
- }
- }
-
- if (phydev->link != bp->link) {
- if (phydev->link) {
- mode_reg |=
- (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
- } else {
- mode_reg &=
- ~(DNET_INTERNAL_MODE_RXEN |
- DNET_INTERNAL_MODE_TXEN);
- bp->speed = 0;
- bp->duplex = -1;
- }
- bp->link = phydev->link;
-
- status_change = 1;
- }
-
- if (status_change) {
- dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
- dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
- }
-
- spin_unlock_irqrestore(&bp->lock, flags);
-
- if (status_change) {
- if (phydev->link)
- printk(KERN_INFO "%s: link up (%d/%s)\n",
- dev->name, phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
- else
- printk(KERN_INFO "%s: link down\n", dev->name);
- }
-}
-
-static int dnet_mii_probe(struct net_device *dev)
-{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = NULL;
-
- /* find the first phy */
- phydev = phy_find_first(bp->mii_bus);
-
- if (!phydev) {
- printk(KERN_ERR "%s: no PHY found\n", dev->name);
- return -ENODEV;
- }
-
- /* TODO : add pin_irq */
-
- /* attach the mac to the phy */
- if (bp->capabilities & DNET_HAS_RMII) {
- phydev = phy_connect(dev, phydev_name(phydev),
- &dnet_handle_link_change,
- PHY_INTERFACE_MODE_RMII);
- } else {
- phydev = phy_connect(dev, phydev_name(phydev),
- &dnet_handle_link_change,
- PHY_INTERFACE_MODE_MII);
- }
-
- if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(phydev);
- }
-
- /* mask with MAC supported features */
- if (bp->capabilities & DNET_HAS_GIGABIT)
- phy_set_max_speed(phydev, SPEED_1000);
- else
- phy_set_max_speed(phydev, SPEED_100);
-
- phy_support_asym_pause(phydev);
-
- bp->link = 0;
- bp->speed = 0;
- bp->duplex = -1;
-
- return 0;
-}
-
-static int dnet_mii_init(struct dnet *bp)
-{
- int err;
-
- bp->mii_bus = mdiobus_alloc();
- if (bp->mii_bus == NULL)
- return -ENOMEM;
-
- bp->mii_bus->name = "dnet_mii_bus";
- bp->mii_bus->read = &dnet_mdio_read;
- bp->mii_bus->write = &dnet_mdio_write;
-
- snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- bp->pdev->name, bp->pdev->id);
-
- bp->mii_bus->priv = bp;
-
- if (mdiobus_register(bp->mii_bus)) {
- err = -ENXIO;
- goto err_out;
- }
-
- if (dnet_mii_probe(bp->dev) != 0) {
- err = -ENXIO;
- goto err_out_unregister_bus;
- }
-
- return 0;
-
-err_out_unregister_bus:
- mdiobus_unregister(bp->mii_bus);
-err_out:
- mdiobus_free(bp->mii_bus);
- return err;
-}
-
-/* For Neptune board: LINK1000 as Link LED and TX as activity LED */
-static int dnet_phy_marvell_fixup(struct phy_device *phydev)
-{
- return phy_write(phydev, 0x18, 0x4148);
-}
-
-static void dnet_update_stats(struct dnet *bp)
-{
- u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
- u32 *p = &bp->hw_stats.rx_pkt_ignr;
- u32 *end = &bp->hw_stats.rx_byte + 1;
-
- WARN_ON((unsigned long)(end - p - 1) !=
- (DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);
-
- for (; p < end; p++, reg++)
- *p += readl(reg);
-
- reg = bp->regs + DNET_TX_UNICAST_CNT;
- p = &bp->hw_stats.tx_unicast;
- end = &bp->hw_stats.tx_byte + 1;
-
- WARN_ON((unsigned long)(end - p - 1) !=
- (DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);
-
- for (; p < end; p++, reg++)
- *p += readl(reg);
-}
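/*
 * Note on the loops above: dnet_update_stats() walks struct dnet_stats
 * as a flat array of u32 counters, so it relies on the struct's field
 * order exactly mirroring the two hardware counter blocks
 * (DNET_RX_PKT_IGNR_CNT..DNET_RX_BYTE_CNT and
 * DNET_TX_UNICAST_CNT..DNET_TX_BYTE_CNT). The WARN_ON()s assert that
 * invariant at run time.
 */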
-
-static int dnet_poll(struct napi_struct *napi, int budget)
-{
- struct dnet *bp = container_of(napi, struct dnet, napi);
- struct net_device *dev = bp->dev;
- int npackets = 0;
- unsigned int pkt_len;
- struct sk_buff *skb;
- unsigned int *data_ptr;
- u32 int_enable;
- u32 cmd_word;
- int i;
-
- while (npackets < budget) {
- /*
- * break out of while loop if there are no more
- * packets waiting
- */
- if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
- break;
-
- cmd_word = dnet_readl(bp, RX_LEN_FIFO);
- pkt_len = cmd_word & 0xFFFF;
-
- if (cmd_word & 0xDF180000)
- printk(KERN_ERR "%s packet receive error %x\n",
- __func__, cmd_word);
-
- skb = netdev_alloc_skb(dev, pkt_len + 5);
- if (skb != NULL) {
- /* Align IP on 16 byte boundaries */
- skb_reserve(skb, 2);
-			/*
-			 * skb_put() extends the data area and returns a
-			 * pointer to the start of the newly added region.
-			 */
- data_ptr = skb_put(skb, pkt_len);
- for (i = 0; i < (pkt_len + 3) >> 2; i++)
- *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
- skb->protocol = eth_type_trans(skb, dev);
- netif_receive_skb(skb);
- npackets++;
- } else
- printk(KERN_NOTICE
- "%s: No memory to allocate a sk_buff of "
- "size %u.\n", dev->name, pkt_len);
- }
-
- if (npackets < budget) {
- /* We processed all packets available. Tell NAPI it can
- * stop polling then re-enable rx interrupts.
- */
- napi_complete_done(napi, npackets);
- int_enable = dnet_readl(bp, INTR_ENB);
- int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
- dnet_writel(bp, int_enable, INTR_ENB);
- }
-
- return npackets;
-}
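/*
 * dnet_poll() follows the standard NAPI contract. A minimal generic
 * sketch of that contract, with hypothetical helpers process_rx() and
 * enable_rx_irq() standing in for the driver-specific parts:
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int done = process_rx(napi, budget);	/* hypothetical helper */

	/* Under budget means the ring is drained: stop polling and
	 * re-arm the RX interrupt so the next packet reschedules us.
	 */
	if (done < budget && napi_complete_done(napi, done))
		enable_rx_irq(napi);		/* hypothetical helper */

	return done;
}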
-
-static irqreturn_t dnet_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct dnet *bp = netdev_priv(dev);
- u32 int_src, int_enable, int_current;
- unsigned long flags;
- unsigned int handled = 0;
-
- spin_lock_irqsave(&bp->lock, flags);
-
- /* read and clear the DNET irq (clear on read) */
- int_src = dnet_readl(bp, INTR_SRC);
- int_enable = dnet_readl(bp, INTR_ENB);
- int_current = int_src & int_enable;
-
- /* restart the queue if we had stopped it for TX fifo almost full */
- if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
- int_enable = dnet_readl(bp, INTR_ENB);
- int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
- dnet_writel(bp, int_enable, INTR_ENB);
- netif_wake_queue(dev);
- handled = 1;
- }
-
- /* RX FIFO error checking */
- if (int_current &
- (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
- printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
- dnet_readl(bp, RX_STATUS), int_current);
- /* we can only flush the RX FIFOs */
- dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
- ndelay(500);
- dnet_writel(bp, 0, SYS_CTL);
- handled = 1;
- }
-
- /* TX FIFO error checking */
- if (int_current &
- (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
- printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
- dnet_readl(bp, TX_STATUS), int_current);
- /* we can only flush the TX FIFOs */
- dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
- ndelay(500);
- dnet_writel(bp, 0, SYS_CTL);
- handled = 1;
- }
-
- if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
- if (napi_schedule_prep(&bp->napi)) {
- /*
- * There's no point taking any more interrupts
- * until we have processed the buffers
- */
- /* Disable Rx interrupts and schedule NAPI poll */
- int_enable = dnet_readl(bp, INTR_ENB);
- int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
- dnet_writel(bp, int_enable, INTR_ENB);
- __napi_schedule(&bp->napi);
- }
- handled = 1;
- }
-
- if (!handled)
- pr_debug("%s: irq %x remains\n", __func__, int_current);
-
- spin_unlock_irqrestore(&bp->lock, flags);
-
- return IRQ_RETVAL(handled);
-}
-
-#ifdef DEBUG
-static inline void dnet_print_skb(struct sk_buff *skb)
-{
- int k;
- printk(KERN_DEBUG PFX "data:");
- for (k = 0; k < skb->len; k++)
- printk(" %02x", (unsigned int)skb->data[k]);
- printk("\n");
-}
-#else
-#define dnet_print_skb(skb) do {} while (0)
-#endif
-
-static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-
- struct dnet *bp = netdev_priv(dev);
- unsigned int i, tx_cmd, wrsz;
- unsigned long flags;
- unsigned int *bufp;
- u32 irq_enable;
-
- dnet_readl(bp, TX_STATUS);
-
- pr_debug("start_xmit: len %u head %p data %p\n",
- skb->len, skb->head, skb->data);
- dnet_print_skb(skb);
-
- spin_lock_irqsave(&bp->lock, flags);
-
- dnet_readl(bp, TX_STATUS);
-
- bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
- wrsz = (u32) skb->len + 3;
- wrsz += ((unsigned long) skb->data) & 0x3;
- wrsz >>= 2;
- tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;
-
- /* check if there is enough room for the current frame */
- if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
- for (i = 0; i < wrsz; i++)
- dnet_writel(bp, *bufp++, TX_DATA_FIFO);
-
- /*
- * inform MAC that a packet's written and ready to be
- * shipped out
- */
- dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
- }
-
- if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
- netif_stop_queue(dev);
- dnet_readl(bp, INTR_SRC);
- irq_enable = dnet_readl(bp, INTR_ENB);
- irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
- dnet_writel(bp, irq_enable, INTR_ENB);
- }
-
- skb_tx_timestamp(skb);
-
- spin_unlock_irqrestore(&bp->lock, flags);
-
- /* free the buffer */
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
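/*
 * The PIO copy arithmetic above, illustrated for a hypothetical 60-byte
 * frame whose skb->data sits 2 bytes past a word boundary: bufp is
 * rounded down to the previous 32-bit boundary, wrsz = (60 + 3 + 2) >> 2
 * = 16 whole words are pushed into the data FIFO, and
 * tx_cmd = (2 << 16) | 60 tells the MAC to skip the two leading pad
 * bytes and transmit exactly 60 bytes.
 */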
-
-static void dnet_reset_hw(struct dnet *bp)
-{
- /* put ts_mac in IDLE state i.e. disable rx/tx */
- dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);
-
- /*
- * RX FIFO almost full threshold: only cmd FIFO almost full is
- * implemented for RX side
- */
- dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
- /*
- * TX FIFO almost empty threshold: only data FIFO almost empty
- * is implemented for TX side
- */
- dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);
-
- /* flush rx/tx fifos */
- dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
- SYS_CTL);
- msleep(1);
- dnet_writel(bp, 0, SYS_CTL);
-}
-
-static void dnet_init_hw(struct dnet *bp)
-{
- u32 config;
-
- dnet_reset_hw(bp);
- __dnet_set_hwaddr(bp);
-
- config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);
-
- if (bp->dev->flags & IFF_PROMISC)
- /* Copy All Frames */
- config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
- if (!(bp->dev->flags & IFF_BROADCAST))
- /* No BroadCast */
- config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;
-
- config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
- DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
- DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
- DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;
-
- dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);
-
- /* clear irq before enabling them */
- config = dnet_readl(bp, INTR_SRC);
-
- /* enable RX/TX interrupt, recv packet ready interrupt */
- dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
- DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
- DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
- DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
- DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
-}
-
-static int dnet_open(struct net_device *dev)
-{
- struct dnet *bp = netdev_priv(dev);
-
-	/* if the PHY is not yet registered, retry later */
- if (!dev->phydev)
- return -EAGAIN;
-
- napi_enable(&bp->napi);
- dnet_init_hw(bp);
-
- phy_start_aneg(dev->phydev);
-
- /* schedule a link state check */
- phy_start(dev->phydev);
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-static int dnet_close(struct net_device *dev)
-{
- struct dnet *bp = netdev_priv(dev);
-
- netif_stop_queue(dev);
- napi_disable(&bp->napi);
-
- if (dev->phydev)
- phy_stop(dev->phydev);
-
- dnet_reset_hw(bp);
- netif_carrier_off(dev);
-
- return 0;
-}
-
-static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
-{
- pr_debug("%s\n", __func__);
- pr_debug("----------------------------- RX statistics "
- "-------------------------------\n");
- pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
- pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
- pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
- pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
- pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
- pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
- pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
- pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
- pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
- pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
- pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
- pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
- pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
- pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
- pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
- pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
- pr_debug("----------------------------- TX statistics "
- "-------------------------------\n");
- pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
- pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
- pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
- pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
- pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
- pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
- pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
- pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
-}
-
-static struct net_device_stats *dnet_get_stats(struct net_device *dev)
-{
-
- struct dnet *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &dev->stats;
- struct dnet_stats *hwstat = &bp->hw_stats;
-
- /* read stats from hardware */
- dnet_update_stats(bp);
-
- /* Convert HW stats into netdevice stats */
- nstat->rx_errors = (hwstat->rx_len_chk_err +
- hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
-			    /* ignore IPG violation error
-			    hwstat->rx_ipg_viol + */
- hwstat->rx_crc_err +
- hwstat->rx_pre_shrink +
- hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
- nstat->tx_errors = hwstat->tx_bad_fcs;
- nstat->rx_length_errors = (hwstat->rx_len_chk_err +
- hwstat->rx_lng_frm +
- hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
- nstat->rx_crc_errors = hwstat->rx_crc_err;
- nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
- nstat->rx_packets = hwstat->rx_ok_pkt;
- nstat->tx_packets = (hwstat->tx_unicast +
- hwstat->tx_multicast + hwstat->tx_brdcast);
- nstat->rx_bytes = hwstat->rx_byte;
- nstat->tx_bytes = hwstat->tx_byte;
- nstat->multicast = hwstat->rx_multicast;
- nstat->rx_missed_errors = hwstat->rx_pkt_ignr;
-
- dnet_print_pretty_hwstats(hwstat);
-
- return nstat;
-}
-
-static void dnet_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strscpy(info->driver, DRV_NAME, sizeof(info->driver));
- strscpy(info->bus_info, "0", sizeof(info->bus_info));
-}
-
-static const struct ethtool_ops dnet_ethtool_ops = {
- .get_drvinfo = dnet_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_ts_info = ethtool_op_get_ts_info,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-static const struct net_device_ops dnet_netdev_ops = {
- .ndo_open = dnet_open,
- .ndo_stop = dnet_close,
- .ndo_get_stats = dnet_get_stats,
- .ndo_start_xmit = dnet_start_xmit,
- .ndo_eth_ioctl = phy_do_ioctl_running,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int dnet_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct net_device *dev;
- struct dnet *bp;
- struct phy_device *phydev;
- int err;
- unsigned int irq;
-
- irq = platform_get_irq(pdev, 0);
-
- dev = alloc_etherdev(sizeof(*bp));
- if (!dev)
- return -ENOMEM;
-
- /* TODO: Actually, we have some interesting features... */
- dev->features |= 0;
-
- bp = netdev_priv(dev);
- bp->dev = dev;
-
- platform_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- spin_lock_init(&bp->lock);
-
- bp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
- if (IS_ERR(bp->regs)) {
- err = PTR_ERR(bp->regs);
- goto err_out_free_dev;
- }
-
- dev->irq = irq;
- err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
- if (err) {
- dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
- irq, err);
- goto err_out_free_dev;
- }
-
- dev->netdev_ops = &dnet_netdev_ops;
- netif_napi_add(dev, &bp->napi, dnet_poll);
- dev->ethtool_ops = &dnet_ethtool_ops;
-
- dev->base_addr = (unsigned long)bp->regs;
-
- bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;
-
- dnet_get_hwaddr(bp);
-
- if (!is_valid_ether_addr(dev->dev_addr)) {
- /* choose a random ethernet address */
- eth_hw_addr_random(dev);
- __dnet_set_hwaddr(bp);
- }
-
- err = register_netdev(dev);
- if (err) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_free_irq;
- }
-
- /* register the PHY board fixup (for Marvell 88E1111) */
- err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
- dnet_phy_marvell_fixup);
- /* we can live without it, so just issue a warning */
- if (err)
- dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");
-
- err = dnet_mii_init(bp);
- if (err)
- goto err_out_unregister_netdev;
-
- dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
- bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr);
- dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
- (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
- (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
- (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
- (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
- phydev = dev->phydev;
- phy_attached_info(phydev);
-
- return 0;
-
-err_out_unregister_netdev:
- unregister_netdev(dev);
-err_out_free_irq:
- free_irq(dev->irq, dev);
-err_out_free_dev:
- free_netdev(dev);
- return err;
-}
-
-static void dnet_remove(struct platform_device *pdev)
-{
-
- struct net_device *dev;
- struct dnet *bp;
-
- dev = platform_get_drvdata(pdev);
-
- if (dev) {
- bp = netdev_priv(dev);
- if (dev->phydev)
- phy_disconnect(dev->phydev);
- mdiobus_unregister(bp->mii_bus);
- mdiobus_free(bp->mii_bus);
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- free_netdev(dev);
- }
-}
-
-static struct platform_driver dnet_driver = {
- .probe = dnet_probe,
- .remove = dnet_remove,
- .driver = {
- .name = "dnet",
- },
-};
-
-module_platform_driver(dnet_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Dave DNET Ethernet driver");
-MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
- "Matteo Vit <matteo.vit@dave.eu>");
diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h
deleted file mode 100644
index 030724484b49..000000000000
--- a/drivers/net/ethernet/dnet.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Dave DNET Ethernet Controller driver
- *
- * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
- */
-#ifndef _DNET_H
-#define _DNET_H
-
-#define DRV_NAME "dnet"
-#define PFX DRV_NAME ": "
-
-/* Register access macros */
-#define dnet_writel(port, value, reg) \
- writel((value), (port)->regs + DNET_##reg)
-#define dnet_readl(port, reg) readl((port)->regs + DNET_##reg)
-
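/*
 * Example expansion: dnet_readl(bp, RX_FIFO_WCNT) becomes
 * readl(bp->regs + DNET_RX_FIFO_WCNT); the register name passed as the
 * second argument is token-pasted into the DNET_ offset namespace.
 */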
-/* ALL DNET FIFO REGISTERS */
-#define DNET_RX_LEN_FIFO 0x000 /* RX_LEN_FIFO */
-#define DNET_RX_DATA_FIFO 0x004 /* RX_DATA_FIFO */
-#define DNET_TX_LEN_FIFO 0x008 /* TX_LEN_FIFO */
-#define DNET_TX_DATA_FIFO 0x00C /* TX_DATA_FIFO */
-
-/* ALL DNET CONTROL/STATUS REGISTERS OFFSETS */
-#define DNET_VERCAPS 0x100 /* VERCAPS */
-#define DNET_INTR_SRC 0x104 /* INTR_SRC */
-#define DNET_INTR_ENB 0x108 /* INTR_ENB */
-#define DNET_RX_STATUS 0x10C /* RX_STATUS */
-#define DNET_TX_STATUS 0x110 /* TX_STATUS */
-#define DNET_RX_FRAMES_CNT 0x114 /* RX_FRAMES_CNT */
-#define DNET_TX_FRAMES_CNT 0x118 /* TX_FRAMES_CNT */
-#define DNET_RX_FIFO_TH 0x11C /* RX_FIFO_TH */
-#define DNET_TX_FIFO_TH 0x120 /* TX_FIFO_TH */
-#define DNET_SYS_CTL 0x124 /* SYS_CTL */
-#define DNET_PAUSE_TMR 0x128 /* PAUSE_TMR */
-#define DNET_RX_FIFO_WCNT 0x12C /* RX_FIFO_WCNT */
-#define DNET_TX_FIFO_WCNT 0x130 /* TX_FIFO_WCNT */
-
-/* ALL DNET MAC REGISTERS */
-#define DNET_MACREG_DATA 0x200 /* Mac-Reg Data */
-#define DNET_MACREG_ADDR 0x204 /* Mac-Reg Addr */
-
-/* ALL DNET RX STATISTICS COUNTERS */
-#define DNET_RX_PKT_IGNR_CNT 0x300
-#define DNET_RX_LEN_CHK_ERR_CNT 0x304
-#define DNET_RX_LNG_FRM_CNT 0x308
-#define DNET_RX_SHRT_FRM_CNT 0x30C
-#define DNET_RX_IPG_VIOL_CNT 0x310
-#define DNET_RX_CRC_ERR_CNT 0x314
-#define DNET_RX_OK_PKT_CNT 0x318
-#define DNET_RX_CTL_FRM_CNT 0x31C
-#define DNET_RX_PAUSE_FRM_CNT 0x320
-#define DNET_RX_MULTICAST_CNT 0x324
-#define DNET_RX_BROADCAST_CNT 0x328
-#define DNET_RX_VLAN_TAG_CNT 0x32C
-#define DNET_RX_PRE_SHRINK_CNT 0x330
-#define DNET_RX_DRIB_NIB_CNT 0x334
-#define DNET_RX_UNSUP_OPCD_CNT 0x338
-#define DNET_RX_BYTE_CNT 0x33C
-
-/* DNET TX STATISTICS COUNTERS */
-#define DNET_TX_UNICAST_CNT 0x400
-#define DNET_TX_PAUSE_FRM_CNT 0x404
-#define DNET_TX_MULTICAST_CNT 0x408
-#define DNET_TX_BRDCAST_CNT 0x40C
-#define DNET_TX_VLAN_TAG_CNT 0x410
-#define DNET_TX_BAD_FCS_CNT 0x414
-#define DNET_TX_JUMBO_CNT 0x418
-#define DNET_TX_BYTE_CNT 0x41C
-
-/* SOME INTERNAL MAC-CORE REGISTER */
-#define DNET_INTERNAL_MODE_REG 0x0
-#define DNET_INTERNAL_RXTX_CONTROL_REG 0x2
-#define DNET_INTERNAL_MAX_PKT_SIZE_REG 0x4
-#define DNET_INTERNAL_IGP_REG 0x8
-#define DNET_INTERNAL_MAC_ADDR_0_REG 0xa
-#define DNET_INTERNAL_MAC_ADDR_1_REG 0xc
-#define DNET_INTERNAL_MAC_ADDR_2_REG 0xe
-#define DNET_INTERNAL_TX_RX_STS_REG 0x12
-#define DNET_INTERNAL_GMII_MNG_CTL_REG 0x14
-#define DNET_INTERNAL_GMII_MNG_DAT_REG 0x16
-
-#define DNET_INTERNAL_GMII_MNG_CMD_FIN (1 << 14)
-
-#define DNET_INTERNAL_WRITE (1 << 31)
-
-/* MAC-CORE REGISTER FIELDS */
-
-/* MAC-CORE MODE REGISTER FIELDS */
-#define DNET_INTERNAL_MODE_GBITEN (1 << 0)
-#define DNET_INTERNAL_MODE_FCEN (1 << 1)
-#define DNET_INTERNAL_MODE_RXEN (1 << 2)
-#define DNET_INTERNAL_MODE_TXEN (1 << 3)
-
-/* MAC-CORE RXTX CONTROL REGISTER FIELDS */
-#define DNET_INTERNAL_RXTX_CONTROL_RXSHORTFRAME (1 << 8)
-#define DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST (1 << 7)
-#define DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST (1 << 4)
-#define DNET_INTERNAL_RXTX_CONTROL_RXPAUSE (1 << 3)
-#define DNET_INTERNAL_RXTX_CONTROL_DISTXFCS (1 << 2)
-#define DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS (1 << 1)
-#define DNET_INTERNAL_RXTX_CONTROL_ENPROMISC (1 << 0)
-#define DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL (1 << 6)
-#define DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP (1 << 5)
-
-/* SYSTEM CONTROL REGISTER FIELDS */
-#define DNET_SYS_CTL_IGNORENEXTPKT (1 << 0)
-#define DNET_SYS_CTL_SENDPAUSE (1 << 2)
-#define DNET_SYS_CTL_RXFIFOFLUSH (1 << 3)
-#define DNET_SYS_CTL_TXFIFOFLUSH (1 << 4)
-
-/* TX STATUS REGISTER FIELDS */
-#define DNET_TX_STATUS_FIFO_ALMOST_EMPTY (1 << 2)
-#define DNET_TX_STATUS_FIFO_ALMOST_FULL (1 << 1)
-
-/* INTERRUPT SOURCE REGISTER FIELDS */
-#define DNET_INTR_SRC_TX_PKTSENT (1 << 0)
-#define DNET_INTR_SRC_TX_FIFOAF (1 << 1)
-#define DNET_INTR_SRC_TX_FIFOAE (1 << 2)
-#define DNET_INTR_SRC_TX_DISCFRM (1 << 3)
-#define DNET_INTR_SRC_TX_FIFOFULL (1 << 4)
-#define DNET_INTR_SRC_RX_CMDFIFOAF (1 << 8)
-#define DNET_INTR_SRC_RX_CMDFIFOFF (1 << 9)
-#define DNET_INTR_SRC_RX_DATAFIFOFF (1 << 10)
-#define DNET_INTR_SRC_TX_SUMMARY (1 << 16)
-#define DNET_INTR_SRC_RX_SUMMARY (1 << 17)
-#define DNET_INTR_SRC_PHY (1 << 19)
-
-/* INTERRUPT ENABLE REGISTER FIELDS */
-#define DNET_INTR_ENB_TX_PKTSENT (1 << 0)
-#define DNET_INTR_ENB_TX_FIFOAF (1 << 1)
-#define DNET_INTR_ENB_TX_FIFOAE (1 << 2)
-#define DNET_INTR_ENB_TX_DISCFRM (1 << 3)
-#define DNET_INTR_ENB_TX_FIFOFULL (1 << 4)
-#define DNET_INTR_ENB_RX_PKTRDY (1 << 8)
-#define DNET_INTR_ENB_RX_FIFOAF (1 << 9)
-#define DNET_INTR_ENB_RX_FIFOERR (1 << 10)
-#define DNET_INTR_ENB_RX_ERROR (1 << 11)
-#define DNET_INTR_ENB_RX_FIFOFULL (1 << 12)
-#define DNET_INTR_ENB_RX_FIFOAE (1 << 13)
-#define DNET_INTR_ENB_TX_SUMMARY (1 << 16)
-#define DNET_INTR_ENB_RX_SUMMARY (1 << 17)
-#define DNET_INTR_ENB_GLOBAL_ENABLE (1 << 18)
-
-/* default values:
- * almost empty = less than one full sized ethernet frame (no jumbo)
- *   inside the fifo
- * almost full  = can write less than one full sized ethernet frame
- *   (no jumbo) inside the fifo
- */
-#define DNET_CFG_TX_FIFO_FULL_THRES 25
-#define DNET_CFG_RX_FIFO_FULL_THRES 20
-
-/*
- * Capabilities. Used by the driver to know the capabilities that the ethernet
- * controller inside the FPGA has.
- */
-
-#define DNET_HAS_MDIO (1 << 0)
-#define DNET_HAS_IRQ (1 << 1)
-#define DNET_HAS_GIGABIT (1 << 2)
-#define DNET_HAS_DMA (1 << 3)
-
-#define DNET_HAS_MII (1 << 4) /* or GMII */
-#define DNET_HAS_RMII (1 << 5) /* or RGMII */
-
-#define DNET_CAPS_MASK 0xFFFF
-
-#define DNET_FIFO_SIZE 1024 /* 1K x 32 bit */
-#define DNET_FIFO_TX_DATA_AF_TH (DNET_FIFO_SIZE - 384) /* 384 = 1536 / 4 */
-#define DNET_FIFO_TX_DATA_AE_TH 384
-
-#define DNET_FIFO_RX_CMD_AF_TH (1 << 16) /* just one frame inside the FIFO */
-
-/*
- * Hardware-collected statistics.
- */
-struct dnet_stats {
- u32 rx_pkt_ignr;
- u32 rx_len_chk_err;
- u32 rx_lng_frm;
- u32 rx_shrt_frm;
- u32 rx_ipg_viol;
- u32 rx_crc_err;
- u32 rx_ok_pkt;
- u32 rx_ctl_frm;
- u32 rx_pause_frm;
- u32 rx_multicast;
- u32 rx_broadcast;
- u32 rx_vlan_tag;
- u32 rx_pre_shrink;
- u32 rx_drib_nib;
- u32 rx_unsup_opcd;
- u32 rx_byte;
- u32 tx_unicast;
- u32 tx_pause_frm;
- u32 tx_multicast;
- u32 tx_brdcast;
- u32 tx_vlan_tag;
- u32 tx_bad_fcs;
- u32 tx_jumbo;
- u32 tx_byte;
-};
-
-struct dnet {
- void __iomem *regs;
- spinlock_t lock;
- struct platform_device *pdev;
- struct net_device *dev;
- struct dnet_stats hw_stats;
- unsigned int capabilities; /* read from FPGA */
- struct napi_struct napi;
-
- /* PHY stuff */
- struct mii_bus *mii_bus;
- unsigned int link;
- unsigned int speed;
- unsigned int duplex;
-};
-
-#endif /* _DNET_H */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 270ff9aab335..d2623e35de43 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -672,7 +672,7 @@ struct be_adapter {
struct be_error_recovery error_recovery;
};
-/* Used for defered FW config cmds. Add fields to this struct as reqd */
+/* Used for deferred FW config cmds. Add fields to this struct as reqd */
struct be_cmd_work {
struct work_struct work;
struct be_adapter *adapter;
@@ -700,19 +700,19 @@ struct be_cmd_work {
#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
/* Max number of EQs available for the function (NIC + RoCE (if enabled)) */
#define be_max_func_eqs(adapter) (adapter->res.max_evt_qs)
-/* Max number of EQs available avaialble only for NIC */
+/* Max number of EQs available only for NIC */
#define be_max_nic_eqs(adapter) (adapter->res.max_nic_evt_qs)
#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
#define be_max_pf_pool_rss_tables(adapter) \
(adapter->pool_res.max_rss_tables)
-/* Max irqs avaialble for NIC */
+/* Max irqs available for NIC */
#define be_max_irqs(adapter) \
(min_t(u16, be_max_nic_eqs(adapter), num_online_cpus()))
/* Max irqs *needed* for RX queues */
static inline u16 be_max_rx_irqs(struct be_adapter *adapter)
{
- /* If no RSS, need atleast one irq for def-RXQ */
+ /* If no RSS, need at least one irq for def-RXQ */
u16 num = max_t(u16, be_max_rss(adapter), 1);
return min_t(u16, num, be_max_irqs(adapter));
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8ed45bceb537..eab81e073e1e 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1941,7 +1941,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
return 0;
}
-/* Uses sycnhronous mcc */
+/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
u32 num, u32 domain)
{
@@ -2035,7 +2035,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
return __be_cmd_rx_filter(adapter, flags, value);
}
-/* Uses synchrounous mcc */
+/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
struct be_mcc_wrb *wrb;
@@ -2074,7 +2074,7 @@ err:
return status;
}
-/* Uses sycn mcc */
+/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
struct be_mcc_wrb *wrb;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 5e2d3ddb5d43..fcc298ce2c77 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1134,14 +1134,14 @@ struct be_cmd_resp_get_fw_version {
u8 fw_on_flash_version_string[FW_VER_LEN];
} __packed;
-/******************** Set Flow Contrl *******************/
+/******************** Set Flow Control *******************/
struct be_cmd_req_set_flow_control {
struct be_cmd_req_hdr hdr;
u16 tx_flow_control;
u16 rx_flow_control;
} __packed;
-/******************** Get Flow Contrl *******************/
+/******************** Get Flow Control *******************/
struct be_cmd_req_get_flow_control {
struct be_cmd_req_hdr hdr;
u32 rsvd;
@@ -2069,7 +2069,7 @@ struct be_cmd_resp_get_stats_v2 {
struct be_hw_stats_v2 hw_stats;
};
-/************** get fat capabilites *******************/
+/************** get fat capabilities *******************/
#define MAX_MODULES 27
#define MAX_MODES 4
#define MODE_UART 0
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f9216326bdfe..87dbbd5b7f4e 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -142,7 +142,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
* to HW.
*/
{DRVSTAT_RX_INFO(rx_post_fail)},
- /* Recevied packets dropped due to skb allocation failure */
+ /* Received packets dropped due to skb allocation failure */
{DRVSTAT_RX_INFO(rx_drops_no_skbs)},
/* Received packets dropped due to lack of available fetched buffers
* posted by the driver.
@@ -189,7 +189,7 @@ static const struct be_ethtool_stat et_tx_stats[] = {
{DRVSTAT_TX_INFO(tx_bytes)},
{DRVSTAT_TX_INFO(tx_pkts)},
{DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
- /* Number of skbs queued for trasmission by the driver */
+ /* Number of skbs queued for transmission by the driver */
{DRVSTAT_TX_INFO(tx_reqs)},
/* Number of times the TX queue was stopped due to lack
* of spaces in the TXQ.
@@ -1073,6 +1073,13 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
adapter->msg_enable = level;
}
+static u32 be_get_rx_ring_count(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->num_rx_qs;
+}
+
static int be_get_rxfh_fields(struct net_device *netdev,
struct ethtool_rxfh_fields *cmd)
{
@@ -1117,28 +1124,6 @@ static int be_get_rxfh_fields(struct net_device *netdev,
return 0;
}
-static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- if (!be_multi_rxq(adapter)) {
- dev_info(&adapter->pdev->dev,
- "ethtool::get_rxnfc: RX flow hashing is disabled\n");
- return -EINVAL;
- }
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_qs;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
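/*
 * The ETHTOOL_GRXRINGS case removed above is taken over by the new
 * .get_rx_ring_count ethtool operation (be_get_rx_ring_count() earlier
 * in this hunk); the be_multi_rxq() guard migrates into be_set_rxfh()
 * below. Roughly, the ethtool core now answers the ring-count query
 * itself, along the lines of this sketch (not the exact core code):
 *
 *	if (ops->get_rx_ring_count)
 *		info.data = ops->get_rx_ring_count(dev);
 */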
static int be_set_rxfh_fields(struct net_device *netdev,
const struct ethtool_rxfh_fields *cmd,
struct netlink_ext_ack *extack)
@@ -1222,7 +1207,7 @@ static void be_get_channels(struct net_device *netdev,
ch->tx_count = adapter->num_tx_qs - ch->combined_count;
ch->max_combined = be_max_qp_irqs(adapter);
- /* The user must create atleast one combined channel */
+ /* The user must create at least one combined channel */
ch->max_rx = be_max_rx_irqs(adapter) - 1;
ch->max_tx = be_max_tx_irqs(adapter) - 1;
}
@@ -1293,6 +1278,12 @@ static int be_set_rxfh(struct net_device *netdev,
u8 *hkey = rxfh->key;
u8 rsstable[RSS_INDIR_TABLE_LEN];
+ if (!be_multi_rxq(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "ethtool::set_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
+
/* We do not allow change in unsupported parameters */
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
@@ -1441,7 +1432,7 @@ const struct ethtool_ops be_ethtool_ops = {
.get_ethtool_stats = be_get_ethtool_stats,
.flash_device = be_do_flash,
.self_test = be_self_test,
- .get_rxnfc = be_get_rxnfc,
+ .get_rx_ring_count = be_get_rx_ring_count,
.get_rxfh_fields = be_get_rxfh_fields,
.set_rxfh_fields = be_set_rxfh_fields,
.get_rxfh_indir_size = be_get_rxfh_indir_size,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3476194f0855..42e83ff9c52f 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -16,7 +16,7 @@
* The software must write this register twice to post any command. First,
* it writes the register with hi=1 and the upper bits of the physical address
* for the MAILBOX structure. Software must poll the ready bit until this
- * is acknowledged. Then, sotware writes the register with hi=0 with the lower
+ * is acknowledged. Then, software writes the register with hi=0 with the lower
* bits in the address. It must poll the ready bit until the command is
* complete. Upon completion, the MAILBOX will contain a valid completion
* queue entry.
@@ -27,7 +27,7 @@
#define MPU_EP_CONTROL 0
-/********** MPU semphore: used for SH & BE *************/
+/********** MPU semaphore: used for SH & BE *************/
#define SLIPORT_SOFTRESET_OFFSET 0x5c /* CSR BAR offset */
#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
@@ -39,7 +39,7 @@
/* Soft Reset register masks */
#define SLIPORT_SOFTRESET_SR_MASK 0x00000080 /* SR bit */
-/* MPU semphore POST stage values */
+/* MPU semaphore POST stage values */
#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 995c159003d7..52e10467b3e4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -61,7 +61,7 @@ static const struct pci_device_id be_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
-/* Workqueue used by all functions for defering cmd calls to the adapter */
+/* Workqueue used by all functions for deferring cmd calls to the adapter */
static struct workqueue_struct *be_wq;
/* UE Status Low CSR */
@@ -1129,7 +1129,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
struct iphdr *ip;
/* For padded packets, BE HW modifies tot_len field in IP header
- * incorrecly when VLAN tag is inserted by HW.
+ * incorrectly when VLAN tag is inserted by HW.
* For padded packets, Lancer computes incorrect checksum.
*/
eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
@@ -2570,7 +2570,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
rxcp->vlanf = 0;
}
- /* As the compl has been parsed, reset it; we wont touch it again */
+ /* As the compl has been parsed, reset it; we won't touch it again */
compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
queue_tail_inc(&rxo->cq);
@@ -2729,7 +2729,7 @@ static struct be_tx_compl_info *be_tx_compl_get(struct be_adapter *adapter,
if (txcp->status) {
if (lancer_chip(adapter)) {
lancer_update_tx_err(txo, txcp->status);
- /* Reset the adapter incase of TSO,
+ /* Reset the adapter in case of TSO,
* SGE or Parity error
*/
if (txcp->status == LANCER_TX_COMP_LSO_ERR ||
@@ -3127,7 +3127,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
adapter->num_rss_qs =
min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
- /* We'll use RSS only if atleast 2 RSS rings are supported. */
+ /* We'll use RSS only if at least 2 RSS rings are supported. */
if (adapter->num_rss_qs < 2)
adapter->num_rss_qs = 0;
@@ -3169,7 +3169,7 @@ static irqreturn_t be_intx(int irq, void *dev)
/* IRQ is not expected when NAPI is scheduled as the EQ
* will not be armed.
* But, this can happen on Lancer INTx where it takes
- * a while to de-assert INTx or in BE2 where occasionaly
+ * a while to de-assert INTx or in BE2 where occasionally
* an interrupt may be raised even when EQ is unarmed.
* If NAPI is already scheduled, then counting & notifying
* events will orphan them.
@@ -4417,7 +4417,7 @@ static void be_setup_init(struct be_adapter *adapter)
/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
* However, this HW limitation is not exposed to the host via any SLI cmd.
* As a result, in the case of SRIOV and in particular multi-partition configs
- * the driver needs to calcuate a proportional share of RSS Tables per PF-pool
+ * the driver needs to calculate a proportional share of RSS Tables per PF-pool
* for distribution between the VFs. This self-imposed limit will determine the
* no: of VFs for which RSS can be enabled.
*/
@@ -4521,7 +4521,7 @@ static int be_get_resources(struct be_adapter *adapter)
if (status)
return status;
- /* If a deafault RXQ must be created, we'll use up one RSSQ*/
+	/* If a default RXQ must be created, we'll use up one RSSQ */
if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
!(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
res.max_rss_qs -= 1;
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index 228a638eae16..d11168278515 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -257,15 +257,19 @@ static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
}
}
+static u32 tsnep_ethtool_get_rx_ring_count(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->num_rx_queues;
+}
+
static int tsnep_ethtool_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
- return 0;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->rxnfc_count;
cmd->data = adapter->rxnfc_max;
@@ -469,6 +473,7 @@ const struct ethtool_ops tsnep_ethtool_ops = {
.get_sset_count = tsnep_ethtool_get_sset_count,
.get_rxnfc = tsnep_ethtool_get_rxnfc,
.set_rxnfc = tsnep_ethtool_set_rxnfc,
+ .get_rx_ring_count = tsnep_ethtool_get_rx_ring_count,
.get_channels = tsnep_ethtool_get_channels,
.get_ts_info = tsnep_ethtool_get_ts_info,
.get_coalesce = tsnep_ethtool_get_coalesce,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index a863f7841210..1e91e79c8134 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -33,6 +33,17 @@
#define DRV_NAME "ftgmac100"
+enum ftgmac100_mac_id {
+ FTGMAC100_FARADAY = 1,
+ FTGMAC100_AST2400,
+ FTGMAC100_AST2500,
+ FTGMAC100_AST2600
+};
+
+struct ftgmac100_match_data {
+ enum ftgmac100_mac_id mac_id;
+};
+
/* Arbitrary values, I am not sure the HW has limits */
#define MAX_RX_QUEUE_ENTRIES 1024
#define MAX_TX_QUEUE_ENTRIES 1024
@@ -66,6 +77,8 @@ struct ftgmac100 {
struct resource *res;
void __iomem *base;
+ enum ftgmac100_mac_id mac_id;
+
/* Rx ring */
unsigned int rx_q_entries;
struct ftgmac100_rxdes *rxdes;
@@ -1470,6 +1483,11 @@ static int ftgmac100_mii_probe(struct net_device *netdev)
phy_interface_t phy_intf;
int err;
+ if (!priv->mii_bus) {
+ dev_err(priv->dev, "No MDIO bus available\n");
+ return -ENODEV;
+ }
+
/* Default to RGMII. It's a gigabit part after all */
err = of_get_phy_mode(np, &phy_intf);
if (err)
@@ -1699,16 +1717,16 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
struct platform_device *pdev = to_platform_device(priv->dev);
struct device_node *np = pdev->dev.of_node;
struct device_node *mdio_np;
- int i, err = 0;
+ int err = 0;
u32 reg;
/* initialize mdio bus */
- priv->mii_bus = mdiobus_alloc();
+ priv->mii_bus = devm_mdiobus_alloc(priv->dev);
if (!priv->mii_bus)
return -EIO;
- if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac")) {
+ if (priv->mac_id == FTGMAC100_AST2400 ||
+ priv->mac_id == FTGMAC100_AST2500) {
/* The AST2600 has a separate MDIO controller */
/* For the AST2400 and AST2500 this driver only supports the
@@ -1727,24 +1745,16 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
priv->mii_bus->read = ftgmac100_mdiobus_read;
priv->mii_bus->write = ftgmac100_mdiobus_write;
- for (i = 0; i < PHY_MAX_ADDR; i++)
- priv->mii_bus->irq[i] = PHY_POLL;
-
mdio_np = of_get_child_by_name(np, "mdio");
- err = of_mdiobus_register(priv->mii_bus, mdio_np);
+ err = devm_of_mdiobus_register(priv->dev, priv->mii_bus, mdio_np);
+ of_node_put(mdio_np);
if (err) {
dev_err(priv->dev, "Cannot register MDIO bus!\n");
- goto err_register_mdiobus;
+ return err;
}
- of_node_put(mdio_np);
-
return 0;
-
-err_register_mdiobus:
- mdiobus_free(priv->mii_bus);
- return err;
}
static void ftgmac100_phy_disconnect(struct net_device *netdev)
@@ -1763,17 +1773,6 @@ static void ftgmac100_phy_disconnect(struct net_device *netdev)
fixed_phy_unregister(phydev);
}
-static void ftgmac100_destroy_mdio(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
-
- if (!priv->mii_bus)
- return;
-
- mdiobus_unregister(priv->mii_bus);
- mdiobus_free(priv->mii_bus);
-}
-
static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
{
if (unlikely(nd->state != ncsi_dev_state_functional))
@@ -1788,13 +1787,10 @@ static int ftgmac100_setup_clk(struct ftgmac100 *priv)
struct clk *clk;
int rc;
- clk = devm_clk_get(priv->dev, NULL /* MACCLK */);
+ clk = devm_clk_get_enabled(priv->dev, NULL /* MACCLK */);
if (IS_ERR(clk))
return PTR_ERR(clk);
priv->clk = clk;
- rc = clk_prepare_enable(priv->clk);
- if (rc)
- return rc;
/* Aspeed specifies a 100MHz clock is required for up to
* 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz
@@ -1803,21 +1799,17 @@ static int ftgmac100_setup_clk(struct ftgmac100 *priv)
rc = clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
FTGMAC_100MHZ);
if (rc)
- goto cleanup_clk;
+ return rc;
/* RCLK is for RMII, typically used for NCSI. Optional because it's not
* necessary if it's the AST2400 MAC, or the MAC is configured for
* RGMII, or the controller is not an ASPEED-based controller.
*/
- priv->rclk = devm_clk_get_optional(priv->dev, "RCLK");
- rc = clk_prepare_enable(priv->rclk);
- if (!rc)
- return 0;
-
-cleanup_clk:
- clk_disable_unprepare(priv->clk);
+ priv->rclk = devm_clk_get_optional_enabled(priv->dev, "RCLK");
+ if (IS_ERR(priv->rclk))
+ return PTR_ERR(priv->rclk);
- return rc;
+ return 0;
}
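/*
 * devm_clk_get_enabled() above is devm_clk_get() plus
 * clk_prepare_enable(), with an automatic clk_disable_unprepare() when
 * the device is unbound; the _optional_ variant additionally returns a
 * NULL clock (not an error) when none is described. That is why the
 * cleanup_clk unwinding here, and the clk_disable_unprepare() calls in
 * the error path and in remove(), could all be dropped.
 */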
static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
@@ -1833,16 +1825,121 @@ static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
return ret;
}
+static int ftgmac100_probe_ncsi(struct net_device *netdev,
+ struct ftgmac100 *priv,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct phy_device *phydev;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_NET_NCSI)) {
+ dev_err(&pdev->dev, "NCSI stack not enabled\n");
+ return -EINVAL;
+ }
+
+ dev_info(&pdev->dev, "Using NCSI interface\n");
+ priv->use_ncsi = true;
+ priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
+ if (!priv->ndev)
+ return -EINVAL;
+
+ phydev = fixed_phy_register(&ncsi_phy_status, np);
+ if (IS_ERR(phydev)) {
+ dev_err(&pdev->dev, "failed to register fixed PHY device\n");
+ err = PTR_ERR(phydev);
+ goto err_register_ndev;
+ }
+ err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
+ PHY_INTERFACE_MODE_RMII);
+ if (err) {
+ dev_err(&pdev->dev, "Connecting PHY failed\n");
+ goto err_register_phy;
+ }
+
+ return 0;
+err_register_phy:
+ fixed_phy_unregister(phydev);
+err_register_ndev:
+ if (priv->ndev)
+ ncsi_unregister_dev(priv->ndev);
+ priv->ndev = NULL;
+ return err;
+}
+
+static int ftgmac100_probe_dt(struct net_device *netdev,
+ struct platform_device *pdev,
+ struct ftgmac100 *priv,
+ struct device_node *np)
+{
+ struct phy_device *phy;
+ int err;
+
+ if (of_get_property(np, "use-ncsi", NULL))
+ return ftgmac100_probe_ncsi(netdev, priv, pdev);
+
+ if (of_phy_is_fixed_link(np) ||
+ of_get_property(np, "phy-handle", NULL)) {
+ /* Support "mdio"/"phy" child nodes for ast2400/2500
+ * with an embedded MDIO controller. Automatically
+ * scan the DTS for available PHYs and register
+ * them. 2600 has an independent MDIO controller, not
+ * part of the MAC.
+ */
+ phy = of_phy_get_and_connect(priv->netdev, np,
+ &ftgmac100_adjust_link);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to connect to phy\n");
+ return -EINVAL;
+ }
+
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.rst)
+ */
+ phy_support_asym_pause(phy);
+
+ /* Display what we found */
+ phy_attached_info(phy);
+ return 0;
+ }
+
+ if (!ftgmac100_has_child_node(np, "mdio")) {
+ /* Support legacy ASPEED devicetree descriptions that
+		 * describe a MAC with an embedded MDIO controller but
+ * have no "mdio" child node. Automatically scan the
+ * MDIO bus for available PHYs.
+ */
+ err = ftgmac100_mii_probe(netdev);
+ if (err) {
+ dev_err(priv->dev, "MII probe failed!\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int ftgmac100_probe(struct platform_device *pdev)
{
+ const struct ftgmac100_match_data *match_data;
+ enum ftgmac100_mac_id mac_id;
struct resource *res;
int irq;
struct net_device *netdev;
- struct phy_device *phydev;
struct ftgmac100 *priv;
struct device_node *np;
int err = 0;
+ np = pdev->dev.of_node;
+ if (np) {
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
+ return -EINVAL;
+ mac_id = match_data->mac_id;
+ } else {
+ mac_id = FTGMAC100_FARADAY;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
@@ -1852,11 +1949,9 @@ static int ftgmac100_probe(struct platform_device *pdev)
return irq;
/* setup net_device */
- netdev = alloc_etherdev(sizeof(*priv));
- if (!netdev) {
- err = -ENOMEM;
- goto err_alloc_etherdev;
- }
+ netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
+ if (!netdev)
+ return -ENOMEM;
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -1870,22 +1965,22 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv = netdev_priv(netdev);
priv->netdev = netdev;
priv->dev = &pdev->dev;
+ priv->mac_id = mac_id;
INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
/* map io memory */
- priv->res = request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev));
+ priv->res = devm_request_mem_region(&pdev->dev,
+ res->start, resource_size(res),
+ dev_name(&pdev->dev));
if (!priv->res) {
dev_err(&pdev->dev, "Could not reserve memory region\n");
- err = -ENOMEM;
- goto err_req_mem;
+ return -ENOMEM;
}
- priv->base = ioremap(res->start, resource_size(res));
+ priv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!priv->base) {
dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
- err = -EIO;
- goto err_ioremap;
+ return -EIO;
}
netdev->irq = irq;
@@ -1898,12 +1993,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* MAC address from chip or random one */
err = ftgmac100_initial_mac(priv);
if (err)
- goto err_phy_connect;
+ return err;
- np = pdev->dev.of_node;
- if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac") ||
- of_device_is_compatible(np, "aspeed,ast2600-mac"))) {
+ if (priv->mac_id == FTGMAC100_AST2400 ||
+ priv->mac_id == FTGMAC100_AST2500 ||
+ priv->mac_id == FTGMAC100_AST2600) {
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
@@ -1912,100 +2006,37 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->txdes0_edotr_mask = BIT(15);
}
- if (np && of_get_property(np, "use-ncsi", NULL)) {
- if (!IS_ENABLED(CONFIG_NET_NCSI)) {
- dev_err(&pdev->dev, "NCSI stack not enabled\n");
- err = -EINVAL;
- goto err_phy_connect;
- }
-
- dev_info(&pdev->dev, "Using NCSI interface\n");
- priv->use_ncsi = true;
- priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
- if (!priv->ndev) {
- err = -EINVAL;
- goto err_phy_connect;
- }
-
- phydev = fixed_phy_register(&ncsi_phy_status, np);
- if (IS_ERR(phydev)) {
- dev_err(&pdev->dev, "failed to register fixed PHY device\n");
- err = PTR_ERR(phydev);
- goto err_phy_connect;
- }
- err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
- PHY_INTERFACE_MODE_RMII);
- if (err) {
- dev_err(&pdev->dev, "Connecting PHY failed\n");
- goto err_phy_connect;
- }
- } else if (np && (of_phy_is_fixed_link(np) ||
- of_get_property(np, "phy-handle", NULL))) {
- struct phy_device *phy;
-
- /* Support "mdio"/"phy" child nodes for ast2400/2500 with
- * an embedded MDIO controller. Automatically scan the DTS for
- * available PHYs and register them.
- */
- if (of_get_property(np, "phy-handle", NULL) &&
- (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
- err = ftgmac100_setup_mdio(netdev);
- if (err)
- goto err_setup_mdio;
- }
-
- phy = of_phy_get_and_connect(priv->netdev, np,
- &ftgmac100_adjust_link);
- if (!phy) {
- dev_err(&pdev->dev, "Failed to connect to phy\n");
- err = -EINVAL;
- goto err_phy_connect;
- }
-
- /* Indicate that we support PAUSE frames (see comment in
- * Documentation/networking/phy.rst)
- */
- phy_support_asym_pause(phy);
-
- /* Display what we found */
- phy_attached_info(phy);
- } else if (np && !ftgmac100_has_child_node(np, "mdio")) {
- /* Support legacy ASPEED devicetree descriptions that decribe a
- * MAC with an embedded MDIO controller but have no "mdio"
- * child node. Automatically scan the MDIO bus for available
- * PHYs.
- */
- priv->use_ncsi = false;
+ if (priv->mac_id == FTGMAC100_FARADAY ||
+ priv->mac_id == FTGMAC100_AST2400 ||
+ priv->mac_id == FTGMAC100_AST2500) {
err = ftgmac100_setup_mdio(netdev);
if (err)
- goto err_setup_mdio;
-
- err = ftgmac100_mii_probe(netdev);
- if (err) {
- dev_err(priv->dev, "MII probe failed!\n");
- goto err_ncsi_dev;
- }
+ return err;
+ }
+ if (np) {
+ err = ftgmac100_probe_dt(netdev, pdev, priv, np);
+ if (err)
+ goto err;
}
priv->rst = devm_reset_control_get_optional_exclusive(priv->dev, NULL);
if (IS_ERR(priv->rst)) {
err = PTR_ERR(priv->rst);
- goto err_phy_connect;
+ goto err;
}
if (priv->is_aspeed) {
err = ftgmac100_setup_clk(priv);
if (err)
- goto err_phy_connect;
-
- /* Disable ast2600 problematic HW arbitration */
- if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
- iowrite32(FTGMAC100_TM_DEFAULT,
- priv->base + FTGMAC100_OFFSET_TM);
+ goto err;
}
+ /* Disable ast2600 problematic HW arbitration */
+ if (priv->mac_id == FTGMAC100_AST2600)
+ iowrite32(FTGMAC100_TM_DEFAULT,
+ priv->base + FTGMAC100_OFFSET_TM);
+
/* Default ring sizes */
priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;
@@ -2019,11 +2050,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
/* AST2400 doesn't have working HW checksum generation */
- if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
+ if (priv->mac_id == FTGMAC100_AST2400)
netdev->hw_features &= ~NETIF_F_HW_CSUM;
/* AST2600 tx checksum with NCSI is broken */
- if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ if (priv->use_ncsi && priv->mac_id == FTGMAC100_AST2600)
netdev->hw_features &= ~NETIF_F_HW_CSUM;
if (np && of_get_property(np, "no-hw-checksum", NULL))
@@ -2034,29 +2065,17 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
- goto err_register_netdev;
+ goto err;
}
netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
return 0;
-err_register_netdev:
- clk_disable_unprepare(priv->rclk);
- clk_disable_unprepare(priv->clk);
-err_phy_connect:
+err:
ftgmac100_phy_disconnect(netdev);
-err_ncsi_dev:
if (priv->ndev)
ncsi_unregister_dev(priv->ndev);
- ftgmac100_destroy_mdio(netdev);
-err_setup_mdio:
- iounmap(priv->base);
-err_ioremap:
- release_resource(priv->res);
-err_req_mem:
- free_netdev(netdev);
-err_alloc_etherdev:
return err;
}
@@ -2072,26 +2091,39 @@ static void ftgmac100_remove(struct platform_device *pdev)
ncsi_unregister_dev(priv->ndev);
unregister_netdev(netdev);
- clk_disable_unprepare(priv->rclk);
- clk_disable_unprepare(priv->clk);
-
/* There's a small chance the reset task will have been re-queued,
* during stop, make sure it's gone before we free the structure.
*/
cancel_work_sync(&priv->reset_task);
ftgmac100_phy_disconnect(netdev);
- ftgmac100_destroy_mdio(netdev);
+}
- iounmap(priv->base);
- release_resource(priv->res);
+static const struct ftgmac100_match_data ftgmac100_match_data_ast2400 = {
+ .mac_id = FTGMAC100_AST2400
+};
- netif_napi_del(&priv->napi);
- free_netdev(netdev);
-}
+static const struct ftgmac100_match_data ftgmac100_match_data_ast2500 = {
+ .mac_id = FTGMAC100_AST2500
+};
+
+static const struct ftgmac100_match_data ftgmac100_match_data_ast2600 = {
+ .mac_id = FTGMAC100_AST2600
+};
+
+static const struct ftgmac100_match_data ftgmac100_match_data_faraday = {
+ .mac_id = FTGMAC100_FARADAY
+};
static const struct of_device_id ftgmac100_of_match[] = {
- { .compatible = "faraday,ftgmac100" },
+ { .compatible = "aspeed,ast2400-mac",
+ .data = &ftgmac100_match_data_ast2400},
+ { .compatible = "aspeed,ast2500-mac",
+ .data = &ftgmac100_match_data_ast2500 },
+ { .compatible = "aspeed,ast2600-mac",
+ .data = &ftgmac100_match_data_ast2600 },
+ { .compatible = "faraday,ftgmac100",
+ .data = &ftgmac100_match_data_faraday },
{ }
};
MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
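/*
 * With per-compatible match data, probe resolves the MAC variant once
 * instead of repeating of_device_is_compatible() string compares:
 *
 *	match_data = of_device_get_match_data(&pdev->dev);
 *	mac_id = match_data->mac_id;	// e.g. FTGMAC100_AST2500
 *
 * Non-DT platforms have no match entry and fall back to
 * FTGMAC100_FARADAY.
 */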
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index fd9a93d02f8e..7176803146f3 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -340,6 +340,7 @@ struct bufdesc_ex {
#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE 1024 /* Must be power of two */
#define TX_RING_MOD_MASK 511 /* for this to work */
+#define FEC_XSK_TX_BUDGET_MAX 256
#define BD_ENET_RX_INT 0x00800000
#define BD_ENET_RX_PTP ((ushort)0x0400)
@@ -528,6 +529,8 @@ enum fec_txbuf_type {
FEC_TXBUF_T_SKB,
FEC_TXBUF_T_XDP_NDO,
FEC_TXBUF_T_XDP_TX,
+ FEC_TXBUF_T_XSK_XMIT,
+ FEC_TXBUF_T_XSK_TX,
};
struct fec_tx_buffer {
@@ -539,6 +542,7 @@ struct fec_enet_priv_tx_q {
struct bufdesc_prop bd;
unsigned char *tx_bounce[TX_RING_SIZE];
struct fec_tx_buffer tx_buf[TX_RING_SIZE];
+ struct xsk_buff_pool *xsk_pool;
unsigned short tx_stop_threshold;
unsigned short tx_wake_threshold;
@@ -548,9 +552,16 @@ struct fec_enet_priv_tx_q {
dma_addr_t tso_hdrs_dma;
};
+union fec_rx_buffer {
+ void *buf_p;
+ struct page *page;
+ struct xdp_buff *xdp;
+};
+
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
- struct page *rx_buf[RX_RING_SIZE];
+ union fec_rx_buffer rx_buf[RX_RING_SIZE];
+ struct xsk_buff_pool *xsk_pool;
/* page_pool */
struct page_pool *page_pool;
@@ -643,6 +654,7 @@ struct fec_enet_private {
struct pm_qos_request pm_qos_req;
unsigned int tx_align;
+ unsigned int rx_shift;
/* hw interrupt coalesce */
unsigned int rx_pkts_itr;
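/*
 * With AF_XDP zero-copy in the mix, an RX slot may now hold either a
 * page_pool page or an XSK xdp_buff, which is what the new
 * union fec_rx_buffer expresses. A sketch of the intended
 * discrimination (an assumption mirroring common XSK-capable drivers,
 * not code from this patch; 'index' is a hypothetical ring index):
 *
 *	if (rxq->xsk_pool)
 *		xdp = rxq->rx_buf[index].xdp;	// zero-copy XSK buffer
 *	else
 *		page = rxq->rx_buf[index].page;	// page_pool page
 */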
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 797ef6899657..0d926bf18195 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -71,6 +71,7 @@
#include <net/page_pool/helpers.h>
#include <net/selftests.h>
#include <net/tso.h>
+#include <net/xdp_sock_drv.h>
#include <soc/imx/cpuidle.h>
#include "fec.h"
@@ -79,7 +80,7 @@ static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_set(struct net_device *ndev);
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
int cpu, struct xdp_buff *xdp,
- u32 dma_sync_len);
+ u32 dma_sync_len, int queue);
#define DRIVER_NAME "fec"
@@ -467,13 +468,13 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
static int
fec_enet_create_page_pool(struct fec_enet_private *fep,
- struct fec_enet_priv_rx_q *rxq, int size)
+ struct fec_enet_priv_rx_q *rxq)
{
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
struct page_pool_params pp_params = {
.order = fep->pagepool_order,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
- .pool_size = size,
+ .pool_size = rxq->bd.ring_size,
.nid = dev_to_node(&fep->pdev->dev),
.dev = &fep->pdev->dev,
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
@@ -489,23 +490,18 @@ fec_enet_create_page_pool(struct fec_enet_private *fep,
return err;
}
- err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
- if (err < 0)
- goto err_free_pp;
-
- err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
- rxq->page_pool);
- if (err)
- goto err_unregister_rxq;
-
return 0;
+}
-err_unregister_rxq:
- xdp_rxq_info_unreg(&rxq->xdp_rxq);
-err_free_pp:
- page_pool_destroy(rxq->page_pool);
- rxq->page_pool = NULL;
- return err;
+static void fec_txq_trigger_xmit(struct fec_enet_private *fep,
+ struct fec_enet_priv_tx_q *txq)
+{
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active))
+ writel(0, txq->bd.reg_desc_active);
}
static struct bufdesc *
@@ -717,12 +713,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
txq->bd.cur = bdp;
/* Trigger transmission start */
- if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active))
- writel(0, txq->bd.reg_desc_active);
+ fec_txq_trigger_xmit(fep, txq);
return 0;
}
@@ -913,12 +904,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
txq->bd.cur = bdp;
/* Trigger transmission start */
- if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active))
- writel(0, txq->bd.reg_desc_active);
+ fec_txq_trigger_xmit(fep, txq);
return 0;
@@ -1005,6 +991,13 @@ static void fec_enet_bd_init(struct net_device *dev)
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
else
bdp->cbd_sc = cpu_to_fec16(0);
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ }
+
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
}
@@ -1022,33 +1015,38 @@ static void fec_enet_bd_init(struct net_device *dev)
txq->bd.cur = bdp;
for (i = 0; i < txq->bd.ring_size; i++) {
+ struct page *page;
+
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = cpu_to_fec16(0);
- if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+
+ switch (txq->tx_buf[i].type) {
+ case FEC_TXBUF_T_SKB:
if (bdp->cbd_bufaddr &&
!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
- if (txq->tx_buf[i].buf_p)
- dev_kfree_skb_any(txq->tx_buf[i].buf_p);
- } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
- if (bdp->cbd_bufaddr)
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
-
- if (txq->tx_buf[i].buf_p)
- xdp_return_frame(txq->tx_buf[i].buf_p);
- } else {
- struct page *page = txq->tx_buf[i].buf_p;
-
- if (page)
- page_pool_put_page(pp_page_to_nmdesc(page)->pp,
- page, 0,
- false);
+ dev_kfree_skb_any(txq->tx_buf[i].buf_p);
+ break;
+ case FEC_TXBUF_T_XDP_NDO:
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+ xdp_return_frame(txq->tx_buf[i].buf_p);
+ break;
+ case FEC_TXBUF_T_XDP_TX:
+ page = txq->tx_buf[i].buf_p;
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0, false);
+ break;
+ case FEC_TXBUF_T_XSK_TX:
+ xsk_buff_free(txq->tx_buf[i].buf_p);
+ break;
+ default:
+ break;
}
txq->tx_buf[i].buf_p = NULL;
@@ -1335,7 +1333,9 @@ fec_restart(struct net_device *ndev)
static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
{
if (!(of_machine_is_compatible("fsl,imx8qm") ||
+ of_machine_is_compatible("fsl,imx8qp") ||
of_machine_is_compatible("fsl,imx8qxp") ||
+ of_machine_is_compatible("fsl,imx8dx") ||
of_machine_is_compatible("fsl,imx8dxl")))
return 0;
@@ -1479,27 +1479,102 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
hwtstamps->hwtstamp = ns_to_ktime(ns);
}
-static void
-fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
+static bool fec_enet_xsk_xmit(struct fec_enet_private *fep,
+ struct xsk_buff_pool *pool,
+ u32 queue)
{
- struct fec_enet_private *fep;
- struct xdp_frame *xdpf;
- struct bufdesc *bdp;
- unsigned short status;
- struct sk_buff *skb;
- struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ struct xdp_desc *xsk_desc = pool->tx_descs;
+ int cpu = smp_processor_id();
+ int free_bds, budget, batch;
struct netdev_queue *nq;
- int index = 0;
- int entries_free;
- struct page *page;
- int frame_len;
+ struct bufdesc *bdp;
+ dma_addr_t dma;
+ u32 estatus;
+ u16 status;
+ int i, j;
- fep = netdev_priv(ndev);
+ nq = netdev_get_tx_queue(fep->netdev, queue);
+ __netif_tx_lock(nq, cpu);
- txq = fep->tx_queue[queue_id];
- /* get next bdp of dirty_tx */
- nq = netdev_get_tx_queue(ndev, queue_id);
- bdp = txq->dirty_tx;
+ txq_trans_cond_update(nq);
+ free_bds = fec_enet_get_free_txdesc_num(txq);
+ if (!free_bds)
+ goto tx_unlock;
+
+ budget = min(free_bds, FEC_XSK_TX_BUDGET_MAX);
+ batch = xsk_tx_peek_release_desc_batch(pool, budget);
+ if (!batch)
+ goto tx_unlock;
+
+ bdp = txq->bd.cur;
+ for (i = 0; i < batch; i++) {
+ dma = xsk_buff_raw_get_dma(pool, xsk_desc[i].addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma, xsk_desc[i].len);
+
+ j = fec_enet_get_bd_index(bdp, &txq->bd);
+ txq->tx_buf[j].type = FEC_TXBUF_T_XSK_XMIT;
+ txq->tx_buf[j].buf_p = NULL;
+
+ status = fec16_to_cpu(bdp->cbd_sc);
+ status &= ~BD_ENET_TX_STATS;
+ status |= BD_ENET_TX_INTR | BD_ENET_TX_LAST;
+ bdp->cbd_datlen = cpu_to_fec16(xsk_desc[i].len);
+ bdp->cbd_bufaddr = cpu_to_fec32(dma);
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ estatus = BD_ENET_TX_INT;
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
+ }
+
+		/* Make sure the updates to the rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+ dma_wmb();
+
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ status |= BD_ENET_TX_READY | BD_ENET_TX_TC;
+ bdp->cbd_sc = cpu_to_fec16(status);
+ dma_wmb();
+
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+ txq->bd.cur = bdp;
+ }
+
+ /* Trigger transmission start */
+ fec_txq_trigger_xmit(fep, txq);
+
+ __netif_tx_unlock(nq);
+
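+	/* batch == budget means the pool may still hold descriptors;
+	 * report "not done" so the NAPI poller calls back in.
+	 */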
+ return batch < budget;
+
+tx_unlock:
+ __netif_tx_unlock(nq);
+
+ return true;
+}
+
+static int fec_enet_tx_queue(struct fec_enet_private *fep,
+ u16 queue, int budget)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(fep->netdev, queue);
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ struct net_device *ndev = fep->netdev;
+ struct bufdesc *bdp = txq->dirty_tx;
+ int index, frame_len, entries_free;
+ struct fec_tx_buffer *tx_buf;
+ unsigned short status;
+ struct sk_buff *skb;
+ struct page *page;
+ int xsk_cnt = 0;
/* get next bdp of dirty_tx */
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
@@ -1512,45 +1587,77 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
break;
index = fec_enet_get_bd_index(bdp, &txq->bd);
+ tx_buf = &txq->tx_buf[index];
+ frame_len = fec16_to_cpu(bdp->cbd_datlen);
- if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
- skb = txq->tx_buf[index].buf_p;
+ switch (tx_buf->type) {
+ case FEC_TXBUF_T_SKB:
if (bdp->cbd_bufaddr &&
!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
+ frame_len, DMA_TO_DEVICE);
+
bdp->cbd_bufaddr = cpu_to_fec32(0);
+ skb = tx_buf->buf_p;
if (!skb)
goto tx_buf_done;
- } else {
+
+ frame_len = skb->len;
+
+			/* NOTE: SKBTX_IN_PROGRESS being set does not imply that
+			 * we are the ones to timestamp the packet, so we still
+			 * need to check the timestamping-enabled flag.
+			 */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ fep->hwts_tx_en) && fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ struct skb_shared_hwtstamps shhwtstamps;
+
+ fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+ /* Free the sk buffer associated with this last transmit */
+ napi_consume_skb(skb, budget);
+ break;
+ case FEC_TXBUF_T_XDP_NDO:
/* Tx processing cannot call any XDP (or page pool) APIs if
* the "budget" is 0. Because NAPI is called with budget of
* 0 (such as netpoll) indicates we may be in an IRQ context,
* however, we can't use the page pool from IRQ context.
*/
if (unlikely(!budget))
- break;
-
- if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
- xdpf = txq->tx_buf[index].buf_p;
- if (bdp->cbd_bufaddr)
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
- } else {
- page = txq->tx_buf[index].buf_p;
- }
+ goto out;
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ frame_len, DMA_TO_DEVICE);
bdp->cbd_bufaddr = cpu_to_fec32(0);
- if (unlikely(!txq->tx_buf[index].buf_p)) {
- txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
- goto tx_buf_done;
- }
+ xdp_return_frame_rx_napi(tx_buf->buf_p);
+ break;
+ case FEC_TXBUF_T_XDP_TX:
+ if (unlikely(!budget))
+ goto out;
- frame_len = fec16_to_cpu(bdp->cbd_datlen);
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+ page = tx_buf->buf_p;
+			/* Pass dma_sync_size = 0: XDP_TX has already synced
+			 * the buffer for device.
+			 */
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
+ 0, true);
+ break;
+ case FEC_TXBUF_T_XSK_XMIT:
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+ xsk_cnt++;
+ break;
+ case FEC_TXBUF_T_XSK_TX:
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+ xsk_buff_free(tx_buf->buf_p);
+ break;
+ default:
+ break;
}
/* Check for errors. */
@@ -1570,11 +1677,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
ndev->stats.tx_carrier_errors++;
} else {
ndev->stats.tx_packets++;
-
- if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
- ndev->stats.tx_bytes += skb->len;
- else
- ndev->stats.tx_bytes += frame_len;
+ ndev->stats.tx_bytes += frame_len;
}
/* Deferred means some collisions occurred during transmit,
@@ -1583,33 +1686,9 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
if (status & BD_ENET_TX_DEF)
ndev->stats.collisions++;
- if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
- /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
- * are to time stamp the packet, so we still need to check time
- * stamping enabled flag.
- */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
- fep->hwts_tx_en) && fep->bufdesc_ex) {
- struct skb_shared_hwtstamps shhwtstamps;
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-
- fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
- skb_tstamp_tx(skb, &shhwtstamps);
- }
-
- /* Free the sk buffer associated with this last transmit */
- napi_consume_skb(skb, budget);
- } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
- xdp_return_frame_rx_napi(xdpf);
- } else { /* recycle pages of XDP_TX frames */
- /* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
- page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
- 0, true);
- }
-
- txq->tx_buf[index].buf_p = NULL;
+ tx_buf->buf_p = NULL;
/* restore default tx buffer type: FEC_TXBUF_T_SKB */
- txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+ tx_buf->type = FEC_TXBUF_T_SKB;
tx_buf_done:
/* Make sure the update to bdp and tx_buf are performed
@@ -1630,20 +1709,43 @@ tx_buf_done:
}
}
+out:
+
/* ERR006358: Keep the transmitter going */
if (bdp != txq->bd.cur &&
readl(txq->bd.reg_desc_active) == 0)
writel(0, txq->bd.reg_desc_active);
+
+ if (txq->xsk_pool) {
+ struct xsk_buff_pool *pool = txq->xsk_pool;
+
+ if (xsk_cnt)
+ xsk_tx_completed(pool, xsk_cnt);
+
+ if (xsk_uses_need_wakeup(pool))
+ xsk_set_tx_need_wakeup(pool);
+
+		/* A false return means there are still packets to be
+		 * transmitted, so return "budget" to keep the NAPI
+		 * poller running.
+		 */
+ if (!fec_enet_xsk_xmit(fep, pool, queue))
+ return budget;
+ }
+
+ return 0;
}
-static void fec_enet_tx(struct net_device *ndev, int budget)
+static int fec_enet_tx(struct net_device *ndev, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int i;
+ int i, count = 0;
/* Make sure that AVB queues are processed first. */
for (i = fep->num_tx_queues - 1; i >= 0; i--)
- fec_enet_tx_queue(ndev, i, budget);
+ count += fec_enet_tx_queue(fep, i, budget);
+
+ return count;
}
static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
@@ -1656,76 +1758,28 @@ static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
if (unlikely(!new_page))
return -ENOMEM;
- rxq->rx_buf[index] = new_page;
+ rxq->rx_buf[index].page = new_page;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
return 0;
}
-static u32
-fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
- struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
+static int fec_enet_update_cbd_zc(struct fec_enet_priv_rx_q *rxq,
+ struct bufdesc *bdp, int index)
{
- unsigned int sync, len = xdp->data_end - xdp->data;
- u32 ret = FEC_ENET_XDP_PASS;
- struct page *page;
- int err;
- u32 act;
-
- act = bpf_prog_run_xdp(prog, xdp);
-
- /* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover
- * max len CPU touch
- */
- sync = xdp->data_end - xdp->data;
- sync = max(sync, len);
-
- switch (act) {
- case XDP_PASS:
- rxq->stats[RX_XDP_PASS]++;
- ret = FEC_ENET_XDP_PASS;
- break;
-
- case XDP_REDIRECT:
- rxq->stats[RX_XDP_REDIRECT]++;
- err = xdp_do_redirect(fep->netdev, xdp, prog);
- if (unlikely(err))
- goto xdp_err;
-
- ret = FEC_ENET_XDP_REDIR;
- break;
-
- case XDP_TX:
- rxq->stats[RX_XDP_TX]++;
- err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
- if (unlikely(err)) {
- rxq->stats[RX_XDP_TX_ERRORS]++;
- goto xdp_err;
- }
+ struct xdp_buff *new_xdp;
+ dma_addr_t phys_addr;
- ret = FEC_ENET_XDP_TX;
- break;
+ new_xdp = xsk_buff_alloc(rxq->xsk_pool);
+ if (unlikely(!new_xdp))
+ return -ENOMEM;
- default:
- bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
- fallthrough;
-
- case XDP_ABORTED:
- fallthrough; /* handle aborts by dropping packet */
-
- case XDP_DROP:
- rxq->stats[RX_XDP_DROP]++;
-xdp_err:
- ret = FEC_ENET_XDP_CONSUMED;
- page = virt_to_head_page(xdp->data);
- page_pool_put_page(rxq->page_pool, page, sync, true);
- if (act != XDP_DROP)
- trace_xdp_exception(fep->netdev, prog, act);
- break;
- }
+ rxq->rx_buf[index].xdp = new_xdp;
+ phys_addr = xsk_buff_xdp_get_dma(new_xdp);
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
- return ret;
+ return 0;
}
static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb)
@@ -1744,40 +1798,113 @@ static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb)
}
}
+static int fec_rx_error_check(struct net_device *ndev, u16 status)
+{
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+ BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
+ BD_ENET_RX_CL)) {
+ ndev->stats.rx_errors++;
+
+ if (status & BD_ENET_RX_OV) {
+ /* FIFO overrun */
+ ndev->stats.rx_fifo_errors++;
+ return -EIO;
+ }
+
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH |
+ BD_ENET_RX_LAST)) {
+ /* Frame too long or too short. */
+ ndev->stats.rx_length_errors++;
+ if ((status & BD_ENET_RX_LAST) && net_ratelimit())
+ netdev_err(ndev, "rcv is not +last\n");
+ }
+
+ /* CRC Error */
+ if (status & BD_ENET_RX_CR)
+ ndev->stats.rx_crc_errors++;
+
+ /* Report late collisions as a frame error. */
+ if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+ ndev->stats.rx_frame_errors++;
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *fec_build_skb(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq,
+ struct bufdesc *bdp,
+ struct page *page, u32 len)
+{
+ struct net_device *ndev = fep->netdev;
+ struct bufdesc_ex *ebdp;
+ struct sk_buff *skb;
+
+ skb = build_skb(page_address(page),
+ PAGE_SIZE << fep->pagepool_order);
+ if (unlikely(!skb)) {
+ page_pool_recycle_direct(rxq->page_pool, page);
+ ndev->stats.rx_dropped++;
+ if (net_ratelimit())
+ netdev_err(ndev, "build_skb failed\n");
+
+ return NULL;
+ }
+
+ skb_reserve(skb, FEC_ENET_XDP_HEADROOM + fep->rx_shift);
+ skb_put(skb, len);
+ skb_mark_for_recycle(skb);
+
+ /* Get offloads from the enhanced buffer descriptor */
+ if (fep->bufdesc_ex) {
+ ebdp = (struct bufdesc_ex *)bdp;
+
+ /* If this is a VLAN packet remove the VLAN Tag */
+ if (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))
+ fec_enet_rx_vlan(ndev, skb);
+
+ /* Get receive timestamp from the skb */
+ if (fep->hwts_rx_en)
+ fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
+ skb_hwtstamps(skb));
+
+ if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) {
+ if (!(ebdp->cbd_esc &
+ cpu_to_fec32(FLAG_RX_CSUM_ERROR)))
+ /* don't check it */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+ }
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb_record_rx_queue(skb, rxq->bd.qid);
+
+ return skb;
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
* effectively tossing the packet.
*/
-static int
-fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
+static int fec_enet_rx_queue(struct fec_enet_private *fep,
+ u16 queue, int budget)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_enet_priv_rx_q *rxq;
- struct bufdesc *bdp;
- unsigned short status;
- struct sk_buff *skb;
- ushort pkt_len;
- int pkt_received = 0;
- struct bufdesc_ex *ebdp = NULL;
- int index = 0;
- bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
- struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
- u32 ret, xdp_result = FEC_ENET_XDP_PASS;
- u32 data_start = FEC_ENET_XDP_HEADROOM;
- int cpu = smp_processor_id();
- struct xdp_buff xdp;
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
+ bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+ struct net_device *ndev = fep->netdev;
+ struct bufdesc *bdp = rxq->bd.cur;
+ u32 sub_len = 4 + fep->rx_shift;
+ int pkt_received = 0;
+ u16 status, pkt_len;
+ struct sk_buff *skb;
struct page *page;
- __fec32 cbd_bufaddr;
- u32 sub_len = 4;
-
- /*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of
- * FEC_RACC_SHIFT16 is set by default in the probe function.
- */
- if (fep->quirks & FEC_QUIRK_HAS_RACC) {
- data_start += 2;
- sub_len += 2;
- }
+ dma_addr_t dma;
+ int index;
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
/*
@@ -1786,139 +1913,497 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
*/
flush_cache_all();
#endif
- rxq = fep->rx_queue[queue_id];
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
- bdp = rxq->bd.cur;
- xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
-
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
if (pkt_received >= budget)
break;
pkt_received++;
- writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
+ writel(FEC_ENET_RXF_GET(queue), fep->hwp + FEC_IEVENT);
/* Check for errors. */
status ^= BD_ENET_RX_LAST;
- if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
- BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
- BD_ENET_RX_CL)) {
- ndev->stats.rx_errors++;
- if (status & BD_ENET_RX_OV) {
- /* FIFO overrun */
- ndev->stats.rx_fifo_errors++;
- goto rx_processing_done;
- }
- if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
- | BD_ENET_RX_LAST)) {
- /* Frame too long or too short. */
- ndev->stats.rx_length_errors++;
- if (status & BD_ENET_RX_LAST)
- netdev_err(ndev, "rcv is not +last\n");
- }
- if (status & BD_ENET_RX_CR) /* CRC Error */
- ndev->stats.rx_crc_errors++;
- /* Report late collisions as a frame error. */
- if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
- ndev->stats.rx_frame_errors++;
+ if (unlikely(fec_rx_error_check(ndev, status)))
goto rx_processing_done;
- }
/* Process the incoming frame. */
ndev->stats.rx_packets++;
pkt_len = fec16_to_cpu(bdp->cbd_datlen);
- ndev->stats.rx_bytes += pkt_len;
- if (fep->quirks & FEC_QUIRK_HAS_RACC)
- ndev->stats.rx_bytes -= 2;
+ ndev->stats.rx_bytes += pkt_len - fep->rx_shift;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
- page = rxq->rx_buf[index];
- cbd_bufaddr = bdp->cbd_bufaddr;
+ page = rxq->rx_buf[index].page;
+ dma = fec32_to_cpu(bdp->cbd_bufaddr);
if (fec_enet_update_cbd(rxq, bdp, index)) {
ndev->stats.rx_dropped++;
goto rx_processing_done;
}
- dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(cbd_bufaddr),
- pkt_len,
+ dma_sync_single_for_cpu(&fep->pdev->dev, dma, pkt_len,
DMA_FROM_DEVICE);
prefetch(page_address(page));
- if (xdp_prog) {
- xdp_buff_clear_frags_flag(&xdp);
- /* subtract 16bit shift and FCS */
- xdp_prepare_buff(&xdp, page_address(page),
- data_start, pkt_len - sub_len, false);
- ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
- xdp_result |= ret;
- if (ret != FEC_ENET_XDP_PASS)
- goto rx_processing_done;
+ if (unlikely(need_swap)) {
+ u8 *data;
+
+ data = page_address(page) + FEC_ENET_XDP_HEADROOM;
+ swap_buffer(data, pkt_len);
}
/* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
* bridging applications.
*/
- skb = build_skb(page_address(page),
- PAGE_SIZE << fep->pagepool_order);
- if (unlikely(!skb)) {
- page_pool_recycle_direct(rxq->page_pool, page);
- ndev->stats.rx_dropped++;
+ skb = fec_build_skb(fep, rxq, bdp, page, pkt_len - sub_len);
+ if (!skb)
+ goto rx_processing_done;
+
+ napi_gro_receive(&fep->napi, skb);
- netdev_err_once(ndev, "build_skb failed!\n");
+rx_processing_done:
+ /* Clear the status flags for this buffer */
+ status &= ~BD_ENET_RX_STATS;
+
+ /* Mark the buffer empty */
+ status |= BD_ENET_RX_EMPTY;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
+ }
+		/* Make sure the updates to the rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+ wmb();
+ bdp->cbd_sc = cpu_to_fec16(status);
+
+ /* Update BD pointer to next entry */
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ writel(0, rxq->bd.reg_desc_active);
+ }
+ rxq->bd.cur = bdp;
+
+ return pkt_received;
+}
+
+static void fec_xdp_drop(struct fec_enet_priv_rx_q *rxq,
+ struct xdp_buff *xdp, u32 sync)
+{
+ struct page *page = virt_to_head_page(xdp->data);
+
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+}
+
+static int
+fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
+{
+ if (unlikely(index < 0))
+ return 0;
+
+ return (index % fep->num_tx_queues);
+}
+
+static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
+ int budget, struct bpf_prog *prog)
+{
+ u32 data_start = FEC_ENET_XDP_HEADROOM + fep->rx_shift;
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
+ struct net_device *ndev = fep->netdev;
+ struct bufdesc *bdp = rxq->bd.cur;
+ u32 sub_len = 4 + fep->rx_shift;
+ int cpu = smp_processor_id();
+ int pkt_received = 0;
+ struct sk_buff *skb;
+ u16 status, pkt_len;
+ struct xdp_buff xdp;
+ int tx_qid = queue;
+ struct page *page;
+ u32 xdp_res = 0;
+ dma_addr_t dma;
+ int index, err;
+ u32 act, sync;
+
+#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
+ /*
+ * Hacky flush of all caches instead of using the DMA API for the TSO
+ * headers.
+ */
+ flush_cache_all();
+#endif
+
+ if (unlikely(tx_qid >= fep->num_tx_queues))
+ tx_qid = fec_enet_xdp_get_tx_queue(fep, cpu);
+
+ xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
+
+ while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
+ if (pkt_received >= budget)
+ break;
+ pkt_received++;
+
+ writel(FEC_ENET_RXF_GET(queue), fep->hwp + FEC_IEVENT);
+
+ /* Check for errors. */
+ status ^= BD_ENET_RX_LAST;
+ if (unlikely(fec_rx_error_check(ndev, status)))
+ goto rx_processing_done;
+
+ /* Process the incoming frame. */
+ ndev->stats.rx_packets++;
+ pkt_len = fec16_to_cpu(bdp->cbd_datlen);
+ ndev->stats.rx_bytes += pkt_len - fep->rx_shift;
+
+ index = fec_enet_get_bd_index(bdp, &rxq->bd);
+ page = rxq->rx_buf[index].page;
+ dma = fec32_to_cpu(bdp->cbd_bufaddr);
+
+ if (fec_enet_update_cbd(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
goto rx_processing_done;
}
- skb_reserve(skb, data_start);
- skb_put(skb, pkt_len - sub_len);
- skb_mark_for_recycle(skb);
+ dma_sync_single_for_cpu(&fep->pdev->dev, dma, pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(page_address(page));
- if (unlikely(need_swap)) {
- u8 *data;
+ xdp_buff_clear_frags_flag(&xdp);
+ /* subtract 16bit shift and FCS */
+ pkt_len -= sub_len;
+ xdp_prepare_buff(&xdp, page_address(page), data_start,
+ pkt_len, false);
- data = page_address(page) + FEC_ENET_XDP_HEADROOM;
- swap_buffer(data, pkt_len);
+ act = bpf_prog_run_xdp(prog, &xdp);
+		/* Due to xdp_adjust_tail and xdp_adjust_head, the DMA sync
+		 * for_device must cover the maximum length the CPU touched.
+		 */
+ sync = xdp.data_end - xdp.data;
+ sync = max(sync, pkt_len);
+
+ switch (act) {
+ case XDP_PASS:
+ rxq->stats[RX_XDP_PASS]++;
+ /* The packet length includes FCS, but we don't want to
+ * include that when passing upstream as it messes up
+ * bridging applications.
+ */
+ skb = fec_build_skb(fep, rxq, bdp, page, pkt_len);
+ if (!skb)
+ trace_xdp_exception(ndev, prog, XDP_PASS);
+ else
+ napi_gro_receive(&fep->napi, skb);
+
+ break;
+ case XDP_REDIRECT:
+ rxq->stats[RX_XDP_REDIRECT]++;
+ err = xdp_do_redirect(ndev, &xdp, prog);
+ if (unlikely(err)) {
+ fec_xdp_drop(rxq, &xdp, sync);
+ trace_xdp_exception(ndev, prog, XDP_REDIRECT);
+ } else {
+ xdp_res |= FEC_ENET_XDP_REDIR;
+ }
+ break;
+ case XDP_TX:
+ rxq->stats[RX_XDP_TX]++;
+ err = fec_enet_xdp_tx_xmit(fep, cpu, &xdp, sync, tx_qid);
+ if (unlikely(err)) {
+ rxq->stats[RX_XDP_TX_ERRORS]++;
+ fec_xdp_drop(rxq, &xdp, sync);
+ trace_xdp_exception(ndev, prog, XDP_TX);
+ } else {
+ xdp_res |= FEC_ENET_XDP_TX;
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ /* handle aborts by dropping packet */
+ fallthrough;
+ case XDP_DROP:
+ rxq->stats[RX_XDP_DROP]++;
+ fec_xdp_drop(rxq, &xdp, sync);
+ break;
}
- /* Extract the enhanced buffer descriptor */
- ebdp = NULL;
- if (fep->bufdesc_ex)
- ebdp = (struct bufdesc_ex *)bdp;
+rx_processing_done:
+ /* Clear the status flags for this buffer */
+ status &= ~BD_ENET_RX_STATS;
+ /* Mark the buffer empty */
+ status |= BD_ENET_RX_EMPTY;
- /* If this is a VLAN packet remove the VLAN Tag */
- if (fep->bufdesc_ex &&
- (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN)))
- fec_enet_rx_vlan(ndev, skb);
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- skb->protocol = eth_type_trans(skb, ndev);
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
+ }
- /* Get receive timestamp from the skb */
- if (fep->hwts_rx_en && fep->bufdesc_ex)
- fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
- skb_hwtstamps(skb));
+		/* Make sure the updates to the rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+ dma_wmb();
+ bdp->cbd_sc = cpu_to_fec16(status);
- if (fep->bufdesc_ex &&
- (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
- if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
- /* don't check it */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- skb_checksum_none_assert(skb);
+ /* Update BD pointer to next entry */
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ writel(0, rxq->bd.reg_desc_active);
+ }
+
+ rxq->bd.cur = bdp;
+
+ if (xdp_res & FEC_ENET_XDP_REDIR)
+ xdp_do_flush();
+
+ if (xdp_res & FEC_ENET_XDP_TX)
+ /* Trigger transmission start */
+ fec_txq_trigger_xmit(fep, fep->tx_queue[tx_qid]);
+
+ return pkt_received;
+}
+
+static struct sk_buff *fec_build_skb_zc(struct xdp_buff *xsk,
+ struct napi_struct *napi)
+{
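+	/* Zero-copy buffers live in the user-mapped umem and must go back
+	 * to the pool, so XDP_PASS copies the frame into a fresh skb.
+	 */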
+ size_t len = xdp_get_buff_len(xsk);
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(napi, len);
+ if (unlikely(!skb)) {
+ xsk_buff_free(xsk);
+ return NULL;
+ }
+
+ skb_put_data(skb, xsk->data, len);
+ xsk_buff_free(xsk);
+
+ return skb;
+}
+
+static int fec_enet_xsk_tx_xmit(struct fec_enet_private *fep,
+ struct xdp_buff *xsk, int cpu,
+ int queue)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(fep->netdev, queue);
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ u32 offset = xsk->data - xsk->data_hard_start;
+ u32 headroom = txq->xsk_pool->headroom;
+ u32 len = xsk->data_end - xsk->data;
+ u32 index, status, estatus;
+ struct bufdesc *bdp;
+ dma_addr_t dma;
+
+ __netif_tx_lock(nq, cpu);
+
+	/* Avoid tx timeouts as XDP shares the queue with the kernel stack */
+ txq_trans_cond_update(nq);
+
+ if (!fec_enet_get_free_txdesc_num(txq)) {
+ __netif_tx_unlock(nq);
+
+ return -EBUSY;
+ }
+
+ /* Fill in a Tx ring entry */
+ bdp = txq->bd.cur;
+ status = fec16_to_cpu(bdp->cbd_sc);
+ status &= ~BD_ENET_TX_STATS;
+
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
+ dma = xsk_buff_xdp_get_frame_dma(xsk) + headroom + offset;
+
+ xsk_buff_raw_dma_sync_for_device(txq->xsk_pool, dma, len);
+
+ txq->tx_buf[index].buf_p = xsk;
+ txq->tx_buf[index].type = FEC_TXBUF_T_XSK_TX;
+
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex)
+ estatus = BD_ENET_TX_INT;
+
+ bdp->cbd_bufaddr = cpu_to_fec32(dma);
+ bdp->cbd_datlen = cpu_to_fec16(len);
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
+ }
+
+ dma_wmb();
+ status |= BD_ENET_TX_READY | BD_ENET_TX_TC;
+ bdp->cbd_sc = cpu_to_fec16(status);
+ dma_wmb();
+
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+ txq->bd.cur = bdp;
+
+ __netif_tx_unlock(nq);
+
+ return 0;
+}
+
+static int fec_enet_rx_queue_xsk(struct fec_enet_private *fep, int queue,
+ int budget, struct bpf_prog *prog)
+{
+ u32 data_start = FEC_ENET_XDP_HEADROOM + fep->rx_shift;
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
+ struct net_device *ndev = fep->netdev;
+ struct bufdesc *bdp = rxq->bd.cur;
+ u32 sub_len = 4 + fep->rx_shift;
+ int cpu = smp_processor_id();
+ bool wakeup_xsk = false;
+ struct xdp_buff *xsk;
+ int pkt_received = 0;
+ struct sk_buff *skb;
+ u16 status, pkt_len;
+ u32 xdp_res = 0;
+ int index, err;
+ u32 act;
+
+#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
+ /*
+ * Hacky flush of all caches instead of using the DMA API for the TSO
+ * headers.
+ */
+ flush_cache_all();
+#endif
+
+ while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
+ if (unlikely(pkt_received >= budget))
+ break;
+
+ writel(FEC_ENET_RXF_GET(queue), fep->hwp + FEC_IEVENT);
+
+ index = fec_enet_get_bd_index(bdp, &rxq->bd);
+ xsk = rxq->rx_buf[index].xdp;
+ if (unlikely(!xsk)) {
+ if (fec_enet_update_cbd_zc(rxq, bdp, index))
+ break;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
}
+
+ dma_wmb();
+ status &= ~BD_ENET_RX_STATS;
+ status |= BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = cpu_to_fec16(status);
+ break;
}
- skb_record_rx_queue(skb, queue_id);
- napi_gro_receive(&fep->napi, skb);
+ pkt_received++;
+ /* Check for errors. */
+ status ^= BD_ENET_RX_LAST;
+ if (unlikely(fec_rx_error_check(ndev, status)))
+ goto rx_processing_done;
+
+ /* Process the incoming frame. */
+ ndev->stats.rx_packets++;
+ pkt_len = fec16_to_cpu(bdp->cbd_datlen);
+ ndev->stats.rx_bytes += pkt_len - fep->rx_shift;
+
+ if (fec_enet_update_cbd_zc(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
+		/* Subtract FCS and the 16-bit shift */
+		pkt_len -= sub_len;
+		xsk->data = xsk->data_hard_start + data_start;
+		xsk->data_end = xsk->data + pkt_len;
+		xsk->data_meta = xsk->data;
+ xsk_buff_dma_sync_for_cpu(xsk);
+
+		/* If the XSK pool is enabled before the bpf program is
+		 * installed, or the bpf program is uninstalled before the
+		 * XSK pool is disabled, prog will be NULL and we need to
+		 * assume a default XDP_PASS action.
+		 */
+ if (unlikely(!prog))
+ act = XDP_PASS;
+ else
+ act = bpf_prog_run_xdp(prog, xsk);
+
+ switch (act) {
+ case XDP_PASS:
+ rxq->stats[RX_XDP_PASS]++;
+ skb = fec_build_skb_zc(xsk, &fep->napi);
+ if (unlikely(!skb)) {
+ ndev->stats.rx_dropped++;
+ trace_xdp_exception(ndev, prog, XDP_PASS);
+ } else {
+ napi_gro_receive(&fep->napi, skb);
+ }
+
+ break;
+ case XDP_TX:
+ rxq->stats[RX_XDP_TX]++;
+ err = fec_enet_xsk_tx_xmit(fep, xsk, cpu, queue);
+ if (unlikely(err)) {
+ rxq->stats[RX_XDP_TX_ERRORS]++;
+ xsk_buff_free(xsk);
+ trace_xdp_exception(ndev, prog, XDP_TX);
+ } else {
+ xdp_res |= FEC_ENET_XDP_TX;
+ }
+ break;
+ case XDP_REDIRECT:
+ rxq->stats[RX_XDP_REDIRECT]++;
+ err = xdp_do_redirect(ndev, xsk, prog);
+ if (unlikely(err)) {
+ if (err == -ENOBUFS)
+ wakeup_xsk = true;
+
+ rxq->stats[RX_XDP_DROP]++;
+ xsk_buff_free(xsk);
+ trace_xdp_exception(ndev, prog, XDP_REDIRECT);
+ } else {
+ xdp_res |= FEC_ENET_XDP_REDIR;
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ rxq->stats[RX_XDP_DROP]++;
+ xsk_buff_free(xsk);
+ break;
+ }
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
-
/* Mark the buffer empty */
status |= BD_ENET_RX_EMPTY;
@@ -1929,37 +2414,59 @@ rx_processing_done:
ebdp->cbd_prot = 0;
ebdp->cbd_bdu = 0;
}
+
/* Make sure the updates to rest of the descriptor are
* performed before transferring ownership.
*/
- wmb();
+ dma_wmb();
bdp->cbd_sc = cpu_to_fec16(status);
/* Update BD pointer to next entry */
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
/* Doing this here will keep the FEC running while we process
- * incoming frames. On a heavily loaded network, we should be
+ * incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
*/
writel(0, rxq->bd.reg_desc_active);
}
+
rxq->bd.cur = bdp;
- if (xdp_result & FEC_ENET_XDP_REDIR)
+ if (xdp_res & FEC_ENET_XDP_REDIR)
xdp_do_flush();
+ if (xdp_res & FEC_ENET_XDP_TX)
+ fec_txq_trigger_xmit(fep, fep->tx_queue[queue]);
+
+ if (rxq->xsk_pool && xsk_uses_need_wakeup(rxq->xsk_pool)) {
+ if (wakeup_xsk)
+ xsk_set_rx_need_wakeup(rxq->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rxq->xsk_pool);
+ }
+
return pkt_received;
}
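[Editor's aside: for context, a userspace consumer honouring the need_wakeup flags set above might look like this hedged libxdp sketch; socket/umem setup is omitted and the names (rx_loop, fill_q, rx_q) are placeholders:

#include <poll.h>
#include <xdp/xsk.h>	/* libxdp; older setups use <bpf/xsk.h> */

static void rx_loop(struct xsk_socket *xsk, struct xsk_ring_prod *fill_q,
		    struct xsk_ring_cons *rx_q)
{
	struct pollfd pfd = { .fd = xsk_socket__fd(xsk), .events = POLLIN };
	__u32 idx;

	for (;;) {
		/* The driver sets the RX need_wakeup flag when its fill
		 * queue ran dry; only then is a syscall worthwhile.
		 */
		if (xsk_ring_prod__needs_wakeup(fill_q))
			poll(&pfd, 1, -1);	/* ends up in ndo_xsk_wakeup */

		while (xsk_ring_cons__peek(rx_q, 1, &idx)) {
			/* process xsk_ring_cons__rx_desc(rx_q, idx)... */
			xsk_ring_cons__release(rx_q, 1);
			/* ...and refill fill_q with the consumed frame */
		}
	}
}
]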
static int fec_enet_rx(struct net_device *ndev, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct bpf_prog *prog = READ_ONCE(fep->xdp_prog);
int i, done = 0;
/* Make sure that AVB queues are processed first. */
- for (i = fep->num_rx_queues - 1; i >= 0; i--)
- done += fec_enet_rx_queue(ndev, i, budget - done);
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
+ int batch = budget - done;
+
+ if (rxq->xsk_pool)
+ done += fec_enet_rx_queue_xsk(fep, i, batch, prog);
+ else if (prog)
+ done += fec_enet_rx_queue_xdp(fep, i, batch, prog);
+ else
+ done += fec_enet_rx_queue(fep, i, batch);
+ }
return done;
}
@@ -2002,19 +2509,22 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
struct net_device *ndev = napi->dev;
struct fec_enet_private *fep = netdev_priv(ndev);
- int done = 0;
+ int rx_done = 0, tx_done = 0;
+ int max_done;
do {
- done += fec_enet_rx(ndev, budget - done);
- fec_enet_tx(ndev, budget);
- } while ((done < budget) && fec_enet_collect_events(fep));
+ rx_done += fec_enet_rx(ndev, budget - rx_done);
+ tx_done += fec_enet_tx(ndev, budget);
+ max_done = max(rx_done, tx_done);
+ } while ((max_done < budget) && fec_enet_collect_events(fep));
- if (done < budget) {
- napi_complete_done(napi, done);
+ if (max_done < budget) {
+ napi_complete_done(napi, max_done);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ return max_done;
}
- return done;
+ return budget;
}
/* ------------------------------------------------------------------------- */
@@ -3301,27 +3811,86 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.self_test = net_selftest,
};
+static int fec_xdp_rxq_info_reg(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq)
+{
+ struct net_device *ndev = fep->netdev;
+ void *allocator;
+ int type, err;
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq->id, 0);
+ if (err) {
+ netdev_err(ndev, "Failed to register xdp rxq info\n");
+ return err;
+ }
+
+ allocator = rxq->xsk_pool ? NULL : rxq->page_pool;
+ type = rxq->xsk_pool ? MEM_TYPE_XSK_BUFF_POOL : MEM_TYPE_PAGE_POOL;
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, type, allocator);
+ if (err) {
+ netdev_err(ndev, "Failed to register XDP mem model\n");
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ return err;
+ }
+
+ if (rxq->xsk_pool)
+ xsk_pool_set_rxq_info(rxq->xsk_pool, &rxq->xdp_rxq);
+
+ return 0;
+}
+
+static void fec_xdp_rxq_info_unreg(struct fec_enet_priv_rx_q *rxq)
+{
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) {
+ xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq);
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ }
+}
+
+static void fec_free_rxq_buffers(struct fec_enet_priv_rx_q *rxq)
+{
+ bool xsk = !!rxq->xsk_pool;
+ int i;
+
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+ union fec_rx_buffer *buf = &rxq->rx_buf[i];
+
+ if (!buf->buf_p)
+ continue;
+
+ if (xsk)
+ xsk_buff_free(buf->xdp);
+ else
+ page_pool_put_full_page(rxq->page_pool,
+ buf->page, false);
+
+ rxq->rx_buf[i].buf_p = NULL;
+ }
+
+ if (!xsk) {
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ }
+}
+
static void fec_enet_free_buffers(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
+ struct page *page;
unsigned int q;
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
- for (i = 0; i < rxq->bd.ring_size; i++)
- page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
- false);
+
+ fec_xdp_rxq_info_unreg(rxq);
+ fec_free_rxq_buffers(rxq);
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
-
- if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
- xdp_rxq_info_unreg(&rxq->xdp_rxq);
- page_pool_destroy(rxq->page_pool);
- rxq->page_pool = NULL;
}
for (q = 0; q < fep->num_tx_queues; q++) {
@@ -3330,20 +3899,23 @@ static void fec_enet_free_buffers(struct net_device *ndev)
kfree(txq->tx_bounce[i]);
txq->tx_bounce[i] = NULL;
- if (!txq->tx_buf[i].buf_p) {
- txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
- continue;
- }
-
- if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+ switch (txq->tx_buf[i].type) {
+ case FEC_TXBUF_T_SKB:
dev_kfree_skb(txq->tx_buf[i].buf_p);
- } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+ break;
+ case FEC_TXBUF_T_XDP_NDO:
xdp_return_frame(txq->tx_buf[i].buf_p);
- } else {
- struct page *page = txq->tx_buf[i].buf_p;
-
+ break;
+ case FEC_TXBUF_T_XDP_TX:
+ page = txq->tx_buf[i].buf_p;
page_pool_put_page(pp_page_to_nmdesc(page)->pp,
page, 0, false);
+ break;
+ case FEC_TXBUF_T_XSK_TX:
+ xsk_buff_free(txq->tx_buf[i].buf_p);
+ break;
+ default:
+ break;
}
txq->tx_buf[i].buf_p = NULL;
@@ -3420,22 +3992,18 @@ alloc_failed:
return ret;
}
-static int
-fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
+static int fec_alloc_rxq_buffers_pp(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_enet_priv_rx_q *rxq;
+ struct bufdesc *bdp = rxq->bd.base;
dma_addr_t phys_addr;
- struct bufdesc *bdp;
struct page *page;
int i, err;
- rxq = fep->rx_queue[queue];
- bdp = rxq->bd.base;
-
- err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
+ err = fec_enet_create_page_pool(fep, rxq);
if (err < 0) {
- netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+ netdev_err(fep->netdev, "%s failed queue %d (%d)\n",
+ __func__, rxq->bd.qid, err);
return err;
}
@@ -3454,31 +4022,81 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
for (i = 0; i < rxq->bd.ring_size; i++) {
page = page_pool_dev_alloc_pages(rxq->page_pool);
- if (!page)
- goto err_alloc;
+ if (!page) {
+ err = -ENOMEM;
+ goto free_rx_buffers;
+ }
phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+ rxq->rx_buf[i].page = page;
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+ }
- rxq->rx_buf[i] = page;
- bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
+ return 0;
- if (fep->bufdesc_ex) {
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
- }
+free_rx_buffers:
+ fec_free_rxq_buffers(rxq);
+
+ return err;
+}
+static int fec_alloc_rxq_buffers_zc(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq)
+{
+ union fec_rx_buffer *buf = &rxq->rx_buf[0];
+ struct bufdesc *bdp = rxq->bd.base;
+ dma_addr_t phys_addr;
+ int i;
+
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+ buf[i].xdp = xsk_buff_alloc(rxq->xsk_pool);
+ if (!buf[i].xdp)
+ break;
+
+ phys_addr = xsk_buff_xdp_get_dma(buf[i].xdp);
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
}
- /* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
+ for (; i < rxq->bd.ring_size; i++) {
+ buf[i].xdp = NULL;
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+ }
+
+ return 0;
+}
+
+static int
+fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_rx_q *rxq;
+ int err;
+
+ rxq = fep->rx_queue[queue];
+ if (rxq->xsk_pool) {
+ /* RX XDP ZC buffer pool may not be populated, e.g.
+ * xdpsock TX-only.
+ */
+ fec_alloc_rxq_buffers_zc(fep, rxq);
+ } else {
+ err = fec_alloc_rxq_buffers_pp(fep, rxq);
+ if (err)
+ goto free_buffers;
+ }
+
+ err = fec_xdp_rxq_info_reg(fep, rxq);
+ if (err)
+ goto free_buffers;
+
return 0;
- err_alloc:
+free_buffers:
fec_enet_free_buffers(ndev);
- return -ENOMEM;
+
+ return err;
}
static int
@@ -3792,21 +4410,237 @@ static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
}
+static void fec_free_rxq(struct fec_enet_priv_rx_q *rxq)
+{
+ fec_xdp_rxq_info_unreg(rxq);
+ fec_free_rxq_buffers(rxq);
+ kfree(rxq);
+}
+
+static struct fec_enet_priv_rx_q *
+fec_alloc_new_rxq_xsk(struct fec_enet_private *fep, int queue,
+ struct xsk_buff_pool *pool)
+{
+ struct fec_enet_priv_rx_q *old_rxq = fep->rx_queue[queue];
+ struct fec_enet_priv_rx_q *rxq;
+ union fec_rx_buffer *buf;
+ int i;
+
+ rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
+ if (!rxq)
+ return NULL;
+
+ /* Copy the BD ring to the new rxq */
+ rxq->bd = old_rxq->bd;
+ rxq->id = queue;
+ rxq->xsk_pool = pool;
+ buf = &rxq->rx_buf[0];
+
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+ buf[i].xdp = xsk_buff_alloc(pool);
+ /* RX XDP ZC buffer pool may not be populated, e.g.
+ * xdpsock TX-only.
+ */
+ if (!buf[i].xdp)
+ break;
+ }
+
+ if (fec_xdp_rxq_info_reg(fep, rxq))
+ goto free_buffers;
+
+ return rxq;
+
+free_buffers:
+ while (--i >= 0)
+ xsk_buff_free(buf[i].xdp);
+
+ kfree(rxq);
+
+ return NULL;
+}
+
+static struct fec_enet_priv_rx_q *
+fec_alloc_new_rxq_pp(struct fec_enet_private *fep, int queue)
+{
+ struct fec_enet_priv_rx_q *old_rxq = fep->rx_queue[queue];
+ struct fec_enet_priv_rx_q *rxq;
+ union fec_rx_buffer *buf;
+ int i = 0;
+
+ rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
+ if (!rxq)
+ return NULL;
+
+ rxq->bd = old_rxq->bd;
+ rxq->id = queue;
+
+ if (fec_enet_create_page_pool(fep, rxq))
+ goto free_rxq;
+
+ buf = &rxq->rx_buf[0];
+ for (; i < rxq->bd.ring_size; i++) {
+ buf[i].page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!buf[i].page)
+ goto free_buffers;
+ }
+
+ if (fec_xdp_rxq_info_reg(fep, rxq))
+ goto free_buffers;
+
+ return rxq;
+
+free_buffers:
+ while (--i >= 0)
+ page_pool_put_full_page(rxq->page_pool,
+ buf[i].page, false);
+
+ page_pool_destroy(rxq->page_pool);
+free_rxq:
+ kfree(rxq);
+
+ return NULL;
+}
+
+static void fec_init_rxq_bd_buffers(struct fec_enet_priv_rx_q *rxq, bool xsk)
+{
+ union fec_rx_buffer *buf = &rxq->rx_buf[0];
+ struct bufdesc *bdp = rxq->bd.base;
+ dma_addr_t dma;
+
+ for (int i = 0; i < rxq->bd.ring_size; i++) {
+ if (xsk)
+ dma = buf[i].xdp ?
+ xsk_buff_xdp_get_dma(buf[i].xdp) : 0;
+ else
+ dma = page_pool_get_dma_addr(buf[i].page) +
+ FEC_ENET_XDP_HEADROOM;
+
+ bdp->cbd_bufaddr = cpu_to_fec32(dma);
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+ }
+}
+
+static int fec_xsk_restart_napi(struct fec_enet_private *fep,
+ struct xsk_buff_pool *pool,
+ u16 queue)
+{
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ struct net_device *ndev = fep->netdev;
+ struct fec_enet_priv_rx_q *rxq;
+ int err;
+
+ napi_disable(&fep->napi);
+ netif_tx_disable(ndev);
+ synchronize_rcu();
+
+ rxq = pool ? fec_alloc_new_rxq_xsk(fep, queue, pool) :
+ fec_alloc_new_rxq_pp(fep, queue);
+ if (!rxq) {
+ err = -ENOMEM;
+ goto err_alloc_new_rxq;
+ }
+
+ /* Replace the old rxq with the new rxq */
+ fec_free_rxq(fep->rx_queue[queue]);
+ fep->rx_queue[queue] = rxq;
+ fec_init_rxq_bd_buffers(rxq, !!pool);
+ txq->xsk_pool = pool;
+
+ fec_restart(ndev);
+ napi_enable(&fep->napi);
+ netif_tx_start_all_queues(ndev);
+
+ return 0;
+
+err_alloc_new_rxq:
+ napi_enable(&fep->napi);
+ netif_tx_start_all_queues(ndev);
+
+ return err;
+}
+
+static int fec_enable_xsk_pool(struct fec_enet_private *fep,
+ struct xsk_buff_pool *pool,
+ u16 queue)
+{
+ int err;
+
+ err = xsk_pool_dma_map(pool, &fep->pdev->dev, 0);
+ if (err) {
+ netdev_err(fep->netdev, "Failed to map xsk pool\n");
+ return err;
+ }
+
+ if (!netif_running(fep->netdev)) {
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+
+ rxq->xsk_pool = pool;
+ txq->xsk_pool = pool;
+
+ return 0;
+ }
+
+ err = fec_xsk_restart_napi(fep, pool, queue);
+ if (err) {
+ xsk_pool_dma_unmap(pool, 0);
+ return err;
+ }
+
+ return 0;
+}
+
+static int fec_disable_xsk_pool(struct fec_enet_private *fep,
+ u16 queue)
+{
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ struct xsk_buff_pool *old_pool = txq->xsk_pool;
+ int err;
+
+ if (!netif_running(fep->netdev)) {
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
+
+ xsk_pool_dma_unmap(old_pool, 0);
+ rxq->xsk_pool = NULL;
+ txq->xsk_pool = NULL;
+
+ return 0;
+ }
+
+ err = fec_xsk_restart_napi(fep, NULL, queue);
+ if (err)
+ return err;
+
+ xsk_pool_dma_unmap(old_pool, 0);
+
+ return 0;
+}
+
+static int fec_setup_xsk_pool(struct fec_enet_private *fep,
+ struct xsk_buff_pool *pool,
+ u16 queue)
+{
+ if (queue >= fep->num_rx_queues || queue >= fep->num_tx_queues)
+ return -ERANGE;
+
+ return pool ? fec_enable_xsk_pool(fep, pool, queue) :
+ fec_disable_xsk_pool(fep, queue);
+}
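[Editor's aside: fec_setup_xsk_pool() is reached via XDP_SETUP_XSK_POOL when userspace binds an AF_XDP socket to a queue. A minimal hedged creation sketch that forces the zero-copy path (libxdp; "eth0", queue 0 and the helper name are placeholders, umem/rx/tx are assumed to be set up earlier):

#include <linux/if_xdp.h>
#include <xdp/xsk.h>

static int bind_zc_socket(struct xsk_umem *umem, struct xsk_ring_cons *rx,
			  struct xsk_ring_prod *tx, struct xsk_socket **xsk)
{
	const struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.bind_flags = XDP_ZEROCOPY | XDP_USE_NEED_WAKEUP,
	};

	/* queue 0 must pass the -ERANGE bound check above */
	return xsk_socket__create(xsk, "eth0", 0, umem, rx, tx, &cfg);
}
]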
+
static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
struct fec_enet_private *fep = netdev_priv(dev);
bool is_run = netif_running(dev);
struct bpf_prog *old_prog;
+	/* No need to support SoCs that require frame swapping: the
+	 * performance would be no better than skb mode.
+	 */
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ return -EOPNOTSUPP;
+
switch (bpf->command) {
case XDP_SETUP_PROG:
- /* No need to support the SoCs that require to
- * do the frame swap because the performance wouldn't be
- * better than the skb mode.
- */
- if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
- return -EOPNOTSUPP;
-
if (!bpf->prog)
xdp_features_clear_redirect_target(dev);
@@ -3830,24 +4664,14 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
xdp_features_set_redirect_target(dev, false);
return 0;
-
case XDP_SETUP_XSK_POOL:
- return -EOPNOTSUPP;
-
+ return fec_setup_xsk_pool(fep, bpf->xsk.pool,
+ bpf->xsk.queue_id);
default:
return -EOPNOTSUPP;
}
}
-static int
-fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
-{
- if (unlikely(index < 0))
- return 0;
-
- return (index % fep->num_tx_queues);
-}
-
static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
struct fec_enet_priv_tx_q *txq,
void *frame, u32 dma_sync_len,
@@ -3933,28 +4757,16 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
txq->bd.cur = bdp;
- /* Trigger transmission start */
- if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active) ||
- !readl(txq->bd.reg_desc_active))
- writel(0, txq->bd.reg_desc_active);
-
return 0;
}
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
int cpu, struct xdp_buff *xdp,
- u32 dma_sync_len)
+ u32 dma_sync_len, int queue)
{
- struct fec_enet_priv_tx_q *txq;
- struct netdev_queue *nq;
- int queue, ret;
-
- queue = fec_enet_xdp_get_tx_queue(fep, cpu);
- txq = fep->tx_queue[queue];
- nq = netdev_get_tx_queue(fep->netdev, queue);
+ struct netdev_queue *nq = netdev_get_tx_queue(fep->netdev, queue);
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
+ int ret;
__netif_tx_lock(nq, cpu);
@@ -3994,11 +4806,37 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
sent_frames++;
}
+ if (sent_frames)
+ fec_txq_trigger_xmit(fep, txq);
+
__netif_tx_unlock(nq);
return sent_frames;
}
+static int fec_enet_xsk_wakeup(struct net_device *ndev, u32 queue, u32 flags)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_rx_q *rxq;
+
+ if (!netif_running(ndev) || !netif_carrier_ok(ndev))
+ return -ENETDOWN;
+
+ if (queue >= fep->num_rx_queues || queue >= fep->num_tx_queues)
+ return -ERANGE;
+
+ rxq = fep->rx_queue[queue];
+ if (!rxq->xsk_pool)
+ return -EINVAL;
+
+ if (!napi_if_scheduled_mark_missed(&fep->napi)) {
+ if (likely(napi_schedule_prep(&fep->napi)))
+ __napi_schedule(&fep->napi);
+ }
+
+ return 0;
+}
+
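[Editor's aside: on the TX side, userspace typically nudges .ndo_xsk_wakeup with an empty sendto() after producing descriptors; a hedged sketch along the same lines (libxdp names, helper hypothetical):

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <xdp/xsk.h>

static void kick_tx(struct xsk_socket *xsk, struct xsk_ring_prod *tx_q)
{
	/* With XDP_USE_NEED_WAKEUP the syscall is only needed when the
	 * driver asked for it (xsk_set_tx_need_wakeup() above).
	 */
	if (!xsk_ring_prod__needs_wakeup(tx_q))
		return;

	if (sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0) < 0 &&
	    errno != EAGAIN && errno != EBUSY && errno != ENETDOWN)
		perror("tx kick");
}
]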
static int fec_hwtstamp_get(struct net_device *ndev,
struct kernel_hwtstamp_config *config)
{
@@ -4061,6 +4899,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_set_features = fec_set_features,
.ndo_bpf = fec_enet_bpf,
.ndo_xdp_xmit = fec_enet_xdp_xmit,
+ .ndo_xsk_wakeup = fec_enet_xsk_wakeup,
.ndo_hwtstamp_get = fec_hwtstamp_get,
.ndo_hwtstamp_set = fec_hwtstamp_set,
};
@@ -4188,7 +5027,8 @@ static int fec_enet_init(struct net_device *ndev)
if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
- NETDEV_XDP_ACT_REDIRECT;
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
fec_restart(ndev);
@@ -4591,6 +5431,11 @@ fec_probe(struct platform_device *pdev)
ndev->max_mtu = fep->max_buf_size - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ if (fep->quirks & FEC_QUIRK_HAS_RACC)
+ fep->rx_shift = 2;
+ else
+ fep->rx_shift = 0;
+
ret = register_netdev(ndev);
if (ret)
goto failed_register;
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index 1966dba512f8..106adf7a870f 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -946,17 +946,9 @@ static void fun_get_fec_stats(struct net_device *netdev,
#undef TX_STAT
#undef FEC_STAT
-static int fun_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static u32 fun_get_rx_ring_count(struct net_device *netdev)
{
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = netdev->real_num_rx_queues;
- return 0;
- default:
- break;
- }
- return -EOPNOTSUPP;
+ return netdev->real_num_rx_queues;
}
static int fun_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
@@ -1169,8 +1161,8 @@ static const struct ethtool_ops fun_ethtool_ops = {
.get_sset_count = fun_get_sset_count,
.get_strings = fun_get_strings,
.get_ethtool_stats = fun_get_ethtool_stats,
- .get_rxnfc = fun_get_rxnfc,
.set_rxnfc = fun_set_rxnfc,
+ .get_rx_ring_count = fun_get_rx_ring_count,
.get_rxfh_indir_size = fun_get_rxfh_indir_size,
.get_rxfh_key_size = fun_get_rxfh_key_size,
.get_rxfh = fun_get_rxfh,
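[Editor's aside: the funeth change above, and the gve/hns/hns3 changes below, all follow the same mechanical conversion: the ETHTOOL_GRXRINGS case moves out of .get_rxnfc into the new dedicated .get_rx_ring_count callback. As a hedged template (driver names hypothetical):

static u32 foo_get_rx_ring_count(struct net_device *netdev)
{
	struct foo_priv *priv = netdev_priv(netdev);

	return priv->num_rx_rings;	/* the value cmd->data used to carry */
}

static const struct ethtool_ops foo_ethtool_ops = {
	/* .get_rxnfc now handles only the remaining commands, if any */
	.get_rx_ring_count	= foo_get_rx_ring_count,
};
]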
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 66ddc4413f8d..42a0a6f7b296 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -840,15 +840,19 @@ static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return err;
}
+static u32 gve_get_rx_ring_count(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ return priv->rx_cfg.num_queues;
+}
+
static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct gve_priv *priv = netdev_priv(netdev);
int err = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->rx_cfg.num_queues;
- break;
case ETHTOOL_GRXCLSRLCNT:
if (!priv->max_flow_rules)
return -EOPNOTSUPP;
@@ -991,6 +995,7 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_channels = gve_get_channels,
.set_rxnfc = gve_set_rxnfc,
.get_rxnfc = gve_get_rxnfc,
+ .get_rx_ring_count = gve_get_rx_ring_count,
.get_rxfh_indir_size = gve_get_rxfh_indir_size,
.get_rxfh_key_size = gve_get_rxfh_key_size,
.get_rxfh = gve_get_rxfh,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index dbc84de39b70..0ee864b0afe0 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -2618,8 +2618,9 @@ static void gve_rx_queue_mem_free(struct net_device *dev, void *per_q_mem)
gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg);
}
-static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem,
- int idx)
+static int gve_rx_queue_mem_alloc(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *per_q_mem, int idx)
{
struct gve_priv *priv = netdev_priv(dev);
struct gve_rx_alloc_rings_cfg cfg = {0};
@@ -2640,7 +2641,9 @@ static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem,
return err;
}
-static int gve_rx_queue_start(struct net_device *dev, void *per_q_mem, int idx)
+static int gve_rx_queue_start(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *per_q_mem, int idx)
{
struct gve_priv *priv = netdev_priv(dev);
struct gve_rx_ring *gve_per_q_mem;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 40b89b3e5a31..28e85730f785 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -963,9 +963,6 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
int num_buffer_descs;
int total_num_descs;
- if (skb_is_gso(skb) && unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto drop;
-
if (tx->dqo.qpl) {
/* We do not need to verify the number of buffers used per
* packet or per segment in case of TSO as with 2K size buffers
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 60a586a951a0..23b295dedaef 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1230,21 +1230,11 @@ hns_set_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
rxfh->indir, rxfh->key, rxfh->hfunc);
}
-static int hns_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static u32 hns_get_rx_ring_count(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->ae_handle->q_num;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
+ return priv->ae_handle->q_num;
}
static const struct ethtool_ops hns_ethtool_ops = {
@@ -1273,7 +1263,7 @@ static const struct ethtool_ops hns_ethtool_ops = {
.get_rxfh_indir_size = hns_get_rss_indir_size,
.get_rxfh = hns_get_rss,
.set_rxfh = hns_set_rss,
- .get_rxnfc = hns_get_rxnfc,
+ .get_rx_ring_count = hns_get_rx_ring_count,
.get_link_ksettings = hns_nic_get_link_ksettings,
.set_link_ksettings = hns_nic_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 7a9573dcab74..a3206c97923e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -25,6 +25,7 @@
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/geneve.h>
+#include <net/netdev_queues.h>
#include "hnae3.h"
#include "hns3_enet.h"
@@ -1048,13 +1049,13 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
int order;
if (!alloc_size)
- return;
+ goto not_init;
order = get_order(alloc_size);
if (order > MAX_PAGE_ORDER) {
if (net_ratelimit())
dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
- return;
+ goto not_init;
}
tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
@@ -1092,6 +1093,13 @@ alloc_pages_error:
devm_kfree(ring_to_dev(ring), tx_spare);
devm_kzalloc_error:
ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
+not_init:
+	/* At driver init or reset init, ring->tx_spare is always NULL; when
+	 * called from hns3_set_ringparam it is usually not NULL and will be
+	 * restored if hns3_init_all_ring() fails, so it is safe to set
+	 * ring->tx_spare to NULL here.
+	 */
+ ring->tx_spare = NULL;
}
/* Use hns3_tx_spare_space() to make sure there is enough buffer
@@ -2810,14 +2818,12 @@ static int hns3_get_timeout_queue(struct net_device *ndev)
/* Find the stopped queue the same way the stack does */
for (i = 0; i < ndev->num_tx_queues; i++) {
+ unsigned int timedout_ms;
struct netdev_queue *q;
- unsigned long trans_start;
q = netdev_get_tx_queue(ndev, i);
- trans_start = READ_ONCE(q->trans_start);
- if (netif_xmit_stopped(q) &&
- time_after(jiffies,
- (trans_start + ndev->watchdog_timeo))) {
+ timedout_ms = netif_xmit_timeout_ms(q);
+ if (timedout_ms) {
#ifdef CONFIG_BQL
struct dql *dql = &q->dql;
@@ -2826,8 +2832,7 @@ static int hns3_get_timeout_queue(struct net_device *ndev)
dql->adj_limit, dql->num_completed);
#endif
netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
- q->state,
- jiffies_to_msecs(jiffies - trans_start));
+ q->state, timedout_ms);
break;
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a5eefa28454c..6d746a9fb687 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -988,6 +988,13 @@ static int hns3_get_rxfh_fields(struct net_device *netdev,
return -EOPNOTSUPP;
}
+static u32 hns3_get_rx_ring_count(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ return h->kinfo.num_tqps;
+}
+
static int hns3_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -995,9 +1002,6 @@ static int hns3_get_rxnfc(struct net_device *netdev,
struct hnae3_handle *h = hns3_get_handle(netdev);
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = h->kinfo.num_tqps;
- return 0;
case ETHTOOL_GRXCLSRLCNT:
if (h->ae_algo->ops->get_fd_rule_cnt)
return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
@@ -2148,6 +2152,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_sset_count = hns3_get_sset_count,
.get_rxnfc = hns3_get_rxnfc,
.set_rxnfc = hns3_set_rxnfc,
+ .get_rx_ring_count = hns3_get_rx_ring_count,
.get_rxfh_key_size = hns3_get_rss_key_size,
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
@@ -2187,6 +2192,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_sset_count = hns3_get_sset_count,
.get_rxnfc = hns3_get_rxnfc,
.set_rxnfc = hns3_set_rxnfc,
+ .get_rx_ring_count = hns3_get_rx_ring_count,
.get_rxfh_key_size = hns3_get_rss_key_size,
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 416e02e7b995..4ce92ddefcde 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -26,6 +26,7 @@ struct hclge_misc_vector {
#define HCLGE_TQP_REG_OFFSET 0x80000
#define HCLGE_TQP_REG_SIZE 0x200
+#define HCLGE_FD_COUNTER_MAX_SIZE_DEV_V2 128
#define HCLGE_TQP_MAX_SIZE_DEV_V2 1024
#define HCLGE_TQP_EXT_REG_OFFSET 0x100
@@ -727,11 +728,11 @@ struct hclge_fd_tcam_config_3_cmd {
#define HCLGE_FD_AD_DROP_B 0
#define HCLGE_FD_AD_DIRECT_QID_B 1
-#define HCLGE_FD_AD_QID_S 2
-#define HCLGE_FD_AD_QID_M GENMASK(11, 2)
+#define HCLGE_FD_AD_QID_L_S 2
+#define HCLGE_FD_AD_QID_L_M GENMASK(11, 2)
#define HCLGE_FD_AD_USE_COUNTER_B 12
-#define HCLGE_FD_AD_COUNTER_NUM_S 13
-#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(19, 13)
+#define HCLGE_FD_AD_COUNTER_NUM_L_S 13
+#define HCLGE_FD_AD_COUNTER_NUM_L_M GENMASK(19, 13)
#define HCLGE_FD_AD_NXT_STEP_B 20
#define HCLGE_FD_AD_NXT_KEY_S 21
#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21)
@@ -741,6 +742,8 @@ struct hclge_fd_tcam_config_3_cmd {
#define HCLGE_FD_AD_TC_OVRD_B 16
#define HCLGE_FD_AD_TC_SIZE_S 17
#define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17)
+#define HCLGE_FD_AD_QID_H_B 21
+#define HCLGE_FD_AD_COUNTER_NUM_H_B 26
struct hclge_fd_ad_config_cmd {
u8 stage;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index b8e2aa19f9e6..edec994981c7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -5679,15 +5679,20 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
}
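+ /* Queue ids above 1023 and counter ids above 127 need the extra
+ * high bits on V2 devices; the QID_L/COUNTER_NUM_L fields below
+ * carry the low bits.
+ */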
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_QID_H_B,
+ action->queue_id >= HCLGE_TQP_MAX_SIZE_DEV_V2 ? 1 : 0);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_COUNTER_NUM_H_B,
+ action->counter_id >= HCLGE_FD_COUNTER_MAX_SIZE_DEV_V2 ?
+ 1 : 0);
ad_data <<= 32;
hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
action->forward_to_direct_queue);
- hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
+ hnae3_set_field(ad_data, HCLGE_FD_AD_QID_L_M, HCLGE_FD_AD_QID_L_S,
action->queue_id);
hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
- hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
- HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_L_M,
+ HCLGE_FD_AD_COUNTER_NUM_L_S, action->counter_id);
hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
action->next_input_key);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index e9f338e9dbe7..f28528df5aac 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -1101,22 +1101,11 @@ static int __set_rss_rxfh(struct net_device *netdev,
return 0;
}
-static int hinic_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *cmd, u32 *rule_locs)
+static u32 hinic_get_rx_ring_count(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err = 0;
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = nic_dev->num_qps;
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
+ return nic_dev->num_qps;
}
static int hinic_get_rxfh(struct net_device *netdev,
@@ -1779,7 +1768,7 @@ static const struct ethtool_ops hinic_ethtool_ops = {
.set_pauseparam = hinic_set_pauseparam,
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
- .get_rxnfc = hinic_get_rxnfc,
+ .get_rx_ring_count = hinic_get_rx_ring_count,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
@@ -1812,7 +1801,7 @@ static const struct ethtool_ops hinicvf_ethtool_ops = {
.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
- .get_rxnfc = hinic_get_rxnfc,
+ .get_rx_ring_count = hinic_get_rx_ring_count,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
diff --git a/drivers/net/ethernet/huawei/hinic3/Kconfig b/drivers/net/ethernet/huawei/hinic3/Kconfig
index ce4331d1387b..02d6f91a7f4a 100644
--- a/drivers/net/ethernet/huawei/hinic3/Kconfig
+++ b/drivers/net/ethernet/huawei/hinic3/Kconfig
@@ -11,6 +11,7 @@ config HINIC3
depends on X86 || ARM64 || COMPILE_TEST
depends on PCI_MSI && 64BIT
select AUXILIARY_BUS
+ select DIMLIB
select PAGE_POOL
help
This driver supports HiNIC 3rd gen Network Adapter (HINIC3).
diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile
index c3efa45a6a42..26c05ecf31c9 100644
--- a/drivers/net/ethernet/huawei/hinic3/Makefile
+++ b/drivers/net/ethernet/huawei/hinic3/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_HINIC3) += hinic3.o
hinic3-objs := hinic3_cmdq.o \
hinic3_common.o \
hinic3_eqs.o \
+ hinic3_filter.o \
hinic3_hw_cfg.o \
hinic3_hw_comm.o \
hinic3_hwdev.o \
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
index ef539d1b69a3..86720bb119e9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
@@ -878,14 +878,11 @@ err_free_cmd_infos:
}
hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
-
err_destroy_cmdq_wq:
destroy_cmdq_wq(hwdev, cmdqs);
-
err_free_cmdqs:
dma_pool_destroy(cmdqs->cmd_buf_pool);
kfree(cmdqs);
-
err_out:
return err;
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
index e7417e8efa99..f7083a6e7df9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
@@ -5,6 +5,7 @@
#define _HINIC3_CSR_H_
#define HINIC3_CFG_REGS_FLAG 0x40000000
+#define HINIC3_MGMT_REGS_FLAG 0xC0000000
#define HINIC3_REGS_FLAG_MASK 0x3FFFFFFF
#define HINIC3_VF_CFG_REG_OFFSET 0x2000
@@ -24,6 +25,11 @@
#define HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF (HINIC3_CFG_REGS_FLAG + 0x0108)
#define HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF (HINIC3_CFG_REGS_FLAG + 0x010C)
+#define HINIC3_HOST_CSR_BASE_ADDR (HINIC3_MGMT_REGS_FLAG + 0x6000)
+#define HINIC3_PPF_ELECTION_OFFSET 0x0
+#define HINIC3_CSR_PPF_ELECTION_ADDR \
+ (HINIC3_HOST_CSR_BASE_ADDR + HINIC3_PPF_ELECTION_OFFSET)
+
#define HINIC3_CSR_DMA_ATTR_TBL_ADDR (HINIC3_CFG_REGS_FLAG + 0x380)
#define HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x390)
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
index 01686472985b..a2c3962116d5 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
@@ -655,7 +655,7 @@ int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs,
hwdev->aeqs = aeqs;
aeqs->hwdev = hwdev;
aeqs->num_aeqs = num_aeqs;
- aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM,
+ aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM | WQ_PERCPU,
HINIC3_MAX_AEQS);
if (!aeqs->workq) {
dev_err(hwdev->dev, "Failed to initialize aeq workqueue\n");
@@ -686,7 +686,6 @@ err_remove_eqs:
}
destroy_workqueue(aeqs->workq);
-
err_free_aeqs:
kfree(aeqs);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c b/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c
new file mode 100644
index 000000000000..6349d71f574b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include "hinic3_hwif.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_cfg.h"
+
+static int hinic3_filter_addr_sync(struct net_device *netdev, u8 *addr)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ return hinic3_set_mac(nic_dev->hwdev, addr, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+}
+
+static int hinic3_filter_addr_unsync(struct net_device *netdev, u8 *addr)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ /* The addr is in use */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
+ return hinic3_del_mac(nic_dev->hwdev, addr, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+}
+
+void hinic3_clean_mac_list_filter(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *f;
+
+ list_for_each_entry_safe(f, ftmp, &nic_dev->uc_filter_list, list) {
+ if (f->state == HINIC3_MAC_HW_SYNCED)
+ hinic3_filter_addr_unsync(netdev, f->addr);
+ list_del(&f->list);
+ kfree(f);
+ }
+
+ list_for_each_entry_safe(f, ftmp, &nic_dev->mc_filter_list, list) {
+ if (f->state == HINIC3_MAC_HW_SYNCED)
+ hinic3_filter_addr_unsync(netdev, f->addr);
+ list_del(&f->list);
+ kfree(f);
+ }
+}
+
+static struct hinic3_mac_filter *
+hinic3_find_mac(const struct list_head *filter_list, u8 *addr)
+{
+ struct hinic3_mac_filter *f;
+
+ list_for_each_entry(f, filter_list, list) {
+ if (ether_addr_equal(addr, f->addr))
+ return f;
+ }
+ return NULL;
+}
+
+static void hinic3_add_filter(struct net_device *netdev,
+ struct list_head *mac_filter_list,
+ u8 *addr)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_mac_filter *f;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return;
+
+ ether_addr_copy(f->addr, addr);
+
+ INIT_LIST_HEAD(&f->list);
+ list_add_tail(&f->list, mac_filter_list);
+
+ f->state = HINIC3_MAC_WAIT_HW_SYNC;
+ set_bit(HINIC3_MAC_FILTER_CHANGED, &nic_dev->flags);
+}
+
+static void hinic3_del_filter(struct net_device *netdev,
+ struct hinic3_mac_filter *f)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ set_bit(HINIC3_MAC_FILTER_CHANGED, &nic_dev->flags);
+
+ if (f->state == HINIC3_MAC_WAIT_HW_SYNC) {
+ /* have not added to hw, delete it directly */
+ list_del(&f->list);
+ kfree(f);
+ return;
+ }
+
+ f->state = HINIC3_MAC_WAIT_HW_UNSYNC;
+}
+
+static struct hinic3_mac_filter *
+hinic3_mac_filter_entry_clone(const struct hinic3_mac_filter *src)
+{
+ struct hinic3_mac_filter *f;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return NULL;
+
+ *f = *src;
+ INIT_LIST_HEAD(&f->list);
+
+ return f;
+}
+
+static void hinic3_undo_del_filter_entries(struct list_head *filter_list,
+ const struct list_head *from)
+{
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *f;
+
+ list_for_each_entry_safe(f, ftmp, from, list) {
+ if (hinic3_find_mac(filter_list, f->addr))
+ continue;
+
+ if (f->state == HINIC3_MAC_HW_UNSYNCED)
+ f->state = HINIC3_MAC_WAIT_HW_UNSYNC;
+
+ list_move_tail(&f->list, filter_list);
+ }
+}
+
+static void hinic3_undo_add_filter_entries(struct list_head *filter_list,
+ const struct list_head *from)
+{
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *tmp;
+ struct hinic3_mac_filter *f;
+
+ list_for_each_entry_safe(f, ftmp, from, list) {
+ tmp = hinic3_find_mac(filter_list, f->addr);
+ if (tmp && tmp->state == HINIC3_MAC_HW_SYNCING)
+ tmp->state = HINIC3_MAC_WAIT_HW_SYNC;
+ }
+}
+
+static void hinic3_cleanup_filter_list(const struct list_head *head)
+{
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *f;
+
+ list_for_each_entry_safe(f, ftmp, head, list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+}
+
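+/* Push the pending deletions and additions to hw: deletion failures are
+ * ignored, while the first addition failure aborts and is returned to the
+ * caller.
+ */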
+static int hinic3_mac_filter_sync_hw(struct net_device *netdev,
+ struct list_head *del_list,
+ struct list_head *add_list,
+ int *add_count)
+{
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *f;
+ int err;
+
+ if (!list_empty(del_list)) {
+ list_for_each_entry_safe(f, ftmp, del_list, list) {
+ /* ignore errors when deleting mac */
+ hinic3_filter_addr_unsync(netdev, f->addr);
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+
+ if (!list_empty(add_list)) {
+ list_for_each_entry_safe(f, ftmp, add_list, list) {
+ if (f->state != HINIC3_MAC_HW_SYNCING)
+ continue;
+
+ err = hinic3_filter_addr_sync(netdev, f->addr);
+ if (err) {
+ netdev_err(netdev, "Failed to add mac\n");
+ return err;
+ }
+
+ f->state = HINIC3_MAC_HW_SYNCED;
+ (*add_count)++;
+ }
+ }
+
+ return 0;
+}
+
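+/* Reconcile one filter list with hw. Entries waiting for unsync move to a
+ * temporary delete list, entries waiting for sync are cloned onto a
+ * temporary add list, and both lists are then pushed to hw. On failure the
+ * already synced entries are deleted from hw as well (except unicast MACs
+ * on a VF). Returns the number of added addresses or a negative errno.
+ */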
+static int hinic3_mac_filter_sync(struct net_device *netdev,
+ struct list_head *mac_filter_list, bool uc)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct list_head tmp_del_list, tmp_add_list;
+ struct hinic3_mac_filter *fclone;
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *f;
+ int err = 0, add_count = 0;
+
+ INIT_LIST_HEAD(&tmp_del_list);
+ INIT_LIST_HEAD(&tmp_add_list);
+
+ list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+ if (f->state != HINIC3_MAC_WAIT_HW_UNSYNC)
+ continue;
+
+ f->state = HINIC3_MAC_HW_UNSYNCED;
+ list_move_tail(&f->list, &tmp_del_list);
+ }
+
+ list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+ if (f->state != HINIC3_MAC_WAIT_HW_SYNC)
+ continue;
+
+ fclone = hinic3_mac_filter_entry_clone(f);
+ if (!fclone) {
+ hinic3_undo_del_filter_entries(mac_filter_list,
+ &tmp_del_list);
+ hinic3_undo_add_filter_entries(mac_filter_list,
+ &tmp_add_list);
+
+ netdev_err(netdev,
+ "Failed to clone mac_filter_entry\n");
+ err = -ENOMEM;
+ goto cleanup_tmp_filter_list;
+ }
+
+ f->state = HINIC3_MAC_HW_SYNCING;
+ list_add_tail(&fclone->list, &tmp_add_list);
+ }
+
+ err = hinic3_mac_filter_sync_hw(netdev, &tmp_del_list,
+ &tmp_add_list, &add_count);
+ if (err) {
+ /* there were errors, delete all MACs synced to hw */
+ hinic3_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
+ add_count = 0;
+ /* VF does not support falling back to promiscuous mode,
+ * so keep the unicast MACs already synced to hw.
+ */
+ if (!HINIC3_IS_VF(nic_dev->hwdev) || !uc) {
+ list_for_each_entry_safe(f, ftmp, mac_filter_list,
+ list) {
+ if (f->state != HINIC3_MAC_HW_SYNCED)
+ continue;
+
+ fclone = hinic3_mac_filter_entry_clone(f);
+ if (!fclone)
+ break;
+
+ f->state = HINIC3_MAC_WAIT_HW_SYNC;
+ list_add_tail(&fclone->list, &tmp_del_list);
+ }
+ }
+
+ hinic3_mac_filter_sync_hw(netdev, &tmp_del_list,
+ &tmp_add_list, &add_count);
+ }
+
+cleanup_tmp_filter_list:
+ hinic3_cleanup_filter_list(&tmp_del_list);
+ hinic3_cleanup_filter_list(&tmp_add_list);
+
+ return err ? err : add_count;
+}
+
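+/* Sync both filter lists to hw. If unicast sync fails, force promiscuous
+ * mode on; if multicast sync fails, force all-multicast mode on (provided
+ * the device supports the respective mode).
+ */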
+static void hinic3_mac_filter_sync_all(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int add_count;
+
+ if (test_bit(HINIC3_MAC_FILTER_CHANGED, &nic_dev->flags)) {
+ clear_bit(HINIC3_MAC_FILTER_CHANGED, &nic_dev->flags);
+ add_count = hinic3_mac_filter_sync(netdev,
+ &nic_dev->uc_filter_list,
+ true);
+ if (add_count < 0 &&
+ hinic3_test_support(nic_dev, HINIC3_NIC_F_PROMISC))
+ set_bit(HINIC3_PROMISC_FORCE_ON,
+ &nic_dev->rx_mod_state);
+ else if (add_count)
+ clear_bit(HINIC3_PROMISC_FORCE_ON,
+ &nic_dev->rx_mod_state);
+
+ add_count = hinic3_mac_filter_sync(netdev,
+ &nic_dev->mc_filter_list,
+ false);
+ if (add_count < 0 &&
+ hinic3_test_support(nic_dev, HINIC3_NIC_F_ALLMULTI))
+ set_bit(HINIC3_ALLMULTI_FORCE_ON,
+ &nic_dev->rx_mod_state);
+ else if (add_count)
+ clear_bit(HINIC3_ALLMULTI_FORCE_ON,
+ &nic_dev->rx_mod_state);
+ }
+}
+
+#define HINIC3_DEFAULT_RX_MODE \
+ (L2NIC_RX_MODE_UC | L2NIC_RX_MODE_MC | L2NIC_RX_MODE_BC)
+
+static void hinic3_update_mac_filter(struct net_device *netdev,
+ const struct netdev_hw_addr_list *src_list,
+ struct list_head *filter_list)
+{
+ struct hinic3_mac_filter *filter;
+ struct hinic3_mac_filter *ftmp;
+ struct hinic3_mac_filter *f;
+ struct netdev_hw_addr *ha;
+
+ /* add addr if not already in the filter list */
+ netif_addr_lock_bh(netdev);
+ netdev_hw_addr_list_for_each(ha, src_list) {
+ filter = hinic3_find_mac(filter_list, ha->addr);
+ if (!filter)
+ hinic3_add_filter(netdev, filter_list, ha->addr);
+ else if (filter->state == HINIC3_MAC_WAIT_HW_UNSYNC)
+ filter->state = HINIC3_MAC_HW_SYNCED;
+ }
+ netif_addr_unlock_bh(netdev);
+
+ /* delete addr if not in netdev list */
+ list_for_each_entry_safe(f, ftmp, filter_list, list) {
+ bool found = false;
+
+ netif_addr_lock_bh(netdev);
+ netdev_hw_addr_list_for_each(ha, src_list)
+ if (ether_addr_equal(ha->addr, f->addr)) {
+ found = true;
+ break;
+ }
+ netif_addr_unlock_bh(netdev);
+
+ if (found)
+ continue;
+
+ hinic3_del_filter(netdev, f);
+ }
+}
+
+static void hinic3_sync_rx_mode_to_hw(struct net_device *netdev, int promisc_en,
+ int allmulti_en)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u32 rx_mode = HINIC3_DEFAULT_RX_MODE;
+ int err;
+
+ rx_mode |= (promisc_en ? L2NIC_RX_MODE_PROMISC : 0);
+ rx_mode |= (allmulti_en ? L2NIC_RX_MODE_MC_ALL : 0);
+
+ if (promisc_en != test_bit(HINIC3_HW_PROMISC_ON,
+ &nic_dev->rx_mod_state))
+ netdev_dbg(netdev, "%s promisc mode\n",
+ promisc_en ? "Enter" : "Left");
+ if (allmulti_en !=
+ test_bit(HINIC3_HW_ALLMULTI_ON, &nic_dev->rx_mod_state))
+ netdev_dbg(netdev, "%s all_multi mode\n",
+ allmulti_en ? "Enter" : "Left");
+
+ err = hinic3_set_rx_mode(nic_dev->hwdev, rx_mode);
+ if (err) {
+ netdev_err(netdev, "Failed to set rx_mode\n");
+ return;
+ }
+
+ promisc_en ? set_bit(HINIC3_HW_PROMISC_ON, &nic_dev->rx_mod_state) :
+ clear_bit(HINIC3_HW_PROMISC_ON, &nic_dev->rx_mod_state);
+
+ allmulti_en ? set_bit(HINIC3_HW_ALLMULTI_ON, &nic_dev->rx_mod_state) :
+ clear_bit(HINIC3_HW_ALLMULTI_ON, &nic_dev->rx_mod_state);
+}
+
+void hinic3_set_rx_mode_work(struct work_struct *work)
+{
+ int promisc_en = 0, allmulti_en = 0;
+ struct hinic3_nic_dev *nic_dev;
+ struct net_device *netdev;
+
+ nic_dev = container_of(work, struct hinic3_nic_dev, rx_mode_work);
+ netdev = nic_dev->netdev;
+
+ if (test_and_clear_bit(HINIC3_UPDATE_MAC_FILTER, &nic_dev->flags)) {
+ hinic3_update_mac_filter(netdev, &netdev->uc,
+ &nic_dev->uc_filter_list);
+ hinic3_update_mac_filter(netdev, &netdev->mc,
+ &nic_dev->mc_filter_list);
+ }
+
+ hinic3_mac_filter_sync_all(netdev);
+
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_PROMISC))
+ promisc_en = !!(netdev->flags & IFF_PROMISC) ||
+ test_bit(HINIC3_PROMISC_FORCE_ON,
+ &nic_dev->rx_mod_state);
+
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_ALLMULTI))
+ allmulti_en = !!(netdev->flags & IFF_ALLMULTI) ||
+ test_bit(HINIC3_ALLMULTI_FORCE_ON,
+ &nic_dev->rx_mod_state);
+
+ if (promisc_en != test_bit(HINIC3_HW_PROMISC_ON,
+ &nic_dev->rx_mod_state) ||
+ allmulti_en != test_bit(HINIC3_HW_ALLMULTI_ON,
+ &nic_dev->rx_mod_state))
+ hinic3_sync_rx_mode_to_hw(netdev, promisc_en, allmulti_en);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
index 89638813df40..ecfe6265954e 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
@@ -9,6 +9,36 @@
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
+static int hinic3_get_interrupt_cfg(struct hinic3_hwdev *hwdev,
+ struct hinic3_interrupt_info *info)
+{
+ struct comm_cmd_cfg_msix_ctrl_reg msix_cfg = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ msix_cfg.func_id = hinic3_global_func_id(hwdev);
+ msix_cfg.msix_index = info->msix_index;
+ msix_cfg.opcode = MGMT_MSG_CMD_OP_GET;
+
+ mgmt_msg_params_init_default(&msg_params, &msix_cfg, sizeof(msix_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_CFG_MSIX_CTRL_REG, &msg_params);
+ if (err || msix_cfg.head.status) {
+ dev_err(hwdev->dev, "Failed to get interrupt config, err: %d, status: 0x%x\n",
+ err, msix_cfg.head.status);
+ return -EFAULT;
+ }
+
+ info->lli_credit_limit = msix_cfg.lli_credit_cnt;
+ info->lli_timer_cfg = msix_cfg.lli_timer_cnt;
+ info->pending_limit = msix_cfg.pending_cnt;
+ info->coalesc_timer_cfg = msix_cfg.coalesce_timer_cnt;
+ info->resend_timer_cfg = msix_cfg.resend_timer_cnt;
+
+ return 0;
+}
+
int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
const struct hinic3_interrupt_info *info)
{
@@ -40,6 +70,30 @@ int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
return 0;
}
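+/* Like hinic3_set_interrupt_cfg_direct(), but preserves the current LLI
+ * settings and, unless interrupt_coalesc_set is set, the current interrupt
+ * coalescing settings, by reading them back from the firmware first.
+ */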
+int hinic3_set_interrupt_cfg(struct hinic3_hwdev *hwdev,
+ struct hinic3_interrupt_info info)
+{
+ struct hinic3_interrupt_info temp_info;
+ int err;
+
+ temp_info.msix_index = info.msix_index;
+
+ err = hinic3_get_interrupt_cfg(hwdev, &temp_info);
+ if (err)
+ return err;
+
+ info.lli_credit_limit = temp_info.lli_credit_limit;
+ info.lli_timer_cfg = temp_info.lli_timer_cfg;
+
+ if (!info.interrupt_coalesc_set) {
+ info.pending_limit = temp_info.pending_limit;
+ info.coalesc_timer_cfg = temp_info.coalesc_timer_cfg;
+ info.resend_timer_cfg = temp_info.resend_timer_cfg;
+ }
+
+ return hinic3_set_interrupt_cfg_direct(hwdev, &info);
+}
+
int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag)
{
struct comm_cmd_func_reset func_reset = {};
@@ -314,6 +368,8 @@ int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev)
ret = -EFAULT;
}
+ hinic3_set_pf_status(hwif, HINIC3_PF_STATUS_FLR_START_FLAG);
+
clr_res.func_id = hwif->attr.func_global_idx;
msg_params.buf_in = &clr_res;
msg_params.in_size = sizeof(clr_res);
@@ -337,6 +393,65 @@ int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev)
return ret;
}
+int hinic3_set_bdf_ctxt(struct hinic3_hwdev *hwdev,
+ struct comm_cmd_bdf_info *bdf_info)
+{
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ mgmt_msg_params_init_default(&msg_params, bdf_info, sizeof(*bdf_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SEND_BDF_INFO, &msg_params);
+ if (err || bdf_info->head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set bdf info to fw, err: %d, status: 0x%x\n",
+ err, bdf_info->head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic3_sync_time(struct hinic3_hwdev *hwdev, u64 time)
+{
+ struct comm_cmd_sync_time time_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ time_info.mstime = time;
+
+ mgmt_msg_params_init_default(&msg_params, &time_info,
+ sizeof(time_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SYNC_TIME, &msg_params);
+ if (err || time_info.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to sync time to mgmt, err: %d, status: 0x%x\n",
+ err, time_info.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void hinic3_sync_time_to_fw(struct hinic3_hwdev *hwdev)
+{
+ struct timespec64 ts = {};
+ u64 time;
+ int err;
+
+ ktime_get_real_ts64(&ts);
+ time = (u64)(ts.tv_sec * MSEC_PER_SEC + ts.tv_nsec / NSEC_PER_MSEC);
+
+ err = hinic3_sync_time(hwdev, time);
+ if (err)
+ dev_err(hwdev->dev,
+ "Synchronize UTC time to firmware failed, err=%d\n",
+ err);
+}
+
static int get_hw_rx_buf_size_idx(int rx_buf_sz, u16 *buf_sz_idx)
{
/* Supported RX buffer sizes in bytes. Configured by array index. */
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
index 304f5691f0c2..8e4737c486b7 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
@@ -23,6 +23,8 @@ struct hinic3_interrupt_info {
int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
const struct hinic3_interrupt_info *info);
+int hinic3_set_interrupt_cfg(struct hinic3_hwdev *hwdev,
+ struct hinic3_interrupt_info info);
int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag);
int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
@@ -40,6 +42,10 @@ int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx,
u32 page_size);
int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth);
int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev);
+int hinic3_set_bdf_ctxt(struct hinic3_hwdev *hwdev,
+ struct comm_cmd_bdf_info *bdf_info);
+void hinic3_sync_time_to_fw(struct hinic3_hwdev *hwdev);
+
int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
int rx_buf_sz);
int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
index 623cf2d14cbc..329a9c464ff9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
@@ -39,6 +39,8 @@ enum mgmt_mod_type {
/* Configuration module */
MGMT_MOD_CFGM = 7,
MGMT_MOD_HILINK = 14,
+ /* hardware max module id */
+ MGMT_MOD_HW_MAX = 20,
};
static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_params,
@@ -110,6 +112,10 @@ enum comm_cmd {
COMM_CMD_CFG_MSIX_CTRL_REG = 23,
COMM_CMD_SET_CEQ_CTRL_REG = 24,
COMM_CMD_SET_DMA_ATTR = 25,
+
+ /* Commands for pushing information to the management firmware */
+ COMM_CMD_SYNC_TIME = 62,
+ COMM_CMD_SEND_BDF_INFO = 64,
};
struct comm_cmd_cfg_msix_ctrl_reg {
@@ -251,6 +257,24 @@ struct comm_cmd_clear_resource {
u16 rsvd1[3];
};
+struct comm_cmd_sync_time {
+ struct mgmt_msg_head head;
+
+ u64 mstime;
+ u64 rsvd1;
+};
+
+struct comm_cmd_bdf_info {
+ struct mgmt_msg_head head;
+
+ u16 function_idx;
+ u8 rsvd1[2];
+ u8 bus;
+ u8 device;
+ u8 function;
+ u8 rsvd2[5];
+};
+
/* Services supported by HW. HW uses these values when delivering events.
* HW supports multiple services that are not yet supported by driver
* (e.g. RoCE).
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
index 95a213133be9..7906d4057cf2 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
@@ -13,6 +13,8 @@
#define HINIC3_PCIE_SNOOP 0
#define HINIC3_PCIE_TPH_DISABLE 0
+#define HINIC3_SYNFW_TIME_PERIOD (60 * 60 * 1000)
+
#define HINIC3_DMA_ATTR_INDIR_IDX_MASK GENMASK(9, 0)
#define HINIC3_DMA_ATTR_INDIR_IDX_SET(val, member) \
FIELD_PREP(HINIC3_DMA_ATTR_INDIR_##member##_MASK, val)
@@ -38,6 +40,7 @@
#define HINIC3_WQ_MAX_REQ 10
enum hinic3_hwdev_init_state {
+ HINIC3_HWDEV_MGMT_INITED = 1,
HINIC3_HWDEV_MBOX_INITED = 2,
HINIC3_HWDEV_CMDQ_INITED = 3,
};
@@ -197,7 +200,7 @@ static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev)
for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
eq = &ceqs->ceq[q_id];
info.msix_index = eq->msix_entry_idx;
- err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
+ err = hinic3_set_interrupt_cfg(hwdev, info);
if (err) {
dev_err(hwdev->dev, "Set msix attr for ceq %u failed\n",
q_id);
@@ -208,6 +211,36 @@ static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev)
return 0;
}
+static int hinic3_comm_pf_to_mgmt_init(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ if (HINIC3_IS_VF(hwdev))
+ return 0;
+
+ err = hinic3_pf_to_mgmt_init(hwdev);
+ if (err)
+ return err;
+
+ set_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state);
+
+ return 0;
+}
+
+static void hinic3_comm_pf_to_mgmt_free(struct hinic3_hwdev *hwdev)
+{
+ if (HINIC3_IS_VF(hwdev))
+ return;
+
+ spin_lock_bh(&hwdev->channel_lock);
+ clear_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state);
+ spin_unlock_bh(&hwdev->channel_lock);
+
+ hinic3_aeq_unregister_cb(hwdev, HINIC3_MSG_FROM_FW);
+
+ hinic3_pf_to_mgmt_free(hwdev);
+}
+
static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev)
{
int err;
@@ -409,20 +442,28 @@ static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev)
if (err)
return err;
- err = init_basic_attributes(hwdev);
+ err = hinic3_comm_pf_to_mgmt_init(hwdev);
if (err)
goto err_free_basic_mgmt_ch;
+ err = init_basic_attributes(hwdev);
+ if (err)
+ goto err_free_comm_pf_to_mgmt;
+
err = init_cmdqs_channel(hwdev);
if (err) {
dev_err(hwdev->dev, "Failed to init cmdq channel\n");
goto err_clear_func_svc_used_state;
}
+ hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_ACTIVE_FLAG);
+
return 0;
err_clear_func_svc_used_state:
hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+err_free_comm_pf_to_mgmt:
+ hinic3_comm_pf_to_mgmt_free(hwdev);
err_free_basic_mgmt_ch:
free_base_mgmt_channel(hwdev);
@@ -431,11 +472,44 @@ err_free_basic_mgmt_ch:
static void hinic3_uninit_comm_ch(struct hinic3_hwdev *hwdev)
{
+ hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_INIT);
hinic3_free_cmdqs_channel(hwdev);
hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+ hinic3_comm_pf_to_mgmt_free(hwdev);
free_base_mgmt_channel(hwdev);
}
+static void hinic3_auto_sync_time_work(struct work_struct *work)
+{
+ struct delayed_work *delay = to_delayed_work(work);
+ struct hinic3_hwdev *hwdev;
+
+ hwdev = container_of(delay, struct hinic3_hwdev, sync_time_task);
+
+ hinic3_sync_time_to_fw(hwdev);
+
+ queue_delayed_work(hwdev->workq, &hwdev->sync_time_task,
+ msecs_to_jiffies(HINIC3_SYNFW_TIME_PERIOD));
+}
+
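+/* Only the elected PPF schedules the periodic (hourly) firmware time sync. */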
+static void hinic3_init_ppf_work(struct hinic3_hwdev *hwdev)
+{
+ if (hinic3_ppf_idx(hwdev) != hinic3_global_func_id(hwdev))
+ return;
+
+ INIT_DELAYED_WORK(&hwdev->sync_time_task, hinic3_auto_sync_time_work);
+ queue_delayed_work(hwdev->workq, &hwdev->sync_time_task,
+ msecs_to_jiffies(HINIC3_SYNFW_TIME_PERIOD));
+}
+
+static void hinic3_free_ppf_work(struct hinic3_hwdev *hwdev)
+{
+ if (hinic3_ppf_idx(hwdev) != hinic3_global_func_id(hwdev))
+ return;
+
+ disable_delayed_work_sync(&hwdev->sync_time_task);
+}
+
static DEFINE_IDA(hinic3_adev_ida);
static int hinic3_adev_idx_alloc(void)
@@ -472,7 +546,7 @@ int hinic3_init_hwdev(struct pci_dev *pdev)
goto err_free_hwdev;
}
- hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM,
+ hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM | WQ_PERCPU,
HINIC3_WQ_MAX_REQ);
if (!hwdev->workq) {
dev_err(hwdev->dev, "Failed to alloc hardware workq\n");
@@ -498,15 +572,19 @@ int hinic3_init_hwdev(struct pci_dev *pdev)
goto err_uninit_comm_ch;
}
+ hinic3_init_ppf_work(hwdev);
+
err = hinic3_set_comm_features(hwdev, hwdev->features,
COMM_MAX_FEATURE_QWORD);
if (err) {
dev_err(hwdev->dev, "Failed to set comm features\n");
- goto err_uninit_comm_ch;
+ goto err_free_ppf_work;
}
return 0;
+err_free_ppf_work:
+ hinic3_free_ppf_work(hwdev);
err_uninit_comm_ch:
hinic3_uninit_comm_ch(hwdev);
err_free_cfg_mgmt:
@@ -528,6 +606,7 @@ void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
u64 drv_features[COMM_MAX_FEATURE_QWORD] = {};
hinic3_set_comm_features(hwdev, drv_features, COMM_MAX_FEATURE_QWORD);
+ hinic3_free_ppf_work(hwdev);
hinic3_func_rx_tx_flush(hwdev);
hinic3_uninit_comm_ch(hwdev);
hinic3_free_cfg_mgmt(hwdev);
@@ -539,9 +618,21 @@ void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
void hinic3_set_api_stop(struct hinic3_hwdev *hwdev)
{
+ struct hinic3_recv_msg *recv_resp_msg;
struct hinic3_mbox *mbox;
spin_lock_bh(&hwdev->channel_lock);
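+ /* Complete any management response still being waited on so the
+ * waiter bails out with a timeout instead of blocking teardown.
+ */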
+ if (HINIC3_IS_PF(hwdev) &&
+ test_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state)) {
+ recv_resp_msg = &hwdev->pf_to_mgmt->recv_resp_msg_from_mgmt;
+ spin_lock_bh(&hwdev->pf_to_mgmt->sync_event_lock);
+ if (hwdev->pf_to_mgmt->event_flag == COMM_SEND_EVENT_START) {
+ complete(&recv_resp_msg->recv_done);
+ hwdev->pf_to_mgmt->event_flag = COMM_SEND_EVENT_TIMEOUT;
+ }
+ spin_unlock_bh(&hwdev->pf_to_mgmt->sync_event_lock);
+ }
+
if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) {
mbox = hwdev->mbox;
spin_lock(&mbox->mbox_lock);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
index 62e2745e9316..9686c2600b46 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
@@ -17,6 +17,24 @@ enum hinic3_event_service_type {
HINIC3_EVENT_SRV_NIC = 1
};
+enum hinic3_comm_event_type {
+ HINIC3_COMM_EVENT_PCIE_LINK_DOWN = 0,
+ HINIC3_COMM_EVENT_HEART_LOST = 1,
+ HINIC3_COMM_EVENT_FAULT = 2,
+ HINIC3_COMM_EVENT_SRIOV_STATE_CHANGE = 3,
+ HINIC3_COMM_EVENT_CARD_REMOVE = 4,
+ HINIC3_COMM_EVENT_MGMT_WATCHDOG = 5,
+};
+
+enum hinic3_fault_err_level {
+ HINIC3_FAULT_LEVEL_SERIOUS_FLR = 3,
+};
+
+enum hinic3_fault_source_type {
+ HINIC3_FAULT_SRC_HW_PHY_FAULT = 9,
+ HINIC3_FAULT_SRC_TX_TIMEOUT = 22,
+};
+
#define HINIC3_SRV_EVENT_TYPE(svc, type) (((svc) << 16) | (type))
/* driver-specific data of pci_dev */
@@ -28,6 +46,7 @@ struct hinic3_pcidev {
void __iomem *cfg_reg_base;
void __iomem *intr_reg_base;
+ void __iomem *mgmt_reg_base;
void __iomem *db_base;
u64 db_dwqe_len;
u64 db_base_phy;
@@ -48,7 +67,9 @@ struct hinic3_hwdev {
struct hinic3_ceqs *ceqs;
struct hinic3_mbox *mbox;
struct hinic3_cmdqs *cmdqs;
+ struct delayed_work sync_time_task;
struct workqueue_struct *workq;
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
/* protect channel init and uninit */
spinlock_t channel_lock;
u64 features[COMM_MAX_FEATURE_QWORD];
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
index f76f140fb6f7..801f48e241f8 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
@@ -31,6 +31,7 @@
#define HINIC3_AF0_GET(val, member) \
FIELD_GET(HINIC3_AF0_##member##_MASK, val)
+#define HINIC3_AF1_PPF_IDX_MASK GENMASK(5, 0)
#define HINIC3_AF1_AEQS_PER_FUNC_MASK GENMASK(9, 8)
#define HINIC3_AF1_MGMT_INIT_STATUS_MASK BIT(30)
#define HINIC3_AF1_GET(val, member) \
@@ -41,6 +42,10 @@
#define HINIC3_AF2_GET(val, member) \
FIELD_GET(HINIC3_AF2_##member##_MASK, val)
+#define HINIC3_AF3_GLOBAL_VF_ID_OF_PF_MASK GENMASK(27, 16)
+#define HINIC3_AF3_GET(val, member) \
+ FIELD_GET(HINIC3_AF3_##member##_MASK, val)
+
#define HINIC3_AF4_DOORBELL_CTRL_MASK BIT(0)
#define HINIC3_AF4_GET(val, member) \
FIELD_GET(HINIC3_AF4_##member##_MASK, val)
@@ -54,9 +59,17 @@
#define HINIC3_AF6_PF_STATUS_MASK GENMASK(15, 0)
#define HINIC3_AF6_FUNC_MAX_SQ_MASK GENMASK(31, 23)
#define HINIC3_AF6_MSIX_FLEX_EN_MASK BIT(22)
+#define HINIC3_AF6_SET(val, member) \
+ FIELD_PREP(HINIC3_AF6_##member##_MASK, val)
#define HINIC3_AF6_GET(val, member) \
FIELD_GET(HINIC3_AF6_##member##_MASK, val)
+#define HINIC3_PPF_ELECTION_IDX_MASK GENMASK(5, 0)
+#define HINIC3_PPF_ELECTION_SET(val, member) \
+ FIELD_PREP(HINIC3_PPF_ELECTION_##member##_MASK, val)
+#define HINIC3_PPF_ELECTION_GET(val, member) \
+ FIELD_GET(HINIC3_PPF_ELECTION_##member##_MASK, val)
+
#define HINIC3_GET_REG_ADDR(reg) ((reg) & (HINIC3_REGS_FLAG_MASK))
static void __iomem *hinic3_reg_addr(struct hinic3_hwif *hwif, u32 reg)
@@ -105,12 +118,15 @@ static void set_hwif_attr(struct hinic3_func_attr *attr, u32 attr0, u32 attr1,
attr->pci_intf_idx = HINIC3_AF0_GET(attr0, PCI_INTF_IDX);
attr->func_type = HINIC3_AF0_GET(attr0, FUNC_TYPE);
+ attr->ppf_idx = HINIC3_AF1_GET(attr1, PPF_IDX);
attr->num_aeqs = BIT(HINIC3_AF1_GET(attr1, AEQS_PER_FUNC));
attr->num_ceqs = HINIC3_AF2_GET(attr2, CEQS_PER_FUNC);
attr->num_irqs = HINIC3_AF2_GET(attr2, IRQS_PER_FUNC);
if (attr->num_irqs > HINIC3_MAX_MSIX_ENTRY)
attr->num_irqs = HINIC3_MAX_MSIX_ENTRY;
+ attr->global_vf_id_of_pf = HINIC3_AF3_GET(attr3, GLOBAL_VF_ID_OF_PF);
+
attr->num_sq = HINIC3_AF6_GET(attr6, FUNC_MAX_SQ);
attr->msix_flex_en = HINIC3_AF6_GET(attr6, MSIX_FLEX_EN);
}
@@ -187,6 +203,28 @@ void hinic3_toggle_doorbell(struct hinic3_hwif *hwif,
hinic3_hwif_write_reg(hwif, addr, attr4);
}
+static void hinic3_set_ppf(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_hwif *hwif = hwdev->hwif;
+ struct hinic3_func_attr *attr;
+ u32 addr, val;
+
+ if (HINIC3_IS_VF(hwdev))
+ return;
+
+ /* read-modify-write the PPF election register */
+ attr = &hwif->attr;
+ addr = HINIC3_CSR_PPF_ELECTION_ADDR;
+ val = hinic3_hwif_read_reg(hwif, addr);
+ val &= ~HINIC3_PPF_ELECTION_IDX_MASK;
+ val |= HINIC3_PPF_ELECTION_SET(attr->func_global_idx, IDX);
+ hinic3_hwif_write_reg(hwif, addr, val);
+
+ /* Check PPF index */
+ val = hinic3_hwif_read_reg(hwif, addr);
+ attr->ppf_idx = HINIC3_PPF_ELECTION_GET(val, IDX);
+}
+
static int db_area_idx_init(struct hinic3_hwif *hwif, u64 db_base_phy,
u8 __iomem *db_base, u64 db_dwqe_len)
{
@@ -366,6 +404,27 @@ static int wait_until_doorbell_and_outbound_enabled(struct hinic3_hwif *hwif)
USEC_PER_MSEC);
}
+void hinic3_set_pf_status(struct hinic3_hwif *hwif,
+ enum hinic3_pf_status status)
+{
+ u32 attr6 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR);
+
+ attr6 &= ~HINIC3_AF6_PF_STATUS_MASK;
+ attr6 |= HINIC3_AF6_SET(status, PF_STATUS);
+
+ if (hwif->attr.func_type == HINIC3_FUNC_TYPE_VF)
+ return;
+
+ hinic3_hwif_write_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR, attr6);
+}
+
+enum hinic3_pf_status hinic3_get_pf_status(struct hinic3_hwif *hwif)
+{
+ u32 attr6 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR);
+
+ return HINIC3_AF6_GET(attr6, PF_STATUS);
+}
+
int hinic3_init_hwif(struct hinic3_hwdev *hwdev)
{
struct hinic3_pcidev *pci_adapter = hwdev->adapter;
@@ -378,9 +437,15 @@ int hinic3_init_hwif(struct hinic3_hwdev *hwdev)
return -ENOMEM;
hwdev->hwif = hwif;
- hwif->cfg_regs_base = (u8 __iomem *)pci_adapter->cfg_reg_base +
+ /* if function is VF, mgmt_regs_base will be NULL */
+ hwif->cfg_regs_base = pci_adapter->mgmt_reg_base ?
+ pci_adapter->cfg_reg_base :
+ (u8 __iomem *)pci_adapter->cfg_reg_base +
HINIC3_VF_CFG_REG_OFFSET;
+ hwif->intr_regs_base = pci_adapter->intr_reg_base;
+ hwif->mgmt_regs_base = pci_adapter->mgmt_reg_base;
+
err = db_area_idx_init(hwif, pci_adapter->db_base_phy,
pci_adapter->db_base,
pci_adapter->db_dwqe_len);
@@ -412,7 +477,15 @@ int hinic3_init_hwif(struct hinic3_hwdev *hwdev)
goto err_free_db_area_idx;
}
+ hinic3_set_ppf(hwdev);
+
disable_all_msix(hwdev);
+ /* prevent the mgmt CPU from reporting any events */
+ hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_INIT);
+
+ dev_dbg(hwdev->dev, "global_func_idx: %u, func_type: %d, pci_intf_idx: %u, ppf: %u\n",
+ hwif->attr.func_global_idx, hwif->attr.func_type,
+ hwif->attr.pci_intf_idx, hwif->attr.ppf_idx);
return 0;
@@ -434,3 +507,18 @@ u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev)
{
return hwdev->hwif->attr.func_global_idx;
}
+
+u8 hinic3_pf_id_of_vf(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->hwif->attr.port_to_port_idx;
+}
+
+u16 hinic3_glb_pf_vf_offset(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->hwif->attr.global_vf_id_of_pf;
+}
+
+u8 hinic3_ppf_idx(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->hwif->attr.ppf_idx;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
index c02904e861cc..445bf7fa79b4 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
@@ -10,6 +10,7 @@
struct hinic3_hwdev;
enum hinic3_func_type {
+ HINIC3_FUNC_TYPE_PF = 0,
HINIC3_FUNC_TYPE_VF = 1,
};
@@ -38,6 +39,8 @@ static_assert(sizeof(struct hinic3_func_attr) == 20);
struct hinic3_hwif {
u8 __iomem *cfg_regs_base;
+ u8 __iomem *intr_regs_base;
+ u8 __iomem *mgmt_regs_base;
u64 db_base_phy;
u64 db_dwqe_len;
u8 __iomem *db_base;
@@ -50,6 +53,13 @@ enum hinic3_outbound_ctrl {
DISABLE_OUTBOUND = 0x1,
};
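+/* PF status is written to the attr6 register so the mgmt CPU knows whether
+ * the PF is initializing, active, or in the middle of an FLR flush.
+ */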
+enum hinic3_pf_status {
+ HINIC3_PF_STATUS_INIT = 0x0,
+ HINIC3_PF_STATUS_ACTIVE_FLAG = 0x11,
+ HINIC3_PF_STATUS_FLR_START_FLAG = 0x12,
+ HINIC3_PF_STATUS_FLR_FINISH_FLAG = 0x13,
+};
+
enum hinic3_doorbell_ctrl {
ENABLE_DOORBELL = 0,
DISABLE_DOORBELL = 1,
@@ -65,6 +75,12 @@ enum hinic3_msix_auto_mask {
HINIC3_SET_MSIX_AUTO_MASK,
};
+#define HINIC3_FUNC_TYPE(hwdev) ((hwdev)->hwif->attr.func_type)
+#define HINIC3_IS_PF(hwdev) \
+ (HINIC3_FUNC_TYPE(hwdev) == HINIC3_FUNC_TYPE_PF)
+#define HINIC3_IS_VF(hwdev) \
+ (HINIC3_FUNC_TYPE(hwdev) == HINIC3_FUNC_TYPE_VF)
+
u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg);
void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val);
@@ -75,6 +91,10 @@ int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base,
void __iomem **dwqe_base);
void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base);
+void hinic3_set_pf_status(struct hinic3_hwif *hwif,
+ enum hinic3_pf_status status);
+enum hinic3_pf_status hinic3_get_pf_status(struct hinic3_hwif *hwif);
+
int hinic3_init_hwif(struct hinic3_hwdev *hwdev);
void hinic3_free_hwif(struct hinic3_hwdev *hwdev);
@@ -86,5 +106,8 @@ void hinic3_set_msix_auto_mask_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
enum hinic3_msix_auto_mask flag);
u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev);
+u8 hinic3_pf_id_of_vf(struct hinic3_hwdev *hwdev);
+u16 hinic3_glb_pf_vf_offset(struct hinic3_hwdev *hwdev);
+u8 hinic3_ppf_idx(struct hinic3_hwdev *hwdev);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
index 84bee5d6e638..e7d6c2033b45 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+#include <linux/dim.h>
#include <linux/netdevice.h>
#include "hinic3_hw_comm.h"
@@ -10,6 +11,23 @@
#include "hinic3_rx.h"
#include "hinic3_tx.h"
+#define HINIC3_COAL_PKT_SHIFT 5
+
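+/* Feed the queue's packet and byte counters to the net_dim algorithm, which
+ * schedules rxq->dim.work whenever the RX moderation profile should change.
+ */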
+static void hinic3_net_dim(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_irq_cfg *irq_cfg)
+{
+ struct hinic3_rxq *rxq = irq_cfg->rxq;
+ struct dim_sample sample = {};
+
+ if (!test_bit(HINIC3_INTF_UP, &nic_dev->flags) ||
+ !nic_dev->adaptive_rx_coal)
+ return;
+
+ dim_update_sample(irq_cfg->total_events, rxq->rxq_stats.packets,
+ rxq->rxq_stats.bytes, &sample);
+ net_dim(&rxq->dim, &sample);
+}
+
static int hinic3_poll(struct napi_struct *napi, int budget)
{
struct hinic3_irq_cfg *irq_cfg =
@@ -31,9 +49,11 @@ static int hinic3_poll(struct napi_struct *napi, int budget)
if (busy)
return budget;
- if (likely(napi_complete_done(napi, work_done)))
+ if (likely(napi_complete_done(napi, work_done))) {
+ hinic3_net_dim(nic_dev, irq_cfg);
hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
HINIC3_MSIX_ENABLE);
+ }
return work_done;
}
@@ -61,6 +81,8 @@ static irqreturn_t qp_irq(int irq, void *data)
hinic3_msix_intr_clear_resend_bit(nic_dev->hwdev,
irq_cfg->msix_entry_idx, 1);
+ irq_cfg->total_events++;
+
napi_schedule(&irq_cfg->napi);
return IRQ_HANDLED;
@@ -83,7 +105,7 @@ static int hinic3_request_irq(struct hinic3_irq_cfg *irq_cfg, u16 q_id)
info.coalesc_timer_cfg =
nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg;
- err = hinic3_set_interrupt_cfg_direct(nic_dev->hwdev, &info);
+ err = hinic3_set_interrupt_cfg(nic_dev->hwdev, info);
if (err) {
netdev_err(netdev, "Failed to set RX interrupt coalescing attribute.\n");
qp_del_napi(irq_cfg);
@@ -108,6 +130,71 @@ static void hinic3_release_irq(struct hinic3_irq_cfg *irq_cfg)
free_irq(irq_cfg->irq_id, irq_cfg);
}
+static int hinic3_set_interrupt_moder(struct net_device *netdev, u16 q_id,
+ u8 coalesc_timer_cfg, u8 pending_limit)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_interrupt_info info = {};
+ int err;
+
+ if (q_id >= nic_dev->q_params.num_qps)
+ return 0;
+
+ info.interrupt_coalesc_set = 1;
+ info.coalesc_timer_cfg = coalesc_timer_cfg;
+ info.pending_limit = pending_limit;
+ info.msix_index = nic_dev->q_params.irq_cfg[q_id].msix_entry_idx;
+ info.resend_timer_cfg =
+ nic_dev->intr_coalesce[q_id].resend_timer_cfg;
+
+ err = hinic3_set_interrupt_cfg(nic_dev->hwdev, info);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to modify moderation for Queue: %u\n", q_id);
+ } else {
+ nic_dev->rxqs[q_id].last_coalesc_timer_cfg = coalesc_timer_cfg;
+ nic_dev->rxqs[q_id].last_pending_limit = pending_limit;
+ }
+
+ return err;
+}
+
+static void hinic3_update_queue_coal(struct net_device *netdev, u16 q_id,
+ u16 coal_timer, u16 coal_pkts)
+{
+ struct hinic3_intr_coal_info *q_coal;
+ u8 coalesc_timer_cfg, pending_limit;
+ struct hinic3_nic_dev *nic_dev;
+
+ nic_dev = netdev_priv(netdev);
+
+ q_coal = &nic_dev->intr_coalesce[q_id];
+ coalesc_timer_cfg = (u8)coal_timer;
+ pending_limit = clamp_t(u8, coal_pkts >> HINIC3_COAL_PKT_SHIFT,
+ q_coal->rx_pending_limit_low,
+ q_coal->rx_pending_limit_high);
+
+ hinic3_set_interrupt_moder(nic_dev->netdev, q_id,
+ coalesc_timer_cfg, pending_limit);
+}
+
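+/* dim work callback: apply the moderation profile chosen by net_dim as the
+ * coalescing timer and pending limit of the queue's MSI-X entry.
+ */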
+static void hinic3_rx_dim_work(struct work_struct *work)
+{
+ struct dim_cq_moder cur_moder;
+ struct hinic3_rxq *rxq;
+ struct dim *dim;
+
+ dim = container_of(work, struct dim, work);
+ rxq = container_of(dim, struct hinic3_rxq, dim);
+
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+
+ hinic3_update_queue_coal(rxq->netdev, rxq->q_id,
+ cur_moder.usec, cur_moder.pkts);
+
+ dim->state = DIM_START_MEASURE;
+}
+
int hinic3_qps_irq_init(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -141,6 +228,9 @@ int hinic3_qps_irq_init(struct net_device *netdev)
goto err_release_irqs;
}
+ INIT_WORK(&irq_cfg->rxq->dim.work, hinic3_rx_dim_work);
+ irq_cfg->rxq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+
netif_queue_set_napi(irq_cfg->netdev, q_id,
NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
netif_queue_set_napi(irq_cfg->netdev, q_id,
@@ -164,12 +254,14 @@ err_release_irqs:
NETDEV_QUEUE_TYPE_RX, NULL);
netif_queue_set_napi(irq_cfg->netdev, q_id,
NETDEV_QUEUE_TYPE_TX, NULL);
+
hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
HINIC3_MSIX_DISABLE);
hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
irq_cfg->msix_entry_idx,
HINIC3_CLR_MSIX_AUTO_MASK);
hinic3_release_irq(irq_cfg);
+ disable_work_sync(&irq_cfg->rxq->dim.work);
}
return err;
@@ -194,5 +286,6 @@ void hinic3_qps_irq_uninit(struct net_device *netdev)
irq_cfg->msix_entry_idx,
HINIC3_CLR_MSIX_AUTO_MASK);
hinic3_release_irq(irq_cfg);
+ disable_work_sync(&irq_cfg->rxq->dim.work);
}
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
index 3db8241a3b0c..87413e192f10 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
@@ -5,15 +5,22 @@
#include <linux/iopoll.h>
#include "hinic3_hw_cfg.h"
+#include "hinic3_hw_comm.h"
#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
#include "hinic3_lld.h"
#include "hinic3_mgmt.h"
#include "hinic3_pci_id_tbl.h"
#define HINIC3_VF_PCI_CFG_REG_BAR 0
+#define HINIC3_PF_PCI_CFG_REG_BAR 1
#define HINIC3_PCI_INTR_REG_BAR 2
+/* Only PF has mgmt bar */
+#define HINIC3_PCI_MGMT_REG_BAR 3
#define HINIC3_PCI_DB_BAR 4
+#define HINIC3_IS_VF_DEV(pdev) ((pdev)->device == PCI_DEV_ID_HINIC3_VF)
+
#define HINIC3_EVENT_POLL_SLEEP_US 1000
#define HINIC3_EVENT_POLL_TIMEOUT_US 10000000
@@ -181,8 +188,12 @@ void hinic3_adev_event_unregister(struct auxiliary_device *adev)
static int hinic3_mapping_bar(struct pci_dev *pdev,
struct hinic3_pcidev *pci_adapter)
{
- pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev,
- HINIC3_VF_PCI_CFG_REG_BAR);
+ int cfg_bar;
+
+ cfg_bar = HINIC3_IS_VF_DEV(pdev) ?
+ HINIC3_VF_PCI_CFG_REG_BAR : HINIC3_PF_PCI_CFG_REG_BAR;
+
+ pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, cfg_bar);
if (!pci_adapter->cfg_reg_base) {
dev_err(&pdev->dev, "Failed to map configuration regs\n");
return -ENOMEM;
@@ -195,19 +206,30 @@ static int hinic3_mapping_bar(struct pci_dev *pdev,
goto err_unmap_cfg_reg_base;
}
+ if (!HINIC3_IS_VF_DEV(pdev)) {
+ pci_adapter->mgmt_reg_base =
+ pci_ioremap_bar(pdev, HINIC3_PCI_MGMT_REG_BAR);
+ if (!pci_adapter->mgmt_reg_base) {
+ dev_err(&pdev->dev, "Failed to map mgmt regs\n");
+ goto err_unmap_intr_reg_base;
+ }
+ }
+
pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC3_PCI_DB_BAR);
pci_adapter->db_dwqe_len = pci_resource_len(pdev, HINIC3_PCI_DB_BAR);
pci_adapter->db_base = pci_ioremap_bar(pdev, HINIC3_PCI_DB_BAR);
if (!pci_adapter->db_base) {
dev_err(&pdev->dev, "Failed to map doorbell regs\n");
- goto err_unmap_intr_reg_base;
+ goto err_unmap_mgmt_reg_base;
}
return 0;
+err_unmap_mgmt_reg_base:
+ if (!HINIC3_IS_VF_DEV(pdev))
+ iounmap(pci_adapter->mgmt_reg_base);
err_unmap_intr_reg_base:
iounmap(pci_adapter->intr_reg_base);
-
err_unmap_cfg_reg_base:
iounmap(pci_adapter->cfg_reg_base);
@@ -217,6 +239,8 @@ err_unmap_cfg_reg_base:
static void hinic3_unmapping_bar(struct hinic3_pcidev *pci_adapter)
{
iounmap(pci_adapter->db_base);
+ if (!HINIC3_IS_VF_DEV(pci_adapter->pdev))
+ iounmap(pci_adapter->mgmt_reg_base);
iounmap(pci_adapter->intr_reg_base);
iounmap(pci_adapter->cfg_reg_base);
}
@@ -260,10 +284,8 @@ static int hinic3_pci_init(struct pci_dev *pdev)
err_release_regions:
pci_clear_master(pdev);
pci_release_regions(pdev);
-
err_disable_device:
pci_disable_device(pdev);
-
err_free_pci_adapter:
pci_set_drvdata(pdev, NULL);
mutex_destroy(&pci_adapter->pdev_mutex);
@@ -295,6 +317,9 @@ static int hinic3_func_init(struct pci_dev *pdev,
return err;
}
+ if (HINIC3_IS_PF(pci_adapter->hwdev))
+ hinic3_sync_time_to_fw(pci_adapter->hwdev);
+
err = hinic3_attach_aux_devices(pci_adapter->hwdev);
if (err)
goto err_free_hwdev;
@@ -311,6 +336,8 @@ static void hinic3_func_uninit(struct pci_dev *pdev)
{
struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+ /* disable mgmt reporting before flushing mgmt work-queue. */
+ hinic3_set_pf_status(pci_adapter->hwdev->hwif, HINIC3_PF_STATUS_INIT);
hinic3_flush_mgmt_workq(pci_adapter->hwdev);
hinic3_detach_aux_devices(pci_adapter->hwdev);
hinic3_free_hwdev(pci_adapter->hwdev);
@@ -319,6 +346,7 @@ static void hinic3_func_uninit(struct pci_dev *pdev)
static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter)
{
struct pci_dev *pdev = pci_adapter->pdev;
+ struct comm_cmd_bdf_info bdf_info = {};
int err;
err = hinic3_mapping_bar(pdev, pci_adapter);
@@ -331,11 +359,26 @@ static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter)
if (err)
goto err_unmap_bar;
+ if (HINIC3_IS_PF(pci_adapter->hwdev)) {
+ bdf_info.function_idx =
+ hinic3_global_func_id(pci_adapter->hwdev);
+ bdf_info.bus = pdev->bus->number;
+ bdf_info.device = PCI_SLOT(pdev->devfn);
+ bdf_info.function = PCI_FUNC(pdev->devfn);
+
+ err = hinic3_set_bdf_ctxt(pci_adapter->hwdev, &bdf_info);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set BDF info to fw\n");
+ goto err_uninit_func;
+ }
+ }
+
return 0;
+err_uninit_func:
+ hinic3_func_uninit(pdev);
err_unmap_bar:
hinic3_unmapping_bar(pci_adapter);
-
err_out:
dev_err(&pdev->dev, "PCIe device probe function failed\n");
@@ -368,7 +411,6 @@ static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_uninit_pci:
hinic3_pci_uninit(pdev);
-
err_out:
dev_err(&pdev->dev, "PCIe device probe failed\n");
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
index 6d87d4d895ba..6275d94dfefd 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -29,6 +29,9 @@
#define HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG 25
#define HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG 7
+#define HINIC3_RX_PENDING_LIMIT_LOW 2
+#define HINIC3_RX_PENDING_LIMIT_HIGH 8
+
static void init_intr_coal_param(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -38,9 +41,16 @@ static void init_intr_coal_param(struct net_device *netdev)
for (i = 0; i < nic_dev->max_qps; i++) {
info = &nic_dev->intr_coalesce[i];
info->pending_limit = HINIC3_DEFAULT_TXRX_MSIX_PENDING_LIMIT;
- info->coalesce_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG;
- info->resend_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+ info->coalesce_timer_cfg =
+ HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+ info->resend_timer_cfg =
+ HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+
+ info->rx_pending_limit_high = HINIC3_RX_PENDING_LIMIT_HIGH;
+ info->rx_pending_limit_low = HINIC3_RX_PENDING_LIMIT_LOW;
}
+
+ nic_dev->adaptive_rx_coal = 1;
}
static int hinic3_init_intr_coalesce(struct net_device *netdev)
@@ -94,7 +104,6 @@ static int hinic3_alloc_txrxqs(struct net_device *netdev)
err_free_rxqs:
hinic3_free_rxqs(netdev);
-
err_free_txqs:
hinic3_free_txqs(netdev);
@@ -108,6 +117,22 @@ static void hinic3_free_txrxqs(struct net_device *netdev)
hinic3_free_txqs(netdev);
}
+static void hinic3_periodic_work_handler(struct work_struct *work)
+{
+ struct delayed_work *delay = to_delayed_work(work);
+ struct hinic3_nic_dev *nic_dev;
+
+ nic_dev = container_of(delay, struct hinic3_nic_dev, periodic_work);
+ if (test_and_clear_bit(HINIC3_EVENT_WORK_TX_TIMEOUT,
+ &nic_dev->event_flag))
+ dev_info(nic_dev->hwdev->dev,
+ "Fault event report, src: %u, level: %u\n",
+ HINIC3_FAULT_SRC_TX_TIMEOUT,
+ HINIC3_FAULT_LEVEL_SERIOUS_FLR);
+
+ queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ);
+}
+
static int hinic3_init_nic_dev(struct net_device *netdev,
struct hinic3_hwdev *hwdev)
{
@@ -121,8 +146,27 @@ static int hinic3_init_nic_dev(struct net_device *netdev,
nic_dev->rx_buf_len = HINIC3_RX_BUF_LEN;
nic_dev->lro_replenish_thld = HINIC3_LRO_REPLENISH_THLD;
+ nic_dev->vlan_bitmap = kzalloc(HINIC3_VLAN_BITMAP_SIZE(nic_dev),
+ GFP_KERNEL);
+ if (!nic_dev->vlan_bitmap)
+ return -ENOMEM;
+
nic_dev->nic_svc_cap = hwdev->cfg_mgmt->cap.nic_svc_cap;
+ nic_dev->workq = create_singlethread_workqueue(HINIC3_NIC_DEV_WQ_NAME);
+ if (!nic_dev->workq) {
+ dev_err(hwdev->dev, "Failed to initialize nic workqueue\n");
+ kfree(nic_dev->vlan_bitmap);
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&nic_dev->periodic_work,
+ hinic3_periodic_work_handler);
+
+ INIT_LIST_HEAD(&nic_dev->uc_filter_list);
+ INIT_LIST_HEAD(&nic_dev->mc_filter_list);
+ INIT_WORK(&nic_dev->rx_mode_work, hinic3_set_rx_mode_work);
+
return 0;
}
@@ -130,23 +174,39 @@ static int hinic3_sw_init(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ u8 mac_addr[ETH_ALEN];
int err;
+ mutex_init(&nic_dev->port_state_mutex);
+
nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH;
nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
hinic3_try_to_enable_rss(netdev);
- /* VF driver always uses random MAC address. During VM migration to a
- * new device, the new device should learn the VMs old MAC rather than
- * provide its own MAC. The product design assumes that every VF is
- * suspectable to migration so the device avoids offering MAC address
- * to VFs.
- */
- eth_hw_addr_random(netdev);
+ if (HINIC3_IS_VF(hwdev)) {
+ /* VF driver always uses random MAC address. During VM migration
+ * to a new device, the new device should learn the VMs old MAC
+ * rather than provide its own MAC. The product design assumes
+ * that every VF is susceptible to migration so the device
+ * avoids offering MAC address to VFs.
+ */
+ eth_hw_addr_random(netdev);
+ } else {
+ err = hinic3_get_default_mac(hwdev, mac_addr);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to get MAC address\n");
+ goto err_clear_rss_config;
+ }
+ eth_hw_addr_set(netdev, mac_addr);
+ }
+
err = hinic3_set_mac(hwdev, netdev->dev_addr, 0,
hinic3_global_func_id(hwdev));
- if (err) {
+ /* Failure to set MAC is not a fatal error for a VF since its MAC may
+ * have already been set by the PF
+ */
+ if (err && err != -EADDRINUSE) {
dev_err(hwdev->dev, "Failed to set default MAC\n");
goto err_clear_rss_config;
}
@@ -173,6 +233,7 @@ static void hinic3_sw_uninit(struct net_device *netdev)
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
hinic3_free_txrxqs(netdev);
+ hinic3_clean_mac_list_filter(netdev);
hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
hinic3_global_func_id(nic_dev->hwdev));
hinic3_clear_rss_config(netdev);
@@ -186,6 +247,8 @@ static void hinic3_assign_netdev_ops(struct net_device *netdev)
static void netdev_feature_init(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ netdev_features_t hw_features = 0;
+ netdev_features_t vlan_fts = 0;
netdev_features_t cso_fts = 0;
netdev_features_t tso_fts = 0;
netdev_features_t dft_fts;
@@ -198,7 +261,29 @@ static void netdev_feature_init(struct net_device *netdev)
if (hinic3_test_support(nic_dev, HINIC3_NIC_F_TSO))
tso_fts |= NETIF_F_TSO | NETIF_F_TSO6;
- netdev->features |= dft_fts | cso_fts | tso_fts;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_RX_VLAN_STRIP |
+ HINIC3_NIC_F_TX_VLAN_INSERT))
+ vlan_fts |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_RX_VLAN_FILTER))
+ vlan_fts |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_VXLAN_OFFLOAD))
+ tso_fts |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ /* LRO is disabled by default, only set hw features */
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_LRO))
+ hw_features |= NETIF_F_LRO;
+
+ netdev->features |= dft_fts | cso_fts | tso_fts | vlan_fts;
+ netdev->vlan_features |= dft_fts | cso_fts | tso_fts;
+ hw_features |= netdev->hw_features | netdev->features;
+ netdev->hw_features = hw_features;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->hw_enc_features |= dft_fts;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_VXLAN_OFFLOAD))
+ netdev->hw_enc_features |= cso_fts | tso_fts | NETIF_F_TSO_ECN;
}
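For context, netdev->features is what is enabled right now while netdev->hw_features is the superset the user may toggle with ethtool -K; the hunk above therefore advertises LRO only in hw_features so it stays off by default. A sketch of the same split, with assumed flag values:

static void example_feature_init(struct net_device *netdev)
{
	/* enabled immediately */
	netdev->features |= NETIF_F_SG | NETIF_F_RXCSUM;
	/* user-toggleable via 'ethtool -K'; LRO advertised but left off */
	netdev->hw_features |= netdev->features | NETIF_F_LRO;
}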
static int hinic3_set_default_hw_feature(struct net_device *netdev)
@@ -213,6 +298,13 @@ static int hinic3_set_default_hw_feature(struct net_device *netdev)
return err;
}
+ err = hinic3_set_hw_features(netdev);
+ if (err) {
+ hinic3_update_nic_feature(nic_dev, 0);
+ hinic3_set_nic_feature_to_hw(nic_dev);
+ return err;
+ }
+
return 0;
}
@@ -238,6 +330,44 @@ static void hinic3_link_status_change(struct net_device *netdev,
}
}
+static void hinic3_port_module_event_handler(struct net_device *netdev,
+ struct hinic3_event_info *event)
+{
+ const char *g_hinic3_module_link_err[LINK_ERR_NUM] = {
+ "Unrecognized module"
+ };
+ struct hinic3_port_module_event *module_event;
+ enum port_module_event_type type;
+ enum link_err_type err_type;
+
+ module_event = (struct hinic3_port_module_event *)event->event_data;
+ type = module_event->type;
+ err_type = module_event->err_type;
+
+ switch (type) {
+ case HINIC3_PORT_MODULE_CABLE_PLUGGED:
+ case HINIC3_PORT_MODULE_CABLE_UNPLUGGED:
+ netdev_info(netdev, "Port module event: Cable %s\n",
+ type == HINIC3_PORT_MODULE_CABLE_PLUGGED ?
+ "plugged" : "unplugged");
+ break;
+ case HINIC3_PORT_MODULE_LINK_ERR:
+ if (err_type >= LINK_ERR_NUM) {
+ netdev_info(netdev, "Link failed, unknown error type: 0x%x\n",
+ err_type);
+ } else {
+ netdev_info(netdev,
+ "Link failed, error type: 0x%x: %s\n",
+ err_type,
+ g_hinic3_module_link_err[err_type]);
+ }
+ break;
+ default:
+ netdev_err(netdev, "Unknown port module type %d\n", type);
+ break;
+ }
+}
+
static void hinic3_nic_event(struct auxiliary_device *adev,
struct hinic3_event_info *event)
{
@@ -252,7 +382,19 @@ static void hinic3_nic_event(struct auxiliary_device *adev,
hinic3_link_status_change(netdev, true);
break;
case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
+ HINIC3_NIC_EVENT_PORT_MODULE_EVENT):
+ hinic3_port_module_event_handler(netdev, event);
+ break;
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
HINIC3_NIC_EVENT_LINK_DOWN):
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_COMM,
+ HINIC3_COMM_EVENT_FAULT):
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_COMM,
+ HINIC3_COMM_EVENT_PCIE_LINK_DOWN):
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_COMM,
+ HINIC3_COMM_EVENT_HEART_LOST):
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_COMM,
+ HINIC3_COMM_EVENT_MGMT_WATCHDOG):
hinic3_link_status_change(netdev, false);
break;
default:
@@ -260,6 +402,12 @@ static void hinic3_nic_event(struct auxiliary_device *adev,
}
}
+static void hinic3_free_nic_dev(struct hinic3_nic_dev *nic_dev)
+{
+ destroy_workqueue(nic_dev->workq);
+ kfree(nic_dev->vlan_bitmap);
+}
+
static int hinic3_nic_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
@@ -300,7 +448,7 @@ static int hinic3_nic_probe(struct auxiliary_device *adev,
err = hinic3_init_nic_io(nic_dev);
if (err)
- goto err_free_netdev;
+ goto err_free_nic_dev;
err = hinic3_sw_init(netdev);
if (err)
@@ -313,6 +461,7 @@ static int hinic3_nic_probe(struct auxiliary_device *adev,
if (err)
goto err_uninit_sw;
+ queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ);
netif_carrier_off(netdev);
err = register_netdev(netdev);
@@ -322,18 +471,17 @@ static int hinic3_nic_probe(struct auxiliary_device *adev,
return 0;
err_uninit_nic_feature:
+ disable_delayed_work_sync(&nic_dev->periodic_work);
hinic3_update_nic_feature(nic_dev, 0);
hinic3_set_nic_feature_to_hw(nic_dev);
-
err_uninit_sw:
hinic3_sw_uninit(netdev);
-
err_free_nic_io:
hinic3_free_nic_io(nic_dev);
-
+err_free_nic_dev:
+ hinic3_free_nic_dev(nic_dev);
err_free_netdev:
free_netdev(netdev);
-
err_unregister_adev_event:
hinic3_adev_event_unregister(adev);
dev_err(&pdev->dev, "NIC service probe failed\n");
@@ -352,6 +500,10 @@ static void hinic3_nic_remove(struct auxiliary_device *adev)
netdev = nic_dev->netdev;
unregister_netdev(netdev);
+ disable_delayed_work_sync(&nic_dev->periodic_work);
+ cancel_work_sync(&nic_dev->rx_mode_work);
+ hinic3_free_nic_dev(nic_dev);
+
hinic3_update_nic_feature(nic_dev, 0);
hinic3_set_nic_feature_to_hw(nic_dev);
hinic3_sw_uninit(netdev);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
index cf67e26acece..c871fd0fb109 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
@@ -82,10 +82,19 @@ static struct hinic3_msg_desc *get_mbox_msg_desc(struct hinic3_mbox *mbox,
enum mbox_msg_direction_type dir,
u16 src_func_id)
{
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
struct hinic3_msg_channel *msg_ch;
- msg_ch = (src_func_id == MBOX_MGMT_FUNC_ID) ?
- &mbox->mgmt_msg : mbox->func_msg;
+ if (src_func_id == MBOX_MGMT_FUNC_ID) {
+ msg_ch = &mbox->mgmt_msg;
+ } else if (HINIC3_IS_VF(hwdev)) {
+ /* message from pf */
+ msg_ch = mbox->func_msg;
+ if (src_func_id != hinic3_pf_id_of_vf(hwdev) || !msg_ch)
+ return NULL;
+ } else {
+ return NULL;
+ }
return (dir == MBOX_MSG_SEND) ?
&msg_ch->recv_msg : &msg_ch->resp_msg;
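The rewritten get_mbox_msg_desc() trusts only two sources: the management function, and, on a VF, the VF's own parent PF; anything else yields NULL and the frame is dropped by the caller. A hypothetical helper condensing the same rule:

static bool mbox_src_is_trusted(struct hinic3_hwdev *hwdev, u16 src_func_id)
{
	if (src_func_id == MBOX_MGMT_FUNC_ID)
		return true;	/* management traffic, valid for PF and VF */

	/* a VF additionally accepts mail from its own PF, nothing else */
	return HINIC3_IS_VF(hwdev) &&
	       src_func_id == hinic3_pf_id_of_vf(hwdev);
}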
@@ -191,6 +200,12 @@ void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
dir = MBOX_MSG_HEADER_GET(mbox_header, DIRECTION);
src_func_id = MBOX_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
msg_desc = get_mbox_msg_desc(mbox, dir, src_func_id);
+ if (!msg_desc) {
+ dev_err(mbox->hwdev->dev,
+ "Mailbox source function id: %u is invalid for current function\n",
+ src_func_id);
+ return;
+ }
recv_mbox_handler(mbox, header, msg_desc);
}
@@ -409,9 +424,12 @@ int hinic3_init_mbox(struct hinic3_hwdev *hwdev)
if (err)
goto err_destroy_workqueue;
- err = hinic3_init_func_mbox_msg_channel(hwdev);
- if (err)
- goto err_uninit_mgmt_msg_ch;
+ if (HINIC3_IS_VF(hwdev)) {
+ /* VF to PF mbox message channel */
+ err = hinic3_init_func_mbox_msg_channel(hwdev);
+ if (err)
+ goto err_uninit_mgmt_msg_ch;
+ }
err = alloc_mbox_wb_status(mbox);
if (err) {
@@ -424,14 +442,12 @@ int hinic3_init_mbox(struct hinic3_hwdev *hwdev)
return 0;
err_uninit_func_mbox_msg_ch:
- hinic3_uninit_func_mbox_msg_channel(hwdev);
-
+ if (HINIC3_IS_VF(hwdev))
+ hinic3_uninit_func_mbox_msg_channel(hwdev);
err_uninit_mgmt_msg_ch:
uninit_mgmt_msg_channel(mbox);
-
err_destroy_workqueue:
destroy_workqueue(mbox->workq);
-
err_free_mbox:
kfree(mbox);
@@ -576,7 +592,13 @@ static void write_mbox_msg_attr(struct hinic3_mbox *mbox,
{
struct hinic3_hwif *hwif = mbox->hwdev->hwif;
u32 mbox_int, mbox_ctrl, tx_size;
+ u16 func = dst_func;
+ /* VF can send non-management messages only to PF. We set DST_FUNC field
+ * to 0 since HW will ignore it anyway.
+ */
+ if (HINIC3_IS_VF(mbox->hwdev) && dst_func != MBOX_MGMT_FUNC_ID)
+ func = 0;
tx_size = ALIGN(seg_len + MBOX_HEADER_SZ, MBOX_SEG_LEN_ALIGN) >> 2;
mbox_int = MBOX_INT_SET(dst_aeqn, DST_AEQN) |
@@ -587,7 +609,7 @@ static void write_mbox_msg_attr(struct hinic3_mbox *mbox,
mbox_ctrl = MBOX_CTRL_SET(1, TX_STATUS) |
MBOX_CTRL_SET(0, TRIGGER_AEQE) |
- MBOX_CTRL_SET(dst_func, DST_FUNC);
+ MBOX_CTRL_SET(func, DST_FUNC);
hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_INT_OFF, mbox_int);
hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF,
@@ -840,6 +862,19 @@ err_send:
return err;
}
+void hinic3_response_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const void *buf_in, u32 in_size, u16 msg_id)
+{
+ struct mbox_msg_info msg_info;
+
+ msg_info.msg_id = (u8)msg_id;
+ msg_info.status = 0;
+
+ send_mbox_msg(hwdev->mbox, mod, cmd, buf_in, in_size,
+ MBOX_MGMT_FUNC_ID, MBOX_MSG_RESP,
+ MBOX_MSG_NO_ACK, &msg_info);
+}
+
int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params)
{
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
index e71629e95086..e26f22d1d564 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
@@ -135,6 +135,8 @@ void hinic3_free_mbox(struct hinic3_hwdev *hwdev);
int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params);
+void hinic3_response_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const void *buf_in, u32 in_size, u16 msg_id);
int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
index c38d10cd7fac..be2a2ae75fc0 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
@@ -3,19 +3,330 @@
#include "hinic3_eqs.h"
#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
#include "hinic3_mgmt.h"
+#define HINIC3_MSG_TO_MGMT_MAX_LEN 2016
+
+#define MGMT_MAX_PF_BUF_SIZE 2048UL
+#define MGMT_SEG_LEN_MAX 48
+#define MGMT_ASYNC_MSG_FLAG 0x8
+
+#define HINIC3_MGMT_WQ_NAME "hinic3_mgmt"
+
+/* Bogus sequence ID to prevent accidental match following partial message */
+#define MGMT_BOGUS_SEQ_ID \
+ (MGMT_MAX_PF_BUF_SIZE / MGMT_SEG_LEN_MAX + 1)
+
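Working through the arithmetic with the values defined above:

/* MGMT_BOGUS_SEQ_ID = 2048 / 48 + 1 = 42 + 1 = 43.
 * A valid segment must satisfy seq_id * 48 + seg_len <= 2048, so the
 * largest legal seq_id is 42 (offset 2016, at most 32 trailing bytes).
 * 43 can therefore never satisfy the "seq_id == recv_msg->seq_id + 1"
 * check below, so a stale partial message is never resumed.
 */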
+static void
+hinic3_mgmt_resp_msg_handler(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic3_recv_msg *recv_msg)
+{
+ struct device *dev = pf_to_mgmt->hwdev->dev;
+
+ /* Ignore async msg */
+ if (recv_msg->msg_id & MGMT_ASYNC_MSG_FLAG)
+ return;
+
+ spin_lock(&pf_to_mgmt->sync_event_lock);
+ if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) {
+ dev_err(dev, "msg id mismatch, send msg id: 0x%x, recv msg id: 0x%x, event state: %d\n",
+ pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+ pf_to_mgmt->event_flag);
+ } else if (pf_to_mgmt->event_flag == COMM_SEND_EVENT_START) {
+ pf_to_mgmt->event_flag = COMM_SEND_EVENT_SUCCESS;
+ complete(&recv_msg->recv_done);
+ } else {
+ dev_err(dev, "Wait timeout, send msg id: 0x%x, recv msg id: 0x%x, event state: %d\n",
+ pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+ pf_to_mgmt->event_flag);
+ }
+ spin_unlock(&pf_to_mgmt->sync_event_lock);
+}
+
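hinic3_mgmt_resp_msg_handler() completes a waiter only when the echoed msg_id matches and a send is actually in flight. For illustration, a hedged sketch of what the sender side presumably looks like; send_sync_cmd() is hypothetical, and only the lock/completion pattern is the point:

static int send_sync_cmd(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
			 struct hinic3_recv_msg *resp)
{
	spin_lock(&pf_to_mgmt->sync_event_lock);
	pf_to_mgmt->sync_msg_id++;		/* id echoed back by mgmt fw */
	pf_to_mgmt->event_flag = COMM_SEND_EVENT_START;
	reinit_completion(&resp->recv_done);
	spin_unlock(&pf_to_mgmt->sync_event_lock);

	/* ... transmit the request over the mailbox ... */

	if (!wait_for_completion_timeout(&resp->recv_done, 5 * HZ)) {
		spin_lock(&pf_to_mgmt->sync_event_lock);
		pf_to_mgmt->event_flag = COMM_SEND_EVENT_TIMEOUT;
		spin_unlock(&pf_to_mgmt->sync_event_lock);
		return -ETIMEDOUT;
	}

	return 0;
}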
+static void hinic3_recv_mgmt_msg_work_handler(struct work_struct *work)
+{
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ struct mgmt_msg_handle_work *mgmt_work;
+ struct mgmt_msg_head *ack_cmd;
+
+ mgmt_work = container_of(work, struct mgmt_msg_handle_work, work);
+
+ /* At the moment, we do not expect any meaningful messages but if the
+ * sender expects an ACK we still need to provide one with "unsupported"
+ * status.
+ */
+ if (mgmt_work->async_mgmt_to_pf)
+ goto out;
+
+ pf_to_mgmt = mgmt_work->pf_to_mgmt;
+ ack_cmd = pf_to_mgmt->mgmt_ack_buf;
+ memset(ack_cmd, 0, sizeof(*ack_cmd));
+ ack_cmd->status = MGMT_STATUS_CMD_UNSUPPORTED;
+
+ hinic3_response_mbox_to_mgmt(pf_to_mgmt->hwdev, mgmt_work->mod,
+ mgmt_work->cmd, ack_cmd, sizeof(*ack_cmd),
+ mgmt_work->msg_id);
+
+out:
+ kfree(mgmt_work->msg);
+ kfree(mgmt_work);
+}
+
+static int hinic3_recv_msg_add_seg(struct hinic3_recv_msg *recv_msg,
+ __le64 msg_header, const void *seg_data,
+ bool *is_complete)
+{
+ u8 seq_id, msg_id, seg_len, is_last;
+ char *msg_buff;
+ u32 offset;
+
+ seg_len = MBOX_MSG_HEADER_GET(msg_header, SEG_LEN);
+ is_last = MBOX_MSG_HEADER_GET(msg_header, LAST);
+ seq_id = MBOX_MSG_HEADER_GET(msg_header, SEQID);
+ msg_id = MBOX_MSG_HEADER_GET(msg_header, MSG_ID);
+
+ if (seg_len > MGMT_SEG_LEN_MAX)
+ return -EINVAL;
+
+ /* All segments but the last must be of maximal size */
+ if (seg_len != MGMT_SEG_LEN_MAX && !is_last)
+ return -EINVAL;
+
+ if (seq_id == 0) {
+ recv_msg->seq_id = seq_id;
+ recv_msg->msg_id = msg_id;
+ } else if (seq_id != recv_msg->seq_id + 1 ||
+ msg_id != recv_msg->msg_id) {
+ return -EINVAL;
+ }
+
+ offset = seq_id * MGMT_SEG_LEN_MAX;
+ if (offset + seg_len > MGMT_MAX_PF_BUF_SIZE)
+ return -EINVAL;
+
+ msg_buff = recv_msg->msg;
+ memcpy(msg_buff + offset, seg_data, seg_len);
+ recv_msg->msg_len = offset + seg_len;
+ recv_msg->seq_id = seq_id;
+ *is_complete = !!is_last;
+
+ return 0;
+}
+
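A worked example of the reassembly rules, with assumed sizes:

/* A 100-byte message arrives as three segments of one msg_id:
 *   seq 0: seg_len 48, !last -> copied at offset  0
 *   seq 1: seg_len 48, !last -> copied at offset 48
 *   seq 2: seg_len  4,  last -> copied at offset 96, *is_complete = true
 * Rejected with -EINVAL: a non-final segment shorter than 48 bytes, a
 * seq_id that is neither 0 nor previous + 1, or a msg_id change
 * mid-message.
 */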
+static void hinic3_init_mgmt_msg_work(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic3_recv_msg *recv_msg)
+{
+ struct mgmt_msg_handle_work *mgmt_work;
+
+ mgmt_work = kmalloc(sizeof(*mgmt_work), GFP_KERNEL);
+ if (!mgmt_work)
+ return;
+
+ if (recv_msg->msg_len) {
+ mgmt_work->msg = kmemdup(recv_msg->msg, recv_msg->msg_len,
+ GFP_KERNEL);
+ if (!mgmt_work->msg) {
+ kfree(mgmt_work);
+ return;
+ }
+ } else {
+ mgmt_work->msg = NULL;
+ }
+
+ mgmt_work->pf_to_mgmt = pf_to_mgmt;
+ mgmt_work->msg_len = recv_msg->msg_len;
+ mgmt_work->msg_id = recv_msg->msg_id;
+ mgmt_work->mod = recv_msg->mod;
+ mgmt_work->cmd = recv_msg->cmd;
+ mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+ INIT_WORK(&mgmt_work->work, hinic3_recv_mgmt_msg_work_handler);
+ queue_work(pf_to_mgmt->workq, &mgmt_work->work);
+}
+
+static void
+hinic3_recv_mgmt_msg_handler(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
+ const u8 *header,
+ struct hinic3_recv_msg *recv_msg)
+{
+ struct hinic3_hwdev *hwdev = pf_to_mgmt->hwdev;
+ const void *seg_data;
+ __le64 msg_header;
+ bool is_complete;
+ u8 dir, msg_id;
+ int err;
+
+ msg_header = *(__force __le64 *)header;
+ dir = MBOX_MSG_HEADER_GET(msg_header, DIRECTION);
+ msg_id = MBOX_MSG_HEADER_GET(msg_header, MSG_ID);
+ /* Async responses are not tracked; there is nothing to collect from HW */
+ if (dir == MBOX_MSG_RESP && (msg_id & MGMT_ASYNC_MSG_FLAG))
+ return;
+
+ seg_data = header + sizeof(msg_header);
+ err = hinic3_recv_msg_add_seg(recv_msg, msg_header,
+ seg_data, &is_complete);
+ if (err) {
+ dev_err(hwdev->dev, "invalid receive segment\n");
+ /* reset seq_id so the stale partial message cannot be resumed */
+ recv_msg->seq_id = MGMT_BOGUS_SEQ_ID;
+
+ return;
+ }
+
+ if (!is_complete)
+ return;
+
+ recv_msg->cmd = MBOX_MSG_HEADER_GET(msg_header, CMD);
+ recv_msg->mod = MBOX_MSG_HEADER_GET(msg_header, MODULE);
+ recv_msg->async_mgmt_to_pf = MBOX_MSG_HEADER_GET(msg_header, NO_ACK);
+ recv_msg->seq_id = MGMT_BOGUS_SEQ_ID;
+
+ if (dir == MBOX_MSG_RESP)
+ hinic3_mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
+ else
+ hinic3_init_mgmt_msg_work(pf_to_mgmt, recv_msg);
+}
+
+static int alloc_recv_msg(struct hinic3_recv_msg *recv_msg)
+{
+ recv_msg->seq_id = MGMT_BOGUS_SEQ_ID;
+
+ recv_msg->msg = kzalloc(MGMT_MAX_PF_BUF_SIZE, GFP_KERNEL);
+ if (!recv_msg->msg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_recv_msg(struct hinic3_recv_msg *recv_msg)
+{
+ kfree(recv_msg->msg);
+}
+
+static int alloc_msg_buf(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ struct device *dev = pf_to_mgmt->hwdev->dev;
+ int err;
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+ if (err) {
+ dev_err(dev, "Failed to allocate recv msg\n");
+ return err;
+ }
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ if (err) {
+ dev_err(dev, "Failed to allocate resp recv msg\n");
+ goto err_free_msg_from_mgmt;
+ }
+
+ pf_to_mgmt->mgmt_ack_buf = kzalloc(MGMT_MAX_PF_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->mgmt_ack_buf) {
+ err = -ENOMEM;
+ goto err_free_resp_msg_from_mgmt;
+ }
+
+ return 0;
+
+err_free_resp_msg_from_mgmt:
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+err_free_msg_from_mgmt:
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+
+ return err;
+}
+
+static void free_msg_buf(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ kfree(pf_to_mgmt->mgmt_ack_buf);
+
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+}
+
+int hinic3_pf_to_mgmt_init(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ int err;
+
+ pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
+ if (!pf_to_mgmt)
+ return -ENOMEM;
+
+ hwdev->pf_to_mgmt = pf_to_mgmt;
+ pf_to_mgmt->hwdev = hwdev;
+ spin_lock_init(&pf_to_mgmt->sync_event_lock);
+ pf_to_mgmt->workq = create_singlethread_workqueue(HINIC3_MGMT_WQ_NAME);
+ if (!pf_to_mgmt->workq) {
+ dev_err(hwdev->dev, "Failed to initialize MGMT workqueue\n");
+ err = -ENOMEM;
+ goto err_free_pf_to_mgmt;
+ }
+
+ err = alloc_msg_buf(pf_to_mgmt);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate msg buffers\n");
+ goto err_destroy_workqueue;
+ }
+
+ return 0;
+
+err_destroy_workqueue:
+ destroy_workqueue(pf_to_mgmt->workq);
+err_free_pf_to_mgmt:
+ kfree(pf_to_mgmt);
+
+ return err;
+}
+
+void hinic3_pf_to_mgmt_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
+
+ /* Destroy the workqueue before freeing the related pf_to_mgmt resources
+ * so that no queued work can touch freed memory
+ */
+ destroy_workqueue(pf_to_mgmt->workq);
+
+ free_msg_buf(pf_to_mgmt);
+ kfree(pf_to_mgmt);
+}
+
void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev)
{
if (hwdev->aeqs)
flush_workqueue(hwdev->aeqs->workq);
+
+ if (HINIC3_IS_PF(hwdev) && hwdev->pf_to_mgmt)
+ flush_workqueue(hwdev->pf_to_mgmt->workq);
}
void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
u8 size)
{
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hinic3_recv_msg *recv_msg;
+ __le64 msg_header;
+ bool is_send_dir;
+
if (MBOX_MSG_HEADER_GET(*(__force __le64 *)header, SOURCE) ==
- MBOX_MSG_FROM_MBOX)
+ MBOX_MSG_FROM_MBOX) {
hinic3_mbox_func_aeqe_handler(hwdev, header, size);
+
+ return;
+ }
+
+ pf_to_mgmt = hwdev->pf_to_mgmt;
+ msg_header = *(__force __le64 *)header;
+
+ is_send_dir = MBOX_MSG_HEADER_GET(msg_header, DIRECTION) ==
+ MBOX_MSG_SEND;
+
+ recv_msg = is_send_dir ? &pf_to_mgmt->recv_msg_from_mgmt :
+ &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+ hinic3_recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
index bbef3b32a6ec..56f48d5442bc 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
@@ -6,8 +6,61 @@
#include <linux/types.h>
+#include "hinic3_mbox.h"
+#include "hinic3_hw_intf.h"
+
struct hinic3_hwdev;
+struct hinic3_recv_msg {
+ /* Preallocated buffer of size MGMT_MAX_PF_BUF_SIZE that accumulates
+ * the received message, segment by segment.
+ */
+ void *msg;
+ /* Message id for which segments are accumulated. */
+ u8 msg_id;
+ /* Sequence id of last received segment of current message. */
+ u8 seq_id;
+ u16 msg_len;
+ int async_mgmt_to_pf;
+ enum mgmt_mod_type mod;
+ u16 cmd;
+ struct completion recv_done;
+};
+
+enum comm_pf_to_mgmt_event_state {
+ COMM_SEND_EVENT_UNINIT,
+ COMM_SEND_EVENT_START,
+ COMM_SEND_EVENT_SUCCESS,
+ COMM_SEND_EVENT_TIMEOUT,
+};
+
+struct hinic3_msg_pf_to_mgmt {
+ struct hinic3_hwdev *hwdev;
+ struct workqueue_struct *workq;
+ void *mgmt_ack_buf;
+ struct hinic3_recv_msg recv_msg_from_mgmt;
+ struct hinic3_recv_msg recv_resp_msg_from_mgmt;
+ u16 async_msg_id;
+ u16 sync_msg_id;
+ void *async_msg_cb_data[MGMT_MOD_HW_MAX];
+ /* synchronizes message send with message receives via event queue */
+ spinlock_t sync_event_lock;
+ enum comm_pf_to_mgmt_event_state event_flag;
+};
+
+struct mgmt_msg_handle_work {
+ struct work_struct work;
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ void *msg;
+ u16 msg_len;
+ enum mgmt_mod_type mod;
+ u16 cmd;
+ u16 msg_id;
+ int async_mgmt_to_pf;
+};
+
+int hinic3_pf_to_mgmt_init(struct hinic3_hwdev *hwdev);
+void hinic3_pf_to_mgmt_free(struct hinic3_hwdev *hwdev);
void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev);
void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev,
u8 *header, u8 size);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
index 6cc0345c39e4..c0c87a8c2198 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
@@ -56,6 +56,31 @@ struct l2nic_cmd_update_mac {
u8 new_mac[ETH_ALEN];
};
+struct l2nic_cmd_vlan_config {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd1;
+ u16 vlan_id;
+ u16 rsvd2;
+};
+
+struct l2nic_cmd_vlan_offload {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 vlan_offload;
+ u8 rsvd1[5];
+};
+
+/* set vlan filter */
+struct l2nic_cmd_set_vlan_filter {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 rsvd[2];
+ /* bit0:vlan filter en; bit1:broadcast_filter_en */
+ u32 vlan_filter_ctrl;
+};
+
struct l2nic_cmd_set_ci_attr {
struct mgmt_msg_head msg_head;
u16 func_idx;
@@ -90,6 +115,22 @@ struct l2nic_cmd_set_vport_state {
u8 rsvd2[3];
};
+/* Definition of the NIC receiving mode */
+#define L2NIC_RX_MODE_UC 0x01
+#define L2NIC_RX_MODE_MC 0x02
+#define L2NIC_RX_MODE_BC 0x04
+#define L2NIC_RX_MODE_MC_ALL 0x08
+#define L2NIC_RX_MODE_PROMISC 0x10
+
+struct l2nic_rx_mode_config {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_mode;
+};
+
struct l2nic_cmd_set_dcb_state {
struct mgmt_msg_head head;
u16 func_id;
@@ -102,6 +143,26 @@ struct l2nic_cmd_set_dcb_state {
u8 rsvd[7];
};
+struct l2nic_cmd_lro_config {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd1;
+ u8 lro_ipv4_en;
+ u8 lro_ipv6_en;
+ /* unit is 1K */
+ u8 lro_max_pkt_len;
+ u8 resv2[13];
+};
+
+struct l2nic_cmd_lro_timer {
+ struct mgmt_msg_head msg_head;
+ /* 1: set timer value, 0: get timer value */
+ u8 opcode;
+ u8 rsvd[3];
+ u32 timer;
+};
+
#define L2NIC_RSS_TYPE_VALID_MASK BIT(23)
#define L2NIC_RSS_TYPE_TCP_IPV6_EXT_MASK BIT(24)
#define L2NIC_RSS_TYPE_IPV6_EXT_MASK BIT(25)
@@ -160,12 +221,19 @@ enum l2nic_cmd {
/* FUNC CFG */
L2NIC_CMD_SET_FUNC_TBL = 5,
L2NIC_CMD_SET_VPORT_ENABLE = 6,
+ L2NIC_CMD_SET_RX_MODE = 7,
L2NIC_CMD_SET_SQ_CI_ATTR = 8,
L2NIC_CMD_CLEAR_QP_RESOURCE = 11,
+ L2NIC_CMD_CFG_RX_LRO = 13,
+ L2NIC_CMD_CFG_LRO_TIMER = 14,
L2NIC_CMD_FEATURE_NEGO = 15,
+ L2NIC_CMD_GET_MAC = 20,
L2NIC_CMD_SET_MAC = 21,
L2NIC_CMD_DEL_MAC = 22,
L2NIC_CMD_UPDATE_MAC = 23,
+ L2NIC_CMD_CFG_FUNC_VLAN = 25,
+ L2NIC_CMD_SET_VLAN_FILTER_EN = 26,
+ L2NIC_CMD_SET_RX_VLAN_OFFLOAD = 27,
L2NIC_CMD_CFG_RSS = 60,
L2NIC_CMD_CFG_RSS_HASH_KEY = 63,
L2NIC_CMD_CFG_RSS_HASH_ENGINE = 64,
@@ -189,6 +257,7 @@ enum l2nic_ucode_cmd {
/* hilink mac group command */
enum mag_cmd {
+ MAG_CMD_SET_PORT_ENABLE = 6,
MAG_CMD_GET_LINK_STATUS = 7,
};
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
index bbf22811a029..75adfe897e81 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
@@ -2,7 +2,9 @@
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/netdevice.h>
+#include <net/vxlan.h>
#include "hinic3_hwif.h"
#include "hinic3_nic_cfg.h"
@@ -12,6 +14,15 @@
#include "hinic3_rx.h"
#include "hinic3_tx.h"
+#define HINIC3_LRO_DEFAULT_COAL_PKT_SIZE 32
+#define HINIC3_LRO_DEFAULT_TIME_LIMIT 16
+
+#define VLAN_BITMAP_BITS_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap) * 8)
+#define VID_LINE(nic_dev, vid) \
+ ((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev))
+#define VID_COL(nic_dev, vid) \
+ ((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1))
+
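With 64-bit longs the two macros reduce to a plain bit address; a worked example:

/* VLAN_BITMAP_BITS_SIZE = sizeof(unsigned long) * 8 = 64 on 64-bit.
 * vid = 100: VID_LINE = 100 / 64 = 1, VID_COL = 100 & 63 = 36,
 * so vlan 100 is tracked by bit 36 of vlan_bitmap[1].
 */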
/* try to modify the number of irq to the target number,
* and return the actual number of irq.
*/
@@ -327,6 +338,31 @@ static void hinic3_close_channel(struct net_device *netdev)
hinic3_free_qp_ctxts(nic_dev);
}
+static int hinic3_maybe_set_port_state(struct net_device *netdev, bool enable)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&nic_dev->port_state_mutex);
+ err = hinic3_set_port_enable(nic_dev->hwdev, enable);
+ mutex_unlock(&nic_dev->port_state_mutex);
+
+ return err;
+}
+
+static void hinic3_print_link_message(struct net_device *netdev,
+ bool link_status_up)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (nic_dev->link_status_up == link_status_up)
+ return;
+
+ nic_dev->link_status_up = link_status_up;
+
+ netdev_dbg(netdev, "Link is %s\n", str_up_down(link_status_up));
+}
+
static int hinic3_vport_up(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -341,11 +377,17 @@ static int hinic3_vport_up(struct net_device *netdev)
goto err_flush_qps_res;
}
+ err = hinic3_maybe_set_port_state(netdev, true);
+ if (err) {
+ netdev_err(netdev, "Failed to enable port\n");
+ goto err_disable_vport;
+ }
+
err = netif_set_real_num_queues(netdev, nic_dev->q_params.num_qps,
nic_dev->q_params.num_qps);
if (err) {
netdev_err(netdev, "Failed to set real number of queues\n");
- goto err_flush_qps_res;
+ goto err_disable_vport;
}
netif_tx_start_all_queues(netdev);
@@ -353,8 +395,12 @@ static int hinic3_vport_up(struct net_device *netdev)
if (!err && link_status_up)
netif_carrier_on(netdev);
+ hinic3_print_link_message(netdev, link_status_up);
+
return 0;
+err_disable_vport:
+ hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false);
err_flush_qps_res:
hinic3_flush_qps_res(nic_dev->hwdev);
/* wait to guarantee that no packets will be sent to host */
@@ -386,6 +432,11 @@ static int hinic3_open(struct net_device *netdev)
struct hinic3_dyna_qp_params qp_params;
int err;
+ if (test_bit(HINIC3_INTF_UP, &nic_dev->flags)) {
+ netdev_dbg(netdev, "Netdev already open, do nothing\n");
+ return 0;
+ }
+
err = hinic3_init_nicio_res(nic_dev);
if (err) {
netdev_err(netdev, "Failed to init nicio resources\n");
@@ -413,6 +464,8 @@ static int hinic3_open(struct net_device *netdev)
if (err)
goto err_close_channel;
+ set_bit(HINIC3_INTF_UP, &nic_dev->flags);
+
return 0;
err_close_channel:
@@ -433,6 +486,11 @@ static int hinic3_close(struct net_device *netdev)
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic3_dyna_qp_params qp_params;
+ if (!test_and_clear_bit(HINIC3_INTF_UP, &nic_dev->flags)) {
+ netdev_dbg(netdev, "Netdev already close, do nothing\n");
+ return 0;
+ }
+
hinic3_vport_down(netdev);
hinic3_close_channel(netdev);
hinic3_uninit_qps(nic_dev, &qp_params);
@@ -441,6 +499,172 @@ static int hinic3_close(struct net_device *netdev)
return 0;
}
+#define SET_FEATURES_OP_STR(op) ((op) ? "Enable" : "Disable")
+
+static int hinic3_set_feature_rx_csum(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t features,
+ netdev_features_t *failed_features)
+{
+ netdev_features_t changed = wanted_features ^ features;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ if (changed & NETIF_F_RXCSUM)
+ dev_dbg(hwdev->dev, "%s rx csum success\n",
+ SET_FEATURES_OP_STR(wanted_features & NETIF_F_RXCSUM));
+
+ return 0;
+}
+
+static int hinic3_set_feature_tso(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t features,
+ netdev_features_t *failed_features)
+{
+ netdev_features_t changed = wanted_features ^ features;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ if (changed & NETIF_F_TSO)
+ dev_dbg(hwdev->dev, "%s tso success\n",
+ SET_FEATURES_OP_STR(wanted_features & NETIF_F_TSO));
+
+ return 0;
+}
+
+static int hinic3_set_feature_lro(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t features,
+ netdev_features_t *failed_features)
+{
+ netdev_features_t changed = wanted_features ^ features;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ bool en = !!(wanted_features & NETIF_F_LRO);
+ int err;
+
+ if (!(changed & NETIF_F_LRO))
+ return 0;
+
+ err = hinic3_set_rx_lro_state(hwdev, en,
+ HINIC3_LRO_DEFAULT_TIME_LIMIT,
+ HINIC3_LRO_DEFAULT_COAL_PKT_SIZE);
+ if (err) {
+ dev_err(hwdev->dev, "%s lro failed\n", SET_FEATURES_OP_STR(en));
+ *failed_features |= NETIF_F_LRO;
+ }
+
+ return err;
+}
+
+static int hinic3_set_feature_rx_cvlan(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t features,
+ netdev_features_t *failed_features)
+{
+ bool en = !!(wanted_features & NETIF_F_HW_VLAN_CTAG_RX);
+ netdev_features_t changed = wanted_features ^ features;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
+ return 0;
+
+ err = hinic3_set_rx_vlan_offload(hwdev, en);
+ if (err) {
+ dev_err(hwdev->dev, "%s rx vlan offload failed\n",
+ SET_FEATURES_OP_STR(en));
+ *failed_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ return err;
+}
+
+static int hinic3_set_feature_vlan_filter(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t features,
+ netdev_features_t *failed_features)
+{
+ bool en = !!(wanted_features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ netdev_features_t changed = wanted_features ^ features;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ if (!(changed & NETIF_F_HW_VLAN_CTAG_FILTER))
+ return 0;
+
+ err = hinic3_set_vlan_filter(hwdev, en);
+ if (err) {
+ dev_err(hwdev->dev, "%s rx vlan filter failed\n",
+ SET_FEATURES_OP_STR(en));
+ *failed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
+ return err;
+}
+
+static int hinic3_set_features(struct net_device *netdev,
+ netdev_features_t curr,
+ netdev_features_t wanted)
+{
+ netdev_features_t failed = 0;
+ int err;
+
+ err = hinic3_set_feature_rx_csum(netdev, wanted, curr, &failed) |
+ hinic3_set_feature_tso(netdev, wanted, curr, &failed) |
+ hinic3_set_feature_lro(netdev, wanted, curr, &failed) |
+ hinic3_set_feature_rx_cvlan(netdev, wanted, curr, &failed) |
+ hinic3_set_feature_vlan_filter(netdev, wanted, curr, &failed);
+ if (err) {
+ netdev->features = wanted ^ failed;
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_ndo_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ return hinic3_set_features(netdev, netdev->features, features);
+}
+
+static netdev_features_t hinic3_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t features_tmp = features;
+
+ /* If Rx checksum is disabled, then LRO should also be disabled */
+ if (!(features_tmp & NETIF_F_RXCSUM))
+ features_tmp &= ~NETIF_F_LRO;
+
+ return features_tmp;
+}
+
+static netdev_features_t hinic3_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ features = vlan_features_check(skb, features);
+ features = vxlan_features_check(skb, features);
+
+ return features;
+}
+
+int hinic3_set_hw_features(struct net_device *netdev)
+{
+ netdev_features_t wanted, curr;
+
+ wanted = netdev->features;
+ /* invert wanted so every wanted feature reads as changed and gets set */
+ curr = ~wanted;
+
+ return hinic3_set_features(netdev, curr, wanted);
+}
+
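The two bit tricks in hinic3_set_features() and hinic3_set_hw_features() are easiest to see with assumed feature values:

/* curr = ~wanted flips every bit, so each wanted feature shows up in
 * 'changed = wanted ^ curr' and every helper pushes its state to HW.
 * On partial failure, 'wanted ^ failed' clears exactly the failed bits:
 *   wanted = NETIF_F_LRO | NETIF_F_RXCSUM, failed = NETIF_F_LRO
 *   netdev->features = wanted ^ failed = NETIF_F_RXCSUM
 */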
static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
{
int err;
@@ -482,11 +706,162 @@ static int hinic3_set_mac_addr(struct net_device *netdev, void *addr)
return 0;
}
+static int hinic3_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
+ u32 column, row;
+ u16 func_id;
+ int err;
+
+ column = VID_COL(nic_dev, vid);
+ row = VID_LINE(nic_dev, vid);
+
+ func_id = hinic3_global_func_id(nic_dev->hwdev);
+
+ err = hinic3_add_vlan(nic_dev->hwdev, vid, func_id);
+ if (err) {
+ netdev_err(netdev, "Failed to add vlan %u\n", vid);
+ goto out;
+ }
+
+ set_bit(column, &vlan_bitmap[row]);
+ netdev_dbg(netdev, "Add vlan %u\n", vid);
+
+out:
+ return err;
+}
+
+static int hinic3_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
+ u32 column, row;
+ u16 func_id;
+ int err;
+
+ column = VID_COL(nic_dev, vid);
+ row = VID_LINE(nic_dev, vid);
+
+ func_id = hinic3_global_func_id(nic_dev->hwdev);
+ err = hinic3_del_vlan(nic_dev->hwdev, vid, func_id);
+ if (err) {
+ netdev_err(netdev, "Failed to delete vlan %u\n", vid);
+ goto out;
+ }
+
+ clear_bit(column, &vlan_bitmap[row]);
+ netdev_dbg(netdev, "Remove vlan %u\n", vid);
+
+out:
+ return err;
+}
+
+static void hinic3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_io_queue *sq;
+ u16 sw_pi, hw_ci;
+
+ sq = nic_dev->txqs[txqueue].sq;
+ sw_pi = hinic3_get_sq_local_pi(sq);
+ hw_ci = hinic3_get_sq_hw_ci(sq);
+ netdev_dbg(netdev,
+ "txq%u: sw_pi: %u, hw_ci: %u, sw_ci: %u, napi->state: 0x%lx.\n",
+ txqueue, sw_pi, hw_ci, hinic3_get_sq_local_ci(sq),
+ nic_dev->q_params.irq_cfg[txqueue].napi.state);
+
+ if (sw_pi != hw_ci)
+ set_bit(HINIC3_EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag);
+}
+
+static void hinic3_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u64 bytes, packets, dropped, errors;
+ struct hinic3_txq_stats *txq_stats;
+ struct hinic3_rxq_stats *rxq_stats;
+ struct hinic3_txq *txq;
+ struct hinic3_rxq *rxq;
+ unsigned int start;
+ int i;
+
+ bytes = 0;
+ packets = 0;
+ dropped = 0;
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ if (!nic_dev->txqs)
+ break;
+
+ txq = &nic_dev->txqs[i];
+ txq_stats = &txq->txq_stats;
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ bytes += txq_stats->bytes;
+ packets += txq_stats->packets;
+ dropped += txq_stats->dropped;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ }
+ stats->tx_packets = packets;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = dropped;
+
+ bytes = 0;
+ packets = 0;
+ errors = 0;
+ dropped = 0;
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ if (!nic_dev->rxqs)
+ break;
+
+ rxq = &nic_dev->rxqs[i];
+ rxq_stats = &rxq->rxq_stats;
+ do {
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ bytes += rxq_stats->bytes;
+ packets += rxq_stats->packets;
+ errors += rxq_stats->csum_errors +
+ rxq_stats->other_errors;
+ dropped += rxq_stats->dropped;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ }
+ stats->rx_packets = packets;
+ stats->rx_bytes = bytes;
+ stats->rx_errors = errors;
+ stats->rx_dropped = dropped;
+}
+
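The fetch_begin/fetch_retry loop above is the standard lockless reader for u64_stats counters. A minimal sketch, assuming the txq stats struct added by this patch:

#include <linux/u64_stats_sync.h>

static u64 example_read_tx_packets(struct hinic3_txq_stats *txq_stats)
{
	unsigned int start;
	u64 pkts;

	do {
		start = u64_stats_fetch_begin(&txq_stats->syncp);
		/* may race with a writer; loop until a consistent snapshot */
		pkts = txq_stats->packets;
	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));

	return pkts;
}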
+static void hinic3_nic_set_rx_mode(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt ||
+ netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) {
+ set_bit(HINIC3_UPDATE_MAC_FILTER, &nic_dev->flags);
+ nic_dev->netdev_uc_cnt = netdev_uc_count(netdev);
+ nic_dev->netdev_mc_cnt = netdev_mc_count(netdev);
+ }
+
+ queue_work(nic_dev->workq, &nic_dev->rx_mode_work);
+}
+
static const struct net_device_ops hinic3_netdev_ops = {
.ndo_open = hinic3_open,
.ndo_stop = hinic3_close,
+ .ndo_set_features = hinic3_ndo_set_features,
+ .ndo_fix_features = hinic3_fix_features,
+ .ndo_features_check = hinic3_features_check,
.ndo_change_mtu = hinic3_change_mtu,
.ndo_set_mac_address = hinic3_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = hinic3_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = hinic3_vlan_rx_kill_vid,
+ .ndo_tx_timeout = hinic3_tx_timeout,
+ .ndo_get_stats64 = hinic3_get_stats64,
+ .ndo_set_rx_mode = hinic3_nic_set_rx_mode,
.ndo_start_xmit = hinic3_xmit_frame,
};
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
index 979f47ca77f9..44abccf9cb29 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
@@ -10,6 +10,9 @@
#include "hinic3_nic_dev.h"
#include "hinic3_nic_io.h"
+#define MGMT_MSG_CMD_OP_ADD 1
+#define MGMT_MSG_CMD_OP_DEL 0
+
static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
u64 *s_feature, u16 size)
{
@@ -20,7 +23,8 @@ static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
feature_nego.func_id = hinic3_global_func_id(hwdev);
feature_nego.opcode = opcode;
if (opcode == MGMT_MSG_CMD_OP_SET)
- memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64));
+ memcpy(feature_nego.s_feature, s_feature,
+ array_size(size, sizeof(u64)));
mgmt_msg_params_init_default(&msg_params, &feature_nego,
sizeof(feature_nego));
@@ -34,7 +38,8 @@ static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
}
if (opcode == MGMT_MSG_CMD_OP_GET)
- memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64));
+ memcpy(s_feature, feature_nego.s_feature,
+ array_size(size, sizeof(u64)));
return 0;
}
@@ -57,6 +62,136 @@ bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
return (nic_dev->nic_io->feature_cap & feature_bits) == feature_bits;
}
+static int hinic3_set_rx_lro(struct hinic3_hwdev *hwdev, u8 ipv4_en, u8 ipv6_en,
+ u8 lro_max_pkt_len)
+{
+ struct l2nic_cmd_lro_config lro_cfg = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ lro_cfg.func_id = hinic3_global_func_id(hwdev);
+ lro_cfg.opcode = MGMT_MSG_CMD_OP_SET;
+ lro_cfg.lro_ipv4_en = ipv4_en;
+ lro_cfg.lro_ipv6_en = ipv6_en;
+ lro_cfg.lro_max_pkt_len = lro_max_pkt_len;
+
+ mgmt_msg_params_init_default(&msg_params, &lro_cfg,
+ sizeof(lro_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RX_LRO,
+ &msg_params);
+
+ if (err || lro_cfg.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set lro offload, err: %d, status: 0x%x\n",
+ err, lro_cfg.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic3_set_rx_lro_timer(struct hinic3_hwdev *hwdev, u32 timer_value)
+{
+ struct l2nic_cmd_lro_timer lro_timer = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ lro_timer.opcode = MGMT_MSG_CMD_OP_SET;
+ lro_timer.timer = timer_value;
+
+ mgmt_msg_params_init_default(&msg_params, &lro_timer,
+ sizeof(lro_timer));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_LRO_TIMER,
+ &msg_params);
+
+ if (err || lro_timer.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set lro timer, err: %d, status: 0x%x\n",
+ err, lro_timer.msg_head.status);
+
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_set_rx_lro_state(struct hinic3_hwdev *hwdev, u8 lro_en,
+ u32 lro_timer, u8 lro_max_pkt_len)
+{
+ u8 ipv4_en, ipv6_en;
+ int err;
+
+ ipv4_en = lro_en ? 1 : 0;
+ ipv6_en = lro_en ? 1 : 0;
+
+ dev_dbg(hwdev->dev, "Set LRO max coalesce packet size to %uK\n",
+ lro_max_pkt_len);
+
+ err = hinic3_set_rx_lro(hwdev, ipv4_en, ipv6_en, lro_max_pkt_len);
+ if (err)
+ return err;
+
+ /* we don't set LRO timer for VF */
+ if (HINIC3_IS_VF(hwdev))
+ return 0;
+
+ dev_dbg(hwdev->dev, "Set LRO timer to %u\n", lro_timer);
+
+ return hinic3_set_rx_lro_timer(hwdev, lro_timer);
+}
+
+int hinic3_set_rx_vlan_offload(struct hinic3_hwdev *hwdev, u8 en)
+{
+ struct l2nic_cmd_vlan_offload vlan_cfg = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ vlan_cfg.func_id = hinic3_global_func_id(hwdev);
+ vlan_cfg.vlan_offload = en;
+
+ mgmt_msg_params_init_default(&msg_params, &vlan_cfg,
+ sizeof(vlan_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_RX_VLAN_OFFLOAD,
+ &msg_params);
+
+ if (err || vlan_cfg.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set rx vlan offload, err: %d, status: 0x%x\n",
+ err, vlan_cfg.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_set_vlan_filter(struct hinic3_hwdev *hwdev, u32 vlan_filter_ctrl)
+{
+ struct l2nic_cmd_set_vlan_filter vlan_filter = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ vlan_filter.func_id = hinic3_global_func_id(hwdev);
+ vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl;
+
+ mgmt_msg_params_init_default(&msg_params, &vlan_filter,
+ sizeof(vlan_filter));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_VLAN_FILTER_EN,
+ &msg_params);
+
+ if (err || vlan_filter.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set vlan filter, err: %d, status: 0x%x\n",
+ err, vlan_filter.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap)
{
nic_dev->nic_io->feature_cap = feature_cap;
@@ -117,17 +252,52 @@ int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu)
&func_tbl_cfg);
}
+static bool hinic3_check_vf_set_by_pf(struct hinic3_hwdev *hwdev,
+ u8 status)
+{
+ return HINIC3_IS_VF(hwdev) && status == HINIC3_PF_SET_VF_ALREADY;
+}
+
static int hinic3_check_mac_info(struct hinic3_hwdev *hwdev, u8 status,
u16 vlan_id)
{
if ((status && status != MGMT_STATUS_EXIST) ||
((vlan_id & BIT(15)) && status == MGMT_STATUS_EXIST)) {
+ if (hinic3_check_vf_set_by_pf(hwdev, status))
+ return 0;
+
return -EINVAL;
}
return 0;
}
+int hinic3_get_default_mac(struct hinic3_hwdev *hwdev, u8 *mac_addr)
+{
+ struct l2nic_cmd_set_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ mac_info.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_GET_MAC,
+ &msg_params);
+
+ if (err || mac_info.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to get mac, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return -EFAULT;
+ }
+
+ ether_addr_copy(mac_addr, mac_info.mac);
+
+ return 0;
+}
+
int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
u16 func_id)
{
@@ -157,9 +327,9 @@ int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
return -EIO;
}
- if (mac_info.msg_head.status == MGMT_STATUS_PF_SET_VF_ALREADY) {
+ if (hinic3_check_vf_set_by_pf(hwdev, mac_info.msg_head.status)) {
dev_warn(hwdev->dev, "PF has already set VF mac, Ignore set operation\n");
- return 0;
+ return -EADDRINUSE;
}
if (mac_info.msg_head.status == MGMT_STATUS_EXIST) {
@@ -191,11 +361,18 @@ int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
L2NIC_CMD_DEL_MAC, &msg_params);
- if (err) {
+ if (err ||
+ (mac_info.msg_head.status &&
+ !hinic3_check_vf_set_by_pf(hwdev, mac_info.msg_head.status))) {
dev_err(hwdev->dev,
"Failed to delete MAC, err: %d, status: 0x%x\n",
err, mac_info.msg_head.status);
- return err;
+ return -EFAULT;
+ }
+
+ if (hinic3_check_vf_set_by_pf(hwdev, mac_info.msg_head.status)) {
+ dev_warn(hwdev->dev, "PF has already set VF mac, Ignore delete operation.\n");
+ return -EADDRINUSE;
}
return 0;
@@ -231,6 +408,17 @@ int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
return -EIO;
}
+ if (hinic3_check_vf_set_by_pf(hwdev, mac_info.msg_head.status)) {
+ dev_warn(hwdev->dev, "PF has already set VF MAC. Ignore update operation\n");
+ return -EADDRINUSE;
+ }
+
+ if (mac_info.msg_head.status == HINIC3_MGMT_STATUS_EXIST) {
+ dev_warn(hwdev->dev,
+ "MAC is repeated. Ignore update operation\n");
+ return 0;
+ }
+
return 0;
}
@@ -313,6 +501,96 @@ int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev)
return pkt_drop.msg_head.status;
}
+int hinic3_set_rx_mode(struct hinic3_hwdev *hwdev, u32 rx_mode)
+{
+ struct l2nic_rx_mode_config rx_mode_cfg = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ rx_mode_cfg.func_id = hinic3_global_func_id(hwdev);
+ rx_mode_cfg.rx_mode = rx_mode;
+
+ mgmt_msg_params_init_default(&msg_params, &rx_mode_cfg,
+ sizeof(rx_mode_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_RX_MODE, &msg_params);
+
+ if (err || rx_mode_cfg.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set rx mode, err: %d, status: 0x%x\n",
+ err, rx_mode_cfg.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic3_config_vlan(struct hinic3_hwdev *hwdev,
+ u8 opcode, u16 vlan_id, u16 func_id)
+{
+ struct l2nic_cmd_vlan_config vlan_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ vlan_info.opcode = opcode;
+ vlan_info.func_id = func_id;
+ vlan_info.vlan_id = vlan_id;
+
+ mgmt_msg_params_init_default(&msg_params, &vlan_info,
+ sizeof(vlan_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_FUNC_VLAN, &msg_params);
+
+ if (err || vlan_info.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to %s vlan, err: %d, status: 0x%x\n",
+ opcode == MGMT_MSG_CMD_OP_ADD ? "add" : "delete",
+ err, vlan_info.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_add_vlan(struct hinic3_hwdev *hwdev, u16 vlan_id, u16 func_id)
+{
+ return hinic3_config_vlan(hwdev, MGMT_MSG_CMD_OP_ADD, vlan_id, func_id);
+}
+
+int hinic3_del_vlan(struct hinic3_hwdev *hwdev, u16 vlan_id, u16 func_id)
+{
+ return hinic3_config_vlan(hwdev, MGMT_MSG_CMD_OP_DEL, vlan_id, func_id);
+}
+
+int hinic3_set_port_enable(struct hinic3_hwdev *hwdev, bool enable)
+{
+ struct mag_cmd_set_port_enable en_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if (HINIC3_IS_VF(hwdev))
+ return 0;
+
+ en_state.function_id = hinic3_global_func_id(hwdev);
+ en_state.state = enable ? MAG_CMD_TX_ENABLE | MAG_CMD_RX_ENABLE :
+ MAG_CMD_PORT_DISABLE;
+
+ mgmt_msg_params_init_default(&msg_params, &en_state,
+ sizeof(en_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_HILINK,
+ MAG_CMD_SET_PORT_ENABLE, &msg_params);
+
+ if (err || en_state.head.status) {
+ dev_err(hwdev->dev, "Failed to set port state, err: %d, status: 0x%x\n",
+ err, en_state.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state)
{
struct l2nic_cmd_set_dcb_state dcb_state = {};
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
index b83b567fa542..c32eaa886e17 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
@@ -16,10 +16,13 @@ struct hinic3_nic_dev;
#define HINIC3_MAX_JUMBO_FRAME_SIZE 9600
#define HINIC3_VLAN_ID_MASK 0x7FFF
+#define HINIC3_PF_SET_VF_ALREADY 0x4
+#define HINIC3_MGMT_STATUS_EXIST 0x6
enum hinic3_nic_event_type {
HINIC3_NIC_EVENT_LINK_DOWN = 0,
HINIC3_NIC_EVENT_LINK_UP = 1,
+ HINIC3_NIC_EVENT_PORT_MODULE_EVENT = 2,
};
struct hinic3_sq_attr {
@@ -32,15 +35,55 @@ struct hinic3_sq_attr {
u64 ci_dma_base;
};
+#define MAG_CMD_PORT_DISABLE 0x0
+#define MAG_CMD_TX_ENABLE 0x1
+#define MAG_CMD_RX_ENABLE 0x2
+/* The physical port is disabled only when all PFs of the port are set to
+ * down; if any PF is enabled, the port is enabled
+ */
+struct mag_cmd_set_port_enable {
+ struct mgmt_msg_head head;
+
+ u16 function_id;
+ u16 rsvd0;
+
+ /* bitmap bit0:tx_en bit1:rx_en */
+ u8 state;
+ u8 rsvd1[3];
+};
+
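Example encodings of the state field, matching how hinic3_set_port_enable() builds it:

/* bit0 = tx_en, bit1 = rx_en:
 *   enable  -> MAG_CMD_TX_ENABLE | MAG_CMD_RX_ENABLE = 0x1 | 0x2 = 0x3
 *   disable -> MAG_CMD_PORT_DISABLE                  = 0x0
 */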
+enum link_err_type {
+ LINK_ERR_MODULE_UNRECOGNIZED,
+ LINK_ERR_NUM,
+};
+
+enum port_module_event_type {
+ HINIC3_PORT_MODULE_CABLE_PLUGGED,
+ HINIC3_PORT_MODULE_CABLE_UNPLUGGED,
+ HINIC3_PORT_MODULE_LINK_ERR,
+ HINIC3_PORT_MODULE_MAX_EVENT,
+};
+
+struct hinic3_port_module_event {
+ enum port_module_event_type type;
+ enum link_err_type err_type;
+};
+
int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev);
int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev);
bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
enum hinic3_nic_feature_cap feature_bits);
void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap);
+int hinic3_set_rx_lro_state(struct hinic3_hwdev *hwdev, u8 lro_en,
+ u32 lro_timer, u8 lro_max_pkt_len);
+int hinic3_set_rx_vlan_offload(struct hinic3_hwdev *hwdev, u8 en);
+int hinic3_set_vlan_filter(struct hinic3_hwdev *hwdev, u32 vlan_filter_ctrl);
+
int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev);
int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu);
+int hinic3_get_default_mac(struct hinic3_hwdev *hwdev, u8 *mac_addr);
int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
u16 func_id);
int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
@@ -52,10 +95,14 @@ int hinic3_set_ci_table(struct hinic3_hwdev *hwdev,
struct hinic3_sq_attr *attr);
int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev);
int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev);
+int hinic3_set_rx_mode(struct hinic3_hwdev *hwdev, u32 rx_mode);
int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state);
+int hinic3_set_port_enable(struct hinic3_hwdev *hwdev, bool enable);
int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up);
int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id,
bool enable);
+int hinic3_add_vlan(struct hinic3_hwdev *hwdev, u16 vlan_id, u16 func_id);
+int hinic3_del_vlan(struct hinic3_hwdev *hwdev, u16 vlan_id, u16 func_id);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
index 5ba83261616c..29189241f446 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -4,13 +4,47 @@
#ifndef _HINIC3_NIC_DEV_H_
#define _HINIC3_NIC_DEV_H_
+#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include "hinic3_hw_cfg.h"
+#include "hinic3_hwdev.h"
#include "hinic3_mgmt_interface.h"
+#define HINIC3_VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap))
+#define HINIC3_VLAN_BITMAP_SIZE(nic_dev) \
+ (VLAN_N_VID / HINIC3_VLAN_BITMAP_BYTE_SIZE(nic_dev))
+
enum hinic3_flags {
+ HINIC3_INTF_UP,
+ HINIC3_MAC_FILTER_CHANGED,
HINIC3_RSS_ENABLE,
+ HINIC3_UPDATE_MAC_FILTER,
+};
+
+enum hinic3_event_work_flags {
+ HINIC3_EVENT_WORK_TX_TIMEOUT,
+};
+
+enum hinic3_rx_mode_state {
+ HINIC3_HW_PROMISC_ON,
+ HINIC3_HW_ALLMULTI_ON,
+ HINIC3_PROMISC_FORCE_ON,
+ HINIC3_ALLMULTI_FORCE_ON,
+};
+
+enum hinic3_mac_filter_state {
+ HINIC3_MAC_WAIT_HW_SYNC,
+ HINIC3_MAC_HW_SYNCING,
+ HINIC3_MAC_HW_SYNCED,
+ HINIC3_MAC_WAIT_HW_UNSYNC,
+ HINIC3_MAC_HW_UNSYNCED,
+};
+
+struct hinic3_mac_filter {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ unsigned long state;
};
enum hinic3_rss_hash_type {
@@ -39,6 +73,7 @@ struct hinic3_irq_cfg {
cpumask_t affinity_mask;
struct hinic3_txq *txq;
struct hinic3_rxq *rxq;
+ u16 total_events;
};
struct hinic3_dyna_txrxq_params {
@@ -55,6 +90,9 @@ struct hinic3_intr_coal_info {
u8 pending_limit;
u8 coalesce_timer_cfg;
u8 resend_timer_cfg;
+
+ u8 rx_pending_limit_low;
+ u8 rx_pending_limit_high;
};
struct hinic3_nic_dev {
@@ -66,6 +104,7 @@ struct hinic3_nic_dev {
u16 max_qps;
u16 rx_buf_len;
u32 lro_replenish_thld;
+ unsigned long *vlan_bitmap;
unsigned long flags;
struct hinic3_nic_service_cap nic_svc_cap;
@@ -82,12 +121,31 @@ struct hinic3_nic_dev {
struct msix_entry *qps_msix_entries;
struct hinic3_intr_coal_info *intr_coalesce;
-
+ u32 adaptive_rx_coal;
+
+ struct workqueue_struct *workq;
+ struct delayed_work periodic_work;
+ struct work_struct rx_mode_work;
+ /* lock for enable/disable port */
+ struct mutex port_state_mutex;
+
+ struct list_head uc_filter_list;
+ struct list_head mc_filter_list;
+ unsigned long rx_mod_state;
+ int netdev_uc_cnt;
+ int netdev_mc_cnt;
+
+ /* flag bits defined by hinic3_event_work_flags */
+ unsigned long event_flag;
bool link_status_up;
};
void hinic3_set_netdev_ops(struct net_device *netdev);
+int hinic3_set_hw_features(struct net_device *netdev);
int hinic3_qps_irq_init(struct net_device *netdev);
void hinic3_qps_irq_uninit(struct net_device *netdev);
+void hinic3_set_rx_mode_work(struct work_struct *work);
+void hinic3_clean_mac_list_filter(struct net_device *netdev);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
index d86cd1ba4605..90887d2bb127 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
@@ -162,6 +162,9 @@ struct hinic3_clean_queue_ctxt {
#define SQ_CTXT_WQ_BLOCK_SET(val, member) \
FIELD_PREP(SQ_CTXT_WQ_BLOCK_##member##_MASK, val)
+/* reuse SQ macro for RQ because the hardware format is identical */
+#define RQ_CTXT_PREF_CI_HI(val) SQ_CTXT_PREF_CI_HI(val)
+
#define RQ_CTXT_PI_IDX_MASK GENMASK(15, 0)
#define RQ_CTXT_CI_IDX_MASK GENMASK(31, 16)
#define RQ_CTXT_CI_PI_SET(val, member) \
@@ -629,7 +632,8 @@ static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq,
RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));
rq_ctxt->pref_ci_owner =
- cpu_to_le32(RQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
+ cpu_to_le32(RQ_CTXT_PREF_SET(RQ_CTXT_PREF_CI_HI(ci_start),
+ CI_HI) |
RQ_CTXT_PREF_SET(1, OWNER));
rq_ctxt->pref_wq_pfn_hi_ci =
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
index 16c00c3bb1ed..159c291fa293 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
@@ -33,6 +33,31 @@
HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \
HINIC3_LRO_PKT_HDR_LEN_IPV4)
+static void hinic3_rxq_clean_stats(struct hinic3_rxq_stats *rxq_stats)
+{
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->bytes = 0;
+ rxq_stats->packets = 0;
+ rxq_stats->errors = 0;
+ rxq_stats->csum_errors = 0;
+ rxq_stats->other_errors = 0;
+ rxq_stats->dropped = 0;
+ rxq_stats->rx_buf_empty = 0;
+
+ rxq_stats->alloc_skb_err = 0;
+ rxq_stats->alloc_rx_buf_err = 0;
+ rxq_stats->restore_drop_sge = 0;
+ u64_stats_update_end(&rxq_stats->syncp);
+}
+
+static void hinic3_rxq_stats_init(struct hinic3_rxq *rxq)
+{
+ struct hinic3_rxq_stats *rxq_stats = &rxq->rxq_stats;
+
+ u64_stats_init(&rxq_stats->syncp);
+ hinic3_rxq_clean_stats(rxq_stats);
+}
+
int hinic3_alloc_rxqs(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -54,6 +79,8 @@ int hinic3_alloc_rxqs(struct net_device *netdev)
rxq->buf_len_shift = ilog2(nic_dev->rx_buf_len);
rxq->q_depth = nic_dev->q_params.rq_depth;
rxq->q_mask = nic_dev->q_params.rq_depth - 1;
+
+ hinic3_rxq_stats_init(rxq);
}
return 0;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
index 44ae841a3648..31622e0a63d0 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
@@ -5,6 +5,7 @@
#define _HINIC3_RX_H_
#include <linux/bitfield.h>
+#include <linux/dim.h>
#include <linux/netdevice.h>
#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK GENMASK(4, 0)
@@ -25,6 +26,20 @@
#define RQ_CQE_STATUS_GET(val, member) \
FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val)
+struct hinic3_rxq_stats {
+ u64 packets;
+ u64 bytes;
+ u64 errors;
+ u64 csum_errors;
+ u64 other_errors;
+ u64 dropped;
+ u64 rx_buf_empty;
+ u64 alloc_skb_err;
+ u64 alloc_rx_buf_err;
+ u64 restore_drop_sge;
+ struct u64_stats_sync syncp;
+};
+
/* RX Completion information that is provided by HW for a specific RX WQE */
struct hinic3_rq_cqe {
__le32 status;
@@ -59,6 +74,7 @@ struct hinic3_rxq {
u16 buf_len;
u32 buf_len_shift;
+ struct hinic3_rxq_stats rxq_stats;
u32 cons_idx;
u32 delta;
@@ -80,6 +96,11 @@ struct hinic3_rxq {
struct device *dev; /* device for DMA mapping */
dma_addr_t cqe_start_paddr;
+
+ struct dim dim;
+
+ u8 last_coalesc_timer_cfg;
+ u8 last_pending_limit;
} ____cacheline_aligned;
struct hinic3_dyna_rxq_res {
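The struct dim member added to hinic3_rxq is the hook for dynamic interrupt moderation, and last_coalesc_timer_cfg/last_pending_limit cache the most recently applied hardware values so redundant reprogramming can be skipped. In the usual pattern the NAPI poll path feeds a sample to the dim core, which may schedule dim.work to apply a new profile; a rough sketch only (net_dim() has taken the sample by value in older kernels and by pointer in newer ones, and the event counter source is driver-specific):

#include <linux/dim.h>

static void rxq_feed_dim(struct dim *dim, u16 event_ctr, u64 pkts, u64 bytes)
{
	struct dim_sample sample = {};

	/* Snapshot the per-queue totals for the moderation algorithm */
	dim_update_sample(event_ctr, pkts, bytes, &sample);
	/* May queue dim->work if a profile change is warranted */
	net_dim(dim, &sample);
}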
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
index 92c43c05e3f2..6d3dc930ca97 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -16,19 +16,38 @@
#define MIN_SKB_LEN 32
+static void hinic3_txq_clean_stats(struct hinic3_txq_stats *txq_stats)
+{
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->bytes = 0;
+ txq_stats->packets = 0;
+ txq_stats->busy = 0;
+ txq_stats->dropped = 0;
+
+ txq_stats->skb_pad_err = 0;
+ txq_stats->frag_len_overflow = 0;
+ txq_stats->offload_cow_skb_err = 0;
+ txq_stats->map_frag_err = 0;
+ txq_stats->unknown_tunnel_pkt = 0;
+ txq_stats->frag_size_err = 0;
+ u64_stats_update_end(&txq_stats->syncp);
+}
+
+static void hinic3_txq_stats_init(struct hinic3_txq *txq)
+{
+ struct hinic3_txq_stats *txq_stats = &txq->txq_stats;
+
+ u64_stats_init(&txq_stats->syncp);
+ hinic3_txq_clean_stats(txq_stats);
+}
+
int hinic3_alloc_txqs(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
- struct hinic3_hwdev *hwdev = nic_dev->hwdev;
u16 q_id, num_txqs = nic_dev->max_qps;
struct pci_dev *pdev = nic_dev->pdev;
struct hinic3_txq *txq;
- if (!num_txqs) {
- dev_err(hwdev->dev, "Cannot allocate zero size txqs\n");
- return -EINVAL;
- }
-
nic_dev->txqs = kcalloc(num_txqs, sizeof(*nic_dev->txqs), GFP_KERNEL);
if (!nic_dev->txqs)
return -ENOMEM;
@@ -40,6 +59,8 @@ int hinic3_alloc_txqs(struct net_device *netdev)
txq->q_depth = nic_dev->q_params.sq_depth;
txq->q_mask = nic_dev->q_params.sq_depth - 1;
txq->dev = &pdev->dev;
+
+ hinic3_txq_stats_init(txq);
}
return 0;
@@ -582,7 +603,6 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
err_drop_pkt:
dev_kfree_skb_any(skb);
-
err_out:
return NETDEV_TX_OK;
}
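On the transmit hot path the new counters would be bumped under the same syncp used by hinic3_txq_clean_stats() above; a minimal writer-side sketch (the actual call sites are not part of this hunk):

static void hinic3_txq_count(struct hinic3_txq_stats *txq_stats,
			     u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&txq_stats->syncp);
	txq_stats->packets += pkts;
	txq_stats->bytes += bytes;
	u64_stats_update_end(&txq_stats->syncp);
}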
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
index 7e1b872ba752..00194f2a1bcc 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
@@ -100,6 +100,20 @@ struct hinic3_sq_wqe_combo {
u32 task_type;
};
+struct hinic3_txq_stats {
+ u64 packets;
+ u64 bytes;
+ u64 busy;
+ u64 dropped;
+ u64 skb_pad_err;
+ u64 frag_len_overflow;
+ u64 offload_cow_skb_err;
+ u64 map_frag_err;
+ u64 unknown_tunnel_pkt;
+ u64 frag_size_err;
+ struct u64_stats_sync syncp;
+};
+
struct hinic3_dma_info {
dma_addr_t dma;
u32 len;
@@ -123,6 +137,8 @@ struct hinic3_txq {
struct hinic3_tx_info *tx_info;
struct hinic3_io_queue *sq;
+
+ struct hinic3_txq_stats txq_stats;
} ____cacheline_aligned;
struct hinic3_dyna_txq_res {
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 00f75d87c73f..def7efa15447 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -957,9 +957,6 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
-void
-ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
- struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index eadb1e3d12b3..afbff8aa9ceb 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -1414,8 +1414,8 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
if (!vsi_stat)
return;
- memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
- sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
+ memset(&vsi_stat->rx_ring_stats[q_idx]->stats, 0,
+ sizeof(vsi_stat->rx_ring_stats[q_idx]->stats));
memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
if (vsi->xdp_rings)
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 785bf5cc1b25..64e798b8f18f 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -204,42 +204,6 @@ bool ice_is_generic_mac(struct ice_hw *hw)
}
/**
- * ice_is_pf_c827 - check if pf contains c827 phy
- * @hw: pointer to the hw struct
- *
- * Return: true if the device has c827 phy.
- */
-static bool ice_is_pf_c827(struct ice_hw *hw)
-{
- struct ice_aqc_get_link_topo cmd = {};
- u8 node_part_number;
- u16 node_handle;
- int status;
-
- if (hw->mac_type != ICE_MAC_E810)
- return false;
-
- if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
- return true;
-
- cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
- cmd.addr.topo_params.index = 0;
-
- status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
- &node_handle);
-
- if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
- return false;
-
- if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
- return true;
-
- return false;
-}
-
-/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -958,30 +922,31 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
- * ice_wait_for_fw - wait for full FW readiness
+ * ice_wait_fw_load - wait for PHY firmware loading to complete
* @hw: pointer to the hardware structure
- * @timeout: milliseconds that can elapse before timing out
+ * @timeout: milliseconds that can elapse before timing out, 0 to bypass waiting
*
- * Return: 0 on success, -ETIMEDOUT on timeout.
+ * Return:
+ * * 0 on success
+ * * negative on timeout
*/
-static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+static int ice_wait_fw_load(struct ice_hw *hw, u32 timeout)
{
- int fw_loading;
- u32 elapsed = 0;
+ int fw_loading_reg;
- while (elapsed <= timeout) {
- fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+ if (!timeout)
+ return 0;
- /* firmware was not yet loaded, we have to wait more */
- if (fw_loading) {
- elapsed += 100;
- msleep(100);
- continue;
- }
+ fw_loading_reg = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+ /* notify the user only once if PHY FW is still loading */
+ if (fw_loading_reg)
+		dev_info(ice_hw_to_dev(hw), "PHY FW is still loading; link initialization will continue once it completes.\n");
+ else
return 0;
- }
- return -ETIMEDOUT;
+ return rd32_poll_timeout(hw, GL_MNG_FWSM, fw_loading_reg,
+ !(fw_loading_reg & GL_MNG_FWSM_FW_LOADING_M),
+ 10000, timeout * 1000);
}
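rd32_poll_timeout() wraps the generic iopoll helpers, so the open-coded msleep loop becomes a declarative poll: re-read GL_MNG_FWSM every 10 ms until the FW_LOADING bit clears or the microsecond budget expires, returning -ETIMEDOUT on expiry. The generic form looks roughly like this (a sketch built on <linux/iopoll.h>; the ice wrapper merely binds the register-read op):

#include <linux/iopoll.h>

/* Poll a 32-bit MMIO register until a busy bit clears (sketch) */
static int wait_bit_clear(void __iomem *reg, u32 busy_mask, u32 timeout_us)
{
	u32 val;

	return read_poll_timeout(readl, val, !(val & busy_mask),
				 10 * USEC_PER_MSEC, timeout_us, false, reg);
}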
static int __fwlog_send_cmd(void *priv, struct libie_aq_desc *desc, void *buf,
@@ -1171,12 +1136,10 @@ int ice_init_hw(struct ice_hw *hw)
* due to necessity of loading FW from an external source.
* This can take even half a minute.
*/
- if (ice_is_pf_c827(hw)) {
- status = ice_wait_for_fw(hw, 30000);
- if (status) {
- dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out");
- goto err_unroll_fltr_mgmt_struct;
- }
+ status = ice_wait_fw_load(hw, 30000);
+ if (status) {
+ dev_err(ice_hw_to_dev(hw), "ice_wait_fw_load timed out");
+ goto err_unroll_fltr_mgmt_struct;
}
hw->lane_num = ice_get_phy_lane_number(hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 53b54e395a2e..baf02512d041 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -5,6 +5,7 @@
#include "ice_lib.h"
#include "ice_trace.h"
#include <linux/dpll.h>
+#include <linux/property.h>
#define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50
#define ICE_DPLL_PIN_IDX_INVALID 0xff
@@ -529,6 +530,94 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
/**
+ * ice_dpll_pin_store_state - updates the state of a pin in SW bookkeeping
+ * @pin: pointer to a pin
+ * @parent: parent pin index
+ * @state: pin state (connected or disconnected)
+ */
+static void
+ice_dpll_pin_store_state(struct ice_dpll_pin *pin, int parent, bool state)
+{
+ pin->state[parent] = state ? DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+}
+
+/**
+ * ice_dpll_rclk_update_e825c - updates the state of the rclk pin on an e825c
+ * @pf: private board struct
+ * @pin: pointer to a pin
+ *
+ * Update the struct holding pin state info; states are kept separately for
+ * each parent.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int ice_dpll_rclk_update_e825c(struct ice_pf *pf,
+ struct ice_dpll_pin *pin)
+{
+ u8 rclk_bits;
+ int err;
+ u32 reg;
+
+ if (pf->dplls.rclk.num_parents > ICE_SYNCE_CLK_NUM)
+ return -EINVAL;
+
+ err = ice_read_cgu_reg(&pf->hw, ICE_CGU_R10, &reg);
+ if (err)
+ return err;
+
+ rclk_bits = FIELD_GET(ICE_CGU_R10_SYNCE_S_REF_CLK, reg);
+ ice_dpll_pin_store_state(pin, ICE_SYNCE_CLK0, rclk_bits ==
+ (pf->ptp.port.port_num + ICE_CGU_BYPASS_MUX_OFFSET_E825C));
+
+ err = ice_read_cgu_reg(&pf->hw, ICE_CGU_R11, &reg);
+ if (err)
+ return err;
+
+ rclk_bits = FIELD_GET(ICE_CGU_R11_SYNCE_S_BYP_CLK, reg);
+ ice_dpll_pin_store_state(pin, ICE_SYNCE_CLK1, rclk_bits ==
+ (pf->ptp.port.port_num + ICE_CGU_BYPASS_MUX_OFFSET_E825C));
+
+ return 0;
+}
+
+/**
+ * ice_dpll_rclk_update - updates the state of the rclk pin on a device
+ * @pf: private board struct
+ * @pin: pointer to a pin
+ * @port_num: port number
+ *
+ * Update the struct holding pin state info; states are kept separately for
+ * each parent.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int ice_dpll_rclk_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ u8 port_num)
+{
+ int ret;
+
+ for (u8 parent = 0; parent < pf->dplls.rclk.num_parents; parent++) {
+ u8 p = parent;
+
+ ret = ice_aq_get_phy_rec_clk_out(&pf->hw, &p, &port_num,
+ &pin->flags[parent], NULL);
+ if (ret)
+ return ret;
+
+ ice_dpll_pin_store_state(pin, parent,
+ ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN &
+ pin->flags[parent]);
+ }
+
+ return 0;
+}
+
+/**
* ice_dpll_sw_pins_update - update status of all SW pins
* @pf: private board struct
*
@@ -668,22 +757,14 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
break;
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
- for (parent = 0; parent < pf->dplls.rclk.num_parents;
- parent++) {
- u8 p = parent;
-
- ret = ice_aq_get_phy_rec_clk_out(&pf->hw, &p,
- &port_num,
- &pin->flags[parent],
- NULL);
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
+ ret = ice_dpll_rclk_update_e825c(pf, pin);
+ if (ret)
+ goto err;
+ } else {
+ ret = ice_dpll_rclk_update(pf, pin, port_num);
if (ret)
goto err;
- if (ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN &
- pin->flags[parent])
- pin->state[parent] = DPLL_PIN_STATE_CONNECTED;
- else
- pin->state[parent] =
- DPLL_PIN_STATE_DISCONNECTED;
}
break;
case ICE_DPLL_PIN_TYPE_SOFTWARE:
@@ -1843,6 +1924,40 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_synce_update_e825c - sets PHY recovered clock pins on e825c
+ * @hw: Pointer to the HW struct
+ * @ena: true to enable, false to disable
+ * @port_num: port number
+ * @output: output pin (E825C has two)
+ *
+ * Helper for the DPLL subsystem callback. Set proper signals to recover the
+ * clock from the given port.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int ice_dpll_synce_update_e825c(struct ice_hw *hw, bool ena,
+ u32 port_num, enum ice_synce_clk output)
+{
+ int err;
+
+ /* configure the mux to deliver proper signal to DPLL from the MUX */
+ err = ice_tspll_cfg_bypass_mux_e825c(hw, ena, port_num, output);
+ if (err)
+ return err;
+
+ err = ice_tspll_cfg_synce_ethdiv_e825c(hw, output);
+ if (err)
+ return err;
+
+ dev_dbg(ice_hw_to_dev(hw), "CLK_SYNCE%u recovered clock: pin %s\n",
+ output, str_enabled_disabled(ena));
+
+ return 0;
+}
+
+/**
* ice_dpll_output_esync_set - callback for setting embedded sync
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -2263,6 +2378,28 @@ ice_dpll_sw_input_ref_sync_get(const struct dpll_pin *pin, void *pin_priv,
state, extack);
}
+static int
+ice_dpll_pin_get_parent_num(struct ice_dpll_pin *pin,
+ const struct dpll_pin *parent)
+{
+ int i;
+
+ for (i = 0; i < pin->num_parents; i++)
+ if (pin->pf->dplls.inputs[pin->parent_idx[i]].pin == parent)
+ return i;
+
+ return -ENOENT;
+}
+
+static int
+ice_dpll_pin_get_parent_idx(struct ice_dpll_pin *pin,
+ const struct dpll_pin *parent)
+{
+ int num = ice_dpll_pin_get_parent_num(pin, parent);
+
+ return num < 0 ? num : pin->parent_idx[num];
+}
+
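These two helpers translate a dpll core parent handle into the driver's two index spaces: ice_dpll_pin_get_parent_num() yields the position within the pin's own parent_idx[] array (used for messages), while ice_dpll_pin_get_parent_idx() yields the pf-wide inputs[] slot, from which the hardware index is derived. A sketch mirroring the conversion done in ice_dpll_rclk_state_on_pin_set() below:

static int rclk_hw_idx(struct ice_dpll_pin *p, const struct dpll_pin *parent)
{
	/* pf-wide input slot, or -ENOENT if parent is not one of ours */
	int idx = ice_dpll_pin_get_parent_idx(p, parent);

	if (idx < 0)
		return idx;
	/* rebase into the hardware recovered-clock index space */
	return idx - p->pf->dplls.base_rclk_idx;
}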
/**
* ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
* @pin: pointer to a pin
@@ -2286,35 +2423,45 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
enum dpll_pin_state state,
struct netlink_ext_ack *extack)
{
- struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv;
bool enable = state == DPLL_PIN_STATE_CONNECTED;
+ struct ice_dpll_pin *p = pin_priv;
struct ice_pf *pf = p->pf;
+ struct ice_hw *hw;
int ret = -EINVAL;
- u32 hw_idx;
+ int hw_idx;
+
+ hw = &pf->hw;
if (ice_dpll_is_reset(pf, extack))
return -EBUSY;
mutex_lock(&pf->dplls.lock);
- hw_idx = parent->idx - pf->dplls.base_rclk_idx;
- if (hw_idx >= pf->dplls.num_inputs)
+ hw_idx = ice_dpll_pin_get_parent_idx(p, parent_pin);
+ if (hw_idx < 0)
goto unlock;
+ hw_idx -= pf->dplls.base_rclk_idx;
if ((enable && p->state[hw_idx] == DPLL_PIN_STATE_CONNECTED) ||
(!enable && p->state[hw_idx] == DPLL_PIN_STATE_DISCONNECTED)) {
NL_SET_ERR_MSG_FMT(extack,
"pin:%u state:%u on parent:%u already set",
- p->idx, state, parent->idx);
+ p->idx, state,
+ ice_dpll_pin_get_parent_num(p, parent_pin));
goto unlock;
}
- ret = ice_aq_set_phy_rec_clk_out(&pf->hw, hw_idx, enable,
- &p->freq);
+
+ ret = hw->mac_type == ICE_MAC_GENERIC_3K_E825 ?
+ ice_dpll_synce_update_e825c(hw, enable,
+ pf->ptp.port.port_num,
+ (enum ice_synce_clk)hw_idx) :
+ ice_aq_set_phy_rec_clk_out(hw, hw_idx, enable, &p->freq);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
"err:%d %s failed to set pin state:%u for pin:%u on parent:%u",
ret,
- libie_aq_str(pf->hw.adminq.sq_last_status),
- state, p->idx, parent->idx);
+ libie_aq_str(hw->adminq.sq_last_status),
+ state, p->idx,
+ ice_dpll_pin_get_parent_num(p, parent_pin));
unlock:
mutex_unlock(&pf->dplls.lock);
@@ -2344,17 +2491,17 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
enum dpll_pin_state *state,
struct netlink_ext_ack *extack)
{
- struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv;
+ struct ice_dpll_pin *p = pin_priv;
struct ice_pf *pf = p->pf;
int ret = -EINVAL;
- u32 hw_idx;
+ int hw_idx;
if (ice_dpll_is_reset(pf, extack))
return -EBUSY;
mutex_lock(&pf->dplls.lock);
- hw_idx = parent->idx - pf->dplls.base_rclk_idx;
- if (hw_idx >= pf->dplls.num_inputs)
+ hw_idx = ice_dpll_pin_get_parent_idx(p, parent_pin);
+ if (hw_idx < 0)
goto unlock;
ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_RCLK_INPUT,
@@ -2814,7 +2961,8 @@ static void ice_dpll_release_pins(struct ice_dpll_pin *pins, int count)
int i;
for (i = 0; i < count; i++)
- dpll_pin_put(pins[i].pin);
+ if (!IS_ERR_OR_NULL(pins[i].pin))
+ dpll_pin_put(pins[i].pin, &pins[i].tracker);
}
/**
@@ -2836,11 +2984,15 @@ static int
ice_dpll_get_pins(struct ice_pf *pf, struct ice_dpll_pin *pins,
int start_idx, int count, u64 clock_id)
{
+ u32 pin_index;
int i, ret;
for (i = 0; i < count; i++) {
- pins[i].pin = dpll_pin_get(clock_id, i + start_idx, THIS_MODULE,
- &pins[i].prop);
+ pin_index = start_idx;
+ if (start_idx != DPLL_PIN_IDX_UNSPEC)
+ pin_index += i;
+ pins[i].pin = dpll_pin_get(clock_id, pin_index, THIS_MODULE,
+ &pins[i].prop, &pins[i].tracker);
if (IS_ERR(pins[i].pin)) {
ret = PTR_ERR(pins[i].pin);
goto release_pins;
@@ -2851,7 +3003,7 @@ ice_dpll_get_pins(struct ice_pf *pf, struct ice_dpll_pin *pins,
release_pins:
while (--i >= 0)
- dpll_pin_put(pins[i].pin);
+ dpll_pin_put(pins[i].pin, &pins[i].tracker);
return ret;
}
@@ -2944,6 +3096,7 @@ unregister_pins:
/**
* ice_dpll_deinit_direct_pins - deinitialize direct pins
+ * @pf: board private structure
* @cgu: if cgu is present and controlled by this NIC
* @pins: pointer to pins array
* @count: number of pins
@@ -2955,7 +3108,8 @@ unregister_pins:
* Release pins resources to the dpll subsystem.
*/
static void
-ice_dpll_deinit_direct_pins(bool cgu, struct ice_dpll_pin *pins, int count,
+ice_dpll_deinit_direct_pins(struct ice_pf *pf, bool cgu,
+ struct ice_dpll_pin *pins, int count,
const struct dpll_pin_ops *ops,
struct dpll_device *first,
struct dpll_device *second)
@@ -3024,77 +3178,230 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
{
struct ice_dpll_pin *rclk = &pf->dplls.rclk;
struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct dpll_pin *parent;
+ struct ice_dpll_pin *parent;
int i;
for (i = 0; i < rclk->num_parents; i++) {
- parent = pf->dplls.inputs[rclk->parent_idx[i]].pin;
- if (!parent)
+ parent = &pf->dplls.inputs[rclk->parent_idx[i]];
+ if (IS_ERR_OR_NULL(parent->pin))
continue;
- dpll_pin_on_pin_unregister(parent, rclk->pin,
+ dpll_pin_on_pin_unregister(parent->pin, rclk->pin,
&ice_dpll_rclk_ops, rclk);
}
if (WARN_ON_ONCE(!vsi || !vsi->netdev))
return;
dpll_netdev_pin_clear(vsi->netdev);
- dpll_pin_put(rclk->pin);
+ dpll_pin_put(rclk->pin, &rclk->tracker);
+}
+
+static bool ice_dpll_is_fwnode_pin(struct ice_dpll_pin *pin)
+{
+ return !IS_ERR_OR_NULL(pin->fwnode);
+}
+
+static void ice_dpll_pin_notify_work(struct work_struct *work)
+{
+ struct ice_dpll_pin_work *w = container_of(work,
+ struct ice_dpll_pin_work,
+ work);
+ struct ice_dpll_pin *pin, *parent = w->pin;
+ struct ice_pf *pf = parent->pf;
+ int ret;
+
+ wait_for_completion(&pf->dplls.dpll_init);
+ if (!test_bit(ICE_FLAG_DPLL, pf->flags))
+ goto out; /* DPLL initialization failed */
+
+ switch (w->action) {
+ case DPLL_PIN_CREATED:
+ if (!IS_ERR_OR_NULL(parent->pin)) {
+			/* Our pin is already registered */
+ goto out;
+ }
+
+ /* Grab reference on fwnode pin */
+ parent->pin = fwnode_dpll_pin_find(parent->fwnode,
+ &parent->tracker);
+ if (IS_ERR_OR_NULL(parent->pin)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Cannot get fwnode pin reference\n");
+ goto out;
+ }
+
+ /* Register rclk pin */
+ pin = &pf->dplls.rclk;
+ ret = dpll_pin_on_pin_register(parent->pin, pin->pin,
+ &ice_dpll_rclk_ops, pin);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to register pin: %pe\n", ERR_PTR(ret));
+ dpll_pin_put(parent->pin, &parent->tracker);
+ parent->pin = NULL;
+ goto out;
+ }
+ break;
+ case DPLL_PIN_DELETED:
+ if (IS_ERR_OR_NULL(parent->pin)) {
+			/* Our pin is already unregistered */
+ goto out;
+ }
+
+ /* Unregister rclk pin */
+ pin = &pf->dplls.rclk;
+ dpll_pin_on_pin_unregister(parent->pin, pin->pin,
+ &ice_dpll_rclk_ops, pin);
+
+ /* Drop fwnode pin reference */
+ dpll_pin_put(parent->pin, &parent->tracker);
+ parent->pin = NULL;
+ break;
+ default:
+ break;
+ }
+out:
+ kfree(w);
+}
+
+static int ice_dpll_pin_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct ice_dpll_pin *pin = container_of(nb, struct ice_dpll_pin, nb);
+ struct dpll_pin_notifier_info *info = data;
+ struct ice_dpll_pin_work *work;
+
+ if (action != DPLL_PIN_CREATED && action != DPLL_PIN_DELETED)
+ return NOTIFY_DONE;
+
+ /* Check if the reported pin is this one */
+ if (pin->fwnode != info->fwnode)
+ return NOTIFY_DONE; /* Not this pin */
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return NOTIFY_DONE;
+
+ INIT_WORK(&work->work, ice_dpll_pin_notify_work);
+ work->action = action;
+ work->pin = pin;
+
+ queue_work(pin->pf->dplls.wq, &work->work);
+
+ return NOTIFY_OK;
}
/**
- * ice_dpll_init_rclk_pins - initialize recovered clock pin
+ * ice_dpll_init_pin_common - initialize pin
* @pf: board private structure
* @pin: pin to register
* @start_idx: on which index shall allocation start in dpll subsystem
* @ops: callback ops registered with the pins
*
- * Allocate resource for recovered clock pin in dpll subsystem. Register the
- * pin with the parents it has in the info. Register pin with the pf's main vsi
- * netdev.
+ * Allocate resources for the given pin in the dpll subsystem. Register the
+ * pin with the parents listed in its info.
*
* Return:
* * 0 - success
* * negative - registration failure reason
*/
static int
-ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
- int start_idx, const struct dpll_pin_ops *ops)
+ice_dpll_init_pin_common(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ int start_idx, const struct dpll_pin_ops *ops)
{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct dpll_pin *parent;
+ struct ice_dpll_pin *parent;
int ret, i;
- if (WARN_ON((!vsi || !vsi->netdev)))
- return -EINVAL;
- ret = ice_dpll_get_pins(pf, pin, start_idx, ICE_DPLL_RCLK_NUM_PER_PF,
- pf->dplls.clock_id);
+ ret = ice_dpll_get_pins(pf, pin, start_idx, 1, pf->dplls.clock_id);
if (ret)
return ret;
- for (i = 0; i < pf->dplls.rclk.num_parents; i++) {
- parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[i]].pin;
- if (!parent) {
- ret = -ENODEV;
- goto unregister_pins;
+
+ for (i = 0; i < pin->num_parents; i++) {
+ parent = &pf->dplls.inputs[pin->parent_idx[i]];
+ if (IS_ERR_OR_NULL(parent->pin)) {
+ if (!ice_dpll_is_fwnode_pin(parent)) {
+ ret = -ENODEV;
+ goto unregister_pins;
+ }
+ parent->pin = fwnode_dpll_pin_find(parent->fwnode,
+ &parent->tracker);
+ if (IS_ERR_OR_NULL(parent->pin)) {
+ dev_info(ice_pf_to_dev(pf),
+ "Mux pin not registered yet\n");
+ continue;
+ }
}
- ret = dpll_pin_on_pin_register(parent, pf->dplls.rclk.pin,
- ops, &pf->dplls.rclk);
+ ret = dpll_pin_on_pin_register(parent->pin, pin->pin, ops, pin);
if (ret)
goto unregister_pins;
}
- dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
return 0;
unregister_pins:
while (i) {
- parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[--i]].pin;
- dpll_pin_on_pin_unregister(parent, pf->dplls.rclk.pin,
- &ice_dpll_rclk_ops, &pf->dplls.rclk);
+ parent = &pf->dplls.inputs[pin->parent_idx[--i]];
+ if (IS_ERR_OR_NULL(parent->pin))
+ continue;
+ dpll_pin_on_pin_unregister(parent->pin, pin->pin, ops, pin);
}
- ice_dpll_release_pins(pin, ICE_DPLL_RCLK_NUM_PER_PF);
+ ice_dpll_release_pins(pin, 1);
+
return ret;
}
/**
+ * ice_dpll_init_rclk_pin - initialize recovered clock pin
+ * @pf: board private structure
+ * @start_idx: on which index shall allocation start in dpll subsystem
+ * @ops: callback ops registered with the pins
+ *
+ * Allocate resources for the recovered clock pin in the dpll subsystem.
+ * Register the pin with the parents listed in its info.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_init_rclk_pin(struct ice_pf *pf, int start_idx,
+ const struct dpll_pin_ops *ops)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ int ret;
+
+ ret = ice_dpll_init_pin_common(pf, &pf->dplls.rclk, start_idx, ops);
+ if (ret)
+ return ret;
+
+ dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+
+ return 0;
+}
+
+static void
+ice_dpll_deinit_fwnode_pin(struct ice_dpll_pin *pin)
+{
+ unregister_dpll_notifier(&pin->nb);
+ flush_workqueue(pin->pf->dplls.wq);
+ if (!IS_ERR_OR_NULL(pin->pin)) {
+ dpll_pin_put(pin->pin, &pin->tracker);
+ pin->pin = NULL;
+ }
+ fwnode_handle_put(pin->fwnode);
+ pin->fwnode = NULL;
+}
+
+static void
+ice_dpll_deinit_fwnode_pins(struct ice_pf *pf, struct ice_dpll_pin *pins,
+ int start_idx)
+{
+ int i;
+
+ for (i = 0; i < pf->dplls.rclk.num_parents; i++)
+ ice_dpll_deinit_fwnode_pin(&pins[start_idx + i]);
+ destroy_workqueue(pf->dplls.wq);
+}
+
+/**
* ice_dpll_deinit_pins - deinitialize direct pins
* @pf: board private structure
* @cgu: if cgu is controlled by this pf
@@ -3113,6 +3420,8 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
struct ice_dpll *dp = &d->pps;
ice_dpll_deinit_rclk_pin(pf);
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825)
+ ice_dpll_deinit_fwnode_pins(pf, pf->dplls.inputs, 0);
if (cgu) {
ice_dpll_unregister_pins(dp->dpll, inputs, &ice_dpll_input_ops,
num_inputs);
@@ -3127,12 +3436,12 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
&ice_dpll_output_ops, num_outputs);
ice_dpll_release_pins(outputs, num_outputs);
if (!pf->dplls.generic) {
- ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
+ ice_dpll_deinit_direct_pins(pf, cgu, pf->dplls.ufl,
ICE_DPLL_PIN_SW_NUM,
&ice_dpll_pin_ufl_ops,
pf->dplls.pps.dpll,
pf->dplls.eec.dpll);
- ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
+ ice_dpll_deinit_direct_pins(pf, cgu, pf->dplls.sma,
ICE_DPLL_PIN_SW_NUM,
&ice_dpll_pin_sma_ops,
pf->dplls.pps.dpll,
@@ -3141,6 +3450,141 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
}
}
+static struct fwnode_handle *
+ice_dpll_pin_node_get(struct ice_pf *pf, const char *name)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(ice_pf_to_dev(pf));
+ int index;
+
+ index = fwnode_property_match_string(fwnode, "dpll-pin-names", name);
+ if (index < 0)
+ return ERR_PTR(-ENOENT);
+
+ return fwnode_find_reference(fwnode, "dpll-pins", index);
+}
+
+static int
+ice_dpll_init_fwnode_pin(struct ice_dpll_pin *pin, const char *name)
+{
+ struct ice_pf *pf = pin->pf;
+ int ret;
+
+ pin->fwnode = ice_dpll_pin_node_get(pf, name);
+ if (IS_ERR(pin->fwnode)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to find %s firmware node: %pe\n", name,
+ pin->fwnode);
+ pin->fwnode = NULL;
+ return -ENODEV;
+ }
+
+ dev_dbg(ice_pf_to_dev(pf), "Found fwnode node for %s\n", name);
+
+ pin->pin = fwnode_dpll_pin_find(pin->fwnode, &pin->tracker);
+ if (IS_ERR_OR_NULL(pin->pin)) {
+ dev_info(ice_pf_to_dev(pf),
+ "DPLL pin for %pfwp not registered yet\n",
+ pin->fwnode);
+ pin->pin = NULL;
+ }
+
+ pin->nb.notifier_call = ice_dpll_pin_notify;
+ ret = register_dpll_notifier(&pin->nb);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to subscribe for DPLL notifications\n");
+
+ if (!IS_ERR_OR_NULL(pin->pin)) {
+ dpll_pin_put(pin->pin, &pin->tracker);
+ pin->pin = NULL;
+ }
+ fwnode_handle_put(pin->fwnode);
+ pin->fwnode = NULL;
+
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * ice_dpll_init_fwnode_pins - initialize pins from firmware nodes
+ * @pf: board private structure
+ * @pins: pointer to pins array
+ * @start_idx: starting index for pins
+ *
+ * Initialize input pins for E825 RCLK support. The parent pins (rclk0, rclk1)
+ * are expected to be defined by the system firmware (ACPI). This function
+ * allocates them in the dpll subsystem and stores their indices for later
+ * registration with the rclk pin.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - initialization failure reason
+ */
+static int
+ice_dpll_init_fwnode_pins(struct ice_pf *pf, struct ice_dpll_pin *pins,
+ int start_idx)
+{
+ char pin_name[8];
+ int i, ret;
+
+ pf->dplls.wq = create_singlethread_workqueue("ice_dpll_wq");
+ if (!pf->dplls.wq)
+ return -ENOMEM;
+
+ for (i = 0; i < pf->dplls.rclk.num_parents; i++) {
+ pins[start_idx + i].pf = pf;
+ snprintf(pin_name, sizeof(pin_name), "rclk%u", i);
+ ret = ice_dpll_init_fwnode_pin(&pins[start_idx + i], pin_name);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+error:
+ while (i--)
+ ice_dpll_deinit_fwnode_pin(&pins[start_idx + i]);
+
+ destroy_workqueue(pf->dplls.wq);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_init_pins_e825 - init pins and register pins with a dplls
+ * @pf: board private structure
+ *
+ * Initialize the pf's directly connected pins within the Linux dpll
+ * subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - initialization failure reason
+ */
+static int ice_dpll_init_pins_e825(struct ice_pf *pf)
+{
+ int ret;
+
+ ret = ice_dpll_init_fwnode_pins(pf, pf->dplls.inputs, 0);
+ if (ret)
+ return ret;
+
+ ret = ice_dpll_init_rclk_pin(pf, DPLL_PIN_IDX_UNSPEC,
+ &ice_dpll_rclk_ops);
+ if (ret) {
+		/* Inform pending DPLL notifier work that DPLL init finished
+		 * unsuccessfully (ICE_FLAG_DPLL not set).
+		 */
+ complete_all(&pf->dplls.dpll_init);
+ ice_dpll_deinit_fwnode_pins(pf, pf->dplls.inputs, 0);
+ }
+
+ return ret;
+}
+
/**
* ice_dpll_init_pins - init pins and register pins with a dplls
* @pf: board private structure
@@ -3155,21 +3599,24 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
*/
static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
{
+ const struct dpll_pin_ops *output_ops;
+ const struct dpll_pin_ops *input_ops;
int ret, count;
+ input_ops = &ice_dpll_input_ops;
+ output_ops = &ice_dpll_output_ops;
+
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0,
- pf->dplls.num_inputs,
- &ice_dpll_input_ops,
- pf->dplls.eec.dpll, pf->dplls.pps.dpll);
+ pf->dplls.num_inputs, input_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
if (ret)
return ret;
count = pf->dplls.num_inputs;
if (cgu) {
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs,
- count,
- pf->dplls.num_outputs,
- &ice_dpll_output_ops,
- pf->dplls.eec.dpll,
+ count, pf->dplls.num_outputs,
+ output_ops, pf->dplls.eec.dpll,
pf->dplls.pps.dpll);
if (ret)
goto deinit_inputs;
@@ -3205,30 +3652,30 @@ static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
} else {
count += pf->dplls.num_outputs + 2 * ICE_DPLL_PIN_SW_NUM;
}
- ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, count + pf->hw.pf_id,
- &ice_dpll_rclk_ops);
+
+ ret = ice_dpll_init_rclk_pin(pf, count + pf->ptp.port.port_num,
+ &ice_dpll_rclk_ops);
if (ret)
goto deinit_ufl;
return 0;
deinit_ufl:
- ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
- ICE_DPLL_PIN_SW_NUM,
- &ice_dpll_pin_ufl_ops,
- pf->dplls.pps.dpll, pf->dplls.eec.dpll);
+ ice_dpll_deinit_direct_pins(pf, cgu, pf->dplls.ufl, ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops, pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
deinit_sma:
- ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
- ICE_DPLL_PIN_SW_NUM,
- &ice_dpll_pin_sma_ops,
- pf->dplls.pps.dpll, pf->dplls.eec.dpll);
+ ice_dpll_deinit_direct_pins(pf, cgu, pf->dplls.sma, ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops, pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
deinit_outputs:
- ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs,
+ ice_dpll_deinit_direct_pins(pf, cgu, pf->dplls.outputs,
pf->dplls.num_outputs,
- &ice_dpll_output_ops, pf->dplls.pps.dpll,
+ output_ops, pf->dplls.pps.dpll,
pf->dplls.eec.dpll);
deinit_inputs:
- ice_dpll_deinit_direct_pins(cgu, pf->dplls.inputs, pf->dplls.num_inputs,
- &ice_dpll_input_ops, pf->dplls.pps.dpll,
+ ice_dpll_deinit_direct_pins(pf, cgu, pf->dplls.inputs,
+ pf->dplls.num_inputs,
+ input_ops, pf->dplls.pps.dpll,
pf->dplls.eec.dpll);
return ret;
}
@@ -3239,15 +3686,15 @@ deinit_inputs:
* @d: pointer to ice_dpll
* @cgu: if cgu is present and controlled by this NIC
*
- * If cgu is owned unregister the dpll from dpll subsystem.
- * Release resources of dpll device from dpll subsystem.
+ * If cgu is owned, unregister the DPLL from DPLL subsystem.
+ * Release resources of DPLL device from DPLL subsystem.
*/
static void
ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu)
{
if (cgu)
dpll_device_unregister(d->dpll, d->ops, d);
- dpll_device_put(d->dpll);
+ dpll_device_put(d->dpll, &d->tracker);
}
/**
@@ -3257,8 +3704,8 @@ ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu)
* @cgu: if cgu is present and controlled by this NIC
* @type: type of dpll being initialized
*
- * Allocate dpll instance for this board in dpll subsystem, if cgu is controlled
- * by this NIC, register dpll with the callback ops.
+ * Allocate DPLL instance for this board in dpll subsystem, if cgu is controlled
+ * by this NIC, register DPLL with the callback ops.
*
* Return:
* * 0 - success
@@ -3271,7 +3718,8 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
u64 clock_id = pf->dplls.clock_id;
int ret;
- d->dpll = dpll_device_get(clock_id, d->dpll_idx, THIS_MODULE);
+ d->dpll = dpll_device_get(clock_id, d->dpll_idx, THIS_MODULE,
+ &d->tracker);
if (IS_ERR(d->dpll)) {
ret = PTR_ERR(d->dpll);
dev_err(ice_pf_to_dev(pf),
@@ -3287,7 +3735,8 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
ice_dpll_update_state(pf, d, true);
ret = dpll_device_register(d->dpll, type, ops, d);
if (ret) {
- dpll_device_put(d->dpll);
+ dpll_device_put(d->dpll, &d->tracker);
+ d->dpll = NULL;
return ret;
}
d->ops = ops;
@@ -3506,6 +3955,26 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
}
/**
+ * ice_dpll_init_info_pin_on_pin_e825c - initializes rclk pin information
+ * @pf: board private structure
+ *
+ * Init information for the rclk pin and cache it in pf->dplls.rclk.
+ *
+ * Return:
+ * * 0 - success
+ */
+static int ice_dpll_init_info_pin_on_pin_e825c(struct ice_pf *pf)
+{
+ struct ice_dpll_pin *rclk_pin = &pf->dplls.rclk;
+
+ rclk_pin->prop.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT;
+ rclk_pin->prop.capabilities |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ rclk_pin->pf = pf;
+
+ return 0;
+}
+
+/**
* ice_dpll_init_info_rclk_pin - initializes rclk pin information
* @pf: board private structure
*
@@ -3631,7 +4100,10 @@ ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type)
case ICE_DPLL_PIN_TYPE_OUTPUT:
return ice_dpll_init_info_direct_pins(pf, pin_type);
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
- return ice_dpll_init_info_rclk_pin(pf);
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825)
+ return ice_dpll_init_info_pin_on_pin_e825c(pf);
+ else
+ return ice_dpll_init_info_rclk_pin(pf);
case ICE_DPLL_PIN_TYPE_SOFTWARE:
return ice_dpll_init_info_sw_pins(pf);
default:
@@ -3654,6 +4126,50 @@ static void ice_dpll_deinit_info(struct ice_pf *pf)
}
/**
+ * ice_dpll_init_info_e825c - prepare pf's dpll information structure for e825c
+ * device
+ * @pf: board private structure
+ *
+ * Acquire (from HW) and set basic DPLL information (on pf->dplls struct).
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info_e825c(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+ int ret = 0;
+ int i;
+
+ d->clock_id = ice_generate_clock_id(pf);
+ d->num_inputs = ICE_SYNCE_CLK_NUM;
+
+ d->inputs = kcalloc(d->num_inputs, sizeof(*d->inputs), GFP_KERNEL);
+ if (!d->inputs)
+ return -ENOMEM;
+
+ ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx,
+ &pf->dplls.rclk.num_parents);
+ if (ret)
+ goto deinit_info;
+
+ for (i = 0; i < pf->dplls.rclk.num_parents; i++)
+ pf->dplls.rclk.parent_idx[i] = d->base_rclk_idx + i;
+
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_RCLK_INPUT);
+ if (ret)
+ goto deinit_info;
+ dev_dbg(ice_pf_to_dev(pf),
+ "%s - success, inputs: %u, outputs: %u, rclk-parents: %u\n",
+ __func__, d->num_inputs, d->num_outputs, d->rclk.num_parents);
+ return 0;
+deinit_info:
+ ice_dpll_deinit_info(pf);
+ return ret;
+}
+
+/**
* ice_dpll_init_info - prepare pf's dpll information structure
* @pf: board private structure
* @cgu: if cgu is present and controlled by this NIC
@@ -3772,14 +4288,16 @@ void ice_dpll_deinit(struct ice_pf *pf)
ice_dpll_deinit_worker(pf);
ice_dpll_deinit_pins(pf, cgu);
- ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu);
- ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu);
+ if (!IS_ERR_OR_NULL(pf->dplls.pps.dpll))
+ ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu);
+ if (!IS_ERR_OR_NULL(pf->dplls.eec.dpll))
+ ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu);
ice_dpll_deinit_info(pf);
mutex_destroy(&pf->dplls.lock);
}
/**
- * ice_dpll_init - initialize support for dpll subsystem
+ * ice_dpll_init_e825 - initialize support for dpll subsystem
* @pf: board private structure
*
* Set up the device dplls, register them and pins connected within Linux dpll
@@ -3788,7 +4306,43 @@ void ice_dpll_deinit(struct ice_pf *pf)
*
* Context: Initializes pf->dplls.lock mutex.
*/
-void ice_dpll_init(struct ice_pf *pf)
+static void ice_dpll_init_e825(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+ int err;
+
+ mutex_init(&d->lock);
+ init_completion(&d->dpll_init);
+
+ err = ice_dpll_init_info_e825c(pf);
+ if (err)
+ goto err_exit;
+ err = ice_dpll_init_pins_e825(pf);
+ if (err)
+ goto deinit_info;
+ set_bit(ICE_FLAG_DPLL, pf->flags);
+ complete_all(&d->dpll_init);
+
+ return;
+
+deinit_info:
+ ice_dpll_deinit_info(pf);
+err_exit:
+ mutex_destroy(&d->lock);
+ dev_warn(ice_pf_to_dev(pf), "DPLLs init failure err:%d\n", err);
+}
+
+/**
+ * ice_dpll_init_e810 - initialize support for dpll subsystem
+ * @pf: board private structure
+ *
+ * Set up the device dplls, register them and pins connected within Linux dpll
+ * subsystem. Allow userspace to obtain state of DPLL and handling of DPLL
+ * configuration requests.
+ *
+ * Context: Initializes pf->dplls.lock mutex.
+ */
+static void ice_dpll_init_e810(struct ice_pf *pf)
{
bool cgu = ice_is_feature_supported(pf, ICE_F_CGU);
struct ice_dplls *d = &pf->dplls;
@@ -3828,3 +4382,15 @@ err_exit:
mutex_destroy(&d->lock);
dev_warn(ice_pf_to_dev(pf), "DPLLs init failure err:%d\n", err);
}
+
+void ice_dpll_init(struct ice_pf *pf)
+{
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_dpll_init_e825(pf);
+ break;
+ default:
+ ice_dpll_init_e810(pf);
+ break;
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
index c0da03384ce9..ae42cdea0ee1 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.h
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -20,9 +20,16 @@ enum ice_dpll_pin_sw {
ICE_DPLL_PIN_SW_NUM
};
+struct ice_dpll_pin_work {
+ struct work_struct work;
+ unsigned long action;
+ struct ice_dpll_pin *pin;
+};
+
/** ice_dpll_pin - store info about pins
* @pin: dpll pin structure
* @pf: pointer to pf, which has registered the dpll_pin
+ * @tracker: reference count tracker
* @idx: ice pin private idx
 * @num_parents: holds number of parent pins
 * @parent_idx: holds indexes of parent pins
@@ -37,6 +44,9 @@ enum ice_dpll_pin_sw {
struct ice_dpll_pin {
struct dpll_pin *pin;
struct ice_pf *pf;
+ dpll_tracker tracker;
+ struct fwnode_handle *fwnode;
+ struct notifier_block nb;
u8 idx;
u8 num_parents;
u8 parent_idx[ICE_DPLL_RCLK_NUM_MAX];
@@ -58,6 +68,7 @@ struct ice_dpll_pin {
/** ice_dpll - store info required for DPLL control
* @dpll: pointer to dpll dev
* @pf: pointer to pf, which has registered the dpll_device
+ * @tracker: reference count tracker
* @dpll_idx: index of dpll on the NIC
* @input_idx: currently selected input index
* @prev_input_idx: previously selected input index
@@ -76,6 +87,7 @@ struct ice_dpll_pin {
struct ice_dpll {
struct dpll_device *dpll;
struct ice_pf *pf;
+ dpll_tracker tracker;
u8 dpll_idx;
u8 input_idx;
u8 prev_input_idx;
@@ -114,7 +126,9 @@ struct ice_dpll {
struct ice_dplls {
struct kthread_worker *kworker;
struct kthread_delayed_work work;
+ struct workqueue_struct *wq;
struct mutex lock;
+ struct completion dpll_init;
struct ice_dpll eec;
struct ice_dpll pps;
struct ice_dpll_pin *inputs;
@@ -143,3 +157,19 @@ static inline void ice_dpll_deinit(struct ice_pf *pf) { }
#endif
#endif
+
+#define ICE_CGU_R10 0x28
+#define ICE_CGU_R10_SYNCE_CLKO_SEL GENMASK(8, 5)
+#define ICE_CGU_R10_SYNCE_CLKODIV_M1 GENMASK(13, 9)
+#define ICE_CGU_R10_SYNCE_CLKODIV_LOAD BIT(14)
+#define ICE_CGU_R10_SYNCE_DCK_RST BIT(15)
+#define ICE_CGU_R10_SYNCE_ETHCLKO_SEL GENMASK(18, 16)
+#define ICE_CGU_R10_SYNCE_ETHDIV_M1 GENMASK(23, 19)
+#define ICE_CGU_R10_SYNCE_ETHDIV_LOAD BIT(24)
+#define ICE_CGU_R10_SYNCE_DCK2_RST BIT(25)
+#define ICE_CGU_R10_SYNCE_S_REF_CLK GENMASK(31, 27)
+
+#define ICE_CGU_R11 0x2C
+#define ICE_CGU_R11_SYNCE_S_BYP_CLK GENMASK(6, 1)
+
+#define ICE_CGU_BYPASS_MUX_OFFSET_E825C 3
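The S_REF_CLK and S_BYP_CLK fields select which source drives the two SyncE outputs; a port is considered active on an output when the field equals its PHY-relative port number plus ICE_CGU_BYPASS_MUX_OFFSET_E825C, while encodings below the offset appear to select fixed references such as NET_REF_CLK0. Decoding then reduces to (a sketch mirroring ice_tspll_bypass_mux_active_e825c() further down):

static bool port_drives_clk0(u32 r10, u8 phy_rel_port)
{
	u8 sel = FIELD_GET(ICE_CGU_R10_SYNCE_S_REF_CLK, r10);

	return sel == phy_rel_port + ICE_CGU_BYPASS_MUX_OFFSET_E825C;
}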
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 3565a5d96c6d..c6bc29cfb8e6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -33,8 +33,8 @@ static int ice_q_stats_len(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
- (sizeof(struct ice_q_stats) / sizeof(u64)));
+	/* One packet count and one byte count per queue */
+ return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) * 2);
}
#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
@@ -1942,25 +1942,35 @@ __ice_get_ethtool_stats(struct net_device *netdev,
rcu_read_lock();
ice_for_each_alloc_txq(vsi, j) {
+ u64 pkts, bytes;
+
tx_ring = READ_ONCE(vsi->tx_rings[j]);
- if (tx_ring && tx_ring->ring_stats) {
- data[i++] = tx_ring->ring_stats->stats.pkts;
- data[i++] = tx_ring->ring_stats->stats.bytes;
- } else {
+ if (!tx_ring || !tx_ring->ring_stats) {
data[i++] = 0;
data[i++] = 0;
+ continue;
}
+
+ ice_fetch_tx_ring_stats(tx_ring, &pkts, &bytes);
+
+ data[i++] = pkts;
+ data[i++] = bytes;
}
ice_for_each_alloc_rxq(vsi, j) {
+ u64 pkts, bytes;
+
rx_ring = READ_ONCE(vsi->rx_rings[j]);
- if (rx_ring && rx_ring->ring_stats) {
- data[i++] = rx_ring->ring_stats->stats.pkts;
- data[i++] = rx_ring->ring_stats->stats.bytes;
- } else {
+ if (!rx_ring || !rx_ring->ring_stats) {
data[i++] = 0;
data[i++] = 0;
+ continue;
}
+
+ ice_fetch_rx_ring_stats(rx_ring, &pkts, &bytes);
+
+ data[i++] = pkts;
+ data[i++] = bytes;
}
rcu_read_unlock();
@@ -3378,7 +3388,6 @@ process_link:
*/
rx_rings[i].next_to_use = 0;
rx_rings[i].next_to_clean = 0;
- rx_rings[i].next_to_alloc = 0;
*vsi->rx_rings[i] = rx_rings[i];
}
kfree(rx_rings);
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index 30801fd375f0..1d9b2d646474 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -106,9 +106,10 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
#define ICE_RDMA_AEQ_MSIX 1
static int ice_get_default_msix_amount(struct ice_pf *pf)
{
- return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() +
+ return ICE_MIN_LAN_OICR_MSIX + netif_get_num_default_rss_queues() +
(test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) +
- (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0);
+ (ice_is_rdma_ena(pf) ? netif_get_num_default_rss_queues() +
+ ICE_RDMA_AEQ_MSIX : 0);
}
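The switch from num_online_cpus() to netif_get_num_default_rss_queues() bounds the default vector request on large systems: the core helper caps the queue count (roughly half the physical cores on recent kernels) instead of asking for one vector per logical CPU. A worked sketch of the budget, assuming the driver's small constants keep their usual values of 1:

/* Sketch: default MSI-X request with flow director and RDMA enabled */
static int msix_budget(int rss_qs, bool fd_ena, bool rdma_ena)
{
	int n = 1 + rss_qs;		/* OICR + LAN queue vectors */

	if (fd_ena)
		n += 1;			/* flow director */
	if (rdma_ena)
		n += rss_qs + 1;	/* RDMA queues + AEQ */
	return n;
}
/* e.g. rss_qs = 16 with both enabled: 1 + 16 + 1 + 17 = 35 vectors */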
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d47af94f31a9..d921269e1fe7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -159,12 +159,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
static u16 ice_get_rxq_count(struct ice_pf *pf)
{
- return min(ice_get_avail_rxq_count(pf), num_online_cpus());
+ return min(ice_get_avail_rxq_count(pf),
+ netif_get_num_default_rss_queues());
}
static u16 ice_get_txq_count(struct ice_pf *pf)
{
- return min(ice_get_avail_txq_count(pf), num_online_cpus());
+ return min(ice_get_avail_txq_count(pf),
+ netif_get_num_default_rss_queues());
}
/**
@@ -911,13 +913,15 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_CHNL)
vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
else
- vsi->rss_size = min_t(u16, num_online_cpus(),
+ vsi->rss_size = min_t(u16,
+ netif_get_num_default_rss_queues(),
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF;
break;
case ICE_VSI_SF:
vsi->rss_table_size = ICE_LUT_VSI_SIZE;
- vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+ vsi->rss_size = min_t(u16, netif_get_num_default_rss_queues(),
+ max_rss_size);
vsi->rss_lut_type = ICE_LUT_VSI;
break;
case ICE_VSI_VF:
@@ -3431,20 +3435,6 @@ out:
}
/**
- * ice_update_ring_stats - Update ring statistics
- * @stats: stats to be updated
- * @pkts: number of processed packets
- * @bytes: number of processed bytes
- *
- * This function assumes that caller has acquired a u64_stats_sync lock.
- */
-static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
-{
- stats->bytes += bytes;
- stats->pkts += pkts;
-}
-
-/**
* ice_update_tx_ring_stats - Update Tx ring specific counters
* @tx_ring: ring to update
* @pkts: number of processed packets
@@ -3453,7 +3443,8 @@ static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes
void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&tx_ring->ring_stats->syncp);
- ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
+ u64_stats_add(&tx_ring->ring_stats->pkts, pkts);
+ u64_stats_add(&tx_ring->ring_stats->bytes, bytes);
u64_stats_update_end(&tx_ring->ring_stats->syncp);
}
@@ -3466,11 +3457,48 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&rx_ring->ring_stats->syncp);
- ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
+ u64_stats_add(&rx_ring->ring_stats->pkts, pkts);
+ u64_stats_add(&rx_ring->ring_stats->bytes, bytes);
u64_stats_update_end(&rx_ring->ring_stats->syncp);
}
/**
+ * ice_fetch_tx_ring_stats - Fetch Tx ring packet and byte counters
+ * @ring: ring to read
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ice_fetch_tx_ring_stats(const struct ice_tx_ring *ring,
+ u64 *pkts, u64 *bytes)
+{
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&ring->ring_stats->syncp);
+ *pkts = u64_stats_read(&ring->ring_stats->pkts);
+ *bytes = u64_stats_read(&ring->ring_stats->bytes);
+ } while (u64_stats_fetch_retry(&ring->ring_stats->syncp, start));
+}
+
+/**
+ * ice_fetch_rx_ring_stats - Fetch Rx ring packet and byte counters
+ * @ring: ring to read
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ice_fetch_rx_ring_stats(const struct ice_rx_ring *ring,
+ u64 *pkts, u64 *bytes)
+{
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&ring->ring_stats->syncp);
+ *pkts = u64_stats_read(&ring->ring_stats->pkts);
+ *bytes = u64_stats_read(&ring->ring_stats->bytes);
+ } while (u64_stats_fetch_retry(&ring->ring_stats->syncp, start));
+}
+
+/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
* @pi: port info of the switch with default VSI
*
@@ -3961,6 +3989,9 @@ void ice_init_feature_support(struct ice_pf *pf)
break;
}
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825)
+ ice_set_feature_support(pf, ICE_F_PHY_RCLK);
+
if (pf->hw.mac_type == ICE_MAC_E830) {
ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
ice_set_feature_support(pf, ICE_F_GCS);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 2cb1eb98b9da..49454d98dcfe 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -92,6 +92,12 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
+void ice_fetch_tx_ring_stats(const struct ice_tx_ring *ring,
+ u64 *pkts, u64 *bytes);
+
+void ice_fetch_rx_ring_stats(const struct ice_rx_ring *ring,
+ u64 *pkts, u64 *bytes);
+
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index d04605d3e61a..4da37caa3ec9 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -159,8 +159,8 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
* prev_pkt would be negative if there was no
* pending work.
*/
- packets = ring_stats->stats.pkts & INT_MAX;
- if (ring_stats->tx_stats.prev_pkt == packets) {
+ packets = ice_stats_read(ring_stats, pkts) & INT_MAX;
+ if (ring_stats->tx.prev_pkt == packets) {
/* Trigger sw interrupt to revive the queue */
ice_trigger_sw_intr(hw, tx_ring->q_vector);
continue;
@@ -170,7 +170,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
* to ice_get_tx_pending()
*/
smp_rmb();
- ring_stats->tx_stats.prev_pkt =
+ ring_stats->tx.prev_pkt =
ice_get_tx_pending(tx_ring) ? packets : -1;
}
}
@@ -6824,58 +6824,132 @@ int ice_up(struct ice_vsi *vsi)
return err;
}
+struct ice_vsi_tx_stats {
+ u64 pkts;
+ u64 bytes;
+ u64 tx_restart_q;
+ u64 tx_busy;
+ u64 tx_linearize;
+};
+
+struct ice_vsi_rx_stats {
+ u64 pkts;
+ u64 bytes;
+ u64 rx_non_eop_descs;
+ u64 rx_page_failed;
+ u64 rx_buf_failed;
+};
+
/**
- * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
- * @syncp: pointer to u64_stats_sync
- * @stats: stats that pkts and bytes count will be taken from
- * @pkts: packets stats counter
- * @bytes: bytes stats counter
+ * ice_fetch_u64_tx_stats - get Tx stats from a ring
+ * @ring: the Tx ring to copy stats from
+ * @copy: temporary storage for the ring statistics
*
- * This function fetches stats from the ring considering the atomic operations
- * that needs to be performed to read u64 values in 32 bit machine.
+ * Fetch the u64 stats from the ring using u64_stats_fetch. This ensures each
+ * stat value is self-consistent, though not necessarily consistent w.r.t.
+ * other stats.
*/
-void
-ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
- struct ice_q_stats stats, u64 *pkts, u64 *bytes)
+static void ice_fetch_u64_tx_stats(struct ice_tx_ring *ring,
+ struct ice_vsi_tx_stats *copy)
{
+ struct ice_ring_stats *stats = ring->ring_stats;
unsigned int start;
do {
- start = u64_stats_fetch_begin(syncp);
- *pkts = stats.pkts;
- *bytes = stats.bytes;
- } while (u64_stats_fetch_retry(syncp, start));
+ start = u64_stats_fetch_begin(&stats->syncp);
+ copy->pkts = u64_stats_read(&stats->pkts);
+ copy->bytes = u64_stats_read(&stats->bytes);
+ copy->tx_restart_q = u64_stats_read(&stats->tx_restart_q);
+ copy->tx_busy = u64_stats_read(&stats->tx_busy);
+ copy->tx_linearize = u64_stats_read(&stats->tx_linearize);
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+}
+
+/**
+ * ice_fetch_u64_rx_stats - get Rx stats from a ring
+ * @ring: the Rx ring to copy stats from
+ * @copy: temporary storage for the ring statistics
+ *
+ * Fetch the u64 stats from the ring using u64_stats_fetch. This ensures each
+ * stat value is self-consistent, though not necessarily consistent w.r.t.
+ * other stats.
+ */
+static void ice_fetch_u64_rx_stats(struct ice_rx_ring *ring,
+ struct ice_vsi_rx_stats *copy)
+{
+ struct ice_ring_stats *stats = ring->ring_stats;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ copy->pkts = u64_stats_read(&stats->pkts);
+ copy->bytes = u64_stats_read(&stats->bytes);
+ copy->rx_non_eop_descs =
+ u64_stats_read(&stats->rx_non_eop_descs);
+ copy->rx_page_failed = u64_stats_read(&stats->rx_page_failed);
+ copy->rx_buf_failed = u64_stats_read(&stats->rx_buf_failed);
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
}
/**
* ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
* @vsi: the VSI to be updated
- * @vsi_stats: the stats struct to be updated
+ * @vsi_stats: accumulated stats for this VSI
* @rings: rings to work on
* @count: number of rings
*/
-static void
-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
- struct rtnl_link_stats64 *vsi_stats,
- struct ice_tx_ring **rings, u16 count)
+static void ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
+ struct ice_vsi_tx_stats *vsi_stats,
+ struct ice_tx_ring **rings, u16 count)
{
+ struct ice_vsi_tx_stats copy = {};
u16 i;
for (i = 0; i < count; i++) {
struct ice_tx_ring *ring;
- u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
if (!ring || !ring->ring_stats)
continue;
- ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
- ring->ring_stats->stats, &pkts,
- &bytes);
- vsi_stats->tx_packets += pkts;
- vsi_stats->tx_bytes += bytes;
- vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
- vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
- vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
+
+ ice_fetch_u64_tx_stats(ring, &copy);
+
+ vsi_stats->pkts += copy.pkts;
+ vsi_stats->bytes += copy.bytes;
+ vsi_stats->tx_restart_q += copy.tx_restart_q;
+ vsi_stats->tx_busy += copy.tx_busy;
+ vsi_stats->tx_linearize += copy.tx_linearize;
+ }
+}
+
+/**
+ * ice_update_vsi_rx_ring_stats - Update VSI Rx ring stats counters
+ * @vsi: the VSI to be updated
+ * @vsi_stats: accumulated stats for this VSI
+ * @rings: rings to work on
+ * @count: number of rings
+ */
+static void ice_update_vsi_rx_ring_stats(struct ice_vsi *vsi,
+ struct ice_vsi_rx_stats *vsi_stats,
+ struct ice_rx_ring **rings, u16 count)
+{
+ struct ice_vsi_rx_stats copy = {};
+ u16 i;
+
+ for (i = 0; i < count; i++) {
+ struct ice_rx_ring *ring;
+
+ ring = READ_ONCE(rings[i]);
+ if (!ring || !ring->ring_stats)
+ continue;
+
+ ice_fetch_u64_rx_stats(ring, &copy);
+
+ vsi_stats->pkts += copy.pkts;
+ vsi_stats->bytes += copy.bytes;
+ vsi_stats->rx_non_eop_descs += copy.rx_non_eop_descs;
+ vsi_stats->rx_page_failed += copy.rx_page_failed;
+ vsi_stats->rx_buf_failed += copy.rx_buf_failed;
}
}
@@ -6886,50 +6960,34 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *net_stats, *stats_prev;
- struct rtnl_link_stats64 *vsi_stats;
+ struct ice_vsi_tx_stats tx_stats = {};
+ struct ice_vsi_rx_stats rx_stats = {};
struct ice_pf *pf = vsi->back;
- u64 pkts, bytes;
- int i;
-
- vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
- if (!vsi_stats)
- return;
-
- /* reset non-netdev (extended) stats */
- vsi->tx_restart = 0;
- vsi->tx_busy = 0;
- vsi->tx_linearize = 0;
- vsi->rx_buf_failed = 0;
- vsi->rx_page_failed = 0;
rcu_read_lock();
/* update Tx rings counters */
- ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
+ ice_update_vsi_tx_ring_stats(vsi, &tx_stats, vsi->tx_rings,
vsi->num_txq);
/* update Rx rings counters */
- ice_for_each_rxq(vsi, i) {
- struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
- struct ice_ring_stats *ring_stats;
-
- ring_stats = ring->ring_stats;
- ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
- ring_stats->stats, &pkts,
- &bytes);
- vsi_stats->rx_packets += pkts;
- vsi_stats->rx_bytes += bytes;
- vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
- vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
- }
+ ice_update_vsi_rx_ring_stats(vsi, &rx_stats, vsi->rx_rings,
+ vsi->num_rxq);
/* update XDP Tx rings counters */
if (ice_is_xdp_ena_vsi(vsi))
- ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
+ ice_update_vsi_tx_ring_stats(vsi, &tx_stats, vsi->xdp_rings,
vsi->num_xdp_txq);
rcu_read_unlock();
+ /* Save non-netdev (extended) stats */
+ vsi->tx_restart = tx_stats.tx_restart_q;
+ vsi->tx_busy = tx_stats.tx_busy;
+ vsi->tx_linearize = tx_stats.tx_linearize;
+ vsi->rx_buf_failed = rx_stats.rx_buf_failed;
+ vsi->rx_page_failed = rx_stats.rx_page_failed;
+
net_stats = &vsi->net_stats;
stats_prev = &vsi->net_stats_prev;
@@ -6939,18 +6997,16 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
* let's skip this round.
*/
if (likely(pf->stat_prev_loaded)) {
- net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
- net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
- net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
- net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
+ net_stats->tx_packets += tx_stats.pkts - stats_prev->tx_packets;
+ net_stats->tx_bytes += tx_stats.bytes - stats_prev->tx_bytes;
+ net_stats->rx_packets += rx_stats.pkts - stats_prev->rx_packets;
+ net_stats->rx_bytes += rx_stats.bytes - stats_prev->rx_bytes;
}
- stats_prev->tx_packets = vsi_stats->tx_packets;
- stats_prev->tx_bytes = vsi_stats->tx_bytes;
- stats_prev->rx_packets = vsi_stats->rx_packets;
- stats_prev->rx_bytes = vsi_stats->rx_bytes;
-
- kfree(vsi_stats);
+ stats_prev->tx_packets = tx_stats.pkts;
+ stats_prev->tx_bytes = tx_stats.bytes;
+ stats_prev->rx_packets = rx_stats.pkts;
+ stats_prev->rx_bytes = rx_stats.bytes;
}
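The tail of this function implements reset-safe accumulation: ring counters restart from zero whenever rings are rebuilt, so the VSI-level netdev stats advance by the delta between the current snapshot and the previous one rather than mirroring raw ring totals. The invariant in miniature (a sketch; note prev is refreshed even on the first pass, when the delta is skipped):

static void accumulate(u64 *net_total, u64 *prev, u64 cur, bool prev_loaded)
{
	if (prev_loaded)
		*net_total += cur - *prev;	/* advance by the delta only */
	*prev = cur;				/* snapshot for the next pass */
}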
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 272683001476..22c3986b910a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1296,6 +1296,38 @@ void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
if (pf->hw.reset_ongoing)
return;
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
+ int pin, err;
+
+ if (!test_bit(ICE_FLAG_DPLL, pf->flags))
+ return;
+
+ mutex_lock(&pf->dplls.lock);
+ for (pin = 0; pin < ICE_SYNCE_CLK_NUM; pin++) {
+ enum ice_synce_clk clk_pin;
+ bool active;
+ u8 port_num;
+
+ port_num = ptp_port->port_num;
+ clk_pin = (enum ice_synce_clk)pin;
+ err = ice_tspll_bypass_mux_active_e825c(hw,
+ port_num,
+ &active,
+ clk_pin);
+ if (WARN_ON_ONCE(err)) {
+ mutex_unlock(&pf->dplls.lock);
+ return;
+ }
+
+ err = ice_tspll_cfg_synce_ethdiv_e825c(hw, clk_pin);
+ if (active && WARN_ON_ONCE(err)) {
+ mutex_unlock(&pf->dplls.lock);
+ return;
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+ }
+
switch (hw->mac_type) {
case ICE_MAC_E810:
case ICE_MAC_E830:
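Editor's note: the new E825-specific block re-runs the SyncE divider configuration on every link transition because the CGU divider is a function of link speed (see ice_tspll_get_div_e825c() added below). Condensed, with a hypothetical helper standing in for the actual R10 register writes:

	u16 speed = hw->port_info->phy.link_info.link_speed;
	unsigned int div;

	/* a speed change can invalidate the previously latched divider */
	if (!ice_tspll_get_div_e825c(speed, &div))
		reprogram_cgu_divider(hw, div);	/* hypothetical helper */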
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 35680dbe4a7f..61c0a0d93ea8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -5903,7 +5903,14 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
*base_idx = SI_REF1P;
else
ret = -ENODEV;
-
+ break;
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ *pin_num = ICE_SYNCE_CLK_NUM;
+ *base_idx = 0;
+ ret = 0;
break;
default:
ret = -ENODEV;
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.c b/drivers/net/ethernet/intel/ice/ice_tspll.c
index 66320a4ab86f..fd4b58eb9bc0 100644
--- a/drivers/net/ethernet/intel/ice/ice_tspll.c
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.c
@@ -624,3 +624,220 @@ int ice_tspll_init(struct ice_hw *hw)
return err;
}
+
+/**
+ * ice_tspll_bypass_mux_active_e825c - check if the given port is set active
+ * @hw: Pointer to the HW struct
+ * @port: Number of the port
+ * @active: Output flag showing if port is active
+ * @output: Output pin; the E825C has two
+ *
+ * Check if the given port is selected as the recovered clock source for the
+ * given output.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+int ice_tspll_bypass_mux_active_e825c(struct ice_hw *hw, u8 port, bool *active,
+ enum ice_synce_clk output)
+{
+ u8 active_clk;
+ u32 val;
+ int err;
+
+ switch (output) {
+ case ICE_SYNCE_CLK0:
+ err = ice_read_cgu_reg(hw, ICE_CGU_R10, &val);
+ if (err)
+ return err;
+ active_clk = FIELD_GET(ICE_CGU_R10_SYNCE_S_REF_CLK, val);
+ break;
+ case ICE_SYNCE_CLK1:
+ err = ice_read_cgu_reg(hw, ICE_CGU_R11, &val);
+ if (err)
+ return err;
+ active_clk = FIELD_GET(ICE_CGU_R11_SYNCE_S_BYP_CLK, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (active_clk == port % hw->ptp.ports_per_phy +
+ ICE_CGU_BYPASS_MUX_OFFSET_E825C)
+ *active = true;
+ else
+ *active = false;
+
+ return 0;
+}
+
+/**
+ * ice_tspll_cfg_bypass_mux_e825c - configure reference clock mux
+ * @hw: Pointer to the HW struct
+ * @ena: true to enable the reference, false to disable it
+ * @port_num: Number of the port
+ * @output: Output pin; the E825C has two
+ *
+ * Set the reference clock source and output clock selection.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+int ice_tspll_cfg_bypass_mux_e825c(struct ice_hw *hw, bool ena, u32 port_num,
+ enum ice_synce_clk output)
+{
+ u8 first_mux;
+ int err;
+ u32 r10;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R10, &r10);
+ if (err)
+ return err;
+
+ if (!ena)
+ first_mux = ICE_CGU_NET_REF_CLK0;
+ else
+ first_mux = port_num + ICE_CGU_BYPASS_MUX_OFFSET_E825C;
+
+ r10 &= ~(ICE_CGU_R10_SYNCE_DCK_RST | ICE_CGU_R10_SYNCE_DCK2_RST);
+
+ switch (output) {
+ case ICE_SYNCE_CLK0:
+ r10 &= ~(ICE_CGU_R10_SYNCE_ETHCLKO_SEL |
+ ICE_CGU_R10_SYNCE_ETHDIV_LOAD |
+ ICE_CGU_R10_SYNCE_S_REF_CLK);
+ r10 |= FIELD_PREP(ICE_CGU_R10_SYNCE_S_REF_CLK, first_mux);
+ r10 |= FIELD_PREP(ICE_CGU_R10_SYNCE_ETHCLKO_SEL,
+ ICE_CGU_REF_CLK_BYP0_DIV);
+ break;
+ case ICE_SYNCE_CLK1:
+ {
+ u32 val;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R11, &val);
+ if (err)
+ return err;
+ val &= ~ICE_CGU_R11_SYNCE_S_BYP_CLK;
+ val |= FIELD_PREP(ICE_CGU_R11_SYNCE_S_BYP_CLK, first_mux);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R11, val);
+ if (err)
+ return err;
+ r10 &= ~(ICE_CGU_R10_SYNCE_CLKODIV_LOAD |
+ ICE_CGU_R10_SYNCE_CLKO_SEL);
+ r10 |= FIELD_PREP(ICE_CGU_R10_SYNCE_CLKO_SEL,
+ ICE_CGU_REF_CLK_BYP1_DIV);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R10, r10);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_tspll_get_div_e825c - get the divider for the given speed
+ * @link_speed: link speed of the port
+ * @divider: output value, calculated divider
+ *
+ * Get the CGU divider value for the given link speed.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int ice_tspll_get_div_e825c(u16 link_speed, unsigned int *divider)
+{
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ case ICE_AQ_LINK_SPEED_50GB:
+ case ICE_AQ_LINK_SPEED_25GB:
+ *divider = 10;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ case ICE_AQ_LINK_SPEED_10GB:
+ *divider = 4;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ case ICE_AQ_LINK_SPEED_2500MB:
+ case ICE_AQ_LINK_SPEED_1000MB:
+ *divider = 2;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ *divider = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_tspll_cfg_synce_ethdiv_e825c - set the divider on the mux
+ * @hw: Pointer to the HW struct
+ * @output: Output pin; the E825C has two
+ *
+ * Set the correct CGU divider for RCLKA or RCLKB.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+int ice_tspll_cfg_synce_ethdiv_e825c(struct ice_hw *hw,
+ enum ice_synce_clk output)
+{
+ unsigned int divider;
+ u16 link_speed;
+ u32 val;
+ int err;
+
+ link_speed = hw->port_info->phy.link_info.link_speed;
+ if (!link_speed)
+ return 0;
+
+ err = ice_tspll_get_div_e825c(link_speed, &divider);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R10, &val);
+ if (err)
+ return err;
+
+ /* programmable divider value (from 2 to 16) minus 1 for ETHCLKOUT */
+ switch (output) {
+ case ICE_SYNCE_CLK0:
+ val &= ~(ICE_CGU_R10_SYNCE_ETHDIV_M1 |
+ ICE_CGU_R10_SYNCE_ETHDIV_LOAD);
+ val |= FIELD_PREP(ICE_CGU_R10_SYNCE_ETHDIV_M1, divider - 1);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R10, val);
+ if (err)
+ return err;
+ val |= ICE_CGU_R10_SYNCE_ETHDIV_LOAD;
+ break;
+ case ICE_SYNCE_CLK1:
+ val &= ~(ICE_CGU_R10_SYNCE_CLKODIV_M1 |
+ ICE_CGU_R10_SYNCE_CLKODIV_LOAD);
+ val |= FIELD_PREP(ICE_CGU_R10_SYNCE_CLKODIV_M1, divider - 1);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R10, val);
+ if (err)
+ return err;
+ val |= ICE_CGU_R10_SYNCE_CLKODIV_LOAD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R10, val);
+ if (err)
+ return err;
+
+ return 0;
+}
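Editor's note: observe the two-step write that ends ice_tspll_cfg_synce_ethdiv_e825c(): the divider field is first programmed with its LOAD bit cleared, then a second write sets LOAD so the CGU latches the staged value. The pattern in isolation, with generic placeholder names for the field and strobe bit:

	val &= ~(DIV_FIELD | LOAD_BIT);			/* clear field and strobe */
	val |= FIELD_PREP(DIV_FIELD, divider - 1);	/* divider is stored minus 1 */
	ice_write_cgu_reg(hw, REG, val);		/* stage the new divider */
	val |= LOAD_BIT;
	ice_write_cgu_reg(hw, REG, val);		/* strobe LOAD to latch it */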
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.h b/drivers/net/ethernet/intel/ice/ice_tspll.h
index c0b1232cc07c..d650867004d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_tspll.h
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.h
@@ -21,11 +21,22 @@ struct ice_tspll_params_e82x {
u32 frac_n_div;
};
+#define ICE_CGU_NET_REF_CLK0 0x0
+#define ICE_CGU_REF_CLK_BYP0 0x5
+#define ICE_CGU_REF_CLK_BYP0_DIV 0x0
+#define ICE_CGU_REF_CLK_BYP1 0x4
+#define ICE_CGU_REF_CLK_BYP1_DIV 0x1
+
#define ICE_TSPLL_CK_REFCLKFREQ_E825 0x1F
#define ICE_TSPLL_NDIVRATIO_E825 5
#define ICE_TSPLL_FBDIV_INTGR_E825 256
int ice_tspll_cfg_pps_out_e825c(struct ice_hw *hw, bool enable);
int ice_tspll_init(struct ice_hw *hw);
-
+int ice_tspll_bypass_mux_active_e825c(struct ice_hw *hw, u8 port, bool *active,
+ enum ice_synce_clk output);
+int ice_tspll_cfg_bypass_mux_e825c(struct ice_hw *hw, bool ena, u32 port_num,
+ enum ice_synce_clk output);
+int ice_tspll_cfg_synce_ethdiv_e825c(struct ice_hw *hw,
+ enum ice_synce_clk output);
#endif /* _ICE_TSPLL_H_ */
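Editor's note: taken together, the three new entry points compose as sketched below. This is a hypothetical caller, not driver code; per the kernel-doc Context lines, both configuration calls expect pf->dplls.lock to be held.

static int example_enable_rclk(struct ice_pf *pf, u32 port)
{
	struct ice_hw *hw = &pf->hw;
	int err;

	mutex_lock(&pf->dplls.lock);
	/* route the port's recovered clock to output CLK0 ... */
	err = ice_tspll_cfg_bypass_mux_e825c(hw, true, port, ICE_SYNCE_CLK0);
	if (!err)
		/* ... then set the divider that matches the link speed */
		err = ice_tspll_cfg_synce_ethdiv_e825c(hw, ICE_SYNCE_CLK0);
	mutex_unlock(&pf->dplls.lock);

	return err;
}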
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index ad76768a4232..6fa201a14f51 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -379,7 +379,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
!test_bit(ICE_VSI_DOWN, vsi->state)) {
netif_tx_wake_queue(txring_txq(tx_ring));
- ++tx_ring->ring_stats->tx_stats.restart_q;
+ ice_stats_inc(tx_ring->ring_stats, tx_restart_q);
}
}
@@ -499,7 +499,7 @@ int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->ring_stats->tx_stats.prev_pkt = -1;
+ tx_ring->ring_stats->tx.prev_pkt = -1;
return 0;
err:
@@ -574,7 +574,6 @@ rx_skip_free:
PAGE_SIZE);
memset(rx_ring->desc, 0, size);
- rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -849,7 +848,7 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
addr = libeth_rx_alloc(&fq, ntu);
if (addr == DMA_MAPPING_ERROR) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ ice_stats_inc(rx_ring->ring_stats, rx_page_failed);
break;
}
@@ -863,7 +862,7 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
addr = libeth_rx_alloc(&hdr_fq, ntu);
if (addr == DMA_MAPPING_ERROR) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ ice_stats_inc(rx_ring->ring_stats, rx_page_failed);
libeth_rx_recycle_slow(fq.fqes[ntu].netmem);
break;
@@ -1045,7 +1044,7 @@ construct_skb:
/* exit if we failed to retrieve a buffer */
if (!skb) {
libeth_xdp_return_buff_slow(xdp);
- rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
+ ice_stats_inc(rx_ring->ring_stats, rx_buf_failed);
continue;
}
@@ -1087,35 +1086,36 @@ static void __ice_update_sample(struct ice_q_vector *q_vector,
struct dim_sample *sample,
bool is_tx)
{
- u64 packets = 0, bytes = 0;
+ u64 total_packets = 0, total_bytes = 0, pkts, bytes;
if (is_tx) {
struct ice_tx_ring *tx_ring;
ice_for_each_tx_ring(tx_ring, *rc) {
- struct ice_ring_stats *ring_stats;
-
- ring_stats = tx_ring->ring_stats;
- if (!ring_stats)
+ if (!tx_ring->ring_stats)
continue;
- packets += ring_stats->stats.pkts;
- bytes += ring_stats->stats.bytes;
+
+ ice_fetch_tx_ring_stats(tx_ring, &pkts, &bytes);
+
+ total_packets += pkts;
+ total_bytes += bytes;
}
} else {
struct ice_rx_ring *rx_ring;
ice_for_each_rx_ring(rx_ring, *rc) {
- struct ice_ring_stats *ring_stats;
-
- ring_stats = rx_ring->ring_stats;
- if (!ring_stats)
+ if (!rx_ring->ring_stats)
continue;
- packets += ring_stats->stats.pkts;
- bytes += ring_stats->stats.bytes;
+
+ ice_fetch_rx_ring_stats(rx_ring, &pkts, &bytes);
+
+ total_packets += pkts;
+ total_bytes += bytes;
}
}
- dim_update_sample(q_vector->total_events, packets, bytes, sample);
+ dim_update_sample(q_vector->total_events,
+ total_packets, total_bytes, sample);
sample->comp_ctr = 0;
/* if dim settings get stale, like when not updated for 1
@@ -1362,7 +1362,7 @@ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_tx_start_queue(txring_txq(tx_ring));
- ++tx_ring->ring_stats->tx_stats.restart_q;
+ ice_stats_inc(tx_ring->ring_stats, tx_restart_q);
return 0;
}
@@ -2156,15 +2156,12 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ice_trace(xmit_frame_ring, tx_ring, skb);
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto out_drop;
-
count = ice_xmit_desc_count(skb);
if (ice_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
count = ice_txd_use_count(skb->len);
- tx_ring->ring_stats->tx_stats.tx_linearize++;
+ ice_stats_inc(tx_ring->ring_stats, tx_linearize);
}
/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
@@ -2175,7 +2172,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
*/
if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
ICE_DESCS_FOR_CTX_DESC)) {
- tx_ring->ring_stats->tx_stats.tx_busy++;
+ ice_stats_inc(tx_ring->ring_stats, tx_busy);
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index e440c55d9e9f..b6547e1b7c42 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -129,34 +129,65 @@ struct ice_tx_offload_params {
u8 header_len;
};
-struct ice_q_stats {
- u64 pkts;
- u64 bytes;
-};
-
-struct ice_txq_stats {
- u64 restart_q;
- u64 tx_busy;
- u64 tx_linearize;
- int prev_pkt; /* negative if no pending Tx descriptors */
-};
-
-struct ice_rxq_stats {
- u64 non_eop_descs;
- u64 alloc_page_failed;
- u64 alloc_buf_failed;
-};
-
struct ice_ring_stats {
struct rcu_head rcu; /* to avoid race on free */
- struct ice_q_stats stats;
struct u64_stats_sync syncp;
- union {
- struct ice_txq_stats tx_stats;
- struct ice_rxq_stats rx_stats;
- };
+ struct_group(stats,
+ u64_stats_t pkts;
+ u64_stats_t bytes;
+ union {
+ struct_group(tx,
+ u64_stats_t tx_restart_q;
+ u64_stats_t tx_busy;
+ u64_stats_t tx_linearize;
+ /* negative if no pending Tx descriptors */
+ int prev_pkt;
+ );
+ struct_group(rx,
+ u64_stats_t rx_non_eop_descs;
+ u64_stats_t rx_page_failed;
+ u64_stats_t rx_buf_failed;
+ );
+ };
+ );
};
+/**
+ * ice_stats_read - Read a single ring stat value
+ * @stats: pointer to ring_stats structure for a queue
+ * @member: the ice_ring_stats member to read
+ *
+ * Shorthand for reading a single 64-bit stat value from struct
+ * ice_ring_stats.
+ *
+ * Return: the value of the requested stat.
+ */
+#define ice_stats_read(stats, member) ({ \
+ struct ice_ring_stats *__stats = (stats); \
+ unsigned int start; \
+ u64 val; \
+ do { \
+ start = u64_stats_fetch_begin(&__stats->syncp); \
+ val = u64_stats_read(&__stats->member); \
+ } while (u64_stats_fetch_retry(&__stats->syncp, start)); \
+ val; \
+})
+
+/**
+ * ice_stats_inc - Increment a single ring stat value
+ * @stats: pointer to the ring_stats structure for a queue
+ * @member: the ice_ring_stats member to increment
+ *
+ * Shorthand for incrementing a single 64-bit stat value in struct
+ * ice_ring_stats.
+ */
+#define ice_stats_inc(stats, member) do { \
+ struct ice_ring_stats *__stats = (stats); \
+ u64_stats_update_begin(&__stats->syncp); \
+ u64_stats_inc(&__stats->member); \
+ u64_stats_update_end(&__stats->syncp); \
+} while (0)
+
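Editor's note: these macros cover one counter at a time; the ice_fetch_tx_ring_stats()/ice_fetch_rx_ring_stats() helpers referenced elsewhere in this patch presumably snapshot pkts and bytes together so the pair stays consistent across a seqcount retry. A minimal sketch of that shape:

static inline void ice_fetch_ring_pkts_bytes(const struct ice_ring_stats *rs,
					     u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&rs->syncp);
		*pkts = u64_stats_read(&rs->pkts);
		*bytes = u64_stats_read(&rs->bytes);
	} while (u64_stats_fetch_retry(&rs->syncp, start));
}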
enum ice_ring_state_t {
ICE_TX_XPS_INIT_DONE,
ICE_TX_NBITS,
@@ -236,34 +267,49 @@ struct ice_tstamp_ring {
} ____cacheline_internodealigned_in_smp;
struct ice_rx_ring {
- /* CL1 - 1st cacheline starts here */
+ __cacheline_group_begin_aligned(read_mostly);
void *desc; /* Descriptor ring memory */
struct page_pool *pp;
struct net_device *netdev; /* netdev ring maps to */
- struct ice_vsi *vsi; /* Backreference to associated VSI */
struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail;
- u16 q_index; /* Queue number of ring */
-
- u16 count; /* Number of descriptors */
- u16 reg_idx; /* HW register index of the ring */
- u16 next_to_alloc;
union {
struct libeth_fqe *rx_fqes;
struct xdp_buff **xdp_buf;
};
- /* CL2 - 2nd cacheline starts here */
- struct libeth_fqe *hdr_fqes;
+ u16 count; /* Number of descriptors */
+ u8 ptp_rx;
+
+ u8 flags;
+#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
+#define ICE_RX_FLAGS_MULTIDEV BIT(3)
+#define ICE_RX_FLAGS_RING_GCS BIT(4)
+
+ u32 truesize;
+
struct page_pool *hdr_pp;
+ struct libeth_fqe *hdr_fqes;
+
+ struct bpf_prog *xdp_prog;
+ struct ice_tx_ring *xdp_ring;
+ struct xsk_buff_pool *xsk_pool;
+
+ /* stats structs */
+ struct ice_ring_stats *ring_stats;
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
+
+ u32 hdr_truesize;
+
+ struct xdp_rxq_info xdp_rxq;
+ __cacheline_group_end_aligned(read_mostly);
+ __cacheline_group_begin_aligned(read_write);
union {
struct libeth_xdp_buff_stash xdp;
struct libeth_xdp_buff *xsk;
};
-
- /* CL3 - 3rd cacheline starts here */
union {
struct ice_pkt_ctx pkt_ctx;
struct {
@@ -271,75 +317,78 @@ struct ice_rx_ring {
__be16 vlan_proto;
};
};
- struct bpf_prog *xdp_prog;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
+ __cacheline_group_end_aligned(read_write);
- u32 hdr_truesize;
- u32 truesize;
-
- /* stats structs */
- struct ice_ring_stats *ring_stats;
-
+ __cacheline_group_begin_aligned(cold);
struct rcu_head rcu; /* to avoid race on free */
- /* CL4 - 4th cacheline starts here */
+ struct ice_vsi *vsi; /* Backreference to associated VSI */
struct ice_channel *ch;
- struct ice_tx_ring *xdp_ring;
- struct ice_rx_ring *next; /* pointer to next ring in q_vector */
- struct xsk_buff_pool *xsk_pool;
- u16 rx_hdr_len;
- u16 rx_buf_len;
+
dma_addr_t dma; /* physical address of ring */
+ u16 q_index; /* Queue number of ring */
+ u16 reg_idx; /* HW register index of the ring */
u8 dcb_tc; /* Traffic class of ring */
- u8 ptp_rx;
-#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
-#define ICE_RX_FLAGS_MULTIDEV BIT(3)
-#define ICE_RX_FLAGS_RING_GCS BIT(4)
- u8 flags;
- /* CL5 - 5th cacheline starts here */
- struct xdp_rxq_info xdp_rxq;
+
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ __cacheline_group_end_aligned(cold);
} ____cacheline_internodealigned_in_smp;
struct ice_tx_ring {
- /* CL1 - 1st cacheline starts here */
- struct ice_tx_ring *next; /* pointer to next ring in q_vector */
+ __cacheline_group_begin_aligned(read_mostly);
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
u8 __iomem *tail;
struct ice_tx_buf *tx_buf;
+
struct ice_q_vector *q_vector; /* Backreference to associated vector */
struct net_device *netdev; /* netdev ring maps to */
struct ice_vsi *vsi; /* Backreference to associated VSI */
- /* CL2 - 2nd cacheline starts here */
- dma_addr_t dma; /* physical address of ring */
- struct xsk_buff_pool *xsk_pool;
- u16 next_to_use;
- u16 next_to_clean;
- u16 q_handle; /* Queue handle per TC */
- u16 reg_idx; /* HW register index of the ring */
+
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
- u16 xdp_tx_active;
+
+ u8 flags;
+#define ICE_TX_FLAGS_RING_XDP BIT(0)
+#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
+#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
+#define ICE_TX_FLAGS_TXTIME BIT(3)
+
+ struct xsk_buff_pool *xsk_pool;
+
/* stats structs */
struct ice_ring_stats *ring_stats;
- /* CL3 - 3rd cacheline starts here */
+ struct ice_tx_ring *next; /* pointer to next ring in q_vector */
+
+ struct ice_tstamp_ring *tstamp_ring;
+ struct ice_ptp_tx *tx_tstamps;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u16 xdp_tx_active;
+ spinlock_t tx_lock;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
struct rcu_head rcu; /* to avoid race on free */
DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
struct ice_channel *ch;
- struct ice_ptp_tx *tx_tstamps;
- spinlock_t tx_lock;
- u32 txq_teid; /* Added Tx queue TEID */
- /* CL4 - 4th cacheline starts here */
- struct ice_tstamp_ring *tstamp_ring;
-#define ICE_TX_FLAGS_RING_XDP BIT(0)
-#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
-#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
-#define ICE_TX_FLAGS_TXTIME BIT(3)
- u8 flags;
+
+ dma_addr_t dma; /* physical address of ring */
+ u16 q_handle; /* Queue handle per TC */
+ u16 reg_idx; /* HW register index of the ring */
u8 dcb_tc; /* Traffic class of ring */
+
u16 quanta_prof_id;
+ u32 txq_teid; /* Added Tx queue TEID */
+ __cacheline_group_end_aligned(cold);
} ____cacheline_internodealigned_in_smp;
static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
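Editor's note: the ring structs above move from hand-numbered "CL1..CL5" cacheline comments to the generic cacheline grouping macros, which mark read-mostly, read-write, and cold regions so the layout can be asserted at build time and kept stable. An illustrative toy struct, not taken from the driver:

struct example_ring {
	__cacheline_group_begin_aligned(read_mostly);
	void *desc;		/* read on every packet, rarely written */
	u32 count;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u16 next_to_use;	/* updated constantly in the hotpath */
	u16 next_to_clean;
	__cacheline_group_end_aligned(read_write);
};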
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 956da38d63b0..e695a664e53d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -20,9 +20,6 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
rx_ring->next_to_use = val;
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = val;
-
/* QRX_TAIL will be updated with any tail value, but hardware ignores
* the lower 3 bits. This makes it so we only bump tail on meaningful
* boundaries. Also, this allows us to bump tail on intervals of 8 up to
@@ -480,7 +477,7 @@ dma_unmap:
return ICE_XDP_CONSUMED;
busy:
- xdp_ring->ring_stats->tx_stats.tx_busy++;
+ ice_stats_inc(xdp_ring->ring_stats, tx_busy);
return ICE_XDP_CONSUMED;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 6a3f10f7a53f..f17990b68b62 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -38,7 +38,7 @@ ice_is_non_eop(const struct ice_rx_ring *rx_ring,
if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
return false;
- rx_ring->ring_stats->rx_stats.non_eop_descs++;
+ ice_stats_inc(rx_ring->ring_stats, rx_non_eop_descs);
return true;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 6a2ec8389a8f..1e82f4c40b32 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -349,6 +349,12 @@ enum ice_clk_src {
NUM_ICE_CLK_SRC
};
+enum ice_synce_clk {
+ ICE_SYNCE_CLK0,
+ ICE_SYNCE_CLK1,
+ ICE_SYNCE_CLK_NUM
+};
+
struct ice_ts_func_info {
/* Function specific info */
enum ice_tspll_freq time_ref;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 989ff1fd9110..953e68ed0f9a 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -497,7 +497,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
return ICE_XDP_TX;
busy:
- xdp_ring->ring_stats->tx_stats.tx_busy++;
+ ice_stats_inc(xdp_ring->ring_stats, tx_busy);
return ICE_XDP_CONSUMED;
}
@@ -659,7 +659,7 @@ construct_skb:
xsk_buff_free(first);
first = NULL;
- rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
+ ice_stats_inc(rx_ring->ring_stats, rx_buf_failed);
continue;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 1bf7934d4e28..b206fba092c8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -8,6 +8,8 @@
struct idpf_adapter;
struct idpf_vport;
struct idpf_vport_max_q;
+struct idpf_q_vec_rsrc;
+struct idpf_rss_data;
#include <net/pkt_sched.h>
#include <linux/aer.h>
@@ -201,7 +203,8 @@ struct idpf_vport_max_q {
struct idpf_reg_ops {
void (*ctlq_reg_init)(struct idpf_adapter *adapter,
struct idpf_ctlq_create_info *cq);
- int (*intr_reg_init)(struct idpf_vport *vport);
+ int (*intr_reg_init)(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
void (*reset_reg_init)(struct idpf_adapter *adapter);
void (*trigger_reset)(struct idpf_adapter *adapter,
@@ -288,54 +291,88 @@ struct idpf_fsteer_fltr {
};
/**
- * struct idpf_vport - Handle for netdevices and queue resources
- * @num_txq: Number of allocated TX queues
- * @num_complq: Number of allocated completion queues
+ * struct idpf_q_vec_rsrc - handle for queue and vector resources
+ * @dev: device pointer for DMA mapping
+ * @q_vectors: array of queue vectors
+ * @q_vector_idxs: starting index of queue vectors
+ * @num_q_vectors: number of IRQ vectors allocated
+ * @noirq_v_idx: ID of the NOIRQ vector
+ * @noirq_dyn_ctl_ena: value to write to @noirq_dyn_ctl to enable it
+ * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
+ * @txq_grps: array of TX queue groups
* @txq_desc_count: TX queue descriptor count
- * @complq_desc_count: Completion queue descriptor count
- * @compln_clean_budget: Work budget for completion clean
- * @num_txq_grp: Number of TX queue groups
- * @txq_grps: Array of TX queue groups
- * @txq_model: Split queue or single queue queuing model
- * @txqs: Used only in hotpath to get to the right queue very fast
- * @crc_enable: Enable CRC insertion offload
- * @xdpsq_share: whether XDPSQ sharing is enabled
- * @num_xdp_txq: number of XDPSQs
+ * @complq_desc_count: completion queue descriptor count
+ * @txq_model: split queue or single queue queuing model
+ * @num_txq: number of allocated TX queues
+ * @num_complq: number of allocated completion queues
+ * @num_txq_grp: number of TX queue groups
* @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
- * @xdp_prog: installed XDP program
- * @num_rxq: Number of allocated RX queues
- * @num_bufq: Number of allocated buffer queues
+ * @num_rxq_grp: number of RX queue groups
+ * @rxq_model: splitq queue or single queue queuing model
+ * @rxq_grps: array of RX queue groups; the number of groups times the RX
+ * queues per group yields the total number of RX queues
+ * @num_rxq: number of allocated RX queues
+ * @num_bufq: number of allocated buffer queues
* @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
* to complete all buffer descriptors for all buffer queues in
* the worst case.
- * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
- * @bufq_desc_count: Buffer queue descriptor count
- * @num_rxq_grp: Number of RX queues in a group
- * @rxq_grps: Total number of RX groups. Number of groups * number of RX per
- * group will yield total number of RX queues.
- * @rxq_model: Splitq queue or single queue queuing model
- * @rx_ptype_lkup: Lookup table for ptypes on RX
+ * @bufq_desc_count: buffer queue descriptor count
+ * @num_bufqs_per_qgrp: buffer queues per RX queue in a given grouping
+ * @base_rxd: true if the driver should use base descriptors instead of flex
+ */
+struct idpf_q_vec_rsrc {
+ struct device *dev;
+ struct idpf_q_vector *q_vectors;
+ u16 *q_vector_idxs;
+ u16 num_q_vectors;
+ u16 noirq_v_idx;
+ u32 noirq_dyn_ctl_ena;
+ void __iomem *noirq_dyn_ctl;
+
+ struct idpf_txq_group *txq_grps;
+ u32 txq_desc_count;
+ u32 complq_desc_count;
+ u32 txq_model;
+ u16 num_txq;
+ u16 num_complq;
+ u16 num_txq_grp;
+ u16 xdp_txq_offset;
+
+ u16 num_rxq_grp;
+ u32 rxq_model;
+ struct idpf_rxq_group *rxq_grps;
+ u16 num_rxq;
+ u16 num_bufq;
+ u32 rxq_desc_count;
+ u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+ u8 num_bufqs_per_qgrp;
+ bool base_rxd;
+};
+
+/**
+ * struct idpf_vport - Handle for netdevices and queue resources
+ * @dflt_qv_rsrc: contains default queue and vector resources
+ * @txqs: Used only in hotpath to get to the right queue very fast
+ * @num_txq: Number of allocated TX queues
+ * @num_xdp_txq: number of XDPSQs
+ * @xdpsq_share: whether XDPSQ sharing is enabled
+ * @xdp_prog: installed XDP program
* @vdev_info: IDC vport device info pointer
* @adapter: back pointer to associated adapter
* @netdev: Associated net_device. Each vport should have one and only one
* associated netdev.
* @flags: See enum idpf_vport_flags
- * @vport_type: Default SRIOV, SIOV, etc.
+ * @compln_clean_budget: Work budget for completion clean
* @vport_id: Device given vport identifier
+ * @vport_type: Default SRIOV, SIOV, etc.
* @idx: Software index in adapter vports struct
- * @default_vport: Use this vport if one isn't specified
- * @base_rxd: True if the driver should use base descriptors instead of flex
- * @num_q_vectors: Number of IRQ vectors allocated
- * @q_vectors: Array of queue vectors
- * @q_vector_idxs: Starting index of queue vectors
- * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
- * @noirq_dyn_ctl_ena: value to write to the above to enable it
- * @noirq_v_idx: ID of the NOIRQ vector
* @max_mtu: device given max possible MTU
* @default_mac_addr: device will give a default MAC to use
* @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
* @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
* @port_stats: per port csum, header split, and other offload stats
+ * @default_vport: Use this vport if one isn't specified
+ * @crc_enable: Enable CRC insertion offload
* @link_up: True if link is up
* @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
* @tstamp_config: The Tx tstamp config
@@ -343,57 +380,31 @@ struct idpf_fsteer_fltr {
* @tstamp_stats: Tx timestamping statistics
*/
struct idpf_vport {
- u16 num_txq;
- u16 num_complq;
- u32 txq_desc_count;
- u32 complq_desc_count;
- u32 compln_clean_budget;
- u16 num_txq_grp;
- struct idpf_txq_group *txq_grps;
- u32 txq_model;
+ struct idpf_q_vec_rsrc dflt_qv_rsrc;
struct idpf_tx_queue **txqs;
- bool crc_enable;
-
- bool xdpsq_share;
+ u16 num_txq;
u16 num_xdp_txq;
- u16 xdp_txq_offset;
+ bool xdpsq_share;
struct bpf_prog *xdp_prog;
- u16 num_rxq;
- u16 num_bufq;
- u32 rxq_desc_count;
- u8 num_bufqs_per_qgrp;
- u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
- u16 num_rxq_grp;
- struct idpf_rxq_group *rxq_grps;
- u32 rxq_model;
- struct libeth_rx_pt *rx_ptype_lkup;
-
struct iidc_rdma_vport_dev_info *vdev_info;
struct idpf_adapter *adapter;
struct net_device *netdev;
DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
- u16 vport_type;
+ u32 compln_clean_budget;
u32 vport_id;
+ u16 vport_type;
u16 idx;
- bool default_vport;
- bool base_rxd;
-
- u16 num_q_vectors;
- struct idpf_q_vector *q_vectors;
- u16 *q_vector_idxs;
-
- void __iomem *noirq_dyn_ctl;
- u32 noirq_dyn_ctl_ena;
- u16 noirq_v_idx;
u16 max_mtu;
u8 default_mac_addr[ETH_ALEN];
u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
- struct idpf_port_stats port_stats;
+ struct idpf_port_stats port_stats;
+ bool default_vport;
+ bool crc_enable;
bool link_up;
struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
@@ -550,10 +561,37 @@ struct idpf_vector_lifo {
};
/**
+ * struct idpf_queue_id_reg_chunk - individual queue ID and register chunk
+ * @qtail_reg_start: queue tail register offset
+ * @qtail_reg_spacing: queue tail register spacing
+ * @type: queue type of the queues in the chunk
+ * @start_queue_id: starting queue ID in the chunk
+ * @num_queues: number of queues in the chunk
+ */
+struct idpf_queue_id_reg_chunk {
+ u64 qtail_reg_start;
+ u32 qtail_reg_spacing;
+ u32 type;
+ u32 start_queue_id;
+ u32 num_queues;
+};
+
+/**
+ * struct idpf_queue_id_reg_info - queue ID and register chunk info received
+ * over the mailbox
+ * @num_chunks: number of chunks
+ * @queue_chunks: array of chunks
+ */
+struct idpf_queue_id_reg_info {
+ u16 num_chunks;
+ struct idpf_queue_id_reg_chunk *queue_chunks;
+};
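Editor's note: a sketch of how a consumer might resolve a queue's tail register from the stored chunks. This is a hypothetical helper for illustration, not part of this patch:

static u64 idpf_find_qtail(const struct idpf_queue_id_reg_info *info,
			   u32 qid, u32 qtype)
{
	for (u16 i = 0; i < info->num_chunks; i++) {
		const struct idpf_queue_id_reg_chunk *c =
			&info->queue_chunks[i];

		if (c->type != qtype || qid < c->start_queue_id ||
		    qid >= c->start_queue_id + c->num_queues)
			continue;

		/* tail registers are spaced evenly within a chunk */
		return c->qtail_reg_start +
		       (u64)(qid - c->start_queue_id) * c->qtail_reg_spacing;
	}

	return 0;	/* not found */
}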
+
+/**
* struct idpf_vport_config - Vport configuration data
* @user_config: see struct idpf_vport_user_config_data
* @max_q: Maximum possible queues
- * @req_qs_chunks: Queue chunk data for requested queues
+ * @qid_reg_info: Struct to store the queue ID and register info
* @mac_filter_list_lock: Lock to protect mac filters
* @flow_steer_list_lock: Lock to protect fsteer filters
* @flags: See enum idpf_vport_config_flags
@@ -561,7 +599,7 @@ struct idpf_vector_lifo {
struct idpf_vport_config {
struct idpf_vport_user_config_data user_config;
struct idpf_vport_max_q max_q;
- struct virtchnl2_add_queues *req_qs_chunks;
+ struct idpf_queue_id_reg_info qid_reg_info;
spinlock_t mac_filter_list_lock;
spinlock_t flow_steer_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
@@ -603,6 +641,8 @@ struct idpf_vc_xn_manager;
* @vport_params_reqd: Vport params requested
* @vport_params_recvd: Vport params received
* @vport_ids: Array of device given vport identifiers
+ * @singleq_pt_lkup: Lookup table for singleq RX ptypes
+ * @splitq_pt_lkup: Lookup table for splitq RX ptypes
* @vport_config: Vport config parameters
* @max_vports: Maximum vports that can be allocated
* @num_alloc_vports: Current number of vports allocated
@@ -661,6 +701,9 @@ struct idpf_adapter {
struct virtchnl2_create_vport **vport_params_recvd;
u32 *vport_ids;
+ struct libeth_rx_pt *singleq_pt_lkup;
+ struct libeth_rx_pt *splitq_pt_lkup;
+
struct idpf_vport_config **vport_config;
u16 max_vports;
u16 num_alloc_vports;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 3a04a6bd0d7c..a4625638cf3f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -70,11 +70,13 @@ static void idpf_mb_intr_reg_init(struct idpf_adapter *adapter)
/**
* idpf_intr_reg_init - Initialize interrupt registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*/
-static int idpf_intr_reg_init(struct idpf_vport *vport)
+static int idpf_intr_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
- int num_vecs = vport->num_q_vectors;
+ u16 num_vecs = rsrc->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
u32 rx_itr, tx_itr, val;
@@ -86,15 +88,15 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
if (!reg_vals)
return -ENOMEM;
- num_regs = idpf_get_reg_intr_vecs(vport, reg_vals);
+ num_regs = idpf_get_reg_intr_vecs(adapter, reg_vals);
if (num_regs < num_vecs) {
err = -EINVAL;
goto free_reg_vals;
}
for (i = 0; i < num_vecs; i++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[i];
- u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[i];
+ u16 vec_id = rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
struct idpf_intr_reg *intr = &q_vector->intr_reg;
u32 spacing;
@@ -123,12 +125,12 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
/* Data vector for NOIRQ queues */
- val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
- vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+ val = reg_vals[rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ rsrc->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
val = PF_GLINT_DYN_CTL_WB_ON_ITR_M | PF_GLINT_DYN_CTL_INTENA_MSK_M |
FIELD_PREP(PF_GLINT_DYN_CTL_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
- vport->noirq_dyn_ctl_ena = val;
+ rsrc->noirq_dyn_ctl_ena = val;
free_reg_vals:
kfree(reg_vals);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 2efa3c08aba5..1d78a621d65b 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -18,7 +18,7 @@ static u32 idpf_get_rx_ring_count(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- num_rxq = vport->num_rxq;
+ num_rxq = vport->dflt_qv_rsrc.num_rxq;
idpf_vport_ctrl_unlock(netdev);
return num_rxq;
@@ -503,7 +503,7 @@ static int idpf_set_rxfh(struct net_device *netdev,
}
if (test_bit(IDPF_VPORT_UP, np->state))
- err = idpf_config_rss(vport);
+ err = idpf_config_rss(vport, rss_data);
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
@@ -644,8 +644,8 @@ static void idpf_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
- ring->rx_pending = vport->rxq_desc_count;
- ring->tx_pending = vport->txq_desc_count;
+ ring->rx_pending = vport->dflt_qv_rsrc.rxq_desc_count;
+ ring->tx_pending = vport->dflt_qv_rsrc.txq_desc_count;
kring->tcp_data_split = idpf_vport_get_hsplit(vport);
@@ -669,8 +669,9 @@ static int idpf_set_ringparam(struct net_device *netdev,
{
struct idpf_vport_user_config_data *config_data;
u32 new_rx_count, new_tx_count;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
- int i, err = 0;
+ int err = 0;
u16 idx;
idpf_vport_ctrl_lock(netdev);
@@ -704,8 +705,9 @@ static int idpf_set_ringparam(struct net_device *netdev,
netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
new_tx_count);
- if (new_tx_count == vport->txq_desc_count &&
- new_rx_count == vport->rxq_desc_count &&
+ rsrc = &vport->dflt_qv_rsrc;
+ if (new_tx_count == rsrc->txq_desc_count &&
+ new_rx_count == rsrc->rxq_desc_count &&
kring->tcp_data_split == idpf_vport_get_hsplit(vport))
goto unlock_mutex;
@@ -724,10 +726,10 @@ static int idpf_set_ringparam(struct net_device *netdev,
/* Since we adjusted the RX completion queue count, the RX buffer queue
* descriptor count needs to be adjusted as well
*/
- for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
- vport->bufq_desc_count[i] =
+ for (unsigned int i = 0; i < rsrc->num_bufqs_per_qgrp; i++)
+ rsrc->bufq_desc_count[i] =
IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
- vport->num_bufqs_per_qgrp);
+ rsrc->num_bufqs_per_qgrp);
err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);
@@ -1104,7 +1106,7 @@ static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
static void idpf_collect_queue_stats(struct idpf_vport *vport)
{
struct idpf_port_stats *pstats = &vport->port_stats;
- int i, j;
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
/* zero out port stats since they're actually tracked in per
* queue stats; this is only for reporting
@@ -1120,22 +1122,22 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
u64_stats_set(&pstats->tx_dma_map_errs, 0);
u64_stats_update_end(&pstats->stats_sync);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rxq_grp = &rsrc->rxq_grps[i];
u16 num_rxq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rxq_grp->splitq.num_rxq_sets;
else
num_rxq = rxq_grp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
struct idpf_rx_queue_stats *stats;
struct idpf_rx_queue *rxq;
unsigned int start;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
else
rxq = rxq_grp->singleq.rxqs[j];
@@ -1162,10 +1164,10 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
}
}
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
- for (j = 0; j < txq_grp->num_txq; j++) {
+ for (unsigned int j = 0; j < txq_grp->num_txq; j++) {
u64 linearize, qbusy, skb_drops, dma_map_errs;
struct idpf_tx_queue *txq = txq_grp->txqs[j];
struct idpf_tx_queue_stats *stats;
@@ -1208,9 +1210,9 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
unsigned int total = 0;
- unsigned int i, j;
bool is_splitq;
u16 qtype;
@@ -1228,12 +1230,13 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
idpf_collect_queue_stats(vport);
idpf_add_port_stats(vport, &data);
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ rsrc = &vport->dflt_qv_rsrc;
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
qtype = VIRTCHNL2_QUEUE_TYPE_TX;
- for (j = 0; j < txq_grp->num_txq; j++, total++) {
+ for (unsigned int j = 0; j < txq_grp->num_txq; j++, total++) {
struct idpf_tx_queue *txq = txq_grp->txqs[j];
if (!txq)
@@ -1253,10 +1256,10 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
total = 0;
- is_splitq = idpf_is_queue_model_split(vport->rxq_model);
+ is_splitq = idpf_is_queue_model_split(rsrc->rxq_model);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rxq_grp = &rsrc->rxq_grps[i];
u16 num_rxq;
qtype = VIRTCHNL2_QUEUE_TYPE_RX;
@@ -1266,7 +1269,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
else
num_rxq = rxq_grp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++, total++) {
+ for (unsigned int j = 0; j < num_rxq; j++, total++) {
struct idpf_rx_queue *rxq;
if (is_splitq)
@@ -1298,15 +1301,16 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
u32 q_num)
{
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
int q_grp, q_idx;
- if (!idpf_is_queue_model_split(vport->rxq_model))
- return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
+ return rsrc->rxq_grps->singleq.rxqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
- return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
+ return rsrc->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
}
/**
@@ -1319,14 +1323,15 @@ struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
u32 q_num)
{
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
int q_grp;
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
return vport->txqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
- return vport->txq_grps[q_grp].complq->q_vector;
+ return rsrc->txq_grps[q_grp].complq->q_vector;
}
/**
@@ -1363,7 +1368,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
u32 q_num)
{
const struct idpf_netdev_priv *np = netdev_priv(netdev);
- const struct idpf_vport *vport;
+ struct idpf_q_vec_rsrc *rsrc;
+ struct idpf_vport *vport;
int err = 0;
idpf_vport_ctrl_lock(netdev);
@@ -1372,16 +1378,17 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
- if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
+ rsrc = &vport->dflt_qv_rsrc;
+ if (q_num >= rsrc->num_rxq && q_num >= rsrc->num_txq) {
err = -EINVAL;
goto unlock_mutex;
}
- if (q_num < vport->num_rxq)
+ if (q_num < rsrc->num_rxq)
__idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
VIRTCHNL2_QUEUE_TYPE_RX);
- if (q_num < vport->num_txq)
+ if (q_num < rsrc->num_txq)
__idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
VIRTCHNL2_QUEUE_TYPE_TX);
@@ -1549,8 +1556,9 @@ static int idpf_set_coalesce(struct net_device *netdev,
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_user_config_data *user_config;
struct idpf_q_coalesce *q_coal;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
- int i, err = 0;
+ int err = 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
@@ -1560,14 +1568,15 @@ static int idpf_set_coalesce(struct net_device *netdev,
if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
- for (i = 0; i < vport->num_txq; i++) {
+ rsrc = &vport->dflt_qv_rsrc;
+ for (unsigned int i = 0; i < rsrc->num_txq; i++) {
q_coal = &user_config->q_coalesce[i];
err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
if (err)
goto unlock_mutex;
}
- for (i = 0; i < vport->num_rxq; i++) {
+ for (unsigned int i = 0; i < rsrc->num_rxq; i++) {
q_coal = &user_config->q_coalesce[i];
err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
if (err)
@@ -1748,6 +1757,7 @@ static void idpf_get_ts_stats(struct net_device *netdev,
struct ethtool_ts_stats *ts_stats)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
unsigned int start;
@@ -1763,8 +1773,9 @@ static void idpf_get_ts_stats(struct net_device *netdev,
if (!test_bit(IDPF_VPORT_UP, np->state))
goto exit;
- for (u16 i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ rsrc = &vport->dflt_qv_rsrc;
+ for (u16 i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
for (u16 j = 0; j < txq_grp->num_txq; j++) {
struct idpf_tx_queue *txq = txq_grp->txqs[j];
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 131a8121839b..94da5fbd56f1 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -545,7 +545,9 @@ static int idpf_del_mac_filter(struct idpf_vport *vport,
if (test_bit(IDPF_VPORT_UP, np->state)) {
int err;
- err = idpf_add_del_mac_filters(vport, np, false, async);
+ err = idpf_add_del_mac_filters(np->adapter, vport_config,
+ vport->default_mac_addr,
+ np->vport_id, false, async);
if (err)
return err;
}
@@ -614,7 +616,9 @@ static int idpf_add_mac_filter(struct idpf_vport *vport,
return err;
if (test_bit(IDPF_VPORT_UP, np->state))
- err = idpf_add_del_mac_filters(vport, np, true, async);
+ err = idpf_add_del_mac_filters(np->adapter, vport_config,
+ vport->default_mac_addr,
+ np->vport_id, true, async);
return err;
}
@@ -662,7 +666,8 @@ static void idpf_restore_mac_filters(struct idpf_vport *vport)
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
+ idpf_add_del_mac_filters(vport->adapter, vport_config,
+ vport->default_mac_addr, vport->vport_id,
true, false);
}
@@ -686,7 +691,8 @@ static void idpf_remove_mac_filters(struct idpf_vport *vport)
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
+ idpf_add_del_mac_filters(vport->adapter, vport_config,
+ vport->default_mac_addr, vport->vport_id,
false, false);
}
@@ -975,6 +981,10 @@ static void idpf_remove_features(struct idpf_vport *vport)
static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_queue_id_reg_info *chunks;
+ u32 vport_id = vport->vport_id;
if (!test_bit(IDPF_VPORT_UP, np->state))
return;
@@ -985,24 +995,26 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
netif_carrier_off(vport->netdev);
netif_tx_disable(vport->netdev);
- idpf_send_disable_vport_msg(vport);
+ chunks = &adapter->vport_config[vport->idx]->qid_reg_info;
+
+ idpf_send_disable_vport_msg(adapter, vport_id);
idpf_send_disable_queues_msg(vport);
- idpf_send_map_unmap_queue_vector_msg(vport, false);
+ idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id, false);
/* Normally we ask for queues in create_vport, but if the number of
* initially requested queues have changed, for example via ethtool
* set channels, we do delete queues and then add the queues back
* instead of deleting and reallocating the vport.
*/
if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
- idpf_send_delete_queues_msg(vport);
+ idpf_send_delete_queues_msg(adapter, chunks, vport_id);
idpf_remove_features(vport);
vport->link_up = false;
- idpf_vport_intr_deinit(vport);
- idpf_xdp_rxq_info_deinit_all(vport);
- idpf_vport_queues_rel(vport);
- idpf_vport_intr_rel(vport);
+ idpf_vport_intr_deinit(vport, rsrc);
+ idpf_xdp_rxq_info_deinit_all(rsrc);
+ idpf_vport_queues_rel(vport, rsrc);
+ idpf_vport_intr_rel(rsrc);
clear_bit(IDPF_VPORT_UP, np->state);
if (rtnl)
@@ -1046,9 +1058,6 @@ static void idpf_decfg_netdev(struct idpf_vport *vport)
struct idpf_adapter *adapter = vport->adapter;
u16 idx = vport->idx;
- kfree(vport->rx_ptype_lkup);
- vport->rx_ptype_lkup = NULL;
-
if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
adapter->vport_config[idx]->flags)) {
unregister_netdev(vport->netdev);
@@ -1065,6 +1074,7 @@ static void idpf_decfg_netdev(struct idpf_vport *vport)
*/
static void idpf_vport_rel(struct idpf_vport *vport)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
struct idpf_vector_info vec_info;
@@ -1073,12 +1083,12 @@ static void idpf_vport_rel(struct idpf_vport *vport)
u16 idx = vport->idx;
vport_config = adapter->vport_config[vport->idx];
- idpf_deinit_rss_lut(vport);
rss_data = &vport_config->user_config.rss_data;
+ idpf_deinit_rss_lut(rss_data);
kfree(rss_data->rss_key);
rss_data->rss_key = NULL;
- idpf_send_destroy_vport_msg(vport);
+ idpf_send_destroy_vport_msg(adapter, vport->vport_id);
/* Release all max queues allocated to the adapter's pool */
max_q.max_rxq = vport_config->max_q.max_rxq;
@@ -1089,24 +1099,21 @@ static void idpf_vport_rel(struct idpf_vport *vport)
/* Release all the allocated vectors on the stack */
vec_info.num_req_vecs = 0;
- vec_info.num_curr_vecs = vport->num_q_vectors;
+ vec_info.num_curr_vecs = rsrc->num_q_vectors;
vec_info.default_vport = vport->default_vport;
- idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info);
+ idpf_req_rel_vector_indexes(adapter, rsrc->q_vector_idxs, &vec_info);
- kfree(vport->q_vector_idxs);
- vport->q_vector_idxs = NULL;
+ kfree(rsrc->q_vector_idxs);
+ rsrc->q_vector_idxs = NULL;
+
+ idpf_vport_deinit_queue_reg_chunks(vport_config);
kfree(adapter->vport_params_recvd[idx]);
adapter->vport_params_recvd[idx] = NULL;
kfree(adapter->vport_params_reqd[idx]);
adapter->vport_params_reqd[idx] = NULL;
- if (adapter->vport_config[idx]) {
- kfree(adapter->vport_config[idx]->req_qs_chunks);
- adapter->vport_config[idx]->req_qs_chunks = NULL;
- }
- kfree(vport->rx_ptype_lkup);
- vport->rx_ptype_lkup = NULL;
+
kfree(vport);
adapter->num_alloc_vports--;
}
@@ -1155,7 +1162,7 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
*/
static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
{
- return idpf_is_queue_model_split(vport->rxq_model) &&
+ return idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model) &&
idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
IDPF_CAP_HSPLIT);
}
@@ -1224,6 +1231,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
{
struct idpf_rss_data *rss_data;
u16 idx = adapter->next_vport;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
u16 num_max_q;
int err;
@@ -1271,11 +1279,15 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
vport->default_vport = adapter->num_alloc_vports <
idpf_get_default_vports(adapter);
- vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
- if (!vport->q_vector_idxs)
+ rsrc = &vport->dflt_qv_rsrc;
+ rsrc->dev = &adapter->pdev->dev;
+ rsrc->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
+ if (!rsrc->q_vector_idxs)
goto free_vport;
- idpf_vport_init(vport, max_q);
+ err = idpf_vport_init(vport, max_q);
+ if (err)
+ goto free_vector_idxs;
/* LUT and key are both initialized here. Key is not strictly dependent
* on how many queues we have. If we change number of queues and soft
@@ -1286,13 +1298,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
if (!rss_data->rss_key)
- goto free_vector_idxs;
+ goto free_qreg_chunks;
- /* Initialize default rss key */
+ /* Initialize default RSS key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
- /* Initialize default rss LUT */
- err = idpf_init_rss_lut(vport);
+ /* Initialize default RSS LUT */
+ err = idpf_init_rss_lut(vport, rss_data);
if (err)
goto free_rss_key;
@@ -1308,8 +1320,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
free_rss_key:
kfree(rss_data->rss_key);
+free_qreg_chunks:
+ idpf_vport_deinit_queue_reg_chunks(adapter->vport_config[idx]);
free_vector_idxs:
- kfree(vport->q_vector_idxs);
+ kfree(rsrc->q_vector_idxs);
free_vport:
kfree(vport);
@@ -1346,7 +1360,8 @@ void idpf_statistics_task(struct work_struct *work)
struct idpf_vport *vport = adapter->vports[i];
if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
- idpf_send_get_stats_msg(vport);
+ idpf_send_get_stats_msg(netdev_priv(vport->netdev),
+ &vport->port_stats);
}
queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
@@ -1369,7 +1384,7 @@ void idpf_mbx_task(struct work_struct *work)
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
usecs_to_jiffies(300));
- idpf_recv_mb_msg(adapter);
+ idpf_recv_mb_msg(adapter, adapter->hw.arq);
}
/**
@@ -1417,9 +1432,10 @@ static void idpf_restore_features(struct idpf_vport *vport)
*/
static int idpf_set_real_num_queues(struct idpf_vport *vport)
{
- int err, txq = vport->num_txq - vport->num_xdp_txq;
+ int err, txq = vport->dflt_qv_rsrc.num_txq - vport->num_xdp_txq;
- err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
+ err = netif_set_real_num_rx_queues(vport->netdev,
+ vport->dflt_qv_rsrc.num_rxq);
if (err)
return err;
@@ -1429,10 +1445,8 @@ static int idpf_set_real_num_queues(struct idpf_vport *vport)
/**
* idpf_up_complete - Complete interface up sequence
* @vport: virtual port structure
- *
- * Returns 0 on success, negative on failure.
*/
-static int idpf_up_complete(struct idpf_vport *vport)
+static void idpf_up_complete(struct idpf_vport *vport)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
@@ -1442,30 +1456,26 @@ static int idpf_up_complete(struct idpf_vport *vport)
}
set_bit(IDPF_VPORT_UP, np->state);
-
- return 0;
}
/**
* idpf_rx_init_buf_tail - Write initial buffer ring tail value
- * @vport: virtual port struct
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
+static void idpf_rx_init_buf_tail(struct idpf_q_vec_rsrc *rsrc)
{
- int i, j;
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *grp = &vport->rxq_grps[i];
-
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
const struct idpf_buf_queue *q =
&grp->splitq.bufq_sets[j].bufq;
writel(q->next_to_alloc, q->tail);
}
} else {
- for (j = 0; j < grp->singleq.num_rxq; j++) {
+ for (unsigned int j = 0; j < grp->singleq.num_rxq; j++) {
const struct idpf_rx_queue *q =
grp->singleq.rxqs[j];
@@ -1483,7 +1493,12 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ struct idpf_queue_id_reg_info *chunks;
+ struct idpf_rss_data *rss_data;
+ u32 vport_id = vport->vport_id;
int err;
if (test_bit(IDPF_VPORT_UP, np->state))
@@ -1495,48 +1510,51 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
/* we do not allow interface up just yet */
netif_carrier_off(vport->netdev);
- err = idpf_vport_intr_alloc(vport);
+ err = idpf_vport_intr_alloc(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
vport->vport_id, err);
goto err_rtnl_unlock;
}
- err = idpf_vport_queues_alloc(vport);
+ err = idpf_vport_queues_alloc(vport, rsrc);
if (err)
goto intr_rel;
- err = idpf_vport_queue_ids_init(vport);
+ vport_config = adapter->vport_config[vport->idx];
+ chunks = &vport_config->qid_reg_info;
+
+ err = idpf_vport_queue_ids_init(vport, rsrc, chunks);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- err = idpf_vport_intr_init(vport);
+ err = idpf_vport_intr_init(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- err = idpf_queue_reg_init(vport);
+ err = idpf_queue_reg_init(vport, rsrc, chunks);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
goto intr_deinit;
}
- err = idpf_rx_bufs_init_all(vport);
+ err = idpf_rx_bufs_init_all(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
goto intr_deinit;
}
- idpf_rx_init_buf_tail(vport);
+ idpf_rx_init_buf_tail(rsrc);
- err = idpf_xdp_rxq_info_init_all(vport);
+ err = idpf_xdp_rxq_info_init_all(rsrc);
if (err) {
netdev_err(vport->netdev,
"Failed to initialize XDP RxQ info for vport %u: %pe\n",
@@ -1544,16 +1562,17 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
goto intr_deinit;
}
- idpf_vport_intr_ena(vport);
+ idpf_vport_intr_ena(vport, rsrc);
- err = idpf_send_config_queues_msg(vport);
+ err = idpf_send_config_queues_msg(adapter, rsrc, vport_id);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
vport->vport_id, err);
goto rxq_deinit;
}
- err = idpf_send_map_unmap_queue_vector_msg(vport, true);
+ err = idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id,
+ true);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
vport->vport_id, err);
@@ -1567,7 +1586,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
goto unmap_queue_vectors;
}
- err = idpf_send_enable_vport_msg(vport);
+ err = idpf_send_enable_vport_msg(adapter, vport_id);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n",
vport->vport_id, err);
@@ -1577,19 +1596,15 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
idpf_restore_features(vport);
- err = idpf_config_rss(vport);
+ rss_data = &vport_config->user_config.rss_data;
+ err = idpf_config_rss(vport, rss_data);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure RSS for vport %u: %d\n",
vport->vport_id, err);
goto disable_vport;
}
- err = idpf_up_complete(vport);
- if (err) {
- dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
- vport->vport_id, err);
- goto disable_vport;
- }
+ idpf_up_complete(vport);
if (rtnl)
rtnl_unlock();
@@ -1597,19 +1612,19 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
return 0;
disable_vport:
- idpf_send_disable_vport_msg(vport);
+ idpf_send_disable_vport_msg(adapter, vport_id);
disable_queues:
idpf_send_disable_queues_msg(vport);
unmap_queue_vectors:
- idpf_send_map_unmap_queue_vector_msg(vport, false);
+ idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id, false);
rxq_deinit:
- idpf_xdp_rxq_info_deinit_all(vport);
+ idpf_xdp_rxq_info_deinit_all(rsrc);
intr_deinit:
- idpf_vport_intr_deinit(vport);
+ idpf_vport_intr_deinit(vport, rsrc);
queues_rel:
- idpf_vport_queues_rel(vport);
+ idpf_vport_queues_rel(vport, rsrc);
intr_rel:
- idpf_vport_intr_rel(vport);
+ idpf_vport_intr_rel(rsrc);
err_rtnl_unlock:
if (rtnl)
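The reworked idpf_vport_open() keeps the kernel's usual goto-unwind ladder: every acquisition step adds a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A minimal standalone sketch of the pattern (not idpf code; all names are hypothetical):

#include <stdlib.h>

struct port { void *intr; void *queues; };

static int config_step(void) { return -1; }     /* stands in for a later step */

static int port_open(struct port *p)
{
        int err;

        p->intr = malloc(64);                   /* intr_alloc() analogue */
        if (!p->intr)
                return -1;

        p->queues = malloc(64);                 /* queues_alloc() analogue */
        if (!p->queues) {
                err = -1;
                goto intr_rel;
        }

        err = config_step();                    /* any later step that can fail */
        if (err)
                goto queues_rel;

        return 0;

queues_rel:                                     /* unwind in reverse order */
        free(p->queues);
intr_rel:
        free(p->intr);
        return err;
}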
@@ -1667,10 +1682,6 @@ void idpf_init_task(struct work_struct *work)
goto unwind_vports;
}
- err = idpf_send_get_rx_ptype_msg(vport);
- if (err)
- goto unwind_vports;
-
index = vport->idx;
vport_config = adapter->vport_config[index];
@@ -1996,9 +2007,13 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state);
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ struct idpf_q_vec_rsrc *new_rsrc;
+ u32 vport_id = vport->vport_id;
struct idpf_vport *new_vport;
- int err;
+ int err, tmp_err = 0;
/* If the system is low on memory, we can end up in bad state if we
* free all the memory for queue resources and try to allocate them
@@ -2023,16 +2038,18 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
*/
memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));
+ new_rsrc = &new_vport->dflt_qv_rsrc;
+
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
case IDPF_SR_Q_CHANGE:
- err = idpf_vport_adjust_qs(new_vport);
+ err = idpf_vport_adjust_qs(new_vport, new_rsrc);
if (err)
goto free_vport;
break;
case IDPF_SR_Q_DESC_CHANGE:
/* Update queue parameters before allocating resources */
- idpf_vport_calc_num_q_desc(new_vport);
+ idpf_vport_calc_num_q_desc(new_vport, new_rsrc);
break;
case IDPF_SR_MTU_CHANGE:
idpf_idc_vdev_mtu_event(vport->vdev_info,
@@ -2046,41 +2063,40 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto free_vport;
}
+ vport_config = adapter->vport_config[vport->idx];
+
if (!vport_is_up) {
- idpf_send_delete_queues_msg(vport);
+ idpf_send_delete_queues_msg(adapter, &vport_config->qid_reg_info,
+ vport_id);
} else {
set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
idpf_vport_stop(vport, false);
}
- /* We're passing in vport here because we need its wait_queue
- * to send a message and it should be getting all the vport
- * config data out of the adapter but we need to be careful not
- * to add code to add_queues to change the vport config within
- * vport itself as it will be wiped with a memcpy later.
- */
- err = idpf_send_add_queues_msg(vport, new_vport->num_txq,
- new_vport->num_complq,
- new_vport->num_rxq,
- new_vport->num_bufq);
+ err = idpf_send_add_queues_msg(adapter, vport_config, new_rsrc,
+ vport_id);
if (err)
goto err_reset;
- /* Same comment as above regarding avoiding copying the wait_queues and
- * mutexes applies here. We do not want to mess with those if possible.
+ /* Avoid copying the wait_queues and mutexes. We do not want to mess
+ * with those if possible.
*/
memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));
if (reset_cause == IDPF_SR_Q_CHANGE)
- idpf_vport_alloc_vec_indexes(vport);
+ idpf_vport_alloc_vec_indexes(vport, &vport->dflt_qv_rsrc);
err = idpf_set_real_num_queues(vport);
if (err)
goto err_open;
if (reset_cause == IDPF_SR_Q_CHANGE &&
- !netif_is_rxfh_configured(vport->netdev))
- idpf_fill_dflt_rss_lut(vport);
+ !netif_is_rxfh_configured(vport->netdev)) {
+ struct idpf_rss_data *rss_data;
+
+ rss_data = &vport_config->user_config.rss_data;
+ idpf_fill_dflt_rss_lut(vport, rss_data);
+ }
if (vport_is_up)
err = idpf_vport_open(vport, false);
@@ -2088,11 +2104,11 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto free_vport;
err_reset:
- idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
- vport->num_rxq, vport->num_bufq);
+ tmp_err = idpf_send_add_queues_msg(adapter, vport_config, rsrc,
+ vport_id);
err_open:
- if (vport_is_up)
+ if (!tmp_err && vport_is_up)
idpf_vport_open(vport, false);
free_vport:
@@ -2258,7 +2274,12 @@ static int idpf_set_features(struct net_device *netdev,
* the HW when the interface is brought up.
*/
if (test_bit(IDPF_VPORT_UP, np->state)) {
- err = idpf_config_rss(vport);
+ struct idpf_vport_config *vport_config;
+ struct idpf_rss_data *rss_data;
+
+ vport_config = adapter->vport_config[vport->idx];
+ rss_data = &vport_config->user_config.rss_data;
+ err = idpf_config_rss(vport, rss_data);
if (err)
goto unlock_mutex;
}
@@ -2272,8 +2293,13 @@ static int idpf_set_features(struct net_device *netdev,
}
if (changed & NETIF_F_LOOPBACK) {
+ bool loopback_ena;
+
netdev->features ^= NETIF_F_LOOPBACK;
- err = idpf_send_ena_dis_loopback_msg(vport);
+ loopback_ena = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
+
+ err = idpf_send_ena_dis_loopback_msg(adapter, vport->vport_id,
+ loopback_ena);
}
unlock_mutex:
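A recurring shape in these hunks: instead of letting idpf_config_rss() dig the RSS state out of the vport, the callers resolve &vport_config->user_config.rss_data themselves and hand over only that. A hedged sketch of the narrowing, with simplified hypothetical types:

/* Before: the helper reaches through the aggregate for one field. */
struct rss_data { unsigned int lut_size; };
struct vport_config { struct rss_data rss_data; };
struct vport { int idx; struct vport_config *cfg; };

static int config_rss_old(struct vport *v)
{
        return v->cfg->rss_data.lut_size ? 0 : -1;
}

/* After: the dependency is explicit in the signature, and the helper can
 * be reused with any rss_data instance (e.g. during a soft reset).
 */
static int config_rss_new(struct rss_data *rss)
{
        return rss->lut_size ? 0 : -1;
}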
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index 0a8b50350b86..4a805a9541f0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -384,15 +384,17 @@ static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter)
WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies);
idpf_for_each_vport(adapter, vport) {
+ struct idpf_q_vec_rsrc *rsrc;
bool split;
- if (!vport || !vport->rxq_grps)
+ if (!vport || !vport->dflt_qv_rsrc.rxq_grps)
continue;
- split = idpf_is_queue_model_split(vport->rxq_model);
+ rsrc = &vport->dflt_qv_rsrc;
+ split = idpf_is_queue_model_split(rsrc->rxq_model);
- for (u16 i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
idpf_ptp_update_phctime_rxq_grp(grp, split, systime);
}
@@ -681,9 +683,10 @@ int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
*/
static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
bool enable = true, splitq;
- splitq = idpf_is_queue_model_split(vport->rxq_model);
+ splitq = idpf_is_queue_model_split(rsrc->rxq_model);
if (rx_filter == HWTSTAMP_FILTER_NONE) {
enable = false;
@@ -692,8 +695,8 @@ static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
}
- for (u16 i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
struct idpf_rx_queue *rx_queue;
u16 j, num_rxq;
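The PTP hunks also show the series-wide counter cleanup: pre-declared "int i, j;" counters become loop-scoped declarations whose type matches the u16 bound, which the kernel permits since its move to gnu11. A standalone illustration (not driver code):

#include <stdint.h>

void zero_groups(uint16_t num_grp, int *grp)
{
        /* old style: "int i;" at function scope, signed against a u16 bound */

        /* new style: scoped to the loop, unsigned like the bound */
        for (uint16_t i = 0; i < num_grp; i++)
                grp[i] = 0;
}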
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index f58f616d87fc..376050308b06 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -19,6 +19,8 @@ LIBETH_SQE_CHECK_PRIV(u32);
* Make sure we don't exceed maximum scatter gather buffers for a single
* packet.
* TSO case has been handled earlier from idpf_features_check().
+ *
+ * Return: %true if skb exceeds max descriptors per packet, %false otherwise.
*/
static bool idpf_chk_linearize(const struct sk_buff *skb,
unsigned int max_bufs,
@@ -146,24 +148,22 @@ static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
/**
* idpf_tx_desc_rel_all - Free Tx Resources for All Queues
- * @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*
* Free all transmit software resources
*/
-static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
+static void idpf_tx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
- int i, j;
-
- if (!vport->txq_grps)
+ if (!rsrc->txq_grps)
return;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
- for (j = 0; j < txq_grp->num_txq; j++)
+ for (unsigned int j = 0; j < txq_grp->num_txq; j++)
idpf_tx_desc_rel(txq_grp->txqs[j]);
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(rsrc->txq_model))
idpf_compl_desc_rel(txq_grp->complq);
}
}
@@ -172,7 +172,7 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
* idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
* @tx_q: queue for which the buffers are allocated
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
{
@@ -196,7 +196,7 @@ static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
* @vport: vport to allocate resources for
* @tx_q: the tx ring to set up
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
struct idpf_tx_queue *tx_q)
@@ -263,7 +263,7 @@ err_alloc:
/**
* idpf_compl_desc_alloc - allocate completion descriptors
- * @vport: vport to allocate resources for
+ * @vport: virtual port private structure
* @complq: completion queue to set up
*
* Return: 0 on success, -errno on failure.
@@ -296,20 +296,21 @@ static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
/**
* idpf_tx_desc_alloc_all - allocate all queues Tx resources
* @vport: virtual port private structure
+ * @rsrc: pointer to queue and vector resources
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
+static int idpf_tx_desc_alloc_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int err = 0;
- int i, j;
/* Setup buffer queues. In the single queue model, buffer queues and
* completion queues are the same
*/
- for (i = 0; i < vport->num_txq_grp; i++) {
- for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
- struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ for (unsigned int j = 0; j < rsrc->txq_grps[i].num_txq; j++) {
+ struct idpf_tx_queue *txq = rsrc->txq_grps[i].txqs[j];
err = idpf_tx_desc_alloc(vport, txq);
if (err) {
@@ -320,11 +321,11 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
}
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
continue;
/* Setup completion queues */
- err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
+ err = idpf_compl_desc_alloc(vport, rsrc->txq_grps[i].complq);
if (err) {
pci_err(vport->adapter->pdev,
"Allocation for Tx Completion Queue %u failed\n",
@@ -335,7 +336,7 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
err_out:
if (err)
- idpf_tx_desc_rel_all(vport);
+ idpf_tx_desc_rel_all(rsrc);
return err;
}
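The error path above works only because idpf_tx_desc_rel_all() is safe on partially initialized state: it bails out if rsrc->txq_grps was never allocated and NULLs what it frees. The same defensive shape in a standalone sketch (hypothetical names):

#include <stdlib.h>

struct rsrc { unsigned int n; void **grps; };

static void rel_all(struct rsrc *r)
{
        if (!r->grps)                   /* nothing was allocated: no-op */
                return;

        for (unsigned int i = 0; i < r->n; i++)
                free(r->grps[i]);       /* free(NULL) is a no-op, too */

        free(r->grps);
        r->grps = NULL;                 /* safe to call rel_all() again */
}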
@@ -488,38 +489,38 @@ static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
/**
* idpf_rx_desc_rel_all - Free Rx Resources for All Queues
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*
* Free all rx queues resources
*/
-static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
+static void idpf_rx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
- struct device *dev = &vport->adapter->pdev->dev;
+ struct device *dev = rsrc->dev;
struct idpf_rxq_group *rx_qgrp;
u16 num_rxq;
- int i, j;
- if (!vport->rxq_grps)
+ if (!rsrc->rxq_grps)
return;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- rx_qgrp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ rx_qgrp = &rsrc->rxq_grps[i];
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
- for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
+ for (unsigned int j = 0; j < rx_qgrp->singleq.num_rxq; j++)
idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
VIRTCHNL2_QUEUE_MODEL_SINGLE);
continue;
}
num_rxq = rx_qgrp->splitq.num_rxq_sets;
- for (j = 0; j < num_rxq; j++)
+ for (unsigned int j = 0; j < num_rxq; j++)
idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
if (!rx_qgrp->splitq.bufq_sets)
continue;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
@@ -548,7 +549,7 @@ static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
* idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
* @bufq: ring to use
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
{
@@ -600,7 +601,7 @@ static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
* @bufq: buffer queue to post to
* @buf_id: buffer id to post
*
- * Returns false if buffer could not be allocated, true otherwise.
+ * Return: %false if buffer could not be allocated, %true otherwise.
*/
static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
{
@@ -649,7 +650,7 @@ static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
* @bufq: buffer queue to post working set to
* @working_set: number of buffers to put in working set
*
- * Returns true if @working_set bufs were posted successfully, false otherwise.
+ * Return: %true if @working_set bufs were posted successfully, %false otherwise.
*/
static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
u16 working_set)
@@ -718,7 +719,7 @@ static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
* idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
* @rxbufq: queue for which the buffers are allocated
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
{
@@ -746,7 +747,7 @@ rx_buf_alloc_all_out:
* @bufq: buffer queue to create page pool for
* @type: type of Rx buffers to allocate
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
enum libeth_fqe_type type)
@@ -779,26 +780,28 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
/**
* idpf_rx_bufs_init_all - Initialize all RX bufs
- * @vport: virtual port struct
+ * @vport: pointer to vport struct
+ * @rsrc: pointer to queue and vector resources
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_rx_bufs_init_all(struct idpf_vport *vport)
+int idpf_rx_bufs_init_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- bool split = idpf_is_queue_model_split(vport->rxq_model);
- int i, j, err;
+ bool split = idpf_is_queue_model_split(rsrc->rxq_model);
+ int err;
- idpf_xdp_copy_prog_to_rqs(vport, vport->xdp_prog);
+ idpf_xdp_copy_prog_to_rqs(rsrc, vport->xdp_prog);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 truesize = 0;
/* Allocate bufs for the rxq itself in singleq */
if (!split) {
int num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
struct idpf_rx_queue *q;
q = rx_qgrp->singleq.rxqs[j];
@@ -811,7 +814,7 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
}
/* Otherwise, allocate bufs for the buffer queues */
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
enum libeth_fqe_type type;
struct idpf_buf_queue *q;
@@ -836,7 +839,7 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
* @vport: vport to allocate resources for
* @rxq: Rx queue for which the resources are setup
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
struct idpf_rx_queue *rxq)
@@ -897,26 +900,28 @@ static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
/**
* idpf_rx_desc_alloc_all - allocate all RX queues resources
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
+static int idpf_rx_desc_alloc_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_rxq_group *rx_qgrp;
- int i, j, err;
u16 num_rxq;
+ int err;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- rx_qgrp = &vport->rxq_grps[i];
- if (idpf_is_queue_model_split(vport->rxq_model))
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ rx_qgrp = &rsrc->rxq_grps[i];
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
struct idpf_rx_queue *q;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
@@ -930,10 +935,10 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
}
}
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
continue;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
@@ -951,18 +956,18 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
return 0;
err_out:
- idpf_rx_desc_rel_all(vport);
+ idpf_rx_desc_rel_all(rsrc);
return err;
}
-static int idpf_init_queue_set(const struct idpf_queue_set *qs)
+static int idpf_init_queue_set(const struct idpf_vport *vport,
+ const struct idpf_queue_set *qs)
{
- const struct idpf_vport *vport = qs->vport;
bool splitq;
int err;
- splitq = idpf_is_queue_model_split(vport->rxq_model);
+ splitq = idpf_is_queue_model_split(qs->qv_rsrc->rxq_model);
for (u32 i = 0; i < qs->num; i++) {
const struct idpf_queue_ptr *q = &qs->qs[i];
@@ -1032,19 +1037,18 @@ static int idpf_init_queue_set(const struct idpf_queue_set *qs)
static void idpf_clean_queue_set(const struct idpf_queue_set *qs)
{
- const struct idpf_vport *vport = qs->vport;
- struct device *dev = vport->netdev->dev.parent;
+ const struct idpf_q_vec_rsrc *rsrc = qs->qv_rsrc;
for (u32 i = 0; i < qs->num; i++) {
const struct idpf_queue_ptr *q = &qs->qs[i];
switch (q->type) {
case VIRTCHNL2_QUEUE_TYPE_RX:
- idpf_xdp_rxq_info_deinit(q->rxq, vport->rxq_model);
- idpf_rx_desc_rel(q->rxq, dev, vport->rxq_model);
+ idpf_xdp_rxq_info_deinit(q->rxq, rsrc->rxq_model);
+ idpf_rx_desc_rel(q->rxq, rsrc->dev, rsrc->rxq_model);
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
- idpf_rx_desc_rel_bufq(q->bufq, dev);
+ idpf_rx_desc_rel_bufq(q->bufq, rsrc->dev);
break;
case VIRTCHNL2_QUEUE_TYPE_TX:
idpf_tx_desc_rel(q->txq);
@@ -1111,7 +1115,8 @@ static void idpf_qvec_ena_irq(struct idpf_q_vector *qv)
static struct idpf_queue_set *
idpf_vector_to_queue_set(struct idpf_q_vector *qv)
{
- bool xdp = qv->vport->xdp_txq_offset && !qv->num_xsksq;
+ u32 xdp_txq_offset = qv->vport->dflt_qv_rsrc.xdp_txq_offset;
+ bool xdp = xdp_txq_offset && !qv->num_xsksq;
struct idpf_vport *vport = qv->vport;
struct idpf_queue_set *qs;
u32 num;
@@ -1121,7 +1126,8 @@ idpf_vector_to_queue_set(struct idpf_q_vector *qv)
if (!num)
return NULL;
- qs = idpf_alloc_queue_set(vport, num);
+ qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
+ vport->vport_id, num);
if (!qs)
return NULL;
@@ -1147,12 +1153,12 @@ idpf_vector_to_queue_set(struct idpf_q_vector *qv)
qs->qs[num++].complq = qv->complq[i];
}
- if (!vport->xdp_txq_offset)
+ if (!xdp_txq_offset)
goto finalize;
if (xdp) {
for (u32 i = 0; i < qv->num_rxq; i++) {
- u32 idx = vport->xdp_txq_offset + qv->rx[i]->idx;
+ u32 idx = xdp_txq_offset + qv->rx[i]->idx;
qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
qs->qs[num++].txq = vport->txqs[idx];
@@ -1179,26 +1185,27 @@ finalize:
return qs;
}
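idpf_vector_to_queue_set() follows a count-then-fill discipline: size the set first, allocate once, then populate it. A generic standalone sketch of that discipline using a flexible array member; the names are illustrative, not the driver's:

#include <stdlib.h>

struct item { int type; void *q; };
struct set { unsigned int num; struct item items[]; };

static struct set *build_set(void *const *qs, unsigned int avail)
{
        unsigned int num = 0;

        for (unsigned int i = 0; i < avail; i++)        /* pass 1: count */
                if (qs[i])
                        num++;
        if (!num)
                return NULL;

        struct set *s = calloc(1, sizeof(*s) + num * sizeof(s->items[0]));
        if (!s)
                return NULL;

        s->num = num;
        num = 0;
        for (unsigned int i = 0; i < avail; i++)        /* pass 2: fill */
                if (qs[i])
                        s->items[num++].q = qs[i];

        return s;
}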
-static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid)
+static int idpf_qp_enable(const struct idpf_vport *vport,
+ const struct idpf_queue_set *qs, u32 qid)
{
- struct idpf_vport *vport = qs->vport;
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_q_vector *q_vector;
int err;
q_vector = idpf_find_rxq_vec(vport, qid);
- err = idpf_init_queue_set(qs);
+ err = idpf_init_queue_set(vport, qs);
if (err) {
netdev_err(vport->netdev, "Could not initialize queues in pair %u: %pe\n",
qid, ERR_PTR(err));
return err;
}
- if (!vport->xdp_txq_offset)
+ if (!rsrc->xdp_txq_offset)
goto config;
- q_vector->xsksq = kcalloc(DIV_ROUND_UP(vport->num_rxq_grp,
- vport->num_q_vectors),
+ q_vector->xsksq = kcalloc(DIV_ROUND_UP(rsrc->num_rxq_grp,
+ rsrc->num_q_vectors),
sizeof(*q_vector->xsksq), GFP_KERNEL);
if (!q_vector->xsksq)
return -ENOMEM;
@@ -1241,9 +1248,9 @@ config:
return 0;
}
-static int idpf_qp_disable(const struct idpf_queue_set *qs, u32 qid)
+static int idpf_qp_disable(const struct idpf_vport *vport,
+ const struct idpf_queue_set *qs, u32 qid)
{
- struct idpf_vport *vport = qs->vport;
struct idpf_q_vector *q_vector;
int err;
@@ -1288,30 +1295,28 @@ int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en)
if (!qs)
return -ENOMEM;
- return en ? idpf_qp_enable(qs, qid) : idpf_qp_disable(qs, qid);
+ return en ? idpf_qp_enable(vport, qs, qid) :
+ idpf_qp_disable(vport, qs, qid);
}
/**
* idpf_txq_group_rel - Release all resources for txq groups
- * @vport: vport to release txq groups on
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_txq_group_rel(struct idpf_vport *vport)
+static void idpf_txq_group_rel(struct idpf_q_vec_rsrc *rsrc)
{
- bool split, flow_sch_en;
- int i, j;
+ bool split;
- if (!vport->txq_grps)
+ if (!rsrc->txq_grps)
return;
- split = idpf_is_queue_model_split(vport->txq_model);
- flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
- VIRTCHNL2_CAP_SPLITQ_QSCHED);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
- for (j = 0; j < txq_grp->num_txq; j++) {
- if (flow_sch_en) {
+ for (unsigned int j = 0; j < txq_grp->num_txq; j++) {
+ if (idpf_queue_has(FLOW_SCH_EN, txq_grp->txqs[j])) {
kfree(txq_grp->txqs[j]->refillq);
txq_grp->txqs[j]->refillq = NULL;
}
@@ -1326,8 +1331,8 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
kfree(txq_grp->complq);
txq_grp->complq = NULL;
}
- kfree(vport->txq_grps);
- vport->txq_grps = NULL;
+ kfree(rsrc->txq_grps);
+ rsrc->txq_grps = NULL;
}
/**
@@ -1336,12 +1341,10 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
*/
static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
{
- int i, j;
-
- for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
+ for (unsigned int i = 0; i < rx_qgrp->splitq.num_bufq_sets; i++) {
struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
- for (j = 0; j < bufq_set->num_refillqs; j++) {
+ for (unsigned int j = 0; j < bufq_set->num_refillqs; j++) {
kfree(bufq_set->refillqs[j].ring);
bufq_set->refillqs[j].ring = NULL;
}
@@ -1352,23 +1355,20 @@ static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
/**
* idpf_rxq_group_rel - Release all resources for rxq groups
- * @vport: vport to release rxq groups on
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_rxq_group_rel(struct idpf_vport *vport)
+static void idpf_rxq_group_rel(struct idpf_q_vec_rsrc *rsrc)
{
- int i;
-
- if (!vport->rxq_grps)
+ if (!rsrc->rxq_grps)
return;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u16 num_rxq;
- int j;
- if (idpf_is_queue_model_split(vport->rxq_model)) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
num_rxq = rx_qgrp->splitq.num_rxq_sets;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
kfree(rx_qgrp->splitq.rxq_sets[j]);
rx_qgrp->splitq.rxq_sets[j] = NULL;
}
@@ -1378,41 +1378,44 @@ static void idpf_rxq_group_rel(struct idpf_vport *vport)
rx_qgrp->splitq.bufq_sets = NULL;
} else {
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
kfree(rx_qgrp->singleq.rxqs[j]);
rx_qgrp->singleq.rxqs[j] = NULL;
}
}
}
- kfree(vport->rxq_grps);
- vport->rxq_grps = NULL;
+ kfree(rsrc->rxq_grps);
+ rsrc->rxq_grps = NULL;
}
/**
* idpf_vport_queue_grp_rel_all - Release all queue groups
* @vport: vport to release queue groups for
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
+static void idpf_vport_queue_grp_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
- idpf_txq_group_rel(vport);
- idpf_rxq_group_rel(vport);
+ idpf_txq_group_rel(rsrc);
+ idpf_rxq_group_rel(rsrc);
}
/**
* idpf_vport_queues_rel - Free memory for all queues
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Free the memory allocated for queues associated to a vport
*/
-void idpf_vport_queues_rel(struct idpf_vport *vport)
+void idpf_vport_queues_rel(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- idpf_xdp_copy_prog_to_rqs(vport, NULL);
+ idpf_xdp_copy_prog_to_rqs(rsrc, NULL);
- idpf_tx_desc_rel_all(vport);
- idpf_rx_desc_rel_all(vport);
+ idpf_tx_desc_rel_all(rsrc);
+ idpf_rx_desc_rel_all(rsrc);
idpf_xdpsqs_put(vport);
- idpf_vport_queue_grp_rel_all(vport);
+ idpf_vport_queue_grp_rel_all(rsrc);
kfree(vport->txqs);
vport->txqs = NULL;
@@ -1421,29 +1424,31 @@ void idpf_vport_queues_rel(struct idpf_vport *vport)
/**
* idpf_vport_init_fast_path_txqs - Initialize fast path txq array
* @vport: vport to init txqs on
+ * @rsrc: pointer to queue and vector resources
*
* We get a queue index from skb->queue_mapping and we need a fast way to
* dereference the queue from queue groups. This allows us to quickly pull a
* txq based on a queue index.
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
+static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
struct work_struct *tstamp_task = &vport->tstamp_task;
- int i, j, k = 0;
+ int k = 0;
- vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
+ vport->txqs = kcalloc(rsrc->num_txq, sizeof(*vport->txqs),
GFP_KERNEL);
-
if (!vport->txqs)
return -ENOMEM;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
+ vport->num_txq = rsrc->num_txq;
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_grp = &rsrc->txq_grps[i];
- for (j = 0; j < tx_grp->num_txq; j++, k++) {
+ for (unsigned int j = 0; j < tx_grp->num_txq; j++, k++) {
vport->txqs[k] = tx_grp->txqs[j];
vport->txqs[k]->idx = k;
@@ -1462,16 +1467,18 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
* idpf_vport_init_num_qs - Initialize number of queues
* @vport: vport to initialize queues
* @vport_msg: data to be filled into vport
+ * @rsrc: pointer to queue and vector resources
*/
void idpf_vport_init_num_qs(struct idpf_vport *vport,
- struct virtchnl2_create_vport *vport_msg)
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_vport_user_config_data *config_data;
u16 idx = vport->idx;
config_data = &vport->adapter->vport_config[idx]->user_config;
- vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
- vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
+ rsrc->num_txq = le16_to_cpu(vport_msg->num_tx_q);
+ rsrc->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
/* number of txqs and rxqs in config data will be zero only in the
* driver load path, and we don't update them thereafter
*/
@@ -1480,74 +1487,75 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
}
- if (idpf_is_queue_model_split(vport->txq_model))
- vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
- if (idpf_is_queue_model_split(vport->rxq_model))
- vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
+ if (idpf_is_queue_model_split(rsrc->txq_model))
+ rsrc->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
+ rsrc->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
vport->xdp_prog = config_data->xdp_prog;
if (idpf_xdp_enabled(vport)) {
- vport->xdp_txq_offset = config_data->num_req_tx_qs;
+ rsrc->xdp_txq_offset = config_data->num_req_tx_qs;
vport->num_xdp_txq = le16_to_cpu(vport_msg->num_tx_q) -
- vport->xdp_txq_offset;
+ rsrc->xdp_txq_offset;
vport->xdpsq_share = libeth_xdpsq_shared(vport->num_xdp_txq);
} else {
- vport->xdp_txq_offset = 0;
+ rsrc->xdp_txq_offset = 0;
vport->num_xdp_txq = 0;
vport->xdpsq_share = false;
}
/* Adjust number of buffer queues per Rx queue group. */
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
- vport->num_bufqs_per_qgrp = 0;
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
+ rsrc->num_bufqs_per_qgrp = 0;
return;
}
- vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ rsrc->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
}
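The XDP bookkeeping above is plain arithmetic: stack-visible TX queues occupy the front of the queue array and XDP TX queues are appended behind them, so xdp_txq_offset equals the requested regular queue count and num_xdp_txq is the remainder. With 24 granted TX queues and 16 requested, queues 16..23 belong to XDP. A standalone sketch with hypothetical numbers:

#include <assert.h>

struct layout { unsigned int xdp_txq_offset, num_xdp_txq; };

static void split_txqs(struct layout *l, unsigned int total,
                       unsigned int requested, int xdp_enabled)
{
        if (xdp_enabled) {
                l->xdp_txq_offset = requested;          /* XDP queues follow */
                l->num_xdp_txq = total - requested;     /* the regular ones  */
        } else {
                l->xdp_txq_offset = 0;
                l->num_xdp_txq = 0;
        }
}

int main(void)
{
        struct layout l;

        split_txqs(&l, 24, 16, 1);
        assert(l.xdp_txq_offset == 16 && l.num_xdp_txq == 8);
        return 0;
}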
/**
* idpf_vport_calc_num_q_desc - Calculate number of queue groups
* @vport: vport to calculate q groups for
+ * @rsrc: pointer to queue and vector resources
*/
-void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_vport_user_config_data *config_data;
- int num_bufqs = vport->num_bufqs_per_qgrp;
+ u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
u32 num_req_txq_desc, num_req_rxq_desc;
u16 idx = vport->idx;
- int i;
config_data = &vport->adapter->vport_config[idx]->user_config;
num_req_txq_desc = config_data->num_req_txq_desc;
num_req_rxq_desc = config_data->num_req_rxq_desc;
- vport->complq_desc_count = 0;
+ rsrc->complq_desc_count = 0;
if (num_req_txq_desc) {
- vport->txq_desc_count = num_req_txq_desc;
- if (idpf_is_queue_model_split(vport->txq_model)) {
- vport->complq_desc_count = num_req_txq_desc;
- if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
- vport->complq_desc_count =
+ rsrc->txq_desc_count = num_req_txq_desc;
+ if (idpf_is_queue_model_split(rsrc->txq_model)) {
+ rsrc->complq_desc_count = num_req_txq_desc;
+ if (rsrc->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
+ rsrc->complq_desc_count =
IDPF_MIN_TXQ_COMPLQ_DESC;
}
} else {
- vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
- if (idpf_is_queue_model_split(vport->txq_model))
- vport->complq_desc_count =
+ rsrc->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
+ if (idpf_is_queue_model_split(rsrc->txq_model))
+ rsrc->complq_desc_count =
IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
}
if (num_req_rxq_desc)
- vport->rxq_desc_count = num_req_rxq_desc;
+ rsrc->rxq_desc_count = num_req_rxq_desc;
else
- vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
+ rsrc->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
- for (i = 0; i < num_bufqs; i++) {
- if (!vport->bufq_desc_count[i])
- vport->bufq_desc_count[i] =
- IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
+ for (unsigned int i = 0; i < num_bufqs; i++) {
+ if (!rsrc->bufq_desc_count[i])
+ rsrc->bufq_desc_count[i] =
+ IDPF_RX_BUFQ_DESC_COUNT(rsrc->rxq_desc_count,
num_bufqs);
}
}
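Restating the descriptor-count policy above: take the user-requested depth when one is set, fall back to the driver defaults otherwise, and in splitq mode never let the completion ring drop below a floor. A compact sketch; the constants are hypothetical stand-ins for the real IDPF_* macros in the driver headers:

#define DFLT_TXQ_DESC           512U
#define DFLT_COMPLQ_DESC        1024U
#define MIN_COMPLQ_DESC         256U

static void calc_tx_descs(unsigned int req, int splitq,
                          unsigned int *txq, unsigned int *complq)
{
        *complq = 0;

        if (req) {
                *txq = req;
                if (splitq)     /* clamp the completion ring to the floor */
                        *complq = req < MIN_COMPLQ_DESC ? MIN_COMPLQ_DESC : req;
        } else {
                *txq = DFLT_TXQ_DESC;
                if (splitq)
                        *complq = DFLT_COMPLQ_DESC;
        }
}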
@@ -1559,7 +1567,7 @@ void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
* @vport_msg: message to fill with data
* @max_q: vport max queue info
*
- * Return 0 on success, error value on failure.
+ * Return: 0 on success, error value on failure.
*/
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
struct virtchnl2_create_vport *vport_msg,
@@ -1636,54 +1644,54 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
/**
* idpf_vport_calc_num_q_groups - Calculate number of queue groups
- * @vport: vport to calculate q groups for
+ * @rsrc: pointer to queue and vector resources
*/
-void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
+void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc)
{
- if (idpf_is_queue_model_split(vport->txq_model))
- vport->num_txq_grp = vport->num_txq;
+ if (idpf_is_queue_model_split(rsrc->txq_model))
+ rsrc->num_txq_grp = rsrc->num_txq;
else
- vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
+ rsrc->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
- if (idpf_is_queue_model_split(vport->rxq_model))
- vport->num_rxq_grp = vport->num_rxq;
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
+ rsrc->num_rxq_grp = rsrc->num_rxq;
else
- vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
+ rsrc->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
}
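The group math reduces to one branch: splitq gives every queue its own group, while singleq collapses everything into a fixed default group count (so 16 splitq rxqs yield 16 groups). Sketch:

/* Splitq: one group per queue; singleq: a fixed default group count. */
static unsigned int num_groups(int split, unsigned int num_q,
                               unsigned int dflt_singleq_groups)
{
        return split ? num_q : dflt_singleq_groups;
}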
/**
* idpf_vport_calc_numq_per_grp - Calculate number of queues per group
- * @vport: vport to calculate queues for
+ * @rsrc: pointer to queue and vector resources
* @num_txq: return parameter for number of TX queues
* @num_rxq: return parameter for number of RX queues
*/
-static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
+static void idpf_vport_calc_numq_per_grp(struct idpf_q_vec_rsrc *rsrc,
u16 *num_txq, u16 *num_rxq)
{
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(rsrc->txq_model))
*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
else
- *num_txq = vport->num_txq;
+ *num_txq = rsrc->num_txq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
else
- *num_rxq = vport->num_rxq;
+ *num_rxq = rsrc->num_rxq;
}
/**
* idpf_rxq_set_descids - set the descids supported by this queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: rx queue for which descids are set
*
*/
-static void idpf_rxq_set_descids(const struct idpf_vport *vport,
+static void idpf_rxq_set_descids(struct idpf_q_vec_rsrc *rsrc,
struct idpf_rx_queue *q)
{
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
return;
- if (vport->base_rxd)
+ if (rsrc->base_rxd)
q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
else
q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
@@ -1692,44 +1700,45 @@ static void idpf_rxq_set_descids(const struct idpf_vport *vport,
/**
* idpf_txq_group_alloc - Allocate all txq group resources
* @vport: vport to allocate txq groups for
+ * @rsrc: pointer to queue and vector resources
* @num_txq: number of txqs to allocate for each group
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
+static int idpf_txq_group_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ u16 num_txq)
{
bool split, flow_sch_en;
- int i;
- vport->txq_grps = kcalloc(vport->num_txq_grp,
- sizeof(*vport->txq_grps), GFP_KERNEL);
- if (!vport->txq_grps)
+ rsrc->txq_grps = kcalloc(rsrc->num_txq_grp,
+ sizeof(*rsrc->txq_grps), GFP_KERNEL);
+ if (!rsrc->txq_grps)
return -ENOMEM;
- split = idpf_is_queue_model_split(vport->txq_model);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_SPLITQ_QSCHED);
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
struct idpf_adapter *adapter = vport->adapter;
- int j;
tx_qgrp->vport = vport;
tx_qgrp->num_txq = num_txq;
- for (j = 0; j < tx_qgrp->num_txq; j++) {
+ for (unsigned int j = 0; j < tx_qgrp->num_txq; j++) {
tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
GFP_KERNEL);
if (!tx_qgrp->txqs[j])
goto err_alloc;
}
- for (j = 0; j < tx_qgrp->num_txq; j++) {
+ for (unsigned int j = 0; j < tx_qgrp->num_txq; j++) {
struct idpf_tx_queue *q = tx_qgrp->txqs[j];
q->dev = &adapter->pdev->dev;
- q->desc_count = vport->txq_desc_count;
+ q->desc_count = rsrc->txq_desc_count;
q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
q->netdev = vport->netdev;
@@ -1764,7 +1773,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
if (!tx_qgrp->complq)
goto err_alloc;
- tx_qgrp->complq->desc_count = vport->complq_desc_count;
+ tx_qgrp->complq->desc_count = rsrc->complq_desc_count;
tx_qgrp->complq->txq_grp = tx_qgrp;
tx_qgrp->complq->netdev = vport->netdev;
tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
@@ -1776,7 +1785,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
return 0;
err_alloc:
- idpf_txq_group_rel(vport);
+ idpf_txq_group_rel(rsrc);
return -ENOMEM;
}
@@ -1784,30 +1793,34 @@ err_alloc:
/**
* idpf_rxq_group_alloc - Allocate all rxq group resources
* @vport: vport to allocate rxq groups for
+ * @rsrc: pointer to queue and vector resources
* @num_rxq: number of rxqs to allocate for each group
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
+static int idpf_rxq_group_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ u16 num_rxq)
{
- int i, k, err = 0;
- bool hs;
+ struct idpf_adapter *adapter = vport->adapter;
+ bool hs, rsc;
+ int err = 0;
- vport->rxq_grps = kcalloc(vport->num_rxq_grp,
- sizeof(struct idpf_rxq_group), GFP_KERNEL);
- if (!vport->rxq_grps)
+ rsrc->rxq_grps = kcalloc(rsrc->num_rxq_grp,
+ sizeof(struct idpf_rxq_group), GFP_KERNEL);
+ if (!rsrc->rxq_grps)
return -ENOMEM;
hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ rsc = idpf_is_feature_ena(vport, NETIF_F_GRO_HW);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- int j;
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
rx_qgrp->vport = vport;
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
rx_qgrp->singleq.num_rxq = num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
rx_qgrp->singleq.rxqs[j] =
kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
GFP_KERNEL);
@@ -1820,7 +1833,7 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
}
rx_qgrp->splitq.num_rxq_sets = num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
rx_qgrp->splitq.rxq_sets[j] =
kzalloc(sizeof(struct idpf_rxq_set),
GFP_KERNEL);
@@ -1830,25 +1843,27 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
}
}
- rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
+ rx_qgrp->splitq.bufq_sets = kcalloc(rsrc->num_bufqs_per_qgrp,
sizeof(struct idpf_bufq_set),
GFP_KERNEL);
if (!rx_qgrp->splitq.bufq_sets) {
err = -ENOMEM;
goto err_alloc;
}
+ rx_qgrp->splitq.num_bufq_sets = rsrc->num_bufqs_per_qgrp;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
int swq_size = sizeof(struct idpf_sw_queue);
struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- q->desc_count = vport->bufq_desc_count[j];
+ q->desc_count = rsrc->bufq_desc_count[j];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
idpf_queue_assign(HSPLIT_EN, q, hs);
+ idpf_queue_assign(RSC_EN, q, rsc);
bufq_set->num_refillqs = num_rxq;
bufq_set->refillqs = kcalloc(num_rxq, swq_size,
@@ -1857,12 +1872,12 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
err = -ENOMEM;
goto err_alloc;
}
- for (k = 0; k < bufq_set->num_refillqs; k++) {
+ for (unsigned int k = 0; k < bufq_set->num_refillqs; k++) {
struct idpf_sw_queue *refillq =
&bufq_set->refillqs[k];
refillq->desc_count =
- vport->bufq_desc_count[j];
+ rsrc->bufq_desc_count[j];
idpf_queue_set(GEN_CHK, refillq);
idpf_queue_set(RFL_GEN_CHK, refillq);
refillq->ring = kcalloc(refillq->desc_count,
@@ -1876,37 +1891,39 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
}
skip_splitq_rx_init:
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
struct idpf_rx_queue *q;
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
q = rx_qgrp->singleq.rxqs[j];
+ q->rx_ptype_lkup = adapter->singleq_pt_lkup;
goto setup_rxq;
}
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
&rx_qgrp->splitq.bufq_sets[0].refillqs[j];
- if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
+ if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
&rx_qgrp->splitq.bufq_sets[1].refillqs[j];
idpf_queue_assign(HSPLIT_EN, q, hs);
+ idpf_queue_assign(RSC_EN, q, rsc);
+ q->rx_ptype_lkup = adapter->splitq_pt_lkup;
setup_rxq:
- q->desc_count = vport->rxq_desc_count;
- q->rx_ptype_lkup = vport->rx_ptype_lkup;
+ q->desc_count = rsrc->rxq_desc_count;
q->bufq_sets = rx_qgrp->splitq.bufq_sets;
q->idx = (i * num_rxq) + j;
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
q->rx_max_pkt_size = vport->netdev->mtu +
LIBETH_RX_LL_LEN;
- idpf_rxq_set_descids(vport, q);
+ idpf_rxq_set_descids(rsrc, q);
}
}
err_alloc:
if (err)
- idpf_rxq_group_rel(vport);
+ idpf_rxq_group_rel(rsrc);
return err;
}
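In the splitq path above, each RX queue group carries num_bufqs_per_qgrp buffer-queue sets and every set allocates one refill queue per rxq in the group, so a group with B buffer queues and R rxqs owns B*R refill rings. A standalone sketch of that fan-out, with the error unwind simplified and all names hypothetical:

#include <stdlib.h>

struct refillq { unsigned int desc_count; void *ring; };
struct bufq_set { unsigned int num_refillqs; struct refillq *refillqs; };

static struct bufq_set *alloc_bufq_sets(unsigned int num_bufq,
                                        unsigned int num_rxq,
                                        unsigned int desc_count)
{
        struct bufq_set *sets = calloc(num_bufq, sizeof(*sets));

        if (!sets)
                return NULL;

        for (unsigned int b = 0; b < num_bufq; b++) {
                sets[b].num_refillqs = num_rxq;         /* one per rxq */
                sets[b].refillqs = calloc(num_rxq,
                                          sizeof(*sets[b].refillqs));
                if (!sets[b].refillqs)
                        goto err;

                for (unsigned int r = 0; r < num_rxq; r++)
                        sets[b].refillqs[r].desc_count = desc_count;
        }
        return sets;

err:
        for (unsigned int b = 0; b < num_bufq; b++)
                free(sets[b].refillqs);                 /* free(NULL) is ok */
        free(sets);
        return NULL;
}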
@@ -1914,28 +1931,30 @@ err_alloc:
/**
* idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
* @vport: vport with qgrps to allocate
+ * @rsrc: pointer to queue and vector resources
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
+static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
u16 num_txq, num_rxq;
int err;
- idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
+ idpf_vport_calc_numq_per_grp(rsrc, &num_txq, &num_rxq);
- err = idpf_txq_group_alloc(vport, num_txq);
+ err = idpf_txq_group_alloc(vport, rsrc, num_txq);
if (err)
goto err_out;
- err = idpf_rxq_group_alloc(vport, num_rxq);
+ err = idpf_rxq_group_alloc(vport, rsrc, num_rxq);
if (err)
goto err_out;
return 0;
err_out:
- idpf_vport_queue_grp_rel_all(vport);
+ idpf_vport_queue_grp_rel_all(rsrc);
return err;
}
@@ -1943,19 +1962,22 @@ err_out:
/**
* idpf_vport_queues_alloc - Allocate memory for all queues
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
+ *
+ * Allocate memory for queues associated with a vport.
*
- * Allocate memory for queues associated with a vport. Returns 0 on success,
- * negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_vport_queues_alloc(struct idpf_vport *vport)
+int idpf_vport_queues_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int err;
- err = idpf_vport_queue_grp_alloc_all(vport);
+ err = idpf_vport_queue_grp_alloc_all(vport, rsrc);
if (err)
goto err_out;
- err = idpf_vport_init_fast_path_txqs(vport);
+ err = idpf_vport_init_fast_path_txqs(vport, rsrc);
if (err)
goto err_out;
@@ -1963,18 +1985,18 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
if (err)
goto err_out;
- err = idpf_tx_desc_alloc_all(vport);
+ err = idpf_tx_desc_alloc_all(vport, rsrc);
if (err)
goto err_out;
- err = idpf_rx_desc_alloc_all(vport);
+ err = idpf_rx_desc_alloc_all(vport, rsrc);
if (err)
goto err_out;
return 0;
err_out:
- idpf_vport_queues_rel(vport);
+ idpf_vport_queues_rel(vport, rsrc);
return err;
}
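Unlike idpf_vport_open() with its per-step labels, idpf_vport_queues_alloc() funnels every failure into a single err_out that runs the full release, relying on each *_rel() tolerating never-allocated pieces. The shape, standalone with stub helpers (not the driver's functions):

struct rsrc { void *grps; void *descs; };

static int groups_alloc(struct rsrc *r) { r->grps = r; return 0; }
static int descs_alloc(struct rsrc *r)  { r->descs = r; return 0; }
static void queues_rel(struct rsrc *r)  { r->grps = r->descs = 0; }

static int queues_alloc(struct rsrc *r)
{
        int err;

        err = groups_alloc(r);
        if (err)
                goto err_out;

        err = descs_alloc(r);
        if (err)
                goto err_out;

        return 0;

err_out:
        queues_rel(r);          /* full release; safe on partial state */
        return err;
}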
@@ -2172,7 +2194,7 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
* @budget: Used to determine if we are in netpoll
* @cleaned: returns number of packets cleaned
*
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * Return: %true if there's any budget left (i.e. the clean is finished)
*/
static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
int *cleaned)
@@ -2398,7 +2420,7 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
}
/**
- * idpf_tx_splitq_has_room - check if enough Tx splitq resources are available
+ * idpf_txq_has_room - check if enough Tx splitq resources are available
* @tx_q: the queue to be checked
* @descs_needed: number of descriptors required for this packet
* @bufs_needed: number of Tx buffers required for this packet
@@ -2529,6 +2551,8 @@ unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
* idpf_tx_splitq_bump_ntu - adjust NTU and generation
* @txq: the tx ring to wrap
* @ntu: ring index to bump
+ *
+ * Return: the next ring index, wrapping around to 0 at the end of the ring
*/
static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
{
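The helper documented above is the standard ring-index advance: increment, and wrap to zero on reaching the descriptor count. The core operation in isolation:

#include <stdint.h>

/* Advance a ring index, wrapping to 0 at the ring size. */
static uint16_t bump_ntu(uint16_t ntu, uint16_t desc_count)
{
        return ++ntu == desc_count ? 0 : ntu;
}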
@@ -2797,7 +2821,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
* @skb: pointer to skb
* @off: pointer to struct that holds offload parameters
*
- * Returns error (negative) if TSO was requested but cannot be applied to the
+ * Return: error (negative) if TSO was requested but cannot be applied to the
* given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
*/
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
@@ -2875,6 +2899,8 @@ int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
*
* Since the TX buffer ring mimics the descriptor ring, update the tx buffer
* ring entry to reflect that this index is a context descriptor
+ *
+ * Return: pointer to the next descriptor
*/
static union idpf_flex_tx_ctx_desc *
idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
@@ -2893,6 +2919,8 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
* idpf_tx_drop_skb - free the SKB and bump tail if necessary
* @tx_q: queue to send buffer on
* @skb: pointer to skb
+ *
+ * Return: always NETDEV_TX_OK
*/
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
{
@@ -2994,7 +3022,7 @@ static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
* @skb: send buffer
* @tx_q: queue to send buffer on
*
- * Returns NETDEV_TX_OK if sent, else an error code
+ * Return: NETDEV_TX_OK if sent, else an error code
*/
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q)
@@ -3120,7 +3148,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
* @skb: send buffer
* @netdev: network interface device structure
*
- * Returns NETDEV_TX_OK if sent, else an error code
+ * Return: NETDEV_TX_OK if sent, else an error code
*/
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
{
@@ -3145,7 +3173,7 @@ netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model))
return idpf_tx_splitq_frame(skb, tx_q);
else
return idpf_tx_singleq_frame(skb, tx_q);
@@ -3270,10 +3298,10 @@ idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *
* @rx_desc: Receive descriptor
* @decoded: Decoded Rx packet type related fields
*
- * Return 0 on success and error code on failure
- *
* Populate the skb fields with the total number of RSC segments, RSC payload
* length and packet type.
+ *
+ * Return: 0 on success and error code on failure
*/
static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
@@ -3371,6 +3399,8 @@ idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, protocol, and
* other fields within the skb.
+ *
+ * Return: 0 on success and error code on failure
*/
static int
__idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
@@ -3465,6 +3495,7 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
* @stat_err_field: field from descriptor to test bits in
* @stat_err_bits: value to mask
*
+ * Return: %true if any of the given @stat_err_bits are set, %false otherwise.
*/
static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
const u8 stat_err_bits)
@@ -3476,8 +3507,8 @@ static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
* idpf_rx_splitq_is_eop - process handling of EOP buffers
* @rx_desc: Rx descriptor for current buffer
*
- * If the buffer is an EOP buffer, this function exits returning true,
- * otherwise return false indicating that this is in fact a non-EOP buffer.
+ * Return: %true if the buffer is an EOP buffer, %false otherwise.
*/
static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
@@ -3496,7 +3527,7 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de
* expensive overhead for IOMMU access this provides a means of avoiding
* it by maintaining the mapping of the page to the system.
*
- * Returns amount of work completed
+ * Return: amount of work completed
*/
static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
{
@@ -3626,7 +3657,7 @@ payload:
* @buf_id: buffer ID
* @buf_desc: Buffer queue descriptor
*
- * Return 0 on success and negative on failure.
+ * Return: 0 on success and negative on failure.
*/
static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
struct virtchnl2_splitq_rx_buf_desc *buf_desc)
@@ -3753,6 +3784,7 @@ static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
* @irq: interrupt number
* @data: pointer to a q_vector
*
+ * Return: always IRQ_HANDLED
*/
static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
void *data)
@@ -3767,39 +3799,34 @@ static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
/**
* idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
- * @vport: virtual port structure
- *
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_del_all(struct idpf_q_vec_rsrc *rsrc)
{
- u16 v_idx;
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
- netif_napi_del(&vport->q_vectors[v_idx].napi);
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++)
+ netif_napi_del(&rsrc->q_vectors[v_idx].napi);
}
/**
* idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
- * @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_dis_all(struct idpf_q_vec_rsrc *rsrc)
{
- int v_idx;
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
- napi_disable(&vport->q_vectors[v_idx].napi);
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++)
+ napi_disable(&rsrc->q_vectors[v_idx].napi);
}
/**
* idpf_vport_intr_rel - Free memory allocated for interrupt vectors
- * @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Free the memory allocated for interrupt vectors associated to a vport
*/
-void idpf_vport_intr_rel(struct idpf_vport *vport)
+void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc)
{
- for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[v_idx];
kfree(q_vector->xsksq);
q_vector->xsksq = NULL;
@@ -3813,8 +3840,8 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
q_vector->rx = NULL;
}
- kfree(vport->q_vectors);
- vport->q_vectors = NULL;
+ kfree(rsrc->q_vectors);
+ rsrc->q_vectors = NULL;
}
static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
@@ -3834,21 +3861,22 @@ static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
/**
* idpf_vport_intr_rel_irq - Free the IRQ association with the OS
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
+static void idpf_vport_intr_rel_irq(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
- int vector;
- for (vector = 0; vector < vport->num_q_vectors; vector++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ for (u16 vector = 0; vector < rsrc->num_q_vectors; vector++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[vector];
int irq_num, vidx;
/* free only the irqs that were actually requested */
if (!q_vector)
continue;
- vidx = vport->q_vector_idxs[vector];
+ vidx = rsrc->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
idpf_q_vector_set_napi(q_vector, false);
@@ -3858,22 +3886,23 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
/**
* idpf_vport_intr_dis_irq_all - Disable all interrupt
- * @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
+static void idpf_vport_intr_dis_irq_all(struct idpf_q_vec_rsrc *rsrc)
{
- struct idpf_q_vector *q_vector = vport->q_vectors;
- int q_idx;
+ struct idpf_q_vector *q_vector = rsrc->q_vectors;
- writel(0, vport->noirq_dyn_ctl);
+ writel(0, rsrc->noirq_dyn_ctl);
- for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
+ for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++)
writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
}
/**
* idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
* @q_vector: pointer to q_vector
+ *
+ * Return: value to be written back to HW to enable interrupt generation
*/
static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
{
@@ -4011,8 +4040,12 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/**
* idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
+ *
+ * Return: 0 on success, negative on failure
*/
-static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
+static int idpf_vport_intr_req_irq(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
const char *drv_name, *if_name, *vec_name;
@@ -4021,11 +4054,11 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
drv_name = dev_driver_string(&adapter->pdev->dev);
if_name = netdev_name(vport->netdev);
- for (vector = 0; vector < vport->num_q_vectors; vector++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ for (vector = 0; vector < rsrc->num_q_vectors; vector++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[vector];
char *name;
- vidx = vport->q_vector_idxs[vector];
+ vidx = rsrc->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
if (q_vector->num_rxq && q_vector->num_txq)
@@ -4055,9 +4088,9 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
free_q_irqs:
while (--vector >= 0) {
- vidx = vport->q_vector_idxs[vector];
+ vidx = rsrc->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- kfree(free_irq(irq_num, &vport->q_vectors[vector]));
+ kfree(free_irq(irq_num, &rsrc->q_vectors[vector]));
}
return err;
@@ -4086,15 +4119,16 @@ void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
/**
* idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
+static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
bool dynamic;
- int q_idx;
u16 itr;
- for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
- struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
+ for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++) {
+ struct idpf_q_vector *qv = &rsrc->q_vectors[q_idx];
/* Set the initial ITR values */
if (qv->num_txq) {
@@ -4117,19 +4151,21 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
idpf_vport_intr_update_itr_ena_irq(qv);
}
- writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl);
+ writel(rsrc->noirq_dyn_ctl_ena, rsrc->noirq_dyn_ctl);
}
/**
* idpf_vport_intr_deinit - Release all vector associations for the vport
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-void idpf_vport_intr_deinit(struct idpf_vport *vport)
+void idpf_vport_intr_deinit(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- idpf_vport_intr_dis_irq_all(vport);
- idpf_vport_intr_napi_dis_all(vport);
- idpf_vport_intr_napi_del_all(vport);
- idpf_vport_intr_rel_irq(vport);
+ idpf_vport_intr_dis_irq_all(rsrc);
+ idpf_vport_intr_napi_dis_all(rsrc);
+ idpf_vport_intr_napi_del_all(rsrc);
+ idpf_vport_intr_rel_irq(vport, rsrc);
}
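For illustration, a hypothetical caller-side sketch of the new teardown path (driver context assumed; dflt_qv_rsrc is the vport's default resource set used elsewhere in this series):

    struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;

    /* interrupts and NAPI first, then the vectors themselves */
    idpf_vport_intr_deinit(vport, rsrc);
    idpf_vport_intr_rel(rsrc);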
/**
@@ -4201,14 +4237,12 @@ static void idpf_init_dim(struct idpf_q_vector *qv)
/**
* idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
- * @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_ena_all(struct idpf_q_vec_rsrc *rsrc)
{
- int q_idx;
-
- for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
+ for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[q_idx];
idpf_init_dim(q_vector);
napi_enable(&q_vector->napi);
@@ -4221,7 +4255,7 @@ static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
* @budget: Used to determine if we are in netpoll
* @cleaned: returns number of packets cleaned
*
- * Returns false if clean is not complete else returns true
+ * Return: %false if clean is not complete, %true otherwise
*/
static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
int budget, int *cleaned)
@@ -4248,7 +4282,7 @@ static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
* @budget: Used to determine if we are in netpoll
* @cleaned: returns number of packets cleaned
*
- * Returns false if clean is not complete else returns true
+ * Return: %false if clean is not complete, %true otherwise
*/
static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
int *cleaned)
@@ -4291,6 +4325,8 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
* idpf_vport_splitq_napi_poll - NAPI handler
* @napi: struct from which you get q_vector
* @budget: budget provided by stack
+ *
+ * Return: how many packets were cleaned
*/
static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
{
@@ -4336,24 +4372,26 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
/**
* idpf_vport_intr_map_vector_to_qs - Map vectors to queues
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Mapping for vectors to queues
*/
-static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
+static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- u16 num_txq_grp = vport->num_txq_grp - vport->num_xdp_txq;
- bool split = idpf_is_queue_model_split(vport->rxq_model);
+ u16 num_txq_grp = rsrc->num_txq_grp - vport->num_xdp_txq;
+ bool split = idpf_is_queue_model_split(rsrc->rxq_model);
struct idpf_rxq_group *rx_qgrp;
struct idpf_txq_group *tx_qgrp;
- u32 i, qv_idx, q_index;
+ u32 q_index;
- for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
+ for (unsigned int i = 0, qv_idx = 0; i < rsrc->num_rxq_grp; i++) {
u16 num_rxq;
- if (qv_idx >= vport->num_q_vectors)
+ if (qv_idx >= rsrc->num_q_vectors)
qv_idx = 0;
- rx_qgrp = &vport->rxq_grps[i];
+ rx_qgrp = &rsrc->rxq_grps[i];
if (split)
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
@@ -4366,7 +4404,7 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
- q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector = &rsrc->q_vectors[qv_idx];
q_index = q->q_vector->num_rxq;
q->q_vector->rx[q_index] = q;
q->q_vector->num_rxq++;
@@ -4376,11 +4414,11 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
}
if (split) {
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_buf_queue *bufq;
bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
- bufq->q_vector = &vport->q_vectors[qv_idx];
+ bufq->q_vector = &rsrc->q_vectors[qv_idx];
q_index = bufq->q_vector->num_bufq;
bufq->q_vector->bufq[q_index] = bufq;
bufq->q_vector->num_bufq++;
@@ -4390,40 +4428,40 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
qv_idx++;
}
- split = idpf_is_queue_model_split(vport->txq_model);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
- for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
+ for (unsigned int i = 0, qv_idx = 0; i < num_txq_grp; i++) {
u16 num_txq;
- if (qv_idx >= vport->num_q_vectors)
+ if (qv_idx >= rsrc->num_q_vectors)
qv_idx = 0;
- tx_qgrp = &vport->txq_grps[i];
+ tx_qgrp = &rsrc->txq_grps[i];
num_txq = tx_qgrp->num_txq;
for (u32 j = 0; j < num_txq; j++) {
struct idpf_tx_queue *q;
q = tx_qgrp->txqs[j];
- q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector = &rsrc->q_vectors[qv_idx];
q->q_vector->tx[q->q_vector->num_txq++] = q;
}
if (split) {
struct idpf_compl_queue *q = tx_qgrp->complq;
- q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector = &rsrc->q_vectors[qv_idx];
q->q_vector->complq[q->q_vector->num_complq++] = q;
}
qv_idx++;
}
- for (i = 0; i < vport->num_xdp_txq; i++) {
+ for (unsigned int i = 0; i < vport->num_xdp_txq; i++) {
struct idpf_tx_queue *xdpsq;
struct idpf_q_vector *qv;
- xdpsq = vport->txqs[vport->xdp_txq_offset + i];
+ xdpsq = vport->txqs[rsrc->xdp_txq_offset + i];
if (!idpf_queue_has(XSK, xdpsq))
continue;
@@ -4438,10 +4476,14 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
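A self-contained toy model of the round-robin assignment performed by idpf_vport_intr_map_vector_to_qs() above (counts invented): queue groups are dealt out to vectors, wrapping whenever the vector index runs off the end.

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_rxq_grp = 8, num_q_vectors = 3;
        unsigned int qv_idx = 0;

        for (unsigned int i = 0; i < num_rxq_grp; i++) {
            /* wrap back to vector 0, as the driver loop does */
            if (qv_idx >= num_q_vectors)
                qv_idx = 0;
            printf("rxq group %u -> q_vector %u\n", i, qv_idx);
            qv_idx++;
        }
        return 0;
    }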
/**
* idpf_vport_intr_init_vec_idx - Initialize the vector indexes
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
- * Initialize vector indexes with values returened over mailbox
+ * Initialize vector indexes with values returned over mailbox.
+ *
+ * Return: 0 on success, negative on failure
*/
-static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
+static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_alloc_vectors *ac;
@@ -4450,10 +4492,10 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
ac = adapter->req_vec_chunks;
if (!ac) {
- for (i = 0; i < vport->num_q_vectors; i++)
- vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
+ for (i = 0; i < rsrc->num_q_vectors; i++)
+ rsrc->q_vectors[i].v_idx = rsrc->q_vector_idxs[i];
- vport->noirq_v_idx = vport->q_vector_idxs[i];
+ rsrc->noirq_v_idx = rsrc->q_vector_idxs[i];
return 0;
}
@@ -4465,10 +4507,10 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
- for (i = 0; i < vport->num_q_vectors; i++)
- vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
+ for (i = 0; i < rsrc->num_q_vectors; i++)
+ rsrc->q_vectors[i].v_idx = vecids[rsrc->q_vector_idxs[i]];
- vport->noirq_v_idx = vecids[vport->q_vector_idxs[i]];
+ rsrc->noirq_v_idx = vecids[rsrc->q_vector_idxs[i]];
kfree(vecids);
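To see the indirection concretely, here is a small self-contained C demo of the lookup above (all values invented): the mailbox returns absolute vector IDs in vecids[], each q_vector's relative slot in q_vector_idxs[] picks one, and the NOIRQ vector takes the slot immediately past the last q_vector, exactly as the post-loop assignment does.

    #include <stdio.h>

    int main(void)
    {
        unsigned int vecids[] = { 64, 65, 66, 67, 68 };  /* from mailbox */
        unsigned int q_vector_idxs[] = { 1, 2, 3, 4 };   /* per-vport slots */
        unsigned int num_q_vectors = 3;                  /* last slot is NOIRQ */
        unsigned int i;

        for (i = 0; i < num_q_vectors; i++)
            printf("q_vector %u -> v_idx %u\n", i, vecids[q_vector_idxs[i]]);

        /* like the driver, NOIRQ reuses the next unclaimed slot */
        printf("noirq -> v_idx %u\n", vecids[q_vector_idxs[i]]);
        return 0;
    }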
@@ -4478,21 +4520,24 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
/**
* idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int (*napi_poll)(struct napi_struct *napi, int budget);
- u16 v_idx, qv_idx;
int irq_num;
+ u16 qv_idx;
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(rsrc->txq_model))
napi_poll = idpf_vport_splitq_napi_poll;
else
napi_poll = idpf_vport_singleq_napi_poll;
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
- qv_idx = vport->q_vector_idxs[v_idx];
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[v_idx];
+
+ qv_idx = rsrc->q_vector_idxs[v_idx];
irq_num = vport->adapter->msix_entries[qv_idx].vector;
netif_napi_add_config(vport->netdev, &q_vector->napi,
@@ -4504,37 +4549,41 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
/**
* idpf_vport_intr_alloc - Allocate memory for interrupt vectors
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
+ *
+ * Allocate one q_vector per queue interrupt.
*
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
+ * Return: 0 on success, -ENOMEM if allocation fails.
*/
-int idpf_vport_intr_alloc(struct idpf_vport *vport)
+int idpf_vport_intr_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
struct idpf_vport_user_config_data *user_config;
struct idpf_q_vector *q_vector;
struct idpf_q_coalesce *q_coal;
- u32 complqs_per_vector, v_idx;
+ u32 complqs_per_vector;
u16 idx = vport->idx;
user_config = &vport->adapter->vport_config[idx]->user_config;
- vport->q_vectors = kcalloc(vport->num_q_vectors,
- sizeof(struct idpf_q_vector), GFP_KERNEL);
- if (!vport->q_vectors)
+
+ rsrc->q_vectors = kcalloc(rsrc->num_q_vectors,
+ sizeof(struct idpf_q_vector), GFP_KERNEL);
+ if (!rsrc->q_vectors)
return -ENOMEM;
- txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
- vport->num_q_vectors);
- rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
- vport->num_q_vectors);
- bufqs_per_vector = vport->num_bufqs_per_qgrp *
- DIV_ROUND_UP(vport->num_rxq_grp,
- vport->num_q_vectors);
- complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
- vport->num_q_vectors);
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
- q_vector = &vport->q_vectors[v_idx];
+ txqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
+ rsrc->num_q_vectors);
+ rxqs_per_vector = DIV_ROUND_UP(rsrc->num_rxq_grp,
+ rsrc->num_q_vectors);
+ bufqs_per_vector = rsrc->num_bufqs_per_qgrp *
+ DIV_ROUND_UP(rsrc->num_rxq_grp,
+ rsrc->num_q_vectors);
+ complqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
+ rsrc->num_q_vectors);
+
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
+ q_vector = &rsrc->q_vectors[v_idx];
q_coal = &user_config->q_coalesce[v_idx];
q_vector->vport = vport;
@@ -4556,7 +4605,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
if (!q_vector->rx)
goto error;
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
continue;
q_vector->bufq = kcalloc(bufqs_per_vector,
@@ -4571,7 +4620,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
if (!q_vector->complq)
goto error;
- if (!vport->xdp_txq_offset)
+ if (!rsrc->xdp_txq_offset)
continue;
q_vector->xsksq = kcalloc(rxqs_per_vector,
@@ -4584,7 +4633,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
return 0;
error:
- idpf_vport_intr_rel(vport);
+ idpf_vport_intr_rel(rsrc);
return -ENOMEM;
}
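A worked example of the per-vector sizing arithmetic above, as a standalone C program (counts invented): each array must hold the ceiling of queues over vectors, and buffer queues additionally scale by the per-group count.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int num_txq_grp = 10, num_rxq_grp = 10;
        unsigned int num_bufqs_per_qgrp = 2, num_q_vectors = 4;

        printf("txqs/vector:    %u\n", DIV_ROUND_UP(num_txq_grp, num_q_vectors));
        printf("rxqs/vector:    %u\n", DIV_ROUND_UP(num_rxq_grp, num_q_vectors));
        printf("bufqs/vector:   %u\n", num_bufqs_per_qgrp *
               DIV_ROUND_UP(num_rxq_grp, num_q_vectors));
        printf("complqs/vector: %u\n", DIV_ROUND_UP(num_txq_grp, num_q_vectors));
        return 0;
    }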
@@ -4592,72 +4641,74 @@ error:
/**
* idpf_vport_intr_init - Setup all vectors for the given vport
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
- * Returns 0 on success or negative on failure
+ * Return: 0 on success or negative on failure
*/
-int idpf_vport_intr_init(struct idpf_vport *vport)
+int idpf_vport_intr_init(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
int err;
- err = idpf_vport_intr_init_vec_idx(vport);
+ err = idpf_vport_intr_init_vec_idx(vport, rsrc);
if (err)
return err;
- idpf_vport_intr_map_vector_to_qs(vport);
- idpf_vport_intr_napi_add_all(vport);
+ idpf_vport_intr_map_vector_to_qs(vport, rsrc);
+ idpf_vport_intr_napi_add_all(vport, rsrc);
- err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
+ err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport, rsrc);
if (err)
goto unroll_vectors_alloc;
- err = idpf_vport_intr_req_irq(vport);
+ err = idpf_vport_intr_req_irq(vport, rsrc);
if (err)
goto unroll_vectors_alloc;
return 0;
unroll_vectors_alloc:
- idpf_vport_intr_napi_del_all(vport);
+ idpf_vport_intr_napi_del_all(rsrc);
return err;
}
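A matching bring-up sketch (hypothetical call site, error unwinding trimmed), showing how the same rsrc handle threads through allocation, init and enable:

    struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
    int err;

    err = idpf_vport_intr_alloc(vport, rsrc);
    if (err)
        return err;

    err = idpf_vport_intr_init(vport, rsrc);
    if (err)
        goto free_vectors;  /* hypothetical unwind label */

    idpf_vport_intr_ena(vport, rsrc);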
-void idpf_vport_intr_ena(struct idpf_vport *vport)
+void idpf_vport_intr_ena(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
- idpf_vport_intr_napi_ena_all(vport);
- idpf_vport_intr_ena_irq_all(vport);
+ idpf_vport_intr_napi_ena_all(rsrc);
+ idpf_vport_intr_ena_irq_all(vport, rsrc);
}
/**
* idpf_config_rss - Send virtchnl messages to configure RSS
* @vport: virtual port
+ * @rss_data: pointer to RSS key and lut info
*
- * Return 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_config_rss(struct idpf_vport *vport)
+int idpf_config_rss(struct idpf_vport *vport, struct idpf_rss_data *rss_data)
{
+ struct idpf_adapter *adapter = vport->adapter;
+ u32 vport_id = vport->vport_id;
int err;
- err = idpf_send_get_set_rss_key_msg(vport, false);
+ err = idpf_send_get_set_rss_key_msg(adapter, rss_data, vport_id, false);
if (err)
return err;
- return idpf_send_get_set_rss_lut_msg(vport, false);
+ return idpf_send_get_set_rss_lut_msg(adapter, rss_data, vport_id, false);
}
/**
* idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
* @vport: virtual port structure
+ * @rss_data: pointer to RSS key and lut info
*/
-void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport,
+ struct idpf_rss_data *rss_data)
{
- struct idpf_adapter *adapter = vport->adapter;
- u16 num_active_rxq = vport->num_rxq;
- struct idpf_rss_data *rss_data;
+ u16 num_active_rxq = vport->dflt_qv_rsrc.num_rxq;
int i;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
-
for (i = 0; i < rss_data->rss_lut_size; i++)
rss_data->rss_lut[i] = i % num_active_rxq;
}
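A standalone C illustration of the default LUT fill above (sizes invented): entries simply cycle through the active rx queues.

    #include <stdio.h>

    int main(void)
    {
        unsigned int rss_lut_size = 16, num_active_rxq = 6;

        for (unsigned int i = 0; i < rss_lut_size; i++)
            printf("lut[%2u] = %u\n", i, i % num_active_rxq);
        return 0;
    }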
@@ -4665,15 +4716,12 @@ void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
/**
* idpf_init_rss_lut - Allocate and initialize RSS LUT
* @vport: virtual port
+ * @rss_data: pointer to RSS key and lut info
*
* Return: 0 on success, negative on failure
*/
-int idpf_init_rss_lut(struct idpf_vport *vport)
+int idpf_init_rss_lut(struct idpf_vport *vport, struct idpf_rss_data *rss_data)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct idpf_rss_data *rss_data;
-
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
if (!rss_data->rss_lut) {
u32 lut_size;
@@ -4684,21 +4732,17 @@ int idpf_init_rss_lut(struct idpf_vport *vport)
}
/* Fill the default RSS lut values */
- idpf_fill_dflt_rss_lut(vport);
+ idpf_fill_dflt_rss_lut(vport, rss_data);
return 0;
}
/**
* idpf_deinit_rss_lut - Release RSS LUT
- * @vport: virtual port
+ * @rss_data: pointer to RSS key and lut info
*/
-void idpf_deinit_rss_lut(struct idpf_vport *vport)
+void idpf_deinit_rss_lut(struct idpf_rss_data *rss_data)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct idpf_rss_data *rss_data;
-
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
kfree(rss_data->rss_lut);
rss_data->rss_lut = NULL;
}
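Since idpf_init_rss_lut() and idpf_config_rss() no longer look up rss_data internally, a hypothetical caller now resolves it once from the persistent config (the expression is taken from the removed lines above):

    struct idpf_rss_data *rss_data =
        &adapter->vport_config[vport->idx]->user_config.rss_data;
    int err;

    err = idpf_init_rss_lut(vport, rss_data);
    if (!err)
        err = idpf_config_rss(vport, rss_data);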
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 423cc9486dce..4be5b3b6d3ed 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -283,6 +283,7 @@ struct idpf_ptype_state {
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
* @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
+ * @__IDPF_Q_RSC_EN: enable Receive Side Coalescing on Rx (splitq)
* @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
* @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
* queue
@@ -297,6 +298,7 @@ enum idpf_queue_flags_t {
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER,
__IDPF_Q_CRC_EN,
+ __IDPF_Q_RSC_EN,
__IDPF_Q_HSPLIT_EN,
__IDPF_Q_PTP,
__IDPF_Q_NOIRQ,
@@ -925,6 +927,7 @@ struct idpf_bufq_set {
* @singleq.rxqs: Array of RX queue pointers
* @splitq: Struct with split queue related members
* @splitq.num_rxq_sets: Number of RX queue sets
+ * @splitq.num_bufq_sets: Number of buffer queue sets
* @splitq.rxq_sets: Array of RX queue sets
* @splitq.bufq_sets: Buffer queue set pointer
*
@@ -942,6 +945,7 @@ struct idpf_rxq_group {
} singleq;
struct {
u16 num_rxq_sets;
+ u16 num_bufq_sets;
struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
struct idpf_bufq_set *bufq_sets;
} splitq;
@@ -1072,25 +1076,35 @@ static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
- struct virtchnl2_create_vport *vport_msg);
-void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
struct virtchnl2_create_vport *vport_msg,
struct idpf_vport_max_q *max_q);
-void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
-int idpf_vport_queues_alloc(struct idpf_vport *vport);
-void idpf_vport_queues_rel(struct idpf_vport *vport);
-void idpf_vport_intr_rel(struct idpf_vport *vport);
-int idpf_vport_intr_alloc(struct idpf_vport *vport);
+void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_queues_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_queues_rel(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_intr_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
-void idpf_vport_intr_deinit(struct idpf_vport *vport);
-int idpf_vport_intr_init(struct idpf_vport *vport);
-void idpf_vport_intr_ena(struct idpf_vport *vport);
-void idpf_fill_dflt_rss_lut(struct idpf_vport *vport);
-int idpf_config_rss(struct idpf_vport *vport);
-int idpf_init_rss_lut(struct idpf_vport *vport);
-void idpf_deinit_rss_lut(struct idpf_vport *vport);
-int idpf_rx_bufs_init_all(struct idpf_vport *vport);
+void idpf_vport_intr_deinit(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_intr_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_intr_ena(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport,
+ struct idpf_rss_data *rss_data);
+int idpf_config_rss(struct idpf_vport *vport, struct idpf_rss_data *rss_data);
+int idpf_init_rss_lut(struct idpf_vport *vport, struct idpf_rss_data *rss_data);
+void idpf_deinit_rss_lut(struct idpf_rss_data *rss_data);
+int idpf_rx_bufs_init_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
u32 q_num);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 4cc58c83688c..7527b967e2e7 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -69,11 +69,13 @@ static void idpf_vf_mb_intr_reg_init(struct idpf_adapter *adapter)
/**
* idpf_vf_intr_reg_init - Initialize interrupt registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*/
-static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
+static int idpf_vf_intr_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
- int num_vecs = vport->num_q_vectors;
+ u16 num_vecs = rsrc->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
u32 rx_itr, tx_itr, val;
@@ -85,15 +87,15 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
if (!reg_vals)
return -ENOMEM;
- num_regs = idpf_get_reg_intr_vecs(vport, reg_vals);
+ num_regs = idpf_get_reg_intr_vecs(adapter, reg_vals);
if (num_regs < num_vecs) {
err = -EINVAL;
goto free_reg_vals;
}
for (i = 0; i < num_vecs; i++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[i];
- u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[i];
+ u16 vec_id = rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
struct idpf_intr_reg *intr = &q_vector->intr_reg;
u32 spacing;
@@ -122,12 +124,12 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
/* Data vector for NOIRQ queues */
- val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
- vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+ val = reg_vals[rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ rsrc->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
val = VF_INT_DYN_CTLN_WB_ON_ITR_M | VF_INT_DYN_CTLN_INTENA_MSK_M |
FIELD_PREP(VF_INT_DYN_CTLN_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
- vport->noirq_dyn_ctl_ena = val;
+ rsrc->noirq_dyn_ctl_ena = val;
free_reg_vals:
kfree(reg_vals);
@@ -156,7 +158,8 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
/* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
if (trig_cause == IDPF_HR_FUNC_RESET &&
!test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
- idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
+ idpf_send_mb_msg(adapter, adapter->hw.asq,
+ VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index cb702eac86c8..d46affaf7185 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -117,13 +117,15 @@ static void idpf_recv_event_msg(struct idpf_adapter *adapter,
/**
* idpf_mb_clean - Reclaim the send mailbox queue entries
- * @adapter: Driver specific private structure
+ * @adapter: driver specific private structure
+ * @asq: send control queue info
*
* Reclaim the send mailbox queue entries to be used to send further messages
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_mb_clean(struct idpf_adapter *adapter)
+static int idpf_mb_clean(struct idpf_adapter *adapter,
+ struct idpf_ctlq_info *asq)
{
u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
struct idpf_ctlq_msg **q_msg;
@@ -134,7 +136,7 @@ static int idpf_mb_clean(struct idpf_adapter *adapter)
if (!q_msg)
return -ENOMEM;
- err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
+ err = idpf_ctlq_clean_sq(asq, &num_q_msg, q_msg);
if (err)
goto err_kfree;
@@ -206,7 +208,8 @@ static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
/**
* idpf_send_mb_msg - Send message over mailbox
- * @adapter: Driver specific private structure
+ * @adapter: driver specific private structure
+ * @asq: control queue to send message to
* @op: virtchnl opcode
* @msg_size: size of the payload
* @msg: pointer to buffer holding the payload
@@ -214,10 +217,10 @@ static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
*
* Will prepare the control queue message and initiate the send API
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg, u16 cookie)
+int idpf_send_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *asq,
+ u32 op, u16 msg_size, u8 *msg, u16 cookie)
{
struct idpf_ctlq_msg *ctlq_msg;
struct idpf_dma_mem *dma_mem;
@@ -231,7 +234,7 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
if (idpf_is_reset_detected(adapter))
return 0;
- err = idpf_mb_clean(adapter);
+ err = idpf_mb_clean(adapter, asq);
if (err)
return err;
@@ -267,7 +270,7 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
ctlq_msg->ctx.indirect.payload = dma_mem;
ctlq_msg->ctx.sw_cookie.data = cookie;
- err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
+ err = idpf_ctlq_send(&adapter->hw, asq, 1, ctlq_msg);
if (err)
goto send_error;
@@ -463,7 +466,7 @@ ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
- retval = idpf_send_mb_msg(adapter, params->vc_op,
+ retval = idpf_send_mb_msg(adapter, adapter->hw.asq, params->vc_op,
send_buf->iov_len, send_buf->iov_base,
cookie);
if (retval) {
@@ -662,12 +665,14 @@ out_unlock:
/**
* idpf_recv_mb_msg - Receive message over mailbox
- * @adapter: Driver specific private structure
+ * @adapter: driver specific private structure
+ * @arq: control queue to receive message from
+ *
+ * Will receive a control queue message and post the receive buffer.
*
- * Will receive control queue message and posts the receive buffer. Returns 0
- * on success and negative on failure.
+ * Return: 0 on success and negative on failure.
*/
-int idpf_recv_mb_msg(struct idpf_adapter *adapter)
+int idpf_recv_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *arq)
{
struct idpf_ctlq_msg ctlq_msg;
struct idpf_dma_mem *dma_mem;
@@ -679,7 +684,7 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
* actually received on num_recv.
*/
num_recv = 1;
- err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
+ err = idpf_ctlq_recv(arq, &num_recv, &ctlq_msg);
if (err || !num_recv)
break;
@@ -695,8 +700,7 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
else
err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
- post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
- adapter->hw.arq,
+ post_err = idpf_ctlq_post_rx_buffs(&adapter->hw, arq,
&num_recv, &dma_mem);
/* If post failed clear the only buffer we supplied */
@@ -717,9 +721,8 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
}
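A minimal sketch of the default mailbox pairing under the new signatures (hypothetical call site; asq/arq are the adapter's stock send and receive control queues shown above):

    err = idpf_send_mb_msg(adapter, adapter->hw.asq,
                           VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
    if (!err)
        err = idpf_recv_mb_msg(adapter, adapter->hw.arq);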
struct idpf_chunked_msg_params {
- u32 (*prepare_msg)(const struct idpf_vport *vport,
- void *buf, const void *pos,
- u32 num);
+ u32 (*prepare_msg)(u32 vport_id, void *buf,
+ const void *pos, u32 num);
const void *chunks;
u32 num_chunks;
@@ -728,9 +731,12 @@ struct idpf_chunked_msg_params {
u32 config_sz;
u32 vc_op;
+ u32 vport_id;
};
-struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *qv_rsrc,
+ u32 vport_id, u32 num)
{
struct idpf_queue_set *qp;
@@ -738,7 +744,9 @@ struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
if (!qp)
return NULL;
- qp->vport = vport;
+ qp->adapter = adapter;
+ qp->qv_rsrc = qv_rsrc;
+ qp->vport_id = vport_id;
qp->num = num;
return qp;
@@ -746,7 +754,7 @@ struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
/**
* idpf_send_chunked_msg - send VC message consisting of chunks
- * @vport: virtual port data structure
+ * @adapter: driver specific private structure
* @params: message params
*
* Helper function for preparing a message describing queues to be enabled or disabled.
@@ -754,7 +762,7 @@ struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
*
* Return: the total size of the prepared message.
*/
-static int idpf_send_chunked_msg(struct idpf_vport *vport,
+static int idpf_send_chunked_msg(struct idpf_adapter *adapter,
const struct idpf_chunked_msg_params *params)
{
struct idpf_vc_xn_params xn_params = {
@@ -765,6 +773,7 @@ static int idpf_send_chunked_msg(struct idpf_vport *vport,
u32 num_chunks, num_msgs, buf_sz;
void *buf __free(kfree) = NULL;
u32 totqs = params->num_chunks;
+ u32 vid = params->vport_id;
num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,
params->chunk_sz), totqs);
@@ -783,10 +792,10 @@ static int idpf_send_chunked_msg(struct idpf_vport *vport,
memset(buf, 0, buf_sz);
xn_params.send_buf.iov_len = buf_sz;
- if (params->prepare_msg(vport, buf, pos, num_chunks) != buf_sz)
+ if (params->prepare_msg(vid, buf, pos, num_chunks) != buf_sz)
return -EINVAL;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
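The batching above can be modeled in a few lines of standalone C (capacities invented): totqs descriptors are sent in batches of at most the per-message chunk capacity until none remain.

    #include <stdio.h>

    int main(void)
    {
        unsigned int totqs = 37, max_chunks = 16, sent = 0, msg = 0;

        while (sent < totqs) {
            unsigned int n = totqs - sent;

            if (n > max_chunks)
                n = max_chunks;
            printf("message %u carries %u chunks\n", msg++, n);
            sent += n;
        }
        return 0;
    }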
@@ -809,6 +818,7 @@ static int idpf_send_chunked_msg(struct idpf_vport *vport,
*/
static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
{
+ struct net_device *netdev;
struct idpf_tx_queue *txq;
bool markers_rcvd = true;
@@ -817,6 +827,8 @@ static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
case VIRTCHNL2_QUEUE_TYPE_TX:
txq = qs->qs[i].txq;
+ netdev = txq->netdev;
+
idpf_queue_set(SW_MARKER, txq);
idpf_wait_for_sw_marker_completion(txq);
markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
@@ -827,7 +839,7 @@ static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
}
if (!markers_rcvd) {
- netdev_warn(qs->vport->netdev,
+ netdev_warn(netdev,
"Failed to receive marker packets\n");
return -ETIMEDOUT;
}
@@ -845,7 +857,8 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- qs = idpf_alloc_queue_set(vport, vport->num_txq);
+ qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
+ vport->vport_id, vport->num_txq);
if (!qs)
return -ENOMEM;
@@ -1263,13 +1276,52 @@ static void idpf_init_avail_queues(struct idpf_adapter *adapter)
}
/**
+ * idpf_vport_init_queue_reg_chunks - initialize queue register chunks
+ * @vport_config: persistent vport structure to store the queue register info
+ * @schunks: source chunks to copy data from
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int
+idpf_vport_init_queue_reg_chunks(struct idpf_vport_config *vport_config,
+ struct virtchnl2_queue_reg_chunks *schunks)
+{
+ struct idpf_queue_id_reg_info *q_info = &vport_config->qid_reg_info;
+ u16 num_chunks = le16_to_cpu(schunks->num_chunks);
+
+ kfree(q_info->queue_chunks);
+
+ q_info->queue_chunks = kcalloc(num_chunks, sizeof(*q_info->queue_chunks),
+ GFP_KERNEL);
+ if (!q_info->queue_chunks) {
+ q_info->num_chunks = 0;
+ return -ENOMEM;
+ }
+
+ q_info->num_chunks = num_chunks;
+
+ for (u16 i = 0; i < num_chunks; i++) {
+ struct idpf_queue_id_reg_chunk *dchunk = &q_info->queue_chunks[i];
+ struct virtchnl2_queue_reg_chunk *schunk = &schunks->chunks[i];
+
+ dchunk->qtail_reg_start = le64_to_cpu(schunk->qtail_reg_start);
+ dchunk->qtail_reg_spacing = le32_to_cpu(schunk->qtail_reg_spacing);
+ dchunk->type = le32_to_cpu(schunk->type);
+ dchunk->start_queue_id = le32_to_cpu(schunk->start_queue_id);
+ dchunk->num_queues = le32_to_cpu(schunk->num_queues);
+ }
+
+ return 0;
+}
+
+/**
* idpf_get_reg_intr_vecs - Get vector queue register offset
- * @vport: virtual port structure
+ * @adapter: adapter structure to get the vector chunks
* @reg_vals: Register offsets to store in
*
- * Returns number of registers that got populated
+ * Return: number of registers that got populated
*/
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+int idpf_get_reg_intr_vecs(struct idpf_adapter *adapter,
struct idpf_vec_regs *reg_vals)
{
struct virtchnl2_vector_chunks *chunks;
@@ -1277,7 +1329,7 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
u16 num_vchunks, num_vec;
int num_regs = 0, i, j;
- chunks = &vport->adapter->req_vec_chunks->vchunks;
+ chunks = &adapter->req_vec_chunks->vchunks;
num_vchunks = le16_to_cpu(chunks->num_vchunks);
for (j = 0; j < num_vchunks; j++) {
@@ -1322,25 +1374,25 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
* are filled.
*/
static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
- struct virtchnl2_queue_reg_chunks *chunks)
+ struct idpf_queue_id_reg_info *chunks)
{
- u16 num_chunks = le16_to_cpu(chunks->num_chunks);
+ u16 num_chunks = chunks->num_chunks;
int reg_filled = 0, i;
u32 reg_val;
while (num_chunks--) {
- struct virtchnl2_queue_reg_chunk *chunk;
+ struct idpf_queue_id_reg_chunk *chunk;
u16 num_q;
- chunk = &chunks->chunks[num_chunks];
- if (le32_to_cpu(chunk->type) != q_type)
+ chunk = &chunks->queue_chunks[num_chunks];
+ if (chunk->type != q_type)
continue;
- num_q = le32_to_cpu(chunk->num_queues);
- reg_val = le64_to_cpu(chunk->qtail_reg_start);
+ num_q = chunk->num_queues;
+ reg_val = chunk->qtail_reg_start;
for (i = 0; i < num_q && reg_filled < num_regs ; i++) {
reg_vals[reg_filled++] = reg_val;
- reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
+ reg_val += chunk->qtail_reg_spacing;
}
}
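A standalone C sketch of how one chunk expands into tail registers, as in the loop above (values invented): registers start at qtail_reg_start and step by qtail_reg_spacing.

    #include <stdio.h>

    int main(void)
    {
        unsigned long long qtail_reg_start = 0x2000;
        unsigned int qtail_reg_spacing = 0x4, num_queues = 4;

        for (unsigned int i = 0; i < num_queues; i++)
            printf("queue %u tail reg: 0x%llx\n", i,
                   qtail_reg_start +
                   (unsigned long long)i * qtail_reg_spacing);
        return 0;
    }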
@@ -1350,13 +1402,15 @@ static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
/**
* __idpf_queue_reg_init - initialize queue registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
* @reg_vals: registers we are initializing
* @num_regs: how many registers there are in total
* @q_type: queue model
*
* Return number of queues that are initialized
*/
-static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
+static int __idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc, u32 *reg_vals,
int num_regs, u32 q_type)
{
struct idpf_adapter *adapter = vport->adapter;
@@ -1364,8 +1418,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
switch (q_type) {
case VIRTCHNL2_QUEUE_TYPE_TX:
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
tx_qgrp->txqs[j]->tail =
@@ -1373,8 +1427,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u16 num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
@@ -1387,9 +1441,9 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u8 num_bufqs = vport->num_bufqs_per_qgrp;
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
+ u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
struct idpf_buf_queue *q;
@@ -1410,15 +1464,15 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
/**
* idpf_queue_reg_init - initialize queue registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
+ * @chunks: queue registers received over mailbox
*
- * Return 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_queue_reg_init(struct idpf_vport *vport)
+int idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ struct idpf_queue_id_reg_info *chunks)
{
- struct virtchnl2_create_vport *vport_params;
- struct virtchnl2_queue_reg_chunks *chunks;
- struct idpf_vport_config *vport_config;
- u16 vport_idx = vport->idx;
int num_regs, ret = 0;
u32 *reg_vals;
@@ -1427,28 +1481,18 @@ int idpf_queue_reg_init(struct idpf_vport *vport)
if (!reg_vals)
return -ENOMEM;
- vport_config = vport->adapter->vport_config[vport_idx];
- if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
- } else {
- vport_params = vport->adapter->vport_params_recvd[vport_idx];
- chunks = &vport_params->chunks;
- }
-
/* Initialize Tx queue tail register address */
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_TX,
chunks);
- if (num_regs < vport->num_txq) {
+ if (num_regs < rsrc->num_txq) {
ret = -EINVAL;
goto free_reg_vals;
}
- num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
VIRTCHNL2_QUEUE_TYPE_TX);
- if (num_regs < vport->num_txq) {
+ if (num_regs < rsrc->num_txq) {
ret = -EINVAL;
goto free_reg_vals;
}
@@ -1456,18 +1500,18 @@ int idpf_queue_reg_init(struct idpf_vport *vport)
/* Initialize Rx/buffer queue tail register address based on Rx queue
* model
*/
- if (idpf_is_queue_model_split(vport->rxq_model)) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
chunks);
- if (num_regs < vport->num_bufq) {
+ if (num_regs < rsrc->num_bufq) {
ret = -EINVAL;
goto free_reg_vals;
}
- num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
- if (num_regs < vport->num_bufq) {
+ if (num_regs < rsrc->num_bufq) {
ret = -EINVAL;
goto free_reg_vals;
}
@@ -1475,14 +1519,14 @@ int idpf_queue_reg_init(struct idpf_vport *vport)
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_RX,
chunks);
- if (num_regs < vport->num_rxq) {
+ if (num_regs < rsrc->num_rxq) {
ret = -EINVAL;
goto free_reg_vals;
}
- num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
VIRTCHNL2_QUEUE_TYPE_RX);
- if (num_regs < vport->num_rxq) {
+ if (num_regs < rsrc->num_rxq) {
ret = -EINVAL;
goto free_reg_vals;
}
@@ -1581,6 +1625,7 @@ free_vport_params:
*/
int idpf_check_supported_desc_ids(struct idpf_vport *vport)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_create_vport *vport_msg;
u64 rx_desc_ids, tx_desc_ids;
@@ -1597,17 +1642,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
- if (idpf_is_queue_model_split(vport->rxq_model)) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
}
} else {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
- vport->base_rxd = true;
+ rsrc->base_rxd = true;
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
return 0;
if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
@@ -1620,96 +1665,96 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
/**
* idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Send virtchnl destroy vport message. Returns 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
+int idpf_send_destroy_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
{
struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
ssize_t reply_sz;
- v_id.vport_id = cpu_to_le32(vport->vport_id);
+ v_id.vport_id = cpu_to_le32(vport_id);
xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
* idpf_send_enable_vport_msg - Send virtchnl enable vport message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Send enable vport virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_enable_vport_msg(struct idpf_vport *vport)
+int idpf_send_enable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
{
struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
ssize_t reply_sz;
- v_id.vport_id = cpu_to_le32(vport->vport_id);
+ v_id.vport_id = cpu_to_le32(vport_id);
xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
* idpf_send_disable_vport_msg - Send virtchnl disable vport message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Send disable vport virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_disable_vport_msg(struct idpf_vport *vport)
+int idpf_send_disable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
{
struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
ssize_t reply_sz;
- v_id.vport_id = cpu_to_le32(vport->vport_id);
+ v_id.vport_id = cpu_to_le32(vport_id);
xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
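The three vport messages above differ only in opcode; a hypothetical common helper (not part of this patch) built from the same pieces would look like:

    static int idpf_send_vport_id_msg(struct idpf_adapter *adapter,
                                      u32 vport_id, u32 vc_op)
    {
        struct idpf_vc_xn_params xn_params = {};
        struct virtchnl2_vport v_id;
        ssize_t reply_sz;

        v_id.vport_id = cpu_to_le32(vport_id);
        xn_params.vc_op = vc_op;
        xn_params.send_buf.iov_base = &v_id;
        xn_params.send_buf.iov_len = sizeof(v_id);
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

        reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

        return reply_sz < 0 ? reply_sz : 0;
    }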
/**
* idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: Tx queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_txq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
const struct idpf_tx_queue *q,
struct virtchnl2_txq_info *qi)
{
u32 val;
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->txq_model);
+ qi->model = cpu_to_le16(rsrc->txq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
qi->relative_queue_id = cpu_to_le16(q->rel_q_id);
- if (!idpf_is_queue_model_split(vport->txq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->txq_model)) {
qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
return;
}
@@ -1731,18 +1776,18 @@ static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_fill_complq_config_chunk - fill chunk describing the completion queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: completion queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_complq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
const struct idpf_compl_queue *q,
struct virtchnl2_txq_info *qi)
{
u32 val;
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->txq_model);
+ qi->model = cpu_to_le16(rsrc->txq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
@@ -1757,7 +1802,7 @@ static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
- * @vport: virtual port data structure
+ * @vport_id: ID of virtual port queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the tx queue
* @num_chunks: number of chunks in the message
@@ -1767,13 +1812,12 @@ static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
*
* Return: the total size of the prepared message.
*/
-static u32 idpf_prepare_cfg_txqs_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
+static u32 idpf_prepare_cfg_txqs_msg(u32 vport_id, void *buf, const void *pos,
u32 num_chunks)
{
struct virtchnl2_config_tx_queues *ctq = buf;
- ctq->vport_id = cpu_to_le32(vport->vport_id);
+ ctq->vport_id = cpu_to_le32(vport_id);
ctq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo));
@@ -1794,6 +1838,7 @@ static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
{
struct virtchnl2_txq_info *qi __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES,
.prepare_msg = idpf_prepare_cfg_txqs_msg,
.config_sz = sizeof(struct virtchnl2_config_tx_queues),
@@ -1808,43 +1853,47 @@ static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
for (u32 i = 0; i < qs->num; i++) {
if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX)
- idpf_fill_txq_config_chunk(qs->vport, qs->qs[i].txq,
+ idpf_fill_txq_config_chunk(qs->qv_rsrc, qs->qs[i].txq,
&qi[params.num_chunks++]);
else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
- idpf_fill_complq_config_chunk(qs->vport,
+ idpf_fill_complq_config_chunk(qs->qv_rsrc,
qs->qs[i].complq,
&qi[params.num_chunks++]);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
* idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
* Return: 0 on success, -errno on failure.
*/
-static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+static int idpf_send_config_tx_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- u32 totqs = vport->num_txq + vport->num_complq;
+ u32 totqs = rsrc->num_txq + rsrc->num_complq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, totqs);
+ qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, totqs);
if (!qs)
return -ENOMEM;
/* Populate the queue info buffer with all queue context info */
- for (u32 i = 0; i < vport->num_txq_grp; i++) {
- const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
qs->qs[k++].txq = tx_qgrp->txqs[j];
}
- if (idpf_is_queue_model_split(vport->txq_model)) {
+ if (idpf_is_queue_model_split(rsrc->txq_model)) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
qs->qs[k++].complq = tx_qgrp->complq;
}
@@ -1859,28 +1908,28 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
/**
* idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: Rx queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_rxq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
struct idpf_rx_queue *q,
struct virtchnl2_rxq_info *qi)
{
const struct idpf_bufq_set *sets;
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->rxq_model);
+ qi->model = cpu_to_le16(rsrc->rxq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size);
qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ if (idpf_queue_has(RSC_EN, q))
qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
qi->desc_ids = cpu_to_le64(q->rxdids);
@@ -1897,7 +1946,7 @@ static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
- if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
+ if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
qi->bufq2_ena = IDPF_BUFQ2_ENA;
qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id);
}
@@ -1914,16 +1963,16 @@ static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: buffer queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_bufq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
const struct idpf_buf_queue *q,
struct virtchnl2_rxq_info *qi)
{
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->rxq_model);
+ qi->model = cpu_to_le16(rsrc->rxq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
@@ -1931,7 +1980,7 @@ static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ if (idpf_queue_has(RSC_EN, q))
qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC);
if (idpf_queue_has(HSPLIT_EN, q)) {
@@ -1942,7 +1991,7 @@ static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues
- * @vport: virtual port data structure
+ * @vport_id: ID of virtual port queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the rx queue
* @num_chunks: number of chunks in the message
@@ -1952,13 +2001,12 @@ static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
*
* Return: the total size of the prepared message.
*/
-static u32 idpf_prepare_cfg_rxqs_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
+static u32 idpf_prepare_cfg_rxqs_msg(u32 vport_id, void *buf, const void *pos,
u32 num_chunks)
{
struct virtchnl2_config_rx_queues *crq = buf;
- crq->vport_id = cpu_to_le32(vport->vport_id);
+ crq->vport_id = cpu_to_le32(vport_id);
crq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo));
@@ -1979,6 +2027,7 @@ static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
{
struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES,
.prepare_msg = idpf_prepare_cfg_rxqs_msg,
.config_sz = sizeof(struct virtchnl2_config_rx_queues),
@@ -1993,36 +2042,40 @@ static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
for (u32 i = 0; i < qs->num; i++) {
if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX)
- idpf_fill_rxq_config_chunk(qs->vport, qs->qs[i].rxq,
+ idpf_fill_rxq_config_chunk(qs->qv_rsrc, qs->qs[i].rxq,
&qi[params.num_chunks++]);
else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER)
- idpf_fill_bufq_config_chunk(qs->vport, qs->qs[i].bufq,
+ idpf_fill_bufq_config_chunk(qs->qv_rsrc, qs->qs[i].bufq,
&qi[params.num_chunks++]);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
* idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
* Return: 0 on success, -errno on failure.
*/
-static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+static int idpf_send_config_rx_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id)
{
- bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+ bool splitq = idpf_is_queue_model_split(rsrc->rxq_model);
struct idpf_queue_set *qs __free(kfree) = NULL;
- u32 totqs = vport->num_rxq + vport->num_bufq;
+ u32 totqs = rsrc->num_rxq + rsrc->num_bufq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, totqs);
+ qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, totqs);
if (!qs)
return -ENOMEM;
/* Populate the queue info buffer with all queue context info */
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
if (!splitq) {
@@ -2030,7 +2083,7 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
goto rxq;
}
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
}
@@ -2059,7 +2112,7 @@ rxq:
/**
* idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected
* queues
- * @vport: virtual port data structure
+ * @vport_id: ID of virtual port queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the queue
* @num_chunks: number of chunks in the message
@@ -2069,13 +2122,12 @@ rxq:
*
* Return: the total size of the prepared message.
*/
-static u32 idpf_prepare_ena_dis_qs_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
+static u32 idpf_prepare_ena_dis_qs_msg(u32 vport_id, void *buf, const void *pos,
u32 num_chunks)
{
struct virtchnl2_del_ena_dis_queues *eq = buf;
- eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->vport_id = cpu_to_le32(vport_id);
eq->chunks.num_chunks = cpu_to_le16(num_chunks);
memcpy(eq->chunks.chunks, pos,
num_chunks * sizeof(*eq->chunks.chunks));
@@ -2100,6 +2152,7 @@ static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
{
struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = en ? VIRTCHNL2_OP_ENABLE_QUEUES :
VIRTCHNL2_OP_DISABLE_QUEUES,
.prepare_msg = idpf_prepare_ena_dis_qs_msg,
@@ -2141,34 +2194,38 @@ static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
qc[i].start_queue_id = cpu_to_le32(qid);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
* idpf_send_ena_dis_queues_msg - send virtchnl enable or disable queues
* message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
* @en: whether to enable or disable queues
*
* Return: 0 on success, -errno on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
+static int idpf_send_ena_dis_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id, bool en)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
u32 num_txq, num_q, k = 0;
bool split;
- num_txq = vport->num_txq + vport->num_complq;
- num_q = num_txq + vport->num_rxq + vport->num_bufq;
+ num_txq = rsrc->num_txq + rsrc->num_complq;
+ num_q = num_txq + rsrc->num_rxq + rsrc->num_bufq;
- qs = idpf_alloc_queue_set(vport, num_q);
+ qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, num_q);
if (!qs)
return -ENOMEM;
- split = idpf_is_queue_model_split(vport->txq_model);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
- for (u32 i = 0; i < vport->num_txq_grp; i++) {
- const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
@@ -2185,10 +2242,10 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
if (k != num_txq)
return -EINVAL;
- split = idpf_is_queue_model_split(vport->rxq_model);
+ split = idpf_is_queue_model_split(rsrc->rxq_model);
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
if (split)
@@ -2209,7 +2266,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
if (!split)
continue;
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
}
@@ -2224,7 +2281,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
/**
* idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap
* queue set to the interrupt vector
- * @vport: virtual port data structure
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the vector mapping
* @num_chunks: number of chunks in the message
@@ -2235,13 +2292,12 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
* Return: the total size of the prepared message.
*/
static u32
-idpf_prep_map_unmap_queue_set_vector_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
- u32 num_chunks)
+idpf_prep_map_unmap_queue_set_vector_msg(u32 vport_id, void *buf,
+ const void *pos, u32 num_chunks)
{
struct virtchnl2_queue_vector_maps *vqvm = buf;
- vqvm->vport_id = cpu_to_le32(vport->vport_id);
+ vqvm->vport_id = cpu_to_le32(vport_id);
vqvm->num_qv_maps = cpu_to_le16(num_chunks);
memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps));
@@ -2262,6 +2318,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
{
struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
.prepare_msg = idpf_prep_map_unmap_queue_set_vector_msg,
@@ -2277,7 +2334,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
params.chunks = vqv;
- split = idpf_is_queue_model_split(qs->vport->txq_model);
+ split = idpf_is_queue_model_split(qs->qv_rsrc->txq_model);
for (u32 i = 0; i < qs->num; i++) {
const struct idpf_queue_ptr *q = &qs->qs[i];
@@ -2299,7 +2356,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
v_idx = vec->v_idx;
itr_idx = vec->rx_itr_idx;
} else {
- v_idx = qs->vport->noirq_v_idx;
+ v_idx = qs->qv_rsrc->noirq_v_idx;
itr_idx = VIRTCHNL2_ITR_IDX_0;
}
break;
@@ -2319,7 +2376,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
v_idx = vec->v_idx;
itr_idx = vec->tx_itr_idx;
} else {
- v_idx = qs->vport->noirq_v_idx;
+ v_idx = qs->qv_rsrc->noirq_v_idx;
itr_idx = VIRTCHNL2_ITR_IDX_1;
}
break;
@@ -2332,29 +2389,33 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
vqv[i].itr_idx = cpu_to_le32(itr_idx);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
* idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue
* vector message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
* @map: true for map and false for unmap
*
* Return: 0 on success, -errno on failure.
*/
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id, bool map)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- u32 num_q = vport->num_txq + vport->num_rxq;
+ u32 num_q = rsrc->num_txq + rsrc->num_rxq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, num_q);
+ qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, num_q);
if (!qs)
return -ENOMEM;
- for (u32 i = 0; i < vport->num_txq_grp; i++) {
- const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
@@ -2362,14 +2423,14 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
}
- if (k != vport->num_txq)
+ if (k != rsrc->num_txq)
return -EINVAL;
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
@@ -2377,7 +2438,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
for (u32 j = 0; j < num_rxq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
qs->qs[k++].rxq =
&rx_qgrp->splitq.rxq_sets[j]->rxq;
else
@@ -2453,7 +2514,9 @@ int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs)
*/
int idpf_send_enable_queues_msg(struct idpf_vport *vport)
{
- return idpf_send_ena_dis_queues_msg(vport, true);
+ return idpf_send_ena_dis_queues_msg(vport->adapter,
+ &vport->dflt_qv_rsrc,
+ vport->vport_id, true);
}
/**
@@ -2467,7 +2530,9 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
int err;
- err = idpf_send_ena_dis_queues_msg(vport, false);
+ err = idpf_send_ena_dis_queues_msg(vport->adapter,
+ &vport->dflt_qv_rsrc,
+ vport->vport_id, false);
if (err)
return err;
@@ -2482,104 +2547,96 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
* @num_chunks: number of chunks to copy
*/
static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
- struct virtchnl2_queue_reg_chunk *schunks,
+ struct idpf_queue_id_reg_chunk *schunks,
u16 num_chunks)
{
u16 i;
for (i = 0; i < num_chunks; i++) {
- dchunks[i].type = schunks[i].type;
- dchunks[i].start_queue_id = schunks[i].start_queue_id;
- dchunks[i].num_queues = schunks[i].num_queues;
+ dchunks[i].type = cpu_to_le32(schunks[i].type);
+ dchunks[i].start_queue_id = cpu_to_le32(schunks[i].start_queue_id);
+ dchunks[i].num_queues = cpu_to_le32(schunks[i].num_queues);
}
}
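The source chunks are now kept in host byte order, which is why the copy gains cpu_to_le32() on each field. The new chunk type is defined outside this diff; a plausible shape, inferred purely from the accesses above:

/* Inferred host-order chunk layout (an assumption; the real definition
 * lives in an idpf header not shown in this diff). The "reg" in the
 * name suggests it also carries queue tail-register info, omitted here.
 */
struct idpf_queue_id_reg_chunk {
	u32 type;		/* VIRTCHNL2_QUEUE_TYPE_* */
	u32 start_queue_id;
	u32 num_queues;
};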
/**
* idpf_send_delete_queues_msg - send delete queues virtchnl message
- * @vport: Virtual port private data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @chunks: queue ids received over mailbox
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Will send delete queues virtchnl message. Return 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_delete_queues_msg(struct idpf_vport *vport)
+int idpf_send_delete_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_queue_id_reg_info *chunks,
+ u32 vport_id)
{
struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
- struct virtchnl2_create_vport *vport_params;
- struct virtchnl2_queue_reg_chunks *chunks;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_vport_config *vport_config;
- u16 vport_idx = vport->idx;
ssize_t reply_sz;
u16 num_chunks;
int buf_size;
- vport_config = vport->adapter->vport_config[vport_idx];
- if (vport_config->req_qs_chunks) {
- chunks = &vport_config->req_qs_chunks->chunks;
- } else {
- vport_params = vport->adapter->vport_params_recvd[vport_idx];
- chunks = &vport_params->chunks;
- }
-
- num_chunks = le16_to_cpu(chunks->num_chunks);
+ num_chunks = chunks->num_chunks;
buf_size = struct_size(eq, chunks.chunks, num_chunks);
eq = kzalloc(buf_size, GFP_KERNEL);
if (!eq)
return -ENOMEM;
- eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->vport_id = cpu_to_le32(vport_id);
eq->chunks.num_chunks = cpu_to_le16(num_chunks);
- idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
+ idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->queue_chunks,
num_chunks);
xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = eq;
xn_params.send_buf.iov_len = buf_size;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
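With the vport dependency gone, callers resolve the chunk info themselves. A hypothetical call site, assuming the per-vport copy lives in vport_config->qid_reg_info as the header change further down suggests:

/* Hypothetical caller; variable names are illustrative. */
struct idpf_vport_config *vport_config =
	vport->adapter->vport_config[vport->idx];

err = idpf_send_delete_queues_msg(vport->adapter,
				  &vport_config->qid_reg_info,
				  vport->vport_id);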
/**
* idpf_send_config_queues_msg - Send config queues virtchnl message
- * @vport: Virtual port private data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Will send config queues virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_config_queues_msg(struct idpf_vport *vport)
+int idpf_send_config_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id)
{
int err;
- err = idpf_send_config_tx_queues_msg(vport);
+ err = idpf_send_config_tx_queues_msg(adapter, rsrc, vport_id);
if (err)
return err;
- return idpf_send_config_rx_queues_msg(vport);
+ return idpf_send_config_rx_queues_msg(adapter, rsrc, vport_id);
}
/**
* idpf_send_add_queues_msg - Send virtchnl add queues message
- * @vport: Virtual port private data structure
- * @num_tx_q: number of transmit queues
- * @num_complq: number of transmit completion queues
- * @num_rx_q: number of receive queues
- * @num_rx_bufq: number of receive buffer queues
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_config: vport persistent structure to store the queue chunk info
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Returns 0 on success, negative on failure. vport _MUST_ be const here as
- * we should not change any fields within vport itself in this function.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
- u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
+int idpf_send_add_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_config *vport_config,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id)
{
struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_vport_config *vport_config;
struct virtchnl2_add_queues aq = {};
- u16 vport_idx = vport->idx;
ssize_t reply_sz;
int size;
@@ -2587,15 +2644,11 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
if (!vc_msg)
return -ENOMEM;
- vport_config = vport->adapter->vport_config[vport_idx];
- kfree(vport_config->req_qs_chunks);
- vport_config->req_qs_chunks = NULL;
-
- aq.vport_id = cpu_to_le32(vport->vport_id);
- aq.num_tx_q = cpu_to_le16(num_tx_q);
- aq.num_tx_complq = cpu_to_le16(num_complq);
- aq.num_rx_q = cpu_to_le16(num_rx_q);
- aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
+ aq.vport_id = cpu_to_le32(vport_id);
+ aq.num_tx_q = cpu_to_le16(rsrc->num_txq);
+ aq.num_tx_complq = cpu_to_le16(rsrc->num_complq);
+ aq.num_rx_q = cpu_to_le16(rsrc->num_rxq);
+ aq.num_rx_bufq = cpu_to_le16(rsrc->num_bufq);
xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
@@ -2603,15 +2656,15 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
xn_params.send_buf.iov_len = sizeof(aq);
xn_params.recv_buf.iov_base = vc_msg;
xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
/* compare vc_msg num queues with vport num queues */
- if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
- le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
- le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
- le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
+ if (le16_to_cpu(vc_msg->num_tx_q) != rsrc->num_txq ||
+ le16_to_cpu(vc_msg->num_rx_q) != rsrc->num_rxq ||
+ le16_to_cpu(vc_msg->num_tx_complq) != rsrc->num_complq ||
+ le16_to_cpu(vc_msg->num_rx_bufq) != rsrc->num_bufq)
return -EINVAL;
size = struct_size(vc_msg, chunks.chunks,
@@ -2619,11 +2672,7 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
if (reply_sz < size)
return -EIO;
- vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
- if (!vport_config->req_qs_chunks)
- return -ENOMEM;
-
- return 0;
+ return idpf_vport_init_queue_reg_chunks(vport_config, &vc_msg->chunks);
}
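idpf_vport_init_queue_reg_chunks() replaces the old kmemdup() of the raw wire reply, but its body is not part of this diff. A sketch of what it plausibly does, given that both this reply path and idpf_vport_init() feed it wire-format virtchnl2 chunks and that the deinit helper below frees qid_reg_info.queue_chunks:

/* Assumed implementation, for orientation only. */
static int
example_init_queue_reg_chunks(struct idpf_vport_config *vport_config,
			      struct virtchnl2_queue_reg_chunks *schunks)
{
	struct idpf_queue_id_reg_info *info = &vport_config->qid_reg_info;
	u16 num = le16_to_cpu(schunks->num_chunks);

	kfree(info->queue_chunks);
	info->queue_chunks = kcalloc(num, sizeof(*info->queue_chunks),
				     GFP_KERNEL);
	if (!info->queue_chunks)
		return -ENOMEM;

	info->num_chunks = num;
	for (u16 i = 0; i < num; i++) {
		info->queue_chunks[i].type =
			le32_to_cpu(schunks->chunks[i].type);
		info->queue_chunks[i].start_queue_id =
			le32_to_cpu(schunks->chunks[i].start_queue_id);
		info->queue_chunks[i].num_queues =
			le32_to_cpu(schunks->chunks[i].num_queues);
	}

	return 0;
}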
/**
@@ -2746,13 +2795,14 @@ int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
/**
* idpf_send_get_stats_msg - Send virtchnl get statistics message
- * @vport: vport to get stats for
+ * @np: netdev private structure
+ * @port_stats: structure to store the vport statistics
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_get_stats_msg(struct idpf_vport *vport)
+int idpf_send_get_stats_msg(struct idpf_netdev_priv *np,
+ struct idpf_port_stats *port_stats)
{
- struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct rtnl_link_stats64 *netstats = &np->netstats;
struct virtchnl2_vport_stats stats_msg = {};
struct idpf_vc_xn_params xn_params = {};
@@ -2763,7 +2813,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
if (!test_bit(IDPF_VPORT_UP, np->state))
return 0;
- stats_msg.vport_id = cpu_to_le32(vport->vport_id);
+ stats_msg.vport_id = cpu_to_le32(np->vport_id);
xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
xn_params.send_buf.iov_base = &stats_msg;
@@ -2771,7 +2821,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
xn_params.recv_buf = xn_params.send_buf;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(np->adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
if (reply_sz < sizeof(stats_msg))
@@ -2792,7 +2842,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
- vport->port_stats.vport_stats = stats_msg;
+ port_stats->vport_stats = stats_msg;
spin_unlock_bh(&np->stats_lock);
@@ -2800,36 +2850,43 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
}
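A likely call site after this signature change; the old function derived both arguments from the vport, so the caller now does so instead:

/* Hypothetical caller (the exact call site is outside this diff). */
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);

err = idpf_send_get_stats_msg(np, &vport->port_stats);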
/**
- * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
- * @vport: virtual port data structure
- * @get: flag to set or get rss look up table
+ * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set RSS lut message
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rss_data: pointer to RSS key and lut info
+ * @vport_id: vport identifier used while preparing the virtchnl message
+ * @get: flag to set or get RSS lookup table
*
 * When rxhash is disabled, the RSS LUT will be configured with zeros. If
 * rxhash is enabled, the LUT values stored in the driver's soft copy will be
 * used to set up the HW.
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
+int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,
+ struct idpf_rss_data *rss_data,
+ u32 vport_id, bool get)
{
struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_rss_data *rss_data;
int buf_size, lut_buf_size;
+ struct idpf_vport *vport;
ssize_t reply_sz;
bool rxhash_ena;
int i;
- rss_data =
- &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
+ vport = idpf_vid_to_vport(adapter, vport_id);
+ if (!vport)
+ return -EINVAL;
+
rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
+
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
rl = kzalloc(buf_size, GFP_KERNEL);
if (!rl)
return -ENOMEM;
- rl->vport_id = cpu_to_le32(vport->vport_id);
+ rl->vport_id = cpu_to_le32(vport_id);
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = rl;
@@ -2850,7 +2907,7 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
}
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
if (!get)
@@ -2882,30 +2939,31 @@ do_memcpy:
}
/**
- * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message
- * @vport: virtual port data structure
- * @get: flag to set or get rss look up table
+ * idpf_send_get_set_rss_key_msg - Send virtchnl get or set RSS key message
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rss_data: pointer to RSS key and lut info
+ * @vport_id: vport identifier used while preparing the virtchnl message
+ * @get: flag to set or get RSS key
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
+int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,
+ struct idpf_rss_data *rss_data,
+ u32 vport_id, bool get)
{
struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
struct virtchnl2_rss_key *rk __free(kfree) = NULL;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_rss_data *rss_data;
ssize_t reply_sz;
int i, buf_size;
u16 key_size;
- rss_data =
- &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
rk = kzalloc(buf_size, GFP_KERNEL);
if (!rk)
return -ENOMEM;
- rk->vport_id = cpu_to_le32(vport->vport_id);
+ rk->vport_id = cpu_to_le32(vport_id);
xn_params.send_buf.iov_base = rk;
xn_params.send_buf.iov_len = buf_size;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
@@ -2925,7 +2983,7 @@ int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
}
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
if (!get)
@@ -3011,33 +3069,142 @@ static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
}
/**
+ * idpf_parse_protocol_ids - parse protocol IDs for a given packet type
+ * @ptype: packet type to parse
+ * @rx_pt: structure to store the parsed packet type info into
+ */
+static void idpf_parse_protocol_ids(struct virtchnl2_ptype *ptype,
+ struct libeth_rx_pt *rx_pt)
+{
+ struct idpf_ptype_state pstate = {};
+
+ for (u32 j = 0; j < ptype->proto_id_count; j++) {
+ u16 id = le16_to_cpu(ptype->proto_id[j]);
+
+ switch (id) {
+ case VIRTCHNL2_PROTO_HDR_GRE:
+ if (pstate.tunnel_state == IDPF_PTYPE_TUNNEL_IP) {
+ rx_pt->tunnel_type =
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT;
+ pstate.tunnel_state |=
+ IDPF_PTYPE_TUNNEL_IP_GRENAT;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_MAC:
+ rx_pt->outer_ip = LIBETH_RX_PT_OUTER_L2;
+ if (pstate.tunnel_state == IDPF_TUN_IP_GRE) {
+ rx_pt->tunnel_type =
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
+ pstate.tunnel_state |=
+ IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4:
+ idpf_fill_ptype_lookup(rx_pt, &pstate, true, false);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV6:
+ idpf_fill_ptype_lookup(rx_pt, &pstate, false, false);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
+ idpf_fill_ptype_lookup(rx_pt, &pstate, true, true);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
+ idpf_fill_ptype_lookup(rx_pt, &pstate, false, true);
+ break;
+ case VIRTCHNL2_PROTO_HDR_UDP:
+ rx_pt->inner_prot = LIBETH_RX_PT_INNER_UDP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_TCP:
+ rx_pt->inner_prot = LIBETH_RX_PT_INNER_TCP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_SCTP:
+ rx_pt->inner_prot = LIBETH_RX_PT_INNER_SCTP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMP:
+ rx_pt->inner_prot = LIBETH_RX_PT_INNER_ICMP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_PAY:
+ rx_pt->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMPV6:
+ case VIRTCHNL2_PROTO_HDR_IPV6_EH:
+ case VIRTCHNL2_PROTO_HDR_PRE_MAC:
+ case VIRTCHNL2_PROTO_HDR_POST_MAC:
+ case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
+ case VIRTCHNL2_PROTO_HDR_SVLAN:
+ case VIRTCHNL2_PROTO_HDR_CVLAN:
+ case VIRTCHNL2_PROTO_HDR_MPLS:
+ case VIRTCHNL2_PROTO_HDR_MMPLS:
+ case VIRTCHNL2_PROTO_HDR_PTP:
+ case VIRTCHNL2_PROTO_HDR_CTRL:
+ case VIRTCHNL2_PROTO_HDR_LLDP:
+ case VIRTCHNL2_PROTO_HDR_ARP:
+ case VIRTCHNL2_PROTO_HDR_ECP:
+ case VIRTCHNL2_PROTO_HDR_EAPOL:
+ case VIRTCHNL2_PROTO_HDR_PPPOD:
+ case VIRTCHNL2_PROTO_HDR_PPPOE:
+ case VIRTCHNL2_PROTO_HDR_IGMP:
+ case VIRTCHNL2_PROTO_HDR_AH:
+ case VIRTCHNL2_PROTO_HDR_ESP:
+ case VIRTCHNL2_PROTO_HDR_IKE:
+ case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
+ case VIRTCHNL2_PROTO_HDR_L2TPV2:
+ case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
+ case VIRTCHNL2_PROTO_HDR_L2TPV3:
+ case VIRTCHNL2_PROTO_HDR_GTP:
+ case VIRTCHNL2_PROTO_HDR_GTP_EH:
+ case VIRTCHNL2_PROTO_HDR_GTPCV2:
+ case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
+ case VIRTCHNL2_PROTO_HDR_GTPU:
+ case VIRTCHNL2_PROTO_HDR_GTPU_UL:
+ case VIRTCHNL2_PROTO_HDR_GTPU_DL:
+ case VIRTCHNL2_PROTO_HDR_ECPRI:
+ case VIRTCHNL2_PROTO_HDR_VRRP:
+ case VIRTCHNL2_PROTO_HDR_OSPF:
+ case VIRTCHNL2_PROTO_HDR_TUN:
+ case VIRTCHNL2_PROTO_HDR_NVGRE:
+ case VIRTCHNL2_PROTO_HDR_VXLAN:
+ case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
+ case VIRTCHNL2_PROTO_HDR_GENEVE:
+ case VIRTCHNL2_PROTO_HDR_NSH:
+ case VIRTCHNL2_PROTO_HDR_QUIC:
+ case VIRTCHNL2_PROTO_HDR_PFCP:
+ case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
+ case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
+ case VIRTCHNL2_PROTO_HDR_RTP:
+ case VIRTCHNL2_PROTO_HDR_NO_PROTO:
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
* idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
- * @vport: virtual port data structure
+ * @adapter: driver specific private structure
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
+static int idpf_send_get_rx_ptype_msg(struct idpf_adapter *adapter)
{
struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
- struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
- int max_ptype, ptypes_recvd = 0, ptype_offset;
- struct idpf_adapter *adapter = vport->adapter;
+ struct libeth_rx_pt *singleq_pt_lkup __free(kfree) = NULL;
+ struct libeth_rx_pt *splitq_pt_lkup __free(kfree) = NULL;
struct idpf_vc_xn_params xn_params = {};
+ int ptypes_recvd = 0, ptype_offset;
+ u32 max_ptype = IDPF_RX_MAX_PTYPE;
u16 next_ptype_id = 0;
ssize_t reply_sz;
- int i, j, k;
- if (vport->rx_ptype_lkup)
- return 0;
-
- if (idpf_is_queue_model_split(vport->rxq_model))
- max_ptype = IDPF_RX_MAX_PTYPE;
- else
- max_ptype = IDPF_RX_MAX_BASE_PTYPE;
+ singleq_pt_lkup = kcalloc(IDPF_RX_MAX_BASE_PTYPE,
+ sizeof(*singleq_pt_lkup), GFP_KERNEL);
+ if (!singleq_pt_lkup)
+ return -ENOMEM;
- ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
- if (!ptype_lkup)
+ splitq_pt_lkup = kcalloc(max_ptype, sizeof(*splitq_pt_lkup), GFP_KERNEL);
+ if (!splitq_pt_lkup)
return -ENOMEM;
get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
@@ -3078,175 +3245,85 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
- for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
- struct idpf_ptype_state pstate = { };
+ for (u16 i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
+ struct libeth_rx_pt rx_pt = {};
struct virtchnl2_ptype *ptype;
- u16 id;
+ u16 pt_10, pt_8;
ptype = (struct virtchnl2_ptype *)
((u8 *)ptype_info + ptype_offset);
+ pt_10 = le16_to_cpu(ptype->ptype_id_10);
+ pt_8 = ptype->ptype_id_8;
+
ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
return -EINVAL;
/* 0xFFFF indicates end of ptypes */
- if (le16_to_cpu(ptype->ptype_id_10) ==
- IDPF_INVALID_PTYPE_ID)
+ if (pt_10 == IDPF_INVALID_PTYPE_ID)
goto out;
+ if (pt_10 >= max_ptype)
+ return -EINVAL;
- if (idpf_is_queue_model_split(vport->rxq_model))
- k = le16_to_cpu(ptype->ptype_id_10);
- else
- k = ptype->ptype_id_8;
-
- for (j = 0; j < ptype->proto_id_count; j++) {
- id = le16_to_cpu(ptype->proto_id[j]);
- switch (id) {
- case VIRTCHNL2_PROTO_HDR_GRE:
- if (pstate.tunnel_state ==
- IDPF_PTYPE_TUNNEL_IP) {
- ptype_lkup[k].tunnel_type =
- LIBETH_RX_PT_TUNNEL_IP_GRENAT;
- pstate.tunnel_state |=
- IDPF_PTYPE_TUNNEL_IP_GRENAT;
- }
- break;
- case VIRTCHNL2_PROTO_HDR_MAC:
- ptype_lkup[k].outer_ip =
- LIBETH_RX_PT_OUTER_L2;
- if (pstate.tunnel_state ==
- IDPF_TUN_IP_GRE) {
- ptype_lkup[k].tunnel_type =
- LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
- pstate.tunnel_state |=
- IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
- }
- break;
- case VIRTCHNL2_PROTO_HDR_IPV4:
- idpf_fill_ptype_lookup(&ptype_lkup[k],
- &pstate, true,
- false);
- break;
- case VIRTCHNL2_PROTO_HDR_IPV6:
- idpf_fill_ptype_lookup(&ptype_lkup[k],
- &pstate, false,
- false);
- break;
- case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
- idpf_fill_ptype_lookup(&ptype_lkup[k],
- &pstate, true,
- true);
- break;
- case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
- idpf_fill_ptype_lookup(&ptype_lkup[k],
- &pstate, false,
- true);
- break;
- case VIRTCHNL2_PROTO_HDR_UDP:
- ptype_lkup[k].inner_prot =
- LIBETH_RX_PT_INNER_UDP;
- break;
- case VIRTCHNL2_PROTO_HDR_TCP:
- ptype_lkup[k].inner_prot =
- LIBETH_RX_PT_INNER_TCP;
- break;
- case VIRTCHNL2_PROTO_HDR_SCTP:
- ptype_lkup[k].inner_prot =
- LIBETH_RX_PT_INNER_SCTP;
- break;
- case VIRTCHNL2_PROTO_HDR_ICMP:
- ptype_lkup[k].inner_prot =
- LIBETH_RX_PT_INNER_ICMP;
- break;
- case VIRTCHNL2_PROTO_HDR_PAY:
- ptype_lkup[k].payload_layer =
- LIBETH_RX_PT_PAYLOAD_L2;
- break;
- case VIRTCHNL2_PROTO_HDR_ICMPV6:
- case VIRTCHNL2_PROTO_HDR_IPV6_EH:
- case VIRTCHNL2_PROTO_HDR_PRE_MAC:
- case VIRTCHNL2_PROTO_HDR_POST_MAC:
- case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
- case VIRTCHNL2_PROTO_HDR_SVLAN:
- case VIRTCHNL2_PROTO_HDR_CVLAN:
- case VIRTCHNL2_PROTO_HDR_MPLS:
- case VIRTCHNL2_PROTO_HDR_MMPLS:
- case VIRTCHNL2_PROTO_HDR_PTP:
- case VIRTCHNL2_PROTO_HDR_CTRL:
- case VIRTCHNL2_PROTO_HDR_LLDP:
- case VIRTCHNL2_PROTO_HDR_ARP:
- case VIRTCHNL2_PROTO_HDR_ECP:
- case VIRTCHNL2_PROTO_HDR_EAPOL:
- case VIRTCHNL2_PROTO_HDR_PPPOD:
- case VIRTCHNL2_PROTO_HDR_PPPOE:
- case VIRTCHNL2_PROTO_HDR_IGMP:
- case VIRTCHNL2_PROTO_HDR_AH:
- case VIRTCHNL2_PROTO_HDR_ESP:
- case VIRTCHNL2_PROTO_HDR_IKE:
- case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
- case VIRTCHNL2_PROTO_HDR_L2TPV2:
- case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
- case VIRTCHNL2_PROTO_HDR_L2TPV3:
- case VIRTCHNL2_PROTO_HDR_GTP:
- case VIRTCHNL2_PROTO_HDR_GTP_EH:
- case VIRTCHNL2_PROTO_HDR_GTPCV2:
- case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
- case VIRTCHNL2_PROTO_HDR_GTPU:
- case VIRTCHNL2_PROTO_HDR_GTPU_UL:
- case VIRTCHNL2_PROTO_HDR_GTPU_DL:
- case VIRTCHNL2_PROTO_HDR_ECPRI:
- case VIRTCHNL2_PROTO_HDR_VRRP:
- case VIRTCHNL2_PROTO_HDR_OSPF:
- case VIRTCHNL2_PROTO_HDR_TUN:
- case VIRTCHNL2_PROTO_HDR_NVGRE:
- case VIRTCHNL2_PROTO_HDR_VXLAN:
- case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
- case VIRTCHNL2_PROTO_HDR_GENEVE:
- case VIRTCHNL2_PROTO_HDR_NSH:
- case VIRTCHNL2_PROTO_HDR_QUIC:
- case VIRTCHNL2_PROTO_HDR_PFCP:
- case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
- case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
- case VIRTCHNL2_PROTO_HDR_RTP:
- case VIRTCHNL2_PROTO_HDR_NO_PROTO:
- break;
- default:
- break;
- }
- }
-
- idpf_finalize_ptype_lookup(&ptype_lkup[k]);
+ idpf_parse_protocol_ids(ptype, &rx_pt);
+ idpf_finalize_ptype_lookup(&rx_pt);
+
+ /* For a given protocol ID stack, the ptype value might
+ * vary between ptype_id_10 and ptype_id_8. So store
+ * them separately for splitq and singleq. Also skip
+ * the repeated ptypes in case of singleq.
+ */
+ splitq_pt_lkup[pt_10] = rx_pt;
+ if (!singleq_pt_lkup[pt_8].outer_ip)
+ singleq_pt_lkup[pt_8] = rx_pt;
}
}
out:
- vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
+ adapter->splitq_pt_lkup = no_free_ptr(splitq_pt_lkup);
+ adapter->singleq_pt_lkup = no_free_ptr(singleq_pt_lkup);
return 0;
}
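With the tables moved to the adapter, ptype negotiation happens once instead of per vport, and both queue models are served from one reply. A sketch of how a consumer could select the right table (assumed; the RX hot-path change is not in this diff):

/* Assumed consumer-side selection; splitq descriptors carry a 10-bit
 * ptype (table of IDPF_RX_MAX_PTYPE entries), singleq an 8-bit one
 * (IDPF_RX_MAX_BASE_PTYPE entries), so callers must pass the matching ID.
 */
static inline struct libeth_rx_pt
example_rx_pt_lookup(const struct idpf_adapter *adapter, bool splitq,
		     u16 ptype)
{
	return splitq ? adapter->splitq_pt_lkup[ptype] :
			adapter->singleq_pt_lkup[ptype];
}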
/**
+ * idpf_rel_rx_pt_lkup - release RX ptype lookup tables
+ * @adapter: adapter pointer to get the lookup tables from
+ */
+static void idpf_rel_rx_pt_lkup(struct idpf_adapter *adapter)
+{
+ kfree(adapter->splitq_pt_lkup);
+ adapter->splitq_pt_lkup = NULL;
+
+ kfree(adapter->singleq_pt_lkup);
+ adapter->singleq_pt_lkup = NULL;
+}
+
+/**
* idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
* message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_id: vport identifier used while preparing the virtchnl message
+ * @loopback_ena: flag to enable or disable loopback
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
+int idpf_send_ena_dis_loopback_msg(struct idpf_adapter *adapter, u32 vport_id,
+ bool loopback_ena)
{
struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_loopback loopback;
ssize_t reply_sz;
- loopback.vport_id = cpu_to_le32(vport->vport_id);
- loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
+ loopback.vport_id = cpu_to_le32(vport_id);
+ loopback.enable = loopback_ena;
xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = &loopback;
xn_params.send_buf.iov_len = sizeof(loopback);
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
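The NETIF_F_LOOPBACK check moves out of the helper; a likely caller, mirroring the idpf_is_feature_ena() use removed above:

/* Hypothetical call site. */
err = idpf_send_ena_dis_loopback_msg(vport->adapter, vport->vport_id,
				     idpf_is_feature_ena(vport,
							 NETIF_F_LOOPBACK));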
@@ -3325,7 +3402,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
{
if (adapter->hw.arq && adapter->hw.asq) {
- idpf_mb_clean(adapter);
+ idpf_mb_clean(adapter, adapter->hw.asq);
idpf_ctlq_deinit(&adapter->hw);
}
adapter->hw.arq = NULL;
@@ -3520,6 +3597,13 @@ restart:
goto err_intr_req;
}
+ err = idpf_send_get_rx_ptype_msg(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "failed to get RX ptypes: %d\n",
+ err);
+ goto intr_rel;
+ }
+
err = idpf_ptp_init(adapter);
if (err)
pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
@@ -3537,6 +3621,8 @@ restart:
return 0;
+intr_rel:
+ idpf_intr_rel(adapter);
err_intr_req:
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
@@ -3591,6 +3677,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
idpf_ptp_release(adapter);
idpf_deinit_task(adapter);
idpf_idc_deinit_core_aux_device(adapter->cdev_info);
+ idpf_rel_rx_pt_lkup(adapter);
idpf_intr_rel(adapter);
if (remove_in_prog)
@@ -3613,25 +3700,27 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
/**
* idpf_vport_alloc_vec_indexes - Get relative vector indexes
* @vport: virtual port data struct
+ * @rsrc: pointer to queue and vector resources
*
* This function requests the vector information required for the vport and
* stores the vector indexes received from the 'global vector distribution'
* in the vport's queue vectors array.
*
- * Return 0 on success, error on failure
+ * Return: 0 on success, error on failure
*/
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_vector_info vec_info;
int num_alloc_vecs;
u32 req;
- vec_info.num_curr_vecs = vport->num_q_vectors;
+ vec_info.num_curr_vecs = rsrc->num_q_vectors;
if (vec_info.num_curr_vecs)
vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
/* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */
- req = max(vport->num_txq - vport->num_xdp_txq, vport->num_rxq) +
+ req = max(rsrc->num_txq - vport->num_xdp_txq, rsrc->num_rxq) +
IDPF_RESERVED_VECS;
vec_info.num_req_vecs = req;
@@ -3639,7 +3728,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
vec_info.index = vport->idx;
num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
- vport->q_vector_idxs,
+ rsrc->q_vector_idxs,
&vec_info);
if (num_alloc_vecs <= 0) {
dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
@@ -3647,7 +3736,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
return -EINVAL;
}
- vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
+ rsrc->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
return 0;
}
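A worked example of the request sizing above, with assumed counts (IDPF_RESERVED_VECS is defined elsewhere; 1 is used here only for illustration):

/*
 * num_txq = 16, num_xdp_txq = 4, num_rxq = 16, IDPF_RESERVED_VECS = 1:
 *
 *	req = max(16 - 4, 16) + 1 = 17
 *
 * i.e. enough vectors for the larger of the non-XDP TX and RX queue
 * counts, plus the reserved NOIRQ vector(s); on success the reserved
 * vectors are subtracted back out of num_q_vectors.
 */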
@@ -3658,9 +3747,12 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
* @max_q: vport max queue info
*
* Will initialize vport with the info received through MB earlier
+ *
+ * Return: 0 on success, negative on failure.
*/
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
+int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_create_vport *vport_msg;
struct idpf_vport_config *vport_config;
@@ -3674,13 +3766,18 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
rss_data = &vport_config->user_config.rss_data;
vport_msg = adapter->vport_params_recvd[idx];
+ err = idpf_vport_init_queue_reg_chunks(vport_config,
+ &vport_msg->chunks);
+ if (err)
+ return err;
+
vport_config->max_q.max_txq = max_q->max_txq;
vport_config->max_q.max_rxq = max_q->max_rxq;
vport_config->max_q.max_complq = max_q->max_complq;
vport_config->max_q.max_bufq = max_q->max_bufq;
- vport->txq_model = le16_to_cpu(vport_msg->txq_model);
- vport->rxq_model = le16_to_cpu(vport_msg->rxq_model);
+ rsrc->txq_model = le16_to_cpu(vport_msg->txq_model);
+ rsrc->rxq_model = le16_to_cpu(vport_msg->rxq_model);
vport->vport_type = le16_to_cpu(vport_msg->vport_type);
vport->vport_id = le32_to_cpu(vport_msg->vport_id);
@@ -3697,24 +3794,27 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
- idpf_vport_init_num_qs(vport, vport_msg);
- idpf_vport_calc_num_q_desc(vport);
- idpf_vport_calc_num_q_groups(vport);
- idpf_vport_alloc_vec_indexes(vport);
+ idpf_vport_init_num_qs(vport, vport_msg, rsrc);
+ idpf_vport_calc_num_q_desc(vport, rsrc);
+ idpf_vport_calc_num_q_groups(rsrc);
+ idpf_vport_alloc_vec_indexes(vport, rsrc);
vport->crc_enable = adapter->crc_enable;
if (!(vport_msg->vport_flags &
cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
- return;
+ return 0;
err = idpf_ptp_get_vport_tstamps_caps(vport);
if (err) {
+ /* Do not error on timestamp failure */
pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
- return;
+ return 0;
}
INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
+
+ return 0;
}
/**
@@ -3773,21 +3873,21 @@ int idpf_get_vec_ids(struct idpf_adapter *adapter,
* Returns number of ids filled
*/
static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
- struct virtchnl2_queue_reg_chunks *chunks)
+ struct idpf_queue_id_reg_info *chunks)
{
- u16 num_chunks = le16_to_cpu(chunks->num_chunks);
+ u16 num_chunks = chunks->num_chunks;
u32 num_q_id_filled = 0, i;
u32 start_q_id, num_q;
while (num_chunks--) {
- struct virtchnl2_queue_reg_chunk *chunk;
+ struct idpf_queue_id_reg_chunk *chunk;
- chunk = &chunks->chunks[num_chunks];
- if (le32_to_cpu(chunk->type) != q_type)
+ chunk = &chunks->queue_chunks[num_chunks];
+ if (chunk->type != q_type)
continue;
- num_q = le32_to_cpu(chunk->num_queues);
- start_q_id = le32_to_cpu(chunk->start_queue_id);
+ num_q = chunk->num_queues;
+ start_q_id = chunk->start_queue_id;
for (i = 0; i < num_q; i++) {
if ((num_q_id_filled + i) < num_qids) {
@@ -3806,6 +3906,7 @@ static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
/**
* __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
* @vport: virtual port for which the queues ids are initialized
+ * @rsrc: pointer to queue and vector resources
* @qids: queue ids
* @num_qids: number of queue ids
* @q_type: type of queue
@@ -3814,6 +3915,7 @@ static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
* parameters. Returns number of queue ids initialized.
*/
static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
const u32 *qids,
int num_qids,
u32 q_type)
@@ -3822,19 +3924,19 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
switch (q_type) {
case VIRTCHNL2_QUEUE_TYPE_TX:
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
tx_qgrp->txqs[j]->q_id = qids[k];
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u16 num_rxq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
@@ -3842,7 +3944,7 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
struct idpf_rx_queue *q;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
@@ -3851,16 +3953,16 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
}
break;
case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
- for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (i = 0; i < rsrc->num_txq_grp && k < num_qids; i++, k++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
tx_qgrp->complq->q_id = qids[k];
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u8 num_bufqs = vport->num_bufqs_per_qgrp;
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
+ u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
struct idpf_buf_queue *q;
@@ -3880,30 +3982,21 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
/**
* idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
* @vport: virtual port for which the queues ids are initialized
+ * @rsrc: pointer to queue and vector resources
+ * @chunks: queue ids received over mailbox
*
* Will initialize all queue ids with ids received as mailbox parameters.
- * Returns 0 on success, negative if all the queues are not initialized.
+ *
+ * Return: 0 on success, negative if not all the queues are initialized.
*/
-int idpf_vport_queue_ids_init(struct idpf_vport *vport)
+int idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ struct idpf_queue_id_reg_info *chunks)
{
- struct virtchnl2_create_vport *vport_params;
- struct virtchnl2_queue_reg_chunks *chunks;
- struct idpf_vport_config *vport_config;
- u16 vport_idx = vport->idx;
int num_ids, err = 0;
u16 q_type;
u32 *qids;
- vport_config = vport->adapter->vport_config[vport_idx];
- if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
- } else {
- vport_params = vport->adapter->vport_params_recvd[vport_idx];
- chunks = &vport_params->chunks;
- }
-
qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
if (!qids)
return -ENOMEM;
@@ -3911,13 +4004,13 @@ int idpf_vport_queue_ids_init(struct idpf_vport *vport)
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
VIRTCHNL2_QUEUE_TYPE_TX,
chunks);
- if (num_ids < vport->num_txq) {
+ if (num_ids < rsrc->num_txq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
VIRTCHNL2_QUEUE_TYPE_TX);
- if (num_ids < vport->num_txq) {
+ if (num_ids < rsrc->num_txq) {
err = -EINVAL;
goto mem_rel;
}
@@ -3925,44 +4018,46 @@ int idpf_vport_queue_ids_init(struct idpf_vport *vport)
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
VIRTCHNL2_QUEUE_TYPE_RX,
chunks);
- if (num_ids < vport->num_rxq) {
+ if (num_ids < rsrc->num_rxq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
VIRTCHNL2_QUEUE_TYPE_RX);
- if (num_ids < vport->num_rxq) {
+ if (num_ids < rsrc->num_rxq) {
err = -EINVAL;
goto mem_rel;
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
goto check_rxq;
q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
- if (num_ids < vport->num_complq) {
+ if (num_ids < rsrc->num_complq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
- if (num_ids < vport->num_complq) {
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
+ num_ids, q_type);
+ if (num_ids < rsrc->num_complq) {
err = -EINVAL;
goto mem_rel;
}
check_rxq:
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
goto mem_rel;
q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
- if (num_ids < vport->num_bufq) {
+ if (num_ids < rsrc->num_bufq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
- if (num_ids < vport->num_bufq)
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
+ num_ids, q_type);
+ if (num_ids < rsrc->num_bufq)
err = -EINVAL;
mem_rel:
@@ -3974,23 +4069,24 @@ mem_rel:
/**
* idpf_vport_adjust_qs - Adjust to new requested queues
* @vport: virtual port data struct
+ * @rsrc: pointer to queue and vector resources
*
* Renegotiate queues. Returns 0 on success, negative on failure.
*/
-int idpf_vport_adjust_qs(struct idpf_vport *vport)
+int idpf_vport_adjust_qs(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
struct virtchnl2_create_vport vport_msg;
int err;
- vport_msg.txq_model = cpu_to_le16(vport->txq_model);
- vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
+ vport_msg.txq_model = cpu_to_le16(rsrc->txq_model);
+ vport_msg.rxq_model = cpu_to_le16(rsrc->rxq_model);
err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
NULL);
if (err)
return err;
- idpf_vport_init_num_qs(vport, &vport_msg);
- idpf_vport_calc_num_q_groups(vport);
+ idpf_vport_init_num_qs(vport, &vport_msg, rsrc);
+ idpf_vport_calc_num_q_groups(rsrc);
return 0;
}
@@ -4112,12 +4208,12 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
return le32_to_cpu(vport_msg->vport_id);
}
-static void idpf_set_mac_type(struct idpf_vport *vport,
+static void idpf_set_mac_type(const u8 *default_mac_addr,
struct virtchnl2_mac_addr *mac_addr)
{
bool is_primary;
- is_primary = ether_addr_equal(vport->default_mac_addr, mac_addr->addr);
+ is_primary = ether_addr_equal(default_mac_addr, mac_addr->addr);
mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY :
VIRTCHNL2_MAC_ADDR_EXTRA;
}
@@ -4193,22 +4289,23 @@ invalid_payload:
/**
* idpf_add_del_mac_filters - Add/del mac filters
- * @vport: Virtual port data structure
- * @np: Netdev private structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_config: persistent vport structure to get the MAC filter list
+ * @default_mac_addr: default MAC address to compare with
+ * @vport_id: vport identifier used while preparing the virtchnl message
* @add: Add or delete flag
* @async: Don't wait for return message
*
- * Returns 0 on success, error on failure.
+ * Return: 0 on success, error on failure.
**/
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
- struct idpf_netdev_priv *np,
+int idpf_add_del_mac_filters(struct idpf_adapter *adapter,
+ struct idpf_vport_config *vport_config,
+ const u8 *default_mac_addr, u32 vport_id,
bool add, bool async)
{
struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
- struct idpf_adapter *adapter = np->adapter;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_vport_config *vport_config;
u32 num_msgs, total_filters = 0;
struct idpf_mac_filter *f;
ssize_t reply_sz;
@@ -4220,7 +4317,6 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
xn_params.async = async;
xn_params.async_handler = idpf_mac_filter_async_handler;
- vport_config = adapter->vport_config[np->vport_idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
/* Find the number of newly added filters */
@@ -4251,7 +4347,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
list) {
if (add && f->add) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
- idpf_set_mac_type(vport, &mac_addr[i]);
+ idpf_set_mac_type(default_mac_addr, &mac_addr[i]);
i++;
f->add = false;
if (i == total_filters)
@@ -4259,7 +4355,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
}
if (!add && f->remove) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
- idpf_set_mac_type(vport, &mac_addr[i]);
+ idpf_set_mac_type(default_mac_addr, &mac_addr[i]);
i++;
f->remove = false;
if (i == total_filters)
@@ -4291,7 +4387,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
memset(ma_list, 0, buf_size);
}
- ma_list->vport_id = cpu_to_le32(np->vport_id);
+ ma_list->vport_id = cpu_to_le32(vport_id);
ma_list->num_mac_addr = cpu_to_le16(num_entries);
memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index eac3d15daa42..fe065911ad5a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -92,6 +92,7 @@ struct idpf_netdev_priv;
struct idpf_vec_regs;
struct idpf_vport;
struct idpf_vport_max_q;
+struct idpf_vport_config;
struct idpf_vport_user_config_data;
ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
@@ -101,10 +102,20 @@ void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
int idpf_vc_core_init(struct idpf_adapter *adapter);
void idpf_vc_core_deinit(struct idpf_adapter *adapter);
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+int idpf_get_reg_intr_vecs(struct idpf_adapter *adapter,
struct idpf_vec_regs *reg_vals);
-int idpf_queue_reg_init(struct idpf_vport *vport);
-int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+int idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ struct idpf_queue_id_reg_info *chunks);
+int idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ struct idpf_queue_id_reg_info *chunks);
+static inline void
+idpf_vport_deinit_queue_reg_chunks(struct idpf_vport_config *vport_cfg)
+{
+ kfree(vport_cfg->qid_reg_info.queue_chunks);
+ vport_cfg->qid_reg_info.queue_chunks = NULL;
+}
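The struct behind qid_reg_info is declared outside this diff; its assumed shape, inferred from the accesses in these hunks (host-order num_chunks copied into a u16, a kfree()-able queue_chunks array):

/* Assumed layout, for orientation only. */
struct idpf_queue_id_reg_info {
	u16 num_chunks;
	struct idpf_queue_id_reg_chunk *queue_chunks;
};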
bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag);
bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type);
@@ -112,9 +123,9 @@ bool idpf_sideband_action_ena(struct idpf_vport *vport,
struct ethtool_rx_flow_spec *fsp);
unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport);
-int idpf_recv_mb_msg(struct idpf_adapter *adapter);
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg, u16 cookie);
+int idpf_recv_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *arq);
+int idpf_send_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *asq,
+ u32 op, u16 msg_size, u8 *msg, u16 cookie);
struct idpf_queue_ptr {
enum virtchnl2_queue_type type;
@@ -127,60 +138,81 @@ struct idpf_queue_ptr {
};
struct idpf_queue_set {
- struct idpf_vport *vport;
+ struct idpf_adapter *adapter;
+ struct idpf_q_vec_rsrc *qv_rsrc;
+ u32 vport_id;
u32 num;
struct idpf_queue_ptr qs[] __counted_by(num);
};
-struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num);
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id, u32 num);
int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id);
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
+int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
u32 idpf_get_vport_id(struct idpf_vport *vport);
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
-int idpf_send_enable_vport_msg(struct idpf_vport *vport);
-int idpf_send_disable_vport_msg(struct idpf_vport *vport);
+int idpf_send_destroy_vport_msg(struct idpf_adapter *adapter, u32 vport_id);
+int idpf_send_enable_vport_msg(struct idpf_adapter *adapter, u32 vport_id);
+int idpf_send_disable_vport_msg(struct idpf_adapter *adapter, u32 vport_id);
-int idpf_vport_adjust_qs(struct idpf_vport *vport);
+int idpf_vport_adjust_qs(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
- u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
-int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
+int idpf_send_add_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_config *vport_config,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id);
+int idpf_send_delete_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_queue_id_reg_info *chunks,
+ u32 vport_id);
+
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_get_vec_ids(struct idpf_adapter *adapter,
u16 *vecids, int num_vecids,
struct virtchnl2_vector_chunks *chunks);
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
-
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
- struct idpf_netdev_priv *np,
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id,
+ bool map);
+
+int idpf_add_del_mac_filters(struct idpf_adapter *adapter,
+ struct idpf_vport_config *vport_config,
+ const u8 *default_mac_addr, u32 vport_id,
bool add, bool async);
int idpf_set_promiscuous(struct idpf_adapter *adapter,
struct idpf_vport_user_config_data *config_data,
u32 vport_id);
int idpf_check_supported_desc_ids(struct idpf_vport *vport);
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
-int idpf_send_get_stats_msg(struct idpf_vport *vport);
+int idpf_send_ena_dis_loopback_msg(struct idpf_adapter *adapter, u32 vport_id,
+ bool loopback_ena);
+int idpf_send_get_stats_msg(struct idpf_netdev_priv *np,
+ struct idpf_port_stats *port_stats);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,
+ struct idpf_rss_data *rss_data,
+ u32 vport_id, bool get);
+int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,
+ struct idpf_rss_data *rss_data,
+ u32 vport_id, bool get);
void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
u8 *send_msg, u16 msg_size,
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index 958d16f87424..2b60f2a78684 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -2,21 +2,22 @@
/* Copyright (C) 2025 Intel Corporation */
#include "idpf.h"
+#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
#include "xdp.h"
#include "xsk.h"
-static int idpf_rxq_for_each(const struct idpf_vport *vport,
+static int idpf_rxq_for_each(const struct idpf_q_vec_rsrc *rsrc,
int (*fn)(struct idpf_rx_queue *rxq, void *arg),
void *arg)
{
- bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+ bool splitq = idpf_is_queue_model_split(rsrc->rxq_model);
- if (!vport->rxq_grps)
+ if (!rsrc->rxq_grps)
return -ENETDOWN;
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
if (splitq)
@@ -45,7 +46,8 @@ static int idpf_rxq_for_each(const struct idpf_vport *vport,
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
const struct idpf_vport *vport = rxq->q_vector->vport;
- bool split = idpf_is_queue_model_split(vport->rxq_model);
+ const struct idpf_q_vec_rsrc *rsrc;
+ bool split;
int err;
err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
@@ -54,6 +56,9 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
if (err)
return err;
+ rsrc = &vport->dflt_qv_rsrc;
+ split = idpf_is_queue_model_split(rsrc->rxq_model);
+
if (idpf_queue_has(XSK, rxq)) {
err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
@@ -70,7 +75,7 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
if (!split)
return 0;
- rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
+ rxq->xdpsqs = &vport->txqs[rsrc->xdp_txq_offset];
rxq->num_xdp_txq = vport->num_xdp_txq;
return 0;
@@ -86,9 +91,9 @@ int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
return __idpf_xdp_rxq_info_init(rxq, NULL);
}
-int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
+int idpf_xdp_rxq_info_init_all(const struct idpf_q_vec_rsrc *rsrc)
{
- return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
+ return idpf_rxq_for_each(rsrc, __idpf_xdp_rxq_info_init, NULL);
}
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
@@ -111,10 +116,10 @@ void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
__idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
}
-void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_q_vec_rsrc *rsrc)
{
- idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
- (void *)(size_t)vport->rxq_model);
+ idpf_rxq_for_each(rsrc, __idpf_xdp_rxq_info_deinit,
+ (void *)(size_t)rsrc->rxq_model);
}
static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
@@ -132,10 +137,10 @@ static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
return 0;
}
-void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_q_vec_rsrc *rsrc,
struct bpf_prog *xdp_prog)
{
- idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
+ idpf_rxq_for_each(rsrc, idpf_xdp_rxq_assign_prog, xdp_prog);
}
static void idpf_xdp_tx_timer(struct work_struct *work);
@@ -165,7 +170,7 @@ int idpf_xdpsqs_get(const struct idpf_vport *vport)
}
dev = vport->netdev;
- sqs = vport->xdp_txq_offset;
+ sqs = vport->dflt_qv_rsrc.xdp_txq_offset;
for (u32 i = sqs; i < vport->num_txq; i++) {
struct idpf_tx_queue *xdpsq = vport->txqs[i];
@@ -202,7 +207,7 @@ void idpf_xdpsqs_put(const struct idpf_vport *vport)
return;
dev = vport->netdev;
- sqs = vport->xdp_txq_offset;
+ sqs = vport->dflt_qv_rsrc.xdp_txq_offset;
for (u32 i = sqs; i < vport->num_txq; i++) {
struct idpf_tx_queue *xdpsq = vport->txqs[i];
@@ -358,12 +363,15 @@ int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
{
const struct idpf_netdev_priv *np = netdev_priv(dev);
const struct idpf_vport *vport = np->vport;
+ u32 xdp_txq_offset;
if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
return -ENETDOWN;
+ xdp_txq_offset = vport->dflt_qv_rsrc.xdp_txq_offset;
+
return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
- &vport->txqs[vport->xdp_txq_offset],
+ &vport->txqs[xdp_txq_offset],
vport->num_xdp_txq,
idpf_xdp_xmit_flush_bulk,
idpf_xdp_tx_finalize);
@@ -391,13 +399,43 @@ static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
pt);
}
+static int idpf_xdpmo_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
+ struct idpf_xdp_rx_desc desc __uninitialized;
+ const struct idpf_rx_queue *rxq;
+ u64 cached_time, ts_ns;
+ u32 ts_high;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+ if (!idpf_queue_has(PTP, rxq))
+ return -ENODATA;
+
+ idpf_xdp_get_qw1(&desc, xdp->desc);
+
+ if (!(idpf_xdp_rx_ts_low(&desc) & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
+ return -ENODATA;
+
+ cached_time = READ_ONCE(rxq->cached_phc_time);
+
+ idpf_xdp_get_qw3(&desc, xdp->desc);
+
+ ts_high = idpf_xdp_rx_ts_high(&desc);
+ ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
+
+ *timestamp = ts_ns;
+ return 0;
+}
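
Editor's note on the .xmo_rx_timestamp path above: the descriptor carries only 32 timestamp bits, which are extended against the cached PHC time. A minimal sketch of the usual extension scheme follows; the helper name and the exact wrap handling inside idpf_ptp_tstamp_extend_32b_to_64b() are assumptions here, not the driver's verified internals.

#include <stdint.h>

/* Hypothetical stand-in for the PHC extension helper: combine a
 * 32-bit hardware timestamp with a recently cached 64-bit PHC time.
 * Valid as long as the cache is refreshed more often than the 32-bit
 * counter wraps (~4.3 s at nanosecond resolution).
 */
static uint64_t tstamp_extend_32b_to_64b(uint64_t cached_phc_ns, uint32_t ts_32b)
{
	uint32_t phc_lo = (uint32_t)cached_phc_ns;
	uint32_t delta = ts_32b - phc_lo;

	/* A forward delta larger than half the 32-bit range means the
	 * sample is really slightly behind the cached time.
	 */
	if (delta > UINT32_MAX / 2)
		return cached_phc_ns - (phc_lo - ts_32b);

	return cached_phc_ns + delta;
}
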
+
static const struct xdp_metadata_ops idpf_xdpmo = {
.xmo_rx_hash = idpf_xdpmo_rx_hash,
+ .xmo_rx_timestamp = idpf_xdpmo_rx_timestamp,
};
void idpf_xdp_set_features(const struct idpf_vport *vport)
{
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model))
return;
libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
@@ -409,6 +447,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport,
const struct netdev_bpf *xdp)
{
const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct bpf_prog *old, *prog = xdp->prog;
struct idpf_vport_config *cfg;
int ret;
@@ -419,7 +458,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport,
!test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
!!vport->xdp_prog == !!prog) {
if (test_bit(IDPF_VPORT_UP, np->state))
- idpf_xdp_copy_prog_to_rqs(vport, prog);
+ idpf_xdp_copy_prog_to_rqs(rsrc, prog);
old = xchg(&vport->xdp_prog, prog);
if (old)
@@ -464,7 +503,7 @@ int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
idpf_vport_ctrl_lock(dev);
vport = idpf_netdev_to_vport(dev);
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model))
goto notsupp;
switch (xdp->command) {
diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
index 479f5ef3c604..63e56f7d43e0 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.h
+++ b/drivers/net/ethernet/intel/idpf/xdp.h
@@ -9,10 +9,10 @@
#include "idpf_txrx.h"
int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
-int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
+int idpf_xdp_rxq_info_init_all(const struct idpf_q_vec_rsrc *rsrc);
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
-void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
-void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_q_vec_rsrc *rsrc);
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_q_vec_rsrc *rsrc,
struct bpf_prog *xdp_prog);
int idpf_xdpsqs_get(const struct idpf_vport *vport);
@@ -112,11 +112,13 @@ struct idpf_xdp_rx_desc {
aligned_u64 qw1;
#define IDPF_XDP_RX_BUF GENMASK_ULL(47, 32)
#define IDPF_XDP_RX_EOP BIT_ULL(1)
+#define IDPF_XDP_RX_TS_LOW GENMASK_ULL(31, 24)
aligned_u64 qw2;
#define IDPF_XDP_RX_HASH GENMASK_ULL(31, 0)
aligned_u64 qw3;
+#define IDPF_XDP_RX_TS_HIGH GENMASK_ULL(63, 32)
} __aligned(4 * sizeof(u64));
static_assert(sizeof(struct idpf_xdp_rx_desc) ==
sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
@@ -128,6 +130,8 @@ static_assert(sizeof(struct idpf_xdp_rx_desc) ==
#define idpf_xdp_rx_buf(desc) FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
#define idpf_xdp_rx_eop(desc) !!((desc)->qw1 & IDPF_XDP_RX_EOP)
#define idpf_xdp_rx_hash(desc) FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
+#define idpf_xdp_rx_ts_low(desc) FIELD_GET(IDPF_XDP_RX_TS_LOW, (desc)->qw1)
+#define idpf_xdp_rx_ts_high(desc) FIELD_GET(IDPF_XDP_RX_TS_HIGH, (desc)->qw3)
static inline void
idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
@@ -149,6 +153,9 @@ idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
desc->qw1 = ((const typeof(desc))rxd)->qw1;
#else
desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
+ ((u64)rxd->ts_low << 24) |
+ ((u64)rxd->fflags1 << 16) |
+ ((u64)rxd->status_err1 << 8) |
rxd->status_err0_qw1;
#endif
}
@@ -166,6 +173,19 @@ idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
#endif
}
+static inline void
+idpf_xdp_get_qw3(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw3 = ((const typeof(desc))rxd)->qw3;
+#else
+ desc->qw3 = ((u64)le32_to_cpu(rxd->ts_high) << 32) |
+ ((u64)le16_to_cpu(rxd->fmd6) << 16) |
+ le16_to_cpu(rxd->l2tag1);
+#endif
+}
+
void idpf_xdp_set_features(const struct idpf_vport *vport);
int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c
index fd2cc43ab43c..676cbd80774d 100644
--- a/drivers/net/ethernet/intel/idpf/xsk.c
+++ b/drivers/net/ethernet/intel/idpf/xsk.c
@@ -26,13 +26,14 @@ static void idpf_xsk_setup_rxq(const struct idpf_vport *vport,
static void idpf_xsk_setup_bufq(const struct idpf_vport *vport,
struct idpf_buf_queue *bufq)
{
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct xsk_buff_pool *pool;
u32 qid = U32_MAX;
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
if (&grp->splitq.bufq_sets[j].bufq == bufq) {
qid = grp->splitq.rxq_sets[0]->rxq.idx;
goto setup;
@@ -61,7 +62,7 @@ static void idpf_xsk_setup_txq(const struct idpf_vport *vport,
if (!idpf_queue_has(XDP, txq))
return;
- qid = txq->idx - vport->xdp_txq_offset;
+ qid = txq->idx - vport->dflt_qv_rsrc.xdp_txq_offset;
pool = xsk_get_pool_from_qid(vport->netdev, qid);
if (!pool || !pool->dev)
@@ -86,7 +87,8 @@ static void idpf_xsk_setup_complq(const struct idpf_vport *vport,
if (!idpf_queue_has(XDP, complq))
return;
- qid = complq->txq_grp->txqs[0]->idx - vport->xdp_txq_offset;
+ qid = complq->txq_grp->txqs[0]->idx -
+ vport->dflt_qv_rsrc.xdp_txq_offset;
pool = xsk_get_pool_from_qid(vport->netdev, qid);
if (!pool || !pool->dev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 3069b583fd81..89c7fed7b8fc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -342,6 +342,13 @@ static int ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
return 0;
}
+ if (hw->phy.sfp_type == ixgbe_sfp_type_10g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_10g_bx_core1) {
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = false;
+ return 0;
+ }
+
/*
* Determine link capabilities based on the stored value of AUTOC,
* which represents EEPROM defaults. If AUTOC value has not been
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 2ad81f687a84..bb4b53fee234 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -351,6 +351,8 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
case ixgbe_sfp_type_1g_lx_core1:
case ixgbe_sfp_type_1g_bx_core0:
case ixgbe_sfp_type_1g_bx_core1:
+ case ixgbe_sfp_type_10g_bx_core0:
+ case ixgbe_sfp_type_10g_bx_core1:
ethtool_link_ksettings_add_link_mode(cmd, supported,
FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 2449e4cf2679..ab733e73927d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1534,8 +1534,10 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
struct ixgbe_adapter *adapter = hw->back;
u8 oui_bytes[3] = {0, 0, 0};
u8 bitrate_nominal = 0;
+ u8 sm_length_100m = 0;
u8 comp_codes_10g = 0;
u8 comp_codes_1g = 0;
+ u8 sm_length_km = 0;
u16 enforce_sfp = 0;
u32 vendor_oui = 0;
u8 identifier = 0;
@@ -1678,6 +1680,33 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_bx_core1;
+ /* Support Ethernet 10G-BX: check that the SFF-8472 Nominal
+ * Bit Rate is 67h (103 x 100 MBd, i.e. ~10.3 Gb/s) and that
+ * the module is single-mode fibre with at least 1 km link
+ * length.
+ */
+ } else if ((!comp_codes_10g) && (bitrate_nominal == 0x67) &&
+ (!(cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)) &&
+ (!(cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE))) {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SM_LENGTH_KM,
+ &sm_length_km);
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SM_LENGTH_100M,
+ &sm_length_100m);
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+ if (sm_length_km > 0 || sm_length_100m >= 10) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_10g_bx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_10g_bx_core1;
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ }
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
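
Editor's note: the constants used above map directly onto the module's SFF-8472 A0h page: byte 12 is the nominal signalling rate in units of 100 MBd, byte 14 (IXGBE_SFF_SM_LENGTH_KM) the single-mode reach in km, and byte 15 (IXGBE_SFF_SM_LENGTH_100M) the same reach in units of 100 m. A self-contained decoding sketch, with hypothetical names and a plain byte buffer standing in for the EEPROM reads:

#include <stdbool.h>
#include <stdint.h>

/* eeprom[] is assumed to hold the first 256 bytes of page A0h */
static bool sfp_is_10g_bx_candidate(const uint8_t *eeprom)
{
	uint32_t rate_mbd = eeprom[12] * 100;	/* 0x67 -> 10300 MBd */
	uint32_t reach_m = eeprom[14] ? eeprom[14] * 1000
				      : eeprom[15] * 100;

	/* ~10.3 GBd single-mode module with at least 1 km reach */
	return rate_mbd == 10300 && reach_m >= 1000;
}
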
@@ -1768,7 +1797,9 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_10g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_10g_bx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
return -EOPNOTSUPP;
}
@@ -1786,7 +1817,9 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_10g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_10g_bx_core1)) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel)
return 0;
@@ -2016,20 +2049,22 @@ int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
return -EOPNOTSUPP;
/*
- * Limiting active cables and 1G Phys must be initialized as
+ * Limiting active cables, 10G BX and 1G Phys must be initialized as
* SR modules
*/
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- sfp_type == ixgbe_sfp_type_1g_bx_core0)
+ sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ sfp_type == ixgbe_sfp_type_10g_bx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
- sfp_type == ixgbe_sfp_type_1g_bx_core1)
+ sfp_type == ixgbe_sfp_type_1g_bx_core1 ||
+ sfp_type == ixgbe_sfp_type_10g_bx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 81179c60af4e..039ba4b6c120 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -32,6 +32,8 @@
#define IXGBE_SFF_QSFP_1GBE_COMP 0x86
#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92
#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
+#define IXGBE_SFF_SM_LENGTH_KM 0xE
+#define IXGBE_SFF_SM_LENGTH_100M 0xF
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index b1bfeb21537a..61f2ef67defd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3286,6 +3286,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_1g_lx_core1 = 14,
ixgbe_sfp_type_1g_bx_core0 = 15,
ixgbe_sfp_type_1g_bx_core1 = 16,
+ ixgbe_sfp_type_10g_bx_core0 = 17,
+ ixgbe_sfp_type_10g_bx_core1 = 18,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
index b5805969404f..0fbbcb5400c7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -307,7 +307,7 @@ static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no)
}
/* Setup registers for a hardware Rx Queue */
-static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
+static int octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
{
u64 reg_val;
u64 oq_ctl = 0ULL;
@@ -355,6 +355,7 @@ static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
reg_val = ((u64)time_threshold << 32) |
CFG_GET_OQ_INTR_PKT(oct->conf);
octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+ return 0;
}
/* Setup registers for a PF mailbox */
@@ -637,6 +638,19 @@ static int octep_soft_reset_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF);
+ /* Firmware status CSR is supposed to be cleared by
+ * core domain reset, but due to a hw bug, it is not.
+ * Set it to DOWNING right before reset so that it is not
+ * left in READY (1) state after a reset. This is required
+ * in addition to the early setting to handle the case where
+ * the OcteonTX is unexpectedly reset, reboots, and then
+ * the module is removed.
+ */
+ OCTEP_PCI_WIN_WRITE(oct,
+ CN9K_PEMX_PFX_CSX_PFCFGX(0,
+ 0, CN9K_PCIEEP_VSECST_CTL),
+ FW_STATUS_DOWNING);
+
/* Set core domain reset bit */
OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1);
/* Wait for 100ms as Octeon resets. */
@@ -696,14 +710,26 @@ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
/* Disable all interrupts */
static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
{
- u64 intr_mask = 0ULL;
+ u64 reg_val, intr_mask = 0ULL;
int srn, num_rings, i;
srn = CFG_GET_PORTS_PF_SRN(oct->conf);
num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
- for (i = 0; i < num_rings; i++)
- intr_mask |= (0x1ULL << (srn + i));
+ for (i = 0; i < num_rings; i++) {
+ intr_mask |= BIT_ULL(srn + i);
+ reg_val = octep_read_csr64(oct,
+ CN93_SDP_R_IN_INT_LEVELS(srn + i));
+ reg_val &= ~CN93_INT_ENA_BIT;
+ octep_write_csr64(oct,
+ CN93_SDP_R_IN_INT_LEVELS(srn + i), reg_val);
+
+ reg_val = octep_read_csr64(oct,
+ CN93_SDP_R_OUT_INT_LEVELS(srn + i));
+ reg_val &= ~CN93_INT_ENA_BIT;
+ octep_write_csr64(oct,
+ CN93_SDP_R_OUT_INT_LEVELS(srn + i), reg_val);
+ }
octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
@@ -894,4 +920,17 @@ void octep_device_setup_cn93_pf(struct octep_device *oct)
octep_init_config_cn93_pf(oct);
octep_configure_ring_mapping_cn93_pf(oct);
+
+ if (oct->chip_id == OCTEP_PCI_DEVICE_ID_CN98_PF)
+ return;
+
+ /* Firmware status CSR is supposed to be cleared by
+ * core domain reset, but due to IPBUPEM-38842, it is not.
+ * Set it to RUNNING early in boot, so that unexpected resets
+ * leave it in a state that is not READY (1).
+ */
+ OCTEP_PCI_WIN_WRITE(oct,
+ CN9K_PEMX_PFX_CSX_PFCFGX(0,
+ 0, CN9K_PCIEEP_VSECST_CTL),
+ FW_STATUS_RUNNING);
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
index 5de0b5ecbc5f..ad2f4984e40a 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
@@ -8,6 +8,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
#include "octep_config.h"
#include "octep_main.h"
@@ -327,12 +328,14 @@ static void octep_setup_iq_regs_cnxk_pf(struct octep_device *oct, int iq_no)
}
/* Setup registers for a hardware Rx Queue */
-static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
+static int octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
{
- u64 reg_val;
- u64 oq_ctl = 0ULL;
- u32 time_threshold = 0;
struct octep_oq *oq = oct->oq[oq_no];
+ unsigned long t_out_jiffies;
+ u32 time_threshold = 0;
+ u64 oq_ctl = 0ULL;
+ u64 reg_ba_val;
+ u64 reg_val;
oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
@@ -343,6 +346,36 @@ static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
} while (!(reg_val & CNXK_R_OUT_CTL_IDLE));
}
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no), oq->max_count);
+ /* Wait for WMARK to get applied */
+ usleep_range(10, 15);
+
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+ reg_ba_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no));
+
+ if (reg_ba_val != oq->desc_ring_dma) {
+ t_out_jiffies = jiffies + 10 * HZ;
+ do {
+ if (reg_ba_val == ULLONG_MAX)
+ return -EFAULT;
+ octep_write_csr64(oct,
+ CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct,
+ CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+ reg_ba_val =
+ octep_read_csr64(oct,
+ CNXK_SDP_R_OUT_SLIST_BADDR(oq_no));
+ } while ((reg_ba_val != oq->desc_ring_dma) &&
+ time_before(jiffies, t_out_jiffies));
+
+ if (reg_ba_val != oq->desc_ring_dma)
+ return -EAGAIN;
+ }
reg_val &= ~(CNXK_R_OUT_CTL_IMODE);
reg_val &= ~(CNXK_R_OUT_CTL_ROR_P);
@@ -356,10 +389,6 @@ static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
reg_val |= (CNXK_R_OUT_CTL_ES_P);
octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), reg_val);
- octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
- oq->desc_ring_dma);
- octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
- oq->max_count);
oq_ctl = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
@@ -385,6 +414,7 @@ static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
reg_val &= ~0xFFFFFFFFULL;
reg_val |= CFG_GET_OQ_WMARK(oct->conf);
octep_write_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no), reg_val);
+ return 0;
}
/* Setup registers for a PF mailbox */
@@ -660,7 +690,7 @@ static int octep_soft_reset_cnxk_pf(struct octep_device *oct)
* the module is removed.
*/
OCTEP_PCI_WIN_WRITE(oct, CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL),
- FW_STATUS_RUNNING);
+ FW_STATUS_DOWNING);
/* Set chip domain reset bit */
OCTEP_PCI_WIN_WRITE(oct, CNXK_RST_CHIP_DOMAIN_W1S, 1);
@@ -720,14 +750,26 @@ static void octep_enable_interrupts_cnxk_pf(struct octep_device *oct)
/* Disable all interrupts */
static void octep_disable_interrupts_cnxk_pf(struct octep_device *oct)
{
- u64 intr_mask = 0ULL;
+ u64 reg_val, intr_mask = 0ULL;
int srn, num_rings, i;
srn = CFG_GET_PORTS_PF_SRN(oct->conf);
num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
- for (i = 0; i < num_rings; i++)
- intr_mask |= (0x1ULL << (srn + i));
+ for (i = 0; i < num_rings; i++) {
+ intr_mask |= BIT_ULL(srn + i);
+ reg_val = octep_read_csr64(oct,
+ CNXK_SDP_R_IN_INT_LEVELS(srn + i));
+ reg_val &= ~CNXK_INT_ENA_BIT;
+ octep_write_csr64(oct,
+ CNXK_SDP_R_IN_INT_LEVELS(srn + i), reg_val);
+
+ reg_val = octep_read_csr64(oct,
+ CNXK_SDP_R_OUT_INT_LEVELS(srn + i));
+ reg_val &= ~CNXK_INT_ENA_BIT;
+ octep_write_csr64(oct,
+ CNXK_SDP_R_OUT_INT_LEVELS(srn + i), reg_val);
+ }
octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index 81ac4267811c..35d0ff289a70 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -77,7 +77,7 @@ struct octep_pci_win_regs {
struct octep_hw_ops {
void (*setup_iq_regs)(struct octep_device *oct, int q);
- void (*setup_oq_regs)(struct octep_device *oct, int q);
+ int (*setup_oq_regs)(struct octep_device *oct, int q);
void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
irqreturn_t (*mbox_intr_handler)(void *ioq_vector);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
index ca473502d7a0..06eff23521fa 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -5,6 +5,8 @@
*
*/
+#include <linux/bitfield.h>
+
#ifndef _OCTEP_REGS_CN9K_PF_H_
#define _OCTEP_REGS_CN9K_PF_H_
@@ -383,8 +385,37 @@
/* bit 1 for firmware heartbeat interrupt */
#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+#define FW_STATUS_DOWNING 0ULL
+#define FW_STATUS_RUNNING 2ULL
+
+#define CN9K_PEM_GENMASK BIT_ULL(36)
+#define CN9K_PF_GENMASK GENMASK_ULL(21, 18)
+#define CN9K_PFX_CSX_PFCFGX_SHADOW_BIT BIT_ULL(16)
+#define CN9K_PFX_CSX_PFCFGX_BASE_ADDR (0x8e0000008000ULL)
+#define CN9K_4BYTE_ALIGNED_ADDRESS_OFFSET(offset) ((offset) & BIT_ULL(2))
+#define CN9K_PEMX_PFX_CSX_PFCFGX cn9k_pemx_pfx_csx_pfcfgx
+
+static inline u64 cn9k_pemx_pfx_csx_pfcfgx(u64 pem, u32 pf, u32 offset)
+{
+ u32 shadow_addr_bit, pf_addr_bits, aligned_offset;
+ u64 pem_addr_bits;
+
+ pem_addr_bits = FIELD_PREP(CN9K_PEM_GENMASK, pem);
+ pf_addr_bits = FIELD_PREP(CN9K_PF_GENMASK, pf);
+ shadow_addr_bit = CN9K_PFX_CSX_PFCFGX_SHADOW_BIT & (offset);
+ aligned_offset = rounddown((offset), 8);
+
+ return (CN9K_PFX_CSX_PFCFGX_BASE_ADDR | pem_addr_bits
+ | pf_addr_bits | shadow_addr_bit | aligned_offset)
+ + CN9K_4BYTE_ALIGNED_ADDRESS_OFFSET(offset);
+}
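
Editor's note, worked example: for pem = 0, pf = 0 and offset CN9K_PCIEEP_VSECST_CTL (0x4D0), both FIELD_PREP terms are zero, bit 16 of the offset is clear, 0x4D0 is already 8-byte aligned, and bit 2 is clear, so the helper returns 0x8e0000008000 | 0x4d0 = 0x8e00000084d0.
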
+
+/* Register defines for use with CN9K_PEMX_PFX_CSX_PFCFGX */
+#define CN9K_PCIEEP_VSECST_CTL 0x4D0
+
#define CN93_PEM_BAR4_INDEX 7
#define CN93_PEM_BAR4_INDEX_SIZE 0x400000ULL
#define CN93_PEM_BAR4_INDEX_OFFSET (CN93_PEM_BAR4_INDEX * CN93_PEM_BAR4_INDEX_SIZE)
+#define CN93_INT_ENA_BIT BIT_ULL(62)
#endif /* _OCTEP_REGS_CN9K_PF_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
index e637d7c8224d..006e23882ee9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
@@ -396,6 +396,7 @@
#define CNXK_SDP_EPF_OEI_RINT_DATA_BIT_MBOX BIT_ULL(0)
/* bit 1 for firmware heartbeat interrupt */
#define CNXK_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+#define FW_STATUS_DOWNING 0ULL
#define FW_STATUS_RUNNING 2ULL
#define CNXK_PEMX_PFX_CSX_PFCFGX(pem, pf, offset) ({ typeof(offset) _off = (offset); \
((0x8e0000008000 | \
@@ -412,5 +413,6 @@
#define CNXK_PEM_BAR4_INDEX 7
#define CNXK_PEM_BAR4_INDEX_SIZE 0x400000ULL
#define CNXK_PEM_BAR4_INDEX_OFFSET (CNXK_PEM_BAR4_INDEX * CNXK_PEM_BAR4_INDEX_SIZE)
+#define CNXK_INT_ENA_BIT BIT_ULL(62)
#endif /* _OCTEP_REGS_CNXK_PF_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 82b6b19e76b4..f2a7c6a76c74 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -12,6 +12,8 @@
#include "octep_config.h"
#include "octep_main.h"
+static void octep_oq_free_ring_buffers(struct octep_oq *oq);
+
static void octep_oq_reset_indices(struct octep_oq *oq)
{
oq->host_read_idx = 0;
@@ -170,11 +172,15 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
goto oq_fill_buff_err;
octep_oq_reset_indices(oq);
- oct->hw_ops.setup_oq_regs(oct, q_no);
+ if (oct->hw_ops.setup_oq_regs(oct, q_no))
+ goto oq_setup_err;
+
oct->num_oqs++;
return 0;
+oq_setup_err:
+ octep_oq_free_ring_buffers(oq);
oq_fill_buff_err:
vfree(oq->buff_info);
oq->buff_info = NULL;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
index 88937fce75f1..4c769b27c278 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
@@ -196,7 +196,7 @@ static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no)
}
/* Setup registers for a hardware Rx Queue */
-static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
+static int octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
{
struct octep_vf_oq *oq = oct->oq[oq_no];
u32 time_threshold = 0;
@@ -239,6 +239,7 @@ static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+ return 0;
}
/* Setup registers for a VF mailbox */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
index 1f79dfad42c6..a968b93a6794 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
@@ -199,11 +199,13 @@ static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no)
}
/* Setup registers for a hardware Rx Queue */
-static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
+static int octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
{
struct octep_vf_oq *oq = oct->oq[oq_no];
+ unsigned long t_out_jiffies;
u32 time_threshold = 0;
u64 oq_ctl = ULL(0);
+ u64 reg_ba_val;
u64 reg_val;
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
@@ -214,6 +216,38 @@ static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
} while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE));
}
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no),
+ oq->max_count);
+ /* Wait for WMARK to get applied */
+ usleep_range(10, 15);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+ reg_ba_val = octep_vf_read_csr64(oct,
+ CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no));
+ if (reg_ba_val != oq->desc_ring_dma) {
+ t_out_jiffies = jiffies + 10 * HZ;
+ do {
+ if (reg_ba_val == ULLONG_MAX)
+ return -EFAULT;
+ octep_vf_write_csr64(oct,
+ CNXK_VF_SDP_R_OUT_SLIST_BADDR
+ (oq_no), oq->desc_ring_dma);
+ octep_vf_write_csr64(oct,
+ CNXK_VF_SDP_R_OUT_SLIST_RSIZE
+ (oq_no), oq->max_count);
+ reg_ba_val =
+ octep_vf_read_csr64(oct,
+ CNXK_VF_SDP_R_OUT_SLIST_BADDR
+ (oq_no));
+ } while ((reg_ba_val != oq->desc_ring_dma) &&
+ time_before(jiffies, t_out_jiffies));
+
+ if (reg_ba_val != oq->desc_ring_dma)
+ return -EAGAIN;
+ }
reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE);
reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P);
@@ -227,8 +261,6 @@ static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
reg_val |= (CNXK_VF_R_OUT_CTL_ES_P);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
- octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
- octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
/* Clear the ISIZE and BSIZE (22-0) */
@@ -250,6 +282,7 @@ static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
reg_val &= ~GENMASK_ULL(31, 0);
reg_val |= CFG_GET_OQ_WMARK(oct->conf);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val);
+ return 0;
}
/* Setup registers for a VF mailbox */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
index b9f13506f462..c74cd2369e90 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -55,7 +55,7 @@ struct octep_vf_mmio {
struct octep_vf_hw_ops {
void (*setup_iq_regs)(struct octep_vf_device *oct, int q);
- void (*setup_oq_regs)(struct octep_vf_device *oct, int q);
+ int (*setup_oq_regs)(struct octep_vf_device *oct, int q);
void (*setup_mbox_regs)(struct octep_vf_device *oct, int mbox);
irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
index d70c8be3cfc4..6f865dbbba6c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -12,6 +12,8 @@
#include "octep_vf_config.h"
#include "octep_vf_main.h"
+static void octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq);
+
static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq)
{
oq->host_read_idx = 0;
@@ -171,11 +173,15 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
goto oq_fill_buff_err;
octep_vf_oq_reset_indices(oq);
- oct->hw_ops.setup_oq_regs(oct, q_no);
+ if (oct->hw_ops.setup_oq_regs(oct, q_no))
+ goto oq_setup_err;
+
oct->num_oqs++;
return 0;
+oq_setup_err:
+ octep_vf_oq_free_ring_buffers(oq);
oq_fill_buff_err:
vfree(oq->buff_info);
oq->buff_info = NULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 42044cd810b1..fd4792e432bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1823,6 +1823,8 @@ static int cgx_lmac_exit(struct cgx *cgx)
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
kfree(lmac->mac_to_index_bmap.bmap);
+ rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
+ rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
kfree(lmac->name);
kfree(lmac);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 747fbdf2a908..8530df8b3fda 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -3632,11 +3632,22 @@ static void rvu_remove(struct pci_dev *pdev)
devm_kfree(&pdev->dev, rvu);
}
+static void rvu_shutdown(struct pci_dev *pdev)
+{
+ struct rvu *rvu = pci_get_drvdata(pdev);
+
+ if (!rvu)
+ return;
+
+ rvu_clear_rvum_blk_revid(rvu);
+}
+
static struct pci_driver rvu_driver = {
.name = DRV_NAME,
.id_table = rvu_id_table,
.probe = rvu_probe,
.remove = rvu_remove,
+ .shutdown = rvu_shutdown,
};
static int __init rvu_init_module(void)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 2f485a930edd..49f7ff5eddfc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4938,12 +4938,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
/* Set chan/link to backpressure TL3 instead of TL2 */
rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
- /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0, TM11 = 0)
* This sticky mode is known to cause SQ stalls when multiple
- * SQs are mapped to same SMQ and transmitting pkts at a time.
+ * SQs are mapped to the same SMQ and transmit packets simultaneously.
+ * NIX PSE may also deadlock when sticky and non-sticky
+ * transmissions are mixed. Hence disable it (TM5 = 0).
*/
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
- cfg &= ~BIT_ULL(15);
+ cfg &= ~(BIT_ULL(15) | BIT_ULL(14) | BIT_ULL(23));
+ /* NIX may drop credits when conditional clocks are turned off.
+ * Hence enable the control flow clock (set TM9 = 1).
+ */
+ cfg |= BIT_ULL(21);
rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
ltdefs = rvu->kpu.lt_def;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 8cdfc36d79d2..255c7e2633bb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -789,8 +789,15 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
/* LMTID is same as AURA Id */
val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
- /* Set if [127:64] of last 128bit word has a valid pointer */
- count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
+ /* Meaning of count_eot
+ * CN10K: count_eot = 0 if the number of pointers to free is even,
+ * count_eot = 1 if the number of pointers to free is odd.
+ *
+ * CN20K: count_eot represents the least significant 2 bits of the
+ * total number of valid pointers to free.
+ * Example: if 7 pointers are freed (0b111), count_eot = 0b11.
+ */
+ count_eot = (num_ptrs - 1) & 0x3ULL;
/* Set AURA ID to free pointer */
ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
/* Target address for LMTST flush tells HW how many 128bit
@@ -800,7 +807,7 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
*/
if (num_ptrs > 2) {
size = (sizeof(u64) * num_ptrs) / 16;
- if (!count_eot)
+ if (!(count_eot & 1))
size++;
tar_addr |= ((size - 1) & 0x7) << 4;
}
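
Editor's note, a quick check of the new encoding against the comment above. As the code suggests, num_ptrs counts every 64-bit word handed to the LMTST, including the leading AURA word in ptrs[0], so num_ptrs - 1 is the number of pointers actually freed:

#include <assert.h>

int main(void)
{
	/* 7 pointers freed -> 8 words including the AURA word */
	unsigned long long num_ptrs = 8;
	unsigned long long count_eot = (num_ptrs - 1) & 0x3ULL;

	assert(count_eot == 0x3);	/* CN20K: 0b111 & 0b11 */
	/* CN10K only consumes bit 0: set here because 7 is odd,
	 * matching the old (num_ptrs % 2) ? 0 : 1 rule.
	 */
	assert((count_eot & 1) == 1);
	return 0;
}
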
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
index c7bd4f3c6c6b..069e39b48847 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
@@ -17,4 +17,4 @@ struct otx2_devlink {
int otx2_register_dl(struct otx2_nic *pfvf);
void otx2_unregister_dl(struct otx2_nic *pfvf);
-#endif /* RVU_DEVLINK_H */
+#endif /* OTX2_DEVLINK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index b6449f0a9e7d..a0340f3422bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -66,6 +66,8 @@ static const struct otx2_stat otx2_queue_stats[] = {
{ "frames", 1 },
};
+#define OTX2_FEC_MAX_INDEX 4
+
static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
@@ -568,6 +570,13 @@ static int otx2_set_coalesce(struct net_device *netdev,
return 0;
}
+static u32 otx2_get_rx_ring_count(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ return pfvf->hw.rx_queues;
+}
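
Editor's note: this follows the same pattern as the mtk_eth_soc hunk further down. The ring count moves out of the driver's get_rxnfc() ETHTOOL_GRXRINGS case and into the dedicated .get_rx_ring_count ethtool op, which the core can call directly.
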
+
static int otx2_get_rss_hash_opts(struct net_device *dev,
struct ethtool_rxfh_fields *nfc)
{
@@ -742,10 +751,6 @@ static int otx2_get_rxnfc(struct net_device *dev,
int ret = -EOPNOTSUPP;
switch (nfc->cmd) {
- case ETHTOOL_GRXRINGS:
- nfc->data = pfvf->hw.rx_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
if (netif_running(dev) && ntuple) {
nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
@@ -1028,15 +1033,14 @@ static int otx2_get_fecparam(struct net_device *netdev,
ETHTOOL_FEC_BASER,
ETHTOOL_FEC_RS,
ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
-#define FEC_MAX_INDEX 4
- if (pfvf->linfo.fec < FEC_MAX_INDEX)
- fecparam->active_fec = fec[pfvf->linfo.fec];
+
+ fecparam->active_fec = fec[pfvf->linfo.fec];
rsp = otx2_get_fwdata(pfvf);
if (IS_ERR(rsp))
return PTR_ERR(rsp);
- if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
+ if (rsp->fwdata.supported_fec < OTX2_FEC_MAX_INDEX) {
if (!rsp->fwdata.supported_fec)
fecparam->fec = ETHTOOL_FEC_NONE;
else
@@ -1344,6 +1348,7 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_coalesce = otx2_set_coalesce,
.get_rxnfc = otx2_get_rxnfc,
.set_rxnfc = otx2_set_rxnfc,
+ .get_rx_ring_count = otx2_get_rx_ring_count,
.get_rxfh_key_size = otx2_get_rxfh_key_size,
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
@@ -1462,6 +1467,7 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_channels = otx2_get_channels,
.get_rxnfc = otx2_get_rxnfc,
.set_rxnfc = otx2_set_rxnfc,
+ .get_rx_ring_count = otx2_get_rx_ring_count,
.get_rxfh_key_size = otx2_get_rxfh_key_size,
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 6b2d8559f0eb..444bb67494ab 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -3315,6 +3315,7 @@ err_free_zc_bmap:
err_sriov_cleannup:
otx2_sriov_vfcfg_cleanup(pf);
err_pf_sriov_init:
+ otx2_unregister_dl(pf);
otx2_shutdown_tc(pf);
err_mcam_flow_del:
otx2_mcam_flow_del(pf);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
index 2f52daba58e6..750c72f628e5 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -717,11 +717,6 @@ static int prestera_ethtool_set_fecparam(struct net_device *dev,
return -EINVAL;
}
- if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
- netdev_err(dev, "FEC set is not allowed on non-SFP ports\n");
- return -EINVAL;
- }
-
fec = PRESTERA_PORT_FEC_MAX;
for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
if ((port_fec_caps[mode].eth_fec & fecparam->fec) &&
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 3e13322470da..2989a77e3b42 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -542,7 +542,7 @@ static int prestera_ldr_wait_reg32(struct prestera_fw *fw,
10 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
}
-static u32 prestera_ldr_wait_buf(struct prestera_fw *fw, size_t len)
+static int prestera_ldr_wait_buf(struct prestera_fw *fw, size_t len)
{
u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_BUF_RD_REG);
u32 buf_len = fw->ldr_buf_len;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 05349a0b2db1..cf4e26d337bb 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -78,7 +78,6 @@ static const struct pci_device_id skge_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
{ PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index e68997a29191..35fef28ee2f9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4625,18 +4625,20 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
} while (u64_stats_fetch_retry(&hwstats->syncp, start));
}
+static u32 mtk_get_rx_ring_count(struct net_device *dev)
+{
+ if (dev->hw_features & NETIF_F_LRO)
+ return MTK_MAX_RX_RING_NUM;
+
+ return 0;
+}
+
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- if (dev->hw_features & NETIF_F_LRO) {
- cmd->data = MTK_MAX_RX_RING_NUM;
- ret = 0;
- }
- break;
case ETHTOOL_GRXCLSRLCNT:
if (dev->hw_features & NETIF_F_LRO) {
struct mtk_mac *mac = netdev_priv(dev);
@@ -4741,6 +4743,7 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.set_pauseparam = mtk_set_pauseparam,
.get_rxnfc = mtk_get_rxnfc,
.set_rxnfc = mtk_set_rxnfc,
+ .get_rx_ring_count = mtk_get_rx_ring_count,
.get_eee = mtk_get_eee,
.set_eee = mtk_set_eee,
};
@@ -4991,7 +4994,6 @@ static int mtk_sgmii_init(struct mtk_eth *eth)
{
struct device_node *np;
struct regmap *regmap;
- u32 flags;
int i;
for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -5000,18 +5002,16 @@ static int mtk_sgmii_init(struct mtk_eth *eth)
break;
regmap = syscon_node_to_regmap(np);
- flags = 0;
- if (of_property_read_bool(np, "mediatek,pnswap"))
- flags |= MTK_SGMII_FLAG_PN_SWAP;
-
- of_node_put(np);
-
- if (IS_ERR(regmap))
+ if (IS_ERR(regmap)) {
+ of_node_put(np);
return PTR_ERR(regmap);
+ }
- eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
- eth->soc->ana_rgc3,
- flags);
+ eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev,
+ of_fwnode_handle(np),
+ regmap,
+ eth->soc->ana_rgc3);
+ of_node_put(np);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 87f35bcbeff8..c5d564e5a581 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -636,28 +636,20 @@ static int get_real_size(const struct sk_buff *skb,
struct net_device *dev,
int *lso_header_size,
bool *inline_ok,
- void **pfrag,
- int *hopbyhop)
+ void **pfrag)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int real_size;
if (shinfo->gso_size) {
*inline_ok = false;
- *hopbyhop = 0;
if (skb->encapsulation) {
*lso_header_size = skb_inner_tcp_all_headers(skb);
} else {
- /* Detects large IPV6 TCP packets and prepares for removal of
- * HBH header that has been pushed by ip6_xmit(),
- * mainly so that tcpdump can dissect them.
- */
- if (ipv6_has_hopopt_jumbo(skb))
- *hopbyhop = sizeof(struct hop_jumbo_hdr);
*lso_header_size = skb_tcp_all_headers(skb);
}
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
- ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
+ ALIGN(*lso_header_size + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
/* We add a segment for the skb linear buffer only if
* it contains data */
@@ -884,7 +876,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int desc_size;
int real_size;
u32 index, bf_index;
- struct ipv6hdr *h6;
__be32 op_own;
int lso_header_size;
void *fragptr = NULL;
@@ -893,7 +884,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool stop_queue;
bool inline_ok;
u8 data_offset;
- int hopbyhop;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
@@ -903,7 +893,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
- &inline_ok, &fragptr, &hopbyhop);
+ &inline_ok, &fragptr);
if (unlikely(!real_size))
goto tx_drop_count;
@@ -956,7 +946,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
data = &tx_desc->data;
data_offset = offsetof(struct mlx4_en_tx_desc, data);
} else {
- int lso_align = ALIGN(lso_header_size - hopbyhop + 4, DS_SIZE);
+ int lso_align = ALIGN(lso_header_size + 4, DS_SIZE);
data = (void *)&tx_desc->lso + lso_align;
data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
@@ -1021,31 +1011,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
- lso_header_size -= hopbyhop;
/* Fill in the LSO prefix */
tx_desc->lso.mss_hdr_size = cpu_to_be32(
shinfo->gso_size << 16 | lso_header_size);
+ /* Copy headers;
+ * note that we already verified that it is linear
+ */
+ memcpy(tx_desc->lso.header, skb->data, lso_header_size);
- if (unlikely(hopbyhop)) {
- /* remove the HBH header.
- * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
- */
- memcpy(tx_desc->lso.header, skb->data, ETH_HLEN + sizeof(*h6));
- h6 = (struct ipv6hdr *)((char *)tx_desc->lso.header + ETH_HLEN);
- h6->nexthdr = IPPROTO_TCP;
- /* Copy the TCP header after the IPv6 one */
- memcpy(h6 + 1,
- skb->data + ETH_HLEN + sizeof(*h6) +
- sizeof(struct hop_jumbo_hdr),
- tcp_hdrlen(skb));
- /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
- } else {
- /* Copy headers;
- * note that we already verified that it is linear
- */
- memcpy(tx_desc->lso.header, skb->data, lso_header_size);
- }
ring->tso_packets++;
i = shinfo->gso_segs;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 1e5522a19483..3981dd81d4c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -9,7 +9,9 @@
*/
struct mlx5_dpll {
struct dpll_device *dpll;
+ dpll_tracker dpll_tracker;
struct dpll_pin *dpll_pin;
+ dpll_tracker pin_tracker;
struct mlx5_core_dev *mdev;
struct workqueue_struct *wq;
struct delayed_work work;
@@ -136,7 +138,7 @@ mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status,
{
if (!synce_status->oper_freq_measure)
return -ENODATA;
- *ffo = synce_status->frequency_diff;
+ *ffo = 1000000LL * synce_status->frequency_diff;
return 0;
}
@@ -438,7 +440,8 @@ static int mlx5_dpll_probe(struct auxiliary_device *adev,
auxiliary_set_drvdata(adev, mdpll);
/* Multiple mdev instances might share one DPLL device. */
- mdpll->dpll = dpll_device_get(clock_id, 0, THIS_MODULE);
+ mdpll->dpll = dpll_device_get(clock_id, 0, THIS_MODULE,
+ &mdpll->dpll_tracker);
if (IS_ERR(mdpll->dpll)) {
err = PTR_ERR(mdpll->dpll);
goto err_free_mdpll;
@@ -451,7 +454,8 @@ static int mlx5_dpll_probe(struct auxiliary_device *adev,
/* Multiple mdev instances might share one DPLL pin. */
mdpll->dpll_pin = dpll_pin_get(clock_id, mlx5_get_dev_index(mdev),
- THIS_MODULE, &mlx5_dpll_pin_properties);
+ THIS_MODULE, &mlx5_dpll_pin_properties,
+ &mdpll->pin_tracker);
if (IS_ERR(mdpll->dpll_pin)) {
err = PTR_ERR(mdpll->dpll_pin);
goto err_unregister_dpll_device;
@@ -479,11 +483,11 @@ err_unregister_dpll_pin:
dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
&mlx5_dpll_pins_ops, mdpll);
err_put_dpll_pin:
- dpll_pin_put(mdpll->dpll_pin);
+ dpll_pin_put(mdpll->dpll_pin, &mdpll->pin_tracker);
err_unregister_dpll_device:
dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
err_put_dpll_device:
- dpll_device_put(mdpll->dpll);
+ dpll_device_put(mdpll->dpll, &mdpll->dpll_tracker);
err_free_mdpll:
kfree(mdpll);
return err;
@@ -499,9 +503,9 @@ static void mlx5_dpll_remove(struct auxiliary_device *adev)
destroy_workqueue(mdpll->wq);
dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
&mlx5_dpll_pins_ops, mdpll);
- dpll_pin_put(mdpll->dpll_pin);
+ dpll_pin_put(mdpll->dpll_pin, &mdpll->pin_tracker);
dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
- dpll_device_put(mdpll->dpll);
+ dpll_device_put(mdpll->dpll, &mdpll->dpll_tracker);
kfree(mdpll);
mlx5_dpll_synce_status_set(mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index d000236ddbac..15cb27aea2c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "ecpf.h"
+#include "eswitch.h"
bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
{
@@ -49,7 +50,7 @@ static int mlx5_host_pf_init(struct mlx5_core_dev *dev)
/* ECPF shall enable HCA for host PF in the same way a PF
* does this for its VFs when ECPF is not a eswitch manager.
*/
- err = mlx5_cmd_host_pf_enable_hca(dev);
+ err = mlx5_esw_host_pf_enable_hca(dev);
if (err)
mlx5_core_err(dev, "Failed to enable external host PF HCA err(%d)\n", err);
@@ -63,7 +64,7 @@ static void mlx5_host_pf_cleanup(struct mlx5_core_dev *dev)
if (mlx5_ecpf_esw_admins_host_pf(dev))
return;
- err = mlx5_cmd_host_pf_disable_hca(dev);
+ err = mlx5_esw_host_pf_disable_hca(dev);
if (err) {
mlx5_core_err(dev, "Failed to disable external host PF HCA err(%d)\n", err);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index ff4ab4691baf..a7de3a3efc49 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -82,9 +82,10 @@ struct page_pool;
#define MLX5E_RX_MAX_HEAD (256)
#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8)
-#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
-#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
-#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
+#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE \
+ (PAGE_SIZE >> MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE)
+#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE \
+ (PAGE_SHIFT - MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE)
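
Editor's note: with 4 KiB pages this works out to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE = 4096 >> 8 = 16 header entries per page and MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE = 12 - 8 = 4, now derived from the single remaining 2^8-byte entry size.
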
#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT (6)
#define MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT (12)
#define MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE (16)
@@ -388,6 +389,7 @@ enum {
MLX5E_SQ_STATE_DIM,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
+ MLX5E_SQ_STATE_LOCK_NEEDED,
MLX5E_NUM_SQ_STATES, /* Must be kept last */
};
@@ -545,6 +547,11 @@ struct mlx5e_icosq {
u32 sqn;
u16 reserved_room;
unsigned long state;
+ /* icosq can be accessed from any CPU and from different contexts
+ * (NAPI softirq or process/workqueue). Always use spin_lock_bh for
+ * simplicity and correctness across all contexts.
+ */
+ spinlock_t lock;
struct mlx5e_ktls_resync_resp *ktls_resync;
/* control path */
@@ -632,16 +639,11 @@ struct mlx5e_dma_info {
};
struct mlx5e_shampo_hd {
- struct mlx5e_frag_page *pages;
u32 hd_per_wq;
- u32 hd_per_page;
- u16 hd_per_wqe;
- u8 log_hd_per_page;
- u8 log_hd_entry_size;
- unsigned long *bitmap;
- u16 pi;
- u16 ci;
- __be32 mkey_be;
+ u32 hd_buf_size;
+ u32 mkey;
+ u32 nentries;
+ DECLARE_FLEX_ARRAY(struct mlx5e_dma_info, hd_buf_pages);
};
struct mlx5e_hw_gro_data {
@@ -776,9 +778,7 @@ struct mlx5e_channel {
struct mlx5e_xdpsq xsksq;
/* Async ICOSQ */
- struct mlx5e_icosq async_icosq;
- /* async_icosq can be accessed from any CPU - the spinlock protects it. */
- spinlock_t async_icosq_lock;
+ struct mlx5e_icosq *async_icosq;
/* data path - accessed per napi poll */
const struct cpumask *aff_mask;
@@ -801,6 +801,21 @@ struct mlx5e_channel {
struct dim_cq_moder tx_cq_moder;
};
+static inline bool mlx5e_icosq_sync_lock(struct mlx5e_icosq *sq)
+{
+ if (likely(!test_bit(MLX5E_SQ_STATE_LOCK_NEEDED, &sq->state)))
+ return false;
+
+ spin_lock_bh(&sq->lock);
+ return true;
+}
+
+static inline void mlx5e_icosq_sync_unlock(struct mlx5e_icosq *sq, bool locked)
+{
+ if (unlikely(locked))
+ spin_unlock_bh(&sq->lock);
+}
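
Editor's note, a sketch of the intended split, under the assumption that MLX5E_SQ_STATE_LOCK_NEEDED is set before the queue can race (the kTLS-RX path reopens the channels when first enabling the feature for exactly this reason): hot NAPI posting stays lock-free until a cross-context user exists, while slow-path users always take the lock.

/* Hot path (NAPI): conditional lock, a no-op in the common case */
static void icosq_post_napi(struct mlx5e_icosq *sq)
{
	bool locked = mlx5e_icosq_sync_lock(sq);

	/* ... reserve pi, build the WQE, advance sq->pc ... */

	mlx5e_icosq_sync_unlock(sq, locked);
}

/* Slow path (process/workqueue on any CPU): always lock */
static void icosq_post_slow(struct mlx5e_icosq *sq)
{
	spin_lock_bh(&sq->lock);
	/* ... post WQE, ring doorbell ... */
	spin_unlock_bh(&sq->lock);
}
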
+
struct mlx5e_ptp;
struct mlx5e_channels {
@@ -920,6 +935,7 @@ struct mlx5e_priv {
u8 max_opened_tc;
bool tx_ptp_opened;
bool rx_ptp_opened;
+ bool ktls_rx_was_enabled;
struct kernel_hwtstamp_config hwtstamp_config;
u16 q_counter[MLX5_SD_MAX_GROUP_SZ];
u16 drop_rq_q_counter;
@@ -1018,8 +1034,6 @@ void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode);
-void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len);
-void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
index 2c98a5299df3..6bd959f9083d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
@@ -29,6 +29,10 @@ struct mlx5e_dcbx {
u32 cable_len;
u32 xoff;
u16 port_buff_cell_sz;
+
+ /* Upper limits for the 100 Mbps and 1 Gbps rates, in Kbps units */
+ u64 upper_limit_100mbps;
+ u64 upper_limit_gbps;
};
#define MLX5E_MAX_DSCP (64)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index c9bdee9a8b30..8e99d07586fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -1068,26 +1068,6 @@ u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
return hd_per_wq;
}
-static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params,
- struct mlx5e_rq_param *rq_param)
-{
- int max_num_of_umr_per_wqe, max_hd_per_wqe, max_ksm_per_umr, rest;
- void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
- int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
- u32 wqebbs;
-
- max_ksm_per_umr = MLX5E_MAX_KSM_PER_WQE(mdev);
- max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
- max_num_of_umr_per_wqe = max_hd_per_wqe / max_ksm_per_umr;
- rest = max_hd_per_wqe % max_ksm_per_umr;
- wqebbs = MLX5E_KSM_UMR_WQEBBS(max_ksm_per_umr) * max_num_of_umr_per_wqe;
- if (rest)
- wqebbs += MLX5E_KSM_UMR_WQEBBS(rest);
- wqebbs *= wq_size;
- return wqebbs;
-}
-
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
@@ -1173,9 +1153,6 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
wqebbs += max_xsk_wqebbs;
}
- if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
- wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
-
/* UMR WQEs don't cross the page boundary, they are padded with NOPs.
* This padding is always smaller than the max WQE size. That gives us
* at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 9e2cf191ed30..4adc1adf9897 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -15,6 +15,7 @@ static const char * const sq_sw_state_type_name[] = {
[MLX5E_SQ_STATE_DIM] = "dim",
[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
+ [MLX5E_SQ_STATE_LOCK_NEEDED] = "lock_needed",
};
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 7e191e1569e8..f2a8453d8dce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -65,7 +65,6 @@ ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_
enum mlx5e_icosq_wqe_type {
MLX5E_ICOSQ_WQE_NOP,
MLX5E_ICOSQ_WQE_UMR_RX,
- MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
#ifdef CONFIG_MLX5_EN_TLS
MLX5E_ICOSQ_WQE_UMR_TLS,
MLX5E_ICOSQ_WQE_SET_PSV_TLS,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 2b05536d564a..4f984f6a2cb9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -23,6 +23,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &icosq->wq;
struct mlx5e_umr_wqe *umr_wqe;
struct xdp_buff **xsk_buffs;
+ bool sync_locked;
int batch, i;
u32 offset; /* 17-bit value with MTT. */
u16 pi;
@@ -47,6 +48,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
goto err_reuse_batch;
}
+ sync_locked = mlx5e_icosq_sync_lock(icosq);
pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
@@ -143,6 +145,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
};
icosq->pc += rq->mpwqe.umr_wqebbs;
+ mlx5e_icosq_sync_unlock(icosq, sync_locked);
icosq->doorbell_cseg = &umr_wqe->hdr.ctrl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index a59199ed590d..9e33156fac8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -26,10 +26,12 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
* active and not polled by NAPI. Return 0, because the upcoming
* activate will trigger the IRQ for us.
*/
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->async_icosq.state)))
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED,
+ &c->async_icosq->state)))
return 0;
- if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
+ if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX,
+ &c->async_icosq->state))
return 0;
mlx5e_trigger_napi_icosq(c);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 8bef99e8367e..b526b3898c22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -100,20 +100,6 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
#endif /* CONFIG_GENEVE */
-static inline void
-mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
-{
- int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
- struct udphdr *udphdr;
-
- if (skb->encapsulation)
- udphdr = (struct udphdr *)skb_inner_transport_header(skb);
- else
- udphdr = udp_hdr(skb);
-
- udphdr->len = htons(payload_len);
-}
-
struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_tx_tls_state tls;
@@ -131,9 +117,6 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
struct sk_buff *skb,
struct mlx5e_accel_tx_state *state)
{
- if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
- mlx5e_udp_gso_handle_tx_skb(skb);
-
#ifdef CONFIG_MLX5_EN_TLS
/* May send WQEs. */
if (tls_is_skb_tx_device_offloaded(skb))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index e3e57c849436..1c2cc2aad2b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -135,10 +135,15 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
int err = 0;
mutex_lock(&priv->state_lock);
- if (enable)
+ if (enable) {
err = mlx5e_accel_fs_tcp_create(priv->fs);
- else
+ if (!err && !priv->ktls_rx_was_enabled) {
+ priv->ktls_rx_was_enabled = true;
+ mlx5e_safe_reopen_channels(priv);
+ }
+ } else {
mlx5e_accel_fs_tcp_destroy(priv->fs);
+ }
mutex_unlock(&priv->state_lock);
return err;
@@ -161,6 +166,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
destroy_workqueue(priv->tls->rx_wq);
return err;
}
+ priv->ktls_rx_was_enabled = true;
}
return 0;
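
The enable path now latches ktls_rx_was_enabled and reopens the channels only on the first transition, which is what gets the on-demand async ICOSQ allocated for channels that were opened without one. A toy sketch of that latch-once shape, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct priv {
	bool ktls_rx_was_enabled;	/* latch: set once, never cleared */
};

/* Stand-in for the expensive channel reopen. */
static void reopen_channels(struct priv *p)
{
	puts("reopening channels to allocate the async ICOSQ");
}

static int ktls_rx_enable(struct priv *p, bool enable)
{
	if (enable && !p->ktls_rx_was_enabled) {
		p->ktls_rx_was_enabled = true;
		reopen_channels(p);	/* only on the first enable */
	}
	return 0;
}

int main(void)
{
	struct priv p = { 0 };

	ktls_rx_enable(&p, true);	/* triggers reopen */
	ktls_rx_enable(&p, true);	/* no-op: already latched */
	return 0;
}
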
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index da2d1eb52c13..5d8fe252799e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -202,8 +202,8 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
int err;
err = 0;
- sq = &c->async_icosq;
- spin_lock_bh(&c->async_icosq_lock);
+ sq = c->async_icosq;
+ spin_lock_bh(&sq->lock);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg))
@@ -214,7 +214,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
- spin_unlock_bh(&c->async_icosq_lock);
+ spin_unlock_bh(&sq->lock);
return err;
@@ -277,10 +277,10 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
buf->priv_rx = priv_rx;
- spin_lock_bh(&sq->channel->async_icosq_lock);
+ spin_lock_bh(&sq->lock);
if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
- spin_unlock_bh(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->lock);
err = -ENOSPC;
goto err_dma_unmap;
}
@@ -311,7 +311,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
icosq_fill_wi(sq, pi, &wi);
sq->pc++;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
- spin_unlock_bh(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->lock);
return 0;
@@ -344,7 +344,7 @@ static void resync_handle_work(struct work_struct *work)
}
c = resync->priv->channels.c[priv_rx->rxq];
- sq = &c->async_icosq;
+ sq = c->async_icosq;
if (resync_post_get_progress_params(sq, priv_rx)) {
priv_rx->rq_stats->tls_resync_req_skip++;
@@ -371,7 +371,7 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
struct mlx5e_icosq *sq;
bool trigger_poll;
- sq = &c->async_icosq;
+ sq = c->async_icosq;
ktls_resync = sq->ktls_resync;
trigger_poll = false;
@@ -413,9 +413,9 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
return;
if (!napi_if_scheduled_mark_missed(&c->napi)) {
- spin_lock_bh(&c->async_icosq_lock);
+ spin_lock_bh(&sq->lock);
mlx5e_trigger_irq(sq);
- spin_unlock_bh(&c->async_icosq_lock);
+ spin_unlock_bh(&sq->lock);
}
}
@@ -753,7 +753,7 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
LIST_HEAD(local_list);
int i, j;
- sq = &c->async_icosq;
+ sq = c->async_icosq;
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
@@ -772,7 +772,7 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
spin_unlock(&ktls_resync->lock);
- spin_lock(&c->async_icosq_lock);
+ spin_lock(&sq->lock);
for (j = 0; j < i; j++) {
struct mlx5_wqe_ctrl_seg *cseg;
@@ -791,7 +791,7 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
}
if (db_cseg)
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock(&sq->lock);
priv_rx->rq_stats->tls_resync_res_ok += j;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index cb08799769ee..4022c7e78a2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -50,7 +50,8 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget);
static inline bool
mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
{
- return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
+ return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
+ &c->async_icosq->state);
}
static inline void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index fddf7c207f8e..4b86df6d5b9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -58,6 +58,20 @@ enum {
MLX5_DCB_CHG_NO_RESET,
};
+static const struct {
+ int scale;
+ const char *units_str;
+} mlx5e_bw_units[] = {
+ [MLX5_100_MBPS_UNIT] = {
+ .scale = 100,
+ .units_str = "Mbps",
+ },
+ [MLX5_GBPS_UNIT] = {
+ .scale = 1,
+ .units_str = "Gbps",
+ },
+};
+
#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
MLX5_CAP_QCAM_REG(mdev, qpts) && \
MLX5_CAP_QCAM_REG(mdev, qpdpm))
@@ -559,7 +573,7 @@ static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
+ u16 max_bw_value[IEEE_8021QAZ_MAX_TCS];
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
int err;
int i;
@@ -594,57 +608,41 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
+ u16 max_bw_value[IEEE_8021QAZ_MAX_TCS];
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
- u64 upper_limit_100mbps;
- u64 upper_limit_gbps;
int i;
- struct {
- int scale;
- const char *units_str;
- } units[] = {
- [MLX5_100_MBPS_UNIT] = {
- .scale = 100,
- .units_str = "Mbps",
- },
- [MLX5_GBPS_UNIT] = {
- .scale = 1,
- .units_str = "Gbps",
- },
- };
memset(max_bw_value, 0, sizeof(max_bw_value));
memset(max_bw_unit, 0, sizeof(max_bw_unit));
- upper_limit_100mbps = U8_MAX * MLX5E_100MB_TO_KB;
- upper_limit_gbps = U8_MAX * MLX5E_1GB_TO_KB;
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
- if (!maxrate->tc_maxrate[i]) {
+ u64 rate = maxrate->tc_maxrate[i];
+
+ if (!rate) {
max_bw_unit[i] = MLX5_BW_NO_LIMIT;
continue;
}
- if (maxrate->tc_maxrate[i] <= upper_limit_100mbps) {
- max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
- MLX5E_100MB_TO_KB);
+ if (rate <= priv->dcbx.upper_limit_100mbps) {
+ max_bw_value[i] = div_u64(rate, MLX5E_100MB_TO_KB);
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
- } else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) {
- max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
- MLX5E_1GB_TO_KB);
+ } else if (rate <= priv->dcbx.upper_limit_gbps) {
+ max_bw_value[i] = div_u64(rate, MLX5E_1GB_TO_KB);
max_bw_unit[i] = MLX5_GBPS_UNIT;
} else {
netdev_err(netdev,
"tc_%d maxrate %llu Kbps exceeds limit %llu\n",
- i, maxrate->tc_maxrate[i],
- upper_limit_gbps);
+ i, rate, priv->dcbx.upper_limit_gbps);
return -EINVAL;
}
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ u8 unit = max_bw_unit[i];
+
netdev_dbg(netdev, "%s: tc_%d <=> max_bw %u %s\n", __func__, i,
- max_bw_value[i] * units[max_bw_unit[i]].scale,
- units[max_bw_unit[i]].units_str);
+ max_bw_value[i] * mlx5e_bw_units[unit].scale,
+ mlx5e_bw_units[unit].units_str);
}
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
@@ -1268,6 +1266,8 @@ static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ bool max_bw_msb_supported;
+ u16 type_max;
mlx5e_trust_initialize(priv);
@@ -1285,5 +1285,11 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
+ max_bw_msb_supported = MLX5_CAP_QCAM_FEATURE(priv->mdev,
+ qetcr_qshr_max_bw_val_msb);
+ type_max = max_bw_msb_supported ? U16_MAX : U8_MAX;
+ priv->dcbx.upper_limit_100mbps = type_max * MLX5E_100MB_TO_KB;
+ priv->dcbx.upper_limit_gbps = type_max * MLX5E_1GB_TO_KB;
+
mlx5e_ets_init(priv);
}
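
The setmaxrate rework above keeps the value/unit split but widens the value to u16 and reads the ceilings from fields precomputed at init time: rates that fit in 100 Mbps units use the finer granularity, larger ones fall back to Gbps units, and anything above the Gbps ceiling is rejected. A self-contained sketch of that selection; the *_TO_KB constants are assumed values (100 Mbit and 1 Gbit expressed in Kbit), not copied from the driver headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed values: 100 Mbit and 1 Gbit in Kbit. */
#define MB100_TO_KB 100000ULL
#define GB_TO_KB   1000000ULL

enum bw_unit { BW_NO_LIMIT, BW_100_MBPS, BW_GBPS };

/* Encode a Kbps rate as a (value, unit) pair: prefer the finer
 * 100 Mbps granularity while the value fits, fall back to Gbps,
 * reject anything above the Gbps ceiling.
 */
static int encode_rate(uint64_t rate_kbps, uint64_t type_max,
		       uint16_t *val, enum bw_unit *unit)
{
	if (!rate_kbps) {
		*unit = BW_NO_LIMIT;
		return 0;
	}
	if (rate_kbps <= type_max * MB100_TO_KB) {
		*val = (uint16_t)(rate_kbps / MB100_TO_KB);
		if (!*val)
			*val = 1;	/* round sub-100Mbps rates up */
		*unit = BW_100_MBPS;
		return 0;
	}
	if (rate_kbps <= type_max * GB_TO_KB) {
		*val = (uint16_t)(rate_kbps / GB_TO_KB);
		*unit = BW_GBPS;
		return 0;
	}
	return -1;	/* exceeds the largest encodable rate */
}

int main(void)
{
	uint16_t val;
	enum bw_unit unit;

	/* type_max is U8_MAX or U16_MAX depending on the MSB capability. */
	if (!encode_rate(2500000, UINT16_MAX, &val, &unit))
		printf("2.5 Gbps -> value %u, unit %d\n", val, unit);
	return 0;
}

Hoisting the units table to file scope and caching the two ceilings in priv->dcbx keeps the per-call path to a compare and a divide.
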
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d3fef1e7e2f7..4a8dc85d5924 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -261,7 +261,7 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT,
ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT,
ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1600TAUI_8_1600TBASE_CR8_KR8, ext,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1600GAUI_8_1600GBASE_CR8_KR8, ext,
ETHTOOL_LINK_MODE_1600000baseCR8_Full_BIT,
ETHTOOL_LINK_MODE_1600000baseKR8_Full_BIT,
ETHTOOL_LINK_MODE_1600000baseDR8_Full_BIT,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 4b2963bbe7ff..4b8084420816 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -492,40 +492,6 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
return err;
}
-static int mlx5e_create_umr_ksm_mkey(struct mlx5_core_dev *mdev,
- u64 nentries, u8 log_entry_size,
- u32 *umr_mkey)
-{
- int inlen;
- void *mkc;
- u32 *in;
- int err;
-
- inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
-
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
-
- MLX5_SET(mkc, mkc, free, 1);
- MLX5_SET(mkc, mkc, umr_en, 1);
- MLX5_SET(mkc, mkc, lw, 1);
- MLX5_SET(mkc, mkc, lr, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KSM);
- mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
- MLX5_SET(mkc, mkc, translations_octword_size, nentries);
- MLX5_SET(mkc, mkc, log_page_size, log_entry_size);
- MLX5_SET64(mkc, mkc, len, nentries << log_entry_size);
- err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
-
- kvfree(in);
- return err;
-}
-
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;
@@ -551,29 +517,6 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
return err;
}
-static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
- u16 hd_per_wq, __be32 *umr_mkey)
-{
- u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
- u32 mkey;
- int err;
-
- if (max_ksm_size < hd_per_wq) {
- mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
- max_ksm_size, hd_per_wq);
- return -EINVAL;
- }
-
- err = mlx5e_create_umr_ksm_mkey(mdev, hd_per_wq,
- MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
- &mkey);
- if (err)
- return err;
-
- *umr_mkey = cpu_to_be32(mkey);
- return 0;
-}
-
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
struct mlx5e_wqe_frag_info next_frag = {};
@@ -754,145 +697,169 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
xdp_frag_size);
}
-static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, u16 hd_per_wq,
- int node)
+static void mlx5e_release_rq_hd_pages(struct mlx5e_rq *rq,
+ struct mlx5e_shampo_hd *shampo)
+{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ for (int i = 0; i < shampo->nentries; i++) {
+ struct mlx5e_dma_info *info = &shampo->hd_buf_pages[i];
- shampo->hd_per_wq = hd_per_wq;
+ if (!info->page)
+ continue;
- shampo->bitmap = bitmap_zalloc_node(hd_per_wq, GFP_KERNEL, node);
- shampo->pages = kvzalloc_node(array_size(hd_per_wq,
- sizeof(*shampo->pages)),
- GFP_KERNEL, node);
- if (!shampo->bitmap || !shampo->pages)
- goto err_nomem;
+ dma_unmap_page(rq->pdev, info->addr, PAGE_SIZE,
+ rq->buff.map_dir);
+ __free_page(info->page);
+ }
+}
+
+static int mlx5e_alloc_rq_hd_pages(struct mlx5e_rq *rq, int node,
+ struct mlx5e_shampo_hd *shampo)
+{
+ int err, i;
+
+ for (i = 0; i < shampo->nentries; i++) {
+ struct page *page = alloc_pages_node(node, GFP_KERNEL, 0);
+ dma_addr_t addr;
+
+ if (!page) {
+ err = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
+ rq->buff.map_dir);
+ err = dma_mapping_error(rq->pdev, addr);
+ if (err) {
+ __free_page(page);
+ goto err_free_pages;
+ }
+
+ shampo->hd_buf_pages[i].page = page;
+ shampo->hd_buf_pages[i].addr = addr;
+ }
return 0;
-err_nomem:
- kvfree(shampo->pages);
- bitmap_free(shampo->bitmap);
+err_free_pages:
+ mlx5e_release_rq_hd_pages(rq, shampo);
- return -ENOMEM;
+ return err;
}
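
mlx5e_alloc_rq_hd_pages()/mlx5e_release_rq_hd_pages() above follow the usual all-or-nothing pattern: fill a zero-initialized array slot by slot and, on any failure, release every slot filled so far (the release helper skips NULL entries, so it is safe on a partially filled array). A plain-C sketch of that shape; malloc/free stand in for page allocation and DMA mapping:

#include <stdio.h>
#include <stdlib.h>

struct dma_info {
	void *page;	/* NULL marks a slot that was never filled */
};

/* Safe on partially filled arrays: skips empty slots, like the
 * release helper in the hunk. The array must start zeroed.
 */
static void release_pages(struct dma_info *pages, int n)
{
	for (int i = 0; i < n; i++) {
		if (!pages[i].page)
			continue;
		free(pages[i].page);
		pages[i].page = NULL;
	}
}

static int alloc_pages_array(struct dma_info *pages, int n)
{
	for (int i = 0; i < n; i++) {
		pages[i].page = malloc(4096);
		if (!pages[i].page) {
			release_pages(pages, n);	/* unwinds slots [0, i) */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct dma_info pages[8] = { 0 };

	if (!alloc_pages_array(pages, 8))
		puts("all slots mapped");
	release_pages(pages, 8);
	return 0;
}
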
-static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
+static int mlx5e_create_rq_hd_mkey(struct mlx5_core_dev *mdev,
+ struct mlx5e_shampo_hd *shampo)
{
- kvfree(rq->mpwqe.shampo->pages);
- bitmap_free(rq->mpwqe.shampo->bitmap);
+ enum mlx5e_mpwrq_umr_mode umr_mode = MLX5E_MPWRQ_UMR_MODE_ALIGNED;
+ struct mlx5_mtt *mtt;
+ void *mkc, *in;
+ int inlen, err;
+ u32 octwords;
+
+ octwords = mlx5e_mpwrq_umr_octowords(shampo->nentries, umr_mode);
+ inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
+ MLX5_OCTWORD, octwords);
+ if (inlen < 0)
+ return inlen;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET64(mkc, mkc, len, shampo->hd_buf_size);
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
+ MLX5_SET(mkc, mkc, translations_octword_size, octwords);
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ octwords);
+
+ mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (int i = 0; i < shampo->nentries; i++)
+ mtt[i].ptag = cpu_to_be64(shampo->hd_buf_pages[i].addr);
+
+ err = mlx5_core_create_mkey(mdev, &shampo->mkey, in, inlen);
+
+ kvfree(in);
+ return err;
}
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
struct mlx5e_rq *rq,
- u32 *pool_size,
int node)
{
- void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
- u8 log_hd_per_page, log_hd_entry_size;
- u16 hd_per_wq, hd_per_wqe;
- u32 hd_pool_size;
- int wq_size;
- int err;
+ struct mlx5e_shampo_hd *shampo;
+ int nentries, err, shampo_sz;
+ u32 hd_per_wq, hd_buf_size;
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
return 0;
- rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
- GFP_KERNEL, node);
- if (!rq->mpwqe.shampo)
- return -ENOMEM;
-
- /* split headers data structures */
hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rqp);
- err = mlx5e_rq_shampo_hd_info_alloc(rq, hd_per_wq, node);
- if (err)
- goto err_shampo_hd_info_alloc;
-
- err = mlx5e_create_rq_hd_umr_mkey(mdev, hd_per_wq,
- &rq->mpwqe.shampo->mkey_be);
- if (err)
- goto err_umr_mkey;
-
- hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
- wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
-
- BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
- if (hd_per_wqe >= MLX5E_SHAMPO_WQ_HEADER_PER_PAGE) {
- log_hd_per_page = MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE;
- log_hd_entry_size = MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
- } else {
- log_hd_per_page = order_base_2(hd_per_wqe);
- log_hd_entry_size = order_base_2(PAGE_SIZE / hd_per_wqe);
+ hd_buf_size = hd_per_wq * BIT(MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE);
+ nentries = hd_buf_size / PAGE_SIZE;
+ if (!nentries) {
+ mlx5_core_err(mdev, "SHAMPO header buffer size %u < %lu\n",
+ hd_buf_size, PAGE_SIZE);
+ return -EINVAL;
}
- rq->mpwqe.shampo->hd_per_wqe = hd_per_wqe;
- rq->mpwqe.shampo->hd_per_page = BIT(log_hd_per_page);
- rq->mpwqe.shampo->log_hd_per_page = log_hd_per_page;
- rq->mpwqe.shampo->log_hd_entry_size = log_hd_entry_size;
-
- hd_pool_size = (hd_per_wqe * wq_size) >> log_hd_per_page;
-
- if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) {
- /* Separate page pool for shampo headers */
- struct page_pool_params pp_params = { };
+ shampo_sz = struct_size(shampo, hd_buf_pages, nentries);
+ shampo = kvzalloc_node(shampo_sz, GFP_KERNEL, node);
+ if (!shampo)
+ return -ENOMEM;
- pp_params.order = 0;
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- pp_params.pool_size = hd_pool_size;
- pp_params.nid = node;
- pp_params.dev = rq->pdev;
- pp_params.napi = rq->cq.napi;
- pp_params.netdev = rq->netdev;
- pp_params.dma_dir = rq->buff.map_dir;
- pp_params.max_len = PAGE_SIZE;
+ shampo->hd_per_wq = hd_per_wq;
+ shampo->hd_buf_size = hd_buf_size;
+ shampo->nentries = nentries;
+ err = mlx5e_alloc_rq_hd_pages(rq, node, shampo);
+ if (err)
+ goto err_free;
- rq->hd_page_pool = page_pool_create(&pp_params);
- if (IS_ERR(rq->hd_page_pool)) {
- err = PTR_ERR(rq->hd_page_pool);
- rq->hd_page_pool = NULL;
- goto err_hds_page_pool;
- }
- } else {
- /* Common page pool, reserve space for headers. */
- *pool_size += hd_pool_size;
- rq->hd_page_pool = NULL;
- }
+ err = mlx5e_create_rq_hd_mkey(mdev, shampo);
+ if (err)
+ goto err_release_pages;
/* gro only data structures */
rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
if (!rq->hw_gro_data) {
err = -ENOMEM;
- goto err_hw_gro_data;
+ goto err_destroy_mkey;
}
+ rq->mpwqe.shampo = shampo;
+
return 0;
-err_hw_gro_data:
- page_pool_destroy(rq->hd_page_pool);
-err_hds_page_pool:
- mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.shampo->mkey_be));
-err_umr_mkey:
- mlx5e_rq_shampo_hd_info_free(rq);
-err_shampo_hd_info_alloc:
- kvfree(rq->mpwqe.shampo);
+err_destroy_mkey:
+ mlx5_core_destroy_mkey(mdev, shampo->mkey);
+err_release_pages:
+ mlx5e_release_rq_hd_pages(rq, shampo);
+err_free:
+ kvfree(shampo);
+
return err;
}
static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
{
- if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+ if (!shampo)
return;
kvfree(rq->hw_gro_data);
- if (rq->hd_page_pool != rq->page_pool)
- page_pool_destroy(rq->hd_page_pool);
- mlx5e_rq_shampo_hd_info_free(rq);
- mlx5_core_destroy_mkey(rq->mdev,
- be32_to_cpu(rq->mpwqe.shampo->mkey_be));
- kvfree(rq->mpwqe.shampo);
+ mlx5_core_destroy_mkey(rq->mdev, shampo->mkey);
+ mlx5e_release_rq_hd_pages(rq, shampo);
+ kvfree(shampo);
}
static int mlx5e_alloc_rq(struct mlx5e_params *params,
@@ -970,7 +937,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (err)
goto err_rq_mkey;
- err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
+ err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, node);
if (err)
goto err_free_mpwqe_info;
@@ -1165,8 +1132,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_cou
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
MLX5_SET(wq, wq, log_headers_buffer_entry_num,
order_base_2(rq->mpwqe.shampo->hd_per_wq));
- MLX5_SET(wq, wq, headers_mkey,
- be32_to_cpu(rq->mpwqe.shampo->mkey_be));
+ MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
}
mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
@@ -1326,14 +1292,6 @@ void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
rq->mpwqe.actual_wq_head = wq->head;
rq->mpwqe.umr_in_progress = 0;
rq->mpwqe.umr_completed = 0;
-
- if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 len;
-
- len = (shampo->pi - shampo->ci) & shampo->hd_per_wq;
- mlx5e_shampo_fill_umr(rq, len);
- }
}
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
@@ -1356,9 +1314,6 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
mlx5_wq_ll_pop(wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
-
- if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
- mlx5e_shampo_dealloc_hd(rq);
} else {
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
u16 missing = mlx5_wq_cyc_missing(wq);
@@ -2075,6 +2030,8 @@ static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params
if (err)
goto err_free_icosq;
+ spin_lock_init(&sq->lock);
+
if (param->is_tls) {
sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list();
if (IS_ERR(sq->ktls_resync)) {
@@ -2587,9 +2544,51 @@ static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq);
}
+static struct mlx5e_icosq *
+mlx5e_open_async_icosq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_channel_param *cparam,
+ struct mlx5e_create_cq_param *ccp)
+{
+ struct dim_cq_moder icocq_moder = {0, 0};
+ struct mlx5e_icosq *async_icosq;
+ int err;
+
+ async_icosq = kvzalloc_node(sizeof(*async_icosq), GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!async_icosq)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, ccp,
+ &async_icosq->cq);
+ if (err)
+ goto err_free_async_icosq;
+
+ err = mlx5e_open_icosq(c, params, &cparam->async_icosq, async_icosq,
+ mlx5e_async_icosq_err_cqe_work);
+ if (err)
+ goto err_close_async_icosq_cq;
+
+ return async_icosq;
+
+err_close_async_icosq_cq:
+ mlx5e_close_cq(&async_icosq->cq);
+err_free_async_icosq:
+ kvfree(async_icosq);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_close_async_icosq(struct mlx5e_icosq *async_icosq)
+{
+ mlx5e_close_icosq(async_icosq);
+ mlx5e_close_cq(&async_icosq->cq);
+ kvfree(async_icosq);
+}
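
The async ICOSQ is now heap-allocated on demand, and the open helper reports failure ERR_PTR-style through the returned pointer while unwinding whichever stages already succeeded. A userspace approximation of that convention; the err_ptr/is_err helpers below emulate the kernel macros and the failure stage is simulated:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Encode small negative errnos in the pointer value, as the kernel's
 * ERR_PTR/IS_ERR do, so one pointer carries object-or-error.
 */
#define MAX_ERRNO 4095

static void *err_ptr(long err)      { return (void *)err; }
static long  ptr_err(const void *p) { return (long)p; }
static int   is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct icosq { int id; };

static struct icosq *open_async_icosq(int want_fail)
{
	struct icosq *sq = calloc(1, sizeof(*sq));

	if (!sq)
		return err_ptr(-ENOMEM);
	if (want_fail) {	/* a later setup stage failed */
		free(sq);	/* unwind what was built so far */
		return err_ptr(-EIO);
	}
	return sq;
}

int main(void)
{
	struct icosq *sq = open_async_icosq(0);

	if (is_err(sq))
		return (int)-ptr_err(sq);
	printf("opened icosq %d\n", sq->id);
	free(sq);
	return 0;
}

Returning the error through the pointer keeps the caller down to one assignment (c->async_icosq = mlx5e_open_async_icosq(...)) followed by a single IS_ERR check.
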
+
static int mlx5e_open_queues(struct mlx5e_channel *c,
struct mlx5e_params *params,
- struct mlx5e_channel_param *cparam)
+ struct mlx5e_channel_param *cparam,
+ bool async_icosq_needed)
{
const struct net_device_ops *netdev_ops = c->netdev->netdev_ops;
struct dim_cq_moder icocq_moder = {0, 0};
@@ -2598,15 +2597,10 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
mlx5e_build_create_cq_param(&ccp, c);
- err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, &ccp,
- &c->async_icosq.cq);
- if (err)
- return err;
-
err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->icosq.cqp, &ccp,
&c->icosq.cq);
if (err)
- goto err_close_async_icosq_cq;
+ return err;
err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
if (err)
@@ -2630,12 +2624,14 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_rx_cq;
- spin_lock_init(&c->async_icosq_lock);
-
- err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
- mlx5e_async_icosq_err_cqe_work);
- if (err)
- goto err_close_rq_xdpsq_cq;
+ if (async_icosq_needed) {
+ c->async_icosq = mlx5e_open_async_icosq(c, params, cparam,
+ &ccp);
+ if (IS_ERR(c->async_icosq)) {
+ err = PTR_ERR(c->async_icosq);
+ goto err_close_rq_xdpsq_cq;
+ }
+ }
mutex_init(&c->icosq_recovery_lock);
@@ -2671,7 +2667,8 @@ err_close_icosq:
mlx5e_close_icosq(&c->icosq);
err_close_async_icosq:
- mlx5e_close_icosq(&c->async_icosq);
+ if (c->async_icosq)
+ mlx5e_close_async_icosq(c->async_icosq);
err_close_rq_xdpsq_cq:
if (c->xdp)
@@ -2690,9 +2687,6 @@ err_close_tx_cqs:
err_close_icosq_cq:
mlx5e_close_cq(&c->icosq.cq);
-err_close_async_icosq_cq:
- mlx5e_close_cq(&c->async_icosq.cq);
-
return err;
}
@@ -2706,7 +2700,8 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
mutex_destroy(&c->icosq_recovery_lock);
- mlx5e_close_icosq(&c->async_icosq);
+ if (c->async_icosq)
+ mlx5e_close_async_icosq(c->async_icosq);
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
@@ -2714,7 +2709,6 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
mlx5e_close_xdpredirect_sq(c->xdpsq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
- mlx5e_close_cq(&c->async_icosq.cq);
}
static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
@@ -2750,9 +2744,16 @@ static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
{
- spin_lock_bh(&c->async_icosq_lock);
- mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock_bh(&c->async_icosq_lock);
+ bool locked;
+
+ if (!test_and_set_bit(MLX5E_SQ_STATE_LOCK_NEEDED, &c->icosq.state))
+ synchronize_net();
+
+ locked = mlx5e_icosq_sync_lock(&c->icosq);
+ mlx5e_trigger_irq(&c->icosq);
+ mlx5e_icosq_sync_unlock(&c->icosq, locked);
+
+ clear_bit(MLX5E_SQ_STATE_LOCK_NEEDED, &c->icosq.state);
}
void mlx5e_trigger_napi_sched(struct napi_struct *napi)
@@ -2785,6 +2786,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam;
struct mlx5_core_dev *mdev;
struct mlx5e_xsk_param xsk;
+ bool async_icosq_needed;
struct mlx5e_channel *c;
unsigned int irq;
int vec_ix;
@@ -2834,7 +2836,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix);
netif_napi_set_irq_locked(&c->napi, irq);
- err = mlx5e_open_queues(c, params, cparam);
+ async_icosq_needed = !!xsk_pool || priv->ktls_rx_was_enabled;
+ err = mlx5e_open_queues(c, params, cparam, async_icosq_needed);
if (unlikely(err))
goto err_napi_del;
@@ -2872,7 +2875,8 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_icosq(&c->icosq);
- mlx5e_activate_icosq(&c->async_icosq);
+ if (c->async_icosq)
+ mlx5e_activate_icosq(c->async_icosq);
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_activate_xsk(c);
@@ -2893,7 +2897,8 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
else
mlx5e_deactivate_rq(&c->rq);
- mlx5e_deactivate_icosq(&c->async_icosq);
+ if (c->async_icosq)
+ mlx5e_deactivate_icosq(c->async_icosq);
mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->sq[tc]);
@@ -4666,7 +4671,6 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_params new_params;
struct mlx5e_params *params;
- bool reset = true;
int err = 0;
mutex_lock(&priv->state_lock);
@@ -4692,28 +4696,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
goto out;
}
- if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
- reset = false;
-
- if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
- params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO) {
- bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
- bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
- &new_params, NULL);
- u8 sz_old = mlx5e_mpwqe_get_log_rq_size(priv->mdev, params, NULL);
- u8 sz_new = mlx5e_mpwqe_get_log_rq_size(priv->mdev, &new_params, NULL);
-
- /* Always reset in linear mode - hw_mtu is used in data path.
- * Check that the mode was non-linear and didn't change.
- * If XSK is active, XSK RQs are linear.
- * Reset if the RQ size changed, even if it's non-linear.
- */
- if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt &&
- sz_old == sz_new)
- reset = false;
- }
-
- err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset);
+ err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL,
+ true);
out:
WRITE_ONCE(netdev->mtu, params->sw_mtu);
@@ -5139,7 +5123,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
netdev_get_tx_queue(netdev, i);
struct mlx5e_txqsq *sq = priv->txq2sq[i];
- if (!netif_xmit_stopped(dev_queue))
+ if (!netif_xmit_timeout_ms(dev_queue))
continue;
if (mlx5e_reporter_tx_timeout(sq))
@@ -5598,8 +5582,9 @@ struct mlx5_qmgmt_data {
struct mlx5e_channel_param cparam;
};
-static int mlx5e_queue_mem_alloc(struct net_device *dev, void *newq,
- int queue_index)
+static int mlx5e_queue_mem_alloc(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *newq, int queue_index)
{
struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -5660,8 +5645,9 @@ static int mlx5e_queue_stop(struct net_device *dev, void *oldq, int queue_index)
return 0;
}
-static int mlx5e_queue_start(struct net_device *dev, void *newq,
- int queue_index)
+static int mlx5e_queue_start(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *newq, int queue_index)
{
struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -5800,8 +5786,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM;
- netdev->gso_partial_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ netdev->vlan_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
@@ -5815,6 +5801,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1f6930c77437..efcfcddab376 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -611,165 +611,6 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
mlx5_wq_ll_update_db_record(wq);
}
-/* This function returns the size of the continuous free space inside a bitmap
- * that starts from first and no longer than len including circular ones.
- */
-static int bitmap_find_window(unsigned long *bitmap, int len,
- int bitmap_size, int first)
-{
- int next_one, count;
-
- next_one = find_next_bit(bitmap, bitmap_size, first);
- if (next_one == bitmap_size) {
- if (bitmap_size - first >= len)
- return len;
- next_one = find_next_bit(bitmap, bitmap_size, 0);
- count = next_one + bitmap_size - first;
- } else {
- count = next_one - first;
- }
-
- return min(len, count);
-}
-
-static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
- __be32 key, u16 offset, u16 ksm_len)
-{
- memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
- umr_wqe->hdr.ctrl.opmod_idx_opcode =
- cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
- MLX5_OPCODE_UMR);
- umr_wqe->hdr.ctrl.umr_mkey = key;
- umr_wqe->hdr.ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
- | MLX5E_KSM_UMR_DS_CNT(ksm_len));
- umr_wqe->hdr.uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
- umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
- umr_wqe->hdr.uctrl.xlt_octowords = cpu_to_be16(ksm_len);
- umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-}
-
-static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq,
- int header_index)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-
- return &shampo->pages[header_index >> shampo->log_hd_per_page];
-}
-
-static u64 mlx5e_shampo_hd_offset(struct mlx5e_rq *rq, int header_index)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u32 hd_per_page = shampo->hd_per_page;
-
- return (header_index & (hd_per_page - 1)) << shampo->log_hd_entry_size;
-}
-
-static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index);
-
-static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
- struct mlx5e_icosq *sq,
- u16 ksm_entries, u16 index)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 pi, header_offset, err, wqe_bbs;
- u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
- struct mlx5e_umr_wqe *umr_wqe;
- int headroom, i;
-
- headroom = rq->buff.headroom;
- wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
- pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
- umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
- build_ksm_umr(sq, umr_wqe, shampo->mkey_be, index, ksm_entries);
-
- for (i = 0; i < ksm_entries; i++, index++) {
- struct mlx5e_frag_page *frag_page;
- u64 addr;
-
- frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
- header_offset = mlx5e_shampo_hd_offset(rq, index);
- if (!header_offset) {
- err = mlx5e_page_alloc_fragmented(rq->hd_page_pool,
- frag_page);
- if (err)
- goto err_unmap;
- }
-
- addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
- umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
- .key = cpu_to_be32(lkey),
- .va = cpu_to_be64(addr + header_offset + headroom),
- };
- }
-
- sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
- .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
- .num_wqebbs = wqe_bbs,
- .shampo.len = ksm_entries,
- };
-
- shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1);
- sq->pc += wqe_bbs;
- sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
-
- return 0;
-
-err_unmap:
- while (--i >= 0) {
- --index;
- header_offset = mlx5e_shampo_hd_offset(rq, index);
- if (!header_offset) {
- struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
-
- mlx5e_page_release_fragmented(rq->hd_page_pool,
- frag_page);
- }
- }
-
- rq->stats->buff_alloc_err++;
- return err;
-}
-
-static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 ksm_entries, num_wqe, index, entries_before;
- struct mlx5e_icosq *sq = rq->icosq;
- int i, err, max_ksm_entries, len;
-
- max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev);
- ksm_entries = bitmap_find_window(shampo->bitmap,
- shampo->hd_per_wqe,
- shampo->hd_per_wq, shampo->pi);
- ksm_entries = ALIGN_DOWN(ksm_entries, shampo->hd_per_page);
- if (!ksm_entries)
- return 0;
-
- /* pi is aligned to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE */
- index = shampo->pi;
- entries_before = shampo->hd_per_wq - index;
-
- if (unlikely(entries_before < ksm_entries))
- num_wqe = DIV_ROUND_UP(entries_before, max_ksm_entries) +
- DIV_ROUND_UP(ksm_entries - entries_before, max_ksm_entries);
- else
- num_wqe = DIV_ROUND_UP(ksm_entries, max_ksm_entries);
-
- for (i = 0; i < num_wqe; i++) {
- len = (ksm_entries > max_ksm_entries) ? max_ksm_entries :
- ksm_entries;
- if (unlikely(index + len > shampo->hd_per_wq))
- len = shampo->hd_per_wq - index;
- err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
- if (unlikely(err))
- return err;
- index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
- ksm_entries -= len;
- }
-
- return 0;
-}
-
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
@@ -778,16 +619,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u32 offset; /* 17-bit value with MTT. */
+ bool sync_locked;
u16 pi;
int err;
int i;
- if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
- err = mlx5e_alloc_rx_hd_mpwqe(rq);
- if (unlikely(err))
- goto err;
- }
-
+ sync_locked = mlx5e_icosq_sync_lock(sq);
pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
@@ -835,12 +672,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
};
sq->pc += rq->mpwqe.umr_wqebbs;
+ mlx5e_icosq_sync_unlock(sq, sync_locked);
sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
err_unmap:
+ mlx5e_icosq_sync_unlock(sq, sync_locked);
while (--i >= 0) {
frag_page--;
mlx5e_page_release_fragmented(rq->page_pool, frag_page);
@@ -848,34 +687,11 @@ err_unmap:
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
-err:
rq->stats->buff_alloc_err++;
return err;
}
-static void
-mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-
- if (((header_index + 1) & (shampo->hd_per_page - 1)) == 0) {
- struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
-
- mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page);
- }
- clear_bit(header_index, shampo->bitmap);
-}
-
-void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- int i;
-
- for_each_set_bit(i, shampo->bitmap, rq->mpwqe.shampo->hd_per_wq)
- mlx5e_free_rx_shampo_hd_entry(rq, i);
-}
-
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
@@ -968,33 +784,6 @@ void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
sq->cc = sqcc;
}
-void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- int end, from, full_len = len;
-
- end = shampo->hd_per_wq;
- from = shampo->ci;
- if (from + len > end) {
- len -= end - from;
- bitmap_set(shampo->bitmap, from, end - from);
- from = 0;
- }
-
- bitmap_set(shampo->bitmap, from, len);
- shampo->ci = (shampo->ci + full_len) & (shampo->hd_per_wq - 1);
-}
-
-static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
- struct mlx5e_icosq *sq)
-{
- struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
- /* assume 1:1 relationship between RQ and icosq */
- struct mlx5e_rq *rq = &c->rq;
-
- mlx5e_shampo_fill_umr(rq, umr.len);
-}
-
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
@@ -1055,9 +844,6 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
break;
case MLX5E_ICOSQ_WQE_NOP:
break;
- case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR:
- mlx5e_handle_shampo_hd_umr(wi->shampo, sq);
- break;
#ifdef CONFIG_MLX5_EN_TLS
case MLX5E_ICOSQ_WQE_UMR_TLS:
break;
@@ -1083,11 +869,24 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
return i;
}
+static void mlx5e_reclaim_mpwqe_pages(struct mlx5e_rq *rq, int head,
+ int reclaim)
+{
+ struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
+
+ for (int i = 0; i < reclaim; i++) {
+ head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
+
+ mlx5e_dealloc_rx_mpwqe(rq, head);
+ }
+}
+
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
u8 umr_completed = rq->mpwqe.umr_completed;
struct mlx5e_icosq *sq = rq->icosq;
+ bool reclaimed = false;
int alloc_err = 0;
u8 missing, i;
u16 head;
@@ -1122,11 +921,20 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
/* Deferred free for better page pool cache usage. */
mlx5e_free_rx_mpwqe(rq, wi);
+retry:
alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
mlx5e_alloc_rx_mpwqe(rq, head);
+ if (unlikely(alloc_err)) {
+ int reclaim = i - 1;
- if (unlikely(alloc_err))
- break;
+ if (reclaimed || !reclaim)
+ break;
+
+ mlx5e_reclaim_mpwqe_pages(rq, head, reclaim);
+ reclaimed = true;
+
+ goto retry;
+ }
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
} while (--i);
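
The retry hunk above attempts the refill once more after reclaiming buffers still queued ahead in the ring, and gives up after a single reclaim pass or when there is nothing left to reclaim. A toy model of just that control flow; the pool counter stands in for the page pool, and all names are invented:

#include <stdbool.h>
#include <stdio.h>

/* Toy pool: alloc fails when empty; reclaim returns buffers to it. */
static int pool = 1;

static int alloc_slot(void)
{
	if (pool <= 0)
		return -1;
	pool--;
	return 0;
}

static void reclaim_slots(int n)
{
	pool += n;	/* hand buffers queued ahead back to the pool */
}

/* Fill `missing` slots, retrying once after a reclaim — mirrors the
 * control flow of the post_rx_mpwqes hunk with hypothetical helpers.
 */
static int fill_ring(int missing)
{
	bool reclaimed = false;
	int i = missing;

	do {
retry:
		if (alloc_slot()) {
			int reclaim = i - 1;

			if (reclaimed || !reclaim)
				break;	/* second failure: give up */
			reclaim_slots(reclaim);
			reclaimed = true;
			goto retry;
		}
	} while (--i);

	return missing - i;	/* how many slots were filled */
}

int main(void)
{
	printf("filled %d of 3\n", fill_ring(3));
	return 0;
}
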
@@ -1223,15 +1031,6 @@ static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
}
-static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
-{
- struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
- void *addr = netmem_address(frag_page->netmem);
-
- return addr + head_offset + rq->buff.headroom;
-}
-
static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
{
int udp_off = rq->hw_gro_data->fk.control.thoff;
@@ -1270,15 +1069,46 @@ static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr
skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
}
+static void mlx5e_shampo_get_hd_buf_info(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_dma_info **di,
+ u32 *head_offset)
+{
+ u32 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u32 di_index;
+
+ di_index = header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE;
+ *di = &shampo->hd_buf_pages[di_index];
+ *head_offset = (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) *
+ BIT(MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE);
+}
+
+static void *mlx5e_shampo_get_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ int len)
+{
+ struct mlx5e_dma_info *di;
+ u32 head_offset;
+
+ mlx5e_shampo_get_hd_buf_info(rq, cqe, &di, &head_offset);
+
+ dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
+ len, rq->buff.map_dir);
+
+ return page_address(di->page) + head_offset;
+}
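
With the headers kept in a flat page array, the lookup above reduces to shift-and-mask arithmetic on the header index. A standalone sketch with assumed geometry — 64-byte header entries and 4 KiB pages, so 64 headers per page; the real values come from the MLX5E_SHAMPO_* defines:

#include <stdint.h>
#include <stdio.h>

#define LOG_ENTRY_SIZE		6	/* assumed: 64-byte entries */
#define LOG_HEADERS_PER_PAGE	6	/* assumed: 4096 / 64 */
#define HEADERS_PER_PAGE	(1u << LOG_HEADERS_PER_PAGE)

/* Map a header index to the backing page and the byte offset inside
 * it — the same shift/mask split the hunk performs.
 */
static void hd_buf_info(uint32_t header_index, uint32_t *page_index,
			uint32_t *head_offset)
{
	*page_index = header_index >> LOG_HEADERS_PER_PAGE;
	*head_offset = (header_index & (HEADERS_PER_PAGE - 1))
			<< LOG_ENTRY_SIZE;
}

int main(void)
{
	uint32_t page, off;

	hd_buf_info(130, &page, &off);	/* 130 = 2 * 64 + 2 */
	printf("page %u offset %u\n", page, off);	/* page 2 offset 128 */
	return 0;
}
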
+
static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct tcphdr *skb_tcp_hd)
{
- u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
+ int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
+ int len = nhoff + sizeof(struct tcphdr);
struct tcphdr *last_tcp_hd;
void *last_hd_addr;
- last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
- last_tcp_hd = last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
+ last_hd_addr = mlx5e_shampo_get_hdr(rq, cqe, len);
+ last_tcp_hd = (struct tcphdr *)(last_hd_addr + nhoff);
+
tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
}
@@ -1570,7 +1400,7 @@ static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
struct sk_buff *skb)
{
- u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+ u8 lro_num_seg = get_cqe_lro_num_seg(cqe);
struct mlx5e_rq_stats *stats = rq->stats;
struct net_device *netdev = rq->netdev;
@@ -2054,6 +1884,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
u16 linear_hr;
void *va;
+ if (unlikely(cqe_bcnt > rq->hw_mtu)) {
+ u8 lro_num_seg = get_cqe_lro_num_seg(cqe);
+
+ if (lro_num_seg <= 1) {
+ rq->stats->oversize_pkts_sw_drop++;
+ return NULL;
+ }
+ }
+
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
@@ -2268,52 +2107,25 @@ static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
- struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 head_size = cqe->shampo.header_size;
- u16 rx_headroom = rq->buff.headroom;
- struct sk_buff *skb = NULL;
- dma_addr_t page_dma_addr;
- dma_addr_t dma_addr;
- void *hdr, *data;
- u32 frag_size;
-
- page_dma_addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
- dma_addr = page_dma_addr + head_offset;
-
- hdr = netmem_address(frag_page->netmem) + head_offset;
- data = hdr + rx_headroom;
- frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
+ struct mlx5e_dma_info *di;
+ struct sk_buff *skb;
+ u32 head_offset;
+ int len;
- if (likely(frag_size <= BIT(shampo->log_hd_entry_size))) {
- /* build SKB around header */
- dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir);
- net_prefetchw(hdr);
- net_prefetch(data);
- skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
- if (unlikely(!skb))
- return NULL;
+ len = ALIGN(head_size, sizeof(long));
+ skb = napi_alloc_skb(rq->cq.napi, len);
+ if (unlikely(!skb)) {
+ rq->stats->buff_alloc_err++;
+ return NULL;
+ }
- frag_page->frags++;
- } else {
- /* allocate SKB and copy header for large header */
- rq->stats->gro_large_hds++;
- skb = napi_alloc_skb(rq->cq.napi,
- ALIGN(head_size, sizeof(long)));
- if (unlikely(!skb)) {
- rq->stats->buff_alloc_err++;
- return NULL;
- }
+ net_prefetchw(skb->data);
- net_prefetchw(skb->data);
- mlx5e_copy_skb_header(rq, skb, frag_page->netmem, dma_addr,
- head_offset + rx_headroom,
- rx_headroom, head_size);
- /* skb linear part was allocated with headlen and aligned to long */
- skb->tail += head_size;
- skb->len += head_size;
- }
+ mlx5e_shampo_get_hd_buf_info(rq, cqe, &di, &head_offset);
+ mlx5e_copy_skb_header(rq, skb, page_to_netmem(di->page), di->addr,
+ head_offset, head_offset, len);
+ __skb_put(skb, head_size);
/* queue up for recycling/reuse */
skb_mark_for_recycle(skb);
@@ -2414,7 +2226,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
* prevent the kernel from touching it.
*/
if (unlikely(netmem_is_net_iov(frag_page->netmem)))
- goto free_hd_entry;
+ goto mpwrq_cqe_out;
*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe,
cqe_bcnt,
data_offset,
@@ -2422,19 +2234,22 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (unlikely(!*skb))
- goto free_hd_entry;
+ goto mpwrq_cqe_out;
NAPI_GRO_CB(*skb)->count = 1;
skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
} else {
NAPI_GRO_CB(*skb)->count++;
+
if (NAPI_GRO_CB(*skb)->count == 2 &&
rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
- void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
- int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
- sizeof(struct iphdr);
- struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);
+ int len = ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
+ int nhoff = len - sizeof(struct iphdr);
+ void *last_hd_addr;
+ struct iphdr *iph;
+ last_hd_addr = mlx5e_shampo_get_hdr(rq, cqe, len);
+ iph = (struct iphdr *)(last_hd_addr + nhoff);
rq->hw_gro_data->second_ip_id = ntohs(iph->id);
}
}
@@ -2456,13 +2271,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
*skb = NULL;
- goto free_hd_entry;
+ goto mpwrq_cqe_out;
}
if (flush && rq->hw_gro_data->skb)
mlx5e_shampo_flush_skb(rq, cqe, match);
-free_hd_entry:
- if (likely(head_size))
- mlx5e_free_rx_shampo_hd_entry(rq, header_index);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index a01ee656a1e7..9f0272649fa1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -152,12 +152,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
* to inline later in the transmit descriptor
*/
static inline u16
-mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
+mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
struct mlx5e_sq_stats *stats = sq->stats;
u16 ihs;
- *hopbyhop = 0;
if (skb->encapsulation) {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
ihs = skb_inner_transport_offset(skb) +
@@ -167,17 +166,12 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
- } else {
+ else
ihs = skb_tcp_all_headers(skb);
- if (ipv6_has_hopopt_jumbo(skb)) {
- *hopbyhop = sizeof(struct hop_jumbo_hdr);
- ihs -= sizeof(struct hop_jumbo_hdr);
- }
- }
stats->tso_packets++;
- stats->tso_bytes += skb->len - ihs - *hopbyhop;
+ stats->tso_bytes += skb->len - ihs;
}
return ihs;
@@ -239,7 +233,6 @@ struct mlx5e_tx_attr {
__be16 mss;
u16 insz;
u8 opcode;
- u8 hopbyhop;
};
struct mlx5e_tx_wqe_attr {
@@ -275,16 +268,14 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_sq_stats *stats = sq->stats;
if (skb_is_gso(skb)) {
- int hopbyhop;
- u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);
+ u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);
*attr = (struct mlx5e_tx_attr) {
.opcode = MLX5_OPCODE_LSO,
.mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
.ihs = ihs,
.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
- .headlen = skb_headlen(skb) - ihs - hopbyhop,
- .hopbyhop = hopbyhop,
+ .headlen = skb_headlen(skb) - ihs,
};
stats->packets += skb_shinfo(skb)->gso_segs;
@@ -439,7 +430,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_data_seg *dseg;
struct mlx5e_tx_wqe_info *wi;
u16 ihs = attr->ihs;
- struct ipv6hdr *h6;
struct mlx5e_sq_stats *stats = sq->stats;
int num_dma;
@@ -456,28 +446,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (ihs) {
u8 *start = eseg->inline_hdr.start;
- if (unlikely(attr->hopbyhop)) {
- /* remove the HBH header.
- * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
- */
- if (skb_vlan_tag_present(skb)) {
- mlx5e_insert_vlan(start, skb, ETH_HLEN + sizeof(*h6));
- ihs += VLAN_HLEN;
- h6 = (struct ipv6hdr *)(start + sizeof(struct vlan_ethhdr));
- } else {
- unsafe_memcpy(start, skb->data,
- ETH_HLEN + sizeof(*h6),
- MLX5_UNSAFE_MEMCPY_DISCLAIMER);
- h6 = (struct ipv6hdr *)(start + ETH_HLEN);
- }
- h6->nexthdr = IPPROTO_TCP;
- /* Copy the TCP header after the IPv6 one */
- memcpy(h6 + 1,
- skb->data + ETH_HLEN + sizeof(*h6) +
- sizeof(struct hop_jumbo_hdr),
- tcp_hdrlen(skb));
- /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
- } else if (skb_vlan_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
mlx5e_insert_vlan(start, skb, ihs);
ihs += VLAN_HLEN;
stats->added_vlan_packets++;
@@ -491,7 +460,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
dseg += wqe_attr->ds_cnt_ids;
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop,
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
attr->headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
@@ -1019,34 +988,14 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->mss = attr.mss;
if (attr.ihs) {
- if (unlikely(attr.hopbyhop)) {
- struct ipv6hdr *h6;
-
- /* remove the HBH header.
- * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
- */
- unsafe_memcpy(eseg->inline_hdr.start, skb->data,
- ETH_HLEN + sizeof(*h6),
- MLX5_UNSAFE_MEMCPY_DISCLAIMER);
- h6 = (struct ipv6hdr *)((char *)eseg->inline_hdr.start + ETH_HLEN);
- h6->nexthdr = IPPROTO_TCP;
- /* Copy the TCP header after the IPv6 one */
- unsafe_memcpy(h6 + 1,
- skb->data + ETH_HLEN + sizeof(*h6) +
- sizeof(struct hop_jumbo_hdr),
- tcp_hdrlen(skb),
- MLX5_UNSAFE_MEMCPY_DISCLAIMER);
- /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
- } else {
- unsafe_memcpy(eseg->inline_hdr.start, skb->data,
- attr.ihs,
- MLX5_UNSAFE_MEMCPY_DISCLAIMER);
- }
+ unsafe_memcpy(eseg->inline_hdr.start, skb->data,
+ attr.ihs,
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
dseg += wqe_attr.ds_cnt_inl;
}
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop,
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
attr.headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 76108299ea57..b31f689fe271 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -125,6 +125,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
+ struct mlx5e_icosq *aicosq = c->async_icosq;
struct mlx5e_ch_stats *ch_stats = c->stats;
struct mlx5e_xdpsq *xsksq = &c->xsksq;
struct mlx5e_txqsq __rcu **qos_sqs;
@@ -180,15 +181,18 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
busy |= work_done == budget;
mlx5e_poll_ico_cq(&c->icosq.cq);
- if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
- /* Don't clear the flag if nothing was polled to prevent
- * queueing more WQEs and overflowing the async ICOSQ.
- */
- clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);
-
- /* Keep after async ICOSQ CQ poll */
- if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
- busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);
+ if (aicosq) {
+ if (mlx5e_poll_ico_cq(&aicosq->cq))
+ /* Don't clear the flag if nothing was polled to prevent
+ * queueing more WQEs and overflowing the async ICOSQ.
+ */
+ clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX,
+ &aicosq->state);
+
+ /* Keep after async ICOSQ CQ poll */
+ if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
+ busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);
+ }
busy |= INDIRECT_CALL_2(rq->post_wqes,
mlx5e_post_rx_mpwqes,
@@ -236,16 +240,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_cq_arm(&rq->cq);
mlx5e_cq_arm(&c->icosq.cq);
- mlx5e_cq_arm(&c->async_icosq.cq);
+ if (aicosq) {
+ mlx5e_cq_arm(&aicosq->cq);
+ if (xsk_open) {
+ mlx5e_handle_rx_dim(xskrq);
+ mlx5e_cq_arm(&xsksq->cq);
+ mlx5e_cq_arm(&xskrq->cq);
+ }
+ }
if (c->xdpsq)
mlx5e_cq_arm(&c->xdpsq->cq);
- if (xsk_open) {
- mlx5e_handle_rx_dim(xskrq);
- mlx5e_cq_arm(&xsksq->cq);
- mlx5e_cq_arm(&xskrq->cq);
- }
-
if (unlikely(aff_change && busy_xsk)) {
mlx5e_trigger_irq(&c->icosq);
ch_stats->force_irq++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 89a58dee50b3..cd60bc500ec5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -99,6 +99,8 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
.port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
.port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
.port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
+ .port_fn_state_get = mlx5_devlink_pf_port_fn_state_get,
+ .port_fn_state_set = mlx5_devlink_pf_port_fn_state_set,
#ifdef CONFIG_XFRM_OFFLOAD
.port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
.port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 4b7a1ce7f406..5fbfabe28bdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1304,24 +1304,52 @@ vf_err:
return err;
}
-static int host_pf_enable_hca(struct mlx5_core_dev *dev)
+int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev)
{
- if (!mlx5_core_is_ecpf(dev))
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_vport *vport;
+ int err;
+
+ if (!mlx5_core_is_ecpf(dev) || !mlx5_esw_allowed(esw))
return 0;
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
/* Once vport and representor are ready, take out the external host PF
* out of initializing state. Enabling HCA clears the iser->initializing
* bit and host PF driver loading can progress.
*/
- return mlx5_cmd_host_pf_enable_hca(dev);
+ err = mlx5_cmd_host_pf_enable_hca(dev);
+ if (err)
+ return err;
+
+ vport->pf_activated = true;
+
+ return 0;
}
-static void host_pf_disable_hca(struct mlx5_core_dev *dev)
+int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev)
{
- if (!mlx5_core_is_ecpf(dev))
- return;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_vport *vport;
+ int err;
- mlx5_cmd_host_pf_disable_hca(dev);
+ if (!mlx5_core_is_ecpf(dev) || !mlx5_esw_allowed(esw))
+ return 0;
+
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ err = mlx5_cmd_host_pf_disable_hca(dev);
+ if (err)
+ return err;
+
+ vport->pf_activated = false;
+
+ return 0;
}
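
The refactor above exports the host-PF enable/disable helpers and records the outcome in vport->pf_activated, which the new devlink port_fn_state_get callback reads back below. A toy sketch of that succeed-then-record ordering, with stubbed firmware commands:

#include <stdbool.h>
#include <stdio.h>

struct vport {
	bool pf_activated;	/* mirrors the new bookkeeping field */
};

/* Pretend firmware commands; return nonzero to simulate failure. */
static int cmd_enable_hca(void)  { return 0; }
static int cmd_disable_hca(void) { return 0; }

/* Update the cached state only after the command succeeds, so a
 * later "state get" never reports an activation that failed.
 */
static int host_pf_enable(struct vport *vport)
{
	int err = cmd_enable_hca();

	if (err)
		return err;
	vport->pf_activated = true;
	return 0;
}

static int host_pf_disable(struct vport *vport)
{
	int err = cmd_disable_hca();

	if (err)
		return err;
	vport->pf_activated = false;
	return 0;
}

int main(void)
{
	struct vport v = { 0 };

	host_pf_enable(&v);
	printf("state: %s\n", v.pf_activated ? "active" : "inactive");
	host_pf_disable(&v);
	return 0;
}
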
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
@@ -1347,7 +1375,7 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
if (mlx5_esw_host_functions_enabled(esw->dev)) {
/* Enable external host PF HCA */
- ret = host_pf_enable_hca(esw->dev);
+ ret = mlx5_esw_host_pf_enable_hca(esw->dev);
if (ret)
goto pf_hca_err;
}
@@ -1391,7 +1419,7 @@ ec_vf_err:
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
if (mlx5_esw_host_functions_enabled(esw->dev))
- host_pf_disable_hca(esw->dev);
+ mlx5_esw_host_pf_disable_hca(esw->dev);
pf_hca_err:
if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
@@ -1416,7 +1444,7 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
}
if (mlx5_esw_host_functions_enabled(esw->dev))
- host_pf_disable_hca(esw->dev);
+ mlx5_esw_host_pf_disable_hca(esw->dev);
if ((mlx5_core_is_ecpf_esw_manager(esw->dev) ||
esw->mode == MLX5_ESWITCH_LEGACY) &&
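Note the ordering in the exported enable/disable helpers above: the pf_activated flag is updated only after the firmware command succeeds, so the cached state never runs ahead of the hardware. A small sketch of that pattern, assuming stand-in command functions rather than the real mlx5_cmd_* calls:

#include <stdio.h>

struct vport { int pf_activated; };

/* Stand-ins for the firmware commands; return 0 on success. */
static int cmd_host_pf_enable(void) { return 0; }
static int cmd_host_pf_disable(void) { return 0; }

static int host_pf_enable(struct vport *pf)
{
	int err = cmd_host_pf_enable();

	if (err)
		return err;		/* flag untouched on failure */
	pf->pf_activated = 1;		/* record state only after HW agrees */
	return 0;
}

static int host_pf_disable(struct vport *pf)
{
	int err = cmd_host_pf_disable();

	if (err)
		return err;
	pf->pf_activated = 0;
	return 0;
}

int main(void)
{
	struct vport pf = { 0 };

	host_pf_enable(&pf);
	printf("activated=%d\n", pf.pf_activated);
	host_pf_disable(&pf);
	printf("activated=%d\n", pf.pf_activated);
	return 0;
}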
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 714ad28e8445..6841caef02d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -243,6 +243,7 @@ struct mlx5_vport {
u16 vport;
bool enabled;
bool max_eqs_set;
+ bool pf_activated;
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct mlx5_devlink_port *dl_port;
@@ -587,6 +588,13 @@ int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enab
struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
+int mlx5_devlink_pf_port_fn_state_get(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_pf_port_fn_state_set(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack);
#ifdef CONFIG_XFRM_OFFLOAD
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
struct netlink_ext_ack *extack);
@@ -634,6 +642,8 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1);
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
+int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev);
+int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev);
void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw);
void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 02b7e474586d..1b439cef3719 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -4696,6 +4696,61 @@ out:
return err;
}
+int mlx5_devlink_pf_port_fn_state_get(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ const u32 *query_out;
+ bool pf_disabled;
+
+ if (vport->vport != MLX5_VPORT_PF) {
+ NL_SET_ERR_MSG_MOD(extack, "State get is not supported for VF");
+ return -EOPNOTSUPP;
+ }
+
+ *state = vport->pf_activated ?
+ DEVLINK_PORT_FN_STATE_ACTIVE : DEVLINK_PORT_FN_STATE_INACTIVE;
+
+ query_out = mlx5_esw_query_functions(vport->dev);
+ if (IS_ERR(query_out))
+ return PTR_ERR(query_out);
+
+ pf_disabled = MLX5_GET(query_esw_functions_out, query_out,
+ host_params_context.host_pf_disabled);
+
+ *opstate = pf_disabled ? DEVLINK_PORT_FN_OPSTATE_DETACHED :
+ DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+
+ kvfree(query_out);
+ return 0;
+}
+
+int mlx5_devlink_pf_port_fn_state_set(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ struct mlx5_core_dev *dev;
+
+ if (vport->vport != MLX5_VPORT_PF) {
+ NL_SET_ERR_MSG_MOD(extack, "State set is not supported for VF");
+ return -EOPNOTSUPP;
+ }
+
+ dev = vport->dev;
+
+ switch (state) {
+ case DEVLINK_PORT_FN_STATE_ACTIVE:
+ return mlx5_esw_host_pf_enable_hca(dev);
+ case DEVLINK_PORT_FN_STATE_INACTIVE:
+ return mlx5_esw_host_pf_disable_hca(dev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int
mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *esw_attr, int attr_idx)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 1c6591425260..dbaf33b537f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -308,7 +308,8 @@ struct mlx5_flow_root_namespace {
};
enum mlx5_fc_type {
- MLX5_FC_TYPE_ACQUIRED = 0,
+ MLX5_FC_TYPE_POOL_ACQUIRED = 0,
+ MLX5_FC_TYPE_SINGLE,
MLX5_FC_TYPE_LOCAL,
};
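Splitting MLX5_FC_TYPE_ACQUIRED into pool-acquired and single variants lets the release path dispatch on an explicit type instead of inferring ownership from whether counter->bulk is set. A compact sketch of that dispatch, under the assumption that pool_release/fc_free model the two real destinations:

#include <stdio.h>

enum fc_type { FC_TYPE_POOL_ACQUIRED, FC_TYPE_SINGLE, FC_TYPE_LOCAL };

struct fc { enum fc_type type; unsigned int id; };

static void pool_release(struct fc *c) { printf("to pool: %u\n", c->id); }
static void fc_free(struct fc *c) { printf("freed: %u\n", c->id); }

static void fc_release(struct fc *c)
{
	if (c->type == FC_TYPE_POOL_ACQUIRED)
		pool_release(c);	/* back to the shared pool */
	else
		fc_free(c);		/* singles own their id and bulk */
}

int main(void)
{
	struct fc pooled = { FC_TYPE_POOL_ACQUIRED, 1 };
	struct fc single = { FC_TYPE_SINGLE, 2 };

	fc_release(&pooled);
	fc_release(&single);
	return 0;
}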
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 83001eda3884..fe7caa910219 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -153,6 +153,7 @@ static void mlx5_fc_stats_query_all_counters(struct mlx5_core_dev *dev)
static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
mlx5_cmd_fc_free(dev, counter->id);
+ kfree(counter->bulk);
kfree(counter);
}
@@ -163,7 +164,7 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
return;
- if (counter->bulk)
+ if (counter->type == MLX5_FC_TYPE_POOL_ACQUIRED)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
else
mlx5_fc_free(dev, counter);
@@ -220,8 +221,16 @@ static void mlx5_fc_stats_work(struct work_struct *work)
mlx5_fc_stats_query_all_counters(dev);
}
+static void mlx5_fc_bulk_init(struct mlx5_fc_bulk *fc_bulk, u32 base_id)
+{
+ fc_bulk->base_id = base_id;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
+}
+
static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
+ struct mlx5_fc_bulk *fc_bulk;
struct mlx5_fc *counter;
int err;
@@ -229,13 +238,26 @@ static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
if (!counter)
return ERR_PTR(-ENOMEM);
- err = mlx5_cmd_fc_alloc(dev, &counter->id);
- if (err) {
- kfree(counter);
- return ERR_PTR(err);
+ fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
+ if (!fc_bulk) {
+ err = -ENOMEM;
+ goto free_counter;
}
+ err = mlx5_cmd_fc_alloc(dev, &counter->id);
+ if (err)
+ goto free_bulk;
+ counter->type = MLX5_FC_TYPE_SINGLE;
+ mlx5_fs_bulk_init(&fc_bulk->fs_bulk, 1);
+ mlx5_fc_bulk_init(fc_bulk, counter->id);
+ counter->bulk = fc_bulk;
return counter;
+
+free_bulk:
+ kfree(fc_bulk);
+free_counter:
+ kfree(counter);
+ return ERR_PTR(err);
}
static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
@@ -442,17 +464,18 @@ static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
if (!fc_bulk)
return NULL;
- if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len))
+ mlx5_fs_bulk_init(&fc_bulk->fs_bulk, bulk_len);
+
+ if (mlx5_fs_bulk_bitmap_alloc(dev, &fc_bulk->fs_bulk))
goto fc_bulk_free;
if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id))
goto fs_bulk_cleanup;
- fc_bulk->base_id = base_id;
+
+ mlx5_fc_bulk_init(fc_bulk, base_id);
for (i = 0; i < bulk_len; i++)
mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);
- refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
- mutex_init(&fc_bulk->hws_data.lock);
return &fc_bulk->fs_bulk;
fs_bulk_cleanup:
@@ -560,10 +583,8 @@ mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
counter->type = MLX5_FC_TYPE_LOCAL;
counter->id = counter_id;
- fc_bulk->base_id = counter_id - offset;
- fc_bulk->fs_bulk.bulk_len = bulk_size;
- refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
- mutex_init(&fc_bulk->hws_data.lock);
+ mlx5_fs_bulk_init(&fc_bulk->fs_bulk, bulk_size);
+ mlx5_fc_bulk_init(fc_bulk, counter_id - offset);
counter->bulk = fc_bulk;
refcount_set(&counter->fc_local_refcount, 1);
return counter;
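With the change above, a single counter always carries a one-entry bulk, so later code can treat every counter uniformly. A runnable userspace sketch of the allocation with the same goto-style unwind; hw_alloc_counter and the struct layouts are illustrative stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct fc_bulk { unsigned int base_id; int bulk_len; };
struct fc { unsigned int id; struct fc_bulk *bulk; };

/* Stand-in for the firmware allocation of a counter id. */
static int hw_alloc_counter(unsigned int *id) { *id = 42; return 0; }

static struct fc *fc_single_alloc(void)
{
	struct fc *c = calloc(1, sizeof(*c));
	struct fc_bulk *b;
	int err;

	if (!c)
		return NULL;
	b = calloc(1, sizeof(*b));
	if (!b)
		goto free_counter;
	err = hw_alloc_counter(&c->id);
	if (err)
		goto free_bulk;
	b->bulk_len = 1;		/* a bulk of exactly one counter */
	b->base_id = c->id;
	c->bulk = b;			/* singles now always carry a bulk */
	return c;

free_bulk:
	free(b);
free_counter:
	free(c);
	return NULL;
}

int main(void)
{
	struct fc *c = fc_single_alloc();

	if (!c)
		return 1;
	printf("id=%u base=%u len=%d\n", c->id, c->bulk->base_id,
	       c->bulk->bulk_len);
	free(c->bulk);			/* mirrors mlx5_fc_free() ownership */
	free(c);
	return 0;
}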
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
index f6c226664602..faa519254316 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
@@ -4,23 +4,27 @@
#include <mlx5_core.h>
#include "fs_pool.h"
-int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
- int bulk_len)
+int mlx5_fs_bulk_bitmap_alloc(struct mlx5_core_dev *dev,
+ struct mlx5_fs_bulk *fs_bulk)
{
int i;
- fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
- GFP_KERNEL);
+ fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(fs_bulk->bulk_len),
+ sizeof(unsigned long), GFP_KERNEL);
if (!fs_bulk->bitmask)
return -ENOMEM;
- fs_bulk->bulk_len = bulk_len;
- for (i = 0; i < bulk_len; i++)
+ for (i = 0; i < fs_bulk->bulk_len; i++)
set_bit(i, fs_bulk->bitmask);
return 0;
}
+void mlx5_fs_bulk_init(struct mlx5_fs_bulk *fs_bulk, int bulk_len)
+{
+ fs_bulk->bulk_len = bulk_len;
+}
+
void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk)
{
kvfree(fs_bulk->bitmask);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
index f04ec3107498..4deb66479d16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
@@ -39,8 +39,9 @@ struct mlx5_fs_pool {
int threshold;
};
-int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
- int bulk_len);
+void mlx5_fs_bulk_init(struct mlx5_fs_bulk *fs_bulk, int bulk_len);
+int mlx5_fs_bulk_bitmap_alloc(struct mlx5_core_dev *dev,
+ struct mlx5_fs_bulk *fs_bulk);
void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk);
int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk);
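The split turns the old one-shot init into two phases: a trivial setter that records bulk_len, and a separate allocation that sizes the free-slot bitmap from it, which single-counter bulks can now skip. A self-contained userspace sketch of the two phases (BITS_TO_LONGS reimplemented locally for illustration):

#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct fs_bulk { int bulk_len; unsigned long *bitmask; };

/* Phase 1: record the length; cheap, never fails. */
static void fs_bulk_init(struct fs_bulk *b, int bulk_len)
{
	b->bulk_len = bulk_len;
	b->bitmask = NULL;
}

/* Phase 2: allocate the free-slot bitmap, sized from bulk_len.
 * Single-counter bulks skip this phase entirely. */
static int fs_bulk_bitmap_alloc(struct fs_bulk *b)
{
	int i;

	b->bitmask = calloc(BITS_TO_LONGS(b->bulk_len),
			    sizeof(unsigned long));
	if (!b->bitmask)
		return -1;
	for (i = 0; i < b->bulk_len; i++)	/* all slots start free */
		b->bitmask[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
	return 0;
}

int main(void)
{
	struct fs_bulk pooled;
	int err;

	fs_bulk_init(&pooled, 128);		/* phase 1 */
	err = fs_bulk_bitmap_alloc(&pooled);	/* phase 2, pooled bulks only */
	free(pooled.bitmask);
	return err;
}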
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index a459a30f36ca..9fe47c836ebd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -233,14 +233,25 @@ static void mlx5_ldev_free(struct kref *ref)
{
struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);
struct net *net;
+ int i;
if (ldev->nb.notifier_call) {
net = read_pnet(&ldev->net);
unregister_netdevice_notifier_net(net, &ldev->nb);
}
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].dev &&
+ ldev->pf[i].port_change_nb.nb.notifier_call) {
+ struct mlx5_nb *nb = &ldev->pf[i].port_change_nb;
+
+ mlx5_eq_notifier_unregister(ldev->pf[i].dev, nb);
+ }
+ }
+
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
+ cancel_work_sync(&ldev->speed_update_work);
destroy_workqueue(ldev->wq);
mutex_destroy(&ldev->lock);
kfree(ldev);
@@ -274,6 +285,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
kref_init(&ldev->ref);
mutex_init(&ldev->lock);
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+ INIT_WORK(&ldev->speed_update_work, mlx5_mpesw_speed_update_work);
ldev->nb.notifier_call = mlx5_lag_netdev_event;
write_pnet(&ldev->net, mlx5_core_net(dev));
@@ -996,6 +1008,137 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
ldev->mode != MLX5_LAG_MODE_MPESW;
}
+#ifdef CONFIG_MLX5_ESWITCH
+static int
+mlx5_lag_sum_devices_speed(struct mlx5_lag *ldev, u32 *sum_speed,
+ int (*get_speed)(struct mlx5_core_dev *, u32 *))
+{
+ struct mlx5_core_dev *pf_mdev;
+ int pf_idx;
+ u32 speed;
+ int ret;
+
+ *sum_speed = 0;
+ mlx5_ldev_for_each(pf_idx, 0, ldev) {
+ pf_mdev = ldev->pf[pf_idx].dev;
+ if (!pf_mdev)
+ continue;
+
+ ret = get_speed(pf_mdev, &speed);
+ if (ret) {
+ mlx5_core_dbg(pf_mdev,
+ "Failed to get device speed using %ps. Device %s speed is not available (err=%d)\n",
+ get_speed, dev_name(pf_mdev->device),
+ ret);
+ return ret;
+ }
+
+ *sum_speed += speed;
+ }
+
+ return 0;
+}
+
+static int mlx5_lag_sum_devices_max_speed(struct mlx5_lag *ldev, u32 *max_speed)
+{
+ return mlx5_lag_sum_devices_speed(ldev, max_speed,
+ mlx5_port_max_linkspeed);
+}
+
+static int mlx5_lag_sum_devices_oper_speed(struct mlx5_lag *ldev,
+ u32 *oper_speed)
+{
+ return mlx5_lag_sum_devices_speed(ldev, oper_speed,
+ mlx5_port_oper_linkspeed);
+}
+
+static void mlx5_lag_modify_device_vports_speed(struct mlx5_core_dev *mdev,
+ u32 speed)
+{
+ u16 op_mod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_vport *vport;
+ unsigned long i;
+ int ret;
+
+ if (!esw)
+ return;
+
+ if (!MLX5_CAP_ESW(mdev, esw_vport_state_max_tx_speed))
+ return;
+
+ mlx5_esw_for_each_vport(esw, i, vport) {
+ if (!vport)
+ continue;
+
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ continue;
+
+ ret = mlx5_modify_vport_max_tx_speed(mdev, op_mod,
+ vport->vport, true, speed);
+ if (ret)
+ mlx5_core_dbg(mdev,
+ "Failed to set vport %d speed %d, err=%d\n",
+ vport->vport, speed, ret);
+ }
+}
+
+void mlx5_lag_set_vports_agg_speed(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *mdev;
+ u32 speed;
+ int pf_idx;
+
+ if (ldev->mode == MLX5_LAG_MODE_MPESW) {
+ if (mlx5_lag_sum_devices_oper_speed(ldev, &speed))
+ return;
+ } else {
+ speed = ldev->tracker.bond_speed_mbps;
+ if (speed == SPEED_UNKNOWN)
+ return;
+ }
+
+ /* If speed is not set, use the sum of max speeds of all PFs */
+ if (!speed && mlx5_lag_sum_devices_max_speed(ldev, &speed))
+ return;
+
+ speed = speed / MLX5_MAX_TX_SPEED_UNIT;
+
+ mlx5_ldev_for_each(pf_idx, 0, ldev) {
+ mdev = ldev->pf[pf_idx].dev;
+ if (!mdev)
+ continue;
+
+ mlx5_lag_modify_device_vports_speed(mdev, speed);
+ }
+}
+
+void mlx5_lag_reset_vports_speed(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *mdev;
+ u32 speed;
+ int pf_idx;
+ int ret;
+
+ mlx5_ldev_for_each(pf_idx, 0, ldev) {
+ mdev = ldev->pf[pf_idx].dev;
+ if (!mdev)
+ continue;
+
+ ret = mlx5_port_oper_linkspeed(mdev, &speed);
+ if (ret) {
+ mlx5_core_dbg(mdev,
+ "Failed to reset vports speed for device %s. Oper speed is not available (err=%d)\n",
+ dev_name(mdev->device), ret);
+ continue;
+ }
+
+ speed = speed / MLX5_MAX_TX_SPEED_UNIT;
+ mlx5_lag_modify_device_vports_speed(mdev, speed);
+ }
+}
+#endif
+
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
@@ -1083,9 +1226,12 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
ndev);
dev_put(ndev);
}
+ mlx5_lag_set_vports_agg_speed(ldev);
} else if (mlx5_lag_should_modify_lag(ldev, do_bond)) {
mlx5_modify_lag(ldev, &tracker);
+ mlx5_lag_set_vports_agg_speed(ldev);
} else if (mlx5_lag_should_disable_lag(ldev, do_bond)) {
+ mlx5_lag_reset_vports_speed(ldev);
mlx5_disable_lag(ldev);
}
}
@@ -1286,6 +1432,65 @@ static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
return 1;
}
+static void mlx5_lag_update_tracker_speed(struct lag_tracker *tracker,
+ struct net_device *ndev)
+{
+ struct ethtool_link_ksettings lksettings;
+ struct net_device *bond_dev;
+ int err;
+
+ if (netif_is_lag_master(ndev))
+ bond_dev = ndev;
+ else
+ bond_dev = netdev_master_upper_dev_get(ndev);
+
+ if (!bond_dev) {
+ tracker->bond_speed_mbps = SPEED_UNKNOWN;
+ return;
+ }
+
+ err = __ethtool_get_link_ksettings(bond_dev, &lksettings);
+ if (err) {
+ netdev_dbg(bond_dev,
+ "Failed to get speed for bond dev %s, err=%d\n",
+ bond_dev->name, err);
+ tracker->bond_speed_mbps = SPEED_UNKNOWN;
+ return;
+ }
+
+ if (lksettings.base.speed == SPEED_UNKNOWN)
+ tracker->bond_speed_mbps = 0;
+ else
+ tracker->bond_speed_mbps = lksettings.base.speed;
+}
+
+/* Returns speed in Mbps. */
+int mlx5_lag_query_bond_speed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+ struct mlx5_lag *ldev;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(mdev);
+ if (!ldev) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ *speed = ldev->tracker.bond_speed_mbps;
+
+ if (*speed == SPEED_UNKNOWN) {
+ mlx5_core_dbg(mdev, "Bond speed is unknown\n");
+ ret = -EINVAL;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&lag_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx5_lag_query_bond_speed);
+
/* this handler is always registered to netdev events */
static int mlx5_lag_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
@@ -1317,6 +1522,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
break;
}
+ if (changed)
+ mlx5_lag_update_tracker_speed(&tracker, ndev);
+
ldev->tracker = tracker;
if (changed)
@@ -1362,6 +1570,10 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
ldev->pf[fn].dev = dev;
dev->priv.lag = ldev;
+
+ MLX5_NB_INIT(&ldev->pf[fn].port_change_nb,
+ mlx5_lag_mpesw_port_change_event, PORT_CHANGE);
+ mlx5_eq_notifier_register(dev, &ldev->pf[fn].port_change_nb);
}
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
@@ -1373,6 +1585,9 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
if (ldev->pf[fn].dev != dev)
return;
+ if (ldev->pf[fn].port_change_nb.nb.notifier_call)
+ mlx5_eq_notifier_unregister(dev, &ldev->pf[fn].port_change_nb);
+
ldev->pf[fn].dev = NULL;
dev->priv.lag = NULL;
}
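The speed-aggregation helpers above sum the per-PF link speed and program the result as a max TX speed on every vport, bailing out if any member's speed is unavailable. A minimal model of the summing step, where SPEED_UNIT_MBPS is a made-up granularity standing in for MLX5_MAX_TX_SPEED_UNIT:

#include <stdio.h>

#define SPEED_UNIT_MBPS 100	/* hypothetical per-unit granularity */

struct pf_dev { int present; unsigned int oper_speed_mbps; };

/* Sum the operational speed of every present PF, as the MPESW path does. */
static int sum_oper_speed(const struct pf_dev *pfs, int n, unsigned int *sum)
{
	int i;

	*sum = 0;
	for (i = 0; i < n; i++) {
		if (!pfs[i].present)
			continue;
		if (!pfs[i].oper_speed_mbps)
			return -1;	/* unavailable: program nothing */
		*sum += pfs[i].oper_speed_mbps;
	}
	return 0;
}

int main(void)
{
	struct pf_dev pfs[2] = { { 1, 100000 }, { 1, 100000 } };
	unsigned int sum;

	if (!sum_oper_speed(pfs, 2, &sum))
		printf("program vport max_tx_speed = %u units\n",
		       sum / SPEED_UNIT_MBPS);
	return 0;
}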
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 4918eee2b3da..be1afece5fdc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -39,6 +39,7 @@ struct lag_func {
struct mlx5_core_dev *dev;
struct net_device *netdev;
bool has_drop;
+ struct mlx5_nb port_change_nb;
};
/* Used for collection of netdev event info. */
@@ -48,6 +49,7 @@ struct lag_tracker {
unsigned int is_bonded:1;
unsigned int has_inactive:1;
enum netdev_lag_hash hash_type;
+ u32 bond_speed_mbps;
};
/* LAG data of a ConnectX card.
@@ -66,6 +68,7 @@ struct mlx5_lag {
struct lag_tracker tracker;
struct workqueue_struct *wq;
struct delayed_work bond_work;
+ struct work_struct speed_update_work;
struct notifier_block nb;
possible_net_t net;
struct lag_mp lag_mp;
@@ -116,6 +119,14 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev);
void mlx5_lag_add_devices(struct mlx5_lag *ldev);
struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev);
+#ifdef CONFIG_MLX5_ESWITCH
+void mlx5_lag_set_vports_agg_speed(struct mlx5_lag *ldev);
+void mlx5_lag_reset_vports_speed(struct mlx5_lag *ldev);
+#else
+static inline void mlx5_lag_set_vports_agg_speed(struct mlx5_lag *ldev) {}
+static inline void mlx5_lag_reset_vports_speed(struct mlx5_lag *ldev) {}
+#endif
+
static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
{
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 2d86af8f0d9b..04762562d7d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -110,6 +110,8 @@ static int enable_mpesw(struct mlx5_lag *ldev)
goto err_rescan_drivers;
}
+ mlx5_lag_set_vports_agg_speed(ldev);
+
return 0;
err_rescan_drivers:
@@ -223,3 +225,40 @@ bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev)
return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
}
EXPORT_SYMBOL(mlx5_lag_is_mpesw);
+
+void mlx5_mpesw_speed_update_work(struct work_struct *work)
+{
+ struct mlx5_lag *ldev = container_of(work, struct mlx5_lag,
+ speed_update_work);
+
+ mutex_lock(&ldev->lock);
+ if (ldev->mode == MLX5_LAG_MODE_MPESW) {
+ if (ldev->mode_changes_in_progress)
+ queue_work(ldev->wq, &ldev->speed_update_work);
+ else
+ mlx5_lag_set_vports_agg_speed(ldev);
+ }
+
+ mutex_unlock(&ldev->lock);
+}
+
+int mlx5_lag_mpesw_port_change_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct mlx5_nb *mlx5_nb = container_of(nb, struct mlx5_nb, nb);
+ struct lag_func *lag_func = container_of(mlx5_nb,
+ struct lag_func,
+ port_change_nb);
+ struct mlx5_core_dev *dev = lag_func->dev;
+ struct mlx5_lag *ldev = dev->priv.lag;
+ struct mlx5_eqe *eqe = data;
+
+ if (!ldev)
+ return NOTIFY_DONE;
+
+ if (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_DOWN ||
+ eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE)
+ queue_work(ldev->wq, &ldev->speed_update_work);
+
+ return NOTIFY_OK;
+}
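The work handler re-queues itself while a mode change is in flight rather than racing the transition. A toy model of that defer-while-busy pattern, with requeue standing in for queue_work:

#include <stdio.h>

struct lag { int mode_mpesw; int mode_change_in_progress; int requeued; };

/* Stand-in for queue_work(): here it just marks the work to run again. */
static void requeue(struct lag *l) { l->requeued = 1; }
static void apply_speed(void) { printf("speed applied\n"); }

/* Defer the update while a mode change holds the state in flux. */
static void speed_update_work(struct lag *l)
{
	if (!l->mode_mpesw)
		return;
	if (l->mode_change_in_progress)
		requeue(l);
	else
		apply_speed();
}

int main(void)
{
	struct lag l = { 1, 1, 0 };

	speed_update_work(&l);		/* busy: requeued */
	l.mode_change_in_progress = 0;
	l.requeued = 0;
	speed_update_work(&l);		/* idle: applies */
	return 0;
}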
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
index 02520f27a033..f5d9b5c97b0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
@@ -32,4 +32,18 @@ bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
void mlx5_lag_mpesw_disable(struct mlx5_core_dev *dev);
int mlx5_lag_mpesw_enable(struct mlx5_core_dev *dev);
+#ifdef CONFIG_MLX5_ESWITCH
+void mlx5_mpesw_speed_update_work(struct work_struct *work);
+int mlx5_lag_mpesw_port_change_event(struct notifier_block *nb,
+ unsigned long event, void *data);
+#else
+static inline void mlx5_mpesw_speed_update_work(struct work_struct *work) {}
+static inline int mlx5_lag_mpesw_port_change_event(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ return NOTIFY_DONE;
+}
+#endif /* CONFIG_MLX5_ESWITCH */
+
#endif /* __MLX5_LAG_MPESW_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index f2d74382fb85..b635b423d972 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -345,10 +345,10 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
u8 tc, u8 *bw_pct);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
+ u16 *max_bw_value,
u8 *max_bw_unit);
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
+ u16 *max_bw_value,
u8 *max_bw_unit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
@@ -383,6 +383,7 @@ const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
struct mlx5_link_info *info,
bool force_legacy);
+int mlx5_port_oper_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 7f8bed353e67..ee8b9765c5ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -773,7 +773,7 @@ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
}
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
+ u16 *max_bw_value,
u8 *max_bw_units)
{
u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
@@ -796,7 +796,7 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
}
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
+ u16 *max_bw_value,
u8 *max_bw_units)
{
u32 out[MLX5_ST_SZ_DW(qetc_reg)];
@@ -1111,7 +1111,7 @@ mlx5e_ext_link_info[MLX5E_EXT_LINK_MODES_NUMBER] = {
[MLX5E_200GAUI_1_200GBASE_CR1_KR1] = {.speed = 200000, .lanes = 1},
[MLX5E_400GAUI_2_400GBASE_CR2_KR2] = {.speed = 400000, .lanes = 2},
[MLX5E_800GAUI_4_800GBASE_CR4_KR4] = {.speed = 800000, .lanes = 4},
- [MLX5E_1600TAUI_8_1600TBASE_CR8_KR8] = {.speed = 1600000, .lanes = 8},
+ [MLX5E_1600GAUI_8_1600GBASE_CR8_KR8] = {.speed = 1600000, .lanes = 8},
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
@@ -1203,6 +1203,30 @@ u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
return link_modes;
}
+int mlx5_port_oper_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+ const struct mlx5_link_info *table;
+ struct mlx5_port_eth_proto eproto;
+ u32 oper_speed = 0;
+ u32 max_size;
+ bool ext;
+ int err;
+ int i;
+
+ ext = mlx5_ptys_ext_supported(mdev);
+ err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
+ if (err)
+ return err;
+
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, false);
+ for (i = 0; i < max_size; ++i)
+ if (eproto.oper & MLX5E_PROT_MASK(i))
+ oper_speed = max(oper_speed, table[i].speed);
+
+ *speed = oper_speed;
+ return 0;
+}
+
int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
const struct mlx5_link_info *table;
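mlx5_port_oper_linkspeed reduces the operational protocol bitmap to a single number by taking the fastest mode whose bit is set. A sketch of that reduction over a hypothetical three-entry mode table:

#include <stdio.h>

struct link_info { unsigned int speed_mbps; };

/* Hypothetical mode table indexed by protocol bit, as in the PTYS tables. */
static const struct link_info table[] = {
	{ 10000 }, { 25000 }, { 100000 },
};

/* Pick the fastest mode advertised in the operational bitmap. */
static unsigned int oper_linkspeed(unsigned int oper_mask)
{
	unsigned int speed = 0;
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if ((oper_mask & (1u << i)) && table[i].speed_mbps > speed)
			speed = table[i].speed_mbps;
	return speed;
}

int main(void)
{
	printf("%u Mbps\n", oper_linkspeed(0x5)); /* bits 0,2 -> 100000 */
	return 0;
}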
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
index 839d71bd4216..5bc8e97ecf1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -121,7 +121,9 @@ mlx5_fs_hws_pr_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
if (!pr_bulk)
return NULL;
- if (mlx5_fs_bulk_init(dev, &pr_bulk->fs_bulk, bulk_len))
+ mlx5_fs_bulk_init(&pr_bulk->fs_bulk, bulk_len);
+
+ if (mlx5_fs_bulk_bitmap_alloc(dev, &pr_bulk->fs_bulk))
goto free_pr_bulk;
for (i = 0; i < bulk_len; i++) {
@@ -275,7 +277,9 @@ mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
if (!mh_bulk)
return NULL;
- if (mlx5_fs_bulk_init(dev, &mh_bulk->fs_bulk, bulk_len))
+ mlx5_fs_bulk_init(&mh_bulk->fs_bulk, bulk_len);
+
+ if (mlx5_fs_bulk_bitmap_alloc(dev, &mh_bulk->fs_bulk))
goto free_mh_bulk;
for (int i = 0; i < bulk_len; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 306affbcfd3b..cb098d3eb2fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -62,6 +62,28 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
return MLX5_GET(query_vport_state_out, out, state);
}
+static int mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u8 other_vport,
+ u8 *admin_state)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
+ int err;
+
+ MLX5_SET(query_vport_state_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_STATE);
+ MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+ MLX5_SET(query_vport_state_in, in, vport_number, vport);
+ MLX5_SET(query_vport_state_in, in, other_vport, other_vport);
+
+ err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
+ if (err)
+ return err;
+
+ *admin_state = MLX5_GET(query_vport_state_out, out, admin_state);
+ return 0;
+}
+
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 other_vport, u8 state)
{
@@ -77,6 +99,58 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
}
+int mlx5_modify_vport_max_tx_speed(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u8 other_vport, u16 max_tx_speed)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
+ u8 admin_state;
+ int err;
+
+ err = mlx5_query_vport_admin_state(mdev, opmod, vport, other_vport,
+ &admin_state);
+ if (err)
+ return err;
+
+ MLX5_SET(modify_vport_state_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_VPORT_STATE);
+ MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
+ MLX5_SET(modify_vport_state_in, in, vport_number, vport);
+ MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
+ MLX5_SET(modify_vport_state_in, in, admin_state, admin_state);
+ MLX5_SET(modify_vport_state_in, in, max_tx_speed, max_tx_speed);
+
+ return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
+}
+
+int mlx5_query_vport_max_tx_speed(struct mlx5_core_dev *mdev, u8 op_mod,
+ u16 vport, u8 other_vport, u32 *max_tx_speed)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
+ u32 state;
+ int err;
+
+ MLX5_SET(query_vport_state_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_STATE);
+ MLX5_SET(query_vport_state_in, in, op_mod, op_mod);
+ MLX5_SET(query_vport_state_in, in, vport_number, vport);
+ MLX5_SET(query_vport_state_in, in, other_vport, other_vport);
+
+ err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
+ if (err)
+ return err;
+
+ state = MLX5_GET(query_vport_state_out, out, state);
+ if (state == VPORT_STATE_DOWN) {
+ *max_tx_speed = 0;
+ return 0;
+ }
+
+ *max_tx_speed = MLX5_GET(query_vport_state_out, out, max_tx_speed);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_max_tx_speed);
+
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
bool other_vport, u32 *out)
{
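Because MODIFY_VPORT_STATE writes the whole state block, changing only the TX speed cap requires reading the current admin state first and writing it back unchanged, which is what the new query helper enables. A userspace model of that read-modify-write, with plain struct fields standing in for the command interface:

#include <stdio.h>

struct vport_hw { unsigned char admin_state; unsigned short max_tx_speed; };

/* Stand-ins for the QUERY/MODIFY_VPORT_STATE commands. */
static int query_admin_state(const struct vport_hw *hw, unsigned char *st)
{
	*st = hw->admin_state;
	return 0;
}

static int modify_state(struct vport_hw *hw, unsigned char admin_state,
			unsigned short max_tx_speed)
{
	hw->admin_state = admin_state;	/* the command writes both fields */
	hw->max_tx_speed = max_tx_speed;
	return 0;
}

/* Read-modify-write: preserve admin_state while changing the speed cap. */
static int set_max_tx_speed(struct vport_hw *hw, unsigned short speed)
{
	unsigned char admin;
	int err = query_admin_state(hw, &admin);

	if (err)
		return err;
	return modify_state(hw, admin, speed);
}

int main(void)
{
	struct vport_hw hw = { .admin_state = 1, .max_tx_speed = 0 };

	set_max_tx_speed(&hw, 1000);
	printf("admin=%u speed=%u\n", hw.admin_state, hw.max_tx_speed);
	return 0;
}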
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 8769cba2c746..7da9ef254b72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -2542,18 +2542,6 @@ void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
-static int __init mlxsw_pci_module_init(void)
-{
- return 0;
-}
-
-static void __exit mlxsw_pci_module_exit(void)
-{
-}
-
-module_init(mlxsw_pci_module_init);
-module_exit(mlxsw_pci_module_exit);
-
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 422265dc7abd..b717db879cd3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -1019,6 +1019,9 @@ enum {
#define FBNIC_QUEUE_TWQ_CTL_ENABLE CSR_BIT(1)
#define FBNIC_QUEUE_TWQ0_TAIL 0x002 /* 0x008 */
#define FBNIC_QUEUE_TWQ1_TAIL 0x003 /* 0x00c */
+#define FBNIC_QUEUE_TWQ0_PTRS 0x004 /* 0x010 */
+#define FBNIC_QUEUE_TWQ1_PTRS 0x005 /* 0x014 */
+#define FBNIC_QUEUE_TWQ_PTRS_HEAD_MASK CSR_GENMASK(31, 16)
#define FBNIC_QUEUE_TWQ0_SIZE 0x00a /* 0x028 */
#define FBNIC_QUEUE_TWQ1_SIZE 0x00b /* 0x02c */
@@ -1042,6 +1045,8 @@ enum {
#define FBNIC_QUEUE_TCQ_CTL_ENABLE CSR_BIT(1)
#define FBNIC_QUEUE_TCQ_HEAD 0x081 /* 0x204 */
+#define FBNIC_QUEUE_TCQ_PTRS 0x082 /* 0x208 */
+#define FBNIC_QUEUE_TCQ_PTRS_TAIL_MASK CSR_GENMASK(31, 16)
#define FBNIC_QUEUE_TCQ_SIZE 0x084 /* 0x210 */
#define FBNIC_QUEUE_TCQ_SIZE_MASK CSR_GENMASK(3, 0)
@@ -1075,6 +1080,9 @@ enum {
#define FBNIC_QUEUE_RCQ_CTL_ENABLE CSR_BIT(1)
#define FBNIC_QUEUE_RCQ_HEAD 0x201 /* 0x804 */
+#define FBNIC_QUEUE_RCQ_PTRS 0x202 /* 0x808 */
+#define FBNIC_QUEUE_RCQ_PTRS_TAIL_MASK CSR_GENMASK(31, 16)
+#define FBNIC_QUEUE_RCQ_PTRS_HEAD_MASK CSR_GENMASK(15, 0)
#define FBNIC_QUEUE_RCQ_SIZE 0x204 /* 0x810 */
#define FBNIC_QUEUE_RCQ_SIZE_MASK CSR_GENMASK(3, 0)
@@ -1090,6 +1098,10 @@ enum {
#define FBNIC_QUEUE_BDQ_HPQ_TAIL 0x241 /* 0x904 */
#define FBNIC_QUEUE_BDQ_PPQ_TAIL 0x242 /* 0x908 */
+#define FBNIC_QUEUE_BDQ_HPQ_PTRS 0x243 /* 0x90c */
+#define FBNIC_QUEUE_BDQ_PPQ_PTRS 0x244 /* 0x910 */
+#define FBNIC_QUEUE_BDQ_PTRS_HEAD_MASK CSR_GENMASK(31, 16)
+#define FBNIC_QUEUE_BDQ_PTRS_TAIL_MASK CSR_GENMASK(15, 0)
#define FBNIC_QUEUE_BDQ_HPQ_SIZE 0x247 /* 0x91c */
#define FBNIC_QUEUE_BDQ_PPQ_SIZE 0x248 /* 0x920 */
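Each new *_PTRS register packs both ring indices into one 32-bit read, one index per 16-bit half (which half holds head versus tail differs per queue type; this sketch mirrors the RCQ/BDQ layout). A small decode example:

#include <stdint.h>
#include <stdio.h>

/* One 32-bit register carries both indices: one in the top half,
 * the other in the bottom half, matching the mask layout above. */
#define PTRS_HEAD_MASK 0xffff0000u
#define PTRS_TAIL_MASK 0x0000ffffu

static uint32_t ptrs_head(uint32_t ptrs)
{
	return (ptrs & PTRS_HEAD_MASK) >> 16;
}

static uint32_t ptrs_tail(uint32_t ptrs)
{
	return ptrs & PTRS_TAIL_MASK;
}

int main(void)
{
	uint32_t ptrs = 0x00340012;	/* head 0x34, tail 0x12 */

	printf("head=%#x tail=%#x\n", ptrs_head(ptrs), ptrs_tail(ptrs));
	return 0;
}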
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
index b7238dd967fe..08270db2dee8 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
@@ -7,9 +7,12 @@
#include <linux/seq_file.h>
#include "fbnic.h"
+#include "fbnic_txrx.h"
static struct dentry *fbnic_dbg_root;
+/* Descriptor Seq Functions */
+
static void fbnic_dbg_desc_break(struct seq_file *s, int i)
{
while (i--)
@@ -18,6 +21,362 @@ static void fbnic_dbg_desc_break(struct seq_file *s, int i)
seq_putc(s, '\n');
}
+static void fbnic_dbg_ring_show(struct seq_file *s)
+{
+ struct fbnic_ring *ring = s->private;
+ unsigned long doorbell_offset;
+ u32 head = 0, tail = 0;
+ u32 __iomem *csr_base;
+
+ csr_base = fbnic_ring_csr_base(ring);
+ doorbell_offset = ring->doorbell - csr_base;
+
+ seq_printf(s, "doorbell CSR: %#05lx q_idx: %d\n",
+ doorbell_offset, ring->q_idx);
+ seq_printf(s, "size_mask: %#06x size: %zu flags: 0x%02x\n",
+ ring->size_mask, ring->size, ring->flags);
+ seq_printf(s, "SW: head: %#06x tail: %#06x\n",
+ ring->head, ring->tail);
+
+ switch (doorbell_offset) {
+ case FBNIC_QUEUE_TWQ0_TAIL:
+ tail = readl(csr_base + FBNIC_QUEUE_TWQ0_PTRS);
+ head = FIELD_GET(FBNIC_QUEUE_TWQ_PTRS_HEAD_MASK, tail);
+ break;
+ case FBNIC_QUEUE_TWQ1_TAIL:
+ tail = readl(csr_base + FBNIC_QUEUE_TWQ1_PTRS);
+ head = FIELD_GET(FBNIC_QUEUE_TWQ_PTRS_HEAD_MASK, tail);
+ break;
+ case FBNIC_QUEUE_TCQ_HEAD:
+ head = readl(csr_base + FBNIC_QUEUE_TCQ_PTRS);
+ tail = FIELD_GET(FBNIC_QUEUE_TCQ_PTRS_TAIL_MASK, head);
+ break;
+ case FBNIC_QUEUE_BDQ_HPQ_TAIL:
+ tail = readl(csr_base + FBNIC_QUEUE_BDQ_HPQ_PTRS);
+ head = FIELD_GET(FBNIC_QUEUE_BDQ_PTRS_HEAD_MASK, tail);
+ break;
+ case FBNIC_QUEUE_BDQ_PPQ_TAIL:
+ tail = readl(csr_base + FBNIC_QUEUE_BDQ_PPQ_PTRS);
+ head = FIELD_GET(FBNIC_QUEUE_BDQ_PTRS_HEAD_MASK, tail);
+ break;
+ case FBNIC_QUEUE_RCQ_HEAD:
+ head = readl(csr_base + FBNIC_QUEUE_RCQ_PTRS);
+ tail = FIELD_GET(FBNIC_QUEUE_RCQ_PTRS_TAIL_MASK, head);
+ break;
+ }
+
+ tail &= FBNIC_QUEUE_BDQ_PTRS_TAIL_MASK;
+ head &= FBNIC_QUEUE_RCQ_PTRS_HEAD_MASK;
+
+ seq_printf(s, "HW: head: %#06x tail: %#06x\n", head, tail);
+
+ seq_puts(s, "\n");
+}
+
+static void fbnic_dbg_twd_desc_seq_show(struct seq_file *s, int i)
+{
+ struct fbnic_ring *ring = s->private;
+ u64 twd = le64_to_cpu(ring->desc[i]);
+
+ switch (FIELD_GET(FBNIC_TWD_TYPE_MASK, twd)) {
+ case FBNIC_TWD_TYPE_META:
+ seq_printf(s, "%04x %#06llx %llx %llx %llx %llx %llx %#llx %#llx %llx %#04llx %#04llx %llx %#04llx\n",
+ i, FIELD_GET(FBNIC_TWD_LEN_MASK, twd),
+ FIELD_GET(FBNIC_TWD_TYPE_MASK, twd),
+ FIELD_GET(FBNIC_TWD_FLAG_REQ_COMPLETION, twd),
+ FIELD_GET(FBNIC_TWD_FLAG_REQ_CSO, twd),
+ FIELD_GET(FBNIC_TWD_FLAG_REQ_LSO, twd),
+ FIELD_GET(FBNIC_TWD_FLAG_REQ_TS, twd),
+ FIELD_GET(FBNIC_TWD_L4_HLEN_MASK, twd),
+ FIELD_GET(FBNIC_TWD_CSUM_OFFSET_MASK, twd),
+ FIELD_GET(FBNIC_TWD_L4_TYPE_MASK, twd),
+ FIELD_GET(FBNIC_TWD_L3_IHLEN_MASK, twd),
+ FIELD_GET(FBNIC_TWD_L3_OHLEN_MASK, twd),
+ FIELD_GET(FBNIC_TWD_L3_TYPE_MASK, twd),
+ FIELD_GET(FBNIC_TWD_L2_HLEN_MASK, twd));
+ break;
+ default:
+ seq_printf(s, "%04x %#06llx %llx %#014llx\n", i,
+ FIELD_GET(FBNIC_TWD_LEN_MASK, twd),
+ FIELD_GET(FBNIC_TWD_TYPE_MASK, twd),
+ FIELD_GET(FBNIC_TWD_ADDR_MASK, twd));
+ break;
+ }
+}
+
+static int fbnic_dbg_twq_desc_seq_show(struct seq_file *s, void *v)
+{
+ struct fbnic_ring *ring = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate header on first entry */
+ fbnic_dbg_ring_show(s);
+ snprintf(hdr, sizeof(hdr), "%4s %5s %s %s\n",
+ "DESC", "LEN/MSS", "T", "METADATA/TIMESTAMP/BUFFER_ADDR");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ /* Display descriptor */
+ if (!ring->desc) {
+ seq_puts(s, "Descriptor ring not allocated.\n");
+ return 0;
+ }
+
+ for (i = 0; i <= ring->size_mask; i++)
+ fbnic_dbg_twd_desc_seq_show(s, i);
+
+ return 0;
+}
+
+static int fbnic_dbg_tcq_desc_seq_show(struct seq_file *s, void *v)
+{
+ struct fbnic_ring *ring = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate header on first entry */
+ fbnic_dbg_ring_show(s);
+ snprintf(hdr, sizeof(hdr), "%4s %s %s %s %5s %-16s %-6s %-6s\n",
+ "DESC", "D", "T", "Q", "STATUS", "TIMESTAMP", "HEAD1", "HEAD0");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ /* Display descriptor */
+ if (!ring->desc) {
+ seq_puts(s, "Descriptor ring not allocated.\n");
+ return 0;
+ }
+
+ for (i = 0; i <= ring->size_mask; i++) {
+ u64 tcd = le64_to_cpu(ring->desc[i]);
+
+ switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) {
+ case FBNIC_TCD_TYPE_0:
+ seq_printf(s, "%04x %llx %llx %llx %#05llx %-17s %#06llx %#06llx\n",
+ i, FIELD_GET(FBNIC_TCD_DONE, tcd),
+ FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd),
+ FIELD_GET(FBNIC_TCD_TWQ1, tcd),
+ FIELD_GET(FBNIC_TCD_STATUS_MASK, tcd),
+ "",
+ FIELD_GET(FBNIC_TCD_TYPE0_HEAD1_MASK, tcd),
+ FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK, tcd));
+ break;
+ case FBNIC_TCD_TYPE_1:
+ seq_printf(s, "%04x %llx %llx %llx %#05llx %#012llx\n",
+ i, FIELD_GET(FBNIC_TCD_DONE, tcd),
+ FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd),
+ FIELD_GET(FBNIC_TCD_TWQ1, tcd),
+ FIELD_GET(FBNIC_TCD_STATUS_MASK, tcd),
+ FIELD_GET(FBNIC_TCD_TYPE1_TS_MASK, tcd));
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int fbnic_dbg_bdq_desc_seq_show(struct seq_file *s, void *v)
+{
+ struct fbnic_ring *ring = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate header on first entry */
+ fbnic_dbg_ring_show(s);
+ snprintf(hdr, sizeof(hdr), "%4s %-4s %s\n",
+ "DESC", "ID", "BUFFER_ADDR");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ /* Display descriptor */
+ if (!ring->desc) {
+ seq_puts(s, "Descriptor ring not allocated.\n");
+ return 0;
+ }
+
+ for (i = 0; i <= ring->size_mask; i++) {
+ u64 bd = le64_to_cpu(ring->desc[i]);
+
+ seq_printf(s, "%04x %#04llx %#014llx\n", i,
+ FIELD_GET(FBNIC_BD_DESC_ID_MASK, bd),
+ FIELD_GET(FBNIC_BD_DESC_ADDR_MASK, bd));
+ }
+
+ return 0;
+}
+
+static void fbnic_dbg_rcd_desc_seq_show(struct seq_file *s, int i)
+{
+ struct fbnic_ring *ring = s->private;
+ u64 rcd = le64_to_cpu(ring->desc[i]);
+
+ switch (FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd)) {
+ case FBNIC_RCD_TYPE_HDR_AL:
+ case FBNIC_RCD_TYPE_PAY_AL:
+ seq_printf(s, "%04x %llx %llx %llx %#06llx %#06llx %#06llx\n",
+ i, FIELD_GET(FBNIC_RCD_DONE, rcd),
+ FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd),
+ FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_AL_BUFF_ID_MASK, rcd));
+ break;
+ case FBNIC_RCD_TYPE_OPT_META:
+ seq_printf(s, "%04x %llx %llx %llx %llx %llx %#06llx %#012llx\n",
+ i, FIELD_GET(FBNIC_RCD_DONE, rcd),
+ FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_OPT_META_TYPE_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_OPT_META_TS, rcd),
+ FIELD_GET(FBNIC_RCD_OPT_META_ACTION, rcd),
+ FIELD_GET(FBNIC_RCD_OPT_META_ACTION_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_OPT_META_TS_MASK, rcd));
+ break;
+ case FBNIC_RCD_TYPE_META:
+ seq_printf(s, "%04x %llx %llx %llx %llx %llx %llx %llx %llx %llx %#06llx %#010llx\n",
+ i, FIELD_GET(FBNIC_RCD_DONE, rcd),
+ FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_META_ECN, rcd),
+ FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd),
+ FIELD_GET(FBNIC_RCD_META_ERR_MAC_EOP, rcd),
+ FIELD_GET(FBNIC_RCD_META_ERR_TRUNCATED_FRAME, rcd),
+ FIELD_GET(FBNIC_RCD_META_ERR_PARSER, rcd),
+ FIELD_GET(FBNIC_RCD_META_L4_TYPE_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_META_L3_TYPE_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_META_L2_CSUM_MASK, rcd),
+ FIELD_GET(FBNIC_RCD_META_RSS_HASH_MASK, rcd));
+ break;
+ }
+}
+
+static int fbnic_dbg_rcq_desc_seq_show(struct seq_file *s, void *v)
+{
+ struct fbnic_ring *ring = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate header on first entry */
+ fbnic_dbg_ring_show(s);
+ snprintf(hdr, sizeof(hdr),
+ "%18s %s %s\n", "OFFSET/", "L", "L");
+ seq_puts(s, hdr);
+ snprintf(hdr, sizeof(hdr),
+ "%4s %s %s %s %s %s %s %s %s %s %-8s %s\n",
+ "DESC", "D", "T", "F", "C", "M", "T", "P", "4", "3", "LEN/CSUM", "ID/TS/RSS");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ /* Display descriptor */
+ if (!ring->desc) {
+ seq_puts(s, "Descriptor ring not allocated.\n");
+ return 0;
+ }
+
+ for (i = 0; i <= ring->size_mask; i++)
+ fbnic_dbg_rcd_desc_seq_show(s, i);
+
+ return 0;
+}
+
+static int fbnic_dbg_desc_open(struct inode *inode, struct file *file)
+{
+ struct fbnic_ring *ring = inode->i_private;
+ int (*show)(struct seq_file *s, void *v);
+
+ switch (ring->doorbell - fbnic_ring_csr_base(ring)) {
+ case FBNIC_QUEUE_TWQ0_TAIL:
+ case FBNIC_QUEUE_TWQ1_TAIL:
+ show = fbnic_dbg_twq_desc_seq_show;
+ break;
+ case FBNIC_QUEUE_TCQ_HEAD:
+ show = fbnic_dbg_tcq_desc_seq_show;
+ break;
+ case FBNIC_QUEUE_BDQ_HPQ_TAIL:
+ case FBNIC_QUEUE_BDQ_PPQ_TAIL:
+ show = fbnic_dbg_bdq_desc_seq_show;
+ break;
+ case FBNIC_QUEUE_RCQ_HEAD:
+ show = fbnic_dbg_rcq_desc_seq_show;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return single_open(file, show, ring);
+}
+
+static const struct file_operations fbnic_dbg_desc_fops = {
+ .owner = THIS_MODULE,
+ .open = fbnic_dbg_desc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void fbnic_dbg_nv_init(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ char name[16];
+ int i, j;
+
+ /* Generate a folder for each napi vector */
+ snprintf(name, sizeof(name), "nv.%03d", nv->v_idx);
+
+ nv->dbg_nv = debugfs_create_dir(name, fbd->dbg_fbd);
+
+ /* Generate a file for each Tx ring in the napi vector */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+ unsigned int hw_idx;
+
+ hw_idx = fbnic_ring_csr_base(&qt->cmpl) -
+ &fbd->uc_addr0[FBNIC_QUEUE(0)];
+ hw_idx /= FBNIC_QUEUE_STRIDE;
+
+ snprintf(name, sizeof(name), "twq0.%03d", hw_idx);
+ debugfs_create_file(name, 0400, nv->dbg_nv, &qt->sub0,
+ &fbnic_dbg_desc_fops);
+
+ snprintf(name, sizeof(name), "twq1.%03d", hw_idx);
+ debugfs_create_file(name, 0400, nv->dbg_nv, &qt->sub1,
+ &fbnic_dbg_desc_fops);
+
+ snprintf(name, sizeof(name), "tcq.%03d", hw_idx);
+ debugfs_create_file(name, 0400, nv->dbg_nv, &qt->cmpl,
+ &fbnic_dbg_desc_fops);
+ }
+
+ /* Generate a file for each Rx ring in the napi vector */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+ unsigned int hw_idx;
+
+ hw_idx = fbnic_ring_csr_base(&qt->cmpl) -
+ &fbd->uc_addr0[FBNIC_QUEUE(0)];
+ hw_idx /= FBNIC_QUEUE_STRIDE;
+
+ snprintf(name, sizeof(name), "hpq.%03d", hw_idx);
+ debugfs_create_file(name, 0400, nv->dbg_nv, &qt->sub0,
+ &fbnic_dbg_desc_fops);
+
+ snprintf(name, sizeof(name), "ppq.%03d", hw_idx);
+ debugfs_create_file(name, 0400, nv->dbg_nv, &qt->sub1,
+ &fbnic_dbg_desc_fops);
+
+ snprintf(name, sizeof(name), "rcq.%03d", hw_idx);
+ debugfs_create_file(name, 0400, nv->dbg_nv, &qt->cmpl,
+ &fbnic_dbg_desc_fops);
+ }
+}
+
+void fbnic_dbg_nv_exit(struct fbnic_napi_vector *nv)
+{
+ debugfs_remove_recursive(nv->dbg_nv);
+ nv->dbg_nv = NULL;
+}
+
static int fbnic_dbg_mac_addr_show(struct seq_file *s, void *v)
{
struct fbnic_dev *fbd = s->private;
@@ -170,6 +529,52 @@ static int fbnic_dbg_ipo_dst_show(struct seq_file *s, void *v)
}
DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_dst);
+static void fbnic_dbg_fw_mbx_display(struct seq_file *s,
+ struct fbnic_dev *fbd, int mbx_idx)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+ char hdr[80];
+ int i;
+
+ /* Generate header */
+ seq_puts(s, mbx_idx == FBNIC_IPC_MBX_RX_IDX ? "Rx\n" : "Tx\n");
+
+ seq_printf(s, "Rdy: %d Head: %d Tail: %d\n",
+ mbx->ready, mbx->head, mbx->tail);
+
+ snprintf(hdr, sizeof(hdr), "%3s %-4s %s %-12s %s %-3s %-16s\n",
+ "Idx", "Len", "E", "Addr", "F", "H", "Raw");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_IPC_MBX_DESC_LEN; i++) {
+ u64 desc = __fbnic_mbx_rd_desc(fbd, mbx_idx, i);
+
+ seq_printf(s, "%-3.2d %04lld %d %012llx %d %-3d %016llx\n",
+ i, FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc),
+ !!(desc & FBNIC_IPC_MBX_DESC_EOM),
+ desc & FBNIC_IPC_MBX_DESC_ADDR_MASK,
+ !!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL),
+ !!(desc & FBNIC_IPC_MBX_DESC_HOST_CMPL),
+ desc);
+ }
+}
+
+static int fbnic_dbg_fw_mbx_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ fbnic_dbg_fw_mbx_display(s, fbd, FBNIC_IPC_MBX_RX_IDX);
+
+ /* Add blank line between Rx and Tx */
+ seq_puts(s, "\n");
+
+ fbnic_dbg_fw_mbx_display(s, fbd, FBNIC_IPC_MBX_TX_IDX);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_fw_mbx);
+
static int fbnic_dbg_fw_log_show(struct seq_file *s, void *v)
{
struct fbnic_dev *fbd = s->private;
@@ -249,6 +654,8 @@ void fbnic_dbg_fbd_init(struct fbnic_dev *fbd)
&fbnic_dbg_ipo_src_fops);
debugfs_create_file("ipo_dst", 0400, fbd->dbg_fbd, fbd,
&fbnic_dbg_ipo_dst_fops);
+ debugfs_create_file("fw_mbx", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_fw_mbx_fops);
debugfs_create_file("fw_log", 0400, fbd->dbg_fbd, fbd,
&fbnic_dbg_fw_log_fops);
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index b62b1d5b1453..f1c992f5fe94 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -178,7 +178,7 @@ fbnic_flash_start(struct fbnic_dev *fbd, struct pldmfw_component *component)
goto cmpl_free;
/* Wait for firmware to ack firmware upgrade start */
- if (wait_for_completion_timeout(&cmpl->done, 10 * HZ))
+ if (fbnic_mbx_wait_for_cmpl(cmpl))
err = cmpl->result;
else
err = -ETIMEDOUT;
@@ -252,7 +252,7 @@ fbnic_flash_component(struct pldmfw *context,
goto err_no_msg;
while (offset < size) {
- if (!wait_for_completion_timeout(&cmpl->done, 15 * HZ)) {
+ if (!fbnic_mbx_wait_for_cmpl(cmpl)) {
err = -ETIMEDOUT;
break;
}
@@ -390,7 +390,7 @@ static int fbnic_fw_reporter_dump(struct devlink_health_reporter *reporter,
"Failed to transmit core dump info msg");
goto cmpl_free;
}
- if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ if (!fbnic_mbx_wait_for_cmpl(fw_cmpl)) {
NL_SET_ERR_MSG_MOD(extack,
"Timed out waiting on core dump info");
err = -ETIMEDOUT;
@@ -447,7 +447,7 @@ static int fbnic_fw_reporter_dump(struct devlink_health_reporter *reporter,
goto cmpl_cleanup;
}
- if (wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ if (fbnic_mbx_wait_for_cmpl(fw_cmpl)) {
reinit_completion(&fw_cmpl->done);
} else {
NL_SET_ERR_MSG_FMT_MOD(extack,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 693ebdf38705..11745a2d8a44 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -825,6 +825,13 @@ static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
return 0;
}
+static u32 fbnic_get_rx_ring_count(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return fbn->num_rx_queues;
+}
+
static int fbnic_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
@@ -833,10 +840,6 @@ static int fbnic_get_rxnfc(struct net_device *netdev,
u32 special = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = fbn->num_rx_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRULE:
ret = fbnic_get_cls_rule(fbn, cmd);
break;
@@ -1671,7 +1674,7 @@ fbnic_get_module_eeprom_by_page(struct net_device *netdev,
goto exit_free;
}
- if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ if (!fbnic_mbx_wait_for_cmpl(fw_cmpl)) {
err = -ETIMEDOUT;
NL_SET_ERR_MSG_MOD(extack,
"Timed out waiting for firmware response");
@@ -1895,6 +1898,7 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.get_sset_count = fbnic_get_sset_count,
.get_rxnfc = fbnic_get_rxnfc,
.set_rxnfc = fbnic_set_rxnfc,
+ .get_rx_ring_count = fbnic_get_rx_ring_count,
.get_rxfh_key_size = fbnic_get_rxfh_key_size,
.get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
.get_rxfh = fbnic_get_rxfh,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index d8d9b6cfde82..1f0b6350bef4 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -40,7 +40,7 @@ static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
fw_wr32(fbd, desc_offset + 1, 0);
}
-static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
+u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
{
u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
u64 desc;
@@ -205,8 +205,7 @@ static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
while (!err && count--) {
struct fbnic_tlv_msg *msg;
- msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
- __GFP_NOWARN);
+ msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_KERNEL);
if (!msg) {
err = -ENOMEM;
break;
@@ -416,7 +415,7 @@ static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
return err;
}
-static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+static int fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
@@ -429,14 +428,15 @@ static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
/* Make sure we have a page for the FW to write to */
- fbnic_mbx_alloc_rx_msgs(fbd);
- break;
+ return fbnic_mbx_alloc_rx_msgs(fbd);
case FBNIC_IPC_MBX_TX_IDX:
/* Enable DMA reads from the device */
wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
break;
}
+
+ return 0;
}
static bool fbnic_mbx_event(struct fbnic_dev *fbd)
@@ -1592,7 +1592,7 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
{
struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
- u8 head = rx_mbx->head;
+ u8 head = rx_mbx->head, tail = rx_mbx->tail;
u64 desc, length;
while (head != rx_mbx->tail) {
@@ -1603,8 +1603,8 @@ static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
break;
- dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(fbd->dev, rx_mbx->buf_info[head].addr,
+ FBNIC_RX_PAGE_SIZE, DMA_FROM_DEVICE);
msg = rx_mbx->buf_info[head].msg;
@@ -1637,19 +1637,26 @@ static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
next_page:
+ fw_wr32(fbd, FBNIC_IPC_MBX(FBNIC_IPC_MBX_RX_IDX, head), 0);
- free_page((unsigned long)rx_mbx->buf_info[head].msg);
+ rx_mbx->buf_info[tail] = rx_mbx->buf_info[head];
rx_mbx->buf_info[head].msg = NULL;
+ rx_mbx->buf_info[head].addr = 0;
- head++;
- head %= FBNIC_IPC_MBX_DESC_LEN;
+ __fbnic_mbx_wr_desc(fbd, FBNIC_IPC_MBX_RX_IDX, tail,
+ FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK,
+ FBNIC_RX_PAGE_SIZE) |
+ (rx_mbx->buf_info[tail].addr &
+ FBNIC_IPC_MBX_DESC_ADDR_MASK) |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
+
+ head = (head + 1) & (FBNIC_IPC_MBX_DESC_LEN - 1);
+ tail = (tail + 1) & (FBNIC_IPC_MBX_DESC_LEN - 1);
}
/* Record head for next interrupt */
rx_mbx->head = head;
-
- /* Make sure we have at least one page for the FW to write to */
- fbnic_mbx_alloc_rx_msgs(fbd);
+ rx_mbx->tail = tail;
}
void fbnic_mbx_poll(struct fbnic_dev *fbd)
@@ -1684,8 +1691,11 @@ int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
} while (!fbnic_mbx_event(fbd));
/* FW has shown signs of life. Enable DMA and start Tx/Rx */
- for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
- fbnic_mbx_init_desc_ring(fbd, i);
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) {
+ err = fbnic_mbx_init_desc_ring(fbd, i);
+ if (err)
+ goto clean_mbx;
+ }
/* Request an update from the firmware. This should overwrite
* mgmt.version once we get the actual version from the firmware
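The mailbox rework recycles receive pages instead of freeing them: the consumed buffer at head is moved to the tail slot and immediately reposted, and both indices advance with a power-of-two mask. A standalone model of the recycling step, with repost standing in for the descriptor write-back:

#include <stdio.h>

#define RING_LEN 8	/* power of two, as FBNIC_IPC_MBX_DESC_LEN is */

struct buf { void *page; };

struct mbx {
	struct buf buf_info[RING_LEN];
	unsigned int head, tail;
};

/* Stand-in for reposting the descriptor to the device at 'tail'. */
static void repost(struct mbx *m, unsigned int tail)
{
	printf("repost slot %u\n", tail);
}

/* Recycle instead of free/realloc: move the consumed buffer at head
 * to the tail slot and hand it straight back to the firmware. */
static void process_one(struct mbx *m)
{
	m->buf_info[m->tail] = m->buf_info[m->head];
	m->buf_info[m->head].page = NULL;
	repost(m, m->tail);
	m->head = (m->head + 1) & (RING_LEN - 1);  /* mask works: len is 2^n */
	m->tail = (m->tail + 1) & (RING_LEN - 1);
}

int main(void)
{
	struct mbx m = { .head = 6, .tail = 5 };
	int dummy;

	m.buf_info[6].page = &dummy;
	process_one(&m);
	printf("head=%u tail=%u\n", m.head, m.tail);
	return 0;
}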
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index 1ecd777aaada..8f7218900562 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -4,6 +4,7 @@
#ifndef _FBNIC_FW_H_
#define _FBNIC_FW_H_
+#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
@@ -36,6 +37,7 @@ struct fbnic_fw_mbx {
* + INDEX_SZ))
*/
#define FBNIC_FW_MAX_LOG_HISTORY 14
+#define FBNIC_MBX_RX_TO_SEC 10
struct fbnic_fw_ver {
u32 version;
@@ -92,6 +94,7 @@ struct fbnic_fw_completion {
} u;
};
+u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx);
void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
@@ -129,6 +132,13 @@ struct fbnic_fw_completion *__fbnic_fw_alloc_cmpl(u32 msg_type,
struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type);
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
+static inline unsigned long
+fbnic_mbx_wait_for_cmpl(struct fbnic_fw_completion *cmpl)
+{
+ return wait_for_completion_timeout(&cmpl->done,
+ FBNIC_MBX_RX_TO_SEC * HZ);
+}
+
#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
do { \
const u32 __rev_id = _rev_id; \
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index fc7abea4ef5b..9d0e4b2cc9ac 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -835,7 +835,7 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
long *val)
{
struct fbnic_fw_completion *fw_cmpl;
- int err = 0, retries = 5;
+ int err = 0;
s32 *sensor;
fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
@@ -862,24 +862,10 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
goto exit_free;
}
- /* Allow 2 seconds for reply, resend and try up to 5 times */
- while (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
- retries--;
-
- if (retries == 0) {
- dev_err(fbd->dev,
- "Timed out waiting for TSENE read\n");
- err = -ETIMEDOUT;
- goto exit_cleanup;
- }
-
- err = fbnic_fw_xmit_tsene_read_msg(fbd, NULL);
- if (err) {
- dev_err(fbd->dev,
- "Failed to transmit TSENE read msg, err %d\n",
- err);
- goto exit_cleanup;
- }
+ if (!wait_for_completion_timeout(&fw_cmpl->done, 10 * HZ)) {
+ dev_err(fbd->dev, "Timed out waiting for TSENE read\n");
+ err = -ETIMEDOUT;
+ goto exit_cleanup;
}
/* Handle error returned by firmware */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 9240673c7533..6f9389748a7d 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -142,10 +142,14 @@ void fbnic_up(struct fbnic_net *fbn)
netif_tx_start_all_queues(fbn->netdev);
fbnic_service_task_start(fbn);
+
+ fbnic_dbg_up(fbn);
}
void fbnic_down_noidle(struct fbnic_net *fbn)
{
+ fbnic_dbg_down(fbn);
+
fbnic_service_task_stop(fbn);
/* Disable Tx/Rx Processing */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 13d508ce637f..e29959241ff3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -39,7 +39,7 @@ struct fbnic_xmit_cb {
#define FBNIC_XMIT_NOUNMAP ((void *)1)
-static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
+u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
{
unsigned long csr_base = (unsigned long)ring->doorbell;
@@ -2255,6 +2255,22 @@ fbnic_nv_disable(struct fbnic_net *fbn, struct fbnic_napi_vector *nv)
fbnic_wrfl(fbn->fbd);
}
+void fbnic_dbg_down(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_dbg_nv_exit(fbn->napi[i]);
+}
+
+void fbnic_dbg_up(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_dbg_nv_init(fbn->napi[i]);
+}
+
void fbnic_disable(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
@@ -2809,7 +2825,9 @@ void fbnic_napi_depletion_check(struct net_device *netdev)
fbnic_wrfl(fbd);
}
-static int fbnic_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+static int fbnic_queue_mem_alloc(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *qmem, int idx)
{
struct fbnic_net *fbn = netdev_priv(dev);
const struct fbnic_q_triad *real;
@@ -2859,9 +2877,12 @@ static void __fbnic_nv_restart(struct fbnic_net *fbn,
for (i = 0; i < nv->txt_count; i++)
netif_wake_subqueue(fbn->netdev, nv->qt[i].sub0.q_idx);
+ fbnic_dbg_nv_init(nv);
}
-static int fbnic_queue_start(struct net_device *dev, void *qmem, int idx)
+static int fbnic_queue_start(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *qmem, int idx)
{
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_napi_vector *nv;
@@ -2891,6 +2912,7 @@ static int fbnic_queue_stop(struct net_device *dev, void *qmem, int idx)
real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
nv = fbn->napi[idx % fbn->num_napi];
+ fbnic_dbg_nv_exit(nv);
napi_disable_locked(&nv->napi);
fbnic_nv_irq_disable(nv);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 27776e844e29..b9560103ab86 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -151,6 +151,7 @@ struct fbnic_napi_vector {
struct napi_struct napi;
struct device *dev; /* Device for DMA unmapping */
struct fbnic_dev *fbd;
+ struct dentry *dbg_nv;
u16 v_idx;
u8 txt_count;
@@ -187,9 +188,12 @@ void fbnic_napi_disable(struct fbnic_net *fbn);
void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause);
void fbnic_enable(struct fbnic_net *fbn);
void fbnic_disable(struct fbnic_net *fbn);
+void fbnic_dbg_up(struct fbnic_net *fbn);
+void fbnic_dbg_down(struct fbnic_net *fbn);
void fbnic_flush(struct fbnic_net *fbn);
void fbnic_fill(struct fbnic_net *fbn);
+u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring);
void fbnic_napi_depletion_check(struct net_device *netdev);
int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail);
@@ -198,4 +202,6 @@ static inline int fbnic_napi_idx(const struct fbnic_napi_vector *nv)
return nv->v_idx - FBNIC_NON_NAPI_VECTORS;
}
+void fbnic_dbg_nv_init(struct fbnic_napi_vector *nv);
+void fbnic_dbg_nv_exit(struct fbnic_napi_vector *nv);
#endif /* _FBNIC_TXRX_H_ */
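The fbnic hunks above export fbnic_ring_csr_base() and thread per-NAPI-vector
debugfs setup/teardown (fbnic_dbg_nv_init()/fbnic_dbg_nv_exit()) through the
up/down and queue start/stop paths, storing the directory in the new
nv->dbg_nv member. The helper bodies live in a file this diff does not show;
what follows is only a minimal sketch of the usual shape, assuming a
hypothetical driver-wide fbnic_dbg_root dentry:

#include <linux/debugfs.h>

/* Sketch only: fbnic_dbg_root is an assumed debugfs parent directory;
 * the real helpers are defined elsewhere in the driver.
 */
void fbnic_dbg_nv_init(struct fbnic_napi_vector *nv)
{
	char name[8];

	/* One directory per NAPI vector: "nv0", "nv1", ... */
	snprintf(name, sizeof(name), "nv%d", fbnic_napi_idx(nv));
	nv->dbg_nv = debugfs_create_dir(name, fbnic_dbg_root);
}

void fbnic_dbg_nv_exit(struct fbnic_napi_vector *nv)
{
	debugfs_remove_recursive(nv->dbg_nv);
	nv->dbg_nv = NULL;
}

Note how fbnic_queue_stop() calls fbnic_dbg_nv_exit() before disabling the
NAPI vector, and __fbnic_nv_restart() re-creates the entry only after the
queues are awake, so debugfs never exposes a vector that is mid-teardown.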
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 541c41a9077a..b7cf2ee9115f 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -242,7 +242,7 @@ static void ks8842_reset(struct ks8842_adapter *adapter)
msleep(10);
iowrite16(0, adapter->hw_addr + REG_GRR);
} else {
- /* The KS8842 goes haywire when doing softare reset
+ /* The KS8842 goes haywire when doing software reset
* a work around in the timberdale IP is implemented to
 * do a hardware reset instead
 */
ks8842_write16(adapter, 3, 1, REG_GRR);
@@ -312,7 +312,7 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
/* aggressive back off in half duplex */
ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1);
- /* enable no excessive collison drop */
+ /* enable no excessive collision drop */
ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);
/* Enable port 1 force flow control / back pressure / transmit / recv */
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index bb5138806c3f..8048770958d6 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -480,7 +480,7 @@ static int ks8851_net_open(struct net_device *dev)
* ks8851_net_stop - close network device
* @dev: The device being closed.
*
- * Called to close down a network device which has been active. Cancell any
+ * Called to close down a network device which has been active. Cancel any
* work, shutdown the RX and TX process and then place the chip into a low
* power state whilst it is not being used.
*/
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index c862b13b447a..a161ae45743a 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -39,7 +39,7 @@ static int msg_enable;
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
- * of the chip registers are not ccessible until the transfer is finished and
+ * of the chip registers are not accessible until the transfer is finished and
* the DMA has been de-asserted.
*/
struct ks8851_net_spi {
@@ -298,7 +298,7 @@ static unsigned int calc_txlen(unsigned int len)
/**
* ks8851_tx_work - process tx packet(s)
- * @work: The work strucutre what was scheduled.
+ * @work: The work structure that was scheduled.
*
* This is called when a number of packets have been scheduled for
* transmission and need to be sent to the device.
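The @lock comment corrected in the first ks8851_spi.c hunk documents why
register access must be serialized against an in-flight packet transfer. As an
illustration only (not code from this diff; the helper names are
placeholders), an accessor honouring that rule brackets its SPI transaction
with the same mutex the packet path holds:

static u16 example_rdreg(struct ks8851_net_spi *kss, unsigned int reg)
{
	u16 val;

	mutex_lock(&kss->lock);			/* blocks while a packet transfer runs */
	val = example_spi_xfer(kss, reg);	/* placeholder for the real SPI I/O */
	mutex_unlock(&kss->lock);

	return val;
}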
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index cdde19b8edc4..60223f03482d 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -1166,7 +1166,7 @@ struct ksz_port_info {
* @tx_cfg: Cached transmit control settings.
* @rx_cfg: Cached receive control settings.
* @intr_mask: Current interrupt mask.
- * @intr_set: Current interrup set.
+ * @intr_set: Current interrupt set.
* @intr_blocked: Interrupt blocked.
* @rx_desc_info: Receive descriptor information.
* @tx_desc_info: Transmit descriptor information.
@@ -2096,7 +2096,7 @@ static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
}
/**
- * sw_init_prio_rate - initialize switch prioirty rate
+ * sw_init_prio_rate - initialize switch priority rate
* @hw: The hardware instance.
*
* This routine initializes the priority rate function of the switch.
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 40002d9fe274..8a3c1ecc7866 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -931,16 +931,9 @@ static int lan743x_ethtool_get_rxfh_fields(struct net_device *netdev,
return 0;
}
-static int lan743x_ethtool_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *rxnfc,
- u32 *rule_locs)
+static u32 lan743x_ethtool_get_rx_ring_count(struct net_device *netdev)
{
- switch (rxnfc->cmd) {
- case ETHTOOL_GRXRINGS:
- rxnfc->data = LAN743X_USED_RX_CHANNELS;
- return 0;
- }
- return -EOPNOTSUPP;
+ return LAN743X_USED_RX_CHANNELS;
}
static u32 lan743x_ethtool_get_rxfh_key_size(struct net_device *netdev)
@@ -1369,7 +1362,7 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.get_priv_flags = lan743x_ethtool_get_priv_flags,
.set_priv_flags = lan743x_ethtool_set_priv_flags,
.get_sset_count = lan743x_ethtool_get_sset_count,
- .get_rxnfc = lan743x_ethtool_get_rxnfc,
+ .get_rx_ring_count = lan743x_ethtool_get_rx_ring_count,
.get_rxfh_key_size = lan743x_ethtool_get_rxfh_key_size,
.get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size,
.get_rxfh = lan743x_ethtool_get_rxfh,
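This lan743x hunk (and the mana_ethtool.c hunk further down) performs the same
mechanical conversion: a get_rxnfc handler that only answered ETHTOOL_GRXRINGS
is replaced by the dedicated get_rx_ring_count ethtool op. The general shape
of the conversion, sketched with a placeholder driver whose queue count lives
in its private data ("my_priv" and "num_rx_rings" are illustrative names):

/* Generic sketch of the conversion done above. */
static u32 my_get_rx_ring_count(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);

	return priv->num_rx_rings;
}

static const struct ethtool_ops my_ethtool_ops = {
	.get_rx_ring_count	= my_get_rx_ring_count,
	/* .get_rxnfc is no longer needed just to report ETHTOOL_GRXRINGS */
};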
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 1ad154f9db1a..9b5a72ada5c4 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -299,6 +299,39 @@ static int mana_get_gso_hs(struct sk_buff *skb)
return gso_hs;
}
+static void mana_per_port_queue_reset_work_handler(struct work_struct *work)
+{
+ struct mana_port_context *apc = container_of(work,
+ struct mana_port_context,
+ queue_reset_work);
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ rtnl_lock();
+
+ /* Pre-allocate buffers to prevent failure in mana_attach later */
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
+ if (err) {
+ netdev_err(ndev, "Insufficient memory for reset post tx stall detection\n");
+ goto out;
+ }
+
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev, "mana_detach failed: %d\n", err);
+ goto dealloc_pre_rxbufs;
+ }
+
+ err = mana_attach(ndev);
+ if (err)
+ netdev_err(ndev, "mana_attach failed: %d\n", err);
+
+dealloc_pre_rxbufs:
+ mana_pre_dealloc_rxbufs(apc);
+out:
+ rtnl_unlock();
+}
+
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
@@ -322,9 +355,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb_cow_head(skb, MANA_HEADROOM))
goto tx_drop_count;
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto tx_drop_count;
-
txq = &apc->tx_qp[txq_idx].txq;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq_idx].tx_cq;
@@ -839,6 +869,23 @@ out:
return err;
}
+static void mana_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct mana_port_context *apc = netdev_priv(netdev);
+ struct mana_context *ac = apc->ac;
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
+
+ /* Already in service, hence a tx queue reset is not required. */
+ if (gc->in_service)
+ return;
+
+ /* Note: if queue reset work is already pending for this port (apc),
+ * subsequent requests queued from here are ignored, because the same
+ * work instance is used per port (apc).
+ */
+ queue_work(ac->per_port_queue_reset_wq, &apc->queue_reset_work);
+}
+
static int mana_shaper_set(struct net_shaper_binding *binding,
const struct net_shaper *shaper,
struct netlink_ext_ack *extack)
@@ -924,6 +971,7 @@ static const struct net_device_ops mana_devops = {
.ndo_bpf = mana_bpf,
.ndo_xdp_xmit = mana_xdp_xmit,
.ndo_change_mtu = mana_change_mtu,
+ .ndo_tx_timeout = mana_tx_timeout,
.net_shaper_ops = &mana_shaper_ops,
};
@@ -3287,6 +3335,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->min_mtu = ETH_MIN_MTU;
ndev->needed_headroom = MANA_HEADROOM;
ndev->dev_port = port_idx;
+ /* Recommended timeout, based on the HW FPGA re-config scenario. */
+ ndev->watchdog_timeo = 15 * HZ;
SET_NETDEV_DEV(ndev, gc->dev);
netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
@@ -3303,6 +3353,10 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
if (err)
goto reset_apc;
+ /* Initialize the per-port queue reset work. */
+ INIT_WORK(&apc->queue_reset_work,
+ mana_per_port_queue_reset_work_handler);
+
netdev_lockdep_set_classes(ndev);
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -3492,6 +3546,7 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
+ struct mana_port_context *apc = NULL;
struct device *dev = gc->dev;
u8 bm_hostmode = 0;
u16 num_ports = 0;
@@ -3549,6 +3604,14 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
ac->num_ports = MAX_PORTS_IN_MANA_DEV;
+ ac->per_port_queue_reset_wq =
+ create_singlethread_workqueue("mana_per_port_queue_reset_wq");
+ if (!ac->per_port_queue_reset_wq) {
+ dev_err(dev, "Failed to allocate per port queue reset workqueue\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
if (!resuming) {
for (i = 0; i < ac->num_ports; i++) {
err = mana_probe_port(ac, i, &ac->ports[i]);
@@ -3565,6 +3628,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
} else {
for (i = 0; i < ac->num_ports; i++) {
rtnl_lock();
+ apc = netdev_priv(ac->ports[i]);
+ enable_work(&apc->queue_reset_work);
err = mana_attach(ac->ports[i]);
rtnl_unlock();
/* we log the port for which the attach failed and stop
@@ -3616,13 +3681,15 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
for (i = 0; i < ac->num_ports; i++) {
ndev = ac->ports[i];
- apc = netdev_priv(ndev);
if (!ndev) {
if (i == 0)
dev_err(dev, "No net device to remove\n");
goto out;
}
+ apc = netdev_priv(ndev);
+ disable_work_sync(&apc->queue_reset_work);
+
/* All cleanup actions should stay after rtnl_lock(), otherwise
* other functions may access partially cleaned up data.
*/
@@ -3649,6 +3716,11 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
mana_destroy_eq(ac);
out:
+ if (ac->per_port_queue_reset_wq) {
+ destroy_workqueue(ac->per_port_queue_reset_wq);
+ ac->per_port_queue_reset_wq = NULL;
+ }
+
mana_gd_deregister_device(gd);
if (suspending)
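The mana_en.c changes hang ndo_tx_timeout off a dedicated single-threaded
workqueue with exactly one work_struct per port, so timeouts that fire while a
reset is already queued coalesce into a single detach/re-attach cycle under
rtnl_lock. A reduced sketch of that coalescing idiom (placeholder names, not
the driver's):

static void my_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct my_port *port = netdev_priv(ndev);

	/* queue_work() returns false if the item is already pending,
	 * so repeated timeouts collapse into one reset run.
	 */
	if (!queue_work(port->reset_wq, &port->reset_work))
		netdev_dbg(ndev, "reset already pending\n");
}

The disable_work_sync()/enable_work() calls in the remove and resume paths
then guarantee the work item cannot execute against a half-torn-down port.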
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 0e2f4343ac67..f2d220b371b5 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -282,18 +282,11 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
}
}
-static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
- u32 *rules)
+static u32 mana_get_rx_ring_count(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = apc->num_queues;
- return 0;
- }
-
- return -EOPNOTSUPP;
+ return apc->num_queues;
}
static u32 mana_get_rxfh_key_size(struct net_device *ndev)
@@ -520,7 +513,7 @@ const struct ethtool_ops mana_ethtool_ops = {
.get_ethtool_stats = mana_get_ethtool_stats,
.get_sset_count = mana_get_sset_count,
.get_strings = mana_get_strings,
- .get_rxnfc = mana_get_rxnfc,
+ .get_rx_ring_count = mana_get_rx_ring_count,
.get_rxfh_key_size = mana_get_rxfh_key_size,
.get_rxfh_indir_size = mana_rss_indir_size,
.get_rxfh = mana_get_rxfh,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 7be30a8df268..2f0cdbd4e2ac 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -688,6 +688,9 @@ static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
/* probe for IPv6 TSO support */
mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
+ cmd.data0 = 0;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
&cmd, 0);
if (status == 0) {
@@ -806,6 +809,7 @@ static int myri10ge_update_mac_address(struct myri10ge_priv *mgp,
| (addr[2] << 8) | addr[3]);
cmd.data1 = ((addr[4] << 8) | (addr[5]));
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
return status;
@@ -817,6 +821,9 @@ static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
int status, ctl;
ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
+ cmd.data0 = 0;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
if (status) {
@@ -834,6 +841,9 @@ myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
int status, ctl;
ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
+ cmd.data0 = 0;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
if (status)
netdev_err(mgp->dev, "Failed to set promisc mode\n");
@@ -1946,6 +1956,8 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
/* get ring sizes */
slice = ss - mgp->ss;
cmd.data0 = slice;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
tx_ring_size = cmd.data0;
cmd.data0 = slice;
@@ -2238,12 +2250,16 @@ static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
status = 0;
if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
cmd.data0 = slice;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
&cmd, 0);
ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
(mgp->sram + cmd.data0);
}
cmd.data0 = slice;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
&cmd, 0);
ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
@@ -2312,6 +2328,7 @@ static int myri10ge_open(struct net_device *dev)
if (mgp->num_slices > 1) {
cmd.data0 = mgp->num_slices;
cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
+ cmd.data2 = 0;
if (mgp->dev->real_num_tx_queues > 1)
cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
@@ -2414,6 +2431,8 @@ static int myri10ge_open(struct net_device *dev)
/* now give firmware buffers sizes, and MTU */
cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
cmd.data0 = mgp->small_bytes;
status |=
@@ -2472,7 +2491,6 @@ abort_with_nothing:
static int myri10ge_close(struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
- struct myri10ge_cmd cmd;
int status, old_down_cnt;
int i;
@@ -2491,8 +2509,13 @@ static int myri10ge_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
if (mgp->rebooted == 0) {
+ struct myri10ge_cmd cmd;
+
old_down_cnt = mgp->down_cnt;
mb();
+ cmd.data0 = 0;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status =
myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
if (status)
@@ -2956,6 +2979,9 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
/* Disable multicast filtering */
+ cmd.data0 = 0;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
deleted file mode 100644
index 09a89e72f904..000000000000
--- a/drivers/net/ethernet/neterion/Kconfig
+++ /dev/null
@@ -1,35 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Exar device configuration
-#
-
-config NET_VENDOR_NETERION
- bool "Neterion (Exar) devices"
- default y
- depends on PCI
- help
- If you have a network (Ethernet) card belonging to this class, say Y.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about Neterion/Exar cards. If you say Y, you will be
- asked for your specific card in the following questions.
-
-if NET_VENDOR_NETERION
-
-config S2IO
- tristate "Neterion (Exar) Xframe 10Gb Ethernet Adapter"
- depends on PCI
- help
- This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
- These were originally released from S2IO, which renamed itself
- Neterion. So, the adapters might be labeled as either one, depending
- on its age.
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/ethernet/neterion/s2io.rst>.
-
- To compile this driver as a module, choose M here. The module
- will be called s2io.
-
-endif # NET_VENDOR_NETERION
diff --git a/drivers/net/ethernet/neterion/Makefile b/drivers/net/ethernet/neterion/Makefile
deleted file mode 100644
index de98b4e6eff9..000000000000
--- a/drivers/net/ethernet/neterion/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Exar network device drivers.
-#
-
-obj-$(CONFIG_S2IO) += s2io.o
diff --git a/drivers/net/ethernet/neterion/s2io-regs.h b/drivers/net/ethernet/neterion/s2io-regs.h
deleted file mode 100644
index 3688325c11f5..000000000000
--- a/drivers/net/ethernet/neterion/s2io-regs.h
+++ /dev/null
@@ -1,958 +0,0 @@
-/************************************************************************
- * regs.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
- * Copyright(c) 2002-2010 Exar Corp.
-
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- ************************************************************************/
-#ifndef _REGS_H
-#define _REGS_H
-
-#define TBD 0
-
-struct XENA_dev_config {
-/* Convention: mHAL_XXX is mask, vHAL_XXX is value */
-
-/* General Control-Status Registers */
- u64 general_int_status;
-#define GEN_INTR_TXPIC s2BIT(0)
-#define GEN_INTR_TXDMA s2BIT(1)
-#define GEN_INTR_TXMAC s2BIT(2)
-#define GEN_INTR_TXXGXS s2BIT(3)
-#define GEN_INTR_TXTRAFFIC s2BIT(8)
-#define GEN_INTR_RXPIC s2BIT(32)
-#define GEN_INTR_RXDMA s2BIT(33)
-#define GEN_INTR_RXMAC s2BIT(34)
-#define GEN_INTR_MC s2BIT(35)
-#define GEN_INTR_RXXGXS s2BIT(36)
-#define GEN_INTR_RXTRAFFIC s2BIT(40)
-#define GEN_ERROR_INTR GEN_INTR_TXPIC | GEN_INTR_RXPIC | \
- GEN_INTR_TXDMA | GEN_INTR_RXDMA | \
- GEN_INTR_TXMAC | GEN_INTR_RXMAC | \
- GEN_INTR_TXXGXS| GEN_INTR_RXXGXS| \
- GEN_INTR_MC
-
- u64 general_int_mask;
-
- u8 unused0[0x100 - 0x10];
-
- u64 sw_reset;
-/* XGXS must be removed from reset only once. */
-#define SW_RESET_XENA vBIT(0xA5,0,8)
-#define SW_RESET_FLASH vBIT(0xA5,8,8)
-#define SW_RESET_EOI vBIT(0xA5,16,8)
-#define SW_RESET_ALL (SW_RESET_XENA | \
- SW_RESET_FLASH | \
- SW_RESET_EOI)
-/* The SW_RESET register must read this value after a successful reset. */
-#define SW_RESET_RAW_VAL 0xA5000000
-
-
- u64 adapter_status;
-#define ADAPTER_STATUS_TDMA_READY s2BIT(0)
-#define ADAPTER_STATUS_RDMA_READY s2BIT(1)
-#define ADAPTER_STATUS_PFC_READY s2BIT(2)
-#define ADAPTER_STATUS_TMAC_BUF_EMPTY s2BIT(3)
-#define ADAPTER_STATUS_PIC_QUIESCENT s2BIT(5)
-#define ADAPTER_STATUS_RMAC_REMOTE_FAULT s2BIT(6)
-#define ADAPTER_STATUS_RMAC_LOCAL_FAULT s2BIT(7)
-#define ADAPTER_STATUS_RMAC_PCC_IDLE vBIT(0xFF,8,8)
-#define ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE vBIT(0x0F,8,8)
-#define ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8)
-#define ADAPTER_STATUS_MC_DRAM_READY s2BIT(24)
-#define ADAPTER_STATUS_MC_QUEUES_READY s2BIT(25)
-#define ADAPTER_STATUS_RIC_RUNNING s2BIT(26)
-#define ADAPTER_STATUS_M_PLL_LOCK s2BIT(30)
-#define ADAPTER_STATUS_P_PLL_LOCK s2BIT(31)
-
- u64 adapter_control;
-#define ADAPTER_CNTL_EN s2BIT(7)
-#define ADAPTER_EOI_TX_ON s2BIT(15)
-#define ADAPTER_LED_ON s2BIT(23)
-#define ADAPTER_UDPI(val) vBIT(val,36,4)
-#define ADAPTER_WAIT_INT s2BIT(48)
-#define ADAPTER_ECC_EN s2BIT(55)
-
- u64 serr_source;
-#define SERR_SOURCE_PIC s2BIT(0)
-#define SERR_SOURCE_TXDMA s2BIT(1)
-#define SERR_SOURCE_RXDMA s2BIT(2)
-#define SERR_SOURCE_MAC s2BIT(3)
-#define SERR_SOURCE_MC s2BIT(4)
-#define SERR_SOURCE_XGXS s2BIT(5)
-#define SERR_SOURCE_ANY (SERR_SOURCE_PIC | \
- SERR_SOURCE_TXDMA | \
- SERR_SOURCE_RXDMA | \
- SERR_SOURCE_MAC | \
- SERR_SOURCE_MC | \
- SERR_SOURCE_XGXS)
-
- u64 pci_mode;
-#define GET_PCI_MODE(val) ((val & vBIT(0xF, 0, 4)) >> 60)
-#define PCI_MODE_PCI_33 0
-#define PCI_MODE_PCI_66 0x1
-#define PCI_MODE_PCIX_M1_66 0x2
-#define PCI_MODE_PCIX_M1_100 0x3
-#define PCI_MODE_PCIX_M1_133 0x4
-#define PCI_MODE_PCIX_M2_66 0x5
-#define PCI_MODE_PCIX_M2_100 0x6
-#define PCI_MODE_PCIX_M2_133 0x7
-#define PCI_MODE_UNSUPPORTED s2BIT(0)
-#define PCI_MODE_32_BITS s2BIT(8)
-#define PCI_MODE_UNKNOWN_MODE s2BIT(9)
-
- u8 unused_0[0x800 - 0x128];
-
-/* PCI-X Controller registers */
- u64 pic_int_status;
- u64 pic_int_mask;
-#define PIC_INT_TX s2BIT(0)
-#define PIC_INT_FLSH s2BIT(1)
-#define PIC_INT_MDIO s2BIT(2)
-#define PIC_INT_IIC s2BIT(3)
-#define PIC_INT_GPIO s2BIT(4)
-#define PIC_INT_RX s2BIT(32)
-
- u64 txpic_int_reg;
- u64 txpic_int_mask;
-#define PCIX_INT_REG_ECC_SG_ERR s2BIT(0)
-#define PCIX_INT_REG_ECC_DB_ERR s2BIT(1)
-#define PCIX_INT_REG_FLASHR_R_FSM_ERR s2BIT(8)
-#define PCIX_INT_REG_FLASHR_W_FSM_ERR s2BIT(9)
-#define PCIX_INT_REG_INI_TX_FSM_SERR s2BIT(10)
-#define PCIX_INT_REG_INI_TXO_FSM_ERR s2BIT(11)
-#define PCIX_INT_REG_TRT_FSM_SERR s2BIT(13)
-#define PCIX_INT_REG_SRT_FSM_SERR s2BIT(14)
-#define PCIX_INT_REG_PIFR_FSM_SERR s2BIT(15)
-#define PCIX_INT_REG_WRC_TX_SEND_FSM_SERR s2BIT(21)
-#define PCIX_INT_REG_RRC_TX_REQ_FSM_SERR s2BIT(23)
-#define PCIX_INT_REG_INI_RX_FSM_SERR s2BIT(48)
-#define PCIX_INT_REG_RA_RX_FSM_SERR s2BIT(50)
-/*
-#define PCIX_INT_REG_WRC_RX_SEND_FSM_SERR s2BIT(52)
-#define PCIX_INT_REG_RRC_RX_REQ_FSM_SERR s2BIT(54)
-#define PCIX_INT_REG_RRC_RX_SPLIT_FSM_SERR s2BIT(58)
-*/
- u64 txpic_alarms;
- u64 rxpic_int_reg;
- u64 rxpic_int_mask;
- u64 rxpic_alarms;
-
- u64 flsh_int_reg;
- u64 flsh_int_mask;
-#define PIC_FLSH_INT_REG_CYCLE_FSM_ERR s2BIT(63)
-#define PIC_FLSH_INT_REG_ERR s2BIT(62)
- u64 flash_alarms;
-
- u64 mdio_int_reg;
- u64 mdio_int_mask;
-#define MDIO_INT_REG_MDIO_BUS_ERR s2BIT(0)
-#define MDIO_INT_REG_DTX_BUS_ERR s2BIT(8)
-#define MDIO_INT_REG_LASI s2BIT(39)
- u64 mdio_alarms;
-
- u64 iic_int_reg;
- u64 iic_int_mask;
-#define IIC_INT_REG_BUS_FSM_ERR s2BIT(4)
-#define IIC_INT_REG_BIT_FSM_ERR s2BIT(5)
-#define IIC_INT_REG_CYCLE_FSM_ERR s2BIT(6)
-#define IIC_INT_REG_REQ_FSM_ERR s2BIT(7)
-#define IIC_INT_REG_ACK_ERR s2BIT(8)
- u64 iic_alarms;
-
- u8 unused4[0x08];
-
- u64 gpio_int_reg;
-#define GPIO_INT_REG_DP_ERR_INT s2BIT(0)
-#define GPIO_INT_REG_LINK_DOWN s2BIT(1)
-#define GPIO_INT_REG_LINK_UP s2BIT(2)
- u64 gpio_int_mask;
-#define GPIO_INT_MASK_LINK_DOWN s2BIT(1)
-#define GPIO_INT_MASK_LINK_UP s2BIT(2)
- u64 gpio_alarms;
-
- u8 unused5[0x38];
-
- u64 tx_traffic_int;
-#define TX_TRAFFIC_INT_n(n) s2BIT(n)
- u64 tx_traffic_mask;
-
- u64 rx_traffic_int;
-#define RX_TRAFFIC_INT_n(n) s2BIT(n)
- u64 rx_traffic_mask;
-
-/* PIC Control registers */
- u64 pic_control;
-#define PIC_CNTL_RX_ALARM_MAP_1 s2BIT(0)
-#define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,5)
-
- u64 swapper_ctrl;
-#define SWAPPER_CTRL_PIF_R_FE s2BIT(0)
-#define SWAPPER_CTRL_PIF_R_SE s2BIT(1)
-#define SWAPPER_CTRL_PIF_W_FE s2BIT(8)
-#define SWAPPER_CTRL_PIF_W_SE s2BIT(9)
-#define SWAPPER_CTRL_TXP_FE s2BIT(16)
-#define SWAPPER_CTRL_TXP_SE s2BIT(17)
-#define SWAPPER_CTRL_TXD_R_FE s2BIT(18)
-#define SWAPPER_CTRL_TXD_R_SE s2BIT(19)
-#define SWAPPER_CTRL_TXD_W_FE s2BIT(20)
-#define SWAPPER_CTRL_TXD_W_SE s2BIT(21)
-#define SWAPPER_CTRL_TXF_R_FE s2BIT(22)
-#define SWAPPER_CTRL_TXF_R_SE s2BIT(23)
-#define SWAPPER_CTRL_RXD_R_FE s2BIT(32)
-#define SWAPPER_CTRL_RXD_R_SE s2BIT(33)
-#define SWAPPER_CTRL_RXD_W_FE s2BIT(34)
-#define SWAPPER_CTRL_RXD_W_SE s2BIT(35)
-#define SWAPPER_CTRL_RXF_W_FE s2BIT(36)
-#define SWAPPER_CTRL_RXF_W_SE s2BIT(37)
-#define SWAPPER_CTRL_XMSI_FE s2BIT(40)
-#define SWAPPER_CTRL_XMSI_SE s2BIT(41)
-#define SWAPPER_CTRL_STATS_FE s2BIT(48)
-#define SWAPPER_CTRL_STATS_SE s2BIT(49)
-
- u64 pif_rd_swapper_fb;
-#define IF_RD_SWAPPER_FB 0x0123456789ABCDEF
-
- u64 scheduled_int_ctrl;
-#define SCHED_INT_CTRL_TIMER_EN s2BIT(0)
-#define SCHED_INT_CTRL_ONE_SHOT s2BIT(1)
-#define SCHED_INT_CTRL_INT2MSI(val) vBIT(val,10,6)
-#define SCHED_INT_PERIOD TBD
-
- u64 txreqtimeout;
-#define TXREQTO_VAL(val) vBIT(val,0,32)
-#define TXREQTO_EN s2BIT(63)
-
- u64 statsreqtimeout;
-#define STATREQTO_VAL(n) TBD
-#define STATREQTO_EN s2BIT(63)
-
- u64 read_retry_delay;
- u64 read_retry_acceleration;
- u64 write_retry_delay;
- u64 write_retry_acceleration;
-
- u64 xmsi_control;
- u64 xmsi_access;
- u64 xmsi_address;
- u64 xmsi_data;
-
- u64 rx_mat;
-#define RX_MAT_SET(ring, msi) vBIT(msi, (8 * ring), 8)
-
- u8 unused6[0x8];
-
- u64 tx_mat0_n[0x8];
-#define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8)
-
- u64 xmsi_mask_reg;
- u64 stat_byte_cnt;
-#define STAT_BC(n) vBIT(n,4,12)
-
- /* Automated statistics collection */
- u64 stat_cfg;
-#define STAT_CFG_STAT_EN s2BIT(0)
-#define STAT_CFG_ONE_SHOT_EN s2BIT(1)
-#define STAT_CFG_STAT_NS_EN s2BIT(8)
-#define STAT_CFG_STAT_RO s2BIT(9)
-#define STAT_TRSF_PER(n) TBD
-#define PER_SEC 0x208d5
-#define SET_UPDT_PERIOD(n) vBIT((PER_SEC*n),32,32)
-#define SET_UPDT_CLICKS(val) vBIT(val, 32, 32)
-
- u64 stat_addr;
-
- /* General Configuration */
- u64 mdio_control;
-#define MDIO_MMD_INDX_ADDR(val) vBIT(val, 0, 16)
-#define MDIO_MMD_DEV_ADDR(val) vBIT(val, 19, 5)
-#define MDIO_MMS_PRT_ADDR(val) vBIT(val, 27, 5)
-#define MDIO_CTRL_START_TRANS(val) vBIT(val, 56, 4)
-#define MDIO_OP(val) vBIT(val, 60, 2)
-#define MDIO_OP_ADDR_TRANS 0x0
-#define MDIO_OP_WRITE_TRANS 0x1
-#define MDIO_OP_READ_POST_INC_TRANS 0x2
-#define MDIO_OP_READ_TRANS 0x3
-#define MDIO_MDIO_DATA(val) vBIT(val, 32, 16)
-
- u64 dtx_control;
-
- u64 i2c_control;
-#define I2C_CONTROL_DEV_ID(id) vBIT(id,1,3)
-#define I2C_CONTROL_ADDR(addr) vBIT(addr,5,11)
-#define I2C_CONTROL_BYTE_CNT(cnt) vBIT(cnt,22,2)
-#define I2C_CONTROL_READ s2BIT(24)
-#define I2C_CONTROL_NACK s2BIT(25)
-#define I2C_CONTROL_CNTL_START vBIT(0xE,28,4)
-#define I2C_CONTROL_CNTL_END(val) (val & vBIT(0x1,28,4))
-#define I2C_CONTROL_GET_DATA(val) (u32)(val & 0xFFFFFFFF)
-#define I2C_CONTROL_SET_DATA(val) vBIT(val,32,32)
-
- u64 gpio_control;
-#define GPIO_CTRL_GPIO_0 s2BIT(8)
- u64 misc_control;
-#define FAULT_BEHAVIOUR s2BIT(0)
-#define EXT_REQ_EN s2BIT(1)
-#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
-
- u8 unused7_1[0x230 - 0x208];
-
- u64 pic_control2;
- u64 ini_dperr_ctrl;
-
- u64 wreq_split_mask;
-#define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12)
-
- u8 unused7_2[0x800 - 0x248];
-
-/* TxDMA registers */
- u64 txdma_int_status;
- u64 txdma_int_mask;
-#define TXDMA_PFC_INT s2BIT(0)
-#define TXDMA_TDA_INT s2BIT(1)
-#define TXDMA_PCC_INT s2BIT(2)
-#define TXDMA_TTI_INT s2BIT(3)
-#define TXDMA_LSO_INT s2BIT(4)
-#define TXDMA_TPA_INT s2BIT(5)
-#define TXDMA_SM_INT s2BIT(6)
- u64 pfc_err_reg;
-#define PFC_ECC_SG_ERR s2BIT(7)
-#define PFC_ECC_DB_ERR s2BIT(15)
-#define PFC_SM_ERR_ALARM s2BIT(23)
-#define PFC_MISC_0_ERR s2BIT(31)
-#define PFC_MISC_1_ERR s2BIT(32)
-#define PFC_PCIX_ERR s2BIT(39)
- u64 pfc_err_mask;
- u64 pfc_err_alarm;
-
- u64 tda_err_reg;
-#define TDA_Fn_ECC_SG_ERR vBIT(0xff,0,8)
-#define TDA_Fn_ECC_DB_ERR vBIT(0xff,8,8)
-#define TDA_SM0_ERR_ALARM s2BIT(22)
-#define TDA_SM1_ERR_ALARM s2BIT(23)
-#define TDA_PCIX_ERR s2BIT(39)
- u64 tda_err_mask;
- u64 tda_err_alarm;
-
- u64 pcc_err_reg;
-#define PCC_FB_ECC_SG_ERR vBIT(0xFF,0,8)
-#define PCC_TXB_ECC_SG_ERR vBIT(0xFF,8,8)
-#define PCC_FB_ECC_DB_ERR vBIT(0xFF,16, 8)
-#define PCC_TXB_ECC_DB_ERR vBIT(0xff,24,8)
-#define PCC_SM_ERR_ALARM vBIT(0xff,32,8)
-#define PCC_WR_ERR_ALARM vBIT(0xff,40,8)
-#define PCC_N_SERR vBIT(0xff,48,8)
-#define PCC_6_COF_OV_ERR s2BIT(56)
-#define PCC_7_COF_OV_ERR s2BIT(57)
-#define PCC_6_LSO_OV_ERR s2BIT(58)
-#define PCC_7_LSO_OV_ERR s2BIT(59)
-#define PCC_ENABLE_FOUR vBIT(0x0F,0,8)
- u64 pcc_err_mask;
- u64 pcc_err_alarm;
-
- u64 tti_err_reg;
-#define TTI_ECC_SG_ERR s2BIT(7)
-#define TTI_ECC_DB_ERR s2BIT(15)
-#define TTI_SM_ERR_ALARM s2BIT(23)
- u64 tti_err_mask;
- u64 tti_err_alarm;
-
- u64 lso_err_reg;
-#define LSO6_SEND_OFLOW s2BIT(12)
-#define LSO7_SEND_OFLOW s2BIT(13)
-#define LSO6_ABORT s2BIT(14)
-#define LSO7_ABORT s2BIT(15)
-#define LSO6_SM_ERR_ALARM s2BIT(22)
-#define LSO7_SM_ERR_ALARM s2BIT(23)
- u64 lso_err_mask;
- u64 lso_err_alarm;
-
- u64 tpa_err_reg;
-#define TPA_TX_FRM_DROP s2BIT(7)
-#define TPA_SM_ERR_ALARM s2BIT(23)
-
- u64 tpa_err_mask;
- u64 tpa_err_alarm;
-
- u64 sm_err_reg;
-#define SM_SM_ERR_ALARM s2BIT(15)
- u64 sm_err_mask;
- u64 sm_err_alarm;
-
- u8 unused8[0x100 - 0xB8];
-
-/* TxDMA arbiter */
- u64 tx_dma_wrap_stat;
-
-/* Tx FIFO controller */
-#define X_MAX_FIFOS 8
-#define X_FIFO_MAX_LEN 0x1FFF /*8191 */
- u64 tx_fifo_partition_0;
-#define TX_FIFO_PARTITION_EN s2BIT(0)
-#define TX_FIFO_PARTITION_0_PRI(val) vBIT(val,5,3)
-#define TX_FIFO_PARTITION_0_LEN(val) vBIT(val,19,13)
-#define TX_FIFO_PARTITION_1_PRI(val) vBIT(val,37,3)
-#define TX_FIFO_PARTITION_1_LEN(val) vBIT(val,51,13 )
-
- u64 tx_fifo_partition_1;
-#define TX_FIFO_PARTITION_2_PRI(val) vBIT(val,5,3)
-#define TX_FIFO_PARTITION_2_LEN(val) vBIT(val,19,13)
-#define TX_FIFO_PARTITION_3_PRI(val) vBIT(val,37,3)
-#define TX_FIFO_PARTITION_3_LEN(val) vBIT(val,51,13)
-
- u64 tx_fifo_partition_2;
-#define TX_FIFO_PARTITION_4_PRI(val) vBIT(val,5,3)
-#define TX_FIFO_PARTITION_4_LEN(val) vBIT(val,19,13)
-#define TX_FIFO_PARTITION_5_PRI(val) vBIT(val,37,3)
-#define TX_FIFO_PARTITION_5_LEN(val) vBIT(val,51,13)
-
- u64 tx_fifo_partition_3;
-#define TX_FIFO_PARTITION_6_PRI(val) vBIT(val,5,3)
-#define TX_FIFO_PARTITION_6_LEN(val) vBIT(val,19,13)
-#define TX_FIFO_PARTITION_7_PRI(val) vBIT(val,37,3)
-#define TX_FIFO_PARTITION_7_LEN(val) vBIT(val,51,13)
-
-#define TX_FIFO_PARTITION_PRI_0 0 /* highest */
-#define TX_FIFO_PARTITION_PRI_1 1
-#define TX_FIFO_PARTITION_PRI_2 2
-#define TX_FIFO_PARTITION_PRI_3 3
-#define TX_FIFO_PARTITION_PRI_4 4
-#define TX_FIFO_PARTITION_PRI_5 5
-#define TX_FIFO_PARTITION_PRI_6 6
-#define TX_FIFO_PARTITION_PRI_7 7 /* lowest */
-
- u64 tx_w_round_robin_0;
- u64 tx_w_round_robin_1;
- u64 tx_w_round_robin_2;
- u64 tx_w_round_robin_3;
- u64 tx_w_round_robin_4;
-
- u64 tti_command_mem;
-#define TTI_CMD_MEM_WE s2BIT(7)
-#define TTI_CMD_MEM_STROBE_NEW_CMD s2BIT(15)
-#define TTI_CMD_MEM_STROBE_BEING_EXECUTED s2BIT(15)
-#define TTI_CMD_MEM_OFFSET(n) vBIT(n,26,6)
-
- u64 tti_data1_mem;
-#define TTI_DATA1_MEM_TX_TIMER_VAL(n) vBIT(n,6,26)
-#define TTI_DATA1_MEM_TX_TIMER_AC_CI(n) vBIT(n,38,2)
-#define TTI_DATA1_MEM_TX_TIMER_AC_EN s2BIT(38)
-#define TTI_DATA1_MEM_TX_TIMER_CI_EN s2BIT(39)
-#define TTI_DATA1_MEM_TX_URNG_A(n) vBIT(n,41,7)
-#define TTI_DATA1_MEM_TX_URNG_B(n) vBIT(n,49,7)
-#define TTI_DATA1_MEM_TX_URNG_C(n) vBIT(n,57,7)
-
- u64 tti_data2_mem;
-#define TTI_DATA2_MEM_TX_UFC_A(n) vBIT(n,0,16)
-#define TTI_DATA2_MEM_TX_UFC_B(n) vBIT(n,16,16)
-#define TTI_DATA2_MEM_TX_UFC_C(n) vBIT(n,32,16)
-#define TTI_DATA2_MEM_TX_UFC_D(n) vBIT(n,48,16)
-
-/* Tx Protocol assist */
- u64 tx_pa_cfg;
-#define TX_PA_CFG_IGNORE_FRM_ERR s2BIT(1)
-#define TX_PA_CFG_IGNORE_SNAP_OUI s2BIT(2)
-#define TX_PA_CFG_IGNORE_LLC_CTRL s2BIT(3)
-#define TX_PA_CFG_IGNORE_L2_ERR s2BIT(6)
-#define RX_PA_CFG_STRIP_VLAN_TAG s2BIT(15)
-
-/* Recent add, used only debug purposes. */
- u64 pcc_enable;
-
- u8 unused9[0x700 - 0x178];
-
- u64 txdma_debug_ctrl;
-
- u8 unused10[0x1800 - 0x1708];
-
-/* RxDMA Registers */
- u64 rxdma_int_status;
- u64 rxdma_int_mask;
-#define RXDMA_INT_RC_INT_M s2BIT(0)
-#define RXDMA_INT_RPA_INT_M s2BIT(1)
-#define RXDMA_INT_RDA_INT_M s2BIT(2)
-#define RXDMA_INT_RTI_INT_M s2BIT(3)
-
- u64 rda_err_reg;
-#define RDA_RXDn_ECC_SG_ERR vBIT(0xFF,0,8)
-#define RDA_RXDn_ECC_DB_ERR vBIT(0xFF,8,8)
-#define RDA_FRM_ECC_SG_ERR s2BIT(23)
-#define RDA_FRM_ECC_DB_N_AERR s2BIT(31)
-#define RDA_SM1_ERR_ALARM s2BIT(38)
-#define RDA_SM0_ERR_ALARM s2BIT(39)
-#define RDA_MISC_ERR s2BIT(47)
-#define RDA_PCIX_ERR s2BIT(55)
-#define RDA_RXD_ECC_DB_SERR s2BIT(63)
- u64 rda_err_mask;
- u64 rda_err_alarm;
-
- u64 rc_err_reg;
-#define RC_PRCn_ECC_SG_ERR vBIT(0xFF,0,8)
-#define RC_PRCn_ECC_DB_ERR vBIT(0xFF,8,8)
-#define RC_FTC_ECC_SG_ERR s2BIT(23)
-#define RC_FTC_ECC_DB_ERR s2BIT(31)
-#define RC_PRCn_SM_ERR_ALARM vBIT(0xFF,32,8)
-#define RC_FTC_SM_ERR_ALARM s2BIT(47)
-#define RC_RDA_FAIL_WR_Rn vBIT(0xFF,48,8)
- u64 rc_err_mask;
- u64 rc_err_alarm;
-
- u64 prc_pcix_err_reg;
-#define PRC_PCI_AB_RD_Rn vBIT(0xFF,0,8)
-#define PRC_PCI_DP_RD_Rn vBIT(0xFF,8,8)
-#define PRC_PCI_AB_WR_Rn vBIT(0xFF,16,8)
-#define PRC_PCI_DP_WR_Rn vBIT(0xFF,24,8)
-#define PRC_PCI_AB_F_WR_Rn vBIT(0xFF,32,8)
-#define PRC_PCI_DP_F_WR_Rn vBIT(0xFF,40,8)
- u64 prc_pcix_err_mask;
- u64 prc_pcix_err_alarm;
-
- u64 rpa_err_reg;
-#define RPA_ECC_SG_ERR s2BIT(7)
-#define RPA_ECC_DB_ERR s2BIT(15)
-#define RPA_FLUSH_REQUEST s2BIT(22)
-#define RPA_SM_ERR_ALARM s2BIT(23)
-#define RPA_CREDIT_ERR s2BIT(31)
- u64 rpa_err_mask;
- u64 rpa_err_alarm;
-
- u64 rti_err_reg;
-#define RTI_ECC_SG_ERR s2BIT(7)
-#define RTI_ECC_DB_ERR s2BIT(15)
-#define RTI_SM_ERR_ALARM s2BIT(23)
- u64 rti_err_mask;
- u64 rti_err_alarm;
-
- u8 unused11[0x100 - 0x88];
-
-/* DMA arbiter */
- u64 rx_queue_priority;
-#define RX_QUEUE_0_PRIORITY(val) vBIT(val,5,3)
-#define RX_QUEUE_1_PRIORITY(val) vBIT(val,13,3)
-#define RX_QUEUE_2_PRIORITY(val) vBIT(val,21,3)
-#define RX_QUEUE_3_PRIORITY(val) vBIT(val,29,3)
-#define RX_QUEUE_4_PRIORITY(val) vBIT(val,37,3)
-#define RX_QUEUE_5_PRIORITY(val) vBIT(val,45,3)
-#define RX_QUEUE_6_PRIORITY(val) vBIT(val,53,3)
-#define RX_QUEUE_7_PRIORITY(val) vBIT(val,61,3)
-
-#define RX_QUEUE_PRI_0 0 /* highest */
-#define RX_QUEUE_PRI_1 1
-#define RX_QUEUE_PRI_2 2
-#define RX_QUEUE_PRI_3 3
-#define RX_QUEUE_PRI_4 4
-#define RX_QUEUE_PRI_5 5
-#define RX_QUEUE_PRI_6 6
-#define RX_QUEUE_PRI_7 7 /* lowest */
-
- u64 rx_w_round_robin_0;
- u64 rx_w_round_robin_1;
- u64 rx_w_round_robin_2;
- u64 rx_w_round_robin_3;
- u64 rx_w_round_robin_4;
-
- /* Per-ring controller regs */
-#define RX_MAX_RINGS 8
-#if 0
-#define RX_MAX_RINGS_SZ 0xFFFF /* 65536 */
-#define RX_MIN_RINGS_SZ 0x3F /* 63 */
-#endif
- u64 prc_rxd0_n[RX_MAX_RINGS];
- u64 prc_ctrl_n[RX_MAX_RINGS];
-#define PRC_CTRL_RC_ENABLED s2BIT(7)
-#define PRC_CTRL_RING_MODE (s2BIT(14)|s2BIT(15))
-#define PRC_CTRL_RING_MODE_1 vBIT(0,14,2)
-#define PRC_CTRL_RING_MODE_3 vBIT(1,14,2)
-#define PRC_CTRL_RING_MODE_5 vBIT(2,14,2)
-#define PRC_CTRL_RING_MODE_x vBIT(3,14,2)
-#define PRC_CTRL_NO_SNOOP (s2BIT(22)|s2BIT(23))
-#define PRC_CTRL_NO_SNOOP_DESC s2BIT(22)
-#define PRC_CTRL_NO_SNOOP_BUFF s2BIT(23)
-#define PRC_CTRL_BIMODAL_INTERRUPT s2BIT(37)
-#define PRC_CTRL_GROUP_READS s2BIT(38)
-#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
-
- u64 prc_alarm_action;
-#define PRC_ALARM_ACTION_RR_R0_STOP s2BIT(3)
-#define PRC_ALARM_ACTION_RW_R0_STOP s2BIT(7)
-#define PRC_ALARM_ACTION_RR_R1_STOP s2BIT(11)
-#define PRC_ALARM_ACTION_RW_R1_STOP s2BIT(15)
-#define PRC_ALARM_ACTION_RR_R2_STOP s2BIT(19)
-#define PRC_ALARM_ACTION_RW_R2_STOP s2BIT(23)
-#define PRC_ALARM_ACTION_RR_R3_STOP s2BIT(27)
-#define PRC_ALARM_ACTION_RW_R3_STOP s2BIT(31)
-#define PRC_ALARM_ACTION_RR_R4_STOP s2BIT(35)
-#define PRC_ALARM_ACTION_RW_R4_STOP s2BIT(39)
-#define PRC_ALARM_ACTION_RR_R5_STOP s2BIT(43)
-#define PRC_ALARM_ACTION_RW_R5_STOP s2BIT(47)
-#define PRC_ALARM_ACTION_RR_R6_STOP s2BIT(51)
-#define PRC_ALARM_ACTION_RW_R6_STOP s2BIT(55)
-#define PRC_ALARM_ACTION_RR_R7_STOP s2BIT(59)
-#define PRC_ALARM_ACTION_RW_R7_STOP s2BIT(63)
-
-/* Receive traffic interrupts */
- u64 rti_command_mem;
-#define RTI_CMD_MEM_WE s2BIT(7)
-#define RTI_CMD_MEM_STROBE s2BIT(15)
-#define RTI_CMD_MEM_STROBE_NEW_CMD s2BIT(15)
-#define RTI_CMD_MEM_STROBE_CMD_BEING_EXECUTED s2BIT(15)
-#define RTI_CMD_MEM_OFFSET(n) vBIT(n,29,3)
-
- u64 rti_data1_mem;
-#define RTI_DATA1_MEM_RX_TIMER_VAL(n) vBIT(n,3,29)
-#define RTI_DATA1_MEM_RX_TIMER_AC_EN s2BIT(38)
-#define RTI_DATA1_MEM_RX_TIMER_CI_EN s2BIT(39)
-#define RTI_DATA1_MEM_RX_URNG_A(n) vBIT(n,41,7)
-#define RTI_DATA1_MEM_RX_URNG_B(n) vBIT(n,49,7)
-#define RTI_DATA1_MEM_RX_URNG_C(n) vBIT(n,57,7)
-
- u64 rti_data2_mem;
-#define RTI_DATA2_MEM_RX_UFC_A(n) vBIT(n,0,16)
-#define RTI_DATA2_MEM_RX_UFC_B(n) vBIT(n,16,16)
-#define RTI_DATA2_MEM_RX_UFC_C(n) vBIT(n,32,16)
-#define RTI_DATA2_MEM_RX_UFC_D(n) vBIT(n,48,16)
-
- u64 rx_pa_cfg;
-#define RX_PA_CFG_IGNORE_FRM_ERR s2BIT(1)
-#define RX_PA_CFG_IGNORE_SNAP_OUI s2BIT(2)
-#define RX_PA_CFG_IGNORE_LLC_CTRL s2BIT(3)
-#define RX_PA_CFG_IGNORE_L2_ERR s2BIT(6)
-
- u64 unused_11_1;
-
- u64 ring_bump_counter1;
- u64 ring_bump_counter2;
-
- u8 unused12[0x700 - 0x1F0];
-
- u64 rxdma_debug_ctrl;
-
- u8 unused13[0x2000 - 0x1f08];
-
-/* Media Access Controller Register */
- u64 mac_int_status;
- u64 mac_int_mask;
-#define MAC_INT_STATUS_TMAC_INT s2BIT(0)
-#define MAC_INT_STATUS_RMAC_INT s2BIT(1)
-
- u64 mac_tmac_err_reg;
-#define TMAC_ECC_SG_ERR s2BIT(7)
-#define TMAC_ECC_DB_ERR s2BIT(15)
-#define TMAC_TX_BUF_OVRN s2BIT(23)
-#define TMAC_TX_CRI_ERR s2BIT(31)
-#define TMAC_TX_SM_ERR s2BIT(39)
-#define TMAC_DESC_ECC_SG_ERR s2BIT(47)
-#define TMAC_DESC_ECC_DB_ERR s2BIT(55)
-
- u64 mac_tmac_err_mask;
- u64 mac_tmac_err_alarm;
-
- u64 mac_rmac_err_reg;
-#define RMAC_RX_BUFF_OVRN s2BIT(0)
-#define RMAC_FRM_RCVD_INT s2BIT(1)
-#define RMAC_UNUSED_INT s2BIT(2)
-#define RMAC_RTS_PNUM_ECC_SG_ERR s2BIT(5)
-#define RMAC_RTS_DS_ECC_SG_ERR s2BIT(6)
-#define RMAC_RD_BUF_ECC_SG_ERR s2BIT(7)
-#define RMAC_RTH_MAP_ECC_SG_ERR s2BIT(8)
-#define RMAC_RTH_SPDM_ECC_SG_ERR s2BIT(9)
-#define RMAC_RTS_VID_ECC_SG_ERR s2BIT(10)
-#define RMAC_DA_SHADOW_ECC_SG_ERR s2BIT(11)
-#define RMAC_RTS_PNUM_ECC_DB_ERR s2BIT(13)
-#define RMAC_RTS_DS_ECC_DB_ERR s2BIT(14)
-#define RMAC_RD_BUF_ECC_DB_ERR s2BIT(15)
-#define RMAC_RTH_MAP_ECC_DB_ERR s2BIT(16)
-#define RMAC_RTH_SPDM_ECC_DB_ERR s2BIT(17)
-#define RMAC_RTS_VID_ECC_DB_ERR s2BIT(18)
-#define RMAC_DA_SHADOW_ECC_DB_ERR s2BIT(19)
-#define RMAC_LINK_STATE_CHANGE_INT s2BIT(31)
-#define RMAC_RX_SM_ERR s2BIT(39)
-#define RMAC_SINGLE_ECC_ERR (s2BIT(5) | s2BIT(6) | s2BIT(7) |\
- s2BIT(8) | s2BIT(9) | s2BIT(10)|\
- s2BIT(11))
-#define RMAC_DOUBLE_ECC_ERR (s2BIT(13) | s2BIT(14) | s2BIT(15) |\
- s2BIT(16) | s2BIT(17) | s2BIT(18)|\
- s2BIT(19))
- u64 mac_rmac_err_mask;
- u64 mac_rmac_err_alarm;
-
- u8 unused14[0x100 - 0x40];
-
- u64 mac_cfg;
-#define MAC_CFG_TMAC_ENABLE s2BIT(0)
-#define MAC_CFG_RMAC_ENABLE s2BIT(1)
-#define MAC_CFG_LAN_NOT_WAN s2BIT(2)
-#define MAC_CFG_TMAC_LOOPBACK s2BIT(3)
-#define MAC_CFG_TMAC_APPEND_PAD s2BIT(4)
-#define MAC_CFG_RMAC_STRIP_FCS s2BIT(5)
-#define MAC_CFG_RMAC_STRIP_PAD s2BIT(6)
-#define MAC_CFG_RMAC_PROM_ENABLE s2BIT(7)
-#define MAC_RMAC_DISCARD_PFRM s2BIT(8)
-#define MAC_RMAC_BCAST_ENABLE s2BIT(9)
-#define MAC_RMAC_ALL_ADDR_ENABLE s2BIT(10)
-#define MAC_RMAC_INVLD_IPG_THR(val) vBIT(val,16,8)
-
- u64 tmac_avg_ipg;
-#define TMAC_AVG_IPG(val) vBIT(val,0,8)
-
- u64 rmac_max_pyld_len;
-#define RMAC_MAX_PYLD_LEN(val) vBIT(val,2,14)
-#define RMAC_MAX_PYLD_LEN_DEF vBIT(1500,2,14)
-#define RMAC_MAX_PYLD_LEN_JUMBO_DEF vBIT(9600,2,14)
-
- u64 rmac_err_cfg;
-#define RMAC_ERR_FCS s2BIT(0)
-#define RMAC_ERR_FCS_ACCEPT s2BIT(1)
-#define RMAC_ERR_TOO_LONG s2BIT(1)
-#define RMAC_ERR_TOO_LONG_ACCEPT s2BIT(1)
-#define RMAC_ERR_RUNT s2BIT(2)
-#define RMAC_ERR_RUNT_ACCEPT s2BIT(2)
-#define RMAC_ERR_LEN_MISMATCH s2BIT(3)
-#define RMAC_ERR_LEN_MISMATCH_ACCEPT s2BIT(3)
-
- u64 rmac_cfg_key;
-#define RMAC_CFG_KEY(val) vBIT(val,0,16)
-
-#define S2IO_MAC_ADDR_START_OFFSET 0
-
-#define S2IO_XENA_MAX_MC_ADDRESSES 64 /* multicast addresses */
-#define S2IO_HERC_MAX_MC_ADDRESSES 256
-
-#define S2IO_XENA_MAX_MAC_ADDRESSES 16
-#define S2IO_HERC_MAX_MAC_ADDRESSES 64
-
-#define S2IO_XENA_MC_ADDR_START_OFFSET 16
-#define S2IO_HERC_MC_ADDR_START_OFFSET 64
-
- u64 rmac_addr_cmd_mem;
-#define RMAC_ADDR_CMD_MEM_WE s2BIT(7)
-#define RMAC_ADDR_CMD_MEM_RD 0
-#define RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD s2BIT(15)
-#define RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING s2BIT(15)
-#define RMAC_ADDR_CMD_MEM_OFFSET(n) vBIT(n,26,6)
-
- u64 rmac_addr_data0_mem;
-#define RMAC_ADDR_DATA0_MEM_ADDR(n) vBIT(n,0,48)
-#define RMAC_ADDR_DATA0_MEM_USER s2BIT(48)
-
- u64 rmac_addr_data1_mem;
-#define RMAC_ADDR_DATA1_MEM_MASK(n) vBIT(n,0,48)
-
- u8 unused15[0x8];
-
-/*
- u64 rmac_addr_cfg;
-#define RMAC_ADDR_UCASTn_EN(n) mBIT(0)_n(n)
-#define RMAC_ADDR_MCASTn_EN(n) mBIT(0)_n(n)
-#define RMAC_ADDR_BCAST_EN vBIT(0)_48
-#define RMAC_ADDR_ALL_ADDR_EN vBIT(0)_49
-*/
- u64 tmac_ipg_cfg;
-
- u64 rmac_pause_cfg;
-#define RMAC_PAUSE_GEN s2BIT(0)
-#define RMAC_PAUSE_GEN_ENABLE s2BIT(0)
-#define RMAC_PAUSE_RX s2BIT(1)
-#define RMAC_PAUSE_RX_ENABLE s2BIT(1)
-#define RMAC_PAUSE_HG_PTIME_DEF vBIT(0xFFFF,16,16)
-#define RMAC_PAUSE_HG_PTIME(val) vBIT(val,16,16)
-
- u64 rmac_red_cfg;
-
- u64 rmac_red_rate_q0q3;
- u64 rmac_red_rate_q4q7;
-
- u64 mac_link_util;
-#define MAC_TX_LINK_UTIL vBIT(0xFE,1,7)
-#define MAC_TX_LINK_UTIL_DISABLE vBIT(0xF, 8,4)
-#define MAC_TX_LINK_UTIL_VAL( n ) vBIT(n,8,4)
-#define MAC_RX_LINK_UTIL vBIT(0xFE,33,7)
-#define MAC_RX_LINK_UTIL_DISABLE vBIT(0xF,40,4)
-#define MAC_RX_LINK_UTIL_VAL( n ) vBIT(n,40,4)
-
-#define MAC_LINK_UTIL_DISABLE MAC_TX_LINK_UTIL_DISABLE | \
- MAC_RX_LINK_UTIL_DISABLE
-
- u64 rmac_invalid_ipg;
-
-/* rx traffic steering */
-#define MAC_RTS_FRM_LEN_SET(len) vBIT(len,2,14)
- u64 rts_frm_len_n[8];
-
- u64 rts_qos_steering;
-
-#define MAX_DIX_MAP 4
- u64 rts_dix_map_n[MAX_DIX_MAP];
-#define RTS_DIX_MAP_ETYPE(val) vBIT(val,0,16)
-#define RTS_DIX_MAP_SCW(val) s2BIT(val,21)
-
- u64 rts_q_alternates;
- u64 rts_default_q;
-
- u64 rts_ctrl;
-#define RTS_CTRL_IGNORE_SNAP_OUI s2BIT(2)
-#define RTS_CTRL_IGNORE_LLC_CTRL s2BIT(3)
-
- u64 rts_pn_cam_ctrl;
-#define RTS_PN_CAM_CTRL_WE s2BIT(7)
-#define RTS_PN_CAM_CTRL_STROBE_NEW_CMD s2BIT(15)
-#define RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED s2BIT(15)
-#define RTS_PN_CAM_CTRL_OFFSET(n) vBIT(n,24,8)
- u64 rts_pn_cam_data;
-#define RTS_PN_CAM_DATA_TCP_SELECT s2BIT(7)
-#define RTS_PN_CAM_DATA_PORT(val) vBIT(val,8,16)
-#define RTS_PN_CAM_DATA_SCW(val) vBIT(val,24,8)
-
- u64 rts_ds_mem_ctrl;
-#define RTS_DS_MEM_CTRL_WE s2BIT(7)
-#define RTS_DS_MEM_CTRL_STROBE_NEW_CMD s2BIT(15)
-#define RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED s2BIT(15)
-#define RTS_DS_MEM_CTRL_OFFSET(n) vBIT(n,26,6)
- u64 rts_ds_mem_data;
-#define RTS_DS_MEM_DATA(n) vBIT(n,0,8)
-
- u8 unused16[0x700 - 0x220];
-
- u64 mac_debug_ctrl;
-#define MAC_DBG_ACTIVITY_VALUE 0x411040400000000ULL
-
- u8 unused17[0x2800 - 0x2708];
-
-/* memory controller registers */
- u64 mc_int_status;
-#define MC_INT_STATUS_MC_INT s2BIT(0)
- u64 mc_int_mask;
-#define MC_INT_MASK_MC_INT s2BIT(0)
-
- u64 mc_err_reg;
-#define MC_ERR_REG_ECC_DB_ERR_L s2BIT(14)
-#define MC_ERR_REG_ECC_DB_ERR_U s2BIT(15)
-#define MC_ERR_REG_MIRI_ECC_DB_ERR_0 s2BIT(18)
-#define MC_ERR_REG_MIRI_ECC_DB_ERR_1 s2BIT(20)
-#define MC_ERR_REG_MIRI_CRI_ERR_0 s2BIT(22)
-#define MC_ERR_REG_MIRI_CRI_ERR_1 s2BIT(23)
-#define MC_ERR_REG_SM_ERR s2BIT(31)
-#define MC_ERR_REG_ECC_ALL_SNG (s2BIT(2) | s2BIT(3) | s2BIT(4) | s2BIT(5) |\
- s2BIT(17) | s2BIT(19))
-#define MC_ERR_REG_ECC_ALL_DBL (s2BIT(10) | s2BIT(11) | s2BIT(12) |\
- s2BIT(13) | s2BIT(18) | s2BIT(20))
-#define PLL_LOCK_N s2BIT(39)
- u64 mc_err_mask;
- u64 mc_err_alarm;
-
- u8 unused18[0x100 - 0x28];
-
-/* MC configuration */
- u64 rx_queue_cfg;
-#define RX_QUEUE_CFG_Q0_SZ(n) vBIT(n,0,8)
-#define RX_QUEUE_CFG_Q1_SZ(n) vBIT(n,8,8)
-#define RX_QUEUE_CFG_Q2_SZ(n) vBIT(n,16,8)
-#define RX_QUEUE_CFG_Q3_SZ(n) vBIT(n,24,8)
-#define RX_QUEUE_CFG_Q4_SZ(n) vBIT(n,32,8)
-#define RX_QUEUE_CFG_Q5_SZ(n) vBIT(n,40,8)
-#define RX_QUEUE_CFG_Q6_SZ(n) vBIT(n,48,8)
-#define RX_QUEUE_CFG_Q7_SZ(n) vBIT(n,56,8)
-
- u64 mc_rldram_mrs;
-#define MC_RLDRAM_QUEUE_SIZE_ENABLE s2BIT(39)
-#define MC_RLDRAM_MRS_ENABLE s2BIT(47)
-
- u64 mc_rldram_interleave;
-
- u64 mc_pause_thresh_q0q3;
- u64 mc_pause_thresh_q4q7;
-
- u64 mc_red_thresh_q[8];
-
- u8 unused19[0x200 - 0x168];
- u64 mc_rldram_ref_per;
- u8 unused20[0x220 - 0x208];
- u64 mc_rldram_test_ctrl;
-#define MC_RLDRAM_TEST_MODE s2BIT(47)
-#define MC_RLDRAM_TEST_WRITE s2BIT(7)
-#define MC_RLDRAM_TEST_GO s2BIT(15)
-#define MC_RLDRAM_TEST_DONE s2BIT(23)
-#define MC_RLDRAM_TEST_PASS s2BIT(31)
-
- u8 unused21[0x240 - 0x228];
- u64 mc_rldram_test_add;
- u8 unused22[0x260 - 0x248];
- u64 mc_rldram_test_d0;
- u8 unused23[0x280 - 0x268];
- u64 mc_rldram_test_d1;
- u8 unused24[0x300 - 0x288];
- u64 mc_rldram_test_d2;
-
- u8 unused24_1[0x360 - 0x308];
- u64 mc_rldram_ctrl;
-#define MC_RLDRAM_ENABLE_ODT s2BIT(7)
-
- u8 unused24_2[0x640 - 0x368];
- u64 mc_rldram_ref_per_herc;
-#define MC_RLDRAM_SET_REF_PERIOD(val) vBIT(val, 0, 16)
-
- u8 unused24_3[0x660 - 0x648];
- u64 mc_rldram_mrs_herc;
-
- u8 unused25[0x700 - 0x668];
- u64 mc_debug_ctrl;
-
- u8 unused26[0x3000 - 0x2f08];
-
-/* XGXG */
- /* XGXS control registers */
-
- u64 xgxs_int_status;
-#define XGXS_INT_STATUS_TXGXS s2BIT(0)
-#define XGXS_INT_STATUS_RXGXS s2BIT(1)
- u64 xgxs_int_mask;
-#define XGXS_INT_MASK_TXGXS s2BIT(0)
-#define XGXS_INT_MASK_RXGXS s2BIT(1)
-
- u64 xgxs_txgxs_err_reg;
-#define TXGXS_ECC_SG_ERR s2BIT(7)
-#define TXGXS_ECC_DB_ERR s2BIT(15)
-#define TXGXS_ESTORE_UFLOW s2BIT(31)
-#define TXGXS_TX_SM_ERR s2BIT(39)
-
- u64 xgxs_txgxs_err_mask;
- u64 xgxs_txgxs_err_alarm;
-
- u64 xgxs_rxgxs_err_reg;
-#define RXGXS_ESTORE_OFLOW s2BIT(7)
-#define RXGXS_RX_SM_ERR s2BIT(39)
- u64 xgxs_rxgxs_err_mask;
- u64 xgxs_rxgxs_err_alarm;
-
- u8 unused27[0x100 - 0x40];
-
- u64 xgxs_cfg;
- u64 xgxs_status;
-
- u64 xgxs_cfg_key;
- u64 xgxs_efifo_cfg; /* CHANGED */
- u64 rxgxs_ber_0; /* CHANGED */
- u64 rxgxs_ber_1; /* CHANGED */
-
- u64 spi_control;
-#define SPI_CONTROL_KEY(key) vBIT(key,0,4)
-#define SPI_CONTROL_BYTECNT(cnt) vBIT(cnt,29,3)
-#define SPI_CONTROL_CMD(cmd) vBIT(cmd,32,8)
-#define SPI_CONTROL_ADDR(addr) vBIT(addr,40,24)
-#define SPI_CONTROL_SEL1 s2BIT(4)
-#define SPI_CONTROL_REQ s2BIT(7)
-#define SPI_CONTROL_NACK s2BIT(5)
-#define SPI_CONTROL_DONE s2BIT(6)
- u64 spi_data;
-#define SPI_DATA_WRITE(data,len) vBIT(data,0,len)
-};
-
-#define XENA_REG_SPACE sizeof(struct XENA_dev_config)
-#define XENA_EEPROM_SPACE (0x01 << 11)
-
-#endif /* _REGS_H */
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
deleted file mode 100644
index 1e55ccb4822b..000000000000
--- a/drivers/net/ethernet/neterion/s2io.c
+++ /dev/null
@@ -1,8572 +0,0 @@
-/************************************************************************
- * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
- * Copyright(c) 2002-2010 Exar Corp.
- *
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * Credits:
- * Jeff Garzik : For pointing out the improper error condition
- * check in the s2io_xmit routine and also some
- * issues in the Tx watch dog function. Also for
- * patiently answering all those innumerable
- * questions regaring the 2.6 porting issues.
- * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
- * macros available only in 2.6 Kernel.
- * Francois Romieu : For pointing out all code part that were
- * deprecated and also styling related comments.
- * Grant Grundler : For helping me get rid of some Architecture
- * dependent code.
- * Christopher Hellwig : Some more 2.6 specific issues in the driver.
- *
- * The module loadable parameters that are supported by the driver and a brief
- * explanation of all the variables.
- *
- * rx_ring_num : This can be used to program the number of receive rings used
- * in the driver.
- * rx_ring_sz: This defines the number of receive blocks each ring can have.
- * This is also an array of size 8.
- * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
- * values are 1, 2.
- * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
- * tx_fifo_len: This too is an array of 8. Each element defines the number of
- * Tx descriptors that can be associated with each corresponding FIFO.
- * intr_type: This defines the type of interrupt. The values can be 0(INTA),
- * 2(MSI_X). Default value is '2(MSI_X)'
- * lro_max_pkts: This parameter defines maximum number of packets can be
- * aggregated as a single large packet
- * napi: This parameter used to enable/disable NAPI (polling Rx)
- * Possible values '1' for enable and '0' for disable. Default is '1'
- * vlan_tag_strip: This can be used to enable or disable vlan stripping.
- * Possible values '1' for enable , '0' for disable.
- * Default is '2' - which means disable in promisc mode
- * and enable in non-promiscuous mode.
- * multiq: This parameter used to enable/disable MULTIQUEUE support.
- * Possible values '1' for enable and '0' for disable. Default is '0'
- ************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/mdio.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/stddef.h>
-#include <linux/ioctl.h>
-#include <linux/timex.h>
-#include <linux/ethtool.h>
-#include <linux/workqueue.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/slab.h>
-#include <linux/prefetch.h>
-#include <net/tcp.h>
-#include <net/checksum.h>
-
-#include <asm/div64.h>
-#include <asm/irq.h>
-
-/* local include */
-#include "s2io.h"
-#include "s2io-regs.h"
-
-#define DRV_VERSION "2.0.26.28"
-
-/* S2io Driver name & version. */
-static const char s2io_driver_name[] = "Neterion";
-static const char s2io_driver_version[] = DRV_VERSION;
-
-static const int rxd_size[2] = {32, 48};
-static const int rxd_count[2] = {127, 85};
-
-static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
-{
- int ret;
-
- ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
- (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
-
- return ret;
-}
-
-/*
- * Cards with following subsystem_id have a link state indication
- * problem, 600B, 600C, 600D, 640B, 640C and 640D.
- * macro below identifies these cards given the subsystem_id.
- */
-#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
- (dev_type == XFRAME_I_DEVICE) ? \
- ((((subid >= 0x600B) && (subid <= 0x600D)) || \
- ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
-
-#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
- ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
-
-static inline int is_s2io_card_up(const struct s2io_nic *sp)
-{
- return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
-}
-
-/* Ethtool related variables and Macros. */
-static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
- "Register test\t(offline)",
- "Eeprom test\t(offline)",
- "Link test\t(online)",
- "RLDRAM test\t(offline)",
- "BIST Test\t(offline)"
-};
-
-static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
- {"tmac_frms"},
- {"tmac_data_octets"},
- {"tmac_drop_frms"},
- {"tmac_mcst_frms"},
- {"tmac_bcst_frms"},
- {"tmac_pause_ctrl_frms"},
- {"tmac_ttl_octets"},
- {"tmac_ucst_frms"},
- {"tmac_nucst_frms"},
- {"tmac_any_err_frms"},
- {"tmac_ttl_less_fb_octets"},
- {"tmac_vld_ip_octets"},
- {"tmac_vld_ip"},
- {"tmac_drop_ip"},
- {"tmac_icmp"},
- {"tmac_rst_tcp"},
- {"tmac_tcp"},
- {"tmac_udp"},
- {"rmac_vld_frms"},
- {"rmac_data_octets"},
- {"rmac_fcs_err_frms"},
- {"rmac_drop_frms"},
- {"rmac_vld_mcst_frms"},
- {"rmac_vld_bcst_frms"},
- {"rmac_in_rng_len_err_frms"},
- {"rmac_out_rng_len_err_frms"},
- {"rmac_long_frms"},
- {"rmac_pause_ctrl_frms"},
- {"rmac_unsup_ctrl_frms"},
- {"rmac_ttl_octets"},
- {"rmac_accepted_ucst_frms"},
- {"rmac_accepted_nucst_frms"},
- {"rmac_discarded_frms"},
- {"rmac_drop_events"},
- {"rmac_ttl_less_fb_octets"},
- {"rmac_ttl_frms"},
- {"rmac_usized_frms"},
- {"rmac_osized_frms"},
- {"rmac_frag_frms"},
- {"rmac_jabber_frms"},
- {"rmac_ttl_64_frms"},
- {"rmac_ttl_65_127_frms"},
- {"rmac_ttl_128_255_frms"},
- {"rmac_ttl_256_511_frms"},
- {"rmac_ttl_512_1023_frms"},
- {"rmac_ttl_1024_1518_frms"},
- {"rmac_ip"},
- {"rmac_ip_octets"},
- {"rmac_hdr_err_ip"},
- {"rmac_drop_ip"},
- {"rmac_icmp"},
- {"rmac_tcp"},
- {"rmac_udp"},
- {"rmac_err_drp_udp"},
- {"rmac_xgmii_err_sym"},
- {"rmac_frms_q0"},
- {"rmac_frms_q1"},
- {"rmac_frms_q2"},
- {"rmac_frms_q3"},
- {"rmac_frms_q4"},
- {"rmac_frms_q5"},
- {"rmac_frms_q6"},
- {"rmac_frms_q7"},
- {"rmac_full_q0"},
- {"rmac_full_q1"},
- {"rmac_full_q2"},
- {"rmac_full_q3"},
- {"rmac_full_q4"},
- {"rmac_full_q5"},
- {"rmac_full_q6"},
- {"rmac_full_q7"},
- {"rmac_pause_cnt"},
- {"rmac_xgmii_data_err_cnt"},
- {"rmac_xgmii_ctrl_err_cnt"},
- {"rmac_accepted_ip"},
- {"rmac_err_tcp"},
- {"rd_req_cnt"},
- {"new_rd_req_cnt"},
- {"new_rd_req_rtry_cnt"},
- {"rd_rtry_cnt"},
- {"wr_rtry_rd_ack_cnt"},
- {"wr_req_cnt"},
- {"new_wr_req_cnt"},
- {"new_wr_req_rtry_cnt"},
- {"wr_rtry_cnt"},
- {"wr_disc_cnt"},
- {"rd_rtry_wr_ack_cnt"},
- {"txp_wr_cnt"},
- {"txd_rd_cnt"},
- {"txd_wr_cnt"},
- {"rxd_rd_cnt"},
- {"rxd_wr_cnt"},
- {"txf_rd_cnt"},
- {"rxf_wr_cnt"}
-};
-
-static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
- {"rmac_ttl_1519_4095_frms"},
- {"rmac_ttl_4096_8191_frms"},
- {"rmac_ttl_8192_max_frms"},
- {"rmac_ttl_gt_max_frms"},
- {"rmac_osized_alt_frms"},
- {"rmac_jabber_alt_frms"},
- {"rmac_gt_max_alt_frms"},
- {"rmac_vlan_frms"},
- {"rmac_len_discard"},
- {"rmac_fcs_discard"},
- {"rmac_pf_discard"},
- {"rmac_da_discard"},
- {"rmac_red_discard"},
- {"rmac_rts_discard"},
- {"rmac_ingm_full_discard"},
- {"link_fault_cnt"}
-};
-
-static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
- {"\n DRIVER STATISTICS"},
- {"single_bit_ecc_errs"},
- {"double_bit_ecc_errs"},
- {"parity_err_cnt"},
- {"serious_err_cnt"},
- {"soft_reset_cnt"},
- {"fifo_full_cnt"},
- {"ring_0_full_cnt"},
- {"ring_1_full_cnt"},
- {"ring_2_full_cnt"},
- {"ring_3_full_cnt"},
- {"ring_4_full_cnt"},
- {"ring_5_full_cnt"},
- {"ring_6_full_cnt"},
- {"ring_7_full_cnt"},
- {"alarm_transceiver_temp_high"},
- {"alarm_transceiver_temp_low"},
- {"alarm_laser_bias_current_high"},
- {"alarm_laser_bias_current_low"},
- {"alarm_laser_output_power_high"},
- {"alarm_laser_output_power_low"},
- {"warn_transceiver_temp_high"},
- {"warn_transceiver_temp_low"},
- {"warn_laser_bias_current_high"},
- {"warn_laser_bias_current_low"},
- {"warn_laser_output_power_high"},
- {"warn_laser_output_power_low"},
- {"lro_aggregated_pkts"},
- {"lro_flush_both_count"},
- {"lro_out_of_sequence_pkts"},
- {"lro_flush_due_to_max_pkts"},
- {"lro_avg_aggr_pkts"},
- {"mem_alloc_fail_cnt"},
- {"pci_map_fail_cnt"},
- {"watchdog_timer_cnt"},
- {"mem_allocated"},
- {"mem_freed"},
- {"link_up_cnt"},
- {"link_down_cnt"},
- {"link_up_time"},
- {"link_down_time"},
- {"tx_tcode_buf_abort_cnt"},
- {"tx_tcode_desc_abort_cnt"},
- {"tx_tcode_parity_err_cnt"},
- {"tx_tcode_link_loss_cnt"},
- {"tx_tcode_list_proc_err_cnt"},
- {"rx_tcode_parity_err_cnt"},
- {"rx_tcode_abort_cnt"},
- {"rx_tcode_parity_abort_cnt"},
- {"rx_tcode_rda_fail_cnt"},
- {"rx_tcode_unkn_prot_cnt"},
- {"rx_tcode_fcs_err_cnt"},
- {"rx_tcode_buf_size_err_cnt"},
- {"rx_tcode_rxd_corrupt_cnt"},
- {"rx_tcode_unkn_err_cnt"},
- {"tda_err_cnt"},
- {"pfc_err_cnt"},
- {"pcc_err_cnt"},
- {"tti_err_cnt"},
- {"tpa_err_cnt"},
- {"sm_err_cnt"},
- {"lso_err_cnt"},
- {"mac_tmac_err_cnt"},
- {"mac_rmac_err_cnt"},
- {"xgxs_txgxs_err_cnt"},
- {"xgxs_rxgxs_err_cnt"},
- {"rc_err_cnt"},
- {"prc_pcix_err_cnt"},
- {"rpa_err_cnt"},
- {"rda_err_cnt"},
- {"rti_err_cnt"},
- {"mc_err_cnt"}
-};
-
-#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
-#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
-#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
-
-#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
-#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
-
-#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
-#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
-
-#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
-#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
-
-/* copy mac addr to def_mac_addr array */
-static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
-{
- sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
- sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
- sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
- sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
- sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
- sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
-}
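-
-/*
- * Illustrative sketch (not part of the original driver): the helper above
- * stores the 48-bit MAC address most-significant byte first. In plain C
- * terms:
- *
- *   u64 mac = 0x0000001122334455ULL;
- *   u8 b[6];
- *   for (int i = 0; i < 6; i++)
- *           b[5 - i] = (u8)(mac >> (8 * i));
- *   // b[] == { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }
- */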
-
-/*
- * Constants to be programmed into Xena's registers to configure
- * the XAUI.
- */
-
-#define END_SIGN 0x0
-static const u64 herc_act_dtx_cfg[] = {
- /* Set address */
- 0x8000051536750000ULL, 0x80000515367500E0ULL,
- /* Write data */
- 0x8000051536750004ULL, 0x80000515367500E4ULL,
- /* Set address */
- 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
- /* Write data */
- 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
- /* Set address */
- 0x801205150D440000ULL, 0x801205150D4400E0ULL,
- /* Write data */
- 0x801205150D440004ULL, 0x801205150D4400E4ULL,
- /* Set address */
- 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
- /* Write data */
- 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
- /* Done */
- END_SIGN
-};
-
-static const u64 xena_dtx_cfg[] = {
- /* Set address */
- 0x8000051500000000ULL, 0x80000515000000E0ULL,
- /* Write data */
- 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
- /* Set address */
- 0x8001051500000000ULL, 0x80010515000000E0ULL,
- /* Write data */
- 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
- /* Set address */
- 0x8002051500000000ULL, 0x80020515000000E0ULL,
- /* Write data */
- 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
- END_SIGN
-};
-
-/*
- * Constants for fixing the MAC address problem seen mostly on
- * Alpha machines.
- */
-static const u64 fix_mac[] = {
- 0x0060000000000000ULL, 0x0060600000000000ULL,
- 0x0040600000000000ULL, 0x0000600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0060600000000000ULL,
- 0x0020600000000000ULL, 0x0000600000000000ULL,
- 0x0040600000000000ULL, 0x0060600000000000ULL,
- END_SIGN
-};
-
-MODULE_DESCRIPTION("Neterion 10GbE driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-
-/* Loadable module parameters. */
-S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
-S2IO_PARM_INT(rx_ring_num, 1);
-S2IO_PARM_INT(multiq, 0);
-S2IO_PARM_INT(rx_ring_mode, 1);
-S2IO_PARM_INT(use_continuous_tx_intrs, 1);
-S2IO_PARM_INT(rmac_pause_time, 0x100);
-S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
-S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
-S2IO_PARM_INT(shared_splits, 0);
-S2IO_PARM_INT(tmac_util_period, 5);
-S2IO_PARM_INT(rmac_util_period, 5);
-S2IO_PARM_INT(l3l4hdr_size, 128);
-/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
-S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
-/* Frequency of Rx desc syncs expressed as power of 2 */
-S2IO_PARM_INT(rxsync_frequency, 3);
-/* Interrupt type. Values can be 0 (INTA) or 2 (MSI_X) */
-S2IO_PARM_INT(intr_type, 2);
-/* Large receive offload (LRO) feature */
-
-/* Max pkts to be aggregated by LRO at one time. If not specified,
- * aggregation happens until we hit the max IP pkt size (64K)
- */
-S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
-S2IO_PARM_INT(indicate_max_pkts, 0);
-
-S2IO_PARM_INT(napi, 1);
-S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
-
-static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
-{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
-static unsigned int rx_ring_sz[MAX_RX_RINGS] =
-{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
-static unsigned int rts_frm_len[MAX_RX_RINGS] =
-{[0 ...(MAX_RX_RINGS - 1)] = 0 };
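-
-/*
- * Note (illustrative, not from the original source): the "[a ... b] ="
- * syntax above is the GNU C range-designator extension, which initializes
- * a run of array elements at once, e.g.:
- *
- *   int v[4] = { 9, [1 ... 3] = 5 };   => v == { 9, 5, 5, 5 }
- */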
-
-module_param_array(tx_fifo_len, uint, NULL, 0);
-module_param_array(rx_ring_sz, uint, NULL, 0);
-module_param_array(rts_frm_len, uint, NULL, 0);
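-
-/*
- * Hypothetical usage sketch (not from the original source): array module
- * parameters take comma-separated values on the module command line, e.g.:
- *
- *   modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,512
- */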
-
-/*
- * S2IO device table.
- * This table lists all the devices that this driver supports.
- */
-static const struct pci_device_id s2io_tbl[] = {
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
- PCI_ANY_ID, PCI_ANY_ID},
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
- PCI_ANY_ID, PCI_ANY_ID},
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
- PCI_ANY_ID, PCI_ANY_ID},
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
- PCI_ANY_ID, PCI_ANY_ID},
- {0,}
-};
-
-MODULE_DEVICE_TABLE(pci, s2io_tbl);
-
-static const struct pci_error_handlers s2io_err_handler = {
- .error_detected = s2io_io_error_detected,
- .slot_reset = s2io_io_slot_reset,
- .resume = s2io_io_resume,
-};
-
-static struct pci_driver s2io_driver = {
- .name = "S2IO",
- .id_table = s2io_tbl,
- .probe = s2io_init_nic,
- .remove = s2io_rem_nic,
- .err_handler = &s2io_err_handler,
-};
-
-/* A helper macro used by both the init and free shared_mem functions. */
-#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
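-
-/*
- * Worked example (illustrative): if lst_per_page works out to 7 TxD lists
- * per page, a FIFO of 512 entries needs
- * TXD_MEM_PAGE_CNT(512, 7) == DIV_ROUND_UP(512, 7) == 74 pages.
- */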
-
-/* netqueue manipulation helper functions */
-static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
-{
- if (!sp->config.multiq) {
- int i;
-
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
- }
- netif_tx_stop_all_queues(sp->dev);
-}
-
-static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
-{
- if (!sp->config.multiq)
- sp->mac_control.fifos[fifo_no].queue_state =
- FIFO_QUEUE_STOP;
-
- netif_tx_stop_all_queues(sp->dev);
-}
-
-static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
-{
- if (!sp->config.multiq) {
- int i;
-
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
- }
- netif_tx_start_all_queues(sp->dev);
-}
-
-static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
-{
- if (!sp->config.multiq) {
- int i;
-
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
- }
- netif_tx_wake_all_queues(sp->dev);
-}
-
-static inline void s2io_wake_tx_queue(
- struct fifo_info *fifo, int cnt, u8 multiq)
-{
- if (multiq) {
- if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
- netif_wake_subqueue(fifo->dev, fifo->fifo_no);
- } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
- if (netif_queue_stopped(fifo->dev)) {
- fifo->queue_state = FIFO_QUEUE_START;
- netif_wake_queue(fifo->dev);
- }
- }
-}
-
-/**
- * init_shared_mem - Allocation and Initialization of Memory
- * @nic: Device private variable.
- * Description: The function allocates all the memory areas shared
- * between the NIC and the driver. This includes Tx descriptors,
- * Rx descriptors and the statistics block.
- */
-
-static int init_shared_mem(struct s2io_nic *nic)
-{
- u32 size;
- void *tmp_v_addr, *tmp_v_addr_next;
- dma_addr_t tmp_p_addr, tmp_p_addr_next;
- struct RxD_block *pre_rxd_blk = NULL;
- int i, j, blk_cnt;
- int lst_size, lst_per_page;
- struct net_device *dev = nic->dev;
- unsigned long tmp;
- struct buffAdd *ba;
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
- unsigned long long mem_allocated = 0;
-
- /* Allocation and initialization of TXDLs in FIFOs */
- size = 0;
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- size += tx_cfg->fifo_len;
- }
- if (size > MAX_AVAILABLE_TXDS) {
- DBG_PRINT(ERR_DBG,
- "Too many TxDs requested: %d, max supported: %d\n",
- size, MAX_AVAILABLE_TXDS);
- return -EINVAL;
- }
-
- size = 0;
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- size = tx_cfg->fifo_len;
- /*
- * Legal values are from 2 to 8192
- */
- if (size < 2) {
- DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
- "Valid lengths are 2 through 8192\n",
- i, size);
- return -EINVAL;
- }
- }
-
- lst_size = (sizeof(struct TxD) * config->max_txds);
- lst_per_page = PAGE_SIZE / lst_size;
-
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
- int fifo_len = tx_cfg->fifo_len;
- int list_holder_size = fifo_len * sizeof(struct list_info_hold);
-
- fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
- if (!fifo->list_info) {
- DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
- return -ENOMEM;
- }
- mem_allocated += list_holder_size;
- }
- for (i = 0; i < config->tx_fifo_num; i++) {
- int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
- lst_per_page);
- struct fifo_info *fifo = &mac_control->fifos[i];
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- fifo->tx_curr_put_info.offset = 0;
- fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
- fifo->tx_curr_get_info.offset = 0;
- fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
- fifo->fifo_no = i;
- fifo->nic = nic;
- fifo->max_txds = MAX_SKB_FRAGS + 2;
- fifo->dev = dev;
-
- for (j = 0; j < page_num; j++) {
- int k = 0;
- dma_addr_t tmp_p;
- void *tmp_v;
- tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
- &tmp_p, GFP_KERNEL);
- if (!tmp_v) {
- DBG_PRINT(INFO_DBG,
- "dma_alloc_coherent failed for TxDL\n");
- return -ENOMEM;
- }
- /* If we got a zero DMA address (can happen on
- * certain platforms like PPC), reallocate.
- * Store the virtual address of the page we don't want,
- * to be freed later.
- */
- if (!tmp_p) {
- mac_control->zerodma_virt_addr = tmp_v;
- DBG_PRINT(INIT_DBG,
- "%s: Zero DMA address for TxDL. "
- "Virtual address %p\n",
- dev->name, tmp_v);
- tmp_v = dma_alloc_coherent(&nic->pdev->dev,
- PAGE_SIZE, &tmp_p,
- GFP_KERNEL);
- if (!tmp_v) {
- DBG_PRINT(INFO_DBG,
- "dma_alloc_coherent failed for TxDL\n");
- return -ENOMEM;
- }
- mem_allocated += PAGE_SIZE;
- }
- while (k < lst_per_page) {
- int l = (j * lst_per_page) + k;
- if (l == tx_cfg->fifo_len)
- break;
- fifo->list_info[l].list_virt_addr =
- tmp_v + (k * lst_size);
- fifo->list_info[l].list_phy_addr =
- tmp_p + (k * lst_size);
- k++;
- }
- }
- }
-
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- size = tx_cfg->fifo_len;
- fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
- if (!fifo->ufo_in_band_v)
- return -ENOMEM;
- mem_allocated += (size * sizeof(u64));
- }
-
- /* Allocation and initialization of RXDs in Rings */
- size = 0;
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
- struct ring_info *ring = &mac_control->rings[i];
-
- if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
- DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
- "multiple of RxDs per Block\n",
- dev->name, i);
- return FAILURE;
- }
- size += rx_cfg->num_rxd;
- ring->block_count = rx_cfg->num_rxd /
- (rxd_count[nic->rxd_mode] + 1);
- ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
- }
- if (nic->rxd_mode == RXD_MODE_1)
- size = (size * (sizeof(struct RxD1)));
- else
- size = (size * (sizeof(struct RxD3)));
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
- struct ring_info *ring = &mac_control->rings[i];
-
- ring->rx_curr_get_info.block_index = 0;
- ring->rx_curr_get_info.offset = 0;
- ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
- ring->rx_curr_put_info.block_index = 0;
- ring->rx_curr_put_info.offset = 0;
- ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
- ring->nic = nic;
- ring->ring_no = i;
-
- blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
- /* Allocating all the Rx blocks */
- for (j = 0; j < blk_cnt; j++) {
- struct rx_block_info *rx_blocks;
- int l;
-
- rx_blocks = &ring->rx_blocks[j];
- size = SIZE_OF_BLOCK; /* size is always page size */
- tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
- &tmp_p_addr, GFP_KERNEL);
- if (tmp_v_addr == NULL) {
- /*
- * In case of failure, free_shared_mem()
- * is called, which should free any
- * memory that was allocated before the
- * failure happened.
- */
- rx_blocks->block_virt_addr = tmp_v_addr;
- return -ENOMEM;
- }
- mem_allocated += size;
-
- size = sizeof(struct rxd_info) *
- rxd_count[nic->rxd_mode];
- rx_blocks->block_virt_addr = tmp_v_addr;
- rx_blocks->block_dma_addr = tmp_p_addr;
- rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
- if (!rx_blocks->rxds)
- return -ENOMEM;
- mem_allocated += size;
- for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
- rx_blocks->rxds[l].virt_addr =
- rx_blocks->block_virt_addr +
- (rxd_size[nic->rxd_mode] * l);
- rx_blocks->rxds[l].dma_addr =
- rx_blocks->block_dma_addr +
- (rxd_size[nic->rxd_mode] * l);
- }
- }
- /* Interlinking all Rx Blocks */
- for (j = 0; j < blk_cnt; j++) {
- int next = (j + 1) % blk_cnt;
- tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
- tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
- tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
- tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
-
- pre_rxd_blk = tmp_v_addr;
- pre_rxd_blk->reserved_2_pNext_RxD_block =
- (unsigned long)tmp_v_addr_next;
- pre_rxd_blk->pNext_RxD_Blk_physical =
- (u64)tmp_p_addr_next;
- }
- }
- if (nic->rxd_mode == RXD_MODE_3B) {
- /*
- * Allocation of storage for buffer addresses in 2BUFF mode,
- * and of the buffers as well.
- */
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
- struct ring_info *ring = &mac_control->rings[i];
-
- blk_cnt = rx_cfg->num_rxd /
- (rxd_count[nic->rxd_mode] + 1);
- size = sizeof(struct buffAdd *) * blk_cnt;
- ring->ba = kmalloc(size, GFP_KERNEL);
- if (!ring->ba)
- return -ENOMEM;
- mem_allocated += size;
- for (j = 0; j < blk_cnt; j++) {
- int k = 0;
-
- size = sizeof(struct buffAdd) *
- (rxd_count[nic->rxd_mode] + 1);
- ring->ba[j] = kmalloc(size, GFP_KERNEL);
- if (!ring->ba[j])
- return -ENOMEM;
- mem_allocated += size;
- while (k != rxd_count[nic->rxd_mode]) {
- ba = &ring->ba[j][k];
- size = BUF0_LEN + ALIGN_SIZE;
- ba->ba_0_org = kmalloc(size, GFP_KERNEL);
- if (!ba->ba_0_org)
- return -ENOMEM;
- mem_allocated += size;
- tmp = (unsigned long)ba->ba_0_org;
- tmp += ALIGN_SIZE;
- tmp &= ~((unsigned long)ALIGN_SIZE);
- ba->ba_0 = (void *)tmp;
-
- size = BUF1_LEN + ALIGN_SIZE;
- ba->ba_1_org = kmalloc(size, GFP_KERNEL);
- if (!ba->ba_1_org)
- return -ENOMEM;
- mem_allocated += size;
- tmp = (unsigned long)ba->ba_1_org;
- tmp += ALIGN_SIZE;
- tmp &= ~((unsigned long)ALIGN_SIZE);
- ba->ba_1 = (void *)tmp;
- k++;
- }
- }
- }
- }
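-
- /*
- * Aside (illustrative, assuming ALIGN_SIZE is one less than a power
- * of two, e.g. 127): the "tmp += ALIGN_SIZE; tmp &= ~ALIGN_SIZE"
- * pairs above are the usual align-up idiom,
- *
- *   aligned = (addr + (align - 1)) & ~(align - 1);
- *
- * e.g. (0x1005 + 127) & ~127UL == 0x1080, a 128-byte boundary.
- */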
-
- /* Allocation and initialization of Statistics block */
- size = sizeof(struct stat_block);
- mac_control->stats_mem =
- dma_alloc_coherent(&nic->pdev->dev, size,
- &mac_control->stats_mem_phy, GFP_KERNEL);
-
- if (!mac_control->stats_mem) {
- /*
- * In case of failure, free_shared_mem() is called, which
- * should free any memory that was allocated before the
- * failure happened.
- */
- return -ENOMEM;
- }
- mem_allocated += size;
- mac_control->stats_mem_sz = size;
-
- tmp_v_addr = mac_control->stats_mem;
- mac_control->stats_info = tmp_v_addr;
- memset(tmp_v_addr, 0, size);
- DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
- dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
- mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
- return SUCCESS;
-}
-
-/**
- * free_shared_mem - Free the allocated Memory
- * @nic: Device private variable.
- * Description: This function frees all memory allocated by
- * init_shared_mem() and returns it to the kernel.
- */
-
-static void free_shared_mem(struct s2io_nic *nic)
-{
- int i, j, blk_cnt, size;
- void *tmp_v_addr;
- dma_addr_t tmp_p_addr;
- int lst_size, lst_per_page;
- struct net_device *dev;
- int page_num = 0;
- struct config_param *config;
- struct mac_info *mac_control;
- struct stat_block *stats;
- struct swStat *swstats;
-
- if (!nic)
- return;
-
- dev = nic->dev;
-
- config = &nic->config;
- mac_control = &nic->mac_control;
- stats = mac_control->stats_info;
- swstats = &stats->sw_stat;
-
- lst_size = sizeof(struct TxD) * config->max_txds;
- lst_per_page = PAGE_SIZE / lst_size;
-
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
- for (j = 0; j < page_num; j++) {
- int mem_blks = (j * lst_per_page);
- struct list_info_hold *fli;
-
- if (!fifo->list_info)
- return;
-
- fli = &fifo->list_info[mem_blks];
- if (!fli->list_virt_addr)
- break;
- dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
- fli->list_virt_addr,
- fli->list_phy_addr);
- swstats->mem_freed += PAGE_SIZE;
- }
- /* If we got a zero DMA address during allocation,
- * free the page now
- */
- if (mac_control->zerodma_virt_addr) {
- dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
- mac_control->zerodma_virt_addr,
- (dma_addr_t)0);
- DBG_PRINT(INIT_DBG,
- "%s: Freeing TxDL with zero DMA address. "
- "Virtual address %p\n",
- dev->name, mac_control->zerodma_virt_addr);
- swstats->mem_freed += PAGE_SIZE;
- }
- kfree(fifo->list_info);
- swstats->mem_freed += tx_cfg->fifo_len *
- sizeof(struct list_info_hold);
- }
-
- size = SIZE_OF_BLOCK;
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- blk_cnt = ring->block_count;
- for (j = 0; j < blk_cnt; j++) {
- tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
- tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
- if (tmp_v_addr == NULL)
- break;
- dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
- tmp_p_addr);
- swstats->mem_freed += size;
- kfree(ring->rx_blocks[j].rxds);
- swstats->mem_freed += sizeof(struct rxd_info) *
- rxd_count[nic->rxd_mode];
- }
- }
-
- if (nic->rxd_mode == RXD_MODE_3B) {
- /* Freeing buffer storage addresses in 2BUFF mode. */
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
- struct ring_info *ring = &mac_control->rings[i];
-
- blk_cnt = rx_cfg->num_rxd /
- (rxd_count[nic->rxd_mode] + 1);
- for (j = 0; j < blk_cnt; j++) {
- int k = 0;
- if (!ring->ba[j])
- continue;
- while (k != rxd_count[nic->rxd_mode]) {
- struct buffAdd *ba = &ring->ba[j][k];
- kfree(ba->ba_0_org);
- swstats->mem_freed +=
- BUF0_LEN + ALIGN_SIZE;
- kfree(ba->ba_1_org);
- swstats->mem_freed +=
- BUF1_LEN + ALIGN_SIZE;
- k++;
- }
- kfree(ring->ba[j]);
- swstats->mem_freed += sizeof(struct buffAdd) *
- (rxd_count[nic->rxd_mode] + 1);
- }
- kfree(ring->ba);
- swstats->mem_freed += sizeof(struct buffAdd *) *
- blk_cnt;
- }
- }
-
- for (i = 0; i < nic->config.tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- if (fifo->ufo_in_band_v) {
- swstats->mem_freed += tx_cfg->fifo_len *
- sizeof(u64);
- kfree(fifo->ufo_in_band_v);
- }
- }
-
- if (mac_control->stats_mem) {
- swstats->mem_freed += mac_control->stats_mem_sz;
- dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
- mac_control->stats_mem,
- mac_control->stats_mem_phy);
- }
-}
-
-/*
- * s2io_verify_pci_mode - Determine which PCI/PCI-X mode the card is in.
- */
-
-static int s2io_verify_pci_mode(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 val64 = 0;
- int mode;
-
- val64 = readq(&bar0->pci_mode);
- mode = (u8)GET_PCI_MODE(val64);
-
- if (val64 & PCI_MODE_UNKNOWN_MODE)
- return -1; /* Unknown PCI mode */
- return mode;
-}
-
-#define NEC_VENID 0x1033
-#define NEC_DEVID 0x0125
-static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
-{
- struct pci_dev *tdev = NULL;
- for_each_pci_dev(tdev) {
- if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
- if (tdev->bus == s2io_pdev->bus->parent) {
- pci_dev_put(tdev);
- return 1;
- }
- }
- }
- return 0;
-}
-
-static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
-/*
- * s2io_print_pci_mode - Print and record the PCI/PCI-X bus mode.
- */
-static int s2io_print_pci_mode(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 val64 = 0;
- int mode;
- struct config_param *config = &nic->config;
- const char *pcimode;
-
- val64 = readq(&bar0->pci_mode);
- mode = (u8)GET_PCI_MODE(val64);
-
- if (val64 & PCI_MODE_UNKNOWN_MODE)
- return -1; /* Unknown PCI mode */
-
- config->bus_speed = bus_speed[mode];
-
- if (s2io_on_nec_bridge(nic->pdev)) {
- DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
- nic->dev->name);
- return mode;
- }
-
- switch (mode) {
- case PCI_MODE_PCI_33:
- pcimode = "33MHz PCI bus";
- break;
- case PCI_MODE_PCI_66:
- pcimode = "66MHz PCI bus";
- break;
- case PCI_MODE_PCIX_M1_66:
- pcimode = "66MHz PCIX(M1) bus";
- break;
- case PCI_MODE_PCIX_M1_100:
- pcimode = "100MHz PCIX(M1) bus";
- break;
- case PCI_MODE_PCIX_M1_133:
- pcimode = "133MHz PCIX(M1) bus";
- break;
- case PCI_MODE_PCIX_M2_66:
- pcimode = "133MHz PCIX(M2) bus";
- break;
- case PCI_MODE_PCIX_M2_100:
- pcimode = "200MHz PCIX(M2) bus";
- break;
- case PCI_MODE_PCIX_M2_133:
- pcimode = "266MHz PCIX(M2) bus";
- break;
- default:
- pcimode = "unsupported bus!";
- mode = -1;
- }
-
- DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
- nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
-
- return mode;
-}
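-
-/*
- * Worked example (illustrative, assuming PCI_MODE_PCIX_M2_133 == 7, per
- * the bus_speed[] table above): GET_PCI_MODE() returning 7 selects
- * bus_speed[7] == 266, matching the "266MHz PCIX(M2) bus" message.
- */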
-
-/**
- * init_tti - Initialize the transmit traffic interrupt scheme
- * @nic: device private variable
- * @link: link status (UP/DOWN) used to enable/disable continuous
- * transmit interrupts
- * @may_sleep: indicates whether we may sleep while waiting for the
- * command to complete
- * Description: The function configures the transmit traffic interrupts.
- * Return Value: SUCCESS on success and
- * '-1' on failure
- */
-
-static int init_tti(struct s2io_nic *nic, int link, bool may_sleep)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 val64 = 0;
- int i;
- struct config_param *config = &nic->config;
-
- for (i = 0; i < config->tx_fifo_num; i++) {
- /*
- * TTI Initialization. Default Tx timer gets us about
- * 250 interrupts per sec. Continuous interrupts are enabled
- * by default.
- */
- if (nic->device_type == XFRAME_II_DEVICE) {
- int count = (nic->config.bus_speed * 125)/2;
- val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
- } else
- val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
-
- val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
- TTI_DATA1_MEM_TX_URNG_B(0x10) |
- TTI_DATA1_MEM_TX_URNG_C(0x30) |
- TTI_DATA1_MEM_TX_TIMER_AC_EN;
- if (i == 0)
- if (use_continuous_tx_intrs && (link == LINK_UP))
- val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
- writeq(val64, &bar0->tti_data1_mem);
-
- if (nic->config.intr_type == MSI_X) {
- val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
- TTI_DATA2_MEM_TX_UFC_B(0x100) |
- TTI_DATA2_MEM_TX_UFC_C(0x200) |
- TTI_DATA2_MEM_TX_UFC_D(0x300);
- } else {
- if ((nic->config.tx_steering_type ==
- TX_DEFAULT_STEERING) &&
- (config->tx_fifo_num > 1) &&
- (i >= nic->udp_fifo_idx) &&
- (i < (nic->udp_fifo_idx +
- nic->total_udp_fifos)))
- val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
- TTI_DATA2_MEM_TX_UFC_B(0x80) |
- TTI_DATA2_MEM_TX_UFC_C(0x100) |
- TTI_DATA2_MEM_TX_UFC_D(0x120);
- else
- val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
- TTI_DATA2_MEM_TX_UFC_B(0x20) |
- TTI_DATA2_MEM_TX_UFC_C(0x40) |
- TTI_DATA2_MEM_TX_UFC_D(0x80);
- }
-
- writeq(val64, &bar0->tti_data2_mem);
-
- val64 = TTI_CMD_MEM_WE |
- TTI_CMD_MEM_STROBE_NEW_CMD |
- TTI_CMD_MEM_OFFSET(i);
- writeq(val64, &bar0->tti_command_mem);
-
- if (wait_for_cmd_complete(&bar0->tti_command_mem,
- TTI_CMD_MEM_STROBE_NEW_CMD,
- S2IO_BIT_RESET, may_sleep) != SUCCESS)
- return FAILURE;
- }
-
- return SUCCESS;
-}
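-
-/*
- * Worked example (illustrative): on a Xframe II card with
- * config.bus_speed == 266, the timer value computed above is
- * (266 * 125) / 2 == 16625, which per the comment above targets
- * roughly 250 transmit interrupts per second.
- */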
-
-/**
- * init_nic - Initialization of hardware
- * @nic: device private variable
- * Description: The function sequentially configures every block
- * of the H/W from their reset values.
- * Return Value: SUCCESS on success and
- * '-1' on failure (endian settings incorrect).
- */
-
-static int init_nic(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- struct net_device *dev = nic->dev;
- register u64 val64 = 0;
- void __iomem *add;
- u32 time;
- int i, j;
- int dtx_cnt = 0;
- unsigned long long mem_share;
- int mem_size;
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
-
- /* Set the swapper control on the card */
- if (s2io_set_swapper(nic)) {
- DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
- return -EIO;
- }
-
- /*
- * Herc requires EOI to be removed from reset before XGXS, so do that first.
- */
- if (nic->device_type & XFRAME_II_DEVICE) {
- val64 = 0xA500000000ULL;
- writeq(val64, &bar0->sw_reset);
- msleep(500);
- val64 = readq(&bar0->sw_reset);
- }
-
- /* Remove XGXS from reset state */
- val64 = 0;
- writeq(val64, &bar0->sw_reset);
- msleep(500);
- val64 = readq(&bar0->sw_reset);
-
- /* Ensure that it's safe to access registers by checking that
- * the RIC_RUNNING bit is cleared. This check is valid only for
- * Xframe II.
- */
- if (nic->device_type == XFRAME_II_DEVICE) {
- for (i = 0; i < 50; i++) {
- val64 = readq(&bar0->adapter_status);
- if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
- break;
- msleep(10);
- }
- if (i == 50)
- return -ENODEV;
- }
-
- /* Enable Receiving broadcasts */
- add = &bar0->mac_cfg;
- val64 = readq(&bar0->mac_cfg);
- val64 |= MAC_RMAC_BCAST_ENABLE;
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32)val64, add);
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64 >> 32), (add + 4));
-
- /* Read registers in all blocks */
- val64 = readq(&bar0->mac_int_mask);
- val64 = readq(&bar0->mc_int_mask);
- val64 = readq(&bar0->xgxs_int_mask);
-
- /* Set MTU */
- val64 = dev->mtu;
- writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
-
- if (nic->device_type & XFRAME_II_DEVICE) {
- while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
- SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
- &bar0->dtx_control, UF);
- if (dtx_cnt & 0x1)
- msleep(1); /* Necessary!! */
- dtx_cnt++;
- }
- } else {
- while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
- SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
- &bar0->dtx_control, UF);
- val64 = readq(&bar0->dtx_control);
- dtx_cnt++;
- }
- }
-
- /* Tx DMA Initialization */
- val64 = 0;
- writeq(val64, &bar0->tx_fifo_partition_0);
- writeq(val64, &bar0->tx_fifo_partition_1);
- writeq(val64, &bar0->tx_fifo_partition_2);
- writeq(val64, &bar0->tx_fifo_partition_3);
-
- for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
- vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
-
- if (i == (config->tx_fifo_num - 1)) {
- if (i % 2 == 0)
- i++;
- }
-
- switch (i) {
- case 1:
- writeq(val64, &bar0->tx_fifo_partition_0);
- val64 = 0;
- j = 0;
- break;
- case 3:
- writeq(val64, &bar0->tx_fifo_partition_1);
- val64 = 0;
- j = 0;
- break;
- case 5:
- writeq(val64, &bar0->tx_fifo_partition_2);
- val64 = 0;
- j = 0;
- break;
- case 7:
- writeq(val64, &bar0->tx_fifo_partition_3);
- val64 = 0;
- j = 0;
- break;
- default:
- j++;
- break;
- }
- }
-
- /*
- * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
- * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
- */
- if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
- writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
-
- val64 = readq(&bar0->tx_fifo_partition_0);
- DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
- &bar0->tx_fifo_partition_0, (unsigned long long)val64);
-
- /*
- * Initialization of Tx_PA_CONFIG register to ignore packet
- * integrity checking.
- */
- val64 = readq(&bar0->tx_pa_cfg);
- val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
- TX_PA_CFG_IGNORE_SNAP_OUI |
- TX_PA_CFG_IGNORE_LLC_CTRL |
- TX_PA_CFG_IGNORE_L2_ERR;
- writeq(val64, &bar0->tx_pa_cfg);
-
- /* Rx DMA initialization. */
- val64 = 0;
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
-
- val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
- }
- writeq(val64, &bar0->rx_queue_priority);
-
- /*
- * Allocating equal share of memory to all the
- * configured Rings.
- */
- val64 = 0;
- if (nic->device_type & XFRAME_II_DEVICE)
- mem_size = 32;
- else
- mem_size = 64;
-
- for (i = 0; i < config->rx_ring_num; i++) {
- switch (i) {
- case 0:
- mem_share = (mem_size / config->rx_ring_num +
- mem_size % config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
- continue;
- case 1:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
- continue;
- case 2:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
- continue;
- case 3:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
- continue;
- case 4:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
- continue;
- case 5:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
- continue;
- case 6:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
- continue;
- case 7:
- mem_share = (mem_size / config->rx_ring_num);
- val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
- continue;
- }
- }
- writeq(val64, &bar0->rx_queue_cfg);
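-
- /*
- * Worked example (illustrative): with mem_size == 64 (Xena) and
- * rx_ring_num == 3, ring 0 gets 64/3 + 64%3 == 21 + 1 == 22 units
- * while rings 1 and 2 get 21 each, so all 64 units are distributed.
- */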
-
- /*
- * Filling Tx round robin registers
- * as per the number of FIFOs for equal scheduling priority
- */
- switch (config->tx_fifo_num) {
- case 1:
- val64 = 0x0;
- writeq(val64, &bar0->tx_w_round_robin_0);
- writeq(val64, &bar0->tx_w_round_robin_1);
- writeq(val64, &bar0->tx_w_round_robin_2);
- writeq(val64, &bar0->tx_w_round_robin_3);
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 2:
- val64 = 0x0001000100010001ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- writeq(val64, &bar0->tx_w_round_robin_1);
- writeq(val64, &bar0->tx_w_round_robin_2);
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0001000100000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 3:
- val64 = 0x0001020001020001ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- val64 = 0x0200010200010200ULL;
- writeq(val64, &bar0->tx_w_round_robin_1);
- val64 = 0x0102000102000102ULL;
- writeq(val64, &bar0->tx_w_round_robin_2);
- val64 = 0x0001020001020001ULL;
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0200010200000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 4:
- val64 = 0x0001020300010203ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- writeq(val64, &bar0->tx_w_round_robin_1);
- writeq(val64, &bar0->tx_w_round_robin_2);
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0001020300000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 5:
- val64 = 0x0001020304000102ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- val64 = 0x0304000102030400ULL;
- writeq(val64, &bar0->tx_w_round_robin_1);
- val64 = 0x0102030400010203ULL;
- writeq(val64, &bar0->tx_w_round_robin_2);
- val64 = 0x0400010203040001ULL;
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0203040000000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 6:
- val64 = 0x0001020304050001ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- val64 = 0x0203040500010203ULL;
- writeq(val64, &bar0->tx_w_round_robin_1);
- val64 = 0x0405000102030405ULL;
- writeq(val64, &bar0->tx_w_round_robin_2);
- val64 = 0x0001020304050001ULL;
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0203040500000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 7:
- val64 = 0x0001020304050600ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- val64 = 0x0102030405060001ULL;
- writeq(val64, &bar0->tx_w_round_robin_1);
- val64 = 0x0203040506000102ULL;
- writeq(val64, &bar0->tx_w_round_robin_2);
- val64 = 0x0304050600010203ULL;
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0405060000000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- case 8:
- val64 = 0x0001020304050607ULL;
- writeq(val64, &bar0->tx_w_round_robin_0);
- writeq(val64, &bar0->tx_w_round_robin_1);
- writeq(val64, &bar0->tx_w_round_robin_2);
- writeq(val64, &bar0->tx_w_round_robin_3);
- val64 = 0x0001020300000000ULL;
- writeq(val64, &bar0->tx_w_round_robin_4);
- break;
- }
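-
- /*
- * Aside (illustrative): each byte of the tx_w_round_robin_* values
- * above names the FIFO that receives the next scheduling slot; e.g.
- * the three-FIFO pattern 0x0001020001020001ULL simply cycles through
- * FIFOs 0, 1 and 2.
- */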
-
- /* Enable all configured Tx FIFO partitions */
- val64 = readq(&bar0->tx_fifo_partition_0);
- val64 |= (TX_FIFO_PARTITION_EN);
- writeq(val64, &bar0->tx_fifo_partition_0);
-
- /* Filling the Rx round robin registers as per the
- * number of Rings and steering based on QoS with
- * equal priority.
- */
- switch (config->rx_ring_num) {
- case 1:
- val64 = 0x0;
- writeq(val64, &bar0->rx_w_round_robin_0);
- writeq(val64, &bar0->rx_w_round_robin_1);
- writeq(val64, &bar0->rx_w_round_robin_2);
- writeq(val64, &bar0->rx_w_round_robin_3);
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080808080808080ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 2:
- val64 = 0x0001000100010001ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- writeq(val64, &bar0->rx_w_round_robin_1);
- writeq(val64, &bar0->rx_w_round_robin_2);
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0001000100000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080808040404040ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 3:
- val64 = 0x0001020001020001ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0200010200010200ULL;
- writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0102000102000102ULL;
- writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0001020001020001ULL;
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0200010200000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080804040402020ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 4:
- val64 = 0x0001020300010203ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- writeq(val64, &bar0->rx_w_round_robin_1);
- writeq(val64, &bar0->rx_w_round_robin_2);
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0001020300000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080404020201010ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 5:
- val64 = 0x0001020304000102ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0304000102030400ULL;
- writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0102030400010203ULL;
- writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0400010203040001ULL;
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0203040000000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080404020201008ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 6:
- val64 = 0x0001020304050001ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0203040500010203ULL;
- writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0405000102030405ULL;
- writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0001020304050001ULL;
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0203040500000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080404020100804ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 7:
- val64 = 0x0001020304050600ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- val64 = 0x0102030405060001ULL;
- writeq(val64, &bar0->rx_w_round_robin_1);
- val64 = 0x0203040506000102ULL;
- writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0304050600010203ULL;
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0405060000000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8080402010080402ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- case 8:
- val64 = 0x0001020304050607ULL;
- writeq(val64, &bar0->rx_w_round_robin_0);
- writeq(val64, &bar0->rx_w_round_robin_1);
- writeq(val64, &bar0->rx_w_round_robin_2);
- writeq(val64, &bar0->rx_w_round_robin_3);
- val64 = 0x0001020300000000ULL;
- writeq(val64, &bar0->rx_w_round_robin_4);
-
- val64 = 0x8040201008040201ULL;
- writeq(val64, &bar0->rts_qos_steering);
- break;
- }
-
- /* UDP Fix */
- val64 = 0;
- for (i = 0; i < 8; i++)
- writeq(val64, &bar0->rts_frm_len_n[i]);
-
- /* Set the default rts frame length for the rings configured */
- val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
- for (i = 0 ; i < config->rx_ring_num ; i++)
- writeq(val64, &bar0->rts_frm_len_n[i]);
-
- /* Set the frame length for the configured rings
- * desired by the user
- */
- for (i = 0; i < config->rx_ring_num; i++) {
- /* If rts_frm_len[i] == 0, it is assumed that the user has not
- * specified frame length steering.
- * If the user provides a frame length, program the
- * rts_frm_len register with that value; otherwise
- * leave it as it is.
- */
- if (rts_frm_len[i] != 0) {
- writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
- &bar0->rts_frm_len_n[i]);
- }
- }
-
- /* Disable differentiated services steering logic */
- for (i = 0; i < 64; i++) {
- if (rts_ds_steer(nic, i, 0) == FAILURE) {
- DBG_PRINT(ERR_DBG,
- "%s: rts_ds_steer failed on codepoint %d\n",
- dev->name, i);
- return -ENODEV;
- }
- }
-
- /* Program statistics memory */
- writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
-
- if (nic->device_type == XFRAME_II_DEVICE) {
- val64 = STAT_BC(0x320);
- writeq(val64, &bar0->stat_byte_cnt);
- }
-
- /*
- * Initializing the sampling rate for the device to calculate the
- * bandwidth utilization.
- */
- val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
- MAC_RX_LINK_UTIL_VAL(rmac_util_period);
- writeq(val64, &bar0->mac_link_util);
-
- /*
- * Initializing the Transmit and Receive Traffic Interrupt
- * Scheme.
- */
-
- /* Initialize TTI */
- if (SUCCESS != init_tti(nic, nic->last_link_state, true))
- return -ENODEV;
-
- /* RTI Initialization */
- if (nic->device_type == XFRAME_II_DEVICE) {
- /*
- * Programmed to generate approximately 500 interrupts
- * per second
- */
- int count = (nic->config.bus_speed * 125)/4;
- val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
- } else
- val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
- val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
- RTI_DATA1_MEM_RX_URNG_B(0x10) |
- RTI_DATA1_MEM_RX_URNG_C(0x30) |
- RTI_DATA1_MEM_RX_TIMER_AC_EN;
-
- writeq(val64, &bar0->rti_data1_mem);
-
- val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
- RTI_DATA2_MEM_RX_UFC_B(0x2) ;
- if (nic->config.intr_type == MSI_X)
- val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
- RTI_DATA2_MEM_RX_UFC_D(0x40));
- else
- val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
- RTI_DATA2_MEM_RX_UFC_D(0x80));
- writeq(val64, &bar0->rti_data2_mem);
-
- for (i = 0; i < config->rx_ring_num; i++) {
- val64 = RTI_CMD_MEM_WE |
- RTI_CMD_MEM_STROBE_NEW_CMD |
- RTI_CMD_MEM_OFFSET(i);
- writeq(val64, &bar0->rti_command_mem);
-
- /*
- * Once the operation completes, the Strobe bit of the
- * command register will be reset. We poll for this
- * particular condition. We wait for a maximum of 500ms
- * for the operation to complete; if it's not complete
- * by then, we return an error.
- */
- time = 0;
- while (true) {
- val64 = readq(&bar0->rti_command_mem);
- if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
- break;
-
- if (time > 10) {
- DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
- dev->name);
- return -ENODEV;
- }
- time++;
- msleep(50);
- }
- }
-
- /*
- * Initialize proper pause threshold values for all
- * 8 queues on the Rx side.
- */
- writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
- writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
-
- /* Disable RMAC PAD STRIPPING */
- add = &bar0->mac_cfg;
- val64 = readq(&bar0->mac_cfg);
- val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64), add);
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64 >> 32), (add + 4));
- val64 = readq(&bar0->mac_cfg);
-
- /* Enable FCS stripping by adapter */
- add = &bar0->mac_cfg;
- val64 = readq(&bar0->mac_cfg);
- val64 |= MAC_CFG_RMAC_STRIP_FCS;
- if (nic->device_type == XFRAME_II_DEVICE)
- writeq(val64, &bar0->mac_cfg);
- else {
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64), add);
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64 >> 32), (add + 4));
- }
-
- /*
- * Set the time value to be inserted in the pause frame
- * generated by xena.
- */
- val64 = readq(&bar0->rmac_pause_cfg);
- val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
- val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
- writeq(val64, &bar0->rmac_pause_cfg);
-
- /*
- * Set the threshold limit for generating pause frames.
- * If the amount of data in any queue exceeds the ratio
- * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
- * a pause frame is generated.
- */
- val64 = 0;
- for (i = 0; i < 4; i++) {
- val64 |= (((u64)0xFF00 |
- nic->mac_control.mc_pause_threshold_q0q3)
- << (i * 2 * 8));
- }
- writeq(val64, &bar0->mc_pause_thresh_q0q3);
-
- val64 = 0;
- for (i = 0; i < 4; i++) {
- val64 |= (((u64)0xFF00 |
- nic->mac_control.mc_pause_threshold_q4q7)
- << (i * 2 * 8));
- }
- writeq(val64, &bar0->mc_pause_thresh_q4q7);
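-
- /*
- * Worked example (illustrative): with the default threshold of 187
- * (0xBB), each 16-bit lane becomes (0xFF00 | 0xBB) == 0xFFBB, so the
- * value written is 0xFFBBFFBBFFBBFFBB -- the same pattern programmed
- * as the initial pause threshold earlier in this function.
- */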
-
- /*
- * TxDMA will stop issuing read requests if the number of read
- * splits exceeds the limit set by shared_splits
- */
- val64 = readq(&bar0->pic_control);
- val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
- writeq(val64, &bar0->pic_control);
-
- if (nic->config.bus_speed == 266) {
- writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
- writeq(0x0, &bar0->read_retry_delay);
- writeq(0x0, &bar0->write_retry_delay);
- }
-
- /*
- * Programming the Herc to split every write transaction
- * that does not start on an ADB to reduce disconnects.
- */
- if (nic->device_type == XFRAME_II_DEVICE) {
- val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
- MISC_LINK_STABILITY_PRD(3);
- writeq(val64, &bar0->misc_control);
- val64 = readq(&bar0->pic_control2);
- val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
- writeq(val64, &bar0->pic_control2);
- }
- if (strstr(nic->product_name, "CX4")) {
- val64 = TMAC_AVG_IPG(0x17);
- writeq(val64, &bar0->tmac_avg_ipg);
- }
-
- return SUCCESS;
-}
-#define LINK_UP_DOWN_INTERRUPT 1
-#define MAC_RMAC_ERR_TIMER 2
-
-static int s2io_link_fault_indication(struct s2io_nic *nic)
-{
- if (nic->device_type == XFRAME_II_DEVICE)
- return LINK_UP_DOWN_INTERRUPT;
- else
- return MAC_RMAC_ERR_TIMER;
-}
-
-/**
- * do_s2io_write_bits - update alarm bits in alarm register
- * @value: alarm bits
- * @flag: interrupt status
- * @addr: address value
- * Description: update alarm bits in alarm register
- * Return Value:
- * NONE.
- */
-static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
-{
- u64 temp64;
-
- temp64 = readq(addr);
-
- if (flag == ENABLE_INTRS)
- temp64 &= ~((u64)value);
- else
- temp64 |= ((u64)value);
- writeq(temp64, addr);
-}
-
-static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 gen_int_mask = 0;
- u64 interruptible;
-
- writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
- if (mask & TX_DMA_INTR) {
- gen_int_mask |= TXDMA_INT_M;
-
- do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
- TXDMA_PCC_INT | TXDMA_TTI_INT |
- TXDMA_LSO_INT | TXDMA_TPA_INT |
- TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
-
- do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
- PFC_MISC_0_ERR | PFC_MISC_1_ERR |
- PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
- &bar0->pfc_err_mask);
-
- do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
- TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
- TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
-
- do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
- PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
- PCC_N_SERR | PCC_6_COF_OV_ERR |
- PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
- PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
- PCC_TXB_ECC_SG_ERR,
- flag, &bar0->pcc_err_mask);
-
- do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
- TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
-
- do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
- LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
- LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
- flag, &bar0->lso_err_mask);
-
- do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
- flag, &bar0->tpa_err_mask);
-
- do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
- }
-
- if (mask & TX_MAC_INTR) {
- gen_int_mask |= TXMAC_INT_M;
- do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
- &bar0->mac_int_mask);
- do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
- TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
- TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
- flag, &bar0->mac_tmac_err_mask);
- }
-
- if (mask & TX_XGXS_INTR) {
- gen_int_mask |= TXXGXS_INT_M;
- do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
- &bar0->xgxs_int_mask);
- do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
- TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
- flag, &bar0->xgxs_txgxs_err_mask);
- }
-
- if (mask & RX_DMA_INTR) {
- gen_int_mask |= RXDMA_INT_M;
- do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
- RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
- flag, &bar0->rxdma_int_mask);
- do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
- RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
- RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
- RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
- do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
- PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
- PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
- &bar0->prc_pcix_err_mask);
- do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
- RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
- &bar0->rpa_err_mask);
- do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
- RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
- RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
- RDA_FRM_ECC_SG_ERR |
- RDA_MISC_ERR|RDA_PCIX_ERR,
- flag, &bar0->rda_err_mask);
- do_s2io_write_bits(RTI_SM_ERR_ALARM |
- RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
- flag, &bar0->rti_err_mask);
- }
-
- if (mask & RX_MAC_INTR) {
- gen_int_mask |= RXMAC_INT_M;
- do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
- &bar0->mac_int_mask);
- interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
- RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
- RMAC_DOUBLE_ECC_ERR);
- if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
- interruptible |= RMAC_LINK_STATE_CHANGE_INT;
- do_s2io_write_bits(interruptible,
- flag, &bar0->mac_rmac_err_mask);
- }
-
- if (mask & RX_XGXS_INTR) {
- gen_int_mask |= RXXGXS_INT_M;
- do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
- &bar0->xgxs_int_mask);
- do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
- &bar0->xgxs_rxgxs_err_mask);
- }
-
- if (mask & MC_INTR) {
- gen_int_mask |= MC_INT_M;
- do_s2io_write_bits(MC_INT_MASK_MC_INT,
- flag, &bar0->mc_int_mask);
- do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
- MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
- &bar0->mc_err_mask);
- }
- nic->general_int_mask = gen_int_mask;
-
- /* Remove this line when alarm interrupts are enabled */
- nic->general_int_mask = 0;
-}
-
-/**
- * en_dis_able_nic_intrs - Enable or Disable the interrupts
- * @nic: device private variable
- * @mask: A mask indicating which Intr block must be modified
- * @flag: A flag indicating whether to enable or disable the Intrs.
- * Description: This function will either disable or enable the interrupts
- * depending on the flag argument. The mask argument can be used to
- * enable/disable any Intr block.
- * Return Value: NONE.
- */
-
-static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 temp64 = 0, intr_mask = 0;
-
- intr_mask = nic->general_int_mask;
-
- /* Top level interrupt classification */
- /* PIC Interrupts */
- if (mask & TX_PIC_INTR) {
- /* Enable PIC Intrs in the general intr mask register */
- intr_mask |= TXPIC_INT_M;
- if (flag == ENABLE_INTRS) {
- /*
- * If this is a Hercules adapter, enable GPIO interrupts;
- * otherwise disable all PCIX, Flash, MDIO, IIC and GPIO
- * interrupts for now.
- * TODO
- */
- if (s2io_link_fault_indication(nic) ==
- LINK_UP_DOWN_INTERRUPT) {
- do_s2io_write_bits(PIC_INT_GPIO, flag,
- &bar0->pic_int_mask);
- do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
- &bar0->gpio_int_mask);
- } else
- writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
- } else if (flag == DISABLE_INTRS) {
- /*
- * Disable PIC Intrs in the general
- * intr mask register
- */
- writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
- }
- }
-
- /* Tx traffic interrupts */
- if (mask & TX_TRAFFIC_INTR) {
- intr_mask |= TXTRAFFIC_INT_M;
- if (flag == ENABLE_INTRS) {
- /*
- * Enable all the Tx side interrupts;
- * writing 0 enables all 64 TX interrupt levels
- */
- writeq(0x0, &bar0->tx_traffic_mask);
- } else if (flag == DISABLE_INTRS) {
- /*
- * Disable Tx Traffic Intrs in the general intr mask
- * register.
- */
- writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
- }
- }
-
- /* Rx traffic interrupts */
- if (mask & RX_TRAFFIC_INTR) {
- intr_mask |= RXTRAFFIC_INT_M;
- if (flag == ENABLE_INTRS) {
- /* writing 0 Enables all 8 RX interrupt levels */
- writeq(0x0, &bar0->rx_traffic_mask);
- } else if (flag == DISABLE_INTRS) {
- /*
- * Disable Rx Traffic Intrs in the general intr mask
- * register.
- */
- writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
- }
- }
-
- temp64 = readq(&bar0->general_int_mask);
- if (flag == ENABLE_INTRS)
- temp64 &= ~((u64)intr_mask);
- else
- temp64 = DISABLE_ALL_INTRS;
- writeq(temp64, &bar0->general_int_mask);
-
- nic->general_int_mask = readq(&bar0->general_int_mask);
-}
-
-/**
- * verify_pcc_quiescent - Checks for PCC quiescent state
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @flag: boolean controlling function path
- * Return: 1 if PCC is quiescent
- * 0 if PCC is not quiescent
- */
-static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
-{
- int ret = 0, herc;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64 = readq(&bar0->adapter_status);
-
- herc = (sp->device_type == XFRAME_II_DEVICE);
-
- if (flag == false) {
- if ((!herc && (sp->pdev->revision >= 4)) || herc) {
- if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
- ret = 1;
- } else {
- if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
- ret = 1;
- }
- } else {
- if ((!herc && (sp->pdev->revision >= 4)) || herc) {
- if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
- ADAPTER_STATUS_RMAC_PCC_IDLE))
- ret = 1;
- } else {
- if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
- ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
- ret = 1;
- }
- }
-
- return ret;
-}
-/**
- * verify_xena_quiescence - Checks whether the H/W is ready
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * Description: Returns whether the H/W is ready to go or not. Depending
- * on whether the adapter enable bit has been written or not, the
- * comparison differs, and the calling function passes a flag to
- * indicate this.
- * Return: 1 if Xena is quiescent
- * 0 if Xena is not quiescent
- */
-
-static int verify_xena_quiescence(struct s2io_nic *sp)
-{
- int mode;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64 = readq(&bar0->adapter_status);
- mode = s2io_verify_pci_mode(sp);
-
- if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
- DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
- DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
- DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
- DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
- DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
- DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
- DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
- return 0;
- }
- if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
- DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
- return 0;
- }
-
- /*
- * In PCI 33 mode, the P_PLL is not used, and therefore,
- * the P_PLL_LOCK bit in the adapter_status register will
- * not be asserted.
- */
- if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
- sp->device_type == XFRAME_II_DEVICE &&
- mode != PCI_MODE_PCI_33) {
- DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
- return 0;
- }
- if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
- ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
- DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
- return 0;
- }
- return 1;
-}
-
-/**
- * fix_mac_address - Fix for the MAC address problem on Alpha platforms
- * @sp: Pointer to device specific structure
- * Description :
- * New procedure to clear MAC address reading problems on Alpha platforms
- *
- */
-
-static void fix_mac_address(struct s2io_nic *sp)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- int i = 0;
-
- while (fix_mac[i] != END_SIGN) {
- writeq(fix_mac[i++], &bar0->gpio_control);
- udelay(10);
- (void) readq(&bar0->gpio_control);
- }
-}
-
-/**
- * start_nic - Turns the device on
- * @nic : device private variable.
- * Description:
- * This function actually turns the device on. Before this function is
- * called, all registers are configured from their reset states
- * and shared memory is allocated but the NIC is still quiescent. On
- * calling this function, the device interrupts are cleared and the NIC is
- * literally switched on by writing into the adapter control register.
- * Return Value:
- * SUCCESS on success and -1 on failure.
- */
-
-static int start_nic(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- struct net_device *dev = nic->dev;
- register u64 val64 = 0;
- u16 subid, i;
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
-
- /* PRC Initialization and configuration */
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- writeq((u64)ring->rx_blocks[0].block_dma_addr,
- &bar0->prc_rxd0_n[i]);
-
- val64 = readq(&bar0->prc_ctrl_n[i]);
- if (nic->rxd_mode == RXD_MODE_1)
- val64 |= PRC_CTRL_RC_ENABLED;
- else
- val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
- if (nic->device_type == XFRAME_II_DEVICE)
- val64 |= PRC_CTRL_GROUP_READS;
- val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
- val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
- writeq(val64, &bar0->prc_ctrl_n[i]);
- }
-
- if (nic->rxd_mode == RXD_MODE_3B) {
- /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
- val64 = readq(&bar0->rx_pa_cfg);
- val64 |= RX_PA_CFG_IGNORE_L2_ERR;
- writeq(val64, &bar0->rx_pa_cfg);
- }
-
- if (vlan_tag_strip == 0) {
- val64 = readq(&bar0->rx_pa_cfg);
- val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
- writeq(val64, &bar0->rx_pa_cfg);
- nic->vlan_strip_flag = 0;
- }
-
- /*
- * Enabling MC-RLDRAM. After enabling the device, we wait
- * for around 100ms, which is approximately the time required
- * for the device to be ready for operation.
- */
- val64 = readq(&bar0->mc_rldram_mrs);
- val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
- SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
- val64 = readq(&bar0->mc_rldram_mrs);
-
- msleep(100); /* Delay by around 100 ms. */
-
- /* Enabling ECC Protection. */
- val64 = readq(&bar0->adapter_control);
- val64 &= ~ADAPTER_ECC_EN;
- writeq(val64, &bar0->adapter_control);
-
- /*
- * Verify that the device is ready to be enabled; if so,
- * enable it.
- */
- val64 = readq(&bar0->adapter_status);
- if (!verify_xena_quiescence(nic)) {
- DBG_PRINT(ERR_DBG, "%s: device is not ready, "
- "Adapter status reads: 0x%llx\n",
- dev->name, (unsigned long long)val64);
- return FAILURE;
- }
-
- /*
- * With some switches, link might be already up at this point.
- * Because of this weird behavior, when we enable laser,
- * we may not get link. We need to handle this. We cannot
- * figure out which switch is misbehaving. So we are forced to
- * make a global change.
- */
-
- /* Enabling Laser. */
- val64 = readq(&bar0->adapter_control);
- val64 |= ADAPTER_EOI_TX_ON;
- writeq(val64, &bar0->adapter_control);
-
- if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
- /*
- * We don't see link state interrupts initially on some switches,
- * so directly schedule the link state task here.
- */
- schedule_work(&nic->set_link_task);
- }
- /* SXE-002: Initialize link and activity LED */
- subid = nic->pdev->subsystem_device;
- if (((subid & 0xFF) >= 0x07) &&
- (nic->device_type == XFRAME_I_DEVICE)) {
- val64 = readq(&bar0->gpio_control);
- val64 |= 0x0000800000000000ULL;
- writeq(val64, &bar0->gpio_control);
- val64 = 0x0411040400000000ULL;
- writeq(val64, (void __iomem *)bar0 + 0x2700);
- }
-
- return SUCCESS;
-}
-/**
- * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
- * @fifo_data: fifo data pointer
- * @txdlp: descriptor
- * @get_off: unused
- */
-static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
- struct TxD *txdlp, int get_off)
-{
- struct s2io_nic *nic = fifo_data->nic;
- struct sk_buff *skb;
- struct TxD *txds;
- u16 j, frg_cnt;
-
- txds = txdlp;
- if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
- dma_unmap_single(&nic->pdev->dev,
- (dma_addr_t)txds->Buffer_Pointer,
- sizeof(u64), DMA_TO_DEVICE);
- txds++;
- }
-
- skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
- if (!skb) {
- memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
- return NULL;
- }
- dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
- skb_headlen(skb), DMA_TO_DEVICE);
- frg_cnt = skb_shinfo(skb)->nr_frags;
- if (frg_cnt) {
- txds++;
- for (j = 0; j < frg_cnt; j++, txds++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
- if (!txds->Buffer_Pointer)
- break;
- dma_unmap_page(&nic->pdev->dev,
- (dma_addr_t)txds->Buffer_Pointer,
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
- }
- memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
- return skb;
-}
-
-/**
- * free_tx_buffers - Free all queued Tx buffers
- * @nic : device private variable.
- * Description:
- * Free all queued Tx buffers.
- * Return Value: void
- */
-
-static void free_tx_buffers(struct s2io_nic *nic)
-{
- struct net_device *dev = nic->dev;
- struct sk_buff *skb;
- struct TxD *txdp;
- int i, j;
- int cnt = 0;
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
- struct stat_block *stats = mac_control->stats_info;
- struct swStat *swstats = &stats->sw_stat;
-
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
- struct fifo_info *fifo = &mac_control->fifos[i];
- unsigned long flags;
-
- spin_lock_irqsave(&fifo->tx_lock, flags);
- for (j = 0; j < tx_cfg->fifo_len; j++) {
- txdp = fifo->list_info[j].list_virt_addr;
- skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
- if (skb) {
- swstats->mem_freed += skb->truesize;
- dev_kfree_skb_irq(skb);
- cnt++;
- }
- }
- DBG_PRINT(INTR_DBG,
- "%s: forcibly freeing %d skbs on FIFO%d\n",
- dev->name, cnt, i);
- fifo->tx_curr_get_info.offset = 0;
- fifo->tx_curr_put_info.offset = 0;
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
- }
-}
-
-/**
- * stop_nic - To stop the nic
- * @nic : device private variable.
- * Description:
- * This function does exactly the opposite of what the start_nic()
- * function does. This function is called to stop the device.
- * Return Value:
- * void.
- */
-
-static void stop_nic(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 val64 = 0;
- u16 interruptible;
-
- /* Disable all interrupts */
- en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
- interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
- interruptible |= TX_PIC_INTR;
- en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
-
- /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
- val64 = readq(&bar0->adapter_control);
- val64 &= ~(ADAPTER_CNTL_EN);
- writeq(val64, &bar0->adapter_control);
-}
-
-/**
- * fill_rx_buffers - Allocates the Rx side skbs
- * @nic : device private variable.
- * @ring: per ring structure
- * @from_card_up: If this is true, we will map the buffer to get
- * the dma address for buf0 and buf1 to give it to the card.
- * Else we will sync the already mapped buffer to give it to the card.
- * Description:
- * The function allocates Rx side skbs and puts the physical
- * address of these buffers into the RxD buffer pointers, so that the NIC
- * can DMA the received frame into these locations.
- * The NIC supports 3 receive modes, viz.
- * 1. single buffer,
- * 2. three buffer and
- * 3. five buffer modes.
- * Each mode defines how many fragments the received frame will be split
- * up into by the NIC. The frame is split into L3 header, L4 header and
- * L4 payload in three buffer mode, and in five buffer mode the L4
- * payload itself is split into 3 fragments. As of now only single
- * buffer mode is supported.
- * Return Value:
- * SUCCESS on success or an appropriate negative value on failure.
- */
-static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
- int from_card_up)
-{
- struct sk_buff *skb;
- struct RxD_t *rxdp;
- int off, size, block_no, block_no1;
- u32 alloc_tab = 0;
- u32 alloc_cnt;
- u64 tmp;
- struct buffAdd *ba;
- struct RxD_t *first_rxdp = NULL;
- u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
- struct RxD1 *rxdp1;
- struct RxD3 *rxdp3;
- struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
-
- alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
-
- block_no1 = ring->rx_curr_get_info.block_index;
- while (alloc_tab < alloc_cnt) {
- block_no = ring->rx_curr_put_info.block_index;
-
- off = ring->rx_curr_put_info.offset;
-
- rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
-
- if ((block_no == block_no1) &&
- (off == ring->rx_curr_get_info.offset) &&
- (rxdp->Host_Control)) {
- DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
- ring->dev->name);
- goto end;
- }
- if (off && (off == ring->rxd_count)) {
- ring->rx_curr_put_info.block_index++;
- if (ring->rx_curr_put_info.block_index ==
- ring->block_count)
- ring->rx_curr_put_info.block_index = 0;
- block_no = ring->rx_curr_put_info.block_index;
- off = 0;
- ring->rx_curr_put_info.offset = off;
- rxdp = ring->rx_blocks[block_no].block_virt_addr;
- DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
- ring->dev->name, rxdp);
-
- }
-
- if ((rxdp->Control_1 & RXD_OWN_XENA) &&
- ((ring->rxd_mode == RXD_MODE_3B) &&
- (rxdp->Control_2 & s2BIT(0)))) {
- ring->rx_curr_put_info.offset = off;
- goto end;
- }
- /* calculate size of skb based on ring mode */
- size = ring->mtu +
- HEADER_ETHERNET_II_802_3_SIZE +
- HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
- if (ring->rxd_mode == RXD_MODE_1)
- size += NET_IP_ALIGN;
- else
- size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
-
- /* allocate skb */
- skb = netdev_alloc_skb(nic->dev, size);
- if (!skb) {
- DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
- ring->dev->name);
- if (first_rxdp) {
- dma_wmb();
- first_rxdp->Control_1 |= RXD_OWN_XENA;
- }
- swstats->mem_alloc_fail_cnt++;
-
- return -ENOMEM;
- }
- swstats->mem_allocated += skb->truesize;
-
- if (ring->rxd_mode == RXD_MODE_1) {
- /* 1 buffer mode - normal operation mode */
- rxdp1 = (struct RxD1 *)rxdp;
- memset(rxdp, 0, sizeof(struct RxD1));
- skb_reserve(skb, NET_IP_ALIGN);
- rxdp1->Buffer0_ptr =
- dma_map_single(&ring->pdev->dev, skb->data,
- size - NET_IP_ALIGN,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
- goto pci_map_failed;
-
- rxdp->Control_2 =
- SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
- rxdp->Host_Control = (unsigned long)skb;
- } else if (ring->rxd_mode == RXD_MODE_3B) {
- /*
- * 2 buffer mode provides 128-byte
- * aligned receive buffers.
- */
-
- rxdp3 = (struct RxD3 *)rxdp;
- /* save buffer pointers to avoid frequent dma mapping */
- Buffer0_ptr = rxdp3->Buffer0_ptr;
- Buffer1_ptr = rxdp3->Buffer1_ptr;
- memset(rxdp, 0, sizeof(struct RxD3));
- /* restore the buffer pointers for dma sync*/
- rxdp3->Buffer0_ptr = Buffer0_ptr;
- rxdp3->Buffer1_ptr = Buffer1_ptr;
-
- ba = &ring->ba[block_no][off];
- skb_reserve(skb, BUF0_LEN);
- tmp = (u64)(unsigned long)skb->data;
- tmp += ALIGN_SIZE;
- tmp &= ~ALIGN_SIZE;
- skb->data = (void *) (unsigned long)tmp;
- skb_reset_tail_pointer(skb);
-
- if (from_card_up) {
- rxdp3->Buffer0_ptr =
- dma_map_single(&ring->pdev->dev,
- ba->ba_0, BUF0_LEN,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
- goto pci_map_failed;
- } else
- dma_sync_single_for_device(&ring->pdev->dev,
- (dma_addr_t)rxdp3->Buffer0_ptr,
- BUF0_LEN,
- DMA_FROM_DEVICE);
-
- rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
- if (ring->rxd_mode == RXD_MODE_3B) {
- /* Two buffer mode */
-
- /*
- * Buffer2 will have L3/L4 header plus
- * L4 payload
- */
- rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
- skb->data,
- ring->mtu + 4,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
- goto pci_map_failed;
-
- if (from_card_up) {
- rxdp3->Buffer1_ptr =
- dma_map_single(&ring->pdev->dev,
- ba->ba_1,
- BUF1_LEN,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(&nic->pdev->dev,
- rxdp3->Buffer1_ptr)) {
- dma_unmap_single(&ring->pdev->dev,
- (dma_addr_t)(unsigned long)
- skb->data,
- ring->mtu + 4,
- DMA_FROM_DEVICE);
- goto pci_map_failed;
- }
- }
- rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
- rxdp->Control_2 |= SET_BUFFER2_SIZE_3
- (ring->mtu + 4);
- }
- rxdp->Control_2 |= s2BIT(0);
- rxdp->Host_Control = (unsigned long) (skb);
- }
- if (alloc_tab & ((1 << rxsync_frequency) - 1))
- rxdp->Control_1 |= RXD_OWN_XENA;
- off++;
- if (off == (ring->rxd_count + 1))
- off = 0;
- ring->rx_curr_put_info.offset = off;
-
- rxdp->Control_2 |= SET_RXD_MARKER;
- if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
- if (first_rxdp) {
- dma_wmb();
- first_rxdp->Control_1 |= RXD_OWN_XENA;
- }
- first_rxdp = rxdp;
- }
- ring->rx_bufs_left += 1;
- alloc_tab++;
- }
-
-end:
- /* Transfer ownership of the first descriptor to the adapter just
- * before exiting. Before that, use a memory barrier so that the
- * ownership bit and the other fields are seen by the adapter in the
- * correct order.
- */
- if (first_rxdp) {
- dma_wmb();
- first_rxdp->Control_1 |= RXD_OWN_XENA;
- }
-
- return SUCCESS;
-
-pci_map_failed:
- swstats->pci_map_fail_cnt++;
- swstats->mem_freed += skb->truesize;
- dev_kfree_skb_irq(skb);
- return -ENOMEM;
-}
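-
-/*
- * Minimal sketch, not part of the original file: the descriptor hand-off
- * used by fill_rx_buffers(). Every field of the RxD must be visible before
- * the ownership bit flips, hence the dma_wmb() between the two writes. The
- * helper name is hypothetical.
- */
-static void rxd_give_to_adapter(struct RxD_t *rxdp)
-{
- /* All buffer pointers and sizes in *rxdp are written by now. */
- dma_wmb();
- rxdp->Control_1 |= RXD_OWN_XENA;
-}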
-
-static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
-{
- struct net_device *dev = sp->dev;
- int j;
- struct sk_buff *skb;
- struct RxD_t *rxdp;
- struct RxD1 *rxdp1;
- struct RxD3 *rxdp3;
- struct mac_info *mac_control = &sp->mac_control;
- struct stat_block *stats = mac_control->stats_info;
- struct swStat *swstats = &stats->sw_stat;
-
- for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
- rxdp = mac_control->rings[ring_no].
- rx_blocks[blk].rxds[j].virt_addr;
- skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
- if (!skb)
- continue;
- if (sp->rxd_mode == RXD_MODE_1) {
- rxdp1 = (struct RxD1 *)rxdp;
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp1->Buffer0_ptr,
- dev->mtu +
- HEADER_ETHERNET_II_802_3_SIZE +
- HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
- DMA_FROM_DEVICE);
- memset(rxdp, 0, sizeof(struct RxD1));
- } else if (sp->rxd_mode == RXD_MODE_3B) {
- rxdp3 = (struct RxD3 *)rxdp;
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp3->Buffer0_ptr,
- BUF0_LEN, DMA_FROM_DEVICE);
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp3->Buffer1_ptr,
- BUF1_LEN, DMA_FROM_DEVICE);
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp3->Buffer2_ptr,
- dev->mtu + 4, DMA_FROM_DEVICE);
- memset(rxdp, 0, sizeof(struct RxD3));
- }
- swstats->mem_freed += skb->truesize;
- dev_kfree_skb(skb);
- mac_control->rings[ring_no].rx_bufs_left -= 1;
- }
-}
-
-/**
- * free_rx_buffers - Frees all Rx buffers
- * @sp: device private variable.
- * Description:
- * This function will free all Rx buffers allocated by host.
- * Return Value:
- * NONE.
- */
-
-static void free_rx_buffers(struct s2io_nic *sp)
-{
- struct net_device *dev = sp->dev;
- int i, blk = 0, buf_cnt = 0;
- struct config_param *config = &sp->config;
- struct mac_info *mac_control = &sp->mac_control;
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- for (blk = 0; blk < rx_ring_sz[i]; blk++)
- free_rxd_blk(sp, i, blk);
-
- ring->rx_curr_put_info.block_index = 0;
- ring->rx_curr_get_info.block_index = 0;
- ring->rx_curr_put_info.offset = 0;
- ring->rx_curr_get_info.offset = 0;
- ring->rx_bufs_left = 0;
- DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
- dev->name, buf_cnt, i);
- }
-}
-
-static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
-{
- if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
- DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
- ring->dev->name);
- }
- return 0;
-}
-
-/**
- * s2io_poll_msix - Rx interrupt handler for NAPI support
- * @napi : pointer to the napi structure.
- * @budget : The number of packets budgeted to be processed during one
- * pass through the "poll" function.
- * Description:
- * Used only when NAPI support is enabled. It does the same thing as
- * rx_intr_handler(), but not in interrupt context, and it processes at
- * most the budgeted number of packets.
- * Return value:
- * The number of packets processed.
- */
-
-static int s2io_poll_msix(struct napi_struct *napi, int budget)
-{
- struct ring_info *ring = container_of(napi, struct ring_info, napi);
- struct net_device *dev = ring->dev;
- int pkts_processed = 0;
- u8 __iomem *addr = NULL;
- u8 val8 = 0;
- struct s2io_nic *nic = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- int budget_org = budget;
-
- if (unlikely(!is_s2io_card_up(nic)))
- return 0;
-
- pkts_processed = rx_intr_handler(ring, budget);
- s2io_chk_rx_buffers(nic, ring);
-
- if (pkts_processed < budget_org) {
- napi_complete_done(napi, pkts_processed);
- /*Re Enable MSI-Rx Vector*/
- addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
- addr += 7 - ring->ring_no;
- val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
- writeb(val8, addr);
- val8 = readb(addr);
- }
- return pkts_processed;
-}
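-
-/*
- * Worked example for the MSI-X re-enable above (derived from the code, not
- * from the hardware manual): each ring's mask bits live in their own byte
- * of the 64-bit xmsi_mask_reg, addressed as byte (7 - ring_no). For ring 2
- * this writes 0xbf to &xmsi_mask_reg + 5; reading the same byte back
- * flushes the posted write.
- */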
-
-static int s2io_poll_inta(struct napi_struct *napi, int budget)
-{
- struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
- int pkts_processed = 0;
- int ring_pkts_processed, i;
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- int budget_org = budget;
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
-
- if (unlikely(!is_s2io_card_up(nic)))
- return 0;
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
- ring_pkts_processed = rx_intr_handler(ring, budget);
- s2io_chk_rx_buffers(nic, ring);
- pkts_processed += ring_pkts_processed;
- budget -= ring_pkts_processed;
- if (budget <= 0)
- break;
- }
- if (pkts_processed < budget_org) {
- napi_complete_done(napi, pkts_processed);
- /* Re enable the Rx interrupts for the ring */
- writeq(0, &bar0->rx_traffic_mask);
- readl(&bar0->rx_traffic_mask);
- }
- return pkts_processed;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * s2io_netpoll - netpoll event handler entry point
- * @dev : pointer to the device structure.
- * Description:
- * This function will be called by upper layer to check for events on the
- * interface in situations where interrupts are disabled. It is used for
- * specific in-kernel networking tasks, such as remote consoles and kernel
- * debugging over the network (for example, netdump in Red Hat).
- */
-static void s2io_netpoll(struct net_device *dev)
-{
- struct s2io_nic *nic = netdev_priv(dev);
- const int irq = nic->pdev->irq;
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
- int i;
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
-
- if (pci_channel_offline(nic->pdev))
- return;
-
- disable_irq(irq);
-
- writeq(val64, &bar0->rx_traffic_int);
- writeq(val64, &bar0->tx_traffic_int);
-
- /* We need to free the transmitted skbs, or netpoll will run out of
- * skbs and eventually applications built on it, such as netdump,
- * will fail.
- */
- for (i = 0; i < config->tx_fifo_num; i++)
- tx_intr_handler(&mac_control->fifos[i]);
-
- /* check for received packet and indicate up to network */
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- rx_intr_handler(ring, 0);
- }
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
- DBG_PRINT(INFO_DBG,
- "%s: Out of memory in Rx Netpoll!!\n",
- dev->name);
- break;
- }
- }
- enable_irq(irq);
-}
-#endif
-
-/**
- * rx_intr_handler - Rx interrupt handler
- * @ring_data: per ring structure.
- * @budget: budget for napi processing.
- * Description:
- * Called if the interrupt is due to a received frame or if the
- * receive ring contains fresh, as yet unprocessed frames. It picks up
- * the RxD at which the last Rx processing stopped, sends the skb to the
- * OSM's Rx handler and then increments the offset.
- * Return Value:
- * No. of napi packets processed.
- */
-static int rx_intr_handler(struct ring_info *ring_data, int budget)
-{
- int get_block, put_block;
- struct rx_curr_get_info get_info, put_info;
- struct RxD_t *rxdp;
- struct sk_buff *skb;
- int pkt_cnt = 0, napi_pkts = 0;
- int i;
- struct RxD1 *rxdp1;
- struct RxD3 *rxdp3;
-
- if (budget <= 0)
- return napi_pkts;
-
- get_info = ring_data->rx_curr_get_info;
- get_block = get_info.block_index;
- memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
- put_block = put_info.block_index;
- rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
-
- while (RXD_IS_UP2DT(rxdp)) {
- /*
- * If we are right behind the put index, the
- * ring is full.
- */
- if ((get_block == put_block) &&
- (get_info.offset + 1) == put_info.offset) {
- DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
- ring_data->dev->name);
- break;
- }
- skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
- if (skb == NULL) {
- DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
- ring_data->dev->name);
- return 0;
- }
- if (ring_data->rxd_mode == RXD_MODE_1) {
- rxdp1 = (struct RxD1 *)rxdp;
- dma_unmap_single(&ring_data->pdev->dev,
- (dma_addr_t)rxdp1->Buffer0_ptr,
- ring_data->mtu +
- HEADER_ETHERNET_II_802_3_SIZE +
- HEADER_802_2_SIZE +
- HEADER_SNAP_SIZE,
- DMA_FROM_DEVICE);
- } else if (ring_data->rxd_mode == RXD_MODE_3B) {
- rxdp3 = (struct RxD3 *)rxdp;
- dma_sync_single_for_cpu(&ring_data->pdev->dev,
- (dma_addr_t)rxdp3->Buffer0_ptr,
- BUF0_LEN, DMA_FROM_DEVICE);
- dma_unmap_single(&ring_data->pdev->dev,
- (dma_addr_t)rxdp3->Buffer2_ptr,
- ring_data->mtu + 4, DMA_FROM_DEVICE);
- }
- prefetch(skb->data);
- rx_osm_handler(ring_data, rxdp);
- get_info.offset++;
- ring_data->rx_curr_get_info.offset = get_info.offset;
- rxdp = ring_data->rx_blocks[get_block].
- rxds[get_info.offset].virt_addr;
- if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
- get_info.offset = 0;
- ring_data->rx_curr_get_info.offset = get_info.offset;
- get_block++;
- if (get_block == ring_data->block_count)
- get_block = 0;
- ring_data->rx_curr_get_info.block_index = get_block;
- rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
- }
-
- if (ring_data->nic->config.napi) {
- budget--;
- napi_pkts++;
- if (!budget)
- break;
- }
- pkt_cnt++;
- if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
- break;
- }
- if (ring_data->lro) {
- /* Clear all LRO sessions before exiting */
- for (i = 0; i < MAX_LRO_SESSIONS; i++) {
- struct lro *lro = &ring_data->lro0_n[i];
- if (lro->in_use) {
- update_L3L4_header(ring_data->nic, lro);
- queue_rx_frame(lro->parent, lro->vlan_tag);
- clear_lro_session(lro);
- }
- }
- }
- return napi_pkts;
-}
-
-/**
- * tx_intr_handler - Transmit interrupt handler
- * @fifo_data : fifo data pointer
- * Description:
- * Called when an interrupt is raised to indicate DMA completion of a
- * Tx packet. It identifies the last TxD whose buffer was freed and
- * frees all skbs whose data has already been DMA'ed into the NIC's
- * internal memory.
- * Return Value:
- * NONE
- */
-
-static void tx_intr_handler(struct fifo_info *fifo_data)
-{
- struct s2io_nic *nic = fifo_data->nic;
- struct tx_curr_get_info get_info, put_info;
- struct sk_buff *skb = NULL;
- struct TxD *txdlp;
- int pkt_cnt = 0;
- unsigned long flags = 0;
- u8 err_mask;
- struct stat_block *stats = nic->mac_control.stats_info;
- struct swStat *swstats = &stats->sw_stat;
-
- if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
- return;
-
- get_info = fifo_data->tx_curr_get_info;
- memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
- txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
- while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
- (get_info.offset != put_info.offset) &&
- (txdlp->Host_Control)) {
- /* Check for TxD errors */
- if (txdlp->Control_1 & TXD_T_CODE) {
- unsigned long long err;
- err = txdlp->Control_1 & TXD_T_CODE;
- if (err & 0x1) {
- swstats->parity_err_cnt++;
- }
-
- /* update t_code statistics */
- err_mask = err >> 48;
- switch (err_mask) {
- case 2:
- swstats->tx_buf_abort_cnt++;
- break;
-
- case 3:
- swstats->tx_desc_abort_cnt++;
- break;
-
- case 7:
- swstats->tx_parity_err_cnt++;
- break;
-
- case 10:
- swstats->tx_link_loss_cnt++;
- break;
-
- case 15:
- swstats->tx_list_proc_err_cnt++;
- break;
- }
- }
-
- skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
- if (skb == NULL) {
- spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
- DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
- __func__);
- return;
- }
- pkt_cnt++;
-
- /* Updating the statistics block */
- swstats->mem_freed += skb->truesize;
- dev_consume_skb_irq(skb);
-
- get_info.offset++;
- if (get_info.offset == get_info.fifo_len + 1)
- get_info.offset = 0;
- txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
- fifo_data->tx_curr_get_info.offset = get_info.offset;
- }
-
- s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
-
- spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
-}
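-
-/*
- * Worked example for the t_code accounting above (derived from the code):
- * the transfer code is extracted with "err >> 48", so a code of 2 counts a
- * Tx buffer abort, 3 a descriptor abort, 7 a Tx parity error, 10 a link
- * loss and 15 a list-processing error.
- */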
-
-/**
- * s2io_mdio_write - Function to write to the MDIO registers
- * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
- * @addr : address value
- * @value : data value
- * @dev : pointer to net_device structure
- * Description:
- * This function is used to write values to the MDIO registers.
- * Return Value:
- * NONE
- */
-static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
- struct net_device *dev)
-{
- u64 val64;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- /* address transaction */
- val64 = MDIO_MMD_INDX_ADDR(addr) |
- MDIO_MMD_DEV_ADDR(mmd_type) |
- MDIO_MMS_PRT_ADDR(0x0);
- writeq(val64, &bar0->mdio_control);
- val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
- writeq(val64, &bar0->mdio_control);
- udelay(100);
-
- /* Data transaction */
- val64 = MDIO_MMD_INDX_ADDR(addr) |
- MDIO_MMD_DEV_ADDR(mmd_type) |
- MDIO_MMS_PRT_ADDR(0x0) |
- MDIO_MDIO_DATA(value) |
- MDIO_OP(MDIO_OP_WRITE_TRANS);
- writeq(val64, &bar0->mdio_control);
- val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
- writeq(val64, &bar0->mdio_control);
- udelay(100);
-
- val64 = MDIO_MMD_INDX_ADDR(addr) |
- MDIO_MMD_DEV_ADDR(mmd_type) |
- MDIO_MMS_PRT_ADDR(0x0) |
- MDIO_OP(MDIO_OP_READ_TRANS);
- writeq(val64, &bar0->mdio_control);
- val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
- writeq(val64, &bar0->mdio_control);
- udelay(100);
-}
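-
-/*
- * Usage sketch (illustrative, mirroring the DOM update in
- * s2io_updt_xpak_counter() below): an MDIO write is normally paired with a
- * read-back through the same clause-45 address/data sequence, e.g.
- *
- * s2io_mdio_write(MDIO_MMD_PMAPMD, 0xA100, 0, dev);
- * val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, 0xA100, dev);
- */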
-
-/**
- * s2io_mdio_read - Function to read from the MDIO registers
- * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
- * @addr : address value
- * @dev : pointer to net_device structure
- * Description:
- * This function is used to read values from the MDIO registers.
- * Return Value:
- * The value read from the MDIO register.
- */
-static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
-{
- u64 val64 = 0x0;
- u64 rval64 = 0x0;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- /* address transaction */
- val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
- | MDIO_MMD_DEV_ADDR(mmd_type)
- | MDIO_MMS_PRT_ADDR(0x0));
- writeq(val64, &bar0->mdio_control);
- val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
- writeq(val64, &bar0->mdio_control);
- udelay(100);
-
- /* Data transaction */
- val64 = MDIO_MMD_INDX_ADDR(addr) |
- MDIO_MMD_DEV_ADDR(mmd_type) |
- MDIO_MMS_PRT_ADDR(0x0) |
- MDIO_OP(MDIO_OP_READ_TRANS);
- writeq(val64, &bar0->mdio_control);
- val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
- writeq(val64, &bar0->mdio_control);
- udelay(100);
-
- /* Read the value from regs */
- rval64 = readq(&bar0->mdio_control);
- rval64 = rval64 & 0xFFFF0000;
- rval64 = rval64 >> 16;
- return rval64;
-}
-
-/**
- * s2io_chk_xpak_counter - Function to check the status of the xpak counters
- * @counter : counter value to be updated
- * @regs_stat : registers status
- * @index : index
- * @flag : flag to indicate the status
- * @type : counter type
- * Description:
- * This function checks the status of the xpak counters.
- * Return Value:
- * NONE
- */
-
-static void s2io_chk_xpak_counter(u64 *counter, u64 *regs_stat, u32 index,
- u16 flag, u16 type)
-{
- u64 mask = 0x3;
- u64 val64;
- int i;
- for (i = 0; i < index; i++)
- mask = mask << 0x2;
-
- if (flag > 0) {
- *counter = *counter + 1;
- val64 = *regs_stat & mask;
- val64 = val64 >> (index * 0x2);
- val64 = val64 + 1;
- if (val64 == 3) {
- switch (type) {
- case 1:
- DBG_PRINT(ERR_DBG,
- "Take Xframe NIC out of service.\n");
- DBG_PRINT(ERR_DBG,
-"Excessive temperatures may result in premature transceiver failure.\n");
- break;
- case 2:
- DBG_PRINT(ERR_DBG,
- "Take Xframe NIC out of service.\n");
- DBG_PRINT(ERR_DBG,
-"Excessive bias currents may indicate imminent laser diode failure.\n");
- break;
- case 3:
- DBG_PRINT(ERR_DBG,
- "Take Xframe NIC out of service.\n");
- DBG_PRINT(ERR_DBG,
-"Excessive laser output power may saturate far-end receiver.\n");
- break;
- default:
- DBG_PRINT(ERR_DBG,
- "Incorrect XPAK Alarm type\n");
- }
- val64 = 0x0;
- }
- val64 = val64 << (index * 0x2);
- *regs_stat = (*regs_stat & (~mask)) | (val64);
-
- } else {
- *regs_stat = *regs_stat & (~mask);
- }
-}
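-
-/*
- * Worked example (illustrative): for index = 2 the mask covers bits 5:4 of
- * *regs_stat. Three consecutive calls with the flag set step the two-bit
- * field 00 -> 01 -> 10 -> 11; the call that reaches 11 prints the
- * out-of-service alarm and clears the field back to 00, while any call
- * with the flag clear resets it immediately.
- */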
-
-/**
- * s2io_updt_xpak_counter - Function to update the xpak counters
- * @dev : pointer to net_device struct
- * Description:
- * This function updates the xpak counters.
- * Return Value:
- * NONE
- */
-static void s2io_updt_xpak_counter(struct net_device *dev)
-{
- u16 flag = 0x0;
- u16 type = 0x0;
- u16 val16 = 0x0;
- u64 val64 = 0x0;
- u64 addr = 0x0;
-
- struct s2io_nic *sp = netdev_priv(dev);
- struct stat_block *stats = sp->mac_control.stats_info;
- struct xpakStat *xstats = &stats->xpak_stat;
-
- /* Check the communication with the MDIO slave */
- addr = MDIO_CTRL1;
- val64 = 0x0;
- val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
- if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
- DBG_PRINT(ERR_DBG,
- "ERR: MDIO slave access failed - Returned %llx\n",
- (unsigned long long)val64);
- return;
- }
-
- /* Check for the expected value of control reg 1 */
- if (val64 != MDIO_CTRL1_SPEED10G) {
- DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
- "Returned: %llx- Expected: 0x%x\n",
- (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
- return;
- }
-
- /* Loading the DOM register to MDIO register */
- addr = 0xA100;
- s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
- val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
-
- /* Reading the Alarm flags */
- addr = 0xA070;
- val64 = 0x0;
- val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
-
- flag = CHECKBIT(val64, 0x7);
- type = 1;
- s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
- &xstats->xpak_regs_stat,
- 0x0, flag, type);
-
- if (CHECKBIT(val64, 0x6))
- xstats->alarm_transceiver_temp_low++;
-
- flag = CHECKBIT(val64, 0x3);
- type = 2;
- s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
- &xstats->xpak_regs_stat,
- 0x2, flag, type);
-
- if (CHECKBIT(val64, 0x2))
- xstats->alarm_laser_bias_current_low++;
-
- flag = CHECKBIT(val64, 0x1);
- type = 3;
- s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
- &xstats->xpak_regs_stat,
- 0x4, flag, type);
-
- if (CHECKBIT(val64, 0x0))
- xstats->alarm_laser_output_power_low++;
-
- /* Reading the Warning flags */
- addr = 0xA074;
- val64 = 0x0;
- val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
-
- if (CHECKBIT(val64, 0x7))
- xstats->warn_transceiver_temp_high++;
-
- if (CHECKBIT(val64, 0x6))
- xstats->warn_transceiver_temp_low++;
-
- if (CHECKBIT(val64, 0x3))
- xstats->warn_laser_bias_current_high++;
-
- if (CHECKBIT(val64, 0x2))
- xstats->warn_laser_bias_current_low++;
-
- if (CHECKBIT(val64, 0x1))
- xstats->warn_laser_output_power_high++;
-
- if (CHECKBIT(val64, 0x0))
- xstats->warn_laser_output_power_low++;
-}
-
-/**
- * wait_for_cmd_complete - waits for a command to complete.
- * @addr: address
- * @busy_bit: bit to check for busy
- * @bit_state: state to check
- * @may_sleep: whether we may sleep while waiting for the command to
- * complete
- * Description: Waits for a command written to the RMAC ADDR/DATA
- * registers to complete and returns success or failure depending on
- * whether the command completed in time.
- * Return value:
- * SUCCESS on success and FAILURE on failure.
- */
-
-static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
- int bit_state, bool may_sleep)
-{
- int ret = FAILURE, cnt = 0, delay = 1;
- u64 val64;
-
- if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
- return FAILURE;
-
- do {
- val64 = readq(addr);
- if (bit_state == S2IO_BIT_RESET) {
- if (!(val64 & busy_bit)) {
- ret = SUCCESS;
- break;
- }
- } else {
- if (val64 & busy_bit) {
- ret = SUCCESS;
- break;
- }
- }
-
- if (!may_sleep)
- mdelay(delay);
- else
- msleep(delay);
-
- if (++cnt >= 10)
- delay = 50;
- } while (cnt < 20);
- return ret;
-}
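-
-/*
- * Timing note for the poll loop above (derived from the code): the first
- * ten iterations sleep 1 ms each, after which the delay becomes 50 ms for
- * the remaining ten, so the worst case is roughly 10 * 1 + 10 * 50 = 510 ms
- * before FAILURE is returned.
- */
-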
-/**
- * check_pci_device_id - Checks if the device id is supported
- * @id : device id
- * Description: Function to check if the pci device id is supported by driver.
- * Return value: Actual device id if supported else PCI_ANY_ID
- */
-static u16 check_pci_device_id(u16 id)
-{
- switch (id) {
- case PCI_DEVICE_ID_HERC_WIN:
- case PCI_DEVICE_ID_HERC_UNI:
- return XFRAME_II_DEVICE;
- case PCI_DEVICE_ID_S2IO_UNI:
- case PCI_DEVICE_ID_S2IO_WIN:
- return XFRAME_I_DEVICE;
- default:
- return PCI_ANY_ID;
- }
-}
-
-/**
- * s2io_reset - Resets the card.
- * @sp : private member of the device structure.
- * Description: Function to reset the card. It also restores the
- * previously saved PCI configuration space registers, since the card
- * reset clears the configuration space.
- * Return value:
- * void.
- */
-
-static void s2io_reset(struct s2io_nic *sp)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64;
- u16 subid, pci_cmd;
- int i;
- u16 val16;
- unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
- unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
- struct stat_block *stats;
- struct swStat *swstats;
-
- DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
- __func__, pci_name(sp->pdev));
-
- /* Back up the PCI-X CMD reg; we don't want to lose the MMRBC, OST settings */
- pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
-
- val64 = SW_RESET_ALL;
- writeq(val64, &bar0->sw_reset);
- if (strstr(sp->product_name, "CX4"))
- msleep(750);
- msleep(250);
- for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
-
- /* Restore the PCI state saved during initialization. */
- pci_restore_state(sp->pdev);
- pci_read_config_word(sp->pdev, 0x2, &val16);
- if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
- break;
- msleep(200);
- }
-
- if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
- DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
-
- pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
-
- s2io_init_pci(sp);
-
- /* Set swapper to enable I/O register access */
- s2io_set_swapper(sp);
-
- /* restore mac_addr entries */
- do_s2io_restore_unicast_mc(sp);
-
- /* Restore the MSIX table entries from local variables */
- restore_xmsi_data(sp);
-
- /* Clear certain PCI/PCI-X fields after reset */
- if (sp->device_type == XFRAME_II_DEVICE) {
- /* Clear "detected parity error" bit */
- pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
-
- /* Clearing PCIX Ecc status register */
- pci_write_config_dword(sp->pdev, 0x68, 0x7C);
-
- /* Clearing PCI_STATUS error reflected here */
- writeq(s2BIT(62), &bar0->txpic_int_reg);
- }
-
- /* Reset device statistics maintained by OS */
- memset(&sp->stats, 0, sizeof(struct net_device_stats));
-
- stats = sp->mac_control.stats_info;
- swstats = &stats->sw_stat;
-
- /* save link up/down time/cnt, reset/memory/watchdog cnt */
- up_cnt = swstats->link_up_cnt;
- down_cnt = swstats->link_down_cnt;
- up_time = swstats->link_up_time;
- down_time = swstats->link_down_time;
- reset_cnt = swstats->soft_reset_cnt;
- mem_alloc_cnt = swstats->mem_allocated;
- mem_free_cnt = swstats->mem_freed;
- watchdog_cnt = swstats->watchdog_timer_cnt;
-
- memset(stats, 0, sizeof(struct stat_block));
-
- /* restore link up/down time/cnt, reset/memory/watchdog cnt */
- swstats->link_up_cnt = up_cnt;
- swstats->link_down_cnt = down_cnt;
- swstats->link_up_time = up_time;
- swstats->link_down_time = down_time;
- swstats->soft_reset_cnt = reset_cnt;
- swstats->mem_allocated = mem_alloc_cnt;
- swstats->mem_freed = mem_free_cnt;
- swstats->watchdog_timer_cnt = watchdog_cnt;
-
- /* SXE-002: Configure link and activity LED to turn it off */
- subid = sp->pdev->subsystem_device;
- if (((subid & 0xFF) >= 0x07) &&
- (sp->device_type == XFRAME_I_DEVICE)) {
- val64 = readq(&bar0->gpio_control);
- val64 |= 0x0000800000000000ULL;
- writeq(val64, &bar0->gpio_control);
- val64 = 0x0411040400000000ULL;
- writeq(val64, (void __iomem *)bar0 + 0x2700);
- }
-
- /*
- * Clear spurious ECC interrupts that would have occurred on
- * XFRAME II cards after reset.
- */
- if (sp->device_type == XFRAME_II_DEVICE) {
- val64 = readq(&bar0->pcc_err_reg);
- writeq(val64, &bar0->pcc_err_reg);
- }
-
- sp->device_enabled_once = false;
-}
-
-/**
- * s2io_set_swapper - to set the swapper control on the card
- * @sp : private member of the device structure,
- * pointer to the s2io_nic structure.
- * Description: Function to set the swapper control on the card
- * correctly depending on the 'endianness' of the system.
- * Return value:
- * SUCCESS on success and FAILURE on failure.
- */
-
-static int s2io_set_swapper(struct s2io_nic *sp)
-{
- struct net_device *dev = sp->dev;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64, valt, valr;
-
- /*
- * Set proper endian settings and verify the same by reading
- * the PIF Feed-back register.
- */
-
- val64 = readq(&bar0->pif_rd_swapper_fb);
- if (val64 != 0x0123456789ABCDEFULL) {
- int i = 0;
- static const u64 value[] = {
- 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
- 0x8100008181000081ULL, /* FE=1, SE=0 */
- 0x4200004242000042ULL, /* FE=0, SE=1 */
- 0 /* FE=0, SE=0 */
- };
-
- while (i < 4) {
- writeq(value[i], &bar0->swapper_ctrl);
- val64 = readq(&bar0->pif_rd_swapper_fb);
- if (val64 == 0x0123456789ABCDEFULL)
- break;
- i++;
- }
- if (i == 4) {
- DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
- "feedback read %llx\n",
- dev->name, (unsigned long long)val64);
- return FAILURE;
- }
- valr = value[i];
- } else {
- valr = readq(&bar0->swapper_ctrl);
- }
-
- valt = 0x0123456789ABCDEFULL;
- writeq(valt, &bar0->xmsi_address);
- val64 = readq(&bar0->xmsi_address);
-
- if (val64 != valt) {
- int i = 0;
- static const u64 value[] = {
- 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
- 0x0081810000818100ULL, /* FE=1, SE=0 */
- 0x0042420000424200ULL, /* FE=0, SE=1 */
- 0 /* FE=0, SE=0 */
- };
-
- while (i < 4) {
- writeq((value[i] | valr), &bar0->swapper_ctrl);
- writeq(valt, &bar0->xmsi_address);
- val64 = readq(&bar0->xmsi_address);
- if (val64 == valt)
- break;
- i++;
- }
- if (i == 4) {
- unsigned long long x = val64;
- DBG_PRINT(ERR_DBG,
- "Write failed, Xmsi_addr reads:0x%llx\n", x);
- return FAILURE;
- }
- }
- val64 = readq(&bar0->swapper_ctrl);
- val64 &= 0xFFFF000000000000ULL;
-
-#ifdef __BIG_ENDIAN
- /*
- * The device is set to big-endian format by default, so a
- * big-endian driver need not set anything.
- */
- val64 |= (SWAPPER_CTRL_TXP_FE |
- SWAPPER_CTRL_TXP_SE |
- SWAPPER_CTRL_TXD_R_FE |
- SWAPPER_CTRL_TXD_W_FE |
- SWAPPER_CTRL_TXF_R_FE |
- SWAPPER_CTRL_RXD_R_FE |
- SWAPPER_CTRL_RXD_W_FE |
- SWAPPER_CTRL_RXF_W_FE |
- SWAPPER_CTRL_XMSI_FE |
- SWAPPER_CTRL_STATS_FE |
- SWAPPER_CTRL_STATS_SE);
- if (sp->config.intr_type == INTA)
- val64 |= SWAPPER_CTRL_XMSI_SE;
- writeq(val64, &bar0->swapper_ctrl);
-#else
- /*
- * Initially we enable all bits to make it accessible by the
- * driver, then we selectively enable only those bits that
- * we want to set.
- */
- val64 |= (SWAPPER_CTRL_TXP_FE |
- SWAPPER_CTRL_TXP_SE |
- SWAPPER_CTRL_TXD_R_FE |
- SWAPPER_CTRL_TXD_R_SE |
- SWAPPER_CTRL_TXD_W_FE |
- SWAPPER_CTRL_TXD_W_SE |
- SWAPPER_CTRL_TXF_R_FE |
- SWAPPER_CTRL_RXD_R_FE |
- SWAPPER_CTRL_RXD_R_SE |
- SWAPPER_CTRL_RXD_W_FE |
- SWAPPER_CTRL_RXD_W_SE |
- SWAPPER_CTRL_RXF_W_FE |
- SWAPPER_CTRL_XMSI_FE |
- SWAPPER_CTRL_STATS_FE |
- SWAPPER_CTRL_STATS_SE);
- if (sp->config.intr_type == INTA)
- val64 |= SWAPPER_CTRL_XMSI_SE;
- writeq(val64, &bar0->swapper_ctrl);
-#endif
- val64 = readq(&bar0->swapper_ctrl);
-
- /*
- * Verifying if endian settings are accurate by reading a
- * feedback register.
- */
- val64 = readq(&bar0->pif_rd_swapper_fb);
- if (val64 != 0x0123456789ABCDEFULL) {
- /* Endian settings are incorrect; this needs another look. */
- DBG_PRINT(ERR_DBG,
- "%s: Endian settings are wrong, feedback read %llx\n",
- dev->name, (unsigned long long)val64);
- return FAILURE;
- }
-
- return SUCCESS;
-}
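-
-/*
- * Worked example for the probe above (illustrative): on a host whose
- * swapper is misconfigured, the 0x0123456789ABCDEF feedback pattern reads
- * back byte-reversed as 0xEFCDAB8967452301, so the loop walks the
- * candidate swapper_ctrl values until the pattern comes back intact.
- */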
-
-static int wait_for_msix_trans(struct s2io_nic *nic, int i)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- u64 val64;
- int ret = 0, cnt = 0;
-
- do {
- val64 = readq(&bar0->xmsi_access);
- if (!(val64 & s2BIT(15)))
- break;
- mdelay(1);
- cnt++;
- } while (cnt < 5);
- if (cnt == 5) {
- DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
- ret = 1;
- }
-
- return ret;
-}
-
-static void restore_xmsi_data(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- u64 val64;
- int i, msix_index;
-
- if (nic->device_type == XFRAME_I_DEVICE)
- return;
-
- for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
- msix_index = (i) ? ((i-1) * 8 + 1) : 0;
- writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
- writeq(nic->msix_info[i].data, &bar0->xmsi_data);
- val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
- writeq(val64, &bar0->xmsi_access);
- if (wait_for_msix_trans(nic, msix_index))
- DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
- __func__, msix_index);
- }
-}
-
-static void store_xmsi_data(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- u64 val64, addr, data;
- int i, msix_index;
-
- if (nic->device_type == XFRAME_I_DEVICE)
- return;
-
- /* Store and display */
- for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
- msix_index = (i) ? ((i-1) * 8 + 1) : 0;
- val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
- writeq(val64, &bar0->xmsi_access);
- if (wait_for_msix_trans(nic, msix_index)) {
- DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
- __func__, msix_index);
- continue;
- }
- addr = readq(&bar0->xmsi_address);
- data = readq(&bar0->xmsi_data);
- if (addr && data) {
- nic->msix_info[i].addr = addr;
- nic->msix_info[i].data = data;
- }
- }
-}
-
-static int s2io_enable_msi_x(struct s2io_nic *nic)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- u64 rx_mat;
- u16 msi_control; /* Temp variable */
- int ret, i, j, msix_indx = 1;
- int size;
- struct stat_block *stats = nic->mac_control.stats_info;
- struct swStat *swstats = &stats->sw_stat;
-
- size = nic->num_entries * sizeof(struct msix_entry);
- nic->entries = kzalloc(size, GFP_KERNEL);
- if (!nic->entries) {
- DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
- __func__);
- swstats->mem_alloc_fail_cnt++;
- return -ENOMEM;
- }
- swstats->mem_allocated += size;
-
- size = nic->num_entries * sizeof(struct s2io_msix_entry);
- nic->s2io_entries = kzalloc(size, GFP_KERNEL);
- if (!nic->s2io_entries) {
- DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
- __func__);
- swstats->mem_alloc_fail_cnt++;
- kfree(nic->entries);
- swstats->mem_freed
- += (nic->num_entries * sizeof(struct msix_entry));
- return -ENOMEM;
- }
- swstats->mem_allocated += size;
-
- nic->entries[0].entry = 0;
- nic->s2io_entries[0].entry = 0;
- nic->s2io_entries[0].in_use = MSIX_FLG;
- nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
- nic->s2io_entries[0].arg = &nic->mac_control.fifos;
-
- for (i = 1; i < nic->num_entries; i++) {
- nic->entries[i].entry = ((i - 1) * 8) + 1;
- nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
- nic->s2io_entries[i].arg = NULL;
- nic->s2io_entries[i].in_use = 0;
- }
-
- rx_mat = readq(&bar0->rx_mat);
- for (j = 0; j < nic->config.rx_ring_num; j++) {
- rx_mat |= RX_MAT_SET(j, msix_indx);
- nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
- nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
- nic->s2io_entries[j+1].in_use = MSIX_FLG;
- msix_indx += 8;
- }
- writeq(rx_mat, &bar0->rx_mat);
- readq(&bar0->rx_mat);
-
- ret = pci_enable_msix_range(nic->pdev, nic->entries,
- nic->num_entries, nic->num_entries);
- /* Fail init on error or if we get fewer vectors than the minimum required */
- if (ret < 0) {
- DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
- kfree(nic->entries);
- swstats->mem_freed += nic->num_entries *
- sizeof(struct msix_entry);
- kfree(nic->s2io_entries);
- swstats->mem_freed += nic->num_entries *
- sizeof(struct s2io_msix_entry);
- nic->entries = NULL;
- nic->s2io_entries = NULL;
- return -ENOMEM;
- }
-
- /*
- * To enable MSI-X, MSI also needs to be enabled, due to a bug
- * in the herc NIC. (Temp change, needs to be removed later)
- */
- pci_read_config_word(nic->pdev, 0x42, &msi_control);
- msi_control |= 0x1; /* Enable MSI */
- pci_write_config_word(nic->pdev, 0x42, msi_control);
-
- return 0;
-}
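-
-/*
- * Worked example of the vector layout set up above (illustrative, assuming
- * num_entries is rx_ring_num + 1 as the loops imply): entry 0 is the alarm
- * vector and ring j uses MSI-X table index j * 8 + 1, so with eight rx
- * rings the table indices are 0, 1, 9, 17, 25, 33, 41, 49 and 57.
- */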
-
-/* Handle software interrupt used during MSI(X) test */
-static irqreturn_t s2io_test_intr(int irq, void *dev_id)
-{
- struct s2io_nic *sp = dev_id;
-
- sp->msi_detected = 1;
- wake_up(&sp->msi_wait);
-
- return IRQ_HANDLED;
-}
-
-/* Test interrupt path by forcing a software IRQ */
-static int s2io_test_msi(struct s2io_nic *sp)
-{
- struct pci_dev *pdev = sp->pdev;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- int err;
- u64 val64, saved64;
-
- err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
- sp->name, sp);
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
- sp->dev->name, pci_name(pdev), pdev->irq);
- return err;
- }
-
- init_waitqueue_head(&sp->msi_wait);
- sp->msi_detected = 0;
-
- saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
- val64 |= SCHED_INT_CTRL_ONE_SHOT;
- val64 |= SCHED_INT_CTRL_TIMER_EN;
- val64 |= SCHED_INT_CTRL_INT2MSI(1);
- writeq(val64, &bar0->scheduled_int_ctrl);
-
- wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
-
- if (!sp->msi_detected) {
- /* MSI(X) test failed, go back to INTx mode */
- DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
- "using MSI(X) during test\n",
- sp->dev->name, pci_name(pdev));
-
- err = -EOPNOTSUPP;
- }
-
- free_irq(sp->entries[1].vector, sp);
-
- writeq(saved64, &bar0->scheduled_int_ctrl);
-
- return err;
-}
-
-static void remove_msix_isr(struct s2io_nic *sp)
-{
- int i;
- u16 msi_control;
-
- for (i = 0; i < sp->num_entries; i++) {
- if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
- int vector = sp->entries[i].vector;
- void *arg = sp->s2io_entries[i].arg;
- free_irq(vector, arg);
- }
- }
-
- kfree(sp->entries);
- kfree(sp->s2io_entries);
- sp->entries = NULL;
- sp->s2io_entries = NULL;
-
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
-
- pci_disable_msix(sp->pdev);
-}
-
-static void remove_inta_isr(struct s2io_nic *sp)
-{
- free_irq(sp->pdev->irq, sp->dev);
-}
-
-/* ********************************************************* *
- * Functions defined below concern the OS part of the driver *
- * ********************************************************* */
-
-/**
- * s2io_open - open entry point of the driver
- * @dev : pointer to the device structure.
- * Description:
- * This function is the open entry point of the driver. It mainly calls a
- * function to allocate Rx buffers and inserts them into the buffer
- * descriptors and then enables the Rx part of the NIC.
- * Return value:
- * 0 on success and an appropriate (-)ve integer as defined in errno.h
- * file on failure.
- */
-
-static int s2io_open(struct net_device *dev)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
- int err = 0;
-
- /*
- * Make sure the link is off by default every time the
- * NIC is initialized.
- */
- netif_carrier_off(dev);
- sp->last_link_state = 0;
-
- /* Initialize H/W and enable interrupts */
- err = s2io_card_up(sp);
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
- dev->name);
- goto hw_init_failed;
- }
-
- if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
- DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
- s2io_card_down(sp);
- err = -ENODEV;
- goto hw_init_failed;
- }
- s2io_start_all_tx_queue(sp);
- return 0;
-
-hw_init_failed:
- if (sp->config.intr_type == MSI_X) {
- if (sp->entries) {
- kfree(sp->entries);
- swstats->mem_freed += sp->num_entries *
- sizeof(struct msix_entry);
- }
- if (sp->s2io_entries) {
- kfree(sp->s2io_entries);
- swstats->mem_freed += sp->num_entries *
- sizeof(struct s2io_msix_entry);
- }
- }
- return err;
-}
-
-/**
- * s2io_close -close entry point of the driver
- * @dev : device pointer.
- * Description:
- * This is the stop entry point of the driver. It needs to undo exactly
- * whatever was done by the open entry point, thus it's usually referred
- * to as the close function. Among other things, this function mainly
- * stops the Rx side of the NIC and frees all the Rx buffers in the Rx
- * rings.
- * Return value:
- * 0 on success and an appropriate (-)ve integer as defined in errno.h
- * file on failure.
- */
-
-static int s2io_close(struct net_device *dev)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- struct config_param *config = &sp->config;
- u64 tmp64;
- int offset;
-
- /*
- * Return if the device is already closed.
- * Can happen when s2io_card_up failed in change_mtu.
- */
- if (!is_s2io_card_up(sp))
- return 0;
-
- s2io_stop_all_tx_queue(sp);
- /* delete all populated mac entries */
- for (offset = 1; offset < config->max_mc_addr; offset++) {
- tmp64 = do_s2io_read_unicast_mc(sp, offset);
- if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
- do_s2io_delete_unicast_mc(sp, tmp64);
- }
-
- s2io_card_down(sp);
-
- return 0;
-}
-
-/**
- * s2io_xmit - Tx entry point of the driver
- * @skb : the socket buffer containing the Tx data.
- * @dev : device pointer.
- * Description :
- * This function is the Tx entry point of the driver. S2IO NIC supports
- * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
- * NOTE: when the device can't queue the packet, only the trans_start
- * variable is not updated.
- * Return value:
- * NETDEV_TX_OK on success, NETDEV_TX_BUSY if the queue is stopped.
- */
-
-static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
- register u64 val64;
- struct TxD *txdp;
- struct TxFIFO_element __iomem *tx_fifo;
- unsigned long flags = 0;
- u16 vlan_tag = 0;
- struct fifo_info *fifo = NULL;
- int offload_type;
- int enable_per_list_interrupt = 0;
- struct config_param *config = &sp->config;
- struct mac_info *mac_control = &sp->mac_control;
- struct stat_block *stats = mac_control->stats_info;
- struct swStat *swstats = &stats->sw_stat;
-
- DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
-
- if (unlikely(skb->len <= 0)) {
- DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (!is_s2io_card_up(sp)) {
- DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
- dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- queue = 0;
- if (skb_vlan_tag_present(skb))
- vlan_tag = skb_vlan_tag_get(skb);
- if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *ip;
- struct tcphdr *th;
- ip = ip_hdr(skb);
-
- if (!ip_is_fragment(ip)) {
- th = (struct tcphdr *)(((unsigned char *)ip) +
- ip->ihl*4);
-
- if (ip->protocol == IPPROTO_TCP) {
- queue_len = sp->total_tcp_fifos;
- queue = (ntohs(th->source) +
- ntohs(th->dest)) &
- sp->fifo_selector[queue_len - 1];
- if (queue >= queue_len)
- queue = queue_len - 1;
- } else if (ip->protocol == IPPROTO_UDP) {
- queue_len = sp->total_udp_fifos;
- queue = (ntohs(th->source) +
- ntohs(th->dest)) &
- sp->fifo_selector[queue_len - 1];
- if (queue >= queue_len)
- queue = queue_len - 1;
- queue += sp->udp_fifo_idx;
- if (skb->len > 1024)
- enable_per_list_interrupt = 1;
- }
- }
- }
- } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
- /* get fifo number based on skb->priority value */
- queue = config->fifo_mapping
- [skb->priority & (MAX_TX_FIFOS - 1)];
- fifo = &mac_control->fifos[queue];
-
- spin_lock_irqsave(&fifo->tx_lock, flags);
-
- if (sp->config.multiq) {
- if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
- return NETDEV_TX_BUSY;
- }
- } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
- if (netif_queue_stopped(dev)) {
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
- return NETDEV_TX_BUSY;
- }
- }
-
- put_off = (u16)fifo->tx_curr_put_info.offset;
- get_off = (u16)fifo->tx_curr_get_info.offset;
- txdp = fifo->list_info[put_off].list_virt_addr;
-
- queue_len = fifo->tx_curr_put_info.fifo_len + 1;
- /* Avoid "put" pointer going beyond "get" pointer */
- if (txdp->Host_Control ||
- ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
- DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
- s2io_stop_tx_queue(sp, fifo->fifo_no);
- dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
- return NETDEV_TX_OK;
- }
-
- offload_type = s2io_offload_type(skb);
- if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- txdp->Control_1 |= TXD_TCP_LSO_EN;
- txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
- }
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
- TXD_TX_CKO_TCP_EN |
- TXD_TX_CKO_UDP_EN);
- }
- txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
- txdp->Control_1 |= TXD_LIST_OWN_XENA;
- txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
- if (enable_per_list_interrupt)
- if (put_off & (queue_len >> 5))
- txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
- if (vlan_tag) {
- txdp->Control_2 |= TXD_VLAN_ENABLE;
- txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
- }
-
- frg_len = skb_headlen(skb);
- txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
- frg_len, DMA_TO_DEVICE);
- if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
- goto pci_map_failed;
-
- txdp->Host_Control = (unsigned long)skb;
- txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
-
- frg_cnt = skb_shinfo(skb)->nr_frags;
- /* For fragmented SKB. */
- for (i = 0; i < frg_cnt; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- /* A '0' length fragment will be ignored */
- if (!skb_frag_size(frag))
- continue;
- txdp++;
- txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
- frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
- }
- txdp->Control_1 |= TXD_GATHER_CODE_LAST;
-
- tx_fifo = mac_control->tx_FIFO_start[queue];
- val64 = fifo->list_info[put_off].list_phy_addr;
- writeq(val64, &tx_fifo->TxDL_Pointer);
-
- val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
- TX_FIFO_LAST_LIST);
- if (offload_type)
- val64 |= TX_FIFO_SPECIAL_FUNC;
-
- writeq(val64, &tx_fifo->List_Control);
-
- put_off++;
- if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
- put_off = 0;
- fifo->tx_curr_put_info.offset = put_off;
-
- /* Avoid "put" pointer going beyond "get" pointer */
- if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
- swstats->fifo_full_cnt++;
- DBG_PRINT(TX_DBG,
- "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
- put_off, get_off);
- s2io_stop_tx_queue(sp, fifo->fifo_no);
- }
- swstats->mem_allocated += skb->truesize;
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
-
- if (sp->config.intr_type == MSI_X)
- tx_intr_handler(fifo);
-
- return NETDEV_TX_OK;
-
-pci_map_failed:
- swstats->pci_map_fail_cnt++;
- s2io_stop_tx_queue(sp, fifo->fifo_no);
- swstats->mem_freed += skb->truesize;
- dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&fifo->tx_lock, flags);
- return NETDEV_TX_OK;
-}
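-
-/*
- * Worked example of the full-FIFO checks above (illustrative): with
- * fifo_len = 8, queue_len is 9 and valid offsets are 0..8. If put_off is 8
- * the next slot wraps to 0, so a get_off of 0 means one more TxD would
- * collide with an unconsumed descriptor and the queue is stopped.
- */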
-
-static void
-s2io_alarm_handle(struct timer_list *t)
-{
- struct s2io_nic *sp = timer_container_of(sp, t, alarm_timer);
- struct net_device *dev = sp->dev;
-
- s2io_handle_errors(dev);
- mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
-}
-
-static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
-{
- struct ring_info *ring = (struct ring_info *)dev_id;
- struct s2io_nic *sp = ring->nic;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- if (unlikely(!is_s2io_card_up(sp)))
- return IRQ_HANDLED;
-
- if (sp->config.napi) {
- u8 __iomem *addr = NULL;
- u8 val8 = 0;
-
- addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
- addr += (7 - ring->ring_no);
- val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
- writeb(val8, addr);
- val8 = readb(addr);
- napi_schedule(&ring->napi);
- } else {
- rx_intr_handler(ring, 0);
- s2io_chk_rx_buffers(sp, ring);
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
-{
- int i;
- struct fifo_info *fifos = (struct fifo_info *)dev_id;
- struct s2io_nic *sp = fifos->nic;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- struct config_param *config = &sp->config;
- u64 reason;
-
- if (unlikely(!is_s2io_card_up(sp)))
- return IRQ_NONE;
-
- reason = readq(&bar0->general_int_status);
- if (unlikely(reason == S2IO_MINUS_ONE))
- /* Nothing much can be done. Get out */
- return IRQ_HANDLED;
-
- if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
- writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
-
- if (reason & GEN_INTR_TXPIC)
- s2io_txpic_intr_handle(sp);
-
- if (reason & GEN_INTR_TXTRAFFIC)
- writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
-
- for (i = 0; i < config->tx_fifo_num; i++)
- tx_intr_handler(&fifos[i]);
-
- writeq(sp->general_int_mask, &bar0->general_int_mask);
- readl(&bar0->general_int_status);
- return IRQ_HANDLED;
- }
- /* The interrupt was not raised by us */
- return IRQ_NONE;
-}
-
-static void s2io_txpic_intr_handle(struct s2io_nic *sp)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64;
-
- val64 = readq(&bar0->pic_int_status);
- if (val64 & PIC_INT_GPIO) {
- val64 = readq(&bar0->gpio_int_reg);
- if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
- (val64 & GPIO_INT_REG_LINK_UP)) {
- /*
- * This is an unstable state, so clear both the up and down
- * interrupts and let the adapter re-evaluate the link state.
- */
- val64 |= GPIO_INT_REG_LINK_DOWN;
- val64 |= GPIO_INT_REG_LINK_UP;
- writeq(val64, &bar0->gpio_int_reg);
- val64 = readq(&bar0->gpio_int_mask);
- val64 &= ~(GPIO_INT_MASK_LINK_UP |
- GPIO_INT_MASK_LINK_DOWN);
- writeq(val64, &bar0->gpio_int_mask);
- } else if (val64 & GPIO_INT_REG_LINK_UP) {
- val64 = readq(&bar0->adapter_status);
- /* Enable Adapter */
- val64 = readq(&bar0->adapter_control);
- val64 |= ADAPTER_CNTL_EN;
- writeq(val64, &bar0->adapter_control);
- val64 |= ADAPTER_LED_ON;
- writeq(val64, &bar0->adapter_control);
- if (!sp->device_enabled_once)
- sp->device_enabled_once = 1;
-
- s2io_link(sp, LINK_UP);
- /*
- * Unmask the link-down interrupt and mask the
- * link-up interrupt.
- */
- val64 = readq(&bar0->gpio_int_mask);
- val64 &= ~GPIO_INT_MASK_LINK_DOWN;
- val64 |= GPIO_INT_MASK_LINK_UP;
- writeq(val64, &bar0->gpio_int_mask);
-
- } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
- val64 = readq(&bar0->adapter_status);
- s2io_link(sp, LINK_DOWN);
- /* Link is down, so unmask the link-up interrupt */
- val64 = readq(&bar0->gpio_int_mask);
- val64 &= ~GPIO_INT_MASK_LINK_UP;
- val64 |= GPIO_INT_MASK_LINK_DOWN;
- writeq(val64, &bar0->gpio_int_mask);
-
- /* turn off LED */
- val64 = readq(&bar0->adapter_control);
- val64 = val64 & (~ADAPTER_LED_ON);
- writeq(val64, &bar0->adapter_control);
- }
- }
- val64 = readq(&bar0->gpio_int_mask);
-}
-
-/**
- * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
- * @value: alarm bits
- * @addr: address value
- * @cnt: counter variable
- * Description: Check for alarm and increment the counter
- * Return Value:
- * 1 - if alarm bit set
- * 0 - if alarm bit is not set
- */
-static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
- unsigned long long *cnt)
-{
- u64 val64;
- val64 = readq(addr);
- if (val64 & value) {
- writeq(val64, addr);
- (*cnt)++;
- return 1;
- }
- return 0;
-}
-
-/**
- * s2io_handle_errors - Xframe error indication handler
- * @dev_id: opaque handle to dev
- * Description: Handle alarms such as loss of link, single or
- * double ECC errors, critical and serious errors.
- * Return Value:
- * NONE
- */
-static void s2io_handle_errors(void *dev_id)
-{
- struct net_device *dev = (struct net_device *)dev_id;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 temp64 = 0, val64 = 0;
- int i = 0;
-
- struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
- struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
-
- if (!is_s2io_card_up(sp))
- return;
-
- if (pci_channel_offline(sp->pdev))
- return;
-
- memset(&sw_stat->ring_full_cnt, 0,
- sizeof(sw_stat->ring_full_cnt));
-
- /* Handling the XPAK counters update */
- if (stats->xpak_timer_count < 72000) {
- /* waiting for an hour */
- stats->xpak_timer_count++;
- } else {
- s2io_updt_xpak_counter(dev);
- /* reset the count to zero */
- stats->xpak_timer_count = 0;
- }
-
- /* Handling link status change error Intr */
- if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
- val64 = readq(&bar0->mac_rmac_err_reg);
- writeq(val64, &bar0->mac_rmac_err_reg);
- if (val64 & RMAC_LINK_STATE_CHANGE_INT)
- schedule_work(&sp->set_link_task);
- }
-
-	/* In case of a serious error, the device will be reset. */
- if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
- &sw_stat->serious_err_cnt))
- goto reset;
-
- /* Check for data parity error */
- if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
- &sw_stat->parity_err_cnt))
- goto reset;
-
- /* Check for ring full counter */
- if (sp->device_type == XFRAME_II_DEVICE) {
- val64 = readq(&bar0->ring_bump_counter1);
- for (i = 0; i < 4; i++) {
- temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
- temp64 >>= 64 - ((i+1)*16);
- sw_stat->ring_full_cnt[i] += temp64;
- }
-
- val64 = readq(&bar0->ring_bump_counter2);
- for (i = 0; i < 4; i++) {
- temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
- temp64 >>= 64 - ((i+1)*16);
- sw_stat->ring_full_cnt[i+4] += temp64;
- }
- }
-
- val64 = readq(&bar0->txdma_int_status);
-	/* check for pfc_err */
- if (val64 & TXDMA_PFC_INT) {
- if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
- PFC_MISC_0_ERR | PFC_MISC_1_ERR |
- PFC_PCIX_ERR,
- &bar0->pfc_err_reg,
- &sw_stat->pfc_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
- &bar0->pfc_err_reg,
- &sw_stat->pfc_err_cnt);
- }
-
-	/* check for tda_err */
- if (val64 & TXDMA_TDA_INT) {
- if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
- TDA_SM0_ERR_ALARM |
- TDA_SM1_ERR_ALARM,
- &bar0->tda_err_reg,
- &sw_stat->tda_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
- &bar0->tda_err_reg,
- &sw_stat->tda_err_cnt);
- }
-	/* check for pcc_err */
- if (val64 & TXDMA_PCC_INT) {
- if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
- PCC_N_SERR | PCC_6_COF_OV_ERR |
- PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
- PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
- PCC_TXB_ECC_DB_ERR,
- &bar0->pcc_err_reg,
- &sw_stat->pcc_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
- &bar0->pcc_err_reg,
- &sw_stat->pcc_err_cnt);
- }
-
-	/* check for tti_err */
- if (val64 & TXDMA_TTI_INT) {
- if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
- &bar0->tti_err_reg,
- &sw_stat->tti_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
- &bar0->tti_err_reg,
- &sw_stat->tti_err_cnt);
- }
-
-	/* check for lso_err */
- if (val64 & TXDMA_LSO_INT) {
- if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
- LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
- &bar0->lso_err_reg,
- &sw_stat->lso_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
- &bar0->lso_err_reg,
- &sw_stat->lso_err_cnt);
- }
-
-	/* check for tpa_err */
- if (val64 & TXDMA_TPA_INT) {
- if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
- &bar0->tpa_err_reg,
- &sw_stat->tpa_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
- &bar0->tpa_err_reg,
- &sw_stat->tpa_err_cnt);
- }
-
-	/* check for sm_err */
- if (val64 & TXDMA_SM_INT) {
- if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
- &bar0->sm_err_reg,
- &sw_stat->sm_err_cnt))
- goto reset;
- }
-
- val64 = readq(&bar0->mac_int_status);
- if (val64 & MAC_INT_STATUS_TMAC_INT) {
- if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
- &bar0->mac_tmac_err_reg,
- &sw_stat->mac_tmac_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
- TMAC_DESC_ECC_SG_ERR |
- TMAC_DESC_ECC_DB_ERR,
- &bar0->mac_tmac_err_reg,
- &sw_stat->mac_tmac_err_cnt);
- }
-
- val64 = readq(&bar0->xgxs_int_status);
- if (val64 & XGXS_INT_STATUS_TXGXS) {
- if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
- &bar0->xgxs_txgxs_err_reg,
- &sw_stat->xgxs_txgxs_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
- &bar0->xgxs_txgxs_err_reg,
- &sw_stat->xgxs_txgxs_err_cnt);
- }
-
- val64 = readq(&bar0->rxdma_int_status);
- if (val64 & RXDMA_INT_RC_INT_M) {
- if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
- RC_FTC_ECC_DB_ERR |
- RC_PRCn_SM_ERR_ALARM |
- RC_FTC_SM_ERR_ALARM,
- &bar0->rc_err_reg,
- &sw_stat->rc_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
- RC_FTC_ECC_SG_ERR |
- RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
- &sw_stat->rc_err_cnt);
- if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
- PRC_PCI_AB_WR_Rn |
- PRC_PCI_AB_F_WR_Rn,
- &bar0->prc_pcix_err_reg,
- &sw_stat->prc_pcix_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
- PRC_PCI_DP_WR_Rn |
- PRC_PCI_DP_F_WR_Rn,
- &bar0->prc_pcix_err_reg,
- &sw_stat->prc_pcix_err_cnt);
- }
-
- if (val64 & RXDMA_INT_RPA_INT_M) {
- if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
- &bar0->rpa_err_reg,
- &sw_stat->rpa_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
- &bar0->rpa_err_reg,
- &sw_stat->rpa_err_cnt);
- }
-
- if (val64 & RXDMA_INT_RDA_INT_M) {
- if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
- RDA_FRM_ECC_DB_N_AERR |
- RDA_SM1_ERR_ALARM |
- RDA_SM0_ERR_ALARM |
- RDA_RXD_ECC_DB_SERR,
- &bar0->rda_err_reg,
- &sw_stat->rda_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
- RDA_FRM_ECC_SG_ERR |
- RDA_MISC_ERR |
- RDA_PCIX_ERR,
- &bar0->rda_err_reg,
- &sw_stat->rda_err_cnt);
- }
-
- if (val64 & RXDMA_INT_RTI_INT_M) {
- if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
- &bar0->rti_err_reg,
- &sw_stat->rti_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
- &bar0->rti_err_reg,
- &sw_stat->rti_err_cnt);
- }
-
- val64 = readq(&bar0->mac_int_status);
- if (val64 & MAC_INT_STATUS_RMAC_INT) {
- if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
- &bar0->mac_rmac_err_reg,
- &sw_stat->mac_rmac_err_cnt))
- goto reset;
- do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
- RMAC_SINGLE_ECC_ERR |
- RMAC_DOUBLE_ECC_ERR,
- &bar0->mac_rmac_err_reg,
- &sw_stat->mac_rmac_err_cnt);
- }
-
- val64 = readq(&bar0->xgxs_int_status);
- if (val64 & XGXS_INT_STATUS_RXGXS) {
- if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
- &bar0->xgxs_rxgxs_err_reg,
- &sw_stat->xgxs_rxgxs_err_cnt))
- goto reset;
- }
-
- val64 = readq(&bar0->mc_int_status);
- if (val64 & MC_INT_STATUS_MC_INT) {
- if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
- &bar0->mc_err_reg,
- &sw_stat->mc_err_cnt))
- goto reset;
-
- /* Handling Ecc errors */
- if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
- writeq(val64, &bar0->mc_err_reg);
- if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
- sw_stat->double_ecc_errs++;
- if (sp->device_type != XFRAME_II_DEVICE) {
- /*
- * Reset XframeI only if critical error
- */
- if (val64 &
- (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
- MC_ERR_REG_MIRI_ECC_DB_ERR_1))
- goto reset;
- }
- } else
- sw_stat->single_ecc_errs++;
- }
- }
- return;
-
-reset:
- s2io_stop_all_tx_queue(sp);
- schedule_work(&sp->rst_timer_task);
- sw_stat->soft_reset_cnt++;
-}
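-
-/*
- * Illustrative sketch, not from the original driver: the ring bump
- * counter registers read above pack four 16-bit counters into one
- * 64-bit word, most significant field first.  The vBIT() arithmetic
- * in s2io_handle_errors() is equivalent to a plain shift-and-mask:
- */
-static inline u16 unpack_counter16(u64 reg, int idx)
-{
-	/* idx 0 is the most significant 16-bit field, idx 3 the least */
-	return (u16)(reg >> (48 - idx * 16));
-}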
-
-/**
- * s2io_isr - ISR handler of the device.
- * @irq: the irq of the device.
- * @dev_id: a void pointer to the dev structure of the NIC.
- * Description: This function is the ISR handler of the device. It
- * identifies the reason for the interrupt and calls the relevant
- * service routines. As a contingency measure, this ISR allocates the
- * recv buffers, if their number is below the panic value, which is
- * presently set to 25% of the original number of rcv buffers allocated.
- * Return value:
- * IRQ_HANDLED: will be returned if IRQ was handled by this routine
- * IRQ_NONE: will be returned if interrupt is not from our device
- */
-static irqreturn_t s2io_isr(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *)dev_id;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- int i;
- u64 reason = 0;
- struct mac_info *mac_control;
- struct config_param *config;
-
- /* Pretend we handled any irq's from a disconnected card */
- if (pci_channel_offline(sp->pdev))
- return IRQ_NONE;
-
- if (!is_s2io_card_up(sp))
- return IRQ_NONE;
-
- config = &sp->config;
- mac_control = &sp->mac_control;
-
- /*
- * Identify the cause for interrupt and call the appropriate
-	 * interrupt handler. Causes for the interrupt could be:
- * 1. Rx of packet.
- * 2. Tx complete.
- * 3. Link down.
- */
- reason = readq(&bar0->general_int_status);
-
- if (unlikely(reason == S2IO_MINUS_ONE))
- return IRQ_HANDLED; /* Nothing much can be done. Get out */
-
- if (reason &
- (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
- writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
-
- if (config->napi) {
- if (reason & GEN_INTR_RXTRAFFIC) {
- napi_schedule(&sp->napi);
- writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
- writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
- readl(&bar0->rx_traffic_int);
- }
- } else {
- /*
- * rx_traffic_int reg is an R1 register, writing all 1's
- * will ensure that the actual interrupt causing bit
- * gets cleared and hence a read can be avoided.
- */
- if (reason & GEN_INTR_RXTRAFFIC)
- writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- rx_intr_handler(ring, 0);
- }
- }
-
- /*
- * tx_traffic_int reg is an R1 register, writing all 1's
- * will ensure that the actual interrupt causing bit gets
- * cleared and hence a read can be avoided.
- */
- if (reason & GEN_INTR_TXTRAFFIC)
- writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
-
- for (i = 0; i < config->tx_fifo_num; i++)
- tx_intr_handler(&mac_control->fifos[i]);
-
- if (reason & GEN_INTR_TXPIC)
- s2io_txpic_intr_handle(sp);
-
- /*
- * Reallocate the buffers from the interrupt handler itself.
- */
- if (!config->napi) {
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- s2io_chk_rx_buffers(sp, ring);
- }
- }
- writeq(sp->general_int_mask, &bar0->general_int_mask);
- readl(&bar0->general_int_status);
-
- return IRQ_HANDLED;
-
- } else if (!reason) {
- /* The interrupt was not raised by us */
- return IRQ_NONE;
- }
-
- return IRQ_HANDLED;
-}
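-
-/*
- * Illustrative sketch, not from the original driver: the traffic
- * interrupt registers handled in s2io_isr() are "R1" registers, where
- * writing a 1 to a bit clears it.  Writing all ones therefore
- * acknowledges every pending cause in a single posted write, and a
- * read-back flushes that write to the device:
- */
-static inline void ack_all_and_flush(void __iomem *int_reg)
-{
-	writeq(~0ULL, int_reg);	/* W1C: clear every latched cause bit */
-	readl(int_reg);		/* read back to flush the posted write */
-}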
-
-/*
- * s2io_updt_stats - Triggers the adapter to update its statistics block
- */
-static void s2io_updt_stats(struct s2io_nic *sp)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64;
- int cnt = 0;
-
- if (is_s2io_card_up(sp)) {
-		/* Approx 30 us on a 133 MHz bus */
- val64 = SET_UPDT_CLICKS(10) |
- STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
- writeq(val64, &bar0->stat_cfg);
- do {
- udelay(100);
- val64 = readq(&bar0->stat_cfg);
- if (!(val64 & s2BIT(0)))
- break;
- cnt++;
- if (cnt == 5)
- break; /* Updt failed */
- } while (1);
- }
-}
-
-/**
- * s2io_get_stats - Updates the device statistics structure.
- * @dev : pointer to the device structure.
- * Description:
- * This function updates the device statistics structure in the s2io_nic
- * structure and returns a pointer to the same.
- * Return value:
- * pointer to the updated net_device_stats structure.
- */
-static struct net_device_stats *s2io_get_stats(struct net_device *dev)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- struct mac_info *mac_control = &sp->mac_control;
- struct stat_block *stats = mac_control->stats_info;
- u64 delta;
-
- /* Configure Stats for immediate updt */
- s2io_updt_stats(sp);
-
-	/* A device reset will cause the on-adapter statistics to be zeroed.
-	 * This can happen while running, by changing the MTU. To prevent the
-	 * system from having the stats zeroed, the driver keeps a copy of the
-	 * last update to the system (which is also zeroed on reset). This
- * enables the driver to accurately know the delta between the last
- * update and the current update.
- */
- delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
- sp->stats.rx_packets += delta;
- dev->stats.rx_packets += delta;
-
- delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
- sp->stats.tx_packets += delta;
- dev->stats.tx_packets += delta;
-
- delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
- le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
- sp->stats.rx_bytes += delta;
- dev->stats.rx_bytes += delta;
-
- delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
- le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
- sp->stats.tx_bytes += delta;
- dev->stats.tx_bytes += delta;
-
- delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
- sp->stats.rx_errors += delta;
- dev->stats.rx_errors += delta;
-
- delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
- sp->stats.tx_errors += delta;
- dev->stats.tx_errors += delta;
-
- delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
- sp->stats.rx_dropped += delta;
- dev->stats.rx_dropped += delta;
-
- delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
- sp->stats.tx_dropped += delta;
- dev->stats.tx_dropped += delta;
-
- /* The adapter MAC interprets pause frames as multicast packets, but
- * does not pass them up. This erroneously increases the multicast
- * packet count and needs to be deducted when the multicast frame count
- * is queried.
- */
- delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_vld_mcst_frms);
- delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
- delta -= sp->stats.multicast;
- sp->stats.multicast += delta;
- dev->stats.multicast += delta;
-
- delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_usized_frms)) +
- le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
- sp->stats.rx_length_errors += delta;
- dev->stats.rx_length_errors += delta;
-
- delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
- sp->stats.rx_crc_errors += delta;
- dev->stats.rx_crc_errors += delta;
-
- return &dev->stats;
-}
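-
-/*
- * Illustrative sketch, not from the original driver: the delta scheme
- * used in s2io_get_stats() reduces, for a single counter, to folding
- * only the difference since the last snapshot into a running total.
- * Because the snapshot (sp->stats) is zeroed together with the
- * hardware on reset, the total (dev->stats) stays monotonic:
- */
-static void fold_hw_counter(u64 hw_now, u64 *snapshot, u64 *total)
-{
-	u64 delta = hw_now - *snapshot;
-
-	*snapshot += delta;	/* mirrors the hardware counter */
-	*total += delta;	/* survives adapter resets */
-}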
-
-/**
- * s2io_set_multicast - entry point for multicast address enable/disable.
- * @dev : pointer to the device structure
- * @may_sleep: indicates whether the function may sleep while waiting for
- * command completion
- * Description:
- * This function is a driver entry point which gets called by the kernel
- * whenever multicast addresses must be enabled/disabled. This also gets
- * called to set/reset promiscuous mode. Depending on the device flags, we
- * determine whether multicast addresses must be enabled or whether
- * promiscuous mode is to be disabled, etc.
- * Return value:
- * void.
- */
-static void s2io_set_multicast(struct net_device *dev, bool may_sleep)
-{
- int i, j, prev_cnt;
- struct netdev_hw_addr *ha;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
- 0xfeffffffffffULL;
- u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
- void __iomem *add;
- struct config_param *config = &sp->config;
-
- if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
- /* Enable all Multicast addresses */
- writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
- &bar0->rmac_addr_data0_mem);
- writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
- &bar0->rmac_addr_data1_mem);
- val64 = RMAC_ADDR_CMD_MEM_WE |
- RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
- /* Wait till command completes */
- wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, may_sleep);
-
- sp->m_cast_flg = 1;
- sp->all_multi_pos = config->max_mc_addr - 1;
- } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
- /* Disable all Multicast addresses */
- writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
- &bar0->rmac_addr_data0_mem);
- writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
- &bar0->rmac_addr_data1_mem);
- val64 = RMAC_ADDR_CMD_MEM_WE |
- RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
- /* Wait till command completes */
- wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, may_sleep);
-
- sp->m_cast_flg = 0;
- sp->all_multi_pos = 0;
- }
-
- if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
- /* Put the NIC into promiscuous mode */
- add = &bar0->mac_cfg;
- val64 = readq(&bar0->mac_cfg);
- val64 |= MAC_CFG_RMAC_PROM_ENABLE;
-
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32)val64, add);
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64 >> 32), (add + 4));
-
- if (vlan_tag_strip != 1) {
- val64 = readq(&bar0->rx_pa_cfg);
- val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
- writeq(val64, &bar0->rx_pa_cfg);
- sp->vlan_strip_flag = 0;
- }
-
- val64 = readq(&bar0->mac_cfg);
- sp->promisc_flg = 1;
- DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
- dev->name);
- } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
- /* Remove the NIC from promiscuous mode */
- add = &bar0->mac_cfg;
- val64 = readq(&bar0->mac_cfg);
- val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
-
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32)val64, add);
- writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
- writel((u32) (val64 >> 32), (add + 4));
-
- if (vlan_tag_strip != 0) {
- val64 = readq(&bar0->rx_pa_cfg);
- val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
- writeq(val64, &bar0->rx_pa_cfg);
- sp->vlan_strip_flag = 1;
- }
-
- val64 = readq(&bar0->mac_cfg);
- sp->promisc_flg = 0;
- DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
- }
-
- /* Update individual M_CAST address list */
- if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
- if (netdev_mc_count(dev) >
- (config->max_mc_addr - config->max_mac_addr)) {
- DBG_PRINT(ERR_DBG,
- "%s: No more Rx filters can be added - "
- "please enable ALL_MULTI instead\n",
- dev->name);
- return;
- }
-
- prev_cnt = sp->mc_addr_count;
- sp->mc_addr_count = netdev_mc_count(dev);
-
- /* Clear out the previous list of Mc in the H/W. */
- for (i = 0; i < prev_cnt; i++) {
- writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
- &bar0->rmac_addr_data0_mem);
- writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
- &bar0->rmac_addr_data1_mem);
- val64 = RMAC_ADDR_CMD_MEM_WE |
- RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET
- (config->mc_start_offset + i);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
-
-			/* Wait till command completes */
- if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, may_sleep)) {
- DBG_PRINT(ERR_DBG,
- "%s: Adding Multicasts failed\n",
- dev->name);
- return;
- }
- }
-
- /* Create the new Rx filter list and update the same in H/W. */
- i = 0;
- netdev_for_each_mc_addr(ha, dev) {
- mac_addr = 0;
- for (j = 0; j < ETH_ALEN; j++) {
- mac_addr |= ha->addr[j];
- mac_addr <<= 8;
- }
- mac_addr >>= 8;
- writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
- &bar0->rmac_addr_data0_mem);
- writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
- &bar0->rmac_addr_data1_mem);
- val64 = RMAC_ADDR_CMD_MEM_WE |
- RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET
- (i + config->mc_start_offset);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
-
-			/* Wait till command completes */
- if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, may_sleep)) {
- DBG_PRINT(ERR_DBG,
- "%s: Adding Multicasts failed\n",
- dev->name);
- return;
- }
- i++;
- }
- }
-}
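-
-/*
- * Illustrative sketch, not from the original driver: the byte loop in
- * s2io_set_multicast() packs a 6-byte Ethernet address into the low 48
- * bits of a u64 with the first octet in the most significant position,
- * the same layout ether_addr_to_u64() produces and that
- * do_s2io_read_unicast_mc() recovers by shifting the CAM word right
- * by 16:
- */
-static u64 pack_mac(const u8 *addr)
-{
-	u64 mac = 0;
-	int i;
-
-	for (i = 0; i < ETH_ALEN; i++)
-		mac = (mac << 8) | addr[i];
-	return mac;
-}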
-
-/* NDO wrapper for s2io_set_multicast */
-static void s2io_ndo_set_multicast(struct net_device *dev)
-{
- s2io_set_multicast(dev, false);
-}
-
-/* read the unicast & multicast addresses from the CAM and store them in
- * the def_mac_addr structure
- */
-static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
-{
- int offset;
- u64 mac_addr = 0x0;
- struct config_param *config = &sp->config;
-
- /* store unicast & multicast mac addresses */
- for (offset = 0; offset < config->max_mc_addr; offset++) {
- mac_addr = do_s2io_read_unicast_mc(sp, offset);
- /* if read fails disable the entry */
- if (mac_addr == FAILURE)
- mac_addr = S2IO_DISABLE_MAC_ENTRY;
- do_s2io_copy_mac_addr(sp, offset, mac_addr);
- }
-}
-
-/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
-static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
-{
- int offset;
- struct config_param *config = &sp->config;
- /* restore unicast mac address */
- for (offset = 0; offset < config->max_mac_addr; offset++)
- do_s2io_prog_unicast(sp->dev,
- sp->def_mac_addr[offset].mac_addr);
-
- /* restore multicast mac address */
- for (offset = config->mc_start_offset;
- offset < config->max_mc_addr; offset++)
- do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
-}
-
-/* add a multicast MAC address to CAM */
-static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
-{
- int i;
- u64 mac_addr;
- struct config_param *config = &sp->config;
-
- mac_addr = ether_addr_to_u64(addr);
- if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
- return SUCCESS;
-
-	/* check if the multicast mac is already present in the CAM */
- for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
- u64 tmp64;
- tmp64 = do_s2io_read_unicast_mc(sp, i);
- if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
- break;
-
- if (tmp64 == mac_addr)
- return SUCCESS;
- }
- if (i == config->max_mc_addr) {
- DBG_PRINT(ERR_DBG,
- "CAM full no space left for multicast MAC\n");
- return FAILURE;
- }
- /* Update the internal structure with this new mac address */
- do_s2io_copy_mac_addr(sp, i, mac_addr);
-
- return do_s2io_add_mac(sp, mac_addr, i);
-}
-
-/* add MAC address to CAM */
-static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
-{
- u64 val64;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
- &bar0->rmac_addr_data0_mem);
-
- val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET(off);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
-
- /* Wait till command completes */
- if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, true)) {
- DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
- return FAILURE;
- }
- return SUCCESS;
-}
-
-/* deletes a specified unicast/multicast mac entry from CAM */
-static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
-{
- int offset;
- u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
- struct config_param *config = &sp->config;
-
- for (offset = 1;
- offset < config->max_mc_addr; offset++) {
- tmp64 = do_s2io_read_unicast_mc(sp, offset);
- if (tmp64 == addr) {
- /* disable the entry by writing 0xffffffffffffULL */
- if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
- return FAILURE;
- /* store the new mac list from CAM */
- do_s2io_store_unicast_mc(sp);
- return SUCCESS;
- }
- }
- DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
- (unsigned long long)addr);
- return FAILURE;
-}
-
-/* read mac entries from CAM */
-static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
-{
- u64 tmp64, val64;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- /* read mac addr */
- val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET(offset);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
-
- /* Wait till command completes */
- if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, true)) {
- DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
- return FAILURE;
- }
- tmp64 = readq(&bar0->rmac_addr_data0_mem);
-
- return tmp64 >> 16;
-}
-
-/*
- * s2io_set_mac_addr - driver entry point
- */
-
-static int s2io_set_mac_addr(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(dev, addr->sa_data);
-
- /* store the MAC address in CAM */
- return do_s2io_prog_unicast(dev, dev->dev_addr);
-}
-
-/**
- * do_s2io_prog_unicast - Programs the Xframe MAC address
- * @dev : pointer to the device structure.
- * @addr: a uchar pointer to the new MAC address which is to be set.
- * Description : This procedure will program the Xframe to receive
- * frames with the new MAC address.
- * Return value: SUCCESS on success and an appropriate (-)ve integer
- * as defined in errno.h file on failure.
- */
-
-static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- register u64 mac_addr, perm_addr;
- int i;
- u64 tmp64;
- struct config_param *config = &sp->config;
-
- /*
- * Set the new MAC address as the new unicast filter and reflect this
- * change on the device address registered with the OS. It will be
- * at offset 0.
- */
- mac_addr = ether_addr_to_u64(addr);
- perm_addr = ether_addr_to_u64(sp->def_mac_addr[0].mac_addr);
-
-	/* check if the dev_addr is different from perm_addr */
- if (mac_addr == perm_addr)
- return SUCCESS;
-
-	/* check if the mac is already present in the CAM */
- for (i = 1; i < config->max_mac_addr; i++) {
- tmp64 = do_s2io_read_unicast_mc(sp, i);
- if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
- break;
-
- if (tmp64 == mac_addr) {
- DBG_PRINT(INFO_DBG,
- "MAC addr:0x%llx already present in CAM\n",
- (unsigned long long)mac_addr);
- return SUCCESS;
- }
- }
- if (i == config->max_mac_addr) {
- DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
- return FAILURE;
- }
- /* Update the internal structure with this new mac address */
- do_s2io_copy_mac_addr(sp, i, mac_addr);
-
- return do_s2io_add_mac(sp, mac_addr, i);
-}
-
-/**
- * s2io_ethtool_set_link_ksettings - Sets different link parameters.
- * @dev : pointer to netdev
- * @cmd: pointer to the structure with parameters given by ethtool to set
- * link information.
- * Description:
- * The function sets different link parameters provided by the user onto
- * the NIC.
- * Return value:
- * 0 on success.
- */
-
-static int
-s2io_ethtool_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
- (cmd->base.speed != SPEED_10000) ||
- (cmd->base.duplex != DUPLEX_FULL))
- return -EINVAL;
- else {
- s2io_close(sp->dev);
- s2io_open(sp->dev);
- }
-
- return 0;
-}
-
-/**
- * s2io_ethtool_get_link_ksettings - Return link specific information.
- * @dev: pointer to netdev
- * @cmd : pointer to the structure with parameters given by ethtool
- * to return link information.
- * Description:
- * Returns link specific information like speed, duplex etc.. to ethtool.
- * Return value :
- * return 0 on success.
- */
-
-static int
-s2io_ethtool_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct s2io_nic *sp = netdev_priv(dev);
-
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
-
- if (netif_carrier_ok(sp->dev)) {
- cmd->base.speed = SPEED_10000;
- cmd->base.duplex = DUPLEX_FULL;
- } else {
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
- }
-
- cmd->base.autoneg = AUTONEG_DISABLE;
- return 0;
-}
-
-/**
- * s2io_ethtool_gdrvinfo - Returns driver-specific information.
- * @dev: pointer to netdev
- * @info : pointer to the structure with parameters given by ethtool to
- * return driver information.
- * Description:
- * Returns driver-specific information like name, version etc. to ethtool.
- * Return value:
- * void
- */
-
-static void s2io_ethtool_gdrvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct s2io_nic *sp = netdev_priv(dev);
-
- strscpy(info->driver, s2io_driver_name, sizeof(info->driver));
- strscpy(info->version, s2io_driver_version, sizeof(info->version));
- strscpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
-}
-
-/**
- * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
- * @dev: pointer to netdev
- * @regs : pointer to the structure with parameters given by ethtool for
- * dumping the registers.
- * @space: The output buffer into which all the registers are dumped.
- * Description:
- * Dumps the entire register space of the Xframe NIC into the user-given
- * buffer area.
- * Return value :
- * void.
- */
-
-static void s2io_ethtool_gregs(struct net_device *dev,
- struct ethtool_regs *regs, void *space)
-{
- int i;
- u64 reg;
- u8 *reg_space = (u8 *)space;
- struct s2io_nic *sp = netdev_priv(dev);
-
- regs->len = XENA_REG_SPACE;
- regs->version = sp->pdev->subsystem_device;
-
- for (i = 0; i < regs->len; i += 8) {
- reg = readq(sp->bar0 + i);
- memcpy((reg_space + i), &reg, 8);
- }
-}
-
-/*
- * s2io_set_led - control the NIC LED
- */
-static void s2io_set_led(struct s2io_nic *sp, bool on)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u16 subid = sp->pdev->subsystem_device;
- u64 val64;
-
- if ((sp->device_type == XFRAME_II_DEVICE) ||
- ((subid & 0xFF) >= 0x07)) {
- val64 = readq(&bar0->gpio_control);
- if (on)
- val64 |= GPIO_CTRL_GPIO_0;
- else
- val64 &= ~GPIO_CTRL_GPIO_0;
-
- writeq(val64, &bar0->gpio_control);
- } else {
- val64 = readq(&bar0->adapter_control);
- if (on)
- val64 |= ADAPTER_LED_ON;
- else
- val64 &= ~ADAPTER_LED_ON;
-
- writeq(val64, &bar0->adapter_control);
- }
-}
-
-/**
- * s2io_ethtool_set_led - To physically identify the nic on the system.
- * @dev : network device
- * @state: led setting
- *
- * Description: Used to physically identify the NIC on the system.
- * The Link LED will blink for a time specified by the user for
- * identification.
- * NOTE: The link has to be up to be able to blink the LED. Hence
- * identification is possible only if its link is up.
- */
-
-static int s2io_ethtool_set_led(struct net_device *dev,
- enum ethtool_phys_id_state state)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u16 subid = sp->pdev->subsystem_device;
-
- if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
- u64 val64 = readq(&bar0->adapter_control);
- if (!(val64 & ADAPTER_CNTL_EN)) {
- pr_err("Adapter Link down, cannot blink LED\n");
- return -EAGAIN;
- }
- }
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- sp->adapt_ctrl_org = readq(&bar0->gpio_control);
- return 1; /* cycle on/off once per second */
-
- case ETHTOOL_ID_ON:
- s2io_set_led(sp, true);
- break;
-
- case ETHTOOL_ID_OFF:
- s2io_set_led(sp, false);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
- writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
- }
-
- return 0;
-}
-
-static void
-s2io_ethtool_gringparam(struct net_device *dev,
- struct ethtool_ringparam *ering,
- struct kernel_ethtool_ringparam *kernel_ering,
- struct netlink_ext_ack *extack)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- int i, tx_desc_count = 0, rx_desc_count = 0;
-
- if (sp->rxd_mode == RXD_MODE_1) {
- ering->rx_max_pending = MAX_RX_DESC_1;
- ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
- } else {
- ering->rx_max_pending = MAX_RX_DESC_2;
- ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
- }
-
- ering->tx_max_pending = MAX_TX_DESC;
-
- for (i = 0; i < sp->config.rx_ring_num; i++)
- rx_desc_count += sp->config.rx_cfg[i].num_rxd;
- ering->rx_pending = rx_desc_count;
- ering->rx_jumbo_pending = rx_desc_count;
-
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- tx_desc_count += sp->config.tx_cfg[i].fifo_len;
- ering->tx_pending = tx_desc_count;
- DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
-}
-
-/**
- * s2io_ethtool_getpause_data - Pause frame generation and reception.
- * @dev: pointer to netdev
- * @ep : pointer to the structure with pause parameters given by ethtool.
- * Description:
- * Returns the Pause frame generation and reception capability of the NIC.
- * Return value:
- * void
- */
-static void s2io_ethtool_getpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
-{
- u64 val64;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- val64 = readq(&bar0->rmac_pause_cfg);
- if (val64 & RMAC_PAUSE_GEN_ENABLE)
- ep->tx_pause = true;
- if (val64 & RMAC_PAUSE_RX_ENABLE)
- ep->rx_pause = true;
- ep->autoneg = false;
-}
-
-/**
- * s2io_ethtool_setpause_data - set/reset pause frame generation.
- * @dev: pointer to netdev
- * @ep : pointer to the structure with pause parameters given by ethtool.
- * Description:
- * It can be used to set or reset Pause frame generation or reception
- * support of the NIC.
- * Return value:
- * int, returns 0 on Success
- */
-
-static int s2io_ethtool_setpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
-{
- u64 val64;
- struct s2io_nic *sp = netdev_priv(dev);
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- val64 = readq(&bar0->rmac_pause_cfg);
- if (ep->tx_pause)
- val64 |= RMAC_PAUSE_GEN_ENABLE;
- else
- val64 &= ~RMAC_PAUSE_GEN_ENABLE;
- if (ep->rx_pause)
- val64 |= RMAC_PAUSE_RX_ENABLE;
- else
- val64 &= ~RMAC_PAUSE_RX_ENABLE;
- writeq(val64, &bar0->rmac_pause_cfg);
- return 0;
-}
-
-#define S2IO_DEV_ID 5
-/**
- * read_eeprom - reads 4 bytes of data from a user-given offset.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @off : offset from which the data is to be read
- * @data : An output parameter where the data read at the given
- * offset is stored.
- * Description:
- * Will read 4 bytes of data from the user-given offset and return the
- * data read.
- * NOTE: Allows reading only the part of the EEPROM visible through the
- * I2C bus.
- * Return value:
- * -1 on failure and 0 on success.
- */
-static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
-{
- int ret = -1;
- u32 exit_cnt = 0;
- u64 val64;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- if (sp->device_type == XFRAME_I_DEVICE) {
- val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
- I2C_CONTROL_ADDR(off) |
- I2C_CONTROL_BYTE_CNT(0x3) |
- I2C_CONTROL_READ |
- I2C_CONTROL_CNTL_START;
- SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
-
- while (exit_cnt < 5) {
- val64 = readq(&bar0->i2c_control);
- if (I2C_CONTROL_CNTL_END(val64)) {
- *data = I2C_CONTROL_GET_DATA(val64);
- ret = 0;
- break;
- }
- msleep(50);
- exit_cnt++;
- }
- }
-
- if (sp->device_type == XFRAME_II_DEVICE) {
- val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
- SPI_CONTROL_BYTECNT(0x3) |
- SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
- SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
- val64 |= SPI_CONTROL_REQ;
- SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
- while (exit_cnt < 5) {
- val64 = readq(&bar0->spi_control);
- if (val64 & SPI_CONTROL_NACK) {
- ret = 1;
- break;
- } else if (val64 & SPI_CONTROL_DONE) {
- *data = readq(&bar0->spi_data);
- *data &= 0xffffff;
- ret = 0;
- break;
- }
- msleep(50);
- exit_cnt++;
- }
- }
- return ret;
-}
-
-/**
- * write_eeprom - actually writes the relevant part of the data value.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @off : offset at which the data must be written
- * @data : The data that is to be written
- * @cnt : Number of bytes of the data that are actually to be written into
- * the EEPROM (max of 3).
- * Description:
- * Actually writes the relevant part of the data value into the EEPROM
- * through the I2C bus.
- * Return value:
- * 0 on success, -1 on failure.
- */
-
-static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
-{
- int exit_cnt = 0, ret = -1;
- u64 val64;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
-
- if (sp->device_type == XFRAME_I_DEVICE) {
- val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
- I2C_CONTROL_ADDR(off) |
- I2C_CONTROL_BYTE_CNT(cnt) |
- I2C_CONTROL_SET_DATA((u32)data) |
- I2C_CONTROL_CNTL_START;
- SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
-
- while (exit_cnt < 5) {
- val64 = readq(&bar0->i2c_control);
- if (I2C_CONTROL_CNTL_END(val64)) {
- if (!(val64 & I2C_CONTROL_NACK))
- ret = 0;
- break;
- }
- msleep(50);
- exit_cnt++;
- }
- }
-
- if (sp->device_type == XFRAME_II_DEVICE) {
- int write_cnt = (cnt == 8) ? 0 : cnt;
- writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
-
- val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
- SPI_CONTROL_BYTECNT(write_cnt) |
- SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
- SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
- val64 |= SPI_CONTROL_REQ;
- SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
- while (exit_cnt < 5) {
- val64 = readq(&bar0->spi_control);
- if (val64 & SPI_CONTROL_NACK) {
- ret = 1;
- break;
- } else if (val64 & SPI_CONTROL_DONE) {
- ret = 0;
- break;
- }
- msleep(50);
- exit_cnt++;
- }
- }
- return ret;
-}
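-
-/*
- * Illustrative sketch, not from the original driver: read_eeprom() and
- * write_eeprom() both follow the same bounded-poll idiom -- kick the
- * command, then poll a status register a fixed number of times with a
- * sleep in between, giving up instead of spinning forever:
- */
-static int poll_cmd_done(void __iomem *status_reg, u64 done_bit, u64 nack_bit)
-{
-	int tries;
-
-	for (tries = 0; tries < 5; tries++) {
-		u64 val = readq(status_reg);
-
-		if (val & nack_bit)
-			return 1;	/* device rejected the command */
-		if (val & done_bit)
-			return 0;	/* command completed */
-		msleep(50);
-	}
-	return -1;			/* timed out */
-}
-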
-static void s2io_vpd_read(struct s2io_nic *nic)
-{
- u8 *vpd_data;
- u8 data;
- int i = 0, cnt, len, fail = 0;
- int vpd_addr = 0x80;
- struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
-
- if (nic->device_type == XFRAME_II_DEVICE) {
- strcpy(nic->product_name, "Xframe II 10GbE network adapter");
- vpd_addr = 0x80;
- } else {
- strcpy(nic->product_name, "Xframe I 10GbE network adapter");
- vpd_addr = 0x50;
- }
- strcpy(nic->serial_num, "NOT AVAILABLE");
-
- vpd_data = kmalloc(256, GFP_KERNEL);
- if (!vpd_data) {
- swstats->mem_alloc_fail_cnt++;
- return;
- }
- swstats->mem_allocated += 256;
-
- for (i = 0; i < 256; i += 4) {
- pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
- pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
- pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
- for (cnt = 0; cnt < 5; cnt++) {
- msleep(2);
- pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
- if (data == 0x80)
- break;
- }
- if (cnt >= 5) {
- DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
- fail = 1;
- break;
- }
- pci_read_config_dword(nic->pdev, (vpd_addr + 4),
- (u32 *)&vpd_data[i]);
- }
-
- if (!fail) {
- /* read serial number of adapter */
- for (cnt = 0; cnt < 252; cnt++) {
- if ((vpd_data[cnt] == 'S') &&
- (vpd_data[cnt+1] == 'N')) {
- len = vpd_data[cnt+2];
- if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
- memcpy(nic->serial_num,
- &vpd_data[cnt + 3],
- len);
- memset(nic->serial_num+len,
- 0,
- VPD_STRING_LEN-len);
- break;
- }
- }
- }
- }
-
- if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
- len = vpd_data[1];
- memcpy(nic->product_name, &vpd_data[3], len);
- nic->product_name[len] = 0;
- }
- kfree(vpd_data);
- swstats->mem_freed += 256;
-}
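-
-/*
- * Illustrative sketch, not from the original driver: s2io_vpd_read()
- * locates the serial number by scanning the raw VPD image for the
- * two-byte "SN" keyword followed by a one-byte length, the generic
- * layout of a VPD read-only field:
- */
-static int vpd_find_field(const u8 *vpd, int vpd_len, char k0, char k1,
-			  int *field_len)
-{
-	int i;
-
-	for (i = 0; i + 2 < vpd_len; i++) {
-		if (vpd[i] == k0 && vpd[i + 1] == k1) {
-			*field_len = vpd[i + 2];
-			return i + 3;	/* offset of the field data */
-		}
-	}
-	return -1;			/* keyword not found */
-}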
-
-/**
- * s2io_ethtool_geeprom - reads the values stored in the EEPROM.
- * @dev: pointer to netdev
- * @eeprom : pointer to the user level structure provided by ethtool,
- * containing all relevant information.
- * @data_buf : buffer into which the read data is stored.
- * Description: Reads the values stored in the EEPROM at the given offset
- * for a given length. Stores these values in the input argument data
- * buffer 'data_buf' and returns them to the caller (ethtool).
- * Return value:
- * int 0 on success
- */
-
-static int s2io_ethtool_geeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 * data_buf)
-{
- u32 i, valid;
- u64 data;
- struct s2io_nic *sp = netdev_priv(dev);
-
- eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
-
- if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
- eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
-
- for (i = 0; i < eeprom->len; i += 4) {
- if (read_eeprom(sp, (eeprom->offset + i), &data)) {
- DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
- return -EFAULT;
- }
- valid = INV(data);
- memcpy((data_buf + i), &valid, 4);
- }
- return 0;
-}
-
-/**
- * s2io_ethtool_seeprom - tries to write the user-provided value into the EEPROM
- * @dev: pointer to netdev
- * @eeprom : pointer to the user level structure provided by ethtool,
- * containing all relevant information.
- * @data_buf : user-defined value to be written into the EEPROM.
- * Description:
- * Tries to write the user-provided value into the EEPROM, at the offset
- * given by the user.
- * Return value:
- * 0 on success, -EFAULT on failure.
- */
-
-static int s2io_ethtool_seeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom,
- u8 *data_buf)
-{
- int len = eeprom->len, cnt = 0;
- u64 valid = 0, data;
- struct s2io_nic *sp = netdev_priv(dev);
-
- if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
- DBG_PRINT(ERR_DBG,
- "ETHTOOL_WRITE_EEPROM Err: "
- "Magic value is wrong, it is 0x%x should be 0x%x\n",
- (sp->pdev->vendor | (sp->pdev->device << 16)),
- eeprom->magic);
- return -EFAULT;
- }
-
- while (len) {
- data = (u32)data_buf[cnt] & 0x000000FF;
- if (data)
- valid = (u32)(data << 24);
- else
- valid = data;
-
- if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
- DBG_PRINT(ERR_DBG,
- "ETHTOOL_WRITE_EEPROM Err: "
- "Cannot write into the specified offset\n");
- return -EFAULT;
- }
- cnt++;
- len--;
- }
-
- return 0;
-}
-
-/**
- * s2io_register_test - reads and writes into all clock domains.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @data : variable that returns the result of each of the tests conducted
- * by the driver.
- * Description:
- * Read and write into all clock domains. The NIC has 3 clock domains;
- * verify that the registers in all three regions are accessible.
- * Return value:
- * 0 on success.
- */
-
-static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64 = 0, exp_val;
- int fail = 0;
-
- val64 = readq(&bar0->pif_rd_swapper_fb);
- if (val64 != 0x123456789abcdefULL) {
- fail = 1;
- DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
- }
-
- val64 = readq(&bar0->rmac_pause_cfg);
- if (val64 != 0xc000ffff00000000ULL) {
- fail = 1;
- DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
- }
-
- val64 = readq(&bar0->rx_queue_cfg);
- if (sp->device_type == XFRAME_II_DEVICE)
- exp_val = 0x0404040404040404ULL;
- else
- exp_val = 0x0808080808080808ULL;
- if (val64 != exp_val) {
- fail = 1;
- DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
- }
-
- val64 = readq(&bar0->xgxs_efifo_cfg);
- if (val64 != 0x000000001923141EULL) {
- fail = 1;
- DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
- }
-
- val64 = 0x5A5A5A5A5A5A5A5AULL;
- writeq(val64, &bar0->xmsi_data);
- val64 = readq(&bar0->xmsi_data);
- if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
- fail = 1;
- DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
- }
-
- val64 = 0xA5A5A5A5A5A5A5A5ULL;
- writeq(val64, &bar0->xmsi_data);
- val64 = readq(&bar0->xmsi_data);
- if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
- fail = 1;
- DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
- }
-
- *data = fail;
- return fail;
-}
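-
-/*
- * Illustrative sketch, not from the original driver: the write test
- * above uses complementary patterns (0x5A.., then its inverse 0xA5..)
- * so every data line is exercised both high and low.  As a reusable
- * helper:
- */
-static int reg_pattern_test(void __iomem *reg)
-{
-	static const u64 patterns[] = {
-		0x5A5A5A5A5A5A5A5AULL,
-		0xA5A5A5A5A5A5A5A5ULL,
-	};
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(patterns); i++) {
-		writeq(patterns[i], reg);
-		if (readq(reg) != patterns[i])
-			return 1;	/* a data bit is stuck or shorted */
-	}
-	return 0;
-}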
-
-/**
- * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @data: variable that returns the result of each of the tests conducted by
- * the driver.
- * Description:
- * Verify that the EEPROM in the Xena can be programmed using the
- * I2C_CONTROL register.
- * Return value:
- * 0 on success.
- */
-
-static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
-{
- int fail = 0;
- u64 ret_data, org_4F0, org_7F0;
- u8 saved_4F0 = 0, saved_7F0 = 0;
- struct net_device *dev = sp->dev;
-
- /* Test Write Error at offset 0 */
- /* Note that SPI interface allows write access to all areas
- * of EEPROM. Hence doing all negative testing only for Xframe I.
- */
- if (sp->device_type == XFRAME_I_DEVICE)
- if (!write_eeprom(sp, 0, 0, 3))
- fail = 1;
-
- /* Save current values at offsets 0x4F0 and 0x7F0 */
- if (!read_eeprom(sp, 0x4F0, &org_4F0))
- saved_4F0 = 1;
- if (!read_eeprom(sp, 0x7F0, &org_7F0))
- saved_7F0 = 1;
-
- /* Test Write at offset 4f0 */
- if (write_eeprom(sp, 0x4F0, 0x012345, 3))
- fail = 1;
- if (read_eeprom(sp, 0x4F0, &ret_data))
- fail = 1;
-
- if (ret_data != 0x012345) {
- DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
- "Data written %llx Data read %llx\n",
- dev->name, (unsigned long long)0x12345,
- (unsigned long long)ret_data);
- fail = 1;
- }
-
-	/* Reset the EEPROM data to 0xFFFFFF */
- write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
-
- /* Test Write Request Error at offset 0x7c */
- if (sp->device_type == XFRAME_I_DEVICE)
- if (!write_eeprom(sp, 0x07C, 0, 3))
- fail = 1;
-
- /* Test Write Request at offset 0x7f0 */
- if (write_eeprom(sp, 0x7F0, 0x012345, 3))
- fail = 1;
- if (read_eeprom(sp, 0x7F0, &ret_data))
- fail = 1;
-
- if (ret_data != 0x012345) {
- DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
- "Data written %llx Data read %llx\n",
- dev->name, (unsigned long long)0x12345,
- (unsigned long long)ret_data);
- fail = 1;
- }
-
-	/* Reset the EEPROM data to 0xFFFFFF */
- write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
-
- if (sp->device_type == XFRAME_I_DEVICE) {
- /* Test Write Error at offset 0x80 */
- if (!write_eeprom(sp, 0x080, 0, 3))
- fail = 1;
-
- /* Test Write Error at offset 0xfc */
- if (!write_eeprom(sp, 0x0FC, 0, 3))
- fail = 1;
-
- /* Test Write Error at offset 0x100 */
- if (!write_eeprom(sp, 0x100, 0, 3))
- fail = 1;
-
- /* Test Write Error at offset 4ec */
- if (!write_eeprom(sp, 0x4EC, 0, 3))
- fail = 1;
- }
-
- /* Restore values at offsets 0x4F0 and 0x7F0 */
- if (saved_4F0)
- write_eeprom(sp, 0x4F0, org_4F0, 3);
- if (saved_7F0)
- write_eeprom(sp, 0x7F0, org_7F0, 3);
-
- *data = fail;
- return fail;
-}
-
-/**
- * s2io_bist_test - invokes the MemBist test of the card .
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @data: variable that returns the result of each of the tests conducted by
- * the driver.
- * Description:
- * This invokes the MemBist test of the card. We give around
- * 2 seconds for the test to complete. If it's still not complete
- * within this period, we consider the test to have failed.
- * Return value:
- * 0 on success and -1 on failure.
- */
-
-static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
-{
- u8 bist = 0;
- int cnt = 0, ret = -1;
-
- pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
- bist |= PCI_BIST_START;
- pci_write_config_word(sp->pdev, PCI_BIST, bist);
-
- while (cnt < 20) {
- pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
- if (!(bist & PCI_BIST_START)) {
- *data = (bist & PCI_BIST_CODE_MASK);
- ret = 0;
- break;
- }
- msleep(100);
- cnt++;
- }
-
- return ret;
-}
-
-/**
- * s2io_link_test - verifies the link state of the nic
- * @sp: private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @data: variable that returns the result of each of the tests conducted by
- * the driver.
- * Description:
- * The function verifies the link state of the NIC and updates the input
- * argument 'data' appropriately.
- * Return value:
- * 0 on success.
- */
-
-static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64;
-
- val64 = readq(&bar0->adapter_status);
- if (!(LINK_IS_UP(val64)))
- *data = 1;
- else
- *data = 0;
-
- return *data;
-}
-
-/**
- * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
- * @sp: private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @data: variable that returns the result of each of the tests
- * conducted by the driver.
- * Description:
- * This is one of the offline tests that verifies the read and write
- * access to the RldRam chip on the NIC.
- * Return value:
- * 0 on success.
- */
-
-static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
-{
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64;
- int cnt, iteration = 0, test_fail = 0;
-
- val64 = readq(&bar0->adapter_control);
- val64 &= ~ADAPTER_ECC_EN;
- writeq(val64, &bar0->adapter_control);
-
- val64 = readq(&bar0->mc_rldram_test_ctrl);
- val64 |= MC_RLDRAM_TEST_MODE;
- SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
-
- val64 = readq(&bar0->mc_rldram_mrs);
- val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
- SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
-
- val64 |= MC_RLDRAM_MRS_ENABLE;
- SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
-
- while (iteration < 2) {
- val64 = 0x55555555aaaa0000ULL;
- if (iteration == 1)
- val64 ^= 0xFFFFFFFFFFFF0000ULL;
- writeq(val64, &bar0->mc_rldram_test_d0);
-
- val64 = 0xaaaa5a5555550000ULL;
- if (iteration == 1)
- val64 ^= 0xFFFFFFFFFFFF0000ULL;
- writeq(val64, &bar0->mc_rldram_test_d1);
-
- val64 = 0x55aaaaaaaa5a0000ULL;
- if (iteration == 1)
- val64 ^= 0xFFFFFFFFFFFF0000ULL;
- writeq(val64, &bar0->mc_rldram_test_d2);
-
- val64 = (u64) (0x0000003ffffe0100ULL);
- writeq(val64, &bar0->mc_rldram_test_add);
-
- val64 = MC_RLDRAM_TEST_MODE |
- MC_RLDRAM_TEST_WRITE |
- MC_RLDRAM_TEST_GO;
- SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
-
- for (cnt = 0; cnt < 5; cnt++) {
- val64 = readq(&bar0->mc_rldram_test_ctrl);
- if (val64 & MC_RLDRAM_TEST_DONE)
- break;
- msleep(200);
- }
-
- if (cnt == 5)
- break;
-
- val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
- SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
-
- for (cnt = 0; cnt < 5; cnt++) {
- val64 = readq(&bar0->mc_rldram_test_ctrl);
- if (val64 & MC_RLDRAM_TEST_DONE)
- break;
- msleep(500);
- }
-
- if (cnt == 5)
- break;
-
- val64 = readq(&bar0->mc_rldram_test_ctrl);
- if (!(val64 & MC_RLDRAM_TEST_PASS))
- test_fail = 1;
-
- iteration++;
- }
-
- *data = test_fail;
-
- /* Bring the adapter out of test mode */
- SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
-
- return test_fail;
-}
-
-/**
- * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
- * @dev: pointer to netdev
- * @ethtest : pointer to an ethtool command specific structure that will be
- * returned to the user.
- * @data : variable that returns the result of each of the tests
- * conducted by the driver.
- * Description:
- * This function conducts 6 tests (4 offline and 2 online) to determine
- * the health of the card.
- * Return value:
- * void
- */
-
-static void s2io_ethtool_test(struct net_device *dev,
- struct ethtool_test *ethtest,
- uint64_t *data)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- int orig_state = netif_running(sp->dev);
-
- if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
- /* Offline Tests. */
- if (orig_state)
- s2io_close(sp->dev);
-
- if (s2io_register_test(sp, &data[0]))
- ethtest->flags |= ETH_TEST_FL_FAILED;
-
- s2io_reset(sp);
-
- if (s2io_rldram_test(sp, &data[3]))
- ethtest->flags |= ETH_TEST_FL_FAILED;
-
- s2io_reset(sp);
-
- if (s2io_eeprom_test(sp, &data[1]))
- ethtest->flags |= ETH_TEST_FL_FAILED;
-
- if (s2io_bist_test(sp, &data[4]))
- ethtest->flags |= ETH_TEST_FL_FAILED;
-
- if (orig_state)
- s2io_open(sp->dev);
-
- data[2] = 0;
- } else {
- /* Online Tests. */
- if (!orig_state) {
- DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
- dev->name);
- data[0] = -1;
- data[1] = -1;
- data[2] = -1;
- data[3] = -1;
- data[4] = -1;
- }
-
- if (s2io_link_test(sp, &data[2]))
- ethtest->flags |= ETH_TEST_FL_FAILED;
-
- data[0] = 0;
- data[1] = 0;
- data[3] = 0;
- data[4] = 0;
- }
-}
-
-static void s2io_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *estats,
- u64 *tmp_stats)
-{
- int i = 0, k;
- struct s2io_nic *sp = netdev_priv(dev);
- struct stat_block *stats = sp->mac_control.stats_info;
- struct swStat *swstats = &stats->sw_stat;
- struct xpakStat *xstats = &stats->xpak_stat;
-
- s2io_updt_stats(sp);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
- le32_to_cpu(stats->tmac_data_octets);
- tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_mcst_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_bcst_frms);
- tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
- le32_to_cpu(stats->tmac_ttl_octets);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_ucst_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_nucst_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
- le32_to_cpu(stats->tmac_any_err_frms);
- tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
- tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
- le32_to_cpu(stats->tmac_vld_ip);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
- le32_to_cpu(stats->tmac_drop_ip);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
- le32_to_cpu(stats->tmac_icmp);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
- le32_to_cpu(stats->tmac_rst_tcp);
- tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
- tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
- le32_to_cpu(stats->tmac_udp);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_vld_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
- le32_to_cpu(stats->rmac_data_octets);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_vld_mcst_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_vld_bcst_frms);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
- le32_to_cpu(stats->rmac_ttl_octets);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
- | le32_to_cpu(stats->rmac_accepted_ucst_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
- << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_discarded_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
- << 32 | le32_to_cpu(stats->rmac_drop_events);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_usized_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_osized_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_frag_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
- le32_to_cpu(stats->rmac_jabber_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
- le32_to_cpu(stats->rmac_ip);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
- le32_to_cpu(stats->rmac_drop_ip);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
- le32_to_cpu(stats->rmac_icmp);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
- le32_to_cpu(stats->rmac_udp);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
- le32_to_cpu(stats->rmac_err_drp_udp);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
- tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
- le32_to_cpu(stats->rmac_pause_cnt);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
- tmp_stats[i++] =
- (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
- le32_to_cpu(stats->rmac_accepted_ip);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
- tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
- tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
-
- /* Enhanced statistics exist only for Hercules */
- if (sp->device_type == XFRAME_II_DEVICE) {
- tmp_stats[i++] =
- le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
- tmp_stats[i++] =
- le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
- tmp_stats[i++] =
- le64_to_cpu(stats->rmac_ttl_8192_max_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
- tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
- tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
- tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
- }
-
- tmp_stats[i++] = 0;
- tmp_stats[i++] = swstats->single_ecc_errs;
- tmp_stats[i++] = swstats->double_ecc_errs;
- tmp_stats[i++] = swstats->parity_err_cnt;
- tmp_stats[i++] = swstats->serious_err_cnt;
- tmp_stats[i++] = swstats->soft_reset_cnt;
- tmp_stats[i++] = swstats->fifo_full_cnt;
- for (k = 0; k < MAX_RX_RINGS; k++)
- tmp_stats[i++] = swstats->ring_full_cnt[k];
- tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
- tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
- tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
- tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
- tmp_stats[i++] = xstats->alarm_laser_output_power_high;
- tmp_stats[i++] = xstats->alarm_laser_output_power_low;
- tmp_stats[i++] = xstats->warn_transceiver_temp_high;
- tmp_stats[i++] = xstats->warn_transceiver_temp_low;
- tmp_stats[i++] = xstats->warn_laser_bias_current_high;
- tmp_stats[i++] = xstats->warn_laser_bias_current_low;
- tmp_stats[i++] = xstats->warn_laser_output_power_high;
- tmp_stats[i++] = xstats->warn_laser_output_power_low;
- tmp_stats[i++] = swstats->clubbed_frms_cnt;
- tmp_stats[i++] = swstats->sending_both;
- tmp_stats[i++] = swstats->outof_sequence_pkts;
- tmp_stats[i++] = swstats->flush_max_pkts;
- if (swstats->num_aggregations) {
- u64 tmp = swstats->sum_avg_pkts_aggregated;
- int count = 0;
- /*
- * Since 64-bit divide does not work on all platforms,
- * do repeated subtraction.
- */
- while (tmp >= swstats->num_aggregations) {
- tmp -= swstats->num_aggregations;
- count++;
- }
- tmp_stats[i++] = count;
- } else
- tmp_stats[i++] = 0;
- tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
- tmp_stats[i++] = swstats->pci_map_fail_cnt;
- tmp_stats[i++] = swstats->watchdog_timer_cnt;
- tmp_stats[i++] = swstats->mem_allocated;
- tmp_stats[i++] = swstats->mem_freed;
- tmp_stats[i++] = swstats->link_up_cnt;
- tmp_stats[i++] = swstats->link_down_cnt;
- tmp_stats[i++] = swstats->link_up_time;
- tmp_stats[i++] = swstats->link_down_time;
-
- tmp_stats[i++] = swstats->tx_buf_abort_cnt;
- tmp_stats[i++] = swstats->tx_desc_abort_cnt;
- tmp_stats[i++] = swstats->tx_parity_err_cnt;
- tmp_stats[i++] = swstats->tx_link_loss_cnt;
- tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
-
- tmp_stats[i++] = swstats->rx_parity_err_cnt;
- tmp_stats[i++] = swstats->rx_abort_cnt;
- tmp_stats[i++] = swstats->rx_parity_abort_cnt;
- tmp_stats[i++] = swstats->rx_rda_fail_cnt;
- tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
- tmp_stats[i++] = swstats->rx_fcs_err_cnt;
- tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
- tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
- tmp_stats[i++] = swstats->rx_unkn_err_cnt;
- tmp_stats[i++] = swstats->tda_err_cnt;
- tmp_stats[i++] = swstats->pfc_err_cnt;
- tmp_stats[i++] = swstats->pcc_err_cnt;
- tmp_stats[i++] = swstats->tti_err_cnt;
- tmp_stats[i++] = swstats->tpa_err_cnt;
- tmp_stats[i++] = swstats->sm_err_cnt;
- tmp_stats[i++] = swstats->lso_err_cnt;
- tmp_stats[i++] = swstats->mac_tmac_err_cnt;
- tmp_stats[i++] = swstats->mac_rmac_err_cnt;
- tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
- tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
- tmp_stats[i++] = swstats->rc_err_cnt;
- tmp_stats[i++] = swstats->prc_pcix_err_cnt;
- tmp_stats[i++] = swstats->rpa_err_cnt;
- tmp_stats[i++] = swstats->rda_err_cnt;
- tmp_stats[i++] = swstats->rti_err_cnt;
- tmp_stats[i++] = swstats->mc_err_cnt;
-}
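
The repeated-subtraction loop in the aggregation branch above is really a 64-bit integer division spelled out by hand for platforms without native 64/32 division. A minimal sketch of the same computation using the kernel's do_div() helper from <asm/div64.h> (assuming num_aggregations fits in 32 bits) would be:

	u64 avg = swstats->sum_avg_pkts_aggregated;

	/* do_div() divides the u64 in place by a 32-bit divisor and
	 * returns the remainder; it is safe on 32-bit platforms too.
	 */
	do_div(avg, (u32)swstats->num_aggregations);
	tmp_stats[i++] = avg;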
-
-static int s2io_ethtool_get_regs_len(struct net_device *dev)
-{
- return XENA_REG_SPACE;
-}
-
-
-static int s2io_get_eeprom_len(struct net_device *dev)
-{
- return XENA_EEPROM_SPACE;
-}
-
-static int s2io_get_sset_count(struct net_device *dev, int sset)
-{
- struct s2io_nic *sp = netdev_priv(dev);
-
- switch (sset) {
- case ETH_SS_TEST:
- return S2IO_TEST_LEN;
- case ETH_SS_STATS:
- switch (sp->device_type) {
- case XFRAME_I_DEVICE:
- return XFRAME_I_STAT_LEN;
- case XFRAME_II_DEVICE:
- return XFRAME_II_STAT_LEN;
- default:
- return 0;
- }
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void s2io_ethtool_get_strings(struct net_device *dev,
- u32 stringset, u8 *data)
-{
- int stat_size = 0;
- struct s2io_nic *sp = netdev_priv(dev);
-
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
- break;
- case ETH_SS_STATS:
- stat_size = sizeof(ethtool_xena_stats_keys);
- memcpy(data, &ethtool_xena_stats_keys, stat_size);
- if (sp->device_type == XFRAME_II_DEVICE) {
- memcpy(data + stat_size,
- &ethtool_enhanced_stats_keys,
- sizeof(ethtool_enhanced_stats_keys));
- stat_size += sizeof(ethtool_enhanced_stats_keys);
- }
-
- memcpy(data + stat_size, &ethtool_driver_stats_keys,
- sizeof(ethtool_driver_stats_keys));
- }
-}
-
-static int s2io_set_features(struct net_device *dev, netdev_features_t features)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
-
- if (changed && netif_running(dev)) {
- int rc;
-
- s2io_stop_all_tx_queue(sp);
- s2io_card_down(sp);
- dev->features = features;
- rc = s2io_card_up(sp);
- if (rc)
- s2io_reset(sp);
- else
- s2io_start_all_tx_queue(sp);
-
- return rc ? rc : 1;
- }
-
- return 0;
-}
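
s2io_set_features() is the path taken when a feature such as LRO is toggled from user space; note that flipping LRO on a running interface costs a full s2io_card_down()/s2io_card_up() cycle. A typical trigger from user space (ethX is a placeholder interface name):

	ethtool -K ethX lro off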
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = s2io_ethtool_gdrvinfo,
- .get_regs_len = s2io_ethtool_get_regs_len,
- .get_regs = s2io_ethtool_gregs,
- .get_link = ethtool_op_get_link,
- .get_eeprom_len = s2io_get_eeprom_len,
- .get_eeprom = s2io_ethtool_geeprom,
- .set_eeprom = s2io_ethtool_seeprom,
- .get_ringparam = s2io_ethtool_gringparam,
- .get_pauseparam = s2io_ethtool_getpause_data,
- .set_pauseparam = s2io_ethtool_setpause_data,
- .self_test = s2io_ethtool_test,
- .get_strings = s2io_ethtool_get_strings,
- .set_phys_id = s2io_ethtool_set_led,
- .get_ethtool_stats = s2io_get_ethtool_stats,
- .get_sset_count = s2io_get_sset_count,
- .get_link_ksettings = s2io_ethtool_get_link_ksettings,
- .set_link_ksettings = s2io_ethtool_set_link_ksettings,
-};
-
-/**
- * s2io_ioctl - Entry point for the Ioctl
- * @dev : Device pointer.
- * @rq : An IOCTL specific structure that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
- * @cmd : This is used to distinguish between the different commands that
- * can be passed to the IOCTL functions.
- * Description:
- * Currently no special functionality is supported in IOCTL, hence the
- * function always returns -EOPNOTSUPP.
- */
-
-static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- return -EOPNOTSUPP;
-}
-
-/**
- * s2io_change_mtu - entry point to change MTU size for the device.
- * @dev : device pointer.
- * @new_mtu : the new MTU size for the device.
- * Description: A driver entry point to change MTU size for the device.
- * Before changing the MTU the device must be stopped.
- * Return value:
- * 0 on success and an appropriate (-)ve integer as defined in errno.h
- * file on failure.
- */
-
-static int s2io_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- int ret = 0;
-
- WRITE_ONCE(dev->mtu, new_mtu);
- if (netif_running(dev)) {
- s2io_stop_all_tx_queue(sp);
- s2io_card_down(sp);
- ret = s2io_card_up(sp);
- if (ret) {
- DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
- __func__);
- return ret;
- }
- s2io_wake_all_tx_queue(sp);
- } else { /* Device is down */
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64 = new_mtu;
-
- writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
- }
-
- return ret;
-}
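
In the interface-down branch above, the new MTU is written straight into the RMAC maximum-payload-length register. The driver's vBIT() macro lives in s2io.h; as a sketch of its shape, it places a value in a field of a given width at a bit offset counted from the most significant end of the 64-bit register:

	/* place 'val' in an 'sz'-bit wide field starting 'loc' bits
	 * from the MSB of a 64-bit register value
	 */
	#define vBIT(val, loc, sz)	(((u64)(val)) << (64 - (loc) - (sz)))

	/* so vBIT(new_mtu, 2, 14) puts the 14-bit MTU at bits 2..15 */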
-
-/**
- * s2io_set_link - Set the LInk status
- * @work: work struct containing a pointer to device private structure
- * Description: Sets the link status for the adapter
- */
-
-static void s2io_set_link(struct work_struct *work)
-{
- struct s2io_nic *nic = container_of(work, struct s2io_nic,
- set_link_task);
- struct net_device *dev = nic->dev;
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 val64;
- u16 subid;
-
- rtnl_lock();
-
- if (!netif_running(dev))
- goto out_unlock;
-
- if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
- /* The card is being reset, no point doing anything */
- goto out_unlock;
- }
-
- subid = nic->pdev->subsystem_device;
- if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
- /*
-		 * Allow a small delay for the NIC's self-initiated
- * cleanup to complete.
- */
- msleep(100);
- }
-
- val64 = readq(&bar0->adapter_status);
- if (LINK_IS_UP(val64)) {
- if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
- if (verify_xena_quiescence(nic)) {
- val64 = readq(&bar0->adapter_control);
- val64 |= ADAPTER_CNTL_EN;
- writeq(val64, &bar0->adapter_control);
- if (CARDS_WITH_FAULTY_LINK_INDICATORS(
- nic->device_type, subid)) {
- val64 = readq(&bar0->gpio_control);
- val64 |= GPIO_CTRL_GPIO_0;
- writeq(val64, &bar0->gpio_control);
- val64 = readq(&bar0->gpio_control);
- } else {
- val64 |= ADAPTER_LED_ON;
- writeq(val64, &bar0->adapter_control);
- }
- nic->device_enabled_once = true;
- } else {
- DBG_PRINT(ERR_DBG,
- "%s: Error: device is not Quiescent\n",
- dev->name);
- s2io_stop_all_tx_queue(nic);
- }
- }
- val64 = readq(&bar0->adapter_control);
- val64 |= ADAPTER_LED_ON;
- writeq(val64, &bar0->adapter_control);
- s2io_link(nic, LINK_UP);
- } else {
- if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
- subid)) {
- val64 = readq(&bar0->gpio_control);
- val64 &= ~GPIO_CTRL_GPIO_0;
- writeq(val64, &bar0->gpio_control);
- val64 = readq(&bar0->gpio_control);
- }
- /* turn off LED */
- val64 = readq(&bar0->adapter_control);
- val64 = val64 & (~ADAPTER_LED_ON);
- writeq(val64, &bar0->adapter_control);
- s2io_link(nic, LINK_DOWN);
- }
- clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
-
-out_unlock:
- rtnl_unlock();
-}
-
-static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
- struct buffAdd *ba,
- struct sk_buff **skb, u64 *temp0, u64 *temp1,
- u64 *temp2, int size)
-{
- struct net_device *dev = sp->dev;
- struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
-
- if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
- struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
- /* allocate skb */
- if (*skb) {
- DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
- /*
-			 * As Rx frames are not going to be processed,
-			 * use the same mapped address for the RxD
-			 * buffer pointer
- */
- rxdp1->Buffer0_ptr = *temp0;
- } else {
- *skb = netdev_alloc_skb(dev, size);
- if (!(*skb)) {
- DBG_PRINT(INFO_DBG,
- "%s: Out of memory to allocate %s\n",
- dev->name, "1 buf mode SKBs");
- stats->mem_alloc_fail_cnt++;
- return -ENOMEM ;
- }
- stats->mem_allocated += (*skb)->truesize;
-			/* store the mapped addr in a temp variable
-			 * such that it will be used for the next RxD
-			 * whose Host_Control is NULL
- */
- rxdp1->Buffer0_ptr = *temp0 =
- dma_map_single(&sp->pdev->dev, (*skb)->data,
- size - NET_IP_ALIGN,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
- goto memalloc_failed;
- rxdp->Host_Control = (unsigned long) (*skb);
- }
- } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
- struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
- /* Two buffer Mode */
- if (*skb) {
- rxdp3->Buffer2_ptr = *temp2;
- rxdp3->Buffer0_ptr = *temp0;
- rxdp3->Buffer1_ptr = *temp1;
- } else {
- *skb = netdev_alloc_skb(dev, size);
- if (!(*skb)) {
- DBG_PRINT(INFO_DBG,
- "%s: Out of memory to allocate %s\n",
- dev->name,
- "2 buf mode SKBs");
- stats->mem_alloc_fail_cnt++;
- return -ENOMEM;
- }
- stats->mem_allocated += (*skb)->truesize;
- rxdp3->Buffer2_ptr = *temp2 =
- dma_map_single(&sp->pdev->dev, (*skb)->data,
- dev->mtu + 4, DMA_FROM_DEVICE);
- if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
- goto memalloc_failed;
- rxdp3->Buffer0_ptr = *temp0 =
- dma_map_single(&sp->pdev->dev, ba->ba_0,
- BUF0_LEN, DMA_FROM_DEVICE);
- if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp3->Buffer2_ptr,
- dev->mtu + 4,
- DMA_FROM_DEVICE);
- goto memalloc_failed;
- }
- rxdp->Host_Control = (unsigned long) (*skb);
-
-			/* Buffer-1 will be a dummy buffer, not used */
- rxdp3->Buffer1_ptr = *temp1 =
- dma_map_single(&sp->pdev->dev, ba->ba_1,
- BUF1_LEN, DMA_FROM_DEVICE);
- if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp3->Buffer0_ptr,
- BUF0_LEN, DMA_FROM_DEVICE);
- dma_unmap_single(&sp->pdev->dev,
- (dma_addr_t)rxdp3->Buffer2_ptr,
- dev->mtu + 4,
- DMA_FROM_DEVICE);
- goto memalloc_failed;
- }
- }
- }
- return 0;
-
-memalloc_failed:
- stats->pci_map_fail_cnt++;
- stats->mem_freed += (*skb)->truesize;
- dev_kfree_skb(*skb);
- return -ENOMEM;
-}
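
set_rxd_buffer_pointer() follows the standard streaming-DMA discipline: every dma_map_single() result is checked with dma_mapping_error(), and when a later mapping fails, the mappings made so far are unwound before the skb is freed. Reduced to its core (pdev, skb and len stand in for the driver's fields):

	dma_addr_t addr;

	addr = dma_map_single(&pdev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, addr)) {
		dev_kfree_skb(skb);	/* nothing mapped yet, just free */
		return -ENOMEM;
	}
	/* ... if a subsequent mapping fails, unwind this one first: */
	dma_unmap_single(&pdev->dev, addr, len, DMA_FROM_DEVICE);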
-
-static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
- int size)
-{
- struct net_device *dev = sp->dev;
- if (sp->rxd_mode == RXD_MODE_1) {
- rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
- } else if (sp->rxd_mode == RXD_MODE_3B) {
- rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
- rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
- rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
- }
-}
-
-static int rxd_owner_bit_reset(struct s2io_nic *sp)
-{
- int i, j, k, blk_cnt = 0, size;
- struct config_param *config = &sp->config;
- struct mac_info *mac_control = &sp->mac_control;
- struct net_device *dev = sp->dev;
- struct RxD_t *rxdp = NULL;
- struct sk_buff *skb = NULL;
- struct buffAdd *ba = NULL;
- u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
-
- /* Calculate the size based on ring mode */
- size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
- HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
- if (sp->rxd_mode == RXD_MODE_1)
- size += NET_IP_ALIGN;
- else if (sp->rxd_mode == RXD_MODE_3B)
- size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
- struct ring_info *ring = &mac_control->rings[i];
-
- blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
-
- for (j = 0; j < blk_cnt; j++) {
- for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
- rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
- if (sp->rxd_mode == RXD_MODE_3B)
- ba = &ring->ba[j][k];
- if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
- &temp0_64,
- &temp1_64,
- &temp2_64,
- size) == -ENOMEM) {
- return 0;
- }
-
- set_rxd_buffer_size(sp, rxdp, size);
- dma_wmb();
- /* flip the Ownership bit to Hardware */
- rxdp->Control_1 |= RXD_OWN_XENA;
- }
- }
- }
- return 0;
-
-}
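
The dma_wmb() ahead of the ownership flip is the load-bearing detail in rxd_owner_bit_reset(): it orders the descriptor field writes against the RXD_OWN_XENA store, so the NIC can never observe an owned descriptor with stale buffer pointers or sizes. The general idiom:

	rxdp->Buffer0_ptr = mapping;		/* fill descriptor fields    */
	rxdp->Control_2   = size_bits;
	dma_wmb();				/* order writes vs. the flip */
	rxdp->Control_1  |= RXD_OWN_XENA;	/* hand descriptor to HW     */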
-
-static int s2io_add_isr(struct s2io_nic *sp)
-{
- int ret = 0;
- struct net_device *dev = sp->dev;
- int err = 0;
-
- if (sp->config.intr_type == MSI_X)
- ret = s2io_enable_msi_x(sp);
- if (ret) {
- DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
- sp->config.intr_type = INTA;
- }
-
- /*
-	 * Store the values of the MSI-X table in
-	 * the s2io_nic structure
- */
- store_xmsi_data(sp);
-
- /* After proper initialization of H/W, register ISR */
- if (sp->config.intr_type == MSI_X) {
- int i, msix_rx_cnt = 0;
-
- for (i = 0; i < sp->num_entries; i++) {
- if (sp->s2io_entries[i].in_use == MSIX_FLG) {
- if (sp->s2io_entries[i].type ==
- MSIX_RING_TYPE) {
- snprintf(sp->desc[i],
- sizeof(sp->desc[i]),
- "%s:MSI-X-%d-RX",
- dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_ring_handle,
- 0,
- sp->desc[i],
- sp->s2io_entries[i].arg);
- } else if (sp->s2io_entries[i].type ==
- MSIX_ALARM_TYPE) {
- snprintf(sp->desc[i],
- sizeof(sp->desc[i]),
- "%s:MSI-X-%d-TX",
- dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_fifo_handle,
- 0,
- sp->desc[i],
- sp->s2io_entries[i].arg);
-
- }
- /* if either data or addr is zero print it. */
- if (!(sp->msix_info[i].addr &&
- sp->msix_info[i].data)) {
- DBG_PRINT(ERR_DBG,
- "%s @Addr:0x%llx Data:0x%llx\n",
- sp->desc[i],
- (unsigned long long)
- sp->msix_info[i].addr,
- (unsigned long long)
- ntohl(sp->msix_info[i].data));
- } else
- msix_rx_cnt++;
- if (err) {
- remove_msix_isr(sp);
-
- DBG_PRINT(ERR_DBG,
- "%s:MSI-X-%d registration "
- "failed\n", dev->name, i);
-
- DBG_PRINT(ERR_DBG,
- "%s: Defaulting to INTA\n",
- dev->name);
- sp->config.intr_type = INTA;
- break;
- }
- sp->s2io_entries[i].in_use =
- MSIX_REGISTERED_SUCCESS;
- }
- }
- if (!err) {
- pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
- DBG_PRINT(INFO_DBG,
- "MSI-X-TX entries enabled through alarm vector\n");
- }
- }
- if (sp->config.intr_type == INTA) {
- err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
- sp->name, dev);
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
- dev->name);
- return -1;
- }
- }
- return 0;
-}
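
s2io_add_isr() degrades gracefully: if any MSI-X vector fails to register, everything registered so far is torn down and the driver drops back to legacy INTA. A condensed sketch of that fallback shape:

	err = request_irq(vector, handler, 0, name, arg);
	if (err) {
		remove_msix_isr(sp);		/* undo vectors registered so far */
		sp->config.intr_type = INTA;	/* fall back to legacy interrupt  */
	}
	if (sp->config.intr_type == INTA)
		err = request_irq(sp->pdev->irq, s2io_isr,
				  IRQF_SHARED, sp->name, dev);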
-
-static void s2io_rem_isr(struct s2io_nic *sp)
-{
- if (sp->config.intr_type == MSI_X)
- remove_msix_isr(sp);
- else
- remove_inta_isr(sp);
-}
-
-static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
-{
- int cnt = 0;
- struct XENA_dev_config __iomem *bar0 = sp->bar0;
- register u64 val64 = 0;
- struct config_param *config;
- config = &sp->config;
-
- if (!is_s2io_card_up(sp))
- return;
-
- timer_delete_sync(&sp->alarm_timer);
- /* If s2io_set_link task is executing, wait till it completes. */
- while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
- msleep(50);
- clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
-
- /* Disable napi */
- if (sp->config.napi) {
- int off = 0;
- if (config->intr_type == MSI_X) {
- for (; off < sp->config.rx_ring_num; off++)
- napi_disable(&sp->mac_control.rings[off].napi);
- }
- else
- napi_disable(&sp->napi);
- }
-
- /* disable Tx and Rx traffic on the NIC */
- if (do_io)
- stop_nic(sp);
-
- s2io_rem_isr(sp);
-
- /* stop the tx queue, indicate link down */
- s2io_link(sp, LINK_DOWN);
-
- /* Check if the device is Quiescent and then Reset the NIC */
- while (do_io) {
-		/* As per the HW requirement we need to replenish the
-		 * receive buffers to avoid a ring bump. Since there is
-		 * no intention of processing the Rx frames at this point,
-		 * we just set the ownership bit of the RxDs in each Rx
-		 * ring to HW and set the appropriate buffer sizes
-		 * based on the ring mode.
-		 */
- rxd_owner_bit_reset(sp);
-
- val64 = readq(&bar0->adapter_status);
- if (verify_xena_quiescence(sp)) {
- if (verify_pcc_quiescent(sp, sp->device_enabled_once))
- break;
- }
-
- msleep(50);
- cnt++;
- if (cnt == 10) {
- DBG_PRINT(ERR_DBG, "Device not Quiescent - "
- "adapter status reads 0x%llx\n",
- (unsigned long long)val64);
- break;
- }
- }
- if (do_io)
- s2io_reset(sp);
-
- /* Free all Tx buffers */
- free_tx_buffers(sp);
-
- /* Free all Rx buffers */
- free_rx_buffers(sp);
-
- clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
-}
-
-static void s2io_card_down(struct s2io_nic *sp)
-{
- do_s2io_card_down(sp, 1);
-}
-
-static int s2io_card_up(struct s2io_nic *sp)
-{
- int i, ret = 0;
- struct config_param *config;
- struct mac_info *mac_control;
- struct net_device *dev = sp->dev;
- u16 interruptible;
-
- /* Initialize the H/W I/O registers */
- ret = init_nic(sp);
- if (ret != 0) {
- DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
- dev->name);
- if (ret != -EIO)
- s2io_reset(sp);
- return ret;
- }
-
- /*
- * Initializing the Rx buffers. For now we are considering only 1
- * Rx ring and initializing buffers into 30 Rx blocks
- */
- config = &sp->config;
- mac_control = &sp->mac_control;
-
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- ring->mtu = dev->mtu;
- ring->lro = !!(dev->features & NETIF_F_LRO);
- ret = fill_rx_buffers(sp, ring, 1);
- if (ret) {
- DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
- dev->name);
- ret = -ENOMEM;
- goto err_fill_buff;
- }
- DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
- ring->rx_bufs_left);
- }
-
- /* Initialise napi */
- if (config->napi) {
- if (config->intr_type == MSI_X) {
- for (i = 0; i < sp->config.rx_ring_num; i++)
- napi_enable(&sp->mac_control.rings[i].napi);
- } else {
- napi_enable(&sp->napi);
- }
- }
-
- /* Maintain the state prior to the open */
- if (sp->promisc_flg)
- sp->promisc_flg = 0;
- if (sp->m_cast_flg) {
- sp->m_cast_flg = 0;
- sp->all_multi_pos = 0;
- }
-
- /* Setting its receive mode */
- s2io_set_multicast(dev, true);
-
- if (dev->features & NETIF_F_LRO) {
- /* Initialize max aggregatable pkts per session based on MTU */
- sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
- /* Check if we can use (if specified) user provided value */
- if (lro_max_pkts < sp->lro_max_aggr_per_sess)
- sp->lro_max_aggr_per_sess = lro_max_pkts;
- }
-
- /* Enable Rx Traffic and interrupts on the NIC */
- if (start_nic(sp)) {
- DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
- ret = -ENODEV;
- goto err_out;
- }
-
- /* Add interrupt service routine */
- if (s2io_add_isr(sp) != 0) {
- if (sp->config.intr_type == MSI_X)
- s2io_rem_isr(sp);
- ret = -ENODEV;
- goto err_out;
- }
-
- timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
- mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
-
- set_bit(__S2IO_STATE_CARD_UP, &sp->state);
-
- /* Enable select interrupts */
- en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
- if (sp->config.intr_type != INTA) {
- interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
- en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
- } else {
- interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
- interruptible |= TX_PIC_INTR;
- en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
- }
-
- return 0;
-
-err_out:
- if (config->napi) {
- if (config->intr_type == MSI_X) {
- for (i = 0; i < sp->config.rx_ring_num; i++)
- napi_disable(&sp->mac_control.rings[i].napi);
- } else {
- napi_disable(&sp->napi);
- }
- }
-err_fill_buff:
- s2io_reset(sp);
- free_rx_buffers(sp);
- return ret;
-}
-
-/**
- * s2io_restart_nic - Resets the NIC.
- * @work : work struct containing a pointer to the device private structure
- * Description:
- * This function is scheduled to be run by the s2io_tx_watchdog
- * function after 0.5 secs to reset the NIC. The idea is to reduce
- * the run time of the watchdog routine which is run holding a
- * spin lock.
- */
-
-static void s2io_restart_nic(struct work_struct *work)
-{
- struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
- struct net_device *dev = sp->dev;
-
- rtnl_lock();
-
- if (!netif_running(dev))
- goto out_unlock;
-
- s2io_card_down(sp);
- if (s2io_card_up(sp)) {
- DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
- }
- s2io_wake_all_tx_queue(sp);
- DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
-out_unlock:
- rtnl_unlock();
-}
-
-/**
- * s2io_tx_watchdog - Watchdog for transmit side.
- * @dev : Pointer to net device structure
- * @txqueue: index of the hanging queue
- * Description:
- * This function is triggered if the Tx Queue is stopped
- * for a pre-defined amount of time when the Interface is still up.
- * If the Interface is jammed in such a situation, the hardware is
- * reset (by s2io_close) and restarted again (by s2io_open) to
- * overcome any problem that might have been caused in the hardware.
- * Return value:
- * void
- */
-
-static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
-{
- struct s2io_nic *sp = netdev_priv(dev);
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
-
- if (netif_carrier_ok(dev)) {
- swstats->watchdog_timer_cnt++;
- schedule_work(&sp->rst_timer_task);
- swstats->soft_reset_cnt++;
- }
-}
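
Because ndo_tx_timeout runs in a context where sleeping is not allowed, the handler above only bumps counters and schedules rst_timer_task; the heavyweight card_down()/card_up() sequence then runs in s2io_restart_nic() in process context. The generic deferral pattern (names here are illustrative, not from the driver):

	static void my_reset_task(struct work_struct *work)
	{
		/* process context: may sleep, take rtnl_lock(), etc. */
	}
	static DECLARE_WORK(my_reset_work, my_reset_task);

	/* from the atomic watchdog path: */
	schedule_work(&my_reset_work);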
-
-/**
- * rx_osm_handler - To perform some OS related operations on SKB.
- * @ring_data : the ring from which this RxD was extracted.
- * @rxdp: descriptor
- * Description:
- * This function is called by the Rx interrupt service routine to perform
- * some OS related operations on the SKB before passing it to the upper
- * layers. It mainly checks if the checksum is OK; if so, it adds it to the
- * SKB's cksum variable, increments the Rx packet count and passes the SKB
- * to the upper layer. If the checksum is wrong, it increments the Rx
- * packet error count, frees the SKB and returns an error.
- * Return value:
- * SUCCESS on success and -1 on failure.
- */
-static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
-{
- struct s2io_nic *sp = ring_data->nic;
- struct net_device *dev = ring_data->dev;
- struct sk_buff *skb = (struct sk_buff *)
- ((unsigned long)rxdp->Host_Control);
- int ring_no = ring_data->ring_no;
- u16 l3_csum, l4_csum;
- unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
- struct lro *lro;
- u8 err_mask;
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
-
- skb->dev = dev;
-
- if (err) {
- /* Check for parity error */
- if (err & 0x1)
- swstats->parity_err_cnt++;
-
- err_mask = err >> 48;
- switch (err_mask) {
- case 1:
- swstats->rx_parity_err_cnt++;
- break;
-
- case 2:
- swstats->rx_abort_cnt++;
- break;
-
- case 3:
- swstats->rx_parity_abort_cnt++;
- break;
-
- case 4:
- swstats->rx_rda_fail_cnt++;
- break;
-
- case 5:
- swstats->rx_unkn_prot_cnt++;
- break;
-
- case 6:
- swstats->rx_fcs_err_cnt++;
- break;
-
- case 7:
- swstats->rx_buf_size_err_cnt++;
- break;
-
- case 8:
- swstats->rx_rxd_corrupt_cnt++;
- break;
-
- case 15:
- swstats->rx_unkn_err_cnt++;
- break;
- }
- /*
-		 * Drop the packet if the transfer code is bad. The exception
-		 * is 0x5, which could be due to an unsupported IPv6 extension
-		 * header. In that case we let the stack handle the packet.
-		 * Note that since the checksum will then be incorrect, the
-		 * stack will validate it itself.
- */
- if (err_mask != 0x5) {
- DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
- dev->name, err_mask);
- dev->stats.rx_crc_errors++;
- swstats->mem_freed
- += skb->truesize;
- dev_kfree_skb(skb);
- ring_data->rx_bufs_left -= 1;
- rxdp->Host_Control = 0;
- return 0;
- }
- }
-
- rxdp->Host_Control = 0;
- if (sp->rxd_mode == RXD_MODE_1) {
- int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
-
- skb_put(skb, len);
- } else if (sp->rxd_mode == RXD_MODE_3B) {
- int get_block = ring_data->rx_curr_get_info.block_index;
- int get_off = ring_data->rx_curr_get_info.offset;
- int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
- int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
-
- struct buffAdd *ba = &ring_data->ba[get_block][get_off];
- skb_put_data(skb, ba->ba_0, buf0_len);
- skb_put(skb, buf2_len);
- }
-
- if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
- ((!ring_data->lro) ||
- (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
- (dev->features & NETIF_F_RXCSUM)) {
- l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
- l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
- if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
- /*
- * NIC verifies if the Checksum of the received
- * frame is Ok or not and accordingly returns
- * a flag in the RxD.
- */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (ring_data->lro) {
- u32 tcp_len = 0;
- u8 *tcp;
- int ret = 0;
-
- ret = s2io_club_tcp_session(ring_data,
- skb->data, &tcp,
- &tcp_len, &lro,
- rxdp, sp);
- switch (ret) {
- case 3: /* Begin anew */
- lro->parent = skb;
- goto aggregate;
- case 1: /* Aggregate */
- lro_append_pkt(sp, lro, skb, tcp_len);
- goto aggregate;
- case 4: /* Flush session */
- lro_append_pkt(sp, lro, skb, tcp_len);
- queue_rx_frame(lro->parent,
- lro->vlan_tag);
- clear_lro_session(lro);
- swstats->flush_max_pkts++;
- goto aggregate;
- case 2: /* Flush both */
- lro->parent->data_len = lro->frags_len;
- swstats->sending_both++;
- queue_rx_frame(lro->parent,
- lro->vlan_tag);
- clear_lro_session(lro);
- goto send_up;
- case 0: /* sessions exceeded */
- case -1: /* non-TCP or not L2 aggregatable */
- case 5: /*
- * First pkt in session not
- * L3/L4 aggregatable
- */
- break;
- default:
- DBG_PRINT(ERR_DBG,
- "%s: Samadhana!!\n",
- __func__);
- BUG();
- }
- }
- } else {
- /*
- * Packet with erroneous checksum, let the
- * upper layers deal with it.
- */
- skb_checksum_none_assert(skb);
- }
- } else
- skb_checksum_none_assert(skb);
-
- swstats->mem_freed += skb->truesize;
-send_up:
- skb_record_rx_queue(skb, ring_no);
- queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
-aggregate:
- sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
- return SUCCESS;
-}
-
-/**
- * s2io_link - stops/starts the Tx queue.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @link : indicates whether link is UP/DOWN.
- * Description:
- * This function stops/starts the Tx queue depending on whether the link
- * status of the NIC is down or up. This is called by the Alarm
- * interrupt handler whenever a link change interrupt comes up.
- * Return value:
- * void.
- */
-
-static void s2io_link(struct s2io_nic *sp, int link)
-{
- struct net_device *dev = sp->dev;
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
-
- if (link != sp->last_link_state) {
- init_tti(sp, link, false);
- if (link == LINK_DOWN) {
- DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
- s2io_stop_all_tx_queue(sp);
- netif_carrier_off(dev);
- if (swstats->link_up_cnt)
- swstats->link_up_time =
- jiffies - sp->start_time;
- swstats->link_down_cnt++;
- } else {
- DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
- if (swstats->link_down_cnt)
- swstats->link_down_time =
- jiffies - sp->start_time;
- swstats->link_up_cnt++;
- netif_carrier_on(dev);
- s2io_wake_all_tx_queue(sp);
- }
- }
- sp->last_link_state = link;
- sp->start_time = jiffies;
-}
-
-/**
- * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * Description:
- * This function initializes a few of the PCI and PCI-X configuration registers
- * with recommended values.
- * Return value:
- * void
- */
-
-static void s2io_init_pci(struct s2io_nic *sp)
-{
- u16 pci_cmd = 0, pcix_cmd = 0;
-
- /* Enable Data Parity Error Recovery in PCI-X command register. */
- pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- &(pcix_cmd));
- pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- (pcix_cmd | 1));
- pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- &(pcix_cmd));
-
- /* Set the PErr Response bit in PCI command register. */
- pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
- pci_write_config_word(sp->pdev, PCI_COMMAND,
- (pci_cmd | PCI_COMMAND_PARITY));
- pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
-}
-
-static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
- u8 *dev_multiq)
-{
- int i;
-
- if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
- DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
- "(%d) not supported\n", tx_fifo_num);
-
- if (tx_fifo_num < 1)
- tx_fifo_num = 1;
- else
- tx_fifo_num = MAX_TX_FIFOS;
-
- DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
- }
-
- if (multiq)
- *dev_multiq = multiq;
-
- if (tx_steering_type && (1 == tx_fifo_num)) {
- if (tx_steering_type != TX_DEFAULT_STEERING)
- DBG_PRINT(ERR_DBG,
- "Tx steering is not supported with "
- "one fifo. Disabling Tx steering.\n");
- tx_steering_type = NO_STEERING;
- }
-
- if ((tx_steering_type < NO_STEERING) ||
- (tx_steering_type > TX_DEFAULT_STEERING)) {
- DBG_PRINT(ERR_DBG,
- "Requested transmit steering not supported\n");
- DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
- tx_steering_type = NO_STEERING;
- }
-
- if (rx_ring_num > MAX_RX_RINGS) {
- DBG_PRINT(ERR_DBG,
- "Requested number of rx rings not supported\n");
- DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
- MAX_RX_RINGS);
- rx_ring_num = MAX_RX_RINGS;
- }
-
- if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
- DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
- "Defaulting to INTA\n");
- *dev_intr_type = INTA;
- }
-
- if ((*dev_intr_type == MSI_X) &&
- ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
- (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
- DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
- "Defaulting to INTA\n");
- *dev_intr_type = INTA;
- }
-
- if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
- DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
- DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
- rx_ring_mode = 1;
- }
-
- for (i = 0; i < MAX_RX_RINGS; i++)
- if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
- DBG_PRINT(ERR_DBG, "Requested rx ring size not "
- "supported\nDefaulting to %d\n",
- MAX_RX_BLOCKS_PER_RING);
- rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
- }
-
- return SUCCESS;
-}
-
-/**
- * rts_ds_steer - Receive traffic steering based on the IPv4 TOS or the IPv6 Traffic Class field.
- * @nic: device private variable
- * @ds_codepoint: data
- * @ring: ring index
- * Description: The function configures the receive steering to
- * desired receive ring.
- * Return Value: SUCCESS on success and
- * '-1' on failure (endian settings incorrect).
- */
-static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
-{
- struct XENA_dev_config __iomem *bar0 = nic->bar0;
- register u64 val64 = 0;
-
- if (ds_codepoint > 63)
- return FAILURE;
-
- val64 = RTS_DS_MEM_DATA(ring);
- writeq(val64, &bar0->rts_ds_mem_data);
-
- val64 = RTS_DS_MEM_CTRL_WE |
- RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
- RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
-
- writeq(val64, &bar0->rts_ds_mem_ctrl);
-
- return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
- RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
- S2IO_BIT_RESET, true);
-}
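
The 0..63 bound on ds_codepoint reflects the 6-bit DSCP field, which occupies the upper six bits of the IPv4 TOS byte (or the IPv6 Traffic Class). A hypothetical caller extracting the codepoint from a received packet might do:

	#include <net/dsfield.h>

	u8 dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;	/* 0..63 */

	rts_ds_steer(nic, dscp, ring_no);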
-
-static const struct net_device_ops s2io_netdev_ops = {
- .ndo_open = s2io_open,
- .ndo_stop = s2io_close,
- .ndo_get_stats = s2io_get_stats,
- .ndo_start_xmit = s2io_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = s2io_ndo_set_multicast,
- .ndo_eth_ioctl = s2io_ioctl,
- .ndo_set_mac_address = s2io_set_mac_addr,
- .ndo_change_mtu = s2io_change_mtu,
- .ndo_set_features = s2io_set_features,
- .ndo_tx_timeout = s2io_tx_watchdog,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = s2io_netpoll,
-#endif
-};
-
-/**
- * s2io_init_nic - Initialization of the adapter.
- * @pdev : structure containing the PCI related information of the device.
- * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
- * Description:
- * The function initializes an adapter identified by the pci_dev structure.
- * All OS related initialization, including memory and device structure
- * setup and initialization of the device private variables, is done.
- * Also the swapper
- * control register is initialized to enable read and write into the I/O
- * registers of the device.
- * Return value:
- * returns 0 on success and negative on failure.
- */
-
-static int
-s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
-{
- struct s2io_nic *sp;
- struct net_device *dev;
- int i, j, ret;
- u32 mac_up, mac_down;
- u64 val64 = 0, tmp64 = 0;
- struct XENA_dev_config __iomem *bar0 = NULL;
- u16 subid;
- struct config_param *config;
- struct mac_info *mac_control;
- int mode;
- u8 dev_intr_type = intr_type;
- u8 dev_multiq = 0;
-
- ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
- if (ret)
- return ret;
-
- ret = pci_enable_device(pdev);
- if (ret) {
- DBG_PRINT(ERR_DBG,
- "%s: pci_enable_device failed\n", __func__);
- return ret;
- }
-
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
- } else {
- pci_disable_device(pdev);
- return -ENOMEM;
- }
- ret = pci_request_regions(pdev, s2io_driver_name);
- if (ret) {
- DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
- __func__, ret);
- pci_disable_device(pdev);
- return -ENODEV;
- }
- if (dev_multiq)
- dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
- else
- dev = alloc_etherdev(sizeof(struct s2io_nic));
- if (dev == NULL) {
- pci_disable_device(pdev);
- pci_release_regions(pdev);
- return -ENODEV;
- }
-
- pci_set_master(pdev);
- pci_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- /* Private member variable initialized to s2io NIC structure */
- sp = netdev_priv(dev);
- sp->dev = dev;
- sp->pdev = pdev;
- sp->device_enabled_once = false;
- if (rx_ring_mode == 1)
- sp->rxd_mode = RXD_MODE_1;
- if (rx_ring_mode == 2)
- sp->rxd_mode = RXD_MODE_3B;
-
- sp->config.intr_type = dev_intr_type;
-
- if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
- (pdev->device == PCI_DEVICE_ID_HERC_UNI))
- sp->device_type = XFRAME_II_DEVICE;
- else
- sp->device_type = XFRAME_I_DEVICE;
-
-
- /* Initialize some PCI/PCI-X fields of the NIC. */
- s2io_init_pci(sp);
-
- /*
- * Setting the device configuration parameters.
- * Most of these parameters can be specified by the user during
- * module insertion as they are module loadable parameters. If
- * these parameters are not specified during load time, they
- * are initialized with default values.
- */
- config = &sp->config;
- mac_control = &sp->mac_control;
-
- config->napi = napi;
- config->tx_steering_type = tx_steering_type;
-
- /* Tx side parameters. */
- if (config->tx_steering_type == TX_PRIORITY_STEERING)
- config->tx_fifo_num = MAX_TX_FIFOS;
- else
- config->tx_fifo_num = tx_fifo_num;
-
- /* Initialize the fifos used for tx steering */
- if (config->tx_fifo_num < 5) {
- if (config->tx_fifo_num == 1)
- sp->total_tcp_fifos = 1;
- else
- sp->total_tcp_fifos = config->tx_fifo_num - 1;
- sp->udp_fifo_idx = config->tx_fifo_num - 1;
- sp->total_udp_fifos = 1;
- sp->other_fifo_idx = sp->total_tcp_fifos - 1;
- } else {
- sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
- FIFO_OTHER_MAX_NUM);
- sp->udp_fifo_idx = sp->total_tcp_fifos;
- sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
- sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
- }
-
- config->multiq = dev_multiq;
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- tx_cfg->fifo_len = tx_fifo_len[i];
- tx_cfg->fifo_priority = i;
- }
-
- /* mapping the QoS priority to the configured fifos */
- for (i = 0; i < MAX_TX_FIFOS; i++)
- config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
-
- /* map the hashing selector table to the configured fifos */
- for (i = 0; i < config->tx_fifo_num; i++)
- sp->fifo_selector[i] = fifo_selector[i];
-
-
- config->tx_intr_type = TXD_INT_TYPE_UTILZ;
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
-
- tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
- if (tx_cfg->fifo_len < 65) {
- config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
- break;
- }
- }
- /* + 2 because one Txd for skb->data and one Txd for UFO */
- config->max_txds = MAX_SKB_FRAGS + 2;
-
- /* Rx side parameters. */
- config->rx_ring_num = rx_ring_num;
- for (i = 0; i < config->rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
- struct ring_info *ring = &mac_control->rings[i];
-
- rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
- rx_cfg->ring_priority = i;
- ring->rx_bufs_left = 0;
- ring->rxd_mode = sp->rxd_mode;
- ring->rxd_count = rxd_count[sp->rxd_mode];
- ring->pdev = sp->pdev;
- ring->dev = sp->dev;
- }
-
- for (i = 0; i < rx_ring_num; i++) {
- struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
-
- rx_cfg->ring_org = RING_ORG_BUFF1;
- rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
- }
-
- /* Setting Mac Control parameters */
- mac_control->rmac_pause_time = rmac_pause_time;
- mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
- mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
-
-
- /* initialize the shared memory used by the NIC and the host */
- if (init_shared_mem(sp)) {
- DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
- ret = -ENOMEM;
- goto mem_alloc_failed;
- }
-
- sp->bar0 = pci_ioremap_bar(pdev, 0);
- if (!sp->bar0) {
- DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
- dev->name);
- ret = -ENOMEM;
- goto bar0_remap_failed;
- }
-
- sp->bar1 = pci_ioremap_bar(pdev, 2);
- if (!sp->bar1) {
- DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
- dev->name);
- ret = -ENOMEM;
- goto bar1_remap_failed;
- }
-
- /* Initializing the BAR1 address as the start of the FIFO pointer. */
- for (j = 0; j < MAX_TX_FIFOS; j++) {
- mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
- }
-
- /* Driver entry points */
- dev->netdev_ops = &s2io_netdev_ops;
- dev->ethtool_ops = &netdev_ethtool_ops;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_RXCSUM | NETIF_F_LRO;
- dev->features |= dev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HIGHDMA;
- dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
- INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
- INIT_WORK(&sp->set_link_task, s2io_set_link);
-
- pci_save_state(sp->pdev);
-
- /* Setting swapper control on the NIC, for proper reset operation */
- if (s2io_set_swapper(sp)) {
- DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
- dev->name);
- ret = -EAGAIN;
- goto set_swap_failed;
- }
-
-	/* Verify if the Herc works in the slot it's placed into */
- if (sp->device_type & XFRAME_II_DEVICE) {
- mode = s2io_verify_pci_mode(sp);
- if (mode < 0) {
- DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
- __func__);
- ret = -EBADSLT;
- goto set_swap_failed;
- }
- }
-
- if (sp->config.intr_type == MSI_X) {
- sp->num_entries = config->rx_ring_num + 1;
- ret = s2io_enable_msi_x(sp);
-
- if (!ret) {
- ret = s2io_test_msi(sp);
- /* rollback MSI-X, will re-enable during add_isr() */
- remove_msix_isr(sp);
- }
- if (ret) {
-
- DBG_PRINT(ERR_DBG,
- "MSI-X requested but failed to enable\n");
- sp->config.intr_type = INTA;
- }
- }
-
- if (config->intr_type == MSI_X) {
- for (i = 0; i < config->rx_ring_num ; i++) {
- struct ring_info *ring = &mac_control->rings[i];
-
- netif_napi_add(dev, &ring->napi, s2io_poll_msix);
- }
- } else {
- netif_napi_add(dev, &sp->napi, s2io_poll_inta);
- }
-
- /* Not needed for Herc */
- if (sp->device_type & XFRAME_I_DEVICE) {
- /*
- * Fix for all "FFs" MAC address problems observed on
- * Alpha platforms
- */
- fix_mac_address(sp);
- s2io_reset(sp);
- }
-
- /*
- * MAC address initialization.
- * For now only one mac address will be read and used.
- */
- bar0 = sp->bar0;
- val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
- writeq(val64, &bar0->rmac_addr_cmd_mem);
- wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET, true);
- tmp64 = readq(&bar0->rmac_addr_data0_mem);
- mac_down = (u32)tmp64;
- mac_up = (u32) (tmp64 >> 32);
-
- sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
- sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
- sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
- sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
- sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
- sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
-
- /* Set the factory defined MAC address initially */
- dev->addr_len = ETH_ALEN;
- eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr);
-
- /* initialize number of multicast & unicast MAC entries variables */
- if (sp->device_type == XFRAME_I_DEVICE) {
- config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
- config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
- config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
- } else if (sp->device_type == XFRAME_II_DEVICE) {
- config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
- config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
- config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
- }
-
- /* MTU range: 46 - 9600 */
- dev->min_mtu = MIN_MTU;
- dev->max_mtu = S2IO_JUMBO_SIZE;
-
- /* store mac addresses from CAM to s2io_nic structure */
- do_s2io_store_unicast_mc(sp);
-
- /* Configure MSIX vector for number of rings configured plus one */
- if ((sp->device_type == XFRAME_II_DEVICE) &&
- (config->intr_type == MSI_X))
- sp->num_entries = config->rx_ring_num + 1;
-
-	/* Store the values of the MSI-X table in the s2io_nic structure */
- store_xmsi_data(sp);
- /* reset Nic and bring it to known state */
- s2io_reset(sp);
-
- /*
- * Initialize link state flags
- * and the card state parameter
- */
- sp->state = 0;
-
- /* Initialize spinlocks */
- for (i = 0; i < sp->config.tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
-
- spin_lock_init(&fifo->tx_lock);
- }
-
- /*
- * SXE-002: Configure link and activity LED to init state
- * on driver load.
- */
- subid = sp->pdev->subsystem_device;
- if ((subid & 0xFF) >= 0x07) {
- val64 = readq(&bar0->gpio_control);
- val64 |= 0x0000800000000000ULL;
- writeq(val64, &bar0->gpio_control);
- val64 = 0x0411040400000000ULL;
- writeq(val64, (void __iomem *)bar0 + 0x2700);
- val64 = readq(&bar0->gpio_control);
- }
-
- sp->rx_csum = 1; /* Rx chksum verify enabled by default */
-
- if (register_netdev(dev)) {
- DBG_PRINT(ERR_DBG, "Device registration failed\n");
- ret = -ENODEV;
- goto register_failed;
- }
- s2io_vpd_read(sp);
- DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
- DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
- sp->product_name, pdev->revision);
- DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
- s2io_driver_version);
- DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
- DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
- if (sp->device_type & XFRAME_II_DEVICE) {
- mode = s2io_print_pci_mode(sp);
- if (mode < 0) {
- ret = -EBADSLT;
- unregister_netdev(dev);
- goto set_swap_failed;
- }
- }
- switch (sp->rxd_mode) {
- case RXD_MODE_1:
- DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
- dev->name);
- break;
- case RXD_MODE_3B:
- DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
- dev->name);
- break;
- }
-
- switch (sp->config.napi) {
- case 0:
- DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
- break;
- case 1:
- DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
- break;
- }
-
- DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
- sp->config.tx_fifo_num);
-
- DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
- sp->config.rx_ring_num);
-
- switch (sp->config.intr_type) {
- case INTA:
- DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
- break;
- case MSI_X:
- DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
- break;
- }
- if (sp->config.multiq) {
- for (i = 0; i < sp->config.tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
-
- fifo->multiq = config->multiq;
- }
- DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
- dev->name);
- } else
- DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
- dev->name);
-
- switch (sp->config.tx_steering_type) {
- case NO_STEERING:
- DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
- dev->name);
- break;
- case TX_PRIORITY_STEERING:
- DBG_PRINT(ERR_DBG,
- "%s: Priority steering enabled for transmit\n",
- dev->name);
- break;
- case TX_DEFAULT_STEERING:
- DBG_PRINT(ERR_DBG,
- "%s: Default steering enabled for transmit\n",
- dev->name);
- }
-
- DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
- dev->name);
- /* Initialize device name */
- snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
- sp->product_name);
-
- if (vlan_tag_strip)
- sp->vlan_strip_flag = 1;
- else
- sp->vlan_strip_flag = 0;
-
- /*
-	 * Make the link state off at this point. When the link change
-	 * interrupt comes, the state will be automatically changed to
-	 * the right state.
- */
- netif_carrier_off(dev);
-
- return 0;
-
-register_failed:
-set_swap_failed:
- iounmap(sp->bar1);
-bar1_remap_failed:
- iounmap(sp->bar0);
-bar0_remap_failed:
-mem_alloc_failed:
- free_shared_mem(sp);
- pci_disable_device(pdev);
- pci_release_regions(pdev);
- free_netdev(dev);
-
- return ret;
-}
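
The error-handling tail of s2io_init_nic() is the classic kernel goto-unwind ladder: each label releases exactly what was acquired before the failing step, in reverse order of acquisition. Its skeleton, with resource names invented for illustration:

	a = acquire_a();
	if (!a)
		return -ENOMEM;
	b = acquire_b();
	if (!b)
		goto err_b;
	c = acquire_c();
	if (!c)
		goto err_c;
	return 0;
err_c:
	release_b(b);
err_b:
	release_a(a);
	return -ENOMEM;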
-
-/**
- * s2io_rem_nic - Free the PCI device
- * @pdev: structure containing the PCI related information of the device.
- * Description: This function is called by the PCI subsystem to release a
- * PCI device and free up all resources held by the device. This could
- * be in response to a hot-plug event or when the driver is to be removed
- * from memory.
- */
-
-static void s2io_rem_nic(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct s2io_nic *sp;
-
- if (dev == NULL) {
- DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
- return;
- }
-
- sp = netdev_priv(dev);
-
- cancel_work_sync(&sp->rst_timer_task);
- cancel_work_sync(&sp->set_link_task);
-
- unregister_netdev(dev);
-
- free_shared_mem(sp);
- iounmap(sp->bar0);
- iounmap(sp->bar1);
- pci_release_regions(pdev);
- free_netdev(dev);
- pci_disable_device(pdev);
-}
-
-module_pci_driver(s2io_driver);
-
-static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
- struct tcphdr **tcp, struct RxD_t *rxdp,
- struct s2io_nic *sp)
-{
- int ip_off;
- u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
-
- if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
- DBG_PRINT(INIT_DBG,
- "%s: Non-TCP frames not supported for LRO\n",
- __func__);
- return -1;
- }
-
- /* Checking for DIX type or DIX type with VLAN */
- if ((l2_type == 0) || (l2_type == 4)) {
- ip_off = HEADER_ETHERNET_II_802_3_SIZE;
- /*
- * If vlan stripping is disabled and the frame is VLAN tagged,
-		 * shift the offset by the size of the VLAN header.
- */
- if ((!sp->vlan_strip_flag) &&
- (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
- ip_off += HEADER_VLAN_SIZE;
- } else {
- /* LLC, SNAP etc are considered non-mergeable */
- return -1;
- }
-
- *ip = (struct iphdr *)(buffer + ip_off);
- ip_len = (u8)((*ip)->ihl);
- ip_len <<= 2;
- *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
-
- return 0;
-}
-
-static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
- struct tcphdr *tcp)
-{
- DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
- if ((lro->iph->saddr != ip->saddr) ||
- (lro->iph->daddr != ip->daddr) ||
- (lro->tcph->source != tcp->source) ||
- (lro->tcph->dest != tcp->dest))
- return -1;
- return 0;
-}
-
-static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
-{
- return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
-}
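
get_l4_pyld_length() is straight header arithmetic: the TCP payload is the IP total length minus the IP header (ihl counts 32-bit words) minus the TCP header (doff likewise). Worked through for a full-size frame carrying a timestamped segment (tot_len = 1500, ihl = 5, doff = 8):

	payload = 1500 - (5 << 2) - (8 << 2)
	        = 1500 - 20 - 32
	        = 1448 bytes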
-
-static void initiate_new_session(struct lro *lro, u8 *l2h,
- struct iphdr *ip, struct tcphdr *tcp,
- u32 tcp_pyld_len, u16 vlan_tag)
-{
- DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
- lro->l2h = l2h;
- lro->iph = ip;
- lro->tcph = tcp;
- lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
- lro->tcp_ack = tcp->ack_seq;
- lro->sg_num = 1;
- lro->total_len = ntohs(ip->tot_len);
- lro->frags_len = 0;
- lro->vlan_tag = vlan_tag;
- /*
- * Check if we saw TCP timestamp.
- * Other consistency checks have already been done.
- */
- if (tcp->doff == 8) {
- __be32 *ptr;
- ptr = (__be32 *)(tcp+1);
- lro->saw_ts = 1;
- lro->cur_tsval = ntohl(*(ptr+1));
- lro->cur_tsecr = *(ptr+2);
- }
- lro->in_use = 1;
-}
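
The doff == 8 case read here relies on the option layout enforced by verify_l3_l4_lro_capable() below: a 32-byte TCP header whose 12 option bytes are exactly NOP, NOP, Timestamp. Viewed as 32-bit words past the fixed header:

	__be32 *ptr = (__be32 *)(tcp + 1);	/* start of TCP options */

	/* ptr[0] = 0x01 0x01 0x08 0x0a : NOP, NOP, kind 8, length 10 */
	/* ptr[1] = TSval, stored as lro->cur_tsval (host order)      */
	/* ptr[2] = TSecr, stored as lro->cur_tsecr (network order)   */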
-
-static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
-{
- struct iphdr *ip = lro->iph;
- struct tcphdr *tcp = lro->tcph;
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
-
- DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
-
- /* Update L3 header */
- csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
- ip->tot_len = htons(lro->total_len);
-
- /* Update L4 header */
- tcp->ack_seq = lro->tcp_ack;
- tcp->window = lro->window;
-
- /* Update tsecr field if this session has timestamps enabled */
- if (lro->saw_ts) {
- __be32 *ptr = (__be32 *)(tcp + 1);
- *(ptr+2) = lro->cur_tsecr;
- }
-
- /* Update counters required for calculation of
- * average no. of packets aggregated.
- */
- swstats->sum_avg_pkts_aggregated += lro->sg_num;
- swstats->num_aggregations++;
-}
-
-static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
- struct tcphdr *tcp, u32 l4_pyld)
-{
- DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
- lro->total_len += l4_pyld;
- lro->frags_len += l4_pyld;
- lro->tcp_next_seq += l4_pyld;
- lro->sg_num++;
-
-	/* Update ack seq no. and window advertised (from this pkt) in LRO */
- lro->tcp_ack = tcp->ack_seq;
- lro->window = tcp->window;
-
- if (lro->saw_ts) {
- __be32 *ptr;
- /* Update tsecr and tsval from this packet */
- ptr = (__be32 *)(tcp+1);
- lro->cur_tsval = ntohl(*(ptr+1));
- lro->cur_tsecr = *(ptr + 2);
- }
-}
-
-static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
- struct tcphdr *tcp, u32 tcp_pyld_len)
-{
- u8 *ptr;
-
- DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
-
- if (!tcp_pyld_len) {
- /* Runt frame or a pure ack */
- return -1;
- }
-
- if (ip->ihl != 5) /* IP has options */
- return -1;
-
- /* If we see CE codepoint in IP header, packet is not mergeable */
- if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
- return -1;
-
- /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
- if (tcp->urg || tcp->psh || tcp->rst ||
- tcp->syn || tcp->fin ||
- tcp->ece || tcp->cwr || !tcp->ack) {
- /*
-		 * Currently we recognize only the ACK control word;
-		 * any other control field being set results in
-		 * flushing the LRO session
- */
- return -1;
- }
-
- /*
- * Allow only one TCP timestamp option. Don't aggregate if
- * any other options are detected.
- */
- if (tcp->doff != 5 && tcp->doff != 8)
- return -1;
-
- if (tcp->doff == 8) {
- ptr = (u8 *)(tcp + 1);
- while (*ptr == TCPOPT_NOP)
- ptr++;
- if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
- return -1;
-
- /* Ensure timestamp value increases monotonically */
- if (l_lro)
- if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
- return -1;
-
- /* timestamp echo reply should be non-zero */
- if (*((__be32 *)(ptr+6)) == 0)
- return -1;
- }
-
- return 0;
-}
-
-static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
- u8 **tcp, u32 *tcp_len, struct lro **lro,
- struct RxD_t *rxdp, struct s2io_nic *sp)
-{
- struct iphdr *ip;
- struct tcphdr *tcph;
- int ret = 0, i;
- u16 vlan_tag = 0;
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
-
- ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
- rxdp, sp);
- if (ret)
- return ret;
-
- DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
-
- vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
- tcph = (struct tcphdr *)*tcp;
- *tcp_len = get_l4_pyld_length(ip, tcph);
- for (i = 0; i < MAX_LRO_SESSIONS; i++) {
- struct lro *l_lro = &ring_data->lro0_n[i];
- if (l_lro->in_use) {
- if (check_for_socket_match(l_lro, ip, tcph))
- continue;
- /* Sock pair matched */
- *lro = l_lro;
-
- if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
- DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
- "expected 0x%x, actual 0x%x\n",
- __func__,
- (*lro)->tcp_next_seq,
- ntohl(tcph->seq));
-
- swstats->outof_sequence_pkts++;
- ret = 2;
- break;
- }
-
- if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
- *tcp_len))
- ret = 1; /* Aggregate */
- else
- ret = 2; /* Flush both */
- break;
- }
- }
-
- if (ret == 0) {
- /* Before searching for available LRO objects,
- * check if the pkt is L3/L4 aggregatable. If not,
- * don't create a new LRO session; just send this
- * packet up.
- */
- if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
- return 5;
-
- for (i = 0; i < MAX_LRO_SESSIONS; i++) {
- struct lro *l_lro = &ring_data->lro0_n[i];
- if (!(l_lro->in_use)) {
- *lro = l_lro;
- ret = 3; /* Begin anew */
- break;
- }
- }
- }
-
- if (ret == 0) { /* sessions exceeded */
- DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
- __func__);
- *lro = NULL;
- return ret;
- }
-
- switch (ret) {
- case 3:
- initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
- vlan_tag);
- break;
- case 2:
- update_L3L4_header(sp, *lro);
- break;
- case 1:
- aggregate_new_rx(*lro, ip, tcph, *tcp_len);
- if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
- update_L3L4_header(sp, *lro);
- ret = 4; /* Flush the LRO */
- }
- break;
- default:
- DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
- break;
- }
-
- return ret;
-}
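These return codes encode the per-packet LRO decision for the Rx completion path, whose handler falls outside this excerpt. A minimal sketch of the caller contract, with the real handler's statistics and checksum bookkeeping elided and skb/vlan_tag standing in for the caller's local state:

    switch (ret) {
    case 3:         /* new session begun: this skb becomes the parent */
            lro->parent = skb;
            break;
    case 1:         /* aggregated into an existing session */
            lro_append_pkt(sp, lro, skb, tcp_len);
            break;
    case 4:         /* hit lro_max_aggr_per_sess: append, then flush */
            lro_append_pkt(sp, lro, skb, tcp_len);
            queue_rx_frame(lro->parent, lro->vlan_tag);
            clear_lro_session(lro);
            break;
    case 2:         /* unmergeable or out of sequence: flush session... */
            queue_rx_frame(lro->parent, lro->vlan_tag);
            clear_lro_session(lro);
            fallthrough;
    case 0:         /* ...no free session, or */
    case 5:         /* not aggregatable: send this packet up by itself */
            queue_rx_frame(skb, vlan_tag);
            break;
    }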
-
-static void clear_lro_session(struct lro *lro)
-{
- memset(lro, 0, sizeof(struct lro));
-}
-
-static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
-{
- struct net_device *dev = skb->dev;
- struct s2io_nic *sp = netdev_priv(dev);
-
- skb->protocol = eth_type_trans(skb, dev);
- if (vlan_tag && sp->vlan_strip_flag)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- if (sp->config.napi)
- netif_receive_skb(skb);
- else
- netif_rx(skb);
-}
-
-static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
- struct sk_buff *skb, u32 tcp_len)
-{
- struct sk_buff *first = lro->parent;
- struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
-
- first->len += tcp_len;
- first->data_len = lro->frags_len;
- skb_pull(skb, (skb->len - tcp_len));
- if (skb_shinfo(first)->frag_list)
- lro->last_frag->next = skb;
- else
- skb_shinfo(first)->frag_list = skb;
- first->truesize += skb->truesize;
- lro->last_frag = skb;
- swstats->clubbed_frms_cnt++;
-}
-
-/**
- * s2io_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct s2io_nic *sp = netdev_priv(netdev);
-
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (netif_running(netdev)) {
- /* Bring down the card, while avoiding PCI I/O */
- do_s2io_card_down(sp, 0);
- }
- pci_disable_device(pdev);
-
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * s2io_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has experienced a hard reset,
- * followed by fixups by BIOS, and has its config space
- * set up identically to what it was at cold boot.
- */
-static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct s2io_nic *sp = netdev_priv(netdev);
-
- if (pci_enable_device(pdev)) {
- pr_err("Cannot re-enable PCI device after reset.\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- pci_set_master(pdev);
- s2io_reset(sp);
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * s2io_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells
- * us that it's OK to resume normal operation.
- */
-static void s2io_io_resume(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct s2io_nic *sp = netdev_priv(netdev);
-
- if (netif_running(netdev)) {
- if (s2io_card_up(sp)) {
- pr_err("Can't bring device back up after reset.\n");
- return;
- }
-
- if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
- s2io_card_down(sp);
- pr_err("Can't restore mac addr after reset.\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
- netif_tx_wake_all_queues(netdev);
-}
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
deleted file mode 100644
index cb7080eb5912..000000000000
--- a/drivers/net/ethernet/neterion/s2io.h
+++ /dev/null
@@ -1,1124 +0,0 @@
-/************************************************************************
- * s2io.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
- * Copyright(c) 2002-2010 Exar Corp.
-
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- ************************************************************************/
-#include <linux/io-64-nonatomic-lo-hi.h>
-#ifndef _S2IO_H
-#define _S2IO_H
-
-#define TBD 0
-#define s2BIT(loc) (0x8000000000000000ULL >> (loc))
-#define vBIT(val, loc, sz) (((u64)val) << (64-loc-sz))
-#define INV(d) (((d&0xff)<<24) | (((d>>8)&0xff)<<16) | (((d>>16)&0xff)<<8) | ((d>>24)&0xff))
-
-#undef SUCCESS
-#define SUCCESS 0
-#define FAILURE -1
-#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL
-#define S2IO_DISABLE_MAC_ENTRY 0xFFFFFFFFFFFFULL
-#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100
-#define S2IO_BIT_RESET 1
-#define S2IO_BIT_SET 2
-#define CHECKBIT(value, nbit) (value & (1 << nbit))
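These helpers number bits big-endian style: bit 0 is the most significant bit of the 64-bit word, which is why s2BIT() shifts down from the top and vBIT() positions a field by its distance from bit 63. Two worked values, purely illustrative:

    s2BIT(7)       == 0x0100000000000000ULL   /* single bit 7, from the MSB  */
    vBIT(3, 14, 2) == 0x0003000000000000ULL   /* 2-bit field in bits 14..15, */
                                              /* i.e. s2BIT(14) | s2BIT(15)  */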
-
-/* Maximum time to flicker LED when asked to identify NIC using ethtool */
-#define MAX_FLICKER_TIME 60000 /* 60 Secs */
-
-/* Maximum outstanding splits to be configured into xena. */
-enum {
- XENA_ONE_SPLIT_TRANSACTION = 0,
- XENA_TWO_SPLIT_TRANSACTION = 1,
- XENA_THREE_SPLIT_TRANSACTION = 2,
- XENA_FOUR_SPLIT_TRANSACTION = 3,
- XENA_EIGHT_SPLIT_TRANSACTION = 4,
- XENA_TWELVE_SPLIT_TRANSACTION = 5,
- XENA_SIXTEEN_SPLIT_TRANSACTION = 6,
- XENA_THIRTYTWO_SPLIT_TRANSACTION = 7
-};
-#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
-
-/* OS concerned variables and constants */
-#define WATCH_DOG_TIMEOUT (15*HZ)
-#define EFILL 0x1234
-#define ALIGN_SIZE 127
-#define PCIX_COMMAND_REGISTER 0x62
-
-/*
- * Debug related variables.
- */
-/* different debug levels. */
-#define ERR_DBG 0
-#define INIT_DBG 1
-#define INFO_DBG 2
-#define TX_DBG 3
-#define INTR_DBG 4
-
-/* Global variable that defines the present debug level of the driver. */
-static int debug_level = ERR_DBG;
-
-/* DEBUG message print. */
-#define DBG_PRINT(dbg_level, fmt, args...) do { \
- if (dbg_level <= debug_level) \
- pr_info(fmt, ##args); \
- } while (0)
-
-/* Protocol assist features of the NIC */
-#define L3_CKSUM_OK 0xFFFF
-#define L4_CKSUM_OK 0xFFFF
-#define S2IO_JUMBO_SIZE 9600
-
-/* Driver statistics maintained by driver */
-struct swStat {
- unsigned long long single_ecc_errs;
- unsigned long long double_ecc_errs;
- unsigned long long parity_err_cnt;
- unsigned long long serious_err_cnt;
- unsigned long long soft_reset_cnt;
- unsigned long long fifo_full_cnt;
- unsigned long long ring_full_cnt[8];
- /* LRO statistics */
- unsigned long long clubbed_frms_cnt;
- unsigned long long sending_both;
- unsigned long long outof_sequence_pkts;
- unsigned long long flush_max_pkts;
- unsigned long long sum_avg_pkts_aggregated;
- unsigned long long num_aggregations;
- /* Other statistics */
- unsigned long long mem_alloc_fail_cnt;
- unsigned long long pci_map_fail_cnt;
- unsigned long long watchdog_timer_cnt;
- unsigned long long mem_allocated;
- unsigned long long mem_freed;
- unsigned long long link_up_cnt;
- unsigned long long link_down_cnt;
- unsigned long long link_up_time;
- unsigned long long link_down_time;
-
- /* Transfer Code statistics */
- unsigned long long tx_buf_abort_cnt;
- unsigned long long tx_desc_abort_cnt;
- unsigned long long tx_parity_err_cnt;
- unsigned long long tx_link_loss_cnt;
- unsigned long long tx_list_proc_err_cnt;
-
- unsigned long long rx_parity_err_cnt;
- unsigned long long rx_abort_cnt;
- unsigned long long rx_parity_abort_cnt;
- unsigned long long rx_rda_fail_cnt;
- unsigned long long rx_unkn_prot_cnt;
- unsigned long long rx_fcs_err_cnt;
- unsigned long long rx_buf_size_err_cnt;
- unsigned long long rx_rxd_corrupt_cnt;
- unsigned long long rx_unkn_err_cnt;
-
- /* Error/alarm statistics*/
- unsigned long long tda_err_cnt;
- unsigned long long pfc_err_cnt;
- unsigned long long pcc_err_cnt;
- unsigned long long tti_err_cnt;
- unsigned long long lso_err_cnt;
- unsigned long long tpa_err_cnt;
- unsigned long long sm_err_cnt;
- unsigned long long mac_tmac_err_cnt;
- unsigned long long mac_rmac_err_cnt;
- unsigned long long xgxs_txgxs_err_cnt;
- unsigned long long xgxs_rxgxs_err_cnt;
- unsigned long long rc_err_cnt;
- unsigned long long prc_pcix_err_cnt;
- unsigned long long rpa_err_cnt;
- unsigned long long rda_err_cnt;
- unsigned long long rti_err_cnt;
- unsigned long long mc_err_cnt;
-
-};
-
-/* Xpak related alarms and warnings */
-struct xpakStat {
- u64 alarm_transceiver_temp_high;
- u64 alarm_transceiver_temp_low;
- u64 alarm_laser_bias_current_high;
- u64 alarm_laser_bias_current_low;
- u64 alarm_laser_output_power_high;
- u64 alarm_laser_output_power_low;
- u64 warn_transceiver_temp_high;
- u64 warn_transceiver_temp_low;
- u64 warn_laser_bias_current_high;
- u64 warn_laser_bias_current_low;
- u64 warn_laser_output_power_high;
- u64 warn_laser_output_power_low;
- u64 xpak_regs_stat;
- u32 xpak_timer_count;
-};
-
-
-/* The statistics block of Xena */
-struct stat_block {
-/* Tx MAC statistics counters. */
- __le32 tmac_data_octets;
- __le32 tmac_frms;
- __le64 tmac_drop_frms;
- __le32 tmac_bcst_frms;
- __le32 tmac_mcst_frms;
- __le64 tmac_pause_ctrl_frms;
- __le32 tmac_ucst_frms;
- __le32 tmac_ttl_octets;
- __le32 tmac_any_err_frms;
- __le32 tmac_nucst_frms;
- __le64 tmac_ttl_less_fb_octets;
- __le64 tmac_vld_ip_octets;
- __le32 tmac_drop_ip;
- __le32 tmac_vld_ip;
- __le32 tmac_rst_tcp;
- __le32 tmac_icmp;
- __le64 tmac_tcp;
- __le32 reserved_0;
- __le32 tmac_udp;
-
-/* Rx MAC Statistics counters. */
- __le32 rmac_data_octets;
- __le32 rmac_vld_frms;
- __le64 rmac_fcs_err_frms;
- __le64 rmac_drop_frms;
- __le32 rmac_vld_bcst_frms;
- __le32 rmac_vld_mcst_frms;
- __le32 rmac_out_rng_len_err_frms;
- __le32 rmac_in_rng_len_err_frms;
- __le64 rmac_long_frms;
- __le64 rmac_pause_ctrl_frms;
- __le64 rmac_unsup_ctrl_frms;
- __le32 rmac_accepted_ucst_frms;
- __le32 rmac_ttl_octets;
- __le32 rmac_discarded_frms;
- __le32 rmac_accepted_nucst_frms;
- __le32 reserved_1;
- __le32 rmac_drop_events;
- __le64 rmac_ttl_less_fb_octets;
- __le64 rmac_ttl_frms;
- __le64 reserved_2;
- __le32 rmac_usized_frms;
- __le32 reserved_3;
- __le32 rmac_frag_frms;
- __le32 rmac_osized_frms;
- __le32 reserved_4;
- __le32 rmac_jabber_frms;
- __le64 rmac_ttl_64_frms;
- __le64 rmac_ttl_65_127_frms;
- __le64 reserved_5;
- __le64 rmac_ttl_128_255_frms;
- __le64 rmac_ttl_256_511_frms;
- __le64 reserved_6;
- __le64 rmac_ttl_512_1023_frms;
- __le64 rmac_ttl_1024_1518_frms;
- __le32 rmac_ip;
- __le32 reserved_7;
- __le64 rmac_ip_octets;
- __le32 rmac_drop_ip;
- __le32 rmac_hdr_err_ip;
- __le32 reserved_8;
- __le32 rmac_icmp;
- __le64 rmac_tcp;
- __le32 rmac_err_drp_udp;
- __le32 rmac_udp;
- __le64 rmac_xgmii_err_sym;
- __le64 rmac_frms_q0;
- __le64 rmac_frms_q1;
- __le64 rmac_frms_q2;
- __le64 rmac_frms_q3;
- __le64 rmac_frms_q4;
- __le64 rmac_frms_q5;
- __le64 rmac_frms_q6;
- __le64 rmac_frms_q7;
- __le16 rmac_full_q3;
- __le16 rmac_full_q2;
- __le16 rmac_full_q1;
- __le16 rmac_full_q0;
- __le16 rmac_full_q7;
- __le16 rmac_full_q6;
- __le16 rmac_full_q5;
- __le16 rmac_full_q4;
- __le32 reserved_9;
- __le32 rmac_pause_cnt;
- __le64 rmac_xgmii_data_err_cnt;
- __le64 rmac_xgmii_ctrl_err_cnt;
- __le32 rmac_err_tcp;
- __le32 rmac_accepted_ip;
-
-/* PCI/PCI-X Read transaction statistics. */
- __le32 new_rd_req_cnt;
- __le32 rd_req_cnt;
- __le32 rd_rtry_cnt;
- __le32 new_rd_req_rtry_cnt;
-
-/* PCI/PCI-X Write/Read transaction statistics. */
- __le32 wr_req_cnt;
- __le32 wr_rtry_rd_ack_cnt;
- __le32 new_wr_req_rtry_cnt;
- __le32 new_wr_req_cnt;
- __le32 wr_disc_cnt;
- __le32 wr_rtry_cnt;
-
-/* PCI/PCI-X Write / DMA Transaction statistics. */
- __le32 txp_wr_cnt;
- __le32 rd_rtry_wr_ack_cnt;
- __le32 txd_wr_cnt;
- __le32 txd_rd_cnt;
- __le32 rxd_wr_cnt;
- __le32 rxd_rd_cnt;
- __le32 rxf_wr_cnt;
- __le32 txf_rd_cnt;
-
-/* Tx MAC statistics overflow counters. */
- __le32 tmac_data_octets_oflow;
- __le32 tmac_frms_oflow;
- __le32 tmac_bcst_frms_oflow;
- __le32 tmac_mcst_frms_oflow;
- __le32 tmac_ucst_frms_oflow;
- __le32 tmac_ttl_octets_oflow;
- __le32 tmac_any_err_frms_oflow;
- __le32 tmac_nucst_frms_oflow;
- __le64 tmac_vlan_frms;
- __le32 tmac_drop_ip_oflow;
- __le32 tmac_vld_ip_oflow;
- __le32 tmac_rst_tcp_oflow;
- __le32 tmac_icmp_oflow;
- __le32 tpa_unknown_protocol;
- __le32 tmac_udp_oflow;
- __le32 reserved_10;
- __le32 tpa_parse_failure;
-
-/* Rx MAC Statistics overflow counters. */
- __le32 rmac_data_octets_oflow;
- __le32 rmac_vld_frms_oflow;
- __le32 rmac_vld_bcst_frms_oflow;
- __le32 rmac_vld_mcst_frms_oflow;
- __le32 rmac_accepted_ucst_frms_oflow;
- __le32 rmac_ttl_octets_oflow;
- __le32 rmac_discarded_frms_oflow;
- __le32 rmac_accepted_nucst_frms_oflow;
- __le32 rmac_usized_frms_oflow;
- __le32 rmac_drop_events_oflow;
- __le32 rmac_frag_frms_oflow;
- __le32 rmac_osized_frms_oflow;
- __le32 rmac_ip_oflow;
- __le32 rmac_jabber_frms_oflow;
- __le32 rmac_icmp_oflow;
- __le32 rmac_drop_ip_oflow;
- __le32 rmac_err_drp_udp_oflow;
- __le32 rmac_udp_oflow;
- __le32 reserved_11;
- __le32 rmac_pause_cnt_oflow;
- __le64 rmac_ttl_1519_4095_frms;
- __le64 rmac_ttl_4096_8191_frms;
- __le64 rmac_ttl_8192_max_frms;
- __le64 rmac_ttl_gt_max_frms;
- __le64 rmac_osized_alt_frms;
- __le64 rmac_jabber_alt_frms;
- __le64 rmac_gt_max_alt_frms;
- __le64 rmac_vlan_frms;
- __le32 rmac_len_discard;
- __le32 rmac_fcs_discard;
- __le32 rmac_pf_discard;
- __le32 rmac_da_discard;
- __le32 rmac_red_discard;
- __le32 rmac_rts_discard;
- __le32 reserved_12;
- __le32 rmac_ingm_full_discard;
- __le32 reserved_13;
- __le32 rmac_accepted_ip_oflow;
- __le32 reserved_14;
- __le32 link_fault_cnt;
- u8 buffer[20];
- struct swStat sw_stat;
- struct xpakStat xpak_stat;
-};
-
-/* Default value for 'vlan_strip_tag' configuration parameter */
-#define NO_STRIP_IN_PROMISC 2
-
-/*
- * Structures representing different init time configuration
- * parameters of the NIC.
- */
-
-#define MAX_TX_FIFOS 8
-#define MAX_RX_RINGS 8
-
-#define FIFO_DEFAULT_NUM 5
-#define FIFO_UDP_MAX_NUM 2 /* 0 - even, 1 - odd ports */
-#define FIFO_OTHER_MAX_NUM 1
-
-
-#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 128)
-#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 86)
-#define MAX_TX_DESC (MAX_AVAILABLE_TXDS)
-
-/* FIFO mappings for all possible number of fifos configured */
-static const int fifo_map[][MAX_TX_FIFOS] = {
- {0, 0, 0, 0, 0, 0, 0, 0},
- {0, 0, 0, 0, 1, 1, 1, 1},
- {0, 0, 0, 1, 1, 1, 2, 2},
- {0, 0, 1, 1, 2, 2, 3, 3},
- {0, 0, 1, 1, 2, 2, 3, 4},
- {0, 0, 1, 1, 2, 3, 4, 5},
- {0, 0, 1, 2, 3, 4, 5, 6},
- {0, 1, 2, 3, 4, 5, 6, 7},
-};
-
-static const u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
-
-/* Maintains Per FIFO related information. */
-struct tx_fifo_config {
-#define MAX_AVAILABLE_TXDS 8192
- u32 fifo_len; /* specifies len of FIFO up to 8192, i.e. no. of TxDLs */
-/* Priority definition */
-#define TX_FIFO_PRI_0 0 /*Highest */
-#define TX_FIFO_PRI_1 1
-#define TX_FIFO_PRI_2 2
-#define TX_FIFO_PRI_3 3
-#define TX_FIFO_PRI_4 4
-#define TX_FIFO_PRI_5 5
-#define TX_FIFO_PRI_6 6
-#define TX_FIFO_PRI_7 7 /*lowest */
- u8 fifo_priority; /* specifies priority level for FIFO */
- /* user should not set two fifos with the same priority */
- u8 f_no_snoop;
-#define NO_SNOOP_TXD 0x01
-#define NO_SNOOP_TXD_BUFFER 0x02
-};
-
-
-/* Maintains per Ring related information */
-struct rx_ring_config {
- u32 num_rxd; /*No of RxDs per Rx Ring */
-#define RX_RING_PRI_0 0 /* highest */
-#define RX_RING_PRI_1 1
-#define RX_RING_PRI_2 2
-#define RX_RING_PRI_3 3
-#define RX_RING_PRI_4 4
-#define RX_RING_PRI_5 5
-#define RX_RING_PRI_6 6
-#define RX_RING_PRI_7 7 /* lowest */
-
- u8 ring_priority; /*Specifies service priority of ring */
- /* OSM should not set any two rings with same priority */
- u8 ring_org; /*Organization of ring */
-#define RING_ORG_BUFF1 0x01
-#define RX_RING_ORG_BUFF3 0x03
-#define RX_RING_ORG_BUFF5 0x05
-
- u8 f_no_snoop;
-#define NO_SNOOP_RXD 0x01
-#define NO_SNOOP_RXD_BUFFER 0x02
-};
-
-/* This structure contains values of the tunable parameters
- * of the H/W
- */
-struct config_param {
-/* Tx Side */
- u32 tx_fifo_num; /*Number of Tx FIFOs */
-
- /* 0-No steering, 1-Priority steering, 2-Default fifo map */
-#define NO_STEERING 0
-#define TX_PRIORITY_STEERING 0x1
-#define TX_DEFAULT_STEERING 0x2
- u8 tx_steering_type;
-
- u8 fifo_mapping[MAX_TX_FIFOS];
- struct tx_fifo_config tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
- u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
- u64 tx_intr_type;
-#define INTA 0
-#define MSI_X 2
- u8 intr_type;
- u8 napi;
-
- /* Specifies if Tx Intr is UTILZ or PER_LIST type. */
-
-/* Rx Side */
- u32 rx_ring_num; /*Number of receive rings */
-#define MAX_RX_BLOCKS_PER_RING 150
-
- struct rx_ring_config rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
-
-#define HEADER_ETHERNET_II_802_3_SIZE 14
-#define HEADER_802_2_SIZE 3
-#define HEADER_SNAP_SIZE 5
-#define HEADER_VLAN_SIZE 4
-
-#define MIN_MTU 46
-#define MAX_PYLD 1500
-#define MAX_MTU (MAX_PYLD+18)
-#define MAX_MTU_VLAN (MAX_PYLD+22)
-#define MAX_PYLD_JUMBO 9600
-#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18)
-#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22)
- u16 bus_speed;
- int max_mc_addr; /* xena=64 herc=256 */
- int max_mac_addr; /* xena=16 herc=64 */
- int mc_start_offset; /* xena=16 herc=64 */
- u8 multiq;
-};
-
-/* Structure representing MAC Addrs */
-struct mac_addr {
- u8 mac_addr[ETH_ALEN];
-};
-
-/* Structure that represents every FIFO element in the BAR1
- * Address location.
- */
-struct TxFIFO_element {
- u64 TxDL_Pointer;
-
- u64 List_Control;
-#define TX_FIFO_LAST_TXD_NUM( val) vBIT(val,0,8)
-#define TX_FIFO_FIRST_LIST s2BIT(14)
-#define TX_FIFO_LAST_LIST s2BIT(15)
-#define TX_FIFO_FIRSTNLAST_LIST vBIT(3,14,2)
-#define TX_FIFO_SPECIAL_FUNC s2BIT(23)
-#define TX_FIFO_DS_NO_SNOOP s2BIT(31)
-#define TX_FIFO_BUFF_NO_SNOOP s2BIT(30)
-};
-
-/* Tx descriptor structure */
-struct TxD {
- u64 Control_1;
-/* bit mask */
-#define TXD_LIST_OWN_XENA s2BIT(7)
-#define TXD_T_CODE (s2BIT(12)|s2BIT(13)|s2BIT(14)|s2BIT(15))
-#define TXD_T_CODE_OK(val) (!(val & TXD_T_CODE))
-#define GET_TXD_T_CODE(val) ((val & TXD_T_CODE)<<12)
-#define TXD_GATHER_CODE (s2BIT(22) | s2BIT(23))
-#define TXD_GATHER_CODE_FIRST s2BIT(22)
-#define TXD_GATHER_CODE_LAST s2BIT(23)
-#define TXD_TCP_LSO_EN s2BIT(30)
-#define TXD_UDP_COF_EN s2BIT(31)
-#define TXD_UFO_EN (s2BIT(31) | s2BIT(30))
-#define TXD_TCP_LSO_MSS(val) vBIT(val,34,14)
-#define TXD_UFO_MSS(val) vBIT(val,34,14)
-#define TXD_BUFFER0_SIZE(val) vBIT(val,48,16)
-
- u64 Control_2;
-#define TXD_TX_CKO_CONTROL (s2BIT(5)|s2BIT(6)|s2BIT(7))
-#define TXD_TX_CKO_IPV4_EN s2BIT(5)
-#define TXD_TX_CKO_TCP_EN s2BIT(6)
-#define TXD_TX_CKO_UDP_EN s2BIT(7)
-#define TXD_VLAN_ENABLE s2BIT(15)
-#define TXD_VLAN_TAG(val) vBIT(val,16,16)
-#define TXD_INT_NUMBER(val) vBIT(val,34,6)
-#define TXD_INT_TYPE_PER_LIST s2BIT(47)
-#define TXD_INT_TYPE_UTILZ s2BIT(46)
-#define TXD_SET_MARKER vBIT(0x6,0,4)
-
- u64 Buffer_Pointer;
- u64 Host_Control; /* reserved for host */
-};
-
-/* Structure to hold the phy and virt addr of every TxDL. */
-struct list_info_hold {
- dma_addr_t list_phy_addr;
- void *list_virt_addr;
-};
-
-/* Rx descriptor structure for 1 buffer mode */
-struct RxD_t {
- u64 Host_Control; /* reserved for host */
- u64 Control_1;
-#define RXD_OWN_XENA s2BIT(7)
-#define RXD_T_CODE (s2BIT(12)|s2BIT(13)|s2BIT(14)|s2BIT(15))
-#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8)
-#define RXD_FRAME_VLAN_TAG s2BIT(24)
-#define RXD_FRAME_PROTO_IPV4 s2BIT(27)
-#define RXD_FRAME_PROTO_IPV6 s2BIT(28)
-#define RXD_FRAME_IP_FRAG s2BIT(29)
-#define RXD_FRAME_PROTO_TCP s2BIT(30)
-#define RXD_FRAME_PROTO_UDP s2BIT(31)
-#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP)
-#define RXD_GET_L3_CKSUM(val) ((u16)(val>> 16) & 0xFFFF)
-#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF)
-
- u64 Control_2;
-#define THE_RXD_MARK 0x3
-#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2)
-#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62)
-
-#define MASK_VLAN_TAG vBIT(0xFFFF,48,16)
-#define SET_VLAN_TAG(val) vBIT(val,48,16)
-#define SET_NUM_TAG(val) vBIT(val,16,32)
-
-
-};
-/* Rx descriptor structure for 1 buffer mode */
-struct RxD1 {
- struct RxD_t h;
-
-#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14)
-#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14)
-#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \
- (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48)
- u64 Buffer0_ptr;
-};
-/* Rx descriptor structure for 3 or 2 buffer mode */
-
-struct RxD3 {
- struct RxD_t h;
-
-#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14)
-#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16)
-#define MASK_BUFFER2_SIZE_3 vBIT(0xFFFF,32,16)
-#define SET_BUFFER0_SIZE_3(val) vBIT(val,8,8)
-#define SET_BUFFER1_SIZE_3(val) vBIT(val,16,16)
-#define SET_BUFFER2_SIZE_3(val) vBIT(val,32,16)
-#define RXD_GET_BUFFER0_SIZE_3(Control_2) \
- (u8)((Control_2 & MASK_BUFFER0_SIZE_3) >> 48)
-#define RXD_GET_BUFFER1_SIZE_3(Control_2) \
- (u16)((Control_2 & MASK_BUFFER1_SIZE_3) >> 32)
-#define RXD_GET_BUFFER2_SIZE_3(Control_2) \
- (u16)((Control_2 & MASK_BUFFER2_SIZE_3) >> 16)
-#define BUF0_LEN 40
-#define BUF1_LEN 1
-
- u64 Buffer0_ptr;
- u64 Buffer1_ptr;
- u64 Buffer2_ptr;
-};
-
-
-/* Structure that represents the Rx descriptor block, which contains
- * 127 Rx descriptors plus a trailing control/link entry.
- */
-struct RxD_block {
-#define MAX_RXDS_PER_BLOCK_1 127
- struct RxD1 rxd[MAX_RXDS_PER_BLOCK_1];
-
- u64 reserved_0;
-#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
- u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last
- * Rxd in this blk */
- u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */
- u64 pNext_RxD_Blk_physical; /* Buff0_ptr. In a 32 bit arch
- * the upper 32 bits should
- * be 0 */
-};
-
-#define SIZE_OF_BLOCK 4096
-
-#define RXD_MODE_1 0 /* One Buffer mode */
-#define RXD_MODE_3B 1 /* Two Buffer mode */
-
-/* Structure to hold virtual addresses of Buf0 and Buf1 in
- * 2buf mode. */
-struct buffAdd {
- void *ba_0_org;
- void *ba_1_org;
- void *ba_0;
- void *ba_1;
-};
-
-/* Structure which stores all the MAC control parameters */
-
-/* This structure stores the offset of the RxD in the ring
- * from which the Rx Interrupt processor can start picking
- * up the RxDs for processing.
- */
-struct rx_curr_get_info {
- u32 block_index;
- u32 offset;
- u32 ring_len;
-};
-
-struct rx_curr_put_info {
- u32 block_index;
- u32 offset;
- u32 ring_len;
-};
-
-/* This structure stores the offset of the TxDl in the FIFO
- * from which the Tx Interrupt processor can start picking
- * up the TxDLs for send complete interrupt processing.
- */
-struct tx_curr_get_info {
- u32 offset;
- u32 fifo_len;
-};
-
-struct tx_curr_put_info {
- u32 offset;
- u32 fifo_len;
-};
-
-struct rxd_info {
- void *virt_addr;
- dma_addr_t dma_addr;
-};
-
-/* Structure that holds the Phy and virt addresses of the Blocks */
-struct rx_block_info {
- void *block_virt_addr;
- dma_addr_t block_dma_addr;
- struct rxd_info *rxds;
-};
-
-/* Data structure to represent a LRO session */
-struct lro {
- struct sk_buff *parent;
- struct sk_buff *last_frag;
- u8 *l2h;
- struct iphdr *iph;
- struct tcphdr *tcph;
- u32 tcp_next_seq;
- __be32 tcp_ack;
- int total_len;
- int frags_len;
- int sg_num;
- int in_use;
- __be16 window;
- u16 vlan_tag;
- u32 cur_tsval;
- __be32 cur_tsecr;
- u8 saw_ts;
-} ____cacheline_aligned;
-
-/* Ring specific structure */
-struct ring_info {
- /* The ring number */
- int ring_no;
-
- /* per-ring buffer counter */
- u32 rx_bufs_left;
-
-#define MAX_LRO_SESSIONS 32
- struct lro lro0_n[MAX_LRO_SESSIONS];
- u8 lro;
-
- /* copy of sp->rxd_mode flag */
- int rxd_mode;
-
- /* Number of rxds per block for the rxd_mode */
- int rxd_count;
-
- /* copy of sp pointer */
- struct s2io_nic *nic;
-
- /* copy of sp->dev pointer */
- struct net_device *dev;
-
- /* copy of sp->pdev pointer */
- struct pci_dev *pdev;
-
- /* Per ring napi struct */
- struct napi_struct napi;
-
- unsigned long interrupt_count;
-
- /*
- * Place holders for the virtual and physical addresses of
- * all the Rx Blocks
- */
- struct rx_block_info rx_blocks[MAX_RX_BLOCKS_PER_RING];
- int block_count;
- int pkt_cnt;
-
- /*
- * Put pointer info which indicates which RxD has to be replenished
- * with a new buffer.
- */
- struct rx_curr_put_info rx_curr_put_info;
-
- /*
- * Get pointer info which indicates the last RxD that was
- * processed by the driver.
- */
- struct rx_curr_get_info rx_curr_get_info;
-
- /* interface MTU value */
- unsigned mtu;
-
- /* Buffer Address store. */
- struct buffAdd **ba;
-} ____cacheline_aligned;
-
-/* Fifo specific structure */
-struct fifo_info {
- /* FIFO number */
- int fifo_no;
-
- /* Maximum TxDs per TxDL */
- int max_txds;
-
- /* Place holder of all the TX List's Phy and Virt addresses. */
- struct list_info_hold *list_info;
-
- /*
- * Current offset within the tx FIFO where driver would write
- * new Tx frame
- */
- struct tx_curr_put_info tx_curr_put_info;
-
- /*
- * Current offset within tx FIFO from where the driver would start freeing
- * the buffers
- */
- struct tx_curr_get_info tx_curr_get_info;
-#define FIFO_QUEUE_START 0
-#define FIFO_QUEUE_STOP 1
- int queue_state;
-
- /* copy of sp->dev pointer */
- struct net_device *dev;
-
- /* copy of multiq status */
- u8 multiq;
-
- /* Per fifo lock */
- spinlock_t tx_lock;
-
- /* Per fifo UFO in band structure */
- u64 *ufo_in_band_v;
-
- struct s2io_nic *nic;
-} ____cacheline_aligned;
-
-/* Information related to the Tx and Rx FIFOs and Rings of Xena
- * is maintained in this structure.
- */
-struct mac_info {
-/* tx side stuff */
- /* logical pointer of start of each Tx FIFO */
- struct TxFIFO_element __iomem *tx_FIFO_start[MAX_TX_FIFOS];
-
- /* Fifo specific structure */
- struct fifo_info fifos[MAX_TX_FIFOS];
-
- /* Save virtual address of TxD page with zero DMA addr (if any) */
- void *zerodma_virt_addr;
-
-/* rx side stuff */
- /* Ring specific structure */
- struct ring_info rings[MAX_RX_RINGS];
-
- u16 rmac_pause_time;
- u16 mc_pause_threshold_q0q3;
- u16 mc_pause_threshold_q4q7;
-
- void *stats_mem; /* original pointer to allocated mem */
- dma_addr_t stats_mem_phy; /* Physical address of the stat block */
- u32 stats_mem_sz;
- struct stat_block *stats_info; /* Logical address of the stat block */
-};
-
-/* Default Tunable parameters of the NIC. */
-#define DEFAULT_FIFO_0_LEN 4096
-#define DEFAULT_FIFO_1_7_LEN 512
-#define SMALL_BLK_CNT 30
-#define LARGE_BLK_CNT 100
-
-/*
- * Structure to keep track of the MSI-X vectors and the corresponding
- * argument registered against each vector
- */
-#define MAX_REQUESTED_MSI_X 9
-struct s2io_msix_entry
-{
- u16 vector;
- u16 entry;
- void *arg;
-
- u8 type;
-#define MSIX_ALARM_TYPE 1
-#define MSIX_RING_TYPE 2
-
- u8 in_use;
-#define MSIX_REGISTERED_SUCCESS 0xAA
-};
-
-struct msix_info_st {
- u64 addr;
- u64 data;
-};
-
-/* These flags represent the device's temporary state */
-enum s2io_device_state_t
-{
- __S2IO_STATE_LINK_TASK=0,
- __S2IO_STATE_CARD_UP
-};
-
-/* Structure representing one instance of the NIC */
-struct s2io_nic {
- int rxd_mode;
- /*
- * Count of packets to be processed in a given iteration; it is indicated
- * by the quota field of the device structure when NAPI is enabled.
- */
- int pkts_to_process;
- struct net_device *dev;
- struct mac_info mac_control;
- struct config_param config;
- struct pci_dev *pdev;
- void __iomem *bar0;
- void __iomem *bar1;
-#define MAX_MAC_SUPPORTED 16
-#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
-
- struct mac_addr def_mac_addr[256];
-
- struct net_device_stats stats;
- int device_enabled_once;
-
- char name[60];
-
- /* Timer that handles I/O errors/exceptions */
- struct timer_list alarm_timer;
-
- /* Space to back up the PCI config space */
- u32 config_space[256 / sizeof(u32)];
-
-#define PROMISC 1
-#define ALL_MULTI 2
-
-#define MAX_ADDRS_SUPPORTED 64
- u16 mc_addr_count;
-
- u16 m_cast_flg;
- u16 all_multi_pos;
- u16 promisc_flg;
-
- /* Restart timer task, used to restart the NIC if the device is stuck,
- * and a scheduled task that sets the correct link state once the
- * NIC's PHY has stabilized after a state change.
- */
- struct work_struct rst_timer_task;
- struct work_struct set_link_task;
-
- /* Flag that can be used to turn on or turn off the Rx checksum
- * offload feature.
- */
- int rx_csum;
-
- /* Below variables are used for fifo selection to transmit a packet */
- u16 fifo_selector[MAX_TX_FIFOS];
-
- /* Total fifos for tcp packets */
- u8 total_tcp_fifos;
-
- /*
- * Beginning index of udp for udp packets
- * Value will be equal to
- * (tx_fifo_num - FIFO_UDP_MAX_NUM - FIFO_OTHER_MAX_NUM)
- */
- u8 udp_fifo_idx;
-
- u8 total_udp_fifos;
-
- /*
- * Beginning index of fifo for all other packets
- * Value will be equal to (tx_fifo_num - FIFO_OTHER_MAX_NUM)
- */
- u8 other_fifo_idx;
-
- struct napi_struct napi;
- /* after blink, the adapter must be restored with original
- * values.
- */
- u64 adapt_ctrl_org;
-
- /* Last known link state. */
- u16 last_link_state;
-#define LINK_DOWN 1
-#define LINK_UP 2
-
- int task_flag;
- unsigned long long start_time;
- int vlan_strip_flag;
-#define MSIX_FLG 0xA5
- int num_entries;
- struct msix_entry *entries;
- int msi_detected;
- wait_queue_head_t msi_wait;
- struct s2io_msix_entry *s2io_entries;
- char desc[MAX_REQUESTED_MSI_X][25];
-
- int avail_msix_vectors; /* No. of MSI-X vectors granted by system */
-
- struct msix_info_st msix_info[0x3f];
-
-#define XFRAME_I_DEVICE 1
-#define XFRAME_II_DEVICE 2
- u8 device_type;
-
- unsigned long clubbed_frms_cnt;
- unsigned long sending_both;
- u16 lro_max_aggr_per_sess;
- volatile unsigned long state;
- u64 general_int_mask;
-
-#define VPD_STRING_LEN 80
- u8 product_name[VPD_STRING_LEN];
- u8 serial_num[VPD_STRING_LEN];
-};
-
-#define RESET_ERROR 1
-#define CMD_ERROR 2
-
-/*
- * Some registers have to be written in a particular order to
- * guarantee correct hardware operation. The macro SPECIAL_REG_WRITE
- * is used to perform such ordered writes. Defines UF (Upper First)
- * and LF (Lower First) specify the required write order.
- */
-#define UF 1
-#define LF 2
-static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
-{
- if (order == LF) {
- writel((u32) (val), addr);
- (void) readl(addr);
- writel((u32) (val >> 32), (addr + 4));
- (void) readl(addr + 4);
- } else {
- writel((u32) (val >> 32), (addr + 4));
- (void) readl(addr + 4);
- writel((u32) (val), addr);
- (void) readl(addr);
- }
-}
-
-/* Interrupt related values of Xena */
-
-#define ENABLE_INTRS 1
-#define DISABLE_INTRS 2
-
-/* Highest level interrupt blocks */
-#define TX_PIC_INTR (0x0001<<0)
-#define TX_DMA_INTR (0x0001<<1)
-#define TX_MAC_INTR (0x0001<<2)
-#define TX_XGXS_INTR (0x0001<<3)
-#define TX_TRAFFIC_INTR (0x0001<<4)
-#define RX_PIC_INTR (0x0001<<5)
-#define RX_DMA_INTR (0x0001<<6)
-#define RX_MAC_INTR (0x0001<<7)
-#define RX_XGXS_INTR (0x0001<<8)
-#define RX_TRAFFIC_INTR (0x0001<<9)
-#define MC_INTR (0x0001<<10)
-#define ENA_ALL_INTRS ( TX_PIC_INTR | \
- TX_DMA_INTR | \
- TX_MAC_INTR | \
- TX_XGXS_INTR | \
- TX_TRAFFIC_INTR | \
- RX_PIC_INTR | \
- RX_DMA_INTR | \
- RX_MAC_INTR | \
- RX_XGXS_INTR | \
- RX_TRAFFIC_INTR | \
- MC_INTR )
-
-/* Interrupt masks for the general interrupt mask register */
-#define DISABLE_ALL_INTRS 0xFFFFFFFFFFFFFFFFULL
-
-#define TXPIC_INT_M s2BIT(0)
-#define TXDMA_INT_M s2BIT(1)
-#define TXMAC_INT_M s2BIT(2)
-#define TXXGXS_INT_M s2BIT(3)
-#define TXTRAFFIC_INT_M s2BIT(8)
-#define PIC_RX_INT_M s2BIT(32)
-#define RXDMA_INT_M s2BIT(33)
-#define RXMAC_INT_M s2BIT(34)
-#define MC_INT_M s2BIT(35)
-#define RXXGXS_INT_M s2BIT(36)
-#define RXTRAFFIC_INT_M s2BIT(40)
-
-/* PIC level Interrupts TODO*/
-
-/* DMA level Interrupts */
-#define TXDMA_PFC_INT_M s2BIT(0)
-#define TXDMA_PCC_INT_M s2BIT(2)
-
-/* PFC block interrupts */
-#define PFC_MISC_ERR_1 s2BIT(0) /* Interrupt to indicate FIFO full */
-
-/* PCC block interrupts. */
-#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate
- PCC_FB_ECC Error. */
-
-#define RXD_GET_VLAN_TAG(Control_2) (u16)(Control_2 & MASK_VLAN_TAG)
-/*
- * Prototype declaration.
- */
-static int s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre);
-static void s2io_rem_nic(struct pci_dev *pdev);
-static int init_shared_mem(struct s2io_nic *sp);
-static void free_shared_mem(struct s2io_nic *sp);
-static int init_nic(struct s2io_nic *nic);
-static int rx_intr_handler(struct ring_info *ring_data, int budget);
-static void s2io_txpic_intr_handle(struct s2io_nic *sp);
-static void tx_intr_handler(struct fifo_info *fifo_data);
-static void s2io_handle_errors(void * dev_id);
-
-static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue);
-static void s2io_set_multicast(struct net_device *dev, bool may_sleep);
-static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
-static void s2io_link(struct s2io_nic * sp, int link);
-static void s2io_reset(struct s2io_nic * sp);
-static int s2io_poll_msix(struct napi_struct *napi, int budget);
-static int s2io_poll_inta(struct napi_struct *napi, int budget);
-static void s2io_init_pci(struct s2io_nic * sp);
-static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr);
-static void s2io_alarm_handle(struct timer_list *t);
-static irqreturn_t
-s2io_msix_ring_handle(int irq, void *dev_id);
-static irqreturn_t
-s2io_msix_fifo_handle(int irq, void *dev_id);
-static irqreturn_t s2io_isr(int irq, void *dev_id);
-static int verify_xena_quiescence(struct s2io_nic *sp);
-static const struct ethtool_ops netdev_ethtool_ops;
-static void s2io_set_link(struct work_struct *work);
-static int s2io_set_swapper(struct s2io_nic * sp);
-static void s2io_card_down(struct s2io_nic *nic);
-static int s2io_card_up(struct s2io_nic *nic);
-static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
- int bit_state, bool may_sleep);
-static int s2io_add_isr(struct s2io_nic * sp);
-static void s2io_rem_isr(struct s2io_nic * sp);
-
-static void restore_xmsi_data(struct s2io_nic *nic);
-static void do_s2io_store_unicast_mc(struct s2io_nic *sp);
-static void do_s2io_restore_unicast_mc(struct s2io_nic *sp);
-static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset);
-static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr);
-static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset);
-static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr);
-
-static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
- u8 **tcp, u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
- struct s2io_nic *sp);
-static void clear_lro_session(struct lro *lro);
-static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag);
-static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
-static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
- struct sk_buff *skb, u32 tcp_len);
-static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring);
-
-static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state);
-static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev);
-static void s2io_io_resume(struct pci_dev *pdev);
-
-#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
-#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
-#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type
-
-#define S2IO_PARM_INT(X, def_val) \
- static unsigned int X = def_val;\
- module_param(X , uint, 0);
-
-#endif /* _S2IO_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 16c828dd5c1a..e88b1c4732a5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1435,15 +1435,19 @@ static int nfp_net_get_fs_loc(struct nfp_net *nn, u32 *rule_locs)
return 0;
}
+static u32 nfp_net_get_rx_ring_count(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ return nn->dp.num_rx_rings;
+}
+
static int nfp_net_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct nfp_net *nn = netdev_priv(netdev);
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = nn->dp.num_rx_rings;
- return 0;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = nn->fs.count;
return 0;
@@ -2501,6 +2505,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_sset_count = nfp_net_get_sset_count,
.get_rxnfc = nfp_net_get_rxnfc,
.set_rxnfc = nfp_net_set_rxnfc,
+ .get_rx_ring_count = nfp_net_get_rx_ring_count,
.get_rxfh_indir_size = nfp_net_get_rxfh_indir_size,
.get_rxfh_key_size = nfp_net_get_rxfh_key_size,
.get_rxfh = nfp_net_get_rxfh,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 2d9efadb5d2a..1514c1019f28 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -263,9 +263,10 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
/* This means there's no module plugged in */
break;
default:
- dev_info(lif->ionic->dev, "unknown xcvr type pid=%d / 0x%x\n",
- idev->port_info->status.xcvr.pid,
- idev->port_info->status.xcvr.pid);
+ dev_dbg_ratelimited(lif->ionic->dev,
+ "unknown xcvr type pid=%d / 0x%x\n",
+ idev->port_info->status.xcvr.pid,
+ idev->port_info->status.xcvr.pid);
break;
}
@@ -843,23 +844,11 @@ static int ionic_set_channels(struct net_device *netdev,
return err;
}
-static int ionic_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info, u32 *rules)
+static u32 ionic_get_rx_ring_count(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- int err = 0;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = lif->nxqs;
- break;
- default:
- netdev_dbg(netdev, "Command parameter %d is not supported\n",
- info->cmd);
- err = -EOPNOTSUPP;
- }
- return err;
+ return lif->nxqs;
}
static u32 ionic_get_rxfh_indir_size(struct net_device *netdev)
@@ -1152,7 +1141,7 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.get_strings = ionic_get_strings,
.get_ethtool_stats = ionic_get_stats,
.get_sset_count = ionic_get_sset_count,
- .get_rxnfc = ionic_get_rxnfc,
+ .get_rx_ring_count = ionic_get_rx_ring_count,
.get_rxfh_indir_size = ionic_get_rxfh_indir_size,
.get_rxfh_key_size = ionic_get_rxfh_key_size,
.get_rxfh = ionic_get_rxfh,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 23982704273c..647f30a16a94 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1199,6 +1199,13 @@ static int qede_get_rxfh_fields(struct net_device *dev,
return 0;
}
+static u32 qede_get_rx_ring_count(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return QEDE_RSS_COUNT(edev);
+}
+
static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
@@ -1206,9 +1213,6 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
int rc = 0;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = QEDE_RSS_COUNT(edev);
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = qede_get_arfs_filter_count(edev);
info->data = QEDE_RFS_MAX_FLTR;
@@ -2289,6 +2293,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_sset_count = qede_get_sset_count,
.get_rxnfc = qede_get_rxnfc,
.set_rxnfc = qede_set_rxnfc,
+ .get_rx_ring_count = qede_get_rx_ring_count,
.get_rxfh_indir_size = qede_get_rxfh_indir_size,
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
@@ -2333,6 +2338,7 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
.get_sset_count = qede_get_sset_count,
.get_rxnfc = qede_get_rxnfc,
.set_rxnfc = qede_set_rxnfc,
+ .get_rx_ring_count = qede_get_rx_ring_count,
.get_rxfh_indir_size = qede_get_rxfh_indir_size,
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
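The conversion in these hunks is the same mechanical step everywhere: the ETHTOOL_GRXRINGS case moves out of get_rxnfc into the dedicated get_rx_ring_count callback, which simply returns the Rx ring count. For any other driver still answering GRXRINGS in get_rxnfc, the equivalent change would look roughly like this (the foo_* names are placeholders):

    static u32 foo_get_rx_ring_count(struct net_device *netdev)
    {
            struct foo_priv *priv = netdev_priv(netdev);

            /* return whatever the old ETHTOOL_GRXRINGS case put in cmd->data */
            return priv->num_rx_rings;
    }

    /* ...hooked up next to the existing rxnfc ops: */
            .get_rx_ring_count      = foo_get_rx_ring_count,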
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index a73dcaffa8c5..a8532ebd42ec 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -92,8 +92,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "8139too"
-#define DRV_VERSION "0.9.28"
-
#include <linux/module.h>
#include <linux/kernel.h>
@@ -115,8 +113,6 @@
#include <linux/if_vlan.h>
#include <asm/irq.h>
-#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
-
/* Default Message level */
#define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
@@ -623,7 +619,6 @@ struct rtl8139_private {
MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(use_io, bool, 0);
MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
@@ -955,17 +950,6 @@ static int rtl8139_init_one(struct pci_dev *pdev,
board_idx++;
- /* when we're built into the kernel, the driver version message
- * is only printed if at least one 8139 board has been found
- */
-#ifndef MODULE
- {
- static int printed_version;
- if (!printed_version++)
- pr_info(RTL8139_DRIVER_NAME "\n");
- }
-#endif
-
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) {
dev_info(&pdev->dev,
@@ -2383,7 +2367,6 @@ static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
{
struct rtl8139_private *tp = netdev_priv(dev);
strscpy(info->driver, DRV_NAME, sizeof(info->driver));
- strscpy(info->version, DRV_VERSION, sizeof(info->version));
strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
}
@@ -2656,25 +2639,4 @@ static struct pci_driver rtl8139_pci_driver = {
.driver.pm = &rtl8139_pm_ops,
};
-
-static int __init rtl8139_init_module (void)
-{
- /* when we're a module, we always print a version message,
- * even if no 8139 board is found.
- */
-#ifdef MODULE
- pr_info(RTL8139_DRIVER_NAME "\n");
-#endif
-
- return pci_register_driver(&rtl8139_pci_driver);
-}
-
-
-static void __exit rtl8139_cleanup_module (void)
-{
- pci_unregister_driver (&rtl8139_pci_driver);
-}
-
-
-module_init(rtl8139_init_module);
-module_exit(rtl8139_cleanup_module);
+module_pci_driver(rtl8139_pci_driver);
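module_pci_driver() generates the init/exit boilerplate that the removed functions spelled out by hand; since this driver's module hooks did nothing beyond registration (and printing the version banner this patch deliberately drops), the one-liner is a drop-in replacement. Expanded, it is roughly equivalent to:

    static int __init rtl8139_pci_driver_init(void)
    {
            return pci_register_driver(&rtl8139_pci_driver);
    }
    module_init(rtl8139_pci_driver_init);

    static void __exit rtl8139_pci_driver_exit(void)
    {
            pci_unregister_driver(&rtl8139_pci_driver);
    }
    module_exit(rtl8139_pci_driver_exit);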
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 272c83bfdc6c..9b0f4f9631db 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_REALTEK
bool "Realtek devices"
default y
- depends on PCI || (PARPORT && X86)
+ depends on PCI
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -17,20 +17,6 @@ config NET_VENDOR_REALTEK
if NET_VENDOR_REALTEK
-config ATP
- tristate "AT-LAN-TEC/RealTek pocket adapter support"
- depends on PARPORT && X86
- select CRC32
- help
- This is a network (Ethernet) device which attaches to your parallel
- port. Read the file <file:drivers/net/ethernet/realtek/atp.c>
- if you want to use this. If you intend to use this driver, you
- should have said N to the "Parallel printer support", because the two
- drivers don't like each other.
-
- To compile this driver as a module, choose M here: the module
- will be called atp.
-
config 8139CP
tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support"
depends on PCI
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index 046adf503ff4..12a9c399f40c 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
-obj-$(CONFIG_ATP) += atp.o
r8169-y += r8169_main.o r8169_firmware.o r8169_phy_config.o
r8169-$(CONFIG_R8169_LEDS) += r8169_leds.o
obj-$(CONFIG_R8169) += r8169.o
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
deleted file mode 100644
index 0d65434982a2..000000000000
--- a/drivers/net/ethernet/realtek/atp.c
+++ /dev/null
@@ -1,886 +0,0 @@
-/* atp.c: Attached (pocket) ethernet adapter driver for linux. */
-/*
- This is a driver for common OEM pocket (parallel port)
- ethernet adapters based on the Realtek RTL8002 and RTL8012 chips.
-
- Written 1993-2000 by Donald Becker.
-
- This software may be used and distributed according to the terms of
- the GNU General Public License (GPL), incorporated herein by reference.
- Drivers based on or derived from this code fall under the GPL and must
- retain the authorship, copyright and license notice. This file is not
- a complete program and may only be used when the entire operating
- system is licensed under the GPL.
-
- Copyright 1993 United States Government as represented by the Director,
- National Security Agency. Copyright 1994-2000 retained by the original
- author, Donald Becker. The timer-based reset code was supplied in 1995
- by Bill Carlson, wwc@super.org.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- Support information and updates available at
- http://www.scyld.com/network/atp.html
-
-
- Modular support/softnet added by Alan Cox.
- _bit abuse fixed up by Alan Cox
-
-*/
-
-static const char version[] =
-"atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n";
-
-/* The user-configurable values.
- These may be modified when a driver module is loaded.*/
-
-static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
-#define net_debug debug
-
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static int max_interrupt_work = 15;
-
-#define NUM_UNITS 2
-/* The standard set of ISA module parameters. */
-static int io[NUM_UNITS];
-static int irq[NUM_UNITS];
-static int xcvr[NUM_UNITS]; /* The data transfer mode. */
-
-/* Operational parameters that are set at compile time. */
-
-/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT (400*HZ/1000)
-
-/*
- This file is a device driver for the RealTek (aka AT-Lan-Tec) pocket
- ethernet adapter. This is a common low-cost OEM pocket ethernet
- adapter, sold under many names.
-
- Sources:
- This driver was written from the packet driver assembly code provided by
- Vincent Bono of AT-Lan-Tec. Ever try to figure out how a complicated
- device works just from the assembly code? It ain't pretty. The following
- description is written based on guesses and writing lots of special-purpose
- code to test my theorized operation.
-
- In 1997 Realtek made available the documentation for the second generation
- RTL8012 chip, which has led to several driver improvements.
- http://www.realtek.com.tw/
-
- Theory of Operation
-
- The RTL8002 adapter seems to be built around a custom spin of the SEEQ
- controller core. It probably has a 16K or 64K internal packet buffer, of
- which the first 4K is devoted to transmit and the rest to receive.
- The controller maintains the queue of received packets and the packet buffer
- access pointer internally, with only 'reset to beginning' and 'skip to next
- packet' commands visible. The transmit packet queue holds two (or more?)
- packets: both 'retransmit this packet' (due to collision) and 'transmit next
- packet' commands must be started by hand.
-
- The station address is stored in a standard bit-serial EEPROM which must be
- read (ughh) by the device driver. (Provisions have been made for
- substituting a 74S288 PROM, but I haven't gotten reports of any models
- using it.) Unlike built-in devices, a pocket adapter can temporarily lose
- power without indication to the device driver. The major effect is that
- the station address, receive filter (promiscuous, etc.) and transceiver
- must be reset.
-
- The controller itself has 16 registers, some of which use only the lower
- bits. The registers are read and written 4 bits at a time. The four bit
- register address is presented on the data lines along with a few additional
- timing and control bits. The data is then read from status port or written
- to the data port.
-
- Correction: the controller has two banks of 16 registers. The second
- bank contains only the multicast filter table (now used) and the EEPROM
- access registers.
-
- Since the bulk data transfer of the actual packets through the slow
- parallel port dominates the driver's running time, four distinct data
- (non-register) transfer modes are provided by the adapter, two in each
- direction. In the first mode timing for the nibble transfers is
- provided through the data port. In the second mode the same timing is
- provided through the control port. In either case the data is read from
- the status port and written to the data port, just as when accessing
- registers.
-
- In addition to the basic data transfer methods, several more modes are
- created by adding some delay by doing multiple reads of the data to allow
- it to stabilize. This delay seems to be needed on most machines.
-
- The data transfer mode is stored in the 'dev->if_port' field. Its default
- value is '4'. It may be overridden at boot-time using the third parameter
- to the "ether=..." initialization.
-
- The header file <atp.h> provides inline functions that encapsulate the
- register and data access methods. These functions are hand-tuned to
- generate reasonable object code. This header file also documents my
- interpretations of the device registers.
-*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include "atp.h"
-
-MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
-MODULE_DESCRIPTION("RealTek RTL8002/8012 parallel port Ethernet driver");
-MODULE_LICENSE("GPL");
-
-module_param(max_interrupt_work, int, 0);
-module_param(debug, int, 0);
-module_param_hw_array(io, int, ioport, NULL, 0);
-module_param_hw_array(irq, int, irq, NULL, 0);
-module_param_array(xcvr, int, NULL, 0);
-MODULE_PARM_DESC(max_interrupt_work, "ATP maximum events handled per interrupt");
-MODULE_PARM_DESC(debug, "ATP debug level (0-7)");
-MODULE_PARM_DESC(io, "ATP I/O base address(es)");
-MODULE_PARM_DESC(irq, "ATP IRQ number(s)");
-MODULE_PARM_DESC(xcvr, "ATP transceiver(s) (0=internal, 1=external)");
-
-/* The number of low I/O ports used by the ethercard. */
-#define ETHERCARD_TOTAL_SIZE 3
-
-/* Sequence to switch an 8012 from printer mux to ethernet mode. */
-static char mux_8012[] = { 0xff, 0xf7, 0xff, 0xfb, 0xf3, 0xfb, 0xff, 0xf7,};
-
-struct net_local {
- spinlock_t lock;
- struct net_device *next_module;
- struct timer_list timer; /* Media selection timer. */
- struct net_device *dev; /* Timer dev. */
- unsigned long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
- int saved_tx_size;
- unsigned int tx_unit_busy:1;
- unsigned char re_tx, /* Number of packet retransmissions. */
- addr_mode, /* Current Rx filter e.g. promiscuous, etc. */
- pac_cnt_in_tx_buf;
-};
-
-/* This code, written by wwc@super.org, resets the adapter every
- TIMED_CHECKER ticks. This recovers from an unknown error which
- hangs the device. */
-#define TIMED_CHECKER (HZ/4)
-#ifdef TIMED_CHECKER
-#include <linux/timer.h>
-static void atp_timed_checker(struct timer_list *t);
-#endif
-
-/* Index to functions, as function prototypes. */
-
-static int atp_probe1(long ioaddr);
-static void get_node_ID(struct net_device *dev);
-static unsigned short eeprom_op(long ioaddr, unsigned int cmd);
-static int net_open(struct net_device *dev);
-static void hardware_init(struct net_device *dev);
-static void write_packet(long ioaddr, int length, unsigned char *packet, int pad, int mode);
-static void trigger_send(long ioaddr, int length);
-static netdev_tx_t atp_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t atp_interrupt(int irq, void *dev_id);
-static void net_rx(struct net_device *dev);
-static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode);
-static int net_close(struct net_device *dev);
-static void set_rx_mode(struct net_device *dev);
-static void tx_timeout(struct net_device *dev, unsigned int txqueue);
-
-
-/* A list of all installed ATP devices, for removing the driver module. */
-static struct net_device *root_atp_dev;
-
-/* Check for a network adapter of this type, and return '0' iff one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- If dev->base_addr == 2, allocate space for the device and return success
- (detachable devices only).
-
- FIXME: we should use the parport layer for this
- */
-static int __init atp_init(void)
-{
- int *port, ports[] = {0x378, 0x278, 0x3bc, 0};
- int base_addr = io[0];
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return atp_probe1(base_addr);
- else if (base_addr == 1) /* Don't probe at all. */
- return -ENXIO;
-
- for (port = ports; *port; port++) {
- long ioaddr = *port;
- outb(0x57, ioaddr + PAR_DATA);
- if (inb(ioaddr + PAR_DATA) != 0x57)
- continue;
- if (atp_probe1(ioaddr) == 0)
- return 0;
- }
-
- return -ENODEV;
-}
-
-static const struct net_device_ops atp_netdev_ops = {
- .ndo_open = net_open,
- .ndo_stop = net_close,
- .ndo_start_xmit = atp_send_packet,
- .ndo_set_rx_mode = set_rx_mode,
- .ndo_tx_timeout = tx_timeout,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init atp_probe1(long ioaddr)
-{
- struct net_device *dev = NULL;
- struct net_local *lp;
- int saved_ctrl_reg, status, i;
- int res;
-
- outb(0xff, ioaddr + PAR_DATA);
- /* Save the original value of the Control register, in case we guessed
- wrong. */
- saved_ctrl_reg = inb(ioaddr + PAR_CONTROL);
- if (net_debug > 3)
- printk("atp: Control register was %#2.2x.\n", saved_ctrl_reg);
- /* IRQEN=0, SLCTB=high INITB=high, AUTOFDB=high, STBB=high. */
- outb(0x04, ioaddr + PAR_CONTROL);
-#ifndef final_version
- if (net_debug > 3) {
- /* Turn off the printer multiplexer on the 8012. */
- for (i = 0; i < 8; i++)
- outb(mux_8012[i], ioaddr + PAR_DATA);
- write_reg(ioaddr, MODSEL, 0x00);
- printk("atp: Registers are ");
- for (i = 0; i < 32; i++)
- printk(" %2.2x", read_nibble(ioaddr, i));
- printk(".\n");
- }
-#endif
- /* Turn off the printer multiplexer on the 8012. */
- for (i = 0; i < 8; i++)
- outb(mux_8012[i], ioaddr + PAR_DATA);
- write_reg_high(ioaddr, CMR1, CMR1h_RESET);
- /* udelay() here? */
- status = read_nibble(ioaddr, CMR1);
-
- if (net_debug > 3) {
- printk(KERN_DEBUG "atp: Status nibble was %#2.2x..", status);
- for (i = 0; i < 32; i++)
- printk(" %2.2x", read_nibble(ioaddr, i));
- printk("\n");
- }
-
- if ((status & 0x78) != 0x08) {
- /* The pocket adapter probe failed, restore the control register. */
- outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
- return -ENODEV;
- }
- status = read_nibble(ioaddr, CMR2_h);
- if ((status & 0x78) != 0x10) {
- outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
- return -ENODEV;
- }
-
- dev = alloc_etherdev(sizeof(struct net_local));
- if (!dev)
- return -ENOMEM;
-
- /* Find the IRQ used by triggering an interrupt. */
- write_reg_byte(ioaddr, CMR2, 0x01); /* No accept mode, IRQ out. */
- write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); /* Enable Tx and Rx. */
-
- /* Omit autoIRQ routine for now. Use "table lookup" instead. Uhgggh. */
- if (irq[0])
- dev->irq = irq[0];
- else if (ioaddr == 0x378)
- dev->irq = 7;
- else
- dev->irq = 5;
- write_reg_high(ioaddr, CMR1, CMR1h_TxRxOFF); /* Disable Tx and Rx units. */
- write_reg(ioaddr, CMR2, CMR2_NULL);
-
- dev->base_addr = ioaddr;
-
- /* Read the station address PROM. */
- get_node_ID(dev);
-
-#ifndef MODULE
- if (net_debug)
- printk(KERN_INFO "%s", version);
-#endif
-
- printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, "
- "SAPROM %pM.\n",
- dev->name, dev->base_addr, dev->irq, dev->dev_addr);
-
- /* Reset the ethernet hardware and activate the printer pass-through. */
- write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
-
- lp = netdev_priv(dev);
- lp->addr_mode = CMR2h_Normal;
- spin_lock_init(&lp->lock);
-
- /* For the ATP adapter the "if_port" is really the data transfer mode. */
- if (xcvr[0])
- dev->if_port = xcvr[0];
- else
- dev->if_port = (dev->mem_start & 0xf) ? (dev->mem_start & 0x7) : 4;
- if (dev->mem_end & 0xf)
- net_debug = dev->mem_end & 7;
-
- dev->netdev_ops = &atp_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- res = register_netdev(dev);
- if (res) {
- free_netdev(dev);
- return res;
- }
-
- lp->next_module = root_atp_dev;
- root_atp_dev = dev;
-
- return 0;
-}
-
-/* Read the station address PROM, usually a word-wide EEPROM. */
-static void __init get_node_ID(struct net_device *dev)
-{
- long ioaddr = dev->base_addr;
- __be16 addr[ETH_ALEN / 2];
- int sa_offset = 0;
- int i;
-
- write_reg(ioaddr, CMR2, CMR2_EEPROM); /* Point to the EEPROM control registers. */
-
- /* Some adapters have the station address at offset 15 instead of offset
- zero. Check for it, and fix it if needed. */
- if (eeprom_op(ioaddr, EE_READ(0)) == 0xffff)
- sa_offset = 15;
-
- for (i = 0; i < 3; i++)
- addr[i] =
- cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
- eth_hw_addr_set(dev, (u8 *)addr);
-
- write_reg(ioaddr, CMR2, CMR2_NULL);
-}
-
-/*
- An EEPROM read command starts by shifting out 0x60+address, and then
- shifting in the serial data. See the NatSemi databook for details.
- * ________________
- * CS : __|
- * ___ ___
- * CLK: ______| |___| |
- * __ _______ _______
- * DI : __X_______X_______X
- * DO : _________X_______X
- */
-
-static unsigned short __init eeprom_op(long ioaddr, u32 cmd)
-{
- unsigned eedata_out = 0;
- int num_bits = EE_CMD_SIZE;
-
- while (--num_bits >= 0) {
- char outval = (cmd & (1<<num_bits)) ? EE_DATA_WRITE : 0;
- write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_LOW);
- write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_HIGH);
- eedata_out <<= 1;
- if (read_nibble(ioaddr, PROM_DATA) & EE_DATA_READ)
- eedata_out++;
- }
- write_reg_high(ioaddr, PROM_CMD, EE_CLK_LOW & ~EE_CS);
- return eedata_out;
-}
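
eeprom_op() shifts the command out MSB-first on DI while sampling DO on each clock, so the 16 data bits of a read arrive during the trailing clocks. A minimal standalone sketch of that framing, assuming only the EE_READ()/EE_CMD_SIZE definitions from atp.h further down (illustration, not driver code):

    #include <stdio.h>

    #define EE_READ(offset)  (((6 << 6) + (offset)) << 17)  /* from atp.h */
    #define EE_CMD_SIZE      27

    int main(void)
    {
            unsigned int cmd = EE_READ(0);
            int bit;

            /* Same MSB-first walk as the while loop in eeprom_op(). */
            for (bit = EE_CMD_SIZE - 1; bit >= 0; bit--)
                    putchar(cmd & (1u << bit) ? '1' : '0');
            putchar('\n');  /* start bit + read opcode + address, then data clocks */
            return 0;
    }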
-
-
-/* Open/initialize the board. This is called (in the current kernel)
- sometime after booting when the 'ifconfig' program is run.
-
- This routine sets everything up anew at each open, even
- registers that "should" only need to be set once at boot, so that
- there is a non-reboot way to recover if something goes wrong.
-
- This is an attachable device: if there is no private entry then it wasn't
- probed for at boot-time, and we need to probe for it again.
- */
-static int net_open(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ret;
-
- /* The interrupt line is turned off (tri-stated) when the device isn't in
- use. That's especially important for "attached" interfaces where the
- port or interrupt may be shared. */
- ret = request_irq(dev->irq, atp_interrupt, 0, dev->name, dev);
- if (ret)
- return ret;
-
- hardware_init(dev);
-
- lp->dev = dev;
- timer_setup(&lp->timer, atp_timed_checker, 0);
- lp->timer.expires = jiffies + TIMED_CHECKER;
- add_timer(&lp->timer);
-
- netif_start_queue(dev);
- return 0;
-}
-
-/* This routine resets the hardware. We initialize everything, assuming that
- the hardware may have been temporarily detached. */
-static void hardware_init(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
- int i;
-
- /* Turn off the printer multiplexer on the 8012. */
- for (i = 0; i < 8; i++)
- outb(mux_8012[i], ioaddr + PAR_DATA);
- write_reg_high(ioaddr, CMR1, CMR1h_RESET);
-
- for (i = 0; i < 6; i++)
- write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
-
- write_reg_high(ioaddr, CMR2, lp->addr_mode);
-
- if (net_debug > 2) {
- printk(KERN_DEBUG "%s: Reset: current Rx mode %d.\n", dev->name,
- (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f);
- }
-
- write_reg(ioaddr, CMR2, CMR2_IRQOUT);
- write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
-
- /* Enable the interrupt line from the serial port. */
- outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
-
- /* Unmask the interesting interrupts. */
- write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
- write_reg_high(ioaddr, IMR, ISRh_RxErr);
-
- lp->tx_unit_busy = 0;
- lp->pac_cnt_in_tx_buf = 0;
- lp->saved_tx_size = 0;
-}
-
-static void trigger_send(long ioaddr, int length)
-{
- write_reg_byte(ioaddr, TxCNT0, length & 0xff);
- write_reg(ioaddr, TxCNT1, length >> 8);
- write_reg(ioaddr, CMR1, CMR1_Xmit);
-}
-
-static void write_packet(long ioaddr, int length, unsigned char *packet, int pad_len, int data_mode)
-{
- if (length & 1) {
- length++;
- pad_len++;
- }
-
- outb(EOC+MAR, ioaddr + PAR_DATA);
- if ((data_mode & 1) == 0) {
- /* Write the packet out, starting with the write addr. */
- outb(WrAddr+MAR, ioaddr + PAR_DATA);
- do {
- write_byte_mode0(ioaddr, *packet++);
- } while (--length > pad_len);
- do {
- write_byte_mode0(ioaddr, 0);
- } while (--length > 0);
- } else {
- /* Write the packet out in slow mode. */
- unsigned char outbyte = *packet++;
-
- outb(Ctrl_LNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
- outb(WrAddr+MAR, ioaddr + PAR_DATA);
-
- outb((outbyte & 0x0f)|0x40, ioaddr + PAR_DATA);
- outb(outbyte & 0x0f, ioaddr + PAR_DATA);
- outbyte >>= 4;
- outb(outbyte & 0x0f, ioaddr + PAR_DATA);
- outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
- while (--length > pad_len)
- write_byte_mode1(ioaddr, *packet++);
- while (--length > 0)
- write_byte_mode1(ioaddr, 0);
- }
- /* Terminate the Tx frame. End of write: ECB. */
- outb(0xff, ioaddr + PAR_DATA);
- outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
-}
-
-static void tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- long ioaddr = dev->base_addr;
-
- printk(KERN_WARNING "%s: Transmit timed out, %s?\n", dev->name,
- inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem"
- : "IRQ conflict");
- dev->stats.tx_errors++;
- /* Try to restart the adapter. */
- hardware_init(dev);
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-static netdev_tx_t atp_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
- int length;
- unsigned long flags;
-
- length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
-
- netif_stop_queue(dev);
-
- /* Disable interrupts by writing 0x00 to the Interrupt Mask Register.
- This sequence must not be interrupted by an incoming packet. */
-
- spin_lock_irqsave(&lp->lock, flags);
- write_reg(ioaddr, IMR, 0);
- write_reg_high(ioaddr, IMR, 0);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- write_packet(ioaddr, length, skb->data, length-skb->len, dev->if_port);
-
- lp->pac_cnt_in_tx_buf++;
- if (lp->tx_unit_busy == 0) {
- trigger_send(ioaddr, length);
- lp->saved_tx_size = 0; /* Redundant */
- lp->re_tx = 0;
- lp->tx_unit_busy = 1;
- } else
- lp->saved_tx_size = length;
- /* Re-enable the LPT interrupts. */
- write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
- write_reg_high(ioaddr, IMR, ISRh_RxErr);
-
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-static irqreturn_t atp_interrupt(int irq, void *dev_instance)
-{
- struct net_device *dev = dev_instance;
- struct net_local *lp;
- long ioaddr;
- static int num_tx_since_rx;
- int boguscount = max_interrupt_work;
- int handled = 0;
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- spin_lock(&lp->lock);
-
- /* Disable additional spurious interrupts. */
- outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
-
- /* The adapter's output is currently the IRQ line, switch it to data. */
- write_reg(ioaddr, CMR2, CMR2_NULL);
- write_reg(ioaddr, IMR, 0);
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: In interrupt ", dev->name);
- while (--boguscount > 0) {
- int status = read_nibble(ioaddr, ISR);
- if (net_debug > 5)
- printk("loop status %02x..", status);
-
- if (status & (ISR_RxOK<<3)) {
- handled = 1;
- write_reg(ioaddr, ISR, ISR_RxOK); /* Clear the Rx interrupt. */
- do {
- int read_status = read_nibble(ioaddr, CMR1);
- if (net_debug > 6)
- printk("handling Rx packet %02x..", read_status);
- /* We acknowledged the normal Rx interrupt, so if the interrupt
- is still outstanding we must have a Rx error. */
- if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */
- dev->stats.rx_over_errors++;
- /* Set to no-accept mode long enough to remove a packet. */
- write_reg_high(ioaddr, CMR2, CMR2h_OFF);
- net_rx(dev);
- /* Clear the interrupt and return to normal Rx mode. */
- write_reg_high(ioaddr, ISR, ISRh_RxErr);
- write_reg_high(ioaddr, CMR2, lp->addr_mode);
- } else if ((read_status & (CMR1_BufEnb << 3)) == 0) {
- net_rx(dev);
- num_tx_since_rx = 0;
- } else
- break;
- } while (--boguscount > 0);
- } else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) {
- handled = 1;
- if (net_debug > 6)
- printk("handling Tx done..");
- /* Clear the Tx interrupt. We should check for too many failures
- and reinitialize the adapter. */
- write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK);
- if (status & (ISR_TxErr<<3)) {
- dev->stats.collisions++;
- if (++lp->re_tx > 15) {
- dev->stats.tx_aborted_errors++;
- hardware_init(dev);
- break;
- }
- /* Attempt to retransmit. */
- if (net_debug > 6) printk("attempting to ReTx");
- write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit);
- } else {
- /* Finish up the transmit. */
- dev->stats.tx_packets++;
- lp->pac_cnt_in_tx_buf--;
- if (lp->saved_tx_size) {
- trigger_send(ioaddr, lp->saved_tx_size);
- lp->saved_tx_size = 0;
- lp->re_tx = 0;
- } else
- lp->tx_unit_busy = 0;
- netif_wake_queue(dev); /* Inform upper layers. */
- }
- num_tx_since_rx++;
- } else if (num_tx_since_rx > 8 &&
- time_after(jiffies, lp->last_rx_time + HZ)) {
- if (net_debug > 2)
- printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and "
- "%ld jiffies status %02x CMR1 %02x.\n", dev->name,
- num_tx_since_rx, jiffies - lp->last_rx_time, status,
- (read_nibble(ioaddr, CMR1) >> 3) & 15);
- dev->stats.rx_missed_errors++;
- hardware_init(dev);
- num_tx_since_rx = 0;
- break;
- } else
- break;
- }
-
- /* The following code fixes a rare (and very difficult to track down)
- problem where the adapter forgets its ethernet address. */
- {
- int i;
- for (i = 0; i < 6; i++)
- write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
-#if 0 && defined(TIMED_CHECKER)
- mod_timer(&lp->timer, jiffies + TIMED_CHECKER);
-#endif
- }
-
- /* Tell the adapter that it can go back to using the output line as IRQ. */
- write_reg(ioaddr, CMR2, CMR2_IRQOUT);
- /* Enable the physical interrupt line, which is sure to be low until.. */
- outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
- /* .. we enable the interrupt sources. */
- write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
- write_reg_high(ioaddr, IMR, ISRh_RxErr); /* Hmmm, really needed? */
-
- spin_unlock(&lp->lock);
-
- if (net_debug > 5) printk("exiting interrupt.\n");
- return IRQ_RETVAL(handled);
-}
-
-#ifdef TIMED_CHECKER
-/* The following code fixes a rare (and very difficult to track down)
- problem where the adapter forgets its ethernet address. */
-static void atp_timed_checker(struct timer_list *t)
-{
- struct net_local *lp = timer_container_of(lp, t, timer);
- struct net_device *dev = lp->dev;
- long ioaddr = dev->base_addr;
- int tickssofar = jiffies - lp->last_rx_time;
- int i;
-
- spin_lock(&lp->lock);
- if (tickssofar > 2*HZ) {
-#if 1
- for (i = 0; i < 6; i++)
- write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
- lp->last_rx_time = jiffies;
-#else
- for (i = 0; i < 6; i++)
- if (read_cmd_byte(ioaddr, PAR0 + i) != atp_timed_dev->dev_addr[i])
- {
- struct net_local *lp = netdev_priv(atp_timed_dev);
- write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]);
- if (i == 2)
- dev->stats.tx_errors++;
- else if (i == 3)
- dev->stats.tx_dropped++;
- else if (i == 4)
- dev->stats.collisions++;
- else
- dev->stats.rx_errors++;
- }
-#endif
- }
- spin_unlock(&lp->lock);
- lp->timer.expires = jiffies + TIMED_CHECKER;
- add_timer(&lp->timer);
-}
-#endif
-
-/* We have a good packet(s), get it/them out of the buffers. */
-static void net_rx(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
- struct rx_header rx_head;
-
- /* Process the received packet. */
- outb(EOC+MAR, ioaddr + PAR_DATA);
- read_block(ioaddr, 8, (unsigned char*)&rx_head, dev->if_port);
- if (net_debug > 5)
- printk(KERN_DEBUG " rx_count %04x %04x %04x %04x..", rx_head.pad,
- rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr);
- if ((rx_head.rx_status & 0x77) != 0x01) {
- dev->stats.rx_errors++;
- if (rx_head.rx_status & 0x0004) dev->stats.rx_frame_errors++;
- else if (rx_head.rx_status & 0x0002) dev->stats.rx_crc_errors++;
- if (net_debug > 3)
- printk(KERN_DEBUG "%s: Unknown ATP Rx error %04x.\n",
- dev->name, rx_head.rx_status);
- if (rx_head.rx_status & 0x0020) {
- dev->stats.rx_fifo_errors++;
- write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE);
- write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
- } else if (rx_head.rx_status & 0x0050)
- hardware_init(dev);
- return;
- } else {
- /* Malloc up new buffer. The "-4" omits the FCS (CRC). */
- int pkt_len = (rx_head.rx_count & 0x7ff) - 4;
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
- dev->stats.rx_dropped++;
- goto done;
- }
-
- skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- done:
- write_reg(ioaddr, CMR1, CMR1_NextPkt);
- lp->last_rx_time = jiffies;
-}
-
-static void read_block(long ioaddr, int length, unsigned char *p, int data_mode)
-{
- if (data_mode <= 3) { /* Modes 0 through 3 */
- outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
- outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR,
- ioaddr + PAR_DATA);
- if (data_mode <= 1) { /* Mode 0 or 1 */
- do { *p++ = read_byte_mode0(ioaddr); } while (--length > 0);
- } else { /* Mode 2 or 3 */
- do { *p++ = read_byte_mode2(ioaddr); } while (--length > 0);
- }
- } else if (data_mode <= 5) {
- do { *p++ = read_byte_mode4(ioaddr); } while (--length > 0);
- } else {
- do { *p++ = read_byte_mode6(ioaddr); } while (--length > 0);
- }
-
- outb(EOC+HNib+MAR, ioaddr + PAR_DATA);
- outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
-}
-
-/* The inverse routine to net_open(). */
-static int
-net_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
-
- netif_stop_queue(dev);
-
- timer_delete_sync(&lp->timer);
-
- /* Flush the Tx and disable Rx here. */
- lp->addr_mode = CMR2h_OFF;
- write_reg_high(ioaddr, CMR2, CMR2h_OFF);
-
- /* Free the IRQ line. */
- outb(0x00, ioaddr + PAR_CONTROL);
- free_irq(dev->irq, dev);
-
- /* Reset the ethernet hardware and activate the printer pass-through. */
- write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
- return 0;
-}
-
-/*
- * Set or clear the multicast filter for this adapter.
- */
-
-static void set_rx_mode(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
-
- if (!netdev_mc_empty(dev) || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
- lp->addr_mode = CMR2h_PROMISC;
- else
- lp->addr_mode = CMR2h_Normal;
- write_reg_high(ioaddr, CMR2, lp->addr_mode);
-}
-
-static int __init atp_init_module(void)
-{
- if (debug) /* Emit version even if no cards detected. */
- printk(KERN_INFO "%s", version);
- return atp_init();
-}
-
-static void __exit atp_cleanup_module(void)
-{
- struct net_device *next_dev;
-
- while (root_atp_dev) {
- struct net_local *atp_local = netdev_priv(root_atp_dev);
- next_dev = atp_local->next_module;
- unregister_netdev(root_atp_dev);
- /* No need to release_region(), since we never snarf it. */
- free_netdev(root_atp_dev);
- root_atp_dev = next_dev;
- }
-}
-
-module_init(atp_init_module);
-module_exit(atp_cleanup_module);
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
deleted file mode 100644
index b202184eddd4..000000000000
--- a/drivers/net/ethernet/realtek/atp.h
+++ /dev/null
@@ -1,262 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Linux header file for the ATP pocket ethernet adapter. */
-/* v1.09 8/9/2000 becker@scyld.com. */
-
-#include <linux/if_ether.h>
-#include <linux/types.h>
-
-/* The header prepended to received packets. */
-struct rx_header {
- ushort pad; /* Pad. */
- ushort rx_count;
- ushort rx_status; /* Unknown bit assignments :-<. */
- ushort cur_addr; /* Apparently the current buffer address(?) */
-};
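
net_rx() above reads this header as a raw 8-byte block (read_block(ioaddr, 8, ...)), so the layout must remain exactly four 16-bit fields with no padding. The driver never had a build-time guard for that; a sketch of one:

    /* Compile-time layout check; kernel code would normally spell this
     * static_assert() or BUILD_BUG_ON().
     */
    _Static_assert(sizeof(struct rx_header) == 8,
                   "rx_header is read as a raw 8-byte block");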
-
-#define PAR_DATA 0
-#define PAR_STATUS 1
-#define PAR_CONTROL 2
-
-#define Ctrl_LNibRead 0x08 /* LP_PSELECP */
-#define Ctrl_HNibRead 0
-#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */
-#define Ctrl_HNibWrite 0
-#define Ctrl_SelData 0x04 /* LP_PINITP */
-#define Ctrl_IRQEN 0x10 /* LP_PINTEN */
-
-#define EOW 0xE0
-#define EOC 0xE0
-#define WrAddr 0x40 /* Set address of EPLC read, write register. */
-#define RdAddr 0xC0
-#define HNib 0x10
-
-enum page0_regs {
- /* The first six registers hold
- * the ethernet physical station address.
- */
- PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
- TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */
- TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */
- ISR = 10, IMR = 11, /* Interrupt status and mask. */
- CMR1 = 12, /* Command register 1. */
- CMR2 = 13, /* Command register 2. */
- MODSEL = 14, /* Mode select register. */
- MAR = 14, /* Memory address register (?). */
- CMR2_h = 0x1d,
-};
-
-enum eepage_regs {
- PROM_CMD = 6,
- PROM_DATA = 7 /* Note that PROM_CMD is in the "high" bits. */
-};
-
-#define ISR_TxOK 0x01
-#define ISR_RxOK 0x04
-#define ISR_TxErr 0x02
-#define ISRh_RxErr 0x11 /* ISR, high nibble */
-
-#define CMR1h_MUX 0x08 /* Select printer multiplexor on 8012. */
-#define CMR1h_RESET 0x04 /* Reset. */
-#define CMR1h_RxENABLE 0x02 /* Rx unit enable. */
-#define CMR1h_TxENABLE 0x01 /* Tx unit enable. */
-#define CMR1h_TxRxOFF 0x00
-#define CMR1_ReXmit 0x08 /* Trigger a retransmit. */
-#define CMR1_Xmit 0x04 /* Trigger a transmit. */
-#define CMR1_IRQ 0x02 /* Interrupt active. */
-#define CMR1_BufEnb 0x01 /* Enable the buffer(?). */
-#define CMR1_NextPkt 0x01 /* Advance to the next packet(?). */
-
-#define CMR2_NULL 8
-#define CMR2_IRQOUT 9
-#define CMR2_RAMTEST 10
-#define CMR2_EEPROM 12 /* Set to page 1, for reading the EEPROM. */
-
-#define CMR2h_OFF 0 /* No accept mode. */
-#define CMR2h_Physical 1 /* Accept a physical address match only. */
-#define CMR2h_Normal 2 /* Accept physical and broadcast address. */
-#define CMR2h_PROMISC 3 /* Promiscuous mode. */
-
-/* An inline function used below: it differs from inb() by explicitly
- * returning an unsigned char, saving a truncation.
- */
-static inline unsigned char inbyte(unsigned short port)
-{
- unsigned char _v;
-
- __asm__ __volatile__ ("inb %w1,%b0" : "=a" (_v) : "d" (port));
- return _v;
-}
-
-/* Read register OFFSET.
- * This command should always be terminated with read_end().
- */
-static inline unsigned char read_nibble(short port, unsigned char offset)
-{
- unsigned char retval;
-
- outb(EOC+offset, port + PAR_DATA);
- outb(RdAddr+offset, port + PAR_DATA);
- inbyte(port + PAR_STATUS); /* Settling time delay */
- retval = inbyte(port + PAR_STATUS);
- outb(EOC+offset, port + PAR_DATA);
-
- return retval;
-}
-
-/* Functions for bulk data read. The interrupt line is always disabled. */
-/* Get a byte using read mode 0, reading data from the control lines. */
-static inline unsigned char read_byte_mode0(short ioaddr)
-{
- unsigned char low_nib;
-
- outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
- inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
-}
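
The shifting above mirrors the wiring: each nibble arrives on parallel-port status bits 6..3, so the low nibble is shifted down by 3 and the high nibble up by 1 to land in bits 7..4. The assembly step shared by all the read_byte_mode*() helpers, isolated as a sketch:

    static unsigned char assemble_byte(unsigned char lo_status,
                                       unsigned char hi_status)
    {
            unsigned char low  = (lo_status >> 3) & 0x0f;  /* bits 6..3 -> 3..0 */
            unsigned char high = (hi_status << 1) & 0xf0;  /* bits 6..3 -> 7..4 */

            return high | low;
    }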
-
-/* The same as read_byte_mode0(), but does multiple inb()s for stability. */
-static inline unsigned char read_byte_mode2(short ioaddr)
-{
- unsigned char low_nib;
-
- outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
-}
-
-/* Read a byte through the data register. */
-static inline unsigned char read_byte_mode4(short ioaddr)
-{
- unsigned char low_nib;
-
- outb(RdAddr | MAR, ioaddr + PAR_DATA);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
-}
-
-/* Read a byte through the data register, double reading to allow settling. */
-static inline unsigned char read_byte_mode6(short ioaddr)
-{
- unsigned char low_nib;
-
- outb(RdAddr | MAR, ioaddr + PAR_DATA);
- inbyte(ioaddr + PAR_STATUS);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
- inbyte(ioaddr + PAR_STATUS);
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
-}
-
-static inline void
-write_reg(short port, unsigned char reg, unsigned char value)
-{
- unsigned char outval;
-
- outb(EOC | reg, port + PAR_DATA);
- outval = WrAddr | reg;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA); /* Double write for PS/2. */
-
- outval &= 0xf0;
- outval |= value;
- outb(outval, port + PAR_DATA);
- outval &= 0x1f;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA);
-
- outb(EOC | outval, port + PAR_DATA);
-}
-
-static inline void
-write_reg_high(short port, unsigned char reg, unsigned char value)
-{
- unsigned char outval = EOC | HNib | reg;
-
- outb(outval, port + PAR_DATA);
- outval &= WrAddr | HNib | 0x0f;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA); /* Double write for PS/2. */
-
- outval = WrAddr | HNib | value;
- outb(outval, port + PAR_DATA);
- outval &= HNib | 0x0f; /* HNib | value */
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA);
-
- outb(EOC | HNib | outval, port + PAR_DATA);
-}
-
-/* Write a byte out using nibble mode. The low nibble is written first. */
-static inline void
-write_reg_byte(short port, unsigned char reg, unsigned char value)
-{
- unsigned char outval;
-
- outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */
- outval = WrAddr | reg;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA); /* Double write for PS/2. */
-
- outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
- outb(value & 0x0f, port + PAR_DATA);
- value >>= 4;
- outb(value, port + PAR_DATA);
- outb(0x10 | value, port + PAR_DATA);
- outb(0x10 | value, port + PAR_DATA);
-
- outb(EOC | value, port + PAR_DATA); /* Reset the address register. */
-}
-
-/* Bulk data writes to the packet buffer. The interrupt line remains enabled.
- * The first, faster method uses only the dataport (data modes 0, 2 & 4).
- * The second (backup) method uses data and control regs (modes 1, 3 & 5).
- * It should only be needed when there is skew between the individual data
- * lines.
- */
-static inline void write_byte_mode0(short ioaddr, unsigned char value)
-{
- outb(value & 0x0f, ioaddr + PAR_DATA);
- outb((value>>4) | 0x10, ioaddr + PAR_DATA);
-}
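
Mode-0 writes are framed by bit 4 of the data port: the low nibble goes out with bit 4 clear, then the high nibble with the 0x10 marker set. For a concrete byte (sketch):

    unsigned char value  = 0xa5;
    unsigned char first  = value & 0x0f;          /* 0x05: low nibble, bit 4 clear */
    unsigned char second = (value >> 4) | 0x10;   /* 0x1a: high nibble, bit 4 set  */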
-
-static inline void write_byte_mode1(short ioaddr, unsigned char value)
-{
- outb(value & 0x0f, ioaddr + PAR_DATA);
- outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
- outb((value>>4) | 0x10, ioaddr + PAR_DATA);
- outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
-}
-
-/* Write 16bit VALUE to the packet buffer: the same as above just doubled. */
-static inline void write_word_mode0(short ioaddr, unsigned short value)
-{
- outb(value & 0x0f, ioaddr + PAR_DATA);
- value >>= 4;
- outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
- value >>= 4;
- outb(value & 0x0f, ioaddr + PAR_DATA);
- value >>= 4;
- outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
-}
-
-/* EEPROM_Ctrl bits. */
-#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
-#define EE_CS 0x02 /* EEPROM chip select. */
-#define EE_CLK_HIGH 0x12
-#define EE_CLK_LOW 0x16
-#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */
-#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
-
-/* The EEPROM commands include the always-set leading bit. */
-#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
-#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
-#define EE_ERASE(offset) (((7 << 6) + (offset)) << 17)
-#define EE_CMD_SIZE 27 /* The command+address+data size. */
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 2c1a0c21af8d..aed4cf852091 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -72,7 +72,8 @@ enum mac_version {
RTL_GIGA_MAC_VER_70,
RTL_GIGA_MAC_VER_80,
RTL_GIGA_MAC_NONE,
- RTL_GIGA_MAC_VER_LAST = RTL_GIGA_MAC_NONE - 1
+ RTL_GIGA_MAC_VER_LAST = RTL_GIGA_MAC_NONE - 1,
+ RTL_GIGA_MAC_VER_EXTENDED
};
struct rtl8169_private;
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c
index bf055078a855..6dff3d94793e 100644
--- a/drivers/net/ethernet/realtek/r8169_firmware.c
+++ b/drivers/net/ethernet/realtek/r8169_firmware.c
@@ -68,7 +68,7 @@ static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw)
if (size > (fw->size - start) / FW_OPCODE_SIZE)
return false;
- strscpy(rtl_fw->version, fw_info->version, RTL_VER_SIZE);
+ strscpy(rtl_fw->version, fw_info->version);
pa->code = (__le32 *)(fw->data + start);
pa->size = size;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 755083852eef..2f7d9809c373 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -31,6 +31,7 @@
#include <linux/unaligned.h>
#include <net/ip6_checksum.h>
#include <net/netdev_queues.h>
+#include <net/phy/realtek_phy.h>
#include "r8169.h"
#include "r8169_firmware.h"
@@ -95,8 +96,8 @@
#define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN)
static const struct rtl_chip_info {
- u16 mask;
- u16 val;
+ u32 mask;
+ u32 val;
enum mac_version mac_version;
const char *name;
const char *fw_name;
@@ -205,10 +206,21 @@ static const struct rtl_chip_info {
{ 0xfc8, 0x040, RTL_GIGA_MAC_VER_03, "RTL8110s" },
{ 0xfc8, 0x008, RTL_GIGA_MAC_VER_02, "RTL8169s" },
+ /* Extended chip version */
+ { 0x7cf, 0x7c8, RTL_GIGA_MAC_VER_EXTENDED },
+
/* Catch-all */
{ 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
+static const struct rtl_chip_info rtl_chip_infos_extended[] = {
+ { 0x7fffffff, 0x00000000, RTL_GIGA_MAC_VER_64, "RTL9151AS",
+ FIRMWARE_9151A_1},
+
+ /* Catch-all */
+ { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
+};
+
static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x2502) },
{ PCI_VDEVICE(REALTEK, 0x2600) },
@@ -255,6 +267,8 @@ enum rtl_registers {
IntrStatus = 0x3e,
TxConfig = 0x40,
+ /* Extended chip version id */
+ TX_CONFIG_V2 = 0x60b0,
#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
@@ -312,6 +326,15 @@ enum rtl_registers {
IBIMR0 = 0xfa,
IBISR0 = 0xfb,
FuncForceEvent = 0xfc,
+
+ ALDPS_LTR = 0xe0a2,
+ LTR_OBFF_LOCK = 0xe032,
+ LTR_SNOOP = 0xe034,
+
+#define ALDPS_LTR_EN BIT(0)
+#define LTR_OBFF_LOCK_EN BIT(0)
+#define LINK_SPEED_CHANGE_EN BIT(14)
+#define LTR_SNOOP_EN GENMASK(15, 14)
};
enum rtl8168_8101_registers {
@@ -397,6 +420,8 @@ enum rtl8168_registers {
#define PWM_EN (1 << 22)
#define RXDV_GATED_EN (1 << 19)
#define EARLY_TALLY_EN (1 << 16)
+ COMBO_LTR_EXTEND = 0xb6,
+#define COMBO_LTR_EXTEND_EN BIT(0)
};
enum rtl8125_registers {
@@ -733,6 +758,7 @@ struct rtl8169_private {
unsigned supports_gmii:1;
unsigned aspm_manageable:1;
unsigned dash_enabled:1;
+ bool sfp_mode:1;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
@@ -1097,6 +1123,10 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
if (rtl_ocp_reg_failure(reg))
return 0;
+ /* Return dummy MII_PHYSID2 in SFP mode to match SFP PHY driver */
+ if (tp->sfp_mode && reg == (OCP_STD_PHY_BASE + 2 * MII_PHYSID2))
+ return PHY_ID_RTL_DUMMY_SFP & 0xffff;
+
RTL_W32(tp, GPHY_OCP, reg << 15);
return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
@@ -1154,6 +1184,46 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
}
+static void r8127_sfp_sds_phy_reset(struct rtl8169_private *tp)
+{
+ RTL_W8(tp, 0x2350, RTL_R8(tp, 0x2350) & ~BIT(0));
+ udelay(1);
+
+ RTL_W16(tp, 0x233a, 0x801f);
+ RTL_W8(tp, 0x2350, RTL_R8(tp, 0x2350) | BIT(0));
+ usleep_range(10, 20);
+}
+
+static void r8127_sfp_init_10g(struct rtl8169_private *tp)
+{
+ int val;
+
+ r8127_sfp_sds_phy_reset(tp);
+
+ RTL_W16(tp, 0x233a, 0x801a);
+ RTL_W16(tp, 0x233e, (RTL_R16(tp, 0x233e) & ~0x3003) | 0x1000);
+
+ r8168_phy_ocp_write(tp, 0xc40a, 0x0000);
+ r8168_phy_ocp_write(tp, 0xc466, 0x0003);
+ r8168_phy_ocp_write(tp, 0xc808, 0x0000);
+ r8168_phy_ocp_write(tp, 0xc80a, 0x0000);
+
+ val = r8168_phy_ocp_read(tp, 0xc804);
+ r8168_phy_ocp_write(tp, 0xc804, (val & ~0x000f) | 0x000c);
+}
+
+static void rtl_sfp_init(struct rtl8169_private *tp)
+{
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ r8127_sfp_init_10g(tp);
+}
+
+static void rtl_sfp_reset(struct rtl8169_private *tp)
+{
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ r8127_sfp_sds_phy_reset(tp);
+}
+
/* Work around a hw issue with RTL8168g PHY, the quirk disables
* PHY MCU interrupts before PHY power-down.
*/
@@ -1513,6 +1583,10 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
return RTL_DASH_EP;
case RTL_GIGA_MAC_VER_66:
return RTL_DASH_25_BP;
+ case RTL_GIGA_MAC_VER_80:
+ return (tp->pci_dev->revision == 0x04)
+ ? RTL_DASH_25_BP
+ : RTL_DASH_NONE;
default:
return RTL_DASH_NONE;
}
@@ -1710,12 +1784,11 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl_fw *rtl_fw = tp->rtl_fw;
- strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME);
+ strscpy(info->bus_info, pci_name(tp->pci_dev));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
if (rtl_fw)
- strscpy(info->fw_version, rtl_fw->version,
- sizeof(info->fw_version));
+ strscpy(info->fw_version, rtl_fw->version);
}
static int rtl8169_get_regs_len(struct net_device *dev)
@@ -2308,6 +2381,36 @@ static void rtl8169_get_eth_ctrl_stats(struct net_device *dev,
le32_to_cpu(tp->counters->rx_unknown_opcode);
}
+static int rtl8169_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(ndev);
+ struct phy_device *phydev = tp->phydev;
+ int duplex = cmd->base.duplex;
+ int speed = cmd->base.speed;
+
+ if (!tp->sfp_mode)
+ return phy_ethtool_ksettings_set(phydev, cmd);
+
+ if (cmd->base.autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (!phy_check_valid(speed, duplex, phydev->supported))
+ return -EINVAL;
+
+ mutex_lock(&phydev->lock);
+
+ phydev->autoneg = AUTONEG_DISABLE;
+ phydev->speed = speed;
+ phydev->duplex = duplex;
+
+ rtl_sfp_init(tp);
+
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
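
In SFP mode the link is a fixed SerDes lane, so the handler refuses autonegotiation outright and accepts only a forced speed/duplex that phy_check_valid() finds in the PHY's supported mask, then reprograms the SerDes through rtl_sfp_init(). From userspace that corresponds to something like "ethtool -s <iface> autoneg off speed 10000 duplex full"; any request with autoneg enabled returns -EINVAL.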
static const struct ethtool_ops rtl8169_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -2327,7 +2430,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_eee = rtl8169_get_eee,
.set_eee = rtl8169_set_eee,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .set_link_ksettings = rtl8169_set_link_ksettings,
.get_ringparam = rtl8169_get_ringparam,
.get_pause_stats = rtl8169_get_pause_stats,
.get_pauseparam = rtl8169_get_pauseparam,
@@ -2336,7 +2439,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_eth_ctrl_stats = rtl8169_get_eth_ctrl_stats,
};
-static const struct rtl_chip_info *rtl8169_get_chip_version(u16 xid, bool gmii)
+static const struct rtl_chip_info *rtl8169_get_chip_version(u32 xid, bool gmii)
{
/* Chips combining a 1Gbps MAC with a 100Mbps PHY */
static const struct rtl_chip_info rtl8106eus_info = {
@@ -2362,6 +2465,15 @@ static const struct rtl_chip_info *rtl8169_get_chip_version(u16 xid, bool gmii)
return p;
}
+static const struct rtl_chip_info *rtl8169_get_extended_chip_version(u32 xid2)
+{
+ const struct rtl_chip_info *p = rtl_chip_infos_extended;
+
+ while ((xid2 & p->mask) != p->val)
+ p++;
+ return p;
+}
+
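
Both tables follow the same convention: entries are tried in order, and the catch-all sentinel (mask 0 matches any xid, since xid & 0 == 0) guarantees termination, mapping unknown chips to RTL_GIGA_MAC_NONE. In the legacy table, the new { 0x7cf, 0x7c8, RTL_GIGA_MAC_VER_EXTENDED } entry acts as an escape code that redirects probing to the 32-bit TX_CONFIG_V2 ID. The lookup pattern in isolation, as a runnable sketch over a toy table:

    struct id_match {
            unsigned int mask, val;
            const char *name;
    };

    static const struct id_match table[] = {
            { 0x7fffffff, 0x00000000, "RTL9151AS" },
            { 0x00000000, 0x00000000, "unknown" },   /* catch-all sentinel */
    };

    static const char *lookup(unsigned int xid)
    {
            const struct id_match *p = table;

            while ((xid & p->mask) != p->val)
                    p++;    /* mask 0 matches anything, so this always stops */
            return p->name;
    }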
static void rtl_release_firmware(struct rtl8169_private *tp)
{
if (tp->rtl_fw) {
@@ -2435,6 +2547,9 @@ static void rtl8169_init_phy(struct rtl8169_private *tp)
tp->pci_dev->subsystem_device == 0xe000)
phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
+ if (tp->sfp_mode)
+ rtl_sfp_init(tp);
+
/* We may have called phy_speed_down before */
phy_speed_up(tp->phydev);
@@ -2915,6 +3030,92 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
}
}
+static void rtl_enable_ltr(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_80:
+ r8168_mac_ocp_write(tp, 0xcdd0, 0x9003);
+ r8168_mac_ocp_modify(tp, LTR_SNOOP, 0x0000, LTR_SNOOP_EN);
+ r8168_mac_ocp_modify(tp, ALDPS_LTR, 0x0000, ALDPS_LTR_EN);
+ r8168_mac_ocp_write(tp, 0xcdd2, 0x8c09);
+ r8168_mac_ocp_write(tp, 0xcdd8, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd4, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdda, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd6, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcddc, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcde8, 0x887a);
+ r8168_mac_ocp_write(tp, 0xcdea, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdec, 0x8c09);
+ r8168_mac_ocp_write(tp, 0xcdee, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdf0, 0x8a62);
+ r8168_mac_ocp_write(tp, 0xcdf2, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdf4, 0x883e);
+ r8168_mac_ocp_write(tp, 0xcdf6, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdf8, 0x8849);
+ r8168_mac_ocp_write(tp, 0xcdfa, 0x9003);
+ r8168_mac_ocp_modify(tp, LTR_OBFF_LOCK, 0x0000, LINK_SPEED_CHANGE_EN);
+ break;
+ case RTL_GIGA_MAC_VER_70:
+ r8168_mac_ocp_write(tp, 0xcdd0, 0x9003);
+ r8168_mac_ocp_modify(tp, LTR_SNOOP, 0x0000, LTR_SNOOP_EN);
+ r8168_mac_ocp_modify(tp, ALDPS_LTR, 0x0000, ALDPS_LTR_EN);
+ r8168_mac_ocp_write(tp, 0xcdd2, 0x8c09);
+ r8168_mac_ocp_write(tp, 0xcdd8, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd4, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdda, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd6, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcddc, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcde8, 0x887a);
+ r8168_mac_ocp_write(tp, 0xcdea, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdec, 0x8c09);
+ r8168_mac_ocp_write(tp, 0xcdee, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdf0, 0x8a62);
+ r8168_mac_ocp_write(tp, 0xcdf2, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdf4, 0x883e);
+ r8168_mac_ocp_write(tp, 0xcdf6, 0x9003);
+ r8168_mac_ocp_modify(tp, LTR_OBFF_LOCK, 0x0000, LINK_SPEED_CHANGE_EN);
+ break;
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ r8168_mac_ocp_write(tp, 0xcdd0, 0x9003);
+ r8168_mac_ocp_modify(tp, LTR_SNOOP, 0x0000, LTR_SNOOP_EN);
+ r8168_mac_ocp_modify(tp, ALDPS_LTR, 0x0000, ALDPS_LTR_EN);
+ r8168_mac_ocp_write(tp, 0xcdd2, 0x889c);
+ r8168_mac_ocp_write(tp, 0xcdd8, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd4, 0x8c30);
+ r8168_mac_ocp_write(tp, 0xcdda, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd6, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcddc, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcde8, 0x883e);
+ r8168_mac_ocp_write(tp, 0xcdea, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdec, 0x889c);
+ r8168_mac_ocp_write(tp, 0xcdee, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdf0, 0x8C09);
+ r8168_mac_ocp_write(tp, 0xcdf2, 0x9003);
+ r8168_mac_ocp_modify(tp, LTR_OBFF_LOCK, 0x0000, LINK_SPEED_CHANGE_EN);
+ break;
+ case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_52:
+ r8168_mac_ocp_modify(tp, ALDPS_LTR, 0x0000, ALDPS_LTR_EN);
+ RTL_W8(tp, COMBO_LTR_EXTEND, RTL_R8(tp, COMBO_LTR_EXTEND) | COMBO_LTR_EXTEND_EN);
+ fallthrough;
+ case RTL_GIGA_MAC_VER_51:
+ r8168_mac_ocp_modify(tp, LTR_SNOOP, 0x0000, LTR_SNOOP_EN);
+ r8168_mac_ocp_write(tp, 0xe02c, 0x1880);
+ r8168_mac_ocp_write(tp, 0xe02e, 0x4880);
+ r8168_mac_ocp_write(tp, 0xcdd8, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdda, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcddc, 0x9003);
+ r8168_mac_ocp_write(tp, 0xcdd2, 0x883c);
+ r8168_mac_ocp_write(tp, 0xcdd4, 0x8c12);
+ r8168_mac_ocp_write(tp, 0xcdd6, 0x9003);
+ break;
+ default:
+ return;
+ }
+ /* chip can trigger LTR */
+ r8168_mac_ocp_modify(tp, LTR_OBFF_LOCK, 0x0003, LTR_OBFF_LOCK_EN);
+}
+
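
The runs of r8168_mac_ocp_write() calls populate per-state latency entries in the 0xcdxx table; only once the table is loaded does the final modify unlock LTR message emission. The modify helper is a read-modify-write over MAC OCP space, conceptually (sketch; the real definition earlier in this file also takes the mac_ocp spinlock):

    static void mac_ocp_modify_sketch(struct rtl8169_private *tp,
                                      u32 reg, u16 clear, u16 set)
    {
            u16 data = r8168_mac_ocp_read(tp, reg);

            r8168_mac_ocp_write(tp, reg, (data & ~clear) | set);
    }

So the closing r8168_mac_ocp_modify(tp, LTR_OBFF_LOCK, 0x0003, LTR_OBFF_LOCK_EN) clears bits 1:0, then sets bit 0.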
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
u8 val8;
@@ -2943,6 +3144,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
break;
}
+ rtl_enable_ltr(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
@@ -4800,6 +5002,10 @@ static void rtl8169_down(struct rtl8169_private *tp)
phy_stop(tp->phydev);
+ /* Reset SerDes PHY to bring down fiber link */
+ if (tp->sfp_mode)
+ rtl_sfp_reset(tp);
+
rtl8169_update_counters(tp);
pci_clear_master(tp->pci_dev);
@@ -5389,11 +5595,12 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_chip_info *chip;
+ const char *ext_xid_str = "";
struct rtl8169_private *tp;
int jumbo_max, region, rc;
struct net_device *dev;
u32 txconfig;
- u16 xid;
+ u32 xid;
dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
if (!dev)
@@ -5441,10 +5648,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
chip = rtl8169_get_chip_version(xid, tp->supports_gmii);
+
+ if (chip->mac_version == RTL_GIGA_MAC_VER_EXTENDED) {
+ ext_xid_str = "ext";
+ xid = RTL_R32(tp, TX_CONFIG_V2);
+ chip = rtl8169_get_extended_chip_version(xid);
+ }
if (chip->mac_version == RTL_GIGA_MAC_NONE)
return dev_err_probe(&pdev->dev, -ENODEV,
- "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n",
- xid);
+ "unknown chip %sXID %x, contact r8169 maintainers (see MAINTAINERS file)\n",
+ ext_xid_str, xid);
tp->mac_version = chip->mac_version;
tp->fw_name = chip->fw_name;
@@ -5459,13 +5672,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
tp->aspm_manageable = !rc;
- /* Fiber mode on RTL8127AF isn't supported */
if (rtl_is_8125(tp)) {
u16 data = r8168_mac_ocp_read(tp, 0xd006);
if ((data & 0xff) == 0x07)
- return dev_err_probe(&pdev->dev, -ENODEV,
- "Fiber mode not supported\n");
+ tp->sfp_mode = true;
}
tp->dash_type = rtl_get_dash_type(tp);
@@ -5585,8 +5796,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->leds = rtl8168_init_leds(dev);
}
- netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
- chip->name, dev->dev_addr, xid, tp->irq);
+ netdev_info(dev, "%s, %pM, %sXID %x, IRQ %d\n",
+ chip->name, dev->dev_addr, ext_xid_str, xid, tp->irq);
if (jumbo_max)
netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index d0979abd36de..27a6f0492097 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/slab.h>
#include "rcar_gen4_ptp.h"
@@ -23,6 +24,15 @@
#define PTPGPTPTM10_REG 0x0054
#define PTPGPTPTM20_REG 0x0058
+struct rcar_gen4_ptp_private {
+ void __iomem *addr;
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ spinlock_t lock; /* For multiple register accesses */
+ s64 default_addend;
+ bool initialized;
+};
+
#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
@@ -168,7 +178,8 @@ int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv)
}
EXPORT_SYMBOL_GPL(rcar_gen4_ptp_unregister);
-struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev)
+struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev,
+ void __iomem *addr)
{
struct rcar_gen4_ptp_private *ptp;
@@ -178,10 +189,31 @@ struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev)
ptp->info = rcar_gen4_ptp_info;
+ ptp->addr = addr;
+
return ptp;
}
EXPORT_SYMBOL_GPL(rcar_gen4_ptp_alloc);
+int rcar_gen4_ptp_clock_index(struct rcar_gen4_ptp_private *priv)
+{
+ if (!priv->initialized)
+ return -1;
+
+ return ptp_clock_index(priv->clock);
+}
+EXPORT_SYMBOL_GPL(rcar_gen4_ptp_clock_index);
+
+void rcar_gen4_ptp_gettime64(struct rcar_gen4_ptp_private *priv,
+ struct timespec64 *ts)
+{
+ if (!priv->initialized)
+ return;
+
+ priv->info.gettime64(&priv->info, ts);
+}
+EXPORT_SYMBOL_GPL(rcar_gen4_ptp_gettime64);
+
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas R-Car Gen4 gPTP driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
index 9a9c232c854e..6abaa7cc6b77 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
@@ -7,19 +7,15 @@
#ifndef __RCAR_GEN4_PTP_H__
#define __RCAR_GEN4_PTP_H__
-#include <linux/ptp_clock_kernel.h>
-
-struct rcar_gen4_ptp_private {
- void __iomem *addr;
- struct ptp_clock *clock;
- struct ptp_clock_info info;
- spinlock_t lock; /* For multiple registers access */
- s64 default_addend;
- bool initialized;
-};
+struct rcar_gen4_ptp_private;
int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, u32 rate);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv);
-struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev);
+struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev,
+ void __iomem *addr);
+
+int rcar_gen4_ptp_clock_index(struct rcar_gen4_ptp_private *priv);
+void rcar_gen4_ptp_gettime64(struct rcar_gen4_ptp_private *priv,
+ struct timespec64 *ts);
#endif /* #ifndef __RCAR_GEN4_PTP_H__ */
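
The header now uses the opaque-handle idiom: consumers get only a forward declaration plus accessors, so they can no longer dereference ptp_priv->clock before registration, and the initialized check lives in one place (rcar_gen4_ptp_clock_index() returning -1, rcar_gen4_ptp_gettime64() returning early). A minimal single-file analogue, with illustrative names:

    /* Header side: layout hidden, accessor exposed. */
    struct handle;
    int handle_index(const struct handle *h);

    /* Implementation side: the only code that sees the layout. */
    struct handle {
            int initialized;
            int index;
    };

    int handle_index(const struct handle *h)
    {
            return h->initialized ? h->index : -1;  /* guard, as in the driver */
    }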
diff --git a/drivers/net/ethernet/renesas/rswitch_l2.c b/drivers/net/ethernet/renesas/rswitch_l2.c
index 4a69ec77d69c..9433cd8adced 100644
--- a/drivers/net/ethernet/renesas/rswitch_l2.c
+++ b/drivers/net/ethernet/renesas/rswitch_l2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
*
- * Copyright (C) 2025 Renesas Electronics Corporation
+ * Copyright (C) 2025 - 2026 Renesas Electronics Corporation
*/
#include <linux/err.h>
@@ -60,6 +60,7 @@ static void rswitch_update_l2_hw_learning(struct rswitch_private *priv)
static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
{
struct rswitch_device *rdev;
+ bool new_forwarding_offload;
unsigned int fwd_mask;
/* calculate fwd_mask with zeroes in bits corresponding to ports that
@@ -73,8 +74,9 @@ static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
}
rswitch_for_all_ports(priv, rdev) {
- if ((rdev_for_l2_offload(rdev) && rdev->forwarding_requested) ||
- rdev->forwarding_offloaded) {
+ new_forwarding_offload = (rdev_for_l2_offload(rdev) && rdev->forwarding_requested);
+
+ if (new_forwarding_offload || rdev->forwarding_offloaded) {
/* Update allowed offload destinations even for ports
* with L2 offload enabled earlier.
*
@@ -84,13 +86,10 @@ static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
priv->addr + FWPC2(rdev->port));
}
- if (rdev_for_l2_offload(rdev) &&
- rdev->forwarding_requested &&
- !rdev->forwarding_offloaded) {
+ if (new_forwarding_offload && !rdev->forwarding_offloaded)
rswitch_change_l2_hw_offloading(rdev, true, false);
- } else if (rdev->forwarding_offloaded) {
+ else if (!new_forwarding_offload && rdev->forwarding_offloaded)
rswitch_change_l2_hw_offloading(rdev, false, false);
- }
}
}
diff --git a/drivers/net/ethernet/renesas/rswitch_main.c b/drivers/net/ethernet/renesas/rswitch_main.c
index e14b21148f27..433eb2b00d10 100644
--- a/drivers/net/ethernet/renesas/rswitch_main.c
+++ b/drivers/net/ethernet/renesas/rswitch_main.c
@@ -1891,7 +1891,7 @@ static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts
{
struct rswitch_device *rdev = netdev_priv(ndev);
- info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
+ info->phc_index = rcar_gen4_ptp_clock_index(rdev->priv->ptp_priv);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
@@ -2150,17 +2150,16 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
if (attr)
priv->etha_no_runtime_change = true;
- priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
- if (!priv->ptp_priv)
- return -ENOMEM;
-
platform_set_drvdata(pdev, priv);
priv->pdev = pdev;
priv->addr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->addr))
return PTR_ERR(priv->addr);
- priv->ptp_priv->addr = priv->addr + RSWITCH_GPTP_OFFSET_S4;
+ priv->ptp_priv =
+ rcar_gen4_ptp_alloc(pdev, priv->addr + RSWITCH_GPTP_OFFSET_S4);
+ if (!priv->ptp_priv)
+ return -ENOMEM;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (ret < 0) {
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
index fdb1e7b7fb06..85052b47afb9 100644
--- a/drivers/net/ethernet/renesas/rtsn.c
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -104,13 +104,6 @@ static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable)
}
}
-static void rtsn_get_timestamp(struct rtsn_private *priv, struct timespec64 *ts)
-{
- struct rcar_gen4_ptp_private *ptp_priv = priv->ptp_priv;
-
- ptp_priv->info.gettime64(&ptp_priv->info, ts);
-}
-
static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
{
struct rtsn_private *priv = netdev_priv(ndev);
@@ -133,7 +126,7 @@ static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
struct skb_shared_hwtstamps shhwtstamps;
struct timespec64 ts;
- rtsn_get_timestamp(priv, &ts);
+ rcar_gen4_ptp_gettime64(priv->ptp_priv, &ts);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
skb_tstamp_tx(skb, &shhwtstamps);
@@ -1197,7 +1190,7 @@ static int rtsn_get_ts_info(struct net_device *ndev,
{
struct rtsn_private *priv = netdev_priv(ndev);
- info->phc_index = ptp_clock_index(priv->ptp_priv->clock);
+ info->phc_index = rcar_gen4_ptp_clock_index(priv->ptp_priv);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
@@ -1227,6 +1220,7 @@ static int rtsn_probe(struct platform_device *pdev)
{
struct rtsn_private *priv;
struct net_device *ndev;
+ void __iomem *ptpaddr;
struct resource *res;
int ret;
@@ -1239,12 +1233,6 @@ static int rtsn_probe(struct platform_device *pdev)
priv->pdev = pdev;
priv->ndev = ndev;
- priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
- if (!priv->ptp_priv) {
- ret = -ENOMEM;
- goto error_free;
- }
-
spin_lock_init(&priv->lock);
platform_set_drvdata(pdev, priv);
@@ -1288,9 +1276,15 @@ static int rtsn_probe(struct platform_device *pdev)
goto error_free;
}
- priv->ptp_priv->addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->ptp_priv->addr)) {
- ret = PTR_ERR(priv->ptp_priv->addr);
+ ptpaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ptpaddr)) {
+ ret = PTR_ERR(ptpaddr);
+ goto error_free;
+ }
+
+ priv->ptp_priv = rcar_gen4_ptp_alloc(pdev, ptpaddr);
+ if (!priv->ptp_priv) {
+ ret = -ENOMEM;
goto error_free;
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
index 298a7402e39c..66e6de64626c 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
@@ -25,7 +25,7 @@ static void sxgbe_mtl_init(void __iomem *ioaddr, unsigned int etsalg,
reg_val = readl(ioaddr + SXGBE_MTL_OP_MODE_REG);
reg_val &= ETS_RST;
- /* ETS Algorith */
+ /* ETS Algorithm */
switch (etsalg & SXGBE_MTL_OPMODE_ESTMASK) {
case ETS_WRR:
reg_val &= ETS_WRR;
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 6c3b74000d3b..05dc7b10c885 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -54,6 +54,7 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_ethtool_stats = efx_ethtool_get_stats,
.get_rxnfc = efx_ethtool_get_rxnfc,
.set_rxnfc = efx_ethtool_set_rxnfc,
+ .get_rx_ring_count = efx_ethtool_get_rx_ring_count,
.reset = efx_ethtool_reset,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 18fe5850a978..362388754a29 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -261,6 +261,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.reset = efx_ethtool_reset,
.get_rxnfc = efx_ethtool_get_rxnfc,
.set_rxnfc = efx_ethtool_set_rxnfc,
+ .get_rx_ring_count = efx_ethtool_get_rx_ring_count,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.rxfh_per_ctx_fields = true,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index fa303e171d98..2fc42b1a2bfb 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -850,6 +850,13 @@ out_setdata_unlock:
return rc;
}
+u32 efx_ethtool_get_rx_ring_count(struct net_device *net_dev)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ return efx->n_rx_channels;
+}
+
int efx_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -858,10 +865,6 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev,
s32 rc = 0;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = efx->n_rx_channels;
- return 0;
-
case ETHTOOL_GRXCLSRLCNT:
info->data = efx_filter_get_rx_id_limit(efx);
if (info->data == 0)
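
The ETHTOOL_GRXRINGS query moves out of the multiplexed get_rxnfc() handler into a dedicated .get_rx_ring_count op; the ef100 hook above and the falcon and siena variants below get the same conversion. A hedged sketch of how the core side can consume such an op (the real plumbing lives in net/ethtool/ and may differ):

    static int rxnfc_get_rx_ring_count(struct net_device *dev,
                                       struct ethtool_rxnfc *info)
    {
            const struct ethtool_ops *ops = dev->ethtool_ops;

            if (!ops->get_rx_ring_count)
                    return -EOPNOTSUPP;     /* fall back to get_rxnfc() */
            info->data = ops->get_rx_ring_count(dev);
            return 0;
    }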
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index 24db4fccbe78..f96db4253454 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -40,6 +40,7 @@ int efx_ethtool_set_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam);
int efx_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs);
+u32 efx_ethtool_get_rx_ring_count(struct net_device *net_dev);
int efx_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info);
u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 27d1cd6f24ca..049364031545 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -974,6 +974,13 @@ ef4_ethtool_get_rxfh_fields(struct net_device *net_dev,
return 0;
}
+static u32 ef4_ethtool_get_rx_ring_count(struct net_device *net_dev)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ return efx->n_rx_channels;
+}
+
static int
ef4_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
@@ -981,10 +988,6 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev,
struct ef4_nic *efx = netdev_priv(net_dev);
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = efx->n_rx_channels;
- return 0;
-
case ETHTOOL_GRXCLSRLCNT:
info->data = ef4_filter_get_rx_id_limit(efx);
if (info->data == 0)
@@ -1348,6 +1351,7 @@ const struct ethtool_ops ef4_ethtool_ops = {
.reset = ef4_ethtool_reset,
.get_rxnfc = ef4_ethtool_get_rxnfc,
.set_rxnfc = ef4_ethtool_set_rxnfc,
+ .get_rx_ring_count = ef4_ethtool_get_rx_ring_count,
.get_rxfh_indir_size = ef4_ethtool_get_rxfh_indir_size,
.get_rxfh = ef4_ethtool_get_rxfh,
.set_rxfh = ef4_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 9fa5c4c713ab..ec3b2df43b68 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -156,9 +156,9 @@ enum {
* @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
* @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
* @pf_index: The number for this PF, or the parent PF if this is a VF
-#ifdef CONFIG_SFC_SRIOV
- * @vf: Pointer to VF data structure
-#endif
+ * @port_id: Ethernet address of owning PF, used for phys_port_id
+ * @vf_index: The number for this VF, or 0xFFFF if this is a PF
+ * @vf: for a PF, array of VF data structures indexed by VF's @vf_index
* @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero
* @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock.
* @vlan_lock: Lock to serialize access to vlan_list.
@@ -166,6 +166,7 @@ enum {
* @udp_tunnels_dirty: flag indicating a reboot occurred while pushing
* @udp_tunnels to hardware and thus the push must be re-done.
* @udp_tunnels_lock: Serialises writes to @udp_tunnels and @udp_tunnels_dirty.
+ * @licensed_features: Flags for licensed firmware features.
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index 8c3ebd0617fb..36feedffe444 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -261,6 +261,7 @@ const struct ethtool_ops efx_siena_ethtool_ops = {
.reset = efx_siena_ethtool_reset,
.get_rxnfc = efx_siena_ethtool_get_rxnfc,
.set_rxnfc = efx_siena_ethtool_set_rxnfc,
+ .get_rx_ring_count = efx_siena_ethtool_get_rx_ring_count,
.get_rxfh_indir_size = efx_siena_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size,
.get_rxfh = efx_siena_ethtool_get_rxfh,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index 47cd16a113cf..c56e0b54d854 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -841,6 +841,13 @@ out_setdata:
return 0;
}
+u32 efx_siena_ethtool_get_rx_ring_count(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx->n_rx_channels;
+}
+
int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -849,10 +856,6 @@ int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
s32 rc = 0;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = efx->n_rx_channels;
- return 0;
-
case ETHTOOL_GRXCLSRLCNT:
info->data = efx_filter_get_rx_id_limit(efx);
if (info->data == 0)
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h
index 278d69e920d9..7b445b0ba38a 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h
@@ -37,6 +37,7 @@ int efx_siena_ethtool_set_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam);
int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs);
+u32 efx_siena_ethtool_get_rx_ring_count(struct net_device *net_dev);
int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info);
u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
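The three siena hunks above work together: ETHTOOL_GRXRINGS moves out of the multiplexed get_rxnfc() handler and into the dedicated .get_rx_ring_count operation (the same conversion applied to the ef4 and efx ethtool_ops earlier in the series). A minimal sketch of the core-side dispatch this enables; the driver-facing op is taken from this series, but the core function shown here is illustrative only:

    /* Hypothetical ethtool-core fallback, assuming the op semantics above */
    static int ethtool_grxrings(struct net_device *dev,
    				struct ethtool_rxnfc *info)
    {
    	const struct ethtool_ops *ops = dev->ethtool_ops;

    	if (ops->get_rx_ring_count) {
    		info->data = ops->get_rx_ring_count(dev);
    		return 0;
    	}
    	/* unconverted drivers still answer via get_rxnfc() */
    	return ops->get_rxnfc(dev, info, NULL);
    }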
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index b461918dc5f4..d85ac8cbeb00 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -79,10 +79,6 @@
#include "sis900.h"
#define SIS900_MODULE_NAME "sis900"
-#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006"
-
-static const char version[] =
- KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
static int max_interrupt_work = 40;
static int multicast_filter_limit = 128;
@@ -442,13 +438,6 @@ static int sis900_probe(struct pci_dev *pci_dev,
const char *card_name = card_names[pci_id->driver_data];
const char *dev_name = pci_name(pci_dev);
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
/* setup various bits in PCI command register */
ret = pcim_enable_device(pci_dev);
if(ret) return ret;
@@ -2029,7 +2018,6 @@ static void sis900_get_drvinfo(struct net_device *net_dev,
struct sis900_private *sis_priv = netdev_priv(net_dev);
strscpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
- strscpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
strscpy(info->bus_info, pci_name(sis_priv->pci_dev),
sizeof(info->bus_info));
}
@@ -2567,21 +2555,4 @@ static struct pci_driver sis900_pci_driver = {
.driver.pm = &sis900_pm_ops,
};
-static int __init sis900_init_module(void)
-{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- printk(version);
-#endif
-
- return pci_register_driver(&sis900_pci_driver);
-}
-
-static void __exit sis900_cleanup_module(void)
-{
- pci_unregister_driver(&sis900_pci_driver);
-}
-
-module_init(sis900_init_module);
-module_exit(sis900_cleanup_module);
-
+module_pci_driver(sis900_pci_driver);
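Both this sis900 conversion and the epic100 one below are the stock module_pci_driver() cleanup: the macro (from <linux/pci.h>, via module_driver()) generates precisely the init/exit boilerplate being deleted. Roughly:

    /* Approximate expansion of module_pci_driver(sis900_pci_driver); */
    static int __init sis900_pci_driver_init(void)
    {
    	return pci_register_driver(&sis900_pci_driver);
    }
    module_init(sis900_pci_driver_init);

    static void __exit sis900_pci_driver_exit(void)
    {
    	pci_unregister_driver(&sis900_pci_driver);
    }
    module_exit(sis900_pci_driver_exit);

The only behavioural difference is the loss of the version banner, which is intentional: both drivers drop their DRV_VERSION strings in the same patch.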
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 45f703fe0e5a..389659db06a8 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -26,8 +26,6 @@
*/
#define DRV_NAME "epic100"
-#define DRV_VERSION "2.1"
-#define DRV_RELDATE "Sept 11, 2006"
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
@@ -89,12 +87,6 @@ static int rx_copybreak;
#include <linux/uaccess.h>
#include <asm/byteorder.h>
-/* These identify the driver base version and may not be removed. */
-static char version[] =
-DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
-static char version2[] =
-" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");
@@ -329,11 +321,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
void *ring_space;
dma_addr_t ring_dma;
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- pr_info_once("%s%s\n", version, version2);
-#endif
-
card_idx++;
ret = pci_enable_device(pdev);
@@ -1393,7 +1380,6 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
struct epic_private *np = netdev_priv(dev);
strscpy(info->driver, DRV_NAME, sizeof(info->driver));
- strscpy(info->version, DRV_VERSION, sizeof(info->version));
strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -1564,23 +1550,4 @@ static struct pci_driver epic_driver = {
.driver.pm = &epic_pm_ops,
};
-
-static int __init epic_init (void)
-{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- pr_info("%s%s\n", version, version2);
-#endif
-
- return pci_register_driver(&epic_driver);
-}
-
-
-static void __exit epic_cleanup (void)
-{
- pci_unregister_driver (&epic_driver);
-}
-
-
-module_init(epic_init);
-module_exit(epic_cleanup);
+module_pci_driver(epic_driver);
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 66b3549636f8..4700998c4837 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -586,10 +586,8 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
skb = priv->rx.desc[entry].skbs;
if (!skb) {
skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
- if (!skb) {
- netdev_err(ndev, "can't allocate skb for Rx\n");
+ if (!skb)
return -ENOMEM;
- }
skb->data += AVE_FRAME_HEADROOM;
skb->tail += AVE_FRAME_HEADROOM;
}
diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c
index b49c4708bf9e..dab0772c5b9d 100644
--- a/drivers/net/ethernet/spacemit/k1_emac.c
+++ b/drivers/net/ethernet/spacemit/k1_emac.c
@@ -47,8 +47,6 @@
#define EMAC_RX_FRAMES 64
#define EMAC_RX_COAL_TIMEOUT (600 * 312)
-#define DEFAULT_FC_PAUSE_TIME 0xffff
-#define DEFAULT_FC_FIFO_HIGH 1600
#define DEFAULT_TX_ALMOST_FULL 0x1f8
#define DEFAULT_TX_THRESHOLD 1518
#define DEFAULT_RX_THRESHOLD 12
@@ -133,9 +131,6 @@ struct emac_priv {
u32 tx_delay;
u32 rx_delay;
- bool flow_control_autoneg;
- u8 flow_control;
-
/* Softirq-safe, hold while touching hardware statistics */
spinlock_t stats_lock;
};
@@ -180,9 +175,7 @@ static void emac_set_mac_addr_reg(struct emac_priv *priv,
static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
{
- /* We use only one address, so set the same for flow control as well */
emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
- emac_set_mac_addr_reg(priv, addr, MAC_FC_SOURCE_ADDRESS_HIGH);
}
static void emac_reset_hw(struct emac_priv *priv)
@@ -201,8 +194,6 @@ static void emac_reset_hw(struct emac_priv *priv)
static void emac_init_hw(struct emac_priv *priv)
{
- /* Destination address for 802.3x Ethernet flow control */
- u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
u32 rxirq = 0, dma = 0, frame_sz;
regmap_set_bits(priv->regmap_apmu,
@@ -237,12 +228,6 @@ static void emac_init_hw(struct emac_priv *priv)
emac_wr(priv, MAC_TRANSMIT_JABBER_SIZE, frame_sz);
emac_wr(priv, MAC_RECEIVE_JABBER_SIZE, frame_sz);
- /* Configure flow control (enabled in emac_adjust_link() later) */
- emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH);
- emac_wr(priv, MAC_FC_PAUSE_HIGH_THRESHOLD, DEFAULT_FC_FIFO_HIGH);
- emac_wr(priv, MAC_FC_HIGH_PAUSE_TIME, DEFAULT_FC_PAUSE_TIME);
- emac_wr(priv, MAC_FC_PAUSE_LOW_THRESHOLD, 0);
-
/* RX IRQ mitigation */
rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
EMAC_RX_FRAMES);
@@ -1027,57 +1012,6 @@ static int emac_mdio_init(struct emac_priv *priv)
return ret;
}
-static void emac_set_tx_fc(struct emac_priv *priv, bool enable)
-{
- u32 val;
-
- val = emac_rd(priv, MAC_FC_CONTROL);
-
- FIELD_MODIFY(MREGBIT_FC_GENERATION_ENABLE, &val, enable);
- FIELD_MODIFY(MREGBIT_AUTO_FC_GENERATION_ENABLE, &val, enable);
-
- emac_wr(priv, MAC_FC_CONTROL, val);
-}
-
-static void emac_set_rx_fc(struct emac_priv *priv, bool enable)
-{
- u32 val = emac_rd(priv, MAC_FC_CONTROL);
-
- FIELD_MODIFY(MREGBIT_FC_DECODE_ENABLE, &val, enable);
-
- emac_wr(priv, MAC_FC_CONTROL, val);
-}
-
-static void emac_set_fc(struct emac_priv *priv, u8 fc)
-{
- emac_set_tx_fc(priv, fc & FLOW_CTRL_TX);
- emac_set_rx_fc(priv, fc & FLOW_CTRL_RX);
- priv->flow_control = fc;
-}
-
-static void emac_set_fc_autoneg(struct emac_priv *priv)
-{
- struct phy_device *phydev = priv->ndev->phydev;
- u32 local_adv, remote_adv;
- u8 fc;
-
- local_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
-
- remote_adv = 0;
-
- if (phydev->pause)
- remote_adv |= LPA_PAUSE_CAP;
-
- if (phydev->asym_pause)
- remote_adv |= LPA_PAUSE_ASYM;
-
- fc = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
-
- priv->flow_control_autoneg = true;
-
- emac_set_fc(priv, fc);
-}
-
/*
* Even though this MAC supports gigabit operation, it only provides 32-bit
* statistics counters. The most overflow-prone counters are the "bytes" ones,
@@ -1448,42 +1382,6 @@ static void emac_ethtool_get_regs(struct net_device *dev,
emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
}
-static void emac_get_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pause)
-{
- struct emac_priv *priv = netdev_priv(dev);
-
- pause->autoneg = priv->flow_control_autoneg;
- pause->tx_pause = !!(priv->flow_control & FLOW_CTRL_TX);
- pause->rx_pause = !!(priv->flow_control & FLOW_CTRL_RX);
-}
-
-static int emac_set_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pause)
-{
- struct emac_priv *priv = netdev_priv(dev);
- u8 fc = 0;
-
- if (!netif_running(dev))
- return -ENETDOWN;
-
- priv->flow_control_autoneg = pause->autoneg;
-
- if (pause->autoneg) {
- emac_set_fc_autoneg(priv);
- } else {
- if (pause->tx_pause)
- fc |= FLOW_CTRL_TX;
-
- if (pause->rx_pause)
- fc |= FLOW_CTRL_RX;
-
- emac_set_fc(priv, fc);
- }
-
- return 0;
-}
-
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1658,8 +1556,6 @@ static void emac_adjust_link(struct net_device *dev)
emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
- emac_set_fc_autoneg(priv);
-
/*
* Reschedule stats updates now that link is up. See comments in
* emac_stats_update().
@@ -1744,12 +1640,12 @@ static int emac_phy_connect(struct net_device *ndev)
goto err_node_put;
}
- phy_support_asym_pause(phydev);
-
phydev->mac_managed_pm = true;
emac_update_delay_line(priv);
+ phy_attached_info(phydev);
+
err_node_put:
of_node_put(np);
return ret;
@@ -1915,9 +1811,6 @@ static const struct ethtool_ops emac_ethtool_ops = {
.get_sset_count = emac_get_sset_count,
.get_strings = emac_get_strings,
.get_ethtool_stats = emac_get_ethtool_stats,
-
- .get_pauseparam = emac_get_pauseparam,
- .set_pauseparam = emac_set_pauseparam,
};
static const struct net_device_ops emac_netdev_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 907fe2e927f0..07088d03dbab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -374,6 +374,15 @@ config DWMAC_LOONGSON
This selects the LOONGSON PCI bus support for the stmmac driver,
 	  supporting the Ethernet controller on the Loongson-2K1000 SoC and LS7A1000 bridge.
+config DWMAC_MOTORCOMM
+ tristate "Motorcomm PCI DWMAC support"
+ depends on PCI
+ select MOTORCOMM_PHY
+ select STMMAC_LIBPCI
+ help
+	  This enables the glue driver for Motorcomm DWMAC-based PCI Ethernet
+ controllers. Currently only YT6801 is supported.
+
config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 7bf528731034..c9263987ef8d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -48,4 +48,5 @@ obj-$(CONFIG_STMMAC_LIBPCI) += stmmac_libpci.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
obj-$(CONFIG_DWMAC_LOONGSON) += dwmac-loongson.o
+obj-$(CONFIG_DWMAC_MOTORCOMM) += dwmac-motorcomm.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 49df46be3669..d26e8a063022 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -323,6 +323,10 @@ struct stmmac_safety_stats {
#define PHY_INTF_SEL_SMII 6
#define PHY_INTF_SEL_REVMII 7
+/* XGMAC uses a different encoding - from the AgileX5 documentation */
+#define PHY_INTF_GMII 0
+#define PHY_INTF_RGMII 1
+
/* MSI defines */
#define STMMAC_MSI_VEC_MAX 32
@@ -390,7 +394,6 @@ enum request_irq_err {
REQ_IRQ_ERR_SFTY,
REQ_IRQ_ERR_SFTY_UE,
REQ_IRQ_ERR_SFTY_CE,
- REQ_IRQ_ERR_LPI,
REQ_IRQ_ERR_WOL,
REQ_IRQ_ERR_MAC,
REQ_IRQ_ERR_NO,
@@ -512,6 +515,8 @@ struct dma_features {
unsigned int dbgmem;
/* Number of Policing Counters */
unsigned int pcsel;
+ /* Active PHY interface, PHY_INTF_SEL_xxx */
+ u8 actphyif;
};
/* RX Buffer size must be multiple of 4/8/16 bytes */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 49d6a866244f..e62e2ebcf273 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -32,13 +32,11 @@
#define RDES0_DESCRIPTOR_ERROR BIT(14)
#define RDES0_ERROR_SUMMARY BIT(15)
#define RDES0_FRAME_LEN_MASK GENMASK(29, 16)
-#define RDES0_FRAME_LEN_SHIFT 16
#define RDES0_DA_FILTER_FAIL BIT(30)
#define RDES0_OWN BIT(31)
/* RDES1 */
#define RDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
#define RDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
-#define RDES1_BUFFER2_SIZE_SHIFT 11
#define RDES1_SECOND_ADDRESS_CHAINED BIT(24)
#define RDES1_END_RING BIT(25)
#define RDES1_DISABLE_IC BIT(31)
@@ -53,7 +51,6 @@
#define ERDES1_SECOND_ADDRESS_CHAINED BIT(14)
#define ERDES1_END_RING BIT(15)
#define ERDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
-#define ERDES1_BUFFER2_SIZE_SHIFT 16
#define ERDES1_DISABLE_IC BIT(31)
/* Normal transmit descriptor defines */
@@ -77,14 +74,12 @@
/* TDES1 */
#define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
#define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
-#define TDES1_BUFFER2_SIZE_SHIFT 11
#define TDES1_TIME_STAMP_ENABLE BIT(22)
#define TDES1_DISABLE_PADDING BIT(23)
#define TDES1_SECOND_ADDRESS_CHAINED BIT(24)
#define TDES1_END_RING BIT(25)
#define TDES1_CRC_DISABLE BIT(26)
#define TDES1_CHECKSUM_INSERTION_MASK GENMASK(28, 27)
-#define TDES1_CHECKSUM_INSERTION_SHIFT 27
#define TDES1_FIRST_SEGMENT BIT(29)
#define TDES1_LAST_SEGMENT BIT(30)
#define TDES1_INTERRUPT BIT(31)
@@ -109,7 +104,6 @@
#define ETDES0_SECOND_ADDRESS_CHAINED BIT(20)
#define ETDES0_END_RING BIT(21)
#define ETDES0_CHECKSUM_INSERTION_MASK GENMASK(23, 22)
-#define ETDES0_CHECKSUM_INSERTION_SHIFT 22
#define ETDES0_TIME_STAMP_ENABLE BIT(25)
#define ETDES0_DISABLE_PADDING BIT(26)
#define ETDES0_CRC_DISABLE BIT(27)
@@ -120,7 +114,6 @@
/* TDES1 */
#define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0)
#define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
-#define ETDES1_BUFFER2_SIZE_SHIFT 16
/* Extended Receive descriptor definitions */
#define ERDES4_IP_PAYLOAD_TYPE_MASK GENMASK(6, 2)
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 40f7f2da9c5e..9d1a94a4fa49 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -23,9 +23,8 @@ static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
int bfsize)
{
if (bfsize == BUF_SIZE_16KiB)
- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
- << ERDES1_BUFFER2_SIZE_SHIFT)
- & ERDES1_BUFFER2_SIZE_MASK);
+ p->des1 |= cpu_to_le32(FIELD_PREP(ERDES1_BUFFER2_SIZE_MASK,
+ BUF_SIZE_8KiB));
if (end)
p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -39,15 +38,20 @@ static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
p->des0 &= cpu_to_le32(~ETDES0_END_RING);
}
+/* The maximum buffer 1 size is 8KiB - 1. However, we limit it to 4KiB. */
static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
- if (unlikely(len > BUF_SIZE_4KiB)) {
- p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB)
- << ETDES1_BUFFER2_SIZE_SHIFT)
- & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
- & ETDES1_BUFFER1_SIZE_MASK));
- } else
- p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK));
+ unsigned int buffer1_max_length = BUF_SIZE_4KiB;
+
+ if (unlikely(len > buffer1_max_length)) {
+ p->des1 |= cpu_to_le32(FIELD_PREP(ETDES1_BUFFER2_SIZE_MASK,
+ len - buffer1_max_length) |
+ FIELD_PREP(ETDES1_BUFFER1_SIZE_MASK,
+ buffer1_max_length));
+ } else {
+ p->des1 |= cpu_to_le32(FIELD_PREP(ETDES1_BUFFER1_SIZE_MASK,
+ len));
+ }
}
/* Normal descriptors */
@@ -57,8 +61,8 @@ static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
int bfsize2;
bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
- p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
- & RDES1_BUFFER2_SIZE_MASK);
+ p->des1 |= cpu_to_le32(FIELD_PREP(RDES1_BUFFER2_SIZE_MASK,
+ bfsize2));
}
if (end)
@@ -73,16 +77,20 @@ static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
p->des1 &= cpu_to_le32(~TDES1_END_RING);
}
+/* The maximum buffer 1 size is 2KiB - 1, limited by the mask width */
static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
- if (unlikely(len > BUF_SIZE_2KiB)) {
- unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
- & TDES1_BUFFER1_SIZE_MASK;
- p->des1 |= cpu_to_le32((((len - buffer1)
- << TDES1_BUFFER2_SIZE_SHIFT)
- & TDES1_BUFFER2_SIZE_MASK) | buffer1);
- } else
- p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK));
+ unsigned int buffer1_max_length = BUF_SIZE_2KiB - 1;
+
+ if (unlikely(len > buffer1_max_length)) {
+ p->des1 |= cpu_to_le32(FIELD_PREP(TDES1_BUFFER2_SIZE_MASK,
+ len - buffer1_max_length) |
+ FIELD_PREP(TDES1_BUFFER1_SIZE_MASK,
+ buffer1_max_length));
+ } else {
+ p->des1 |= cpu_to_le32(FIELD_PREP(TDES1_BUFFER1_SIZE_MASK,
+ len));
+ }
}
/* Specific functions used for Chain mode */
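These descs_com.h rewrites, like the *_SHIFT macro removals in descs.h above, switch to FIELD_PREP() from <linux/bitfield.h>. FIELD_PREP() derives the shift from the mask at build time, so each field needs only its GENMASK() definition, and a constant value that overflows the field becomes a compile error. A worked example of the equivalence, using one of the masks from this file:

    #include <linux/bitfield.h>

    #define TDES1_BUFFER2_SIZE_MASK	GENMASK(21, 11)

    /* old: the shift is a second, manually maintained constant */
    desc |= (len << 11) & TDES1_BUFFER2_SIZE_MASK;

    /* new: shift computed from the mask, overflow checked for constants */
    desc |= FIELD_PREP(TDES1_BUFFER2_SIZE_MASK, len);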
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index db288fbd5a4d..c4e85197629d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -28,11 +28,11 @@
#define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20)
#define GPR_ENET_QOS_RGMII_EN (0x1 << 21)
-#define MX93_GPR_ENET_QOS_INTF_MODE_MASK GENMASK(3, 0)
#define MX93_GPR_ENET_QOS_INTF_SEL_MASK GENMASK(3, 1)
-#define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0)
-#define MX93_GPR_ENET_QOS_CLK_SEL_MASK BIT_MASK(0)
-#define MX93_GPR_CLK_SEL_OFFSET (4)
+#define MX93_GPR_ENET_QOS_ENABLE BIT(0)
+
+#define MX93_ENET_CLK_SEL_OFFSET (4)
+#define MX93_ENET_QOS_CLK_TX_SEL_MASK BIT_MASK(0)
#define DMA_BUS_MODE 0x00001000
#define DMA_BUS_MODE_SFT_RESET (0x1 << 0)
@@ -46,7 +46,7 @@ struct imx_dwmac_ops {
u32 flags;
bool mac_rgmii_txclk_auto_adj;
- int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr);
+ int (*fix_soc_reset)(struct stmmac_priv *priv);
int (*set_intf_mode)(struct imx_priv_data *dwmac, u8 phy_intf_sel);
void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
};
@@ -95,17 +95,18 @@ static int imx93_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel)
if (phy_intf_sel == PHY_INTF_SEL_RMII && dwmac->rmii_refclk_ext) {
ret = regmap_clear_bits(dwmac->intf_regmap,
dwmac->intf_reg_off +
- MX93_GPR_CLK_SEL_OFFSET,
- MX93_GPR_ENET_QOS_CLK_SEL_MASK);
+ MX93_ENET_CLK_SEL_OFFSET,
+ MX93_ENET_QOS_CLK_TX_SEL_MASK);
if (ret)
return ret;
}
val = FIELD_PREP(MX93_GPR_ENET_QOS_INTF_SEL_MASK, phy_intf_sel) |
- MX93_GPR_ENET_QOS_CLK_GEN_EN;
+ MX93_GPR_ENET_QOS_ENABLE;
return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
- MX93_GPR_ENET_QOS_INTF_MODE_MASK, val);
+ MX93_GPR_ENET_QOS_INTF_SEL_MASK |
+ MX93_GPR_ENET_QOS_ENABLE, val);
};
static int imx_dwmac_clks_config(void *priv, bool enabled)
@@ -205,7 +206,8 @@ static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
old_ctrl = readl(dwmac->base_addr + MAC_CTRL_REG);
ctrl = old_ctrl & ~CTRL_SPEED_MASK;
regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
- MX93_GPR_ENET_QOS_INTF_MODE_MASK, 0);
+ MX93_GPR_ENET_QOS_INTF_SEL_MASK |
+ MX93_GPR_ENET_QOS_ENABLE, 0);
writel(ctrl, dwmac->base_addr + MAC_CTRL_REG);
/* Ensure the settings for CTRL are applied. */
@@ -213,19 +215,22 @@ static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
usleep_range(10, 20);
iface &= MX93_GPR_ENET_QOS_INTF_SEL_MASK;
- iface |= MX93_GPR_ENET_QOS_CLK_GEN_EN;
+ iface |= MX93_GPR_ENET_QOS_ENABLE;
regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
- MX93_GPR_ENET_QOS_INTF_MODE_MASK, iface);
+ MX93_GPR_ENET_QOS_INTF_SEL_MASK |
+ MX93_GPR_ENET_QOS_ENABLE, iface);
writel(old_ctrl, dwmac->base_addr + MAC_CTRL_REG);
}
-static int imx_dwmac_mx93_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
+static int imx_dwmac_mx93_reset(struct stmmac_priv *priv)
{
struct plat_stmmacenet_data *plat_dat = priv->plat;
- u32 value = readl(ioaddr + DMA_BUS_MODE);
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value;
/* DMA SW reset */
+ value = readl(ioaddr + DMA_BUS_MODE);
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
@@ -268,9 +273,9 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
if (of_machine_is_compatible("fsl,imx8mp") ||
of_machine_is_compatible("fsl,imx91") ||
of_machine_is_compatible("fsl,imx93")) {
- /* Binding doc describes the propety:
+ /* Binding doc describes the property:
* is required by i.MX8MP, i.MX91, i.MX93.
- * is optinoal for i.MX8DXL.
+ * is optional for i.MX8DXL.
*/
dwmac->intf_regmap =
syscon_regmap_lookup_by_phandle_args(np, "intf_mode", 1,
@@ -320,6 +325,9 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
+ if (data->flags & STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD)
+ plat_dat->flags |= STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD;
+
/* Default TX Q0 to use TSO and rest TXQ for TBS */
for (int i = 1; i < plat_dat->tx_queues_to_use; i++)
plat_dat->tx_queues_cfg[i].tbs_en = 1;
@@ -355,7 +363,8 @@ static struct imx_dwmac_ops imx8mp_dwmac_data = {
.addr_width = 34,
.mac_rgmii_txclk_auto_adj = false,
.set_intf_mode = imx8mp_set_intf_mode,
- .flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY,
+ .flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY |
+ STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD,
};
static struct imx_dwmac_ops imx8dxl_dwmac_data = {
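Note that the i.MX93 hunks replace MX93_GPR_ENET_QOS_INTF_MODE_MASK with an explicit OR of the two fields it contained. The register footprint is unchanged, since the combined mask covers exactly the same bits; the rewrite only makes the enable bit visible at each call site. An illustrative build-time check of that equivalence:

    #include <linux/bits.h>
    #include <linux/build_bug.h>

    /* GENMASK(3, 1) | BIT(0) == 0b1111 == GENMASK(3, 0) */
    static_assert((GENMASK(3, 1) | BIT(0)) == GENMASK(3, 0));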
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index aad1be1ec4c1..92d77b0c2f54 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -719,7 +719,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Setup MSI vector offset specific to Intel mGbE controller */
plat->msi_mac_vec = 29;
- plat->msi_lpi_vec = 28;
plat->msi_sfty_ce_vec = 27;
plat->msi_sfty_ue_vec = 26;
plat->msi_rx_base_vec = 0;
@@ -1177,8 +1176,6 @@ static int stmmac_config_multi_msi(struct pci_dev *pdev,
res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
- if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
- res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
@@ -1294,7 +1291,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
*/
plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
- plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 107a7c84ace8..815213223583 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -91,8 +91,8 @@ static void loongson_default_data(struct pci_dev *pdev,
/* Get bus_id, this can be overwritten later */
plat->bus_id = pci_dev_id(pdev);
- /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
- plat->clk_csr = STMMAC_CSR_20_35M;
+ /* clk_csr_i = 100-150MHz & MDC = clk_csr_i/62 */
+ plat->clk_csr = STMMAC_CSR_100_150M;
plat->core_type = DWMAC_CORE_GMAC;
plat->force_sf_dma_mode = 1;
@@ -192,9 +192,8 @@ static void loongson_dwmac_dma_init_channel(struct stmmac_priv *priv,
value |= DMA_BUS_MODE_MAXPBL;
value |= DMA_BUS_MODE_USP;
- value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
- value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
- value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+ value = u32_replace_bits(value, txpbl, DMA_BUS_MODE_PBL_MASK);
+ value = u32_replace_bits(value, rxpbl, DMA_BUS_MODE_RPBL_MASK);
/* Set the Fixed burst mode */
if (dma_cfg->fixed_burst)
@@ -443,13 +442,6 @@ static int loongson_dwmac_dt_config(struct pci_dev *pdev,
res->wol_irq = res->irq;
}
- res->lpi_irq = of_irq_get_byname(np, "eth_lpi");
- if (res->lpi_irq < 0) {
- dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
- ret = -ENODEV;
- goto err_put_node;
- }
-
ret = device_get_phy_mode(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "phy_mode not found\n");
@@ -486,10 +478,12 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
}
/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
-static int loongson_dwmac_fix_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
+static int loongson_dwmac_fix_reset(struct stmmac_priv *priv)
{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value;
+ value = readl(ioaddr + DMA_BUS_MODE);
if (value & DMA_BUS_MODE_SFT_RESET) {
netdev_err(priv->dev, "the PHY clock is missing\n");
return -EINVAL;
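The channel-init hunk above is another bitfield.h conversion: u32_replace_bits() clears the field and inserts the new value in a single expression, replacing the manual clear-then-shift sequence. Equivalent formulations, with the masks as defined by the driver's DMA headers:

    #include <linux/bitfield.h>

    /* before: mask cleared, value shifted in by hand */
    value &= ~DMA_BUS_MODE_PBL_MASK;
    value |= txpbl << DMA_BUS_MODE_PBL_SHIFT;

    /* after: one expression, shift derived from the mask */
    value = u32_replace_bits(value, txpbl, DMA_BUS_MODE_PBL_MASK);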
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-motorcomm.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-motorcomm.c
new file mode 100644
index 000000000000..8b45b9cf7202
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-motorcomm.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DWMAC glue driver for Motorcomm PCI Ethernet controllers
+ *
+ * Copyright (c) 2025-2026 Yao Zi <me@ziyao.cc>
+ */
+
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stmmac.h>
+
+#include "dwmac4.h"
+#include "stmmac.h"
+#include "stmmac_libpci.h"
+
+#define DRIVER_NAME "dwmac-motorcomm"
+
+#define PCI_VENDOR_ID_MOTORCOMM 0x1f0a
+
+/* Register definition */
+#define EPHY_CTRL 0x1004
+/* Clearing this bit asserts resets for the internal MDIO bus and PHY */
+#define EPHY_MDIO_PHY_RESET BIT(0)
+#define OOB_WOL_CTRL 0x1010
+#define OOB_WOL_CTRL_DIS BIT(0)
+#define MGMT_INT_CTRL0 0x1100
+#define INT_MODERATION 0x1108
+#define INT_MODERATION_RX GENMASK(11, 0)
+#define INT_MODERATION_TX GENMASK(27, 16)
+#define EFUSE_OP_CTRL_0 0x1500
+#define EFUSE_OP_MODE GENMASK(1, 0)
+#define EFUSE_OP_ROW_READ 0x1
+#define EFUSE_OP_START BIT(2)
+#define EFUSE_OP_ADDR GENMASK(15, 8)
+#define EFUSE_OP_CTRL_1 0x1504
+#define EFUSE_OP_DONE BIT(1)
+#define EFUSE_OP_RD_DATA GENMASK(31, 24)
+#define SYS_RESET 0x152c
+#define SYS_RESET_RESET BIT(31)
+#define GMAC_OFFSET 0x2000
+
+/* Constants */
+#define EFUSE_READ_TIMEOUT_US 20000
+#define EFUSE_PATCH_REGION_OFFSET 18
+#define EFUSE_PATCH_MAX_NUM 39
+#define EFUSE_ADDR_MACA0LR 0x1520
+#define EFUSE_ADDR_MACA0HR 0x1524
+
+struct motorcomm_efuse_patch {
+ __le16 addr;
+ __le32 data;
+} __packed;
+
+struct dwmac_motorcomm_priv {
+ void __iomem *base;
+};
+
+static int motorcomm_efuse_read_byte(struct dwmac_motorcomm_priv *priv,
+ u8 offset, u8 *byte)
+{
+ u32 reg;
+ int ret;
+
+ writel(FIELD_PREP(EFUSE_OP_MODE, EFUSE_OP_ROW_READ) |
+ FIELD_PREP(EFUSE_OP_ADDR, offset) |
+ EFUSE_OP_START, priv->base + EFUSE_OP_CTRL_0);
+
+ ret = readl_poll_timeout(priv->base + EFUSE_OP_CTRL_1,
+ reg, reg & EFUSE_OP_DONE, 2000,
+ EFUSE_READ_TIMEOUT_US);
+
+ *byte = FIELD_GET(EFUSE_OP_RD_DATA, reg);
+
+ return ret;
+}
+
+static int motorcomm_efuse_read_patch(struct dwmac_motorcomm_priv *priv,
+ u8 index,
+ struct motorcomm_efuse_patch *patch)
+{
+ u8 *p = (u8 *)patch, offset;
+ int i, ret;
+
+ for (i = 0; i < sizeof(*patch); i++) {
+ offset = EFUSE_PATCH_REGION_OFFSET + sizeof(*patch) * index + i;
+
+ ret = motorcomm_efuse_read_byte(priv, offset, &p[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int motorcomm_efuse_get_patch_value(struct dwmac_motorcomm_priv *priv,
+ u16 addr, u32 *value)
+{
+ struct motorcomm_efuse_patch patch;
+ int i, ret;
+
+ for (i = 0; i < EFUSE_PATCH_MAX_NUM; i++) {
+ ret = motorcomm_efuse_read_patch(priv, i, &patch);
+ if (ret)
+ return ret;
+
+ if (patch.addr == 0) {
+ return -ENOENT;
+ } else if (le16_to_cpu(patch.addr) == addr) {
+ *value = le32_to_cpu(patch.data);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static int motorcomm_efuse_read_mac(struct device *dev,
+ struct dwmac_motorcomm_priv *priv, u8 *mac)
+{
+ u32 maca0lr, maca0hr;
+ int ret;
+
+ ret = motorcomm_efuse_get_patch_value(priv, EFUSE_ADDR_MACA0LR,
+ &maca0lr);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to read maca0lr from eFuse\n");
+
+ ret = motorcomm_efuse_get_patch_value(priv, EFUSE_ADDR_MACA0HR,
+ &maca0hr);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to read maca0hr from eFuse\n");
+
+ mac[0] = FIELD_GET(GENMASK(15, 8), maca0hr);
+ mac[1] = FIELD_GET(GENMASK(7, 0), maca0hr);
+ mac[2] = FIELD_GET(GENMASK(31, 24), maca0lr);
+ mac[3] = FIELD_GET(GENMASK(23, 16), maca0lr);
+ mac[4] = FIELD_GET(GENMASK(15, 8), maca0lr);
+ mac[5] = FIELD_GET(GENMASK(7, 0), maca0lr);
+
+ return 0;
+}
+
+static void motorcomm_deassert_mdio_phy_reset(struct dwmac_motorcomm_priv *priv)
+{
+ u32 reg = readl(priv->base + EPHY_CTRL);
+
+ reg |= EPHY_MDIO_PHY_RESET;
+
+ writel(reg, priv->base + EPHY_CTRL);
+}
+
+static void motorcomm_reset(struct dwmac_motorcomm_priv *priv)
+{
+ u32 reg = readl(priv->base + SYS_RESET);
+
+ reg &= ~SYS_RESET_RESET;
+ writel(reg, priv->base + SYS_RESET);
+
+ reg |= SYS_RESET_RESET;
+ writel(reg, priv->base + SYS_RESET);
+
+ motorcomm_deassert_mdio_phy_reset(priv);
+}
+
+static void motorcomm_init(struct dwmac_motorcomm_priv *priv)
+{
+ writel(0x0, priv->base + MGMT_INT_CTRL0);
+
+ writel(FIELD_PREP(INT_MODERATION_RX, 200) |
+ FIELD_PREP(INT_MODERATION_TX, 200),
+ priv->base + INT_MODERATION);
+
+ /*
+ * OOB WOL must be disabled during normal operation, or DMA interrupts
+ * cannot be delivered to the host.
+ */
+ writel(OOB_WOL_CTRL_DIS, priv->base + OOB_WOL_CTRL);
+}
+
+static int motorcomm_resume(struct device *dev, void *bsp_priv)
+{
+ struct dwmac_motorcomm_priv *priv = bsp_priv;
+ int ret;
+
+ ret = stmmac_pci_plat_resume(dev, bsp_priv);
+ if (ret)
+ return ret;
+
+ /*
+ * When recovering from D3hot, EPHY_MDIO_PHY_RESET is automatically
+ * asserted, and must be deasserted for normal operation.
+ */
+ motorcomm_deassert_mdio_phy_reset(priv);
+ motorcomm_init(priv);
+
+ return 0;
+}
+
+static struct plat_stmmacenet_data *
+motorcomm_default_plat_data(struct pci_dev *pdev)
+{
+ struct plat_stmmacenet_data *plat;
+ struct device *dev = &pdev->dev;
+
+ plat = stmmac_plat_dat_alloc(dev);
+ if (!plat)
+ return NULL;
+
+ plat->mdio_bus_data = devm_kzalloc(dev, sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return NULL;
+
+ plat->dma_cfg = devm_kzalloc(dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
+ if (!plat->dma_cfg)
+ return NULL;
+
+ plat->axi = devm_kzalloc(dev, sizeof(*plat->axi), GFP_KERNEL);
+ if (!plat->axi)
+ return NULL;
+
+ plat->dma_cfg->pbl = DEFAULT_DMA_PBL;
+ plat->dma_cfg->pblx8 = true;
+ plat->dma_cfg->txpbl = 32;
+ plat->dma_cfg->rxpbl = 32;
+ plat->dma_cfg->eame = true;
+ plat->dma_cfg->mixed_burst = true;
+
+ plat->axi->axi_wr_osr_lmt = 1;
+ plat->axi->axi_rd_osr_lmt = 1;
+ plat->axi->axi_mb = true;
+ plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN32;
+
+ plat->bus_id = pci_dev_id(pdev);
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;
+ /*
+	 * YT6801 requires a 25MHz clock input/oscillator to function, which
+	 * is likely the source of the CSR clock.
+ */
+ plat->clk_csr = STMMAC_CSR_20_35M;
+ plat->tx_coe = 1;
+ plat->rx_coe = 1;
+ plat->clk_ref_rate = 125000000;
+ plat->core_type = DWMAC_CORE_GMAC4;
+ plat->suspend = stmmac_pci_plat_suspend;
+ plat->resume = motorcomm_resume;
+ plat->flags = STMMAC_FLAG_TSO_EN |
+ STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
+
+ return plat;
+}
+
+static void motorcomm_free_irq(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ pci_free_irq_vectors(pdev);
+}
+
+static int motorcomm_setup_irq(struct pci_dev *pdev,
+ struct stmmac_resources *res,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 6, 6, PCI_IRQ_MSIX);
+ if (ret > 0) {
+ res->rx_irq[0] = pci_irq_vector(pdev, 0);
+ res->tx_irq[0] = pci_irq_vector(pdev, 4);
+ res->irq = pci_irq_vector(pdev, 5);
+
+ plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
+ } else {
+ dev_info(&pdev->dev, "failed to allocate MSI-X vector: %d\n",
+ ret);
+ dev_info(&pdev->dev, "try MSI instead\n");
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to allocate MSI\n");
+
+ res->irq = pci_irq_vector(pdev, 0);
+ }
+
+ return devm_add_action_or_reset(&pdev->dev, motorcomm_free_irq, pdev);
+}
+
+static int motorcomm_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct plat_stmmacenet_data *plat;
+ struct dwmac_motorcomm_priv *priv;
+ struct stmmac_resources res = {};
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ plat = motorcomm_default_plat_data(pdev);
+ if (!plat)
+ return -ENOMEM;
+
+ plat->bsp_priv = priv;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to enable device\n");
+
+ priv->base = pcim_iomap_region(pdev, 0, DRIVER_NAME);
+ if (IS_ERR(priv->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->base),
+ "failed to map IO region\n");
+
+ pci_set_master(pdev);
+
+ /*
+	 * Some PCIe add-on cards based on YT6801 don't deliver MSI(X) with ASPM
+	 * enabled. Sadly there isn't a reliable way to read out the OEM of the
+ * card, so let's disable L1 state unconditionally for safety.
+ */
+ ret = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to disable L1 state: %d\n", ret);
+
+ motorcomm_reset(priv);
+
+ ret = motorcomm_efuse_read_mac(&pdev->dev, priv, res.mac);
+ if (ret == -ENOENT) {
+ dev_warn(&pdev->dev, "eFuse contains no valid MAC address\n");
+ dev_warn(&pdev->dev, "fallback to random MAC address\n");
+
+ eth_random_addr(res.mac);
+ } else if (ret) {
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to read MAC address from eFuse\n");
+ }
+
+ ret = motorcomm_setup_irq(pdev, &res, plat);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to setup IRQ\n");
+
+ motorcomm_init(priv);
+
+ res.addr = priv->base + GMAC_OFFSET;
+
+ return stmmac_dvr_probe(&pdev->dev, plat, &res);
+}
+
+static void motorcomm_remove(struct pci_dev *pdev)
+{
+ stmmac_dvr_remove(&pdev->dev);
+}
+
+static const struct pci_device_id dwmac_motorcomm_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MOTORCOMM, 0x6801) },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, dwmac_motorcomm_pci_id_table);
+
+static struct pci_driver dwmac_motorcomm_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = dwmac_motorcomm_pci_id_table,
+ .probe = motorcomm_probe,
+ .remove = motorcomm_remove,
+ .driver = {
+ .pm = &stmmac_simple_pm_ops,
+ },
+};
+
+module_pci_driver(dwmac_motorcomm_pci_driver);
+
+MODULE_DESCRIPTION("DWMAC glue driver for Motorcomm PCI Ethernet controllers");
+MODULE_AUTHOR("Yao Zi <me@ziyao.cc>");
+MODULE_LICENSE("GPL");
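A note on motorcomm_setup_irq() above: pci_alloc_irq_vectors() returns the number of vectors actually allocated (here pinned to exactly six by min_vecs == max_vecs) or a negative errno, which is why the MSI-X path tests ret > 0 and an error only propagates if the single-vector MSI fallback also fails. The vector indices (0 = RX queue, 4 = TX queue, 5 = MAC/common) encode the YT6801 interrupt layout assumed by the driver. A condensed sketch of the convention:

    /* Return convention, as relied on by motorcomm_setup_irq() */
    ret = pci_alloc_irq_vectors(pdev, 6, 6, PCI_IRQ_MSIX);
    if (ret > 0) {
    	/* success: got exactly six MSI-X vectors */
    } else {
    	/* ret is a negative errno; retry with a single MSI vector */
    	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
    }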
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 0826a7bd32ff..af8204c0e188 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -100,7 +100,6 @@ struct ethqos_emac_driver_data {
struct qcom_ethqos {
struct platform_device *pdev;
void __iomem *rgmii_base;
- void __iomem *mac_base;
int (*configure_func)(struct qcom_ethqos *ethqos, int speed);
unsigned int link_clk_rate;
@@ -660,10 +659,18 @@ static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv)
return ret;
ret = phy_power_on(ethqos->serdes_phy);
- if (ret)
+ if (ret) {
+ phy_exit(ethqos->serdes_phy);
return ret;
+ }
+
+ ret = phy_set_speed(ethqos->serdes_phy, ethqos->serdes_speed);
+ if (ret) {
+ phy_power_off(ethqos->serdes_phy);
+ phy_exit(ethqos->serdes_phy);
+ }
- return phy_set_speed(ethqos->serdes_phy, ethqos->serdes_speed);
+ return ret;
}
static void qcom_ethqos_serdes_powerdown(struct net_device *ndev, void *priv)
@@ -772,8 +779,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(ethqos->rgmii_base),
"Failed to map rgmii resource\n");
- ethqos->mac_base = stmmac_res.addr;
-
data = of_device_get_match_data(dev);
ethqos->por = data->por;
ethqos->num_por = data->num_por;
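The serdes powerup fix above restores the unwind-in-reverse rule: every step that completed must be undone when a later step fails, so a phy_power_on() or phy_set_speed() failure no longer leaks an initialised (or powered) SerDes PHY. The same pattern written as the more conventional goto ladder (labels hypothetical, calls identical):

    ret = phy_init(ethqos->serdes_phy);
    if (ret)
    	return ret;

    ret = phy_power_on(ethqos->serdes_phy);
    if (ret)
    	goto err_exit;

    ret = phy_set_speed(ethqos->serdes_phy, ethqos->serdes_speed);
    if (ret)
    	goto err_power_off;

    return 0;

    err_power_off:
    	phy_power_off(ethqos->serdes_phy);
    err_exit:
    	phy_exit(ethqos->serdes_phy);
    	return ret;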
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
index be7f5eb2cdcf..19f34e18bfef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -214,6 +214,7 @@ static const struct renesas_gbeth_of_data renesas_gmac_of_data = {
};
static const struct of_device_id renesas_gbeth_match[] = {
+ { .compatible = "renesas,r9a08g046-gbeth", .data = &renesas_gbeth_of_data },
{ .compatible = "renesas,r9a09g077-gbeth", .data = &renesas_gmac_of_data },
{ .compatible = "renesas,rzv2h-gbeth", .data = &renesas_gbeth_of_data },
{ /* Sentinel */ }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 0a95f54e725e..b0441a368cb1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -26,24 +26,43 @@
struct rk_priv_data;
-struct rk_reg_speed_data {
- unsigned int rgmii_10;
- unsigned int rgmii_100;
- unsigned int rgmii_1000;
- unsigned int rmii_10;
- unsigned int rmii_100;
+struct rk_clock_fields {
+ /* io_clksel_cru_mask - io_clksel bit in clock GRF register which,
+ * when set, selects the tx clock from CRU.
+ */
+ u16 io_clksel_cru_mask;
+ /* io_clksel_io_mask - io_clksel bit in clock GRF register which,
+ * when set, selects the tx clock from IO.
+ */
+ u16 io_clksel_io_mask;
+ u16 gmii_clk_sel_mask;
+ u16 rmii_clk_sel_mask;
+ u16 rmii_gate_en_mask;
+ u16 rmii_mode_mask;
+ u16 mac_speed_mask;
};
struct rk_gmac_ops {
+ int (*init)(struct rk_priv_data *bsp_priv);
void (*set_to_rgmii)(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay);
void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
int (*set_speed)(struct rk_priv_data *bsp_priv,
phy_interface_t interface, int speed);
- void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
- bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
void (*integrated_phy_powerdown)(struct rk_priv_data *bsp_priv);
+
+ u16 gmac_grf_reg;
+ u16 gmac_phy_intf_sel_mask;
+ u16 gmac_rmii_mode_mask;
+
+ u16 clock_grf_reg;
+ struct rk_clock_fields clock;
+
+ bool gmac_grf_reg_in_php;
+ bool clock_grf_reg_in_php;
+ bool supports_rgmii;
+ bool supports_rmii;
bool php_grf_required;
bool regs_valid;
u32 regs[];
@@ -77,6 +96,8 @@ struct rk_priv_data {
bool clk_enabled;
bool clock_input;
bool integrated_phy;
+ bool supports_rgmii;
+ bool supports_rmii;
struct clk_bulk_data *clks;
int num_clks;
@@ -89,51 +110,121 @@ struct rk_priv_data {
struct regmap *grf;
struct regmap *php_grf;
+
+ u16 gmac_grf_reg;
+ u16 gmac_phy_intf_sel_mask;
+ u16 gmac_rmii_mode_mask;
+
+ u16 clock_grf_reg;
+ struct rk_clock_fields clock;
};
-static int rk_set_reg_speed(struct rk_priv_data *bsp_priv,
- const struct rk_reg_speed_data *rsd,
- unsigned int reg, phy_interface_t interface,
- int speed)
+#define GMAC_CLK_DIV1_125M 0
+#define GMAC_CLK_DIV50_2_5M 2
+#define GMAC_CLK_DIV5_25M 3
+
+static int rk_gmac_rgmii_clk_div(int speed)
{
- unsigned int val;
-
- if (phy_interface_mode_is_rgmii(interface)) {
- if (speed == SPEED_10) {
- val = rsd->rgmii_10;
- } else if (speed == SPEED_100) {
- val = rsd->rgmii_100;
- } else if (speed == SPEED_1000) {
- val = rsd->rgmii_1000;
- } else {
- /* Phylink will not allow inappropriate speeds for
- * interface modes, so this should never happen.
- */
- return -EINVAL;
- }
- } else if (interface == PHY_INTERFACE_MODE_RMII) {
- if (speed == SPEED_10) {
- val = rsd->rmii_10;
- } else if (speed == SPEED_100) {
- val = rsd->rmii_100;
- } else {
- /* Phylink will not allow inappropriate speeds for
- * interface modes, so this should never happen.
- */
- return -EINVAL;
- }
- } else {
- /* This should never happen, as .get_interfaces() limits
- * the interface modes that are supported to RGMII and/or
- * RMII.
- */
- return -EINVAL;
- }
+ if (speed == SPEED_10)
+ return GMAC_CLK_DIV50_2_5M;
+ if (speed == SPEED_100)
+ return GMAC_CLK_DIV5_25M;
+ if (speed == SPEED_1000)
+ return GMAC_CLK_DIV1_125M;
+ return -EINVAL;
+}
- regmap_write(bsp_priv->grf, reg, val);
+static int rk_get_phy_intf_sel(phy_interface_t interface)
+{
+ int ret = stmmac_get_phy_intf_sel(interface);
- return 0;
+ /* Only RGMII and RMII are supported */
+ if (ret != PHY_INTF_SEL_RGMII && ret != PHY_INTF_SEL_RMII)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static u32 rk_encode_wm16(u16 val, u16 mask)
+{
+	u32 reg_val = mask << 16;
+
+	if (mask)
+ reg_val |= mask & (val << (ffs(mask) - 1));
+
+ return reg_val;
+}
+
+static int rk_write_gmac_grf_reg(struct rk_priv_data *bsp_priv, u32 val)
+{
+ struct regmap *regmap;
+
+ if (bsp_priv->ops->gmac_grf_reg_in_php)
+ regmap = bsp_priv->php_grf;
+ else
+ regmap = bsp_priv->grf;
+
+ return regmap_write(regmap, bsp_priv->gmac_grf_reg, val);
+}
+
+static int rk_write_clock_grf_reg(struct rk_priv_data *bsp_priv, u32 val)
+{
+ struct regmap *regmap;
+
+ if (bsp_priv->ops->clock_grf_reg_in_php)
+ regmap = bsp_priv->php_grf;
+ else
+ regmap = bsp_priv->grf;
+
+ return regmap_write(regmap, bsp_priv->clock_grf_reg, val);
+}
+
+static int rk_set_rmii_gate_en(struct rk_priv_data *bsp_priv, bool state)
+{
+ u32 val;
+
+ if (!bsp_priv->clock.rmii_gate_en_mask)
+ return 0;
+
+ val = rk_encode_wm16(state, bsp_priv->clock.rmii_gate_en_mask);
+
+ return rk_write_clock_grf_reg(bsp_priv, val);
+}
+
+static int rk_ungate_rmii_clock(struct rk_priv_data *bsp_priv)
+{
+ return rk_set_rmii_gate_en(bsp_priv, false);
+}
+
+static int rk_gate_rmii_clock(struct rk_priv_data *bsp_priv)
+{
+ return rk_set_rmii_gate_en(bsp_priv, true);
+}
+
+static int rk_configure_io_clksel(struct rk_priv_data *bsp_priv)
+{
+ bool io, cru;
+ u32 val;
+
+ if (!bsp_priv->clock.io_clksel_io_mask &&
+ !bsp_priv->clock.io_clksel_cru_mask)
+ return 0;
+
+ io = bsp_priv->clock_input;
+ cru = !io;
+
+ /* The io_clksel configuration can be either:
+ * 0=CRU, 1=IO (rk3506, rk3520, rk3576) or
+ * 0=IO, 1=CRU (rk3588)
+ * where CRU means the transmit clock comes from the CRU and IO
+ * means the transmit clock comes from IO.
+ *
+ * Handle this by having two masks.
+ */
+ val = rk_encode_wm16(io, bsp_priv->clock.io_clksel_io_mask) |
+ rk_encode_wm16(cru, bsp_priv->clock.io_clksel_cru_mask);
+
+ return rk_write_clock_grf_reg(bsp_priv, val);
}
static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
@@ -151,8 +242,6 @@ static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
#define GRF_FIELD(hi, lo, val) \
FIELD_PREP_WM16(GENMASK_U16(hi, lo), val)
-#define GRF_FIELD_CONST(hi, lo, val) \
- FIELD_PREP_WM16_CONST(GENMASK_U16(hi, lo), val)
#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
#define GRF_CLR_BIT(nr) (BIT(nr+16))
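The GRF_BIT()/GRF_CLR_BIT() macros kept here, and the new rk_encode_wm16() helper above, both target Rockchip's write-masked ("HIWORD") GRF registers: the upper 16 bits of the written word select which of the lower 16 bits take effect, so individual fields can be updated without a read-modify-write cycle. A worked example that follows directly from the helper's definition:

    /*
     * rk_encode_wm16(1, BIT(6)):
     *   reg_val = BIT(6) << 16           = 0x00400000   write-enable bit
     *   ffs(BIT(6)) - 1 = 6, so 1 << 6   = 0x00000040   field value
     *   result                           = 0x00400040
     *
     * Writing 0x00400040 sets bit 6 and leaves every other bit alone;
     * rk_encode_wm16(0, BIT(6)) gives 0x00400000, which clears bit 6.
     */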
@@ -162,15 +251,17 @@ static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
#define RK_GRF_MACPHY_CON0 0xb00
-#define RK_GRF_MACPHY_CON1 0xb04
-#define RK_GRF_MACPHY_CON2 0xb08
-#define RK_GRF_MACPHY_CON3 0xb0c
-
#define RK_MACPHY_ENABLE GRF_BIT(0)
#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
#define RK_GMAC2PHY_RMII_MODE GRF_FIELD(7, 6, 1)
+
+#define RK_GRF_MACPHY_CON1 0xb04
+
+#define RK_GRF_MACPHY_CON2 0xb08
#define RK_GRF_CON2_MACPHY_ID GRF_FIELD(15, 0, 0x1234)
+
+#define RK_GRF_MACPHY_CON3 0xb0c
#define RK_GRF_CON3_MACPHY_ID GRF_FIELD(5, 0, 0x35)
static void rk_gmac_integrated_ephy_powerup(struct rk_priv_data *priv)
@@ -233,49 +324,16 @@ static void rk_gmac_integrated_fephy_powerdown(struct rk_priv_data *priv,
#define PX30_GRF_GMAC_CON1 0x0904
-/* PX30_GRF_GMAC_CON1 */
-#define PX30_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
-#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2)
-#define PX30_GMAC_SPEED_100M GRF_BIT(2)
-
-static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
-}
-
-static int px30_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = bsp_priv->dev;
- unsigned int con1;
- long rate;
-
- if (!clk_mac_speed) {
- dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
- return -EINVAL;
- }
-
- if (speed == 10) {
- con1 = PX30_GMAC_SPEED_10M;
- rate = 2500000;
- } else if (speed == 100) {
- con1 = PX30_GMAC_SPEED_100M;
- rate = 25000000;
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- return -EINVAL;
- }
+static const struct rk_gmac_ops px30_ops = {
+ .set_speed = rk_set_clk_mac_speed,
- regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, con1);
+ .gmac_grf_reg = PX30_GRF_GMAC_CON1,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(6, 4),
- return clk_set_rate(clk_mac_speed, rate);
-}
+ .clock_grf_reg = PX30_GRF_GMAC_CON1,
+ .clock.mac_speed_mask = BIT_U16(2),
-static const struct rk_gmac_ops px30_ops = {
- .set_to_rmii = px30_set_to_rmii,
- .set_speed = px30_set_speed,
+ .supports_rmii = true,
};
#define RK3128_GRF_MAC_CON0 0x0168
@@ -290,57 +348,31 @@ static const struct rk_gmac_ops px30_ops = {
#define RK3128_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3128_GRF_MAC_CON1 */
-#define RK3128_GMAC_PHY_INTF_SEL(val) GRF_FIELD(8, 6, val)
#define RK3128_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
-#define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10)
-#define RK3128_GMAC_SPEED_100M GRF_BIT(10)
-#define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11)
-#define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define RK3128_GMAC_CLK_125M GRF_FIELD_CONST(13, 12, 0)
-#define RK3128_GMAC_CLK_25M GRF_FIELD_CONST(13, 12, 3)
-#define RK3128_GMAC_CLK_2_5M GRF_FIELD_CONST(13, 12, 2)
-#define RK3128_GMAC_RMII_MODE GRF_BIT(14)
-#define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3128_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3128_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3128_GMAC_RMII_MODE);
-}
+static const struct rk_gmac_ops rk3128_ops = {
+ .set_to_rgmii = rk3128_set_to_rgmii,
-static const struct rk_reg_speed_data rk3128_reg_speed_data = {
- .rgmii_10 = RK3128_GMAC_CLK_2_5M,
- .rgmii_100 = RK3128_GMAC_CLK_25M,
- .rgmii_1000 = RK3128_GMAC_CLK_125M,
- .rmii_10 = RK3128_GMAC_RMII_CLK_2_5M | RK3128_GMAC_SPEED_10M,
- .rmii_100 = RK3128_GMAC_RMII_CLK_25M | RK3128_GMAC_SPEED_100M,
-};
+ .gmac_grf_reg = RK3128_GRF_MAC_CON1,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(8, 6),
+ .gmac_rmii_mode_mask = BIT_U16(14),
-static int rk3128_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3128_reg_speed_data,
- RK3128_GRF_MAC_CON1, interface, speed);
-}
+ .clock_grf_reg = RK3128_GRF_MAC_CON1,
+ .clock.gmii_clk_sel_mask = GENMASK_U16(13, 12),
+ .clock.rmii_clk_sel_mask = BIT_U16(11),
+ .clock.mac_speed_mask = BIT_U16(10),
-static const struct rk_gmac_ops rk3128_ops = {
- .set_to_rgmii = rk3128_set_to_rgmii,
- .set_to_rmii = rk3128_set_to_rmii,
- .set_speed = rk3128_set_speed,
+ .supports_rmii = true,
};
#define RK3228_GRF_MAC_CON0 0x0900
@@ -353,18 +385,8 @@ static const struct rk_gmac_ops rk3128_ops = {
#define RK3228_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3228_GRF_MAC_CON1 */
-#define RK3228_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
-#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2)
-#define RK3228_GMAC_SPEED_100M GRF_BIT(2)
-#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7)
-#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-#define RK3228_GMAC_CLK_125M GRF_FIELD_CONST(9, 8, 0)
-#define RK3228_GMAC_CLK_25M GRF_FIELD_CONST(9, 8, 3)
-#define RK3228_GMAC_CLK_2_5M GRF_FIELD_CONST(9, 8, 2)
-#define RK3228_GMAC_RMII_MODE GRF_BIT(10)
-#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10)
#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
#define RK3228_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
#define RK3228_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -377,8 +399,6 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3228_GMAC_RMII_MODE_CLR |
DELAY_ENABLE(RK3228, tx_delay, rx_delay));
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
@@ -388,29 +408,10 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3228_GMAC_RMII_MODE);
-
/* set MAC to RMII mode */
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11));
}
-static const struct rk_reg_speed_data rk3228_reg_speed_data = {
- .rgmii_10 = RK3228_GMAC_CLK_2_5M,
- .rgmii_100 = RK3228_GMAC_CLK_25M,
- .rgmii_1000 = RK3228_GMAC_CLK_125M,
- .rmii_10 = RK3228_GMAC_RMII_CLK_2_5M | RK3228_GMAC_SPEED_10M,
- .rmii_100 = RK3228_GMAC_RMII_CLK_25M | RK3228_GMAC_SPEED_100M,
-};
-
-static int rk3228_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3228_reg_speed_data,
- RK3228_GRF_MAC_CON1, interface, speed);
-}
-
static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3228_GRF_CON_MUX,
@@ -422,27 +423,25 @@ static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
static const struct rk_gmac_ops rk3228_ops = {
.set_to_rgmii = rk3228_set_to_rgmii,
.set_to_rmii = rk3228_set_to_rmii,
- .set_speed = rk3228_set_speed,
.integrated_phy_powerup = rk3228_integrated_phy_powerup,
.integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
+
+ .gmac_grf_reg = RK3228_GRF_MAC_CON1,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(6, 4),
+ .gmac_rmii_mode_mask = BIT_U16(10),
+
+ .clock_grf_reg = RK3228_GRF_MAC_CON1,
+ .clock.gmii_clk_sel_mask = GENMASK_U16(9, 8),
+ .clock.rmii_clk_sel_mask = BIT_U16(7),
+ .clock.mac_speed_mask = BIT_U16(2),
};
#define RK3288_GRF_SOC_CON1 0x0248
#define RK3288_GRF_SOC_CON3 0x0250
/*RK3288_GRF_SOC_CON1*/
-#define RK3288_GMAC_PHY_INTF_SEL(val) GRF_FIELD(8, 6, val)
#define RK3288_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3288_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
-#define RK3288_GMAC_SPEED_10M GRF_CLR_BIT(10)
-#define RK3288_GMAC_SPEED_100M GRF_BIT(10)
-#define RK3288_GMAC_RMII_CLK_25M GRF_BIT(11)
-#define RK3288_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define RK3288_GMAC_CLK_125M GRF_FIELD_CONST(13, 12, 0)
-#define RK3288_GMAC_CLK_25M GRF_FIELD_CONST(13, 12, 3)
-#define RK3288_GMAC_CLK_2_5M GRF_FIELD_CONST(13, 12, 2)
-#define RK3288_GMAC_RMII_MODE GRF_BIT(14)
-#define RK3288_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
/*RK3288_GRF_SOC_CON3*/
#define RK3288_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
@@ -455,73 +454,41 @@ static const struct rk_gmac_ops rk3228_ops = {
static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3288_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3288_GMAC_RMII_MODE);
-}
+static const struct rk_gmac_ops rk3288_ops = {
+ .set_to_rgmii = rk3288_set_to_rgmii,
-static const struct rk_reg_speed_data rk3288_reg_speed_data = {
- .rgmii_10 = RK3288_GMAC_CLK_2_5M,
- .rgmii_100 = RK3288_GMAC_CLK_25M,
- .rgmii_1000 = RK3288_GMAC_CLK_125M,
- .rmii_10 = RK3288_GMAC_RMII_CLK_2_5M | RK3288_GMAC_SPEED_10M,
- .rmii_100 = RK3288_GMAC_RMII_CLK_25M | RK3288_GMAC_SPEED_100M,
-};
+ .gmac_grf_reg = RK3288_GRF_SOC_CON1,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(8, 6),
+ .gmac_rmii_mode_mask = BIT_U16(14),
-static int rk3288_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3288_reg_speed_data,
- RK3288_GRF_SOC_CON1, interface, speed);
-}
+ .clock_grf_reg = RK3288_GRF_SOC_CON1,
+ .clock.gmii_clk_sel_mask = GENMASK_U16(13, 12),
+ .clock.rmii_clk_sel_mask = BIT_U16(11),
+ .clock.mac_speed_mask = BIT_U16(10),
-static const struct rk_gmac_ops rk3288_ops = {
- .set_to_rgmii = rk3288_set_to_rgmii,
- .set_to_rmii = rk3288_set_to_rmii,
- .set_speed = rk3288_set_speed,
+ .supports_rmii = true,
};
#define RK3308_GRF_MAC_CON0 0x04a0
/* RK3308_GRF_MAC_CON0 */
-#define RK3308_GMAC_PHY_INTF_SEL(val) GRF_FIELD(4, 2, val)
#define RK3308_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3308_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
-#define RK3308_GMAC_SPEED_10M GRF_CLR_BIT(0)
-#define RK3308_GMAC_SPEED_100M GRF_BIT(0)
-static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
-}
-
-static const struct rk_reg_speed_data rk3308_reg_speed_data = {
- .rmii_10 = RK3308_GMAC_SPEED_10M,
- .rmii_100 = RK3308_GMAC_SPEED_100M,
-};
+static const struct rk_gmac_ops rk3308_ops = {
+ .gmac_grf_reg = RK3308_GRF_MAC_CON0,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(4, 2),
-static int rk3308_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3308_reg_speed_data,
- RK3308_GRF_MAC_CON0, interface, speed);
-}
+ .clock_grf_reg = RK3308_GRF_MAC_CON0,
+ .clock.mac_speed_mask = BIT_U16(0),
-static const struct rk_gmac_ops rk3308_ops = {
- .set_to_rmii = rk3308_set_to_rmii,
- .set_speed = rk3308_set_speed,
+ .supports_rmii = true,
};
#define RK3328_GRF_MAC_CON0 0x0900
@@ -534,30 +501,38 @@ static const struct rk_gmac_ops rk3308_ops = {
#define RK3328_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3328_GRF_MAC_CON1 */
-#define RK3328_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3328_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3328_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
-#define RK3328_GMAC_SPEED_10M GRF_CLR_BIT(2)
-#define RK3328_GMAC_SPEED_100M GRF_BIT(2)
-#define RK3328_GMAC_RMII_CLK_25M GRF_BIT(7)
-#define RK3328_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-#define RK3328_GMAC_CLK_125M GRF_FIELD_CONST(12, 11, 0)
-#define RK3328_GMAC_CLK_25M GRF_FIELD_CONST(12, 11, 3)
-#define RK3328_GMAC_CLK_2_5M GRF_FIELD_CONST(12, 11, 2)
-#define RK3328_GMAC_RMII_MODE GRF_BIT(9)
-#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9)
#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
#define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
/* RK3328_GRF_MACPHY_CON1 */
#define RK3328_MACPHY_RMII_MODE GRF_BIT(9)
+static int rk3328_init(struct rk_priv_data *bsp_priv)
+{
+ switch (bsp_priv->id) {
+ case 0: /* gmac2io */
+ bsp_priv->gmac_grf_reg = RK3328_GRF_MAC_CON1;
+ bsp_priv->clock_grf_reg = RK3328_GRF_MAC_CON1;
+ bsp_priv->clock.gmii_clk_sel_mask = GENMASK_U16(12, 11);
+ return 0;
+
+ case 1: /* gmac2phy */
+ bsp_priv->gmac_grf_reg = RK3328_GRF_MAC_CON2;
+ bsp_priv->clock_grf_reg = RK3328_GRF_MAC_CON2;
+ bsp_priv->supports_rgmii = false;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3328_GMAC_RMII_MODE_CLR |
RK3328_GMAC_RXCLK_DLY_ENABLE |
RK3328_GMAC_TXCLK_DLY_ENABLE);
@@ -566,40 +541,6 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3328_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- unsigned int reg;
-
- reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
- RK3328_GRF_MAC_CON1;
-
- regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3328_GMAC_RMII_MODE);
-}
-
-static const struct rk_reg_speed_data rk3328_reg_speed_data = {
- .rgmii_10 = RK3328_GMAC_CLK_2_5M,
- .rgmii_100 = RK3328_GMAC_CLK_25M,
- .rgmii_1000 = RK3328_GMAC_CLK_125M,
- .rmii_10 = RK3328_GMAC_RMII_CLK_2_5M | RK3328_GMAC_SPEED_10M,
- .rmii_100 = RK3328_GMAC_RMII_CLK_25M | RK3328_GMAC_SPEED_100M,
-};
-
-static int rk3328_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- unsigned int reg;
-
- if (interface == PHY_INTERFACE_MODE_RMII && bsp_priv->integrated_phy)
- reg = RK3328_GRF_MAC_CON2;
- else
- reg = RK3328_GRF_MAC_CON1;
-
- return rk_set_reg_speed(bsp_priv, &rk3328_reg_speed_data, reg,
- interface, speed);
-}
-
static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3328_GRF_MACPHY_CON1,
@@ -609,29 +550,33 @@ static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
}
static const struct rk_gmac_ops rk3328_ops = {
+ .init = rk3328_init,
.set_to_rgmii = rk3328_set_to_rgmii,
- .set_to_rmii = rk3328_set_to_rmii,
- .set_speed = rk3328_set_speed,
.integrated_phy_powerup = rk3328_integrated_phy_powerup,
.integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
+
+ .gmac_phy_intf_sel_mask = GENMASK_U16(6, 4),
+ .gmac_rmii_mode_mask = BIT_U16(9),
+
+ .clock.rmii_clk_sel_mask = BIT_U16(7),
+ .clock.mac_speed_mask = BIT_U16(2),
+
+ .supports_rmii = true,
+
+ .regs_valid = true,
+ .regs = {
+ 0xff540000, /* gmac2io */
+ 0xff550000, /* gmac2phy */
+ 0, /* sentinel */
+ },
};
#define RK3366_GRF_SOC_CON6 0x0418
#define RK3366_GRF_SOC_CON7 0x041c
/* RK3366_GRF_SOC_CON6 */
-#define RK3366_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3366_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3366_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
-#define RK3366_GMAC_SPEED_10M GRF_CLR_BIT(7)
-#define RK3366_GMAC_SPEED_100M GRF_BIT(7)
-#define RK3366_GMAC_RMII_CLK_25M GRF_BIT(3)
-#define RK3366_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3366_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
-#define RK3366_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
-#define RK3366_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
-#define RK3366_GMAC_RMII_MODE GRF_BIT(6)
-#define RK3366_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
/* RK3366_GRF_SOC_CON7 */
#define RK3366_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
@@ -644,59 +589,33 @@ static const struct rk_gmac_ops rk3328_ops = {
static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3366_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3366_GMAC_RMII_MODE);
-}
+static const struct rk_gmac_ops rk3366_ops = {
+ .set_to_rgmii = rk3366_set_to_rgmii,
-static const struct rk_reg_speed_data rk3366_reg_speed_data = {
- .rgmii_10 = RK3366_GMAC_CLK_2_5M,
- .rgmii_100 = RK3366_GMAC_CLK_25M,
- .rgmii_1000 = RK3366_GMAC_CLK_125M,
- .rmii_10 = RK3366_GMAC_RMII_CLK_2_5M | RK3366_GMAC_SPEED_10M,
- .rmii_100 = RK3366_GMAC_RMII_CLK_25M | RK3366_GMAC_SPEED_100M,
-};
+ .gmac_grf_reg = RK3366_GRF_SOC_CON6,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(11, 9),
+ .gmac_rmii_mode_mask = BIT_U16(6),
-static int rk3366_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3366_reg_speed_data,
- RK3366_GRF_SOC_CON6, interface, speed);
-}
+ .clock_grf_reg = RK3366_GRF_SOC_CON6,
+ .clock.gmii_clk_sel_mask = GENMASK_U16(5, 4),
+ .clock.rmii_clk_sel_mask = BIT_U16(3),
+ .clock.mac_speed_mask = BIT_U16(7),
-static const struct rk_gmac_ops rk3366_ops = {
- .set_to_rgmii = rk3366_set_to_rgmii,
- .set_to_rmii = rk3366_set_to_rmii,
- .set_speed = rk3366_set_speed,
+ .supports_rmii = true,
};
#define RK3368_GRF_SOC_CON15 0x043c
#define RK3368_GRF_SOC_CON16 0x0440
/* RK3368_GRF_SOC_CON15 */
-#define RK3368_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3368_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3368_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
-#define RK3368_GMAC_SPEED_10M GRF_CLR_BIT(7)
-#define RK3368_GMAC_SPEED_100M GRF_BIT(7)
-#define RK3368_GMAC_RMII_CLK_25M GRF_BIT(3)
-#define RK3368_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3368_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
-#define RK3368_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
-#define RK3368_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
-#define RK3368_GMAC_RMII_MODE GRF_BIT(6)
-#define RK3368_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
/* RK3368_GRF_SOC_CON16 */
#define RK3368_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
@@ -709,59 +628,33 @@ static const struct rk_gmac_ops rk3366_ops = {
static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3368_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3368_GMAC_RMII_MODE);
-}
+static const struct rk_gmac_ops rk3368_ops = {
+ .set_to_rgmii = rk3368_set_to_rgmii,
-static const struct rk_reg_speed_data rk3368_reg_speed_data = {
- .rgmii_10 = RK3368_GMAC_CLK_2_5M,
- .rgmii_100 = RK3368_GMAC_CLK_25M,
- .rgmii_1000 = RK3368_GMAC_CLK_125M,
- .rmii_10 = RK3368_GMAC_RMII_CLK_2_5M | RK3368_GMAC_SPEED_10M,
- .rmii_100 = RK3368_GMAC_RMII_CLK_25M | RK3368_GMAC_SPEED_100M,
-};
+ .gmac_grf_reg = RK3368_GRF_SOC_CON15,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(11, 9),
+ .gmac_rmii_mode_mask = BIT_U16(6),
-static int rk3368_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3368_reg_speed_data,
- RK3368_GRF_SOC_CON15, interface, speed);
-}
+ .clock_grf_reg = RK3368_GRF_SOC_CON15,
+ .clock.gmii_clk_sel_mask = GENMASK_U16(5, 4),
+ .clock.rmii_clk_sel_mask = BIT_U16(3),
+ .clock.mac_speed_mask = BIT_U16(7),
-static const struct rk_gmac_ops rk3368_ops = {
- .set_to_rgmii = rk3368_set_to_rgmii,
- .set_to_rmii = rk3368_set_to_rmii,
- .set_speed = rk3368_set_speed,
+ .supports_rmii = true,
};
#define RK3399_GRF_SOC_CON5 0xc214
#define RK3399_GRF_SOC_CON6 0xc218
/* RK3399_GRF_SOC_CON5 */
-#define RK3399_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3399_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3399_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
-#define RK3399_GMAC_SPEED_10M GRF_CLR_BIT(7)
-#define RK3399_GMAC_SPEED_100M GRF_BIT(7)
-#define RK3399_GMAC_RMII_CLK_25M GRF_BIT(3)
-#define RK3399_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3399_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
-#define RK3399_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
-#define RK3399_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
-#define RK3399_GMAC_RMII_MODE GRF_BIT(6)
-#define RK3399_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
/* RK3399_GRF_SOC_CON6 */
#define RK3399_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
@@ -774,41 +667,25 @@ static const struct rk_gmac_ops rk3368_ops = {
static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
- RK3399_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
- RK3399_GMAC_RMII_MODE);
-}
+static const struct rk_gmac_ops rk3399_ops = {
+ .set_to_rgmii = rk3399_set_to_rgmii,
-static const struct rk_reg_speed_data rk3399_reg_speed_data = {
- .rgmii_10 = RK3399_GMAC_CLK_2_5M,
- .rgmii_100 = RK3399_GMAC_CLK_25M,
- .rgmii_1000 = RK3399_GMAC_CLK_125M,
- .rmii_10 = RK3399_GMAC_RMII_CLK_2_5M | RK3399_GMAC_SPEED_10M,
- .rmii_100 = RK3399_GMAC_RMII_CLK_25M | RK3399_GMAC_SPEED_100M,
-};
+ .gmac_grf_reg = RK3399_GRF_SOC_CON5,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(11, 9),
+ .gmac_rmii_mode_mask = BIT_U16(6),
-static int rk3399_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rk3399_reg_speed_data,
- RK3399_GRF_SOC_CON5, interface, speed);
-}
+ .clock_grf_reg = RK3399_GRF_SOC_CON5,
+ .clock.gmii_clk_sel_mask = GENMASK_U16(5, 4),
+ .clock.rmii_clk_sel_mask = BIT_U16(3),
+ .clock.mac_speed_mask = BIT_U16(7),
-static const struct rk_gmac_ops rk3399_ops = {
- .set_to_rgmii = rk3399_set_to_rgmii,
- .set_to_rmii = rk3399_set_to_rmii,
- .set_speed = rk3399_set_speed,
+ .supports_rmii = true,
};
#define RK3506_GRF_SOC_CON8 0x0020
@@ -816,56 +693,32 @@ static const struct rk_gmac_ops rk3399_ops = {
#define RK3506_GMAC_RMII_MODE GRF_BIT(1)
-#define RK3506_GMAC_CLK_RMII_DIV2 GRF_BIT(3)
-#define RK3506_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(3)
-
-#define RK3506_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(5)
-#define RK3506_GMAC_CLK_SELECT_IO GRF_BIT(5)
-
-#define RK3506_GMAC_CLK_RMII_GATE GRF_BIT(2)
-#define RK3506_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(2)
-
-static void rk3506_set_to_rmii(struct rk_priv_data *bsp_priv)
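+/* Both RK3506 instances share one bit layout; only the clock GRF
+ * register differs (SOC_CON8 for gmac0, SOC_CON11 for gmac1).
+ */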
+static int rk3506_init(struct rk_priv_data *bsp_priv)
{
- unsigned int id = bsp_priv->id, offset;
+ switch (bsp_priv->id) {
+ case 0:
+ bsp_priv->clock_grf_reg = RK3506_GRF_SOC_CON8;
+ return 0;
- offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8;
- regmap_write(bsp_priv->grf, offset, RK3506_GMAC_RMII_MODE);
-}
+ case 1:
+ bsp_priv->clock_grf_reg = RK3506_GRF_SOC_CON11;
+ return 0;
-static const struct rk_reg_speed_data rk3506_reg_speed_data = {
- .rmii_10 = RK3506_GMAC_CLK_RMII_DIV20,
- .rmii_100 = RK3506_GMAC_CLK_RMII_DIV2,
-};
-
-static int rk3506_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- unsigned int id = bsp_priv->id, offset;
-
- offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8;
- return rk_set_reg_speed(bsp_priv, &rk3506_reg_speed_data,
- offset, interface, speed);
+ default:
+ return -EINVAL;
+ }
}
-static void rk3506_set_clock_selection(struct rk_priv_data *bsp_priv,
- bool input, bool enable)
-{
- unsigned int value, offset, id = bsp_priv->id;
+static const struct rk_gmac_ops rk3506_ops = {
+ .init = rk3506_init,
- offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8;
+ .clock.io_clksel_io_mask = BIT_U16(5),
+ .clock.rmii_clk_sel_mask = BIT_U16(3),
+ .clock.rmii_gate_en_mask = BIT_U16(2),
+ .clock.rmii_mode_mask = BIT_U16(1),
- value = input ? RK3506_GMAC_CLK_SELECT_IO :
- RK3506_GMAC_CLK_SELECT_CRU;
- value |= enable ? RK3506_GMAC_CLK_RMII_NOGATE :
- RK3506_GMAC_CLK_RMII_GATE;
- regmap_write(bsp_priv->grf, offset, value);
-}
+ .supports_rmii = true,
-static const struct rk_gmac_ops rk3506_ops = {
- .set_to_rmii = rk3506_set_to_rmii,
- .set_speed = rk3506_set_speed,
- .set_clock_selection = rk3506_set_clock_selection,
.regs_valid = true,
.regs = {
0xff4c8000, /* gmac0 */
@@ -888,34 +741,35 @@ static const struct rk_gmac_ops rk3506_ops = {
#define RK3528_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(15, 8, val)
#define RK3528_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(7, 0, val)
-#define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1)
-#define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8)
-#define RK3528_GMAC1_PHY_INTF_SEL_RMII GRF_BIT(8)
-
-#define RK3528_GMAC1_CLK_SELECT_CRU GRF_CLR_BIT(12)
-#define RK3528_GMAC1_CLK_SELECT_IO GRF_BIT(12)
-
-#define RK3528_GMAC0_CLK_RMII_DIV2 GRF_BIT(3)
-#define RK3528_GMAC0_CLK_RMII_DIV20 GRF_CLR_BIT(3)
-#define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10)
-#define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10)
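+/* The two RK3528 controllers use different GRF register files and bit
+ * layouts: gmac0 (RMII-only, optionally driving the integrated PHY)
+ * lives in the VO GRF, gmac1 (RGMII-capable) in the VPU GRF, so the
+ * clock masks are filled in per instance.
+ */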
+static int rk3528_init(struct rk_priv_data *bsp_priv)
+{
+ switch (bsp_priv->id) {
+ case 0:
+ bsp_priv->clock_grf_reg = RK3528_VO_GRF_GMAC_CON;
+ bsp_priv->clock.rmii_clk_sel_mask = BIT_U16(3);
+ bsp_priv->clock.rmii_gate_en_mask = BIT_U16(2);
+ bsp_priv->clock.rmii_mode_mask = BIT_U16(1);
+ bsp_priv->supports_rgmii = false;
+ return 0;
+
+ case 1:
+ bsp_priv->clock_grf_reg = RK3528_VPU_GRF_GMAC_CON5;
+ bsp_priv->clock.io_clksel_io_mask = BIT_U16(12);
+ bsp_priv->clock.gmii_clk_sel_mask = GENMASK_U16(11, 10);
+ bsp_priv->clock.rmii_clk_sel_mask = BIT_U16(10);
+ bsp_priv->clock.rmii_gate_en_mask = BIT_U16(9);
+ bsp_priv->clock.rmii_mode_mask = BIT_U16(8);
+ return 0;
-#define RK3528_GMAC1_CLK_RGMII_DIV1 GRF_FIELD_CONST(11, 10, 0)
-#define RK3528_GMAC1_CLK_RGMII_DIV5 GRF_FIELD_CONST(11, 10, 3)
-#define RK3528_GMAC1_CLK_RGMII_DIV50 GRF_FIELD_CONST(11, 10, 2)
-
-#define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2)
-#define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2)
-#define RK3528_GMAC1_CLK_RMII_GATE GRF_BIT(9)
-#define RK3528_GMAC1_CLK_RMII_NOGATE GRF_CLR_BIT(9)
+ default:
+ return -EINVAL;
+ }
+}
static void rk3528_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
- RK3528_GMAC1_PHY_INTF_SEL_RGMII);
-
- regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
DELAY_ENABLE(RK3528, tx_delay, rx_delay));
regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON6,
@@ -923,65 +777,6 @@ static void rk3528_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3528_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3528_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- if (bsp_priv->id == 1)
- regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
- RK3528_GMAC1_PHY_INTF_SEL_RMII);
- else
- regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON,
- RK3528_GMAC0_PHY_INTF_SEL_RMII |
- RK3528_GMAC0_CLK_RMII_DIV2);
-}
-
-static const struct rk_reg_speed_data rk3528_gmac0_reg_speed_data = {
- .rmii_10 = RK3528_GMAC0_CLK_RMII_DIV20,
- .rmii_100 = RK3528_GMAC0_CLK_RMII_DIV2,
-};
-
-static const struct rk_reg_speed_data rk3528_gmac1_reg_speed_data = {
- .rgmii_10 = RK3528_GMAC1_CLK_RGMII_DIV50,
- .rgmii_100 = RK3528_GMAC1_CLK_RGMII_DIV5,
- .rgmii_1000 = RK3528_GMAC1_CLK_RGMII_DIV1,
- .rmii_10 = RK3528_GMAC1_CLK_RMII_DIV20,
- .rmii_100 = RK3528_GMAC1_CLK_RMII_DIV2,
-};
-
-static int rk3528_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- const struct rk_reg_speed_data *rsd;
- unsigned int reg;
-
- if (bsp_priv->id == 1) {
- rsd = &rk3528_gmac1_reg_speed_data;
- reg = RK3528_VPU_GRF_GMAC_CON5;
- } else {
- rsd = &rk3528_gmac0_reg_speed_data;
- reg = RK3528_VO_GRF_GMAC_CON;
- }
-
- return rk_set_reg_speed(bsp_priv, rsd, reg, interface, speed);
-}
-
-static void rk3528_set_clock_selection(struct rk_priv_data *bsp_priv,
- bool input, bool enable)
-{
- unsigned int val;
-
- if (bsp_priv->id == 1) {
- val = input ? RK3528_GMAC1_CLK_SELECT_IO :
- RK3528_GMAC1_CLK_SELECT_CRU;
- val |= enable ? RK3528_GMAC1_CLK_RMII_NOGATE :
- RK3528_GMAC1_CLK_RMII_GATE;
- regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, val);
- } else {
- val = enable ? RK3528_GMAC0_CLK_RMII_NOGATE :
- RK3528_GMAC0_CLK_RMII_GATE;
- regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON, val);
- }
-}
-
static void rk3528_integrated_phy_powerup(struct rk_priv_data *bsp_priv)
{
rk_gmac_integrated_fephy_powerup(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
@@ -993,12 +788,13 @@ static void rk3528_integrated_phy_powerdown(struct rk_priv_data *bsp_priv)
}
static const struct rk_gmac_ops rk3528_ops = {
+ .init = rk3528_init,
.set_to_rgmii = rk3528_set_to_rgmii,
- .set_to_rmii = rk3528_set_to_rmii,
- .set_speed = rk3528_set_speed,
- .set_clock_selection = rk3528_set_clock_selection,
.integrated_phy_powerup = rk3528_integrated_phy_powerup,
.integrated_phy_powerdown = rk3528_integrated_phy_powerdown,
+
+ .supports_rmii = true,
+
.regs_valid = true,
.regs = {
0xffbd0000, /* gmac0 */
@@ -1013,7 +809,6 @@ static const struct rk_gmac_ops rk3528_ops = {
#define RK3568_GRF_GMAC1_CON1 0x038c
/* RK3568_GRF_GMAC0_CON1 && RK3568_GRF_GMAC1_CON1 */
-#define RK3568_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3568_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3568_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3568_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -1025,6 +820,22 @@ static const struct rk_gmac_ops rk3528_ops = {
#define RK3568_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
#define RK3568_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
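+/* Both RK3568 instances share the same bit layout; only the
+ * per-instance GMACx_CON1 register differs.
+ */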
+static int rk3568_init(struct rk_priv_data *bsp_priv)
+{
+ switch (bsp_priv->id) {
+ case 0:
+ bsp_priv->gmac_grf_reg = RK3568_GRF_GMAC0_CON1;
+ return 0;
+
+ case 1:
+ bsp_priv->gmac_grf_reg = RK3568_GRF_GMAC1_CON1;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
@@ -1040,25 +851,19 @@ static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3568_GMAC_CLK_TX_DL_CFG(tx_delay));
regmap_write(bsp_priv->grf, con1,
- RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3568_GMAC_RXCLK_DLY_ENABLE |
RK3568_GMAC_TXCLK_DLY_ENABLE);
}
-static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- u32 con1;
-
- con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
- RK3568_GRF_GMAC0_CON1;
- regmap_write(bsp_priv->grf, con1,
- RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
-}
-
static const struct rk_gmac_ops rk3568_ops = {
+ .init = rk3568_init,
.set_to_rgmii = rk3568_set_to_rgmii,
- .set_to_rmii = rk3568_set_to_rmii,
.set_speed = rk_set_clk_mac_speed,
+
+ .gmac_phy_intf_sel_mask = GENMASK_U16(6, 4),
+
+ .supports_rmii = true,
+
.regs_valid = true,
.regs = {
0xfe2a0000, /* gmac0 */
@@ -1085,32 +890,29 @@ static const struct rk_gmac_ops rk3568_ops = {
#define RK3576_GRF_GMAC_CON0 0X0020
#define RK3576_GRF_GMAC_CON1 0X0024
-#define RK3576_GMAC_RMII_MODE GRF_BIT(3)
-#define RK3576_GMAC_RGMII_MODE GRF_CLR_BIT(3)
-
-#define RK3576_GMAC_CLK_SELECT_IO GRF_BIT(7)
-#define RK3576_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(7)
-
-#define RK3576_GMAC_CLK_RMII_DIV2 GRF_BIT(5)
-#define RK3576_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(5)
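+/* As on RK3568, both instances share one bit layout and differ only in
+ * which GMAC_CON register is used.
+ */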
+static int rk3576_init(struct rk_priv_data *bsp_priv)
+{
+ switch (bsp_priv->id) {
+ case 0:
+ bsp_priv->gmac_grf_reg = RK3576_GRF_GMAC_CON0;
+ bsp_priv->clock_grf_reg = RK3576_GRF_GMAC_CON0;
+ return 0;
-#define RK3576_GMAC_CLK_RGMII_DIV1 GRF_FIELD_CONST(6, 5, 0)
-#define RK3576_GMAC_CLK_RGMII_DIV5 GRF_FIELD_CONST(6, 5, 3)
-#define RK3576_GMAC_CLK_RGMII_DIV50 GRF_FIELD_CONST(6, 5, 2)
+ case 1:
+ bsp_priv->gmac_grf_reg = RK3576_GRF_GMAC_CON1;
+ bsp_priv->clock_grf_reg = RK3576_GRF_GMAC_CON1;
+ return 0;
-#define RK3576_GMAC_CLK_RMII_GATE GRF_BIT(4)
-#define RK3576_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(4)
+ default:
+ return -EINVAL;
+ }
+}
static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
unsigned int offset_con;
- offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
- RK3576_GRF_GMAC_CON0;
-
- regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RGMII_MODE);
-
offset_con = bsp_priv->id == 1 ? RK3576_VCCIO0_1_3_IOC_CON4 :
RK3576_VCCIO0_1_3_IOC_CON2;
@@ -1129,57 +931,19 @@ static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3576_GMAC_CLK_RX_DL_CFG(rx_delay));
}
-static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- unsigned int offset_con;
-
- offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
- RK3576_GRF_GMAC_CON0;
-
- regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RMII_MODE);
-}
-
-static const struct rk_reg_speed_data rk3578_reg_speed_data = {
- .rgmii_10 = RK3576_GMAC_CLK_RGMII_DIV50,
- .rgmii_100 = RK3576_GMAC_CLK_RGMII_DIV5,
- .rgmii_1000 = RK3576_GMAC_CLK_RGMII_DIV1,
- .rmii_10 = RK3576_GMAC_CLK_RMII_DIV20,
- .rmii_100 = RK3576_GMAC_CLK_RMII_DIV2,
-};
-
-static int rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- unsigned int offset_con;
-
- offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
- RK3576_GRF_GMAC_CON0;
-
- return rk_set_reg_speed(bsp_priv, &rk3578_reg_speed_data, offset_con,
- interface, speed);
-}
-
-static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
- bool enable)
-{
- unsigned int val = input ? RK3576_GMAC_CLK_SELECT_IO :
- RK3576_GMAC_CLK_SELECT_CRU;
- unsigned int offset_con;
+static const struct rk_gmac_ops rk3576_ops = {
+ .init = rk3576_init,
+ .set_to_rgmii = rk3576_set_to_rgmii,
- val |= enable ? RK3576_GMAC_CLK_RMII_NOGATE :
- RK3576_GMAC_CLK_RMII_GATE;
+ .gmac_rmii_mode_mask = BIT_U16(3),
- offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
- RK3576_GRF_GMAC_CON0;
+ .clock.io_clksel_io_mask = BIT_U16(7),
+ .clock.gmii_clk_sel_mask = GENMASK_U16(6, 5),
+ .clock.rmii_clk_sel_mask = BIT_U16(5),
+ .clock.rmii_gate_en_mask = BIT_U16(4),
- regmap_write(bsp_priv->grf, offset_con, val);
-}
+ .supports_rmii = true,
-static const struct rk_gmac_ops rk3576_ops = {
- .set_to_rgmii = rk3576_set_to_rgmii,
- .set_to_rmii = rk3576_set_to_rmii,
- .set_speed = rk3576_set_gmac_speed,
- .set_clock_selection = rk3576_set_clock_selection,
.php_grf_required = true,
.regs_valid = true,
.regs = {
@@ -1206,27 +970,31 @@ static const struct rk_gmac_ops rk3576_ops = {
#define RK3588_GRF_GMAC_CON0 0X0008
#define RK3588_GRF_CLK_CON1 0X0070
-#define RK3588_GMAC_PHY_INTF_SEL(id, val) \
- (GRF_FIELD(5, 3, val) << ((id) * 6))
-
-#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
-#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
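+/* RK3588 packs both instances into the same pair of PHP GRF registers,
+ * distinguished purely by bit position, so the field masks (rather
+ * than the register offsets) are selected per id.
+ */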
+static int rk3588_init(struct rk_priv_data *bsp_priv)
+{
+ switch (bsp_priv->id) {
+ case 0:
+ bsp_priv->gmac_phy_intf_sel_mask = GENMASK_U16(5, 3);
+ bsp_priv->clock.io_clksel_cru_mask = BIT_U16(4);
+ bsp_priv->clock.gmii_clk_sel_mask = GENMASK_U16(3, 2);
+ bsp_priv->clock.rmii_clk_sel_mask = BIT_U16(2);
+ bsp_priv->clock.rmii_gate_en_mask = BIT_U16(1);
+ bsp_priv->clock.rmii_mode_mask = BIT_U16(0);
+ return 0;
+
+ case 1:
+ bsp_priv->gmac_phy_intf_sel_mask = GENMASK_U16(11, 9);
+ bsp_priv->clock.io_clksel_cru_mask = BIT_U16(9);
+ bsp_priv->clock.gmii_clk_sel_mask = GENMASK_U16(8, 7);
+ bsp_priv->clock.rmii_clk_sel_mask = BIT_U16(7);
+ bsp_priv->clock.rmii_gate_en_mask = BIT_U16(6);
+ bsp_priv->clock.rmii_mode_mask = BIT_U16(5);
+ return 0;
-#define RK3588_GMAC_CLK_SELECT_CRU(id) GRF_BIT(5 * (id) + 4)
-#define RK3588_GMAC_CLK_SELECT_IO(id) GRF_CLR_BIT(5 * (id) + 4)
-
-#define RK3588_GMA_CLK_RMII_DIV2(id) GRF_BIT(5 * (id) + 2)
-#define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
-
-#define RK3588_GMAC_CLK_RGMII_DIV1(id) \
- (GRF_FIELD_CONST(3, 2, 0) << ((id) * 5))
-#define RK3588_GMAC_CLK_RGMII_DIV5(id) \
- (GRF_FIELD_CONST(3, 2, 3) << ((id) * 5))
-#define RK3588_GMAC_CLK_RGMII_DIV50(id) \
- (GRF_FIELD_CONST(3, 2, 2) << ((id) * 5))
-
-#define RK3588_GMAC_CLK_RMII_GATE(id) GRF_BIT(5 * (id) + 1)
-#define RK3588_GMAC_CLK_RMII_NOGATE(id) GRF_CLR_BIT(5 * (id) + 1)
+ default:
+ return -EINVAL;
+ }
+}
static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
@@ -1236,12 +1004,6 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
offset_con = bsp_priv->id == 1 ? RK3588_GRF_GMAC_CON9 :
RK3588_GRF_GMAC_CON8;
- regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
- RK3588_GMAC_PHY_INTF_SEL(id, PHY_INTF_SEL_RGMII));
-
- regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
- RK3588_GMAC_CLK_RGMII_MODE(id));
-
regmap_write(bsp_priv->grf, RK3588_GRF_GMAC_CON7,
RK3588_GMAC_RXCLK_DLY_ENABLE(id) |
RK3588_GMAC_TXCLK_DLY_ENABLE(id));
@@ -1251,67 +1013,18 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3588_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
- RK3588_GMAC_PHY_INTF_SEL(bsp_priv->id, PHY_INTF_SEL_RMII));
-
- regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
- RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id));
-}
-
-static int rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- unsigned int val = 0, id = bsp_priv->id;
-
- switch (speed) {
- case 10:
- if (interface == PHY_INTERFACE_MODE_RMII)
- val = RK3588_GMA_CLK_RMII_DIV20(id);
- else
- val = RK3588_GMAC_CLK_RGMII_DIV50(id);
- break;
- case 100:
- if (interface == PHY_INTERFACE_MODE_RMII)
- val = RK3588_GMA_CLK_RMII_DIV2(id);
- else
- val = RK3588_GMAC_CLK_RGMII_DIV5(id);
- break;
- case 1000:
- if (interface != PHY_INTERFACE_MODE_RMII)
- val = RK3588_GMAC_CLK_RGMII_DIV1(id);
- else
- goto err;
- break;
- default:
- goto err;
- }
-
- regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
-
- return 0;
-err:
- return -EINVAL;
-}
+static const struct rk_gmac_ops rk3588_ops = {
+ .init = rk3588_init,
+ .set_to_rgmii = rk3588_set_to_rgmii,
-static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
- bool enable)
-{
- unsigned int val = input ? RK3588_GMAC_CLK_SELECT_IO(bsp_priv->id) :
- RK3588_GMAC_CLK_SELECT_CRU(bsp_priv->id);
+ .gmac_grf_reg_in_php = true,
+ .gmac_grf_reg = RK3588_GRF_GMAC_CON0,
- val |= enable ? RK3588_GMAC_CLK_RMII_NOGATE(bsp_priv->id) :
- RK3588_GMAC_CLK_RMII_GATE(bsp_priv->id);
+ .clock_grf_reg_in_php = true,
+ .clock_grf_reg = RK3588_GRF_CLK_CON1,
- regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
-}
+ .supports_rmii = true,
-static const struct rk_gmac_ops rk3588_ops = {
- .set_to_rgmii = rk3588_set_to_rgmii,
- .set_to_rmii = rk3588_set_to_rmii,
- .set_speed = rk3588_set_gmac_speed,
- .set_clock_selection = rk3588_set_clock_selection,
.php_grf_required = true,
.regs_valid = true,
.regs = {
@@ -1324,35 +1037,18 @@ static const struct rk_gmac_ops rk3588_ops = {
#define RV1108_GRF_GMAC_CON0 0X0900
/* RV1108_GRF_GMAC_CON0 */
-#define RV1108_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RV1108_GMAC_FLOW_CTRL GRF_BIT(3)
#define RV1108_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
-#define RV1108_GMAC_SPEED_10M GRF_CLR_BIT(2)
-#define RV1108_GMAC_SPEED_100M GRF_BIT(2)
-#define RV1108_GMAC_RMII_CLK_25M GRF_BIT(7)
-#define RV1108_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
-}
+static const struct rk_gmac_ops rv1108_ops = {
+ .gmac_grf_reg = RV1108_GRF_GMAC_CON0,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(6, 4),
-static const struct rk_reg_speed_data rv1108_reg_speed_data = {
- .rmii_10 = RV1108_GMAC_RMII_CLK_2_5M | RV1108_GMAC_SPEED_10M,
- .rmii_100 = RV1108_GMAC_RMII_CLK_25M | RV1108_GMAC_SPEED_100M,
-};
+ .clock_grf_reg = RV1108_GRF_GMAC_CON0,
+ .clock.rmii_clk_sel_mask = BIT_U16(7),
+ .clock.mac_speed_mask = BIT_U16(2),
-static int rv1108_set_speed(struct rk_priv_data *bsp_priv,
- phy_interface_t interface, int speed)
-{
- return rk_set_reg_speed(bsp_priv, &rv1108_reg_speed_data,
- RV1108_GRF_GMAC_CON0, interface, speed);
-}
-
-static const struct rk_gmac_ops rv1108_ops = {
- .set_to_rmii = rv1108_set_to_rmii,
- .set_speed = rv1108_set_speed,
+ .supports_rmii = true,
};
#define RV1126_GRF_GMAC_CON0 0X0070
@@ -1360,7 +1056,6 @@ static const struct rk_gmac_ops rv1108_ops = {
#define RV1126_GRF_GMAC_CON2 0X0078
/* RV1126_GRF_GMAC_CON0 */
-#define RV1126_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RV1126_GMAC_FLOW_CTRL GRF_BIT(7)
#define RV1126_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(7)
#define RV1126_GMAC_M0_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -1383,7 +1078,6 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
- RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
RV1126_GMAC_M0_TXCLK_DLY_ENABLE |
RV1126_GMAC_M1_RXCLK_DLY_ENABLE |
@@ -1398,16 +1092,14 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
RV1126_GMAC_M1_CLK_TX_DL_CFG(tx_delay));
}
-static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
-{
- regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
- RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
-}
-
static const struct rk_gmac_ops rv1126_ops = {
.set_to_rgmii = rv1126_set_to_rgmii,
- .set_to_rmii = rv1126_set_to_rmii,
.set_speed = rk_set_clk_mac_speed,
+
+ .gmac_grf_reg = RV1126_GRF_GMAC_CON0,
+ .gmac_phy_intf_sel_mask = GENMASK_U16(6, 4),
+
+ .supports_rmii = true,
};
static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
@@ -1473,19 +1165,15 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
if (ret)
return ret;
- if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
- bsp_priv->ops->set_clock_selection(bsp_priv,
- bsp_priv->clock_input, true);
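+ /* These helpers write the io_clksel and RMII clock gate
+ * fields; they are presumably no-ops when the
+ * corresponding masks are unpopulated.
+ */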
+ rk_configure_io_clksel(bsp_priv);
+ rk_ungate_rmii_clock(bsp_priv);
mdelay(5);
bsp_priv->clk_enabled = true;
}
} else {
if (bsp_priv->clk_enabled) {
- if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) {
- bsp_priv->ops->set_clock_selection(bsp_priv,
- bsp_priv->clock_input, false);
- }
+ rk_gate_rmii_clock(bsp_priv);
clk_bulk_disable_unprepare(bsp_priv->num_clks,
bsp_priv->clks);
@@ -1498,23 +1186,26 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
return 0;
}
-static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
+static int rk_phy_powerup(struct rk_priv_data *bsp_priv)
{
struct regulator *ldo = bsp_priv->regulator;
- struct device *dev = bsp_priv->dev;
int ret;
- if (enable) {
- ret = regulator_enable(ldo);
- if (ret)
- dev_err(dev, "fail to enable phy-supply\n");
- } else {
- ret = regulator_disable(ldo);
- if (ret)
- dev_err(dev, "fail to disable phy-supply\n");
- }
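+ /* Unlike the old combined helper, which always returned 0, the
+ * powerup path now propagates regulator_enable() failures.
+ */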
+ ret = regulator_enable(ldo);
+ if (ret)
+ dev_err(bsp_priv->dev, "failed to enable phy-supply\n");
- return 0;
+ return ret;
+}
+
+static void rk_phy_powerdown(struct rk_priv_data *bsp_priv)
+{
+ struct regulator *ldo = bsp_priv->regulator;
+ int ret;
+
+ ret = regulator_disable(ldo);
+ if (ret)
+ dev_err(bsp_priv->dev, "failed to disable phy-supply\n");
}
static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
@@ -1628,6 +1319,31 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->dev = dev;
+ /* Set the default phy_intf_sel and RMII mode register parameters. */
+ bsp_priv->gmac_grf_reg = ops->gmac_grf_reg;
+ bsp_priv->gmac_phy_intf_sel_mask = ops->gmac_phy_intf_sel_mask;
+ bsp_priv->gmac_rmii_mode_mask = ops->gmac_rmii_mode_mask;
+
+ /* Set the default clock control register-related parameters. */
+ bsp_priv->clock_grf_reg = ops->clock_grf_reg;
+ bsp_priv->clock = ops->clock;
+
+ bsp_priv->supports_rgmii = ops->supports_rgmii || !!ops->set_to_rgmii;
+ bsp_priv->supports_rmii = ops->supports_rmii || !!ops->set_to_rmii;
+
+ if (ops->init) {
+ ret = ops->init(bsp_priv);
+ if (ret) {
+ reset_control_put(bsp_priv->phy_reset);
+ dev_err_probe(dev, ret, "failed to init BSP\n");
+ return ERR_PTR(ret);
+ }
+ }
+
+ if (bsp_priv->clock.io_clksel_cru_mask &&
+ bsp_priv->clock.io_clksel_io_mask)
+ dev_warn(dev, "both CRU and IO io_clksel masks should not be populated - driver may malfunction\n");
+
return bsp_priv;
}
@@ -1638,11 +1354,11 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- if (!bsp_priv->ops->set_to_rgmii)
+ if (!bsp_priv->supports_rgmii)
return -EINVAL;
break;
case PHY_INTERFACE_MODE_RMII:
- if (!bsp_priv->ops->set_to_rmii)
+ if (!bsp_priv->supports_rmii)
return -EINVAL;
break;
default:
@@ -1655,44 +1371,87 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
{
struct device *dev = bsp_priv->dev;
+ u32 val;
int ret;
+ u8 intf;
ret = rk_gmac_check_ops(bsp_priv);
if (ret)
return ret;
+ ret = rk_get_phy_intf_sel(bsp_priv->phy_iface);
+ if (ret < 0)
+ return ret;
+
+ intf = ret;
+
ret = gmac_clk_enable(bsp_priv, true);
if (ret)
return ret;
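+ /* Write the interface selection into the GRF. rk_encode_wm16(),
+ * added earlier in this series, presumably packs the value into
+ * the masked field together with the matching write-enable bits
+ * in the upper half-word, and yields 0 for an unpopulated mask.
+ */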
+ if (bsp_priv->gmac_phy_intf_sel_mask ||
+ bsp_priv->gmac_rmii_mode_mask) {
+ /* If defined, encode the phy_intf_sel value */
+ val = rk_encode_wm16(intf, bsp_priv->gmac_phy_intf_sel_mask);
+
+ /* If defined, encode the RMII mode mask setting. */
+ val |= rk_encode_wm16(intf == PHY_INTF_SEL_RMII,
+ bsp_priv->gmac_rmii_mode_mask);
+
+ ret = rk_write_gmac_grf_reg(bsp_priv, val);
+ if (ret < 0) {
+ gmac_clk_enable(bsp_priv, false);
+ return ret;
+ }
+ }
+
+ if (bsp_priv->clock.rmii_mode_mask) {
+ val = rk_encode_wm16(intf == PHY_INTF_SEL_RMII,
+ bsp_priv->clock.rmii_mode_mask);
+
+ ret = rk_write_clock_grf_reg(bsp_priv, val);
+ if (ret < 0) {
+ gmac_clk_enable(bsp_priv, false);
+ return ret;
+ }
+ }
+
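+ /* The per-SoC callbacks are now optional: fully converted
+ * variants are handled entirely by the generic GRF writes above,
+ * and set_to_rgmii() remains only where delay lines need to be
+ * programmed.
+ */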
/*rmii or rgmii*/
switch (bsp_priv->phy_iface) {
case PHY_INTERFACE_MODE_RGMII:
dev_info(dev, "init for RGMII\n");
- bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
- bsp_priv->rx_delay);
+ if (bsp_priv->ops->set_to_rgmii)
+ bsp_priv->ops->set_to_rgmii(bsp_priv,
+ bsp_priv->tx_delay,
+ bsp_priv->rx_delay);
break;
case PHY_INTERFACE_MODE_RGMII_ID:
dev_info(dev, "init for RGMII_ID\n");
- bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
+ if (bsp_priv->ops->set_to_rgmii)
+ bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
break;
case PHY_INTERFACE_MODE_RGMII_RXID:
dev_info(dev, "init for RGMII_RXID\n");
- bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0);
+ if (bsp_priv->ops->set_to_rgmii)
+ bsp_priv->ops->set_to_rgmii(bsp_priv,
+ bsp_priv->tx_delay, 0);
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
dev_info(dev, "init for RGMII_TXID\n");
- bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay);
+ if (bsp_priv->ops->set_to_rgmii)
+ bsp_priv->ops->set_to_rgmii(bsp_priv,
+ 0, bsp_priv->rx_delay);
break;
case PHY_INTERFACE_MODE_RMII:
dev_info(dev, "init for RMII\n");
- bsp_priv->ops->set_to_rmii(bsp_priv);
+ if (bsp_priv->ops->set_to_rmii)
+ bsp_priv->ops->set_to_rmii(bsp_priv);
break;
default:
dev_err(dev, "NO interface defined!\n");
}
- ret = phy_power_on(bsp_priv, true);
+ ret = rk_phy_powerup(bsp_priv);
if (ret) {
gmac_clk_enable(bsp_priv, false);
return ret;
@@ -1713,7 +1472,7 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
pm_runtime_put_sync(gmac->dev);
- phy_power_on(gmac, false);
+ rk_phy_powerdown(gmac);
gmac_clk_enable(gmac, false);
}
@@ -1722,10 +1481,10 @@ static void rk_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
{
struct rk_priv_data *rk = bsp_priv;
- if (rk->ops->set_to_rgmii)
+ if (rk->supports_rgmii)
phy_interface_set_rgmii(interfaces);
- if (rk->ops->set_to_rmii)
+ if (rk->supports_rmii)
__set_bit(PHY_INTERFACE_MODE_RMII, interfaces);
}
@@ -1733,11 +1492,37 @@ static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
phy_interface_t interface, int speed)
{
struct rk_priv_data *bsp_priv = bsp_priv_;
+ int ret = -EINVAL;
+ bool is_100m;
+ u32 val;
+
+ if (bsp_priv->ops->set_speed) {
+ ret = bsp_priv->ops->set_speed(bsp_priv, interface, speed);
+ if (ret < 0)
+ return ret;
+ }
- if (bsp_priv->ops->set_speed)
- return bsp_priv->ops->set_speed(bsp_priv, interface, speed);
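+ /* Per the removed per-SoC tables: in RGMII modes a two-bit GRF
+ * field divides the 125 MHz clock by 1/5/50 for 1000/100/10
+ * Mbps, while in RMII mode a single bit selects 25 MHz vs
+ * 2.5 MHz and, where present, the MAC speed bit is set for
+ * 100 Mbps alongside it.
+ */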
+ if (phy_interface_mode_is_rgmii(interface) &&
+ bsp_priv->clock.gmii_clk_sel_mask) {
+ ret = rk_gmac_rgmii_clk_div(speed);
+ if (ret < 0)
+ return ret;
- return -EINVAL;
+ val = rk_encode_wm16(ret, bsp_priv->clock.gmii_clk_sel_mask);
+
+ ret = rk_write_clock_grf_reg(bsp_priv, val);
+ } else if (interface == PHY_INTERFACE_MODE_RMII &&
+ (bsp_priv->clock.rmii_clk_sel_mask ||
+ bsp_priv->clock.mac_speed_mask)) {
+ is_100m = speed == SPEED_100;
+ val = rk_encode_wm16(is_100m, bsp_priv->clock.mac_speed_mask) |
+ rk_encode_wm16(is_100m,
+ bsp_priv->clock.rmii_clk_sel_mask);
+
+ ret = rk_write_clock_grf_reg(bsp_priv, val);
+ }
+
+ return ret;
}
static int rk_gmac_suspend(struct device *dev, void *bsp_priv_)
@@ -1776,6 +1561,8 @@ static void rk_gmac_exit(struct device *dev, void *bsp_priv_)
if (priv->plat->phy_node && bsp_priv->integrated_phy)
clk_put(bsp_priv->clk_phy);
+
+ reset_control_put(bsp_priv->phy_reset);
}
static int rk_gmac_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
index 5a485ee98fa7..af594a096676 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -11,12 +11,14 @@
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/stmmac.h>
#include "stmmac_platform.h"
@@ -32,6 +34,8 @@
struct s32_priv_data {
void __iomem *ioaddr;
void __iomem *ctrl_sts;
+ struct regmap *sts_regmap;
+ unsigned int sts_offset;
struct device *dev;
phy_interface_t *intf_mode;
struct clk *tx_clk;
@@ -40,11 +44,17 @@ struct s32_priv_data {
static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac)
{
- writel(S32_PHY_INTF_SEL_RGMII, gmac->ctrl_sts);
+ int ret = 0;
+
+ if (gmac->ctrl_sts)
+ writel(S32_PHY_INTF_SEL_RGMII, gmac->ctrl_sts);
+ else
+ ret = regmap_write(gmac->sts_regmap, gmac->sts_offset,
+ S32_PHY_INTF_SEL_RGMII);
dev_dbg(gmac->dev, "PHY mode set to %s\n", phy_modes(*gmac->intf_mode));
- return 0;
+ return ret;
}
static int s32_gmac_init(struct device *dev, void *priv)
@@ -125,10 +135,16 @@ static int s32_dwmac_probe(struct platform_device *pdev)
"dt configuration failed\n");
/* PHY interface mode control reg */
- gmac->ctrl_sts = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
- if (IS_ERR(gmac->ctrl_sts))
- return dev_err_probe(dev, PTR_ERR(gmac->ctrl_sts),
- "S32CC config region is missing\n");
+ gmac->sts_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "nxp,phy-sel", 1, &gmac->sts_offset);
+ if (gmac->sts_regmap == ERR_PTR(-EPROBE_DEFER))
+ return PTR_ERR(gmac->sts_regmap);
+ if (IS_ERR(gmac->sts_regmap)) {
+ gmac->ctrl_sts = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ if (IS_ERR(gmac->ctrl_sts))
+ return dev_err_probe(dev, PTR_ERR(gmac->ctrl_sts),
+ "S32CC config region is missing\n");
+ }
/* tx clock */
gmac->tx_clk = devm_clk_get(&pdev->dev, "tx");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index a2b52d2c4eb6..4c8991f3b38d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -367,9 +367,8 @@ static int smtg_crosststamp(ktime_t *device, struct system_counterval_t *system,
.use_nsecs = false,
};
- num_snapshot = (readl(ioaddr + XGMAC_TIMESTAMP_STATUS) &
- XGMAC_TIMESTAMP_ATSNS_MASK) >>
- XGMAC_TIMESTAMP_ATSNS_SHIFT;
+ num_snapshot = FIELD_GET(XGMAC_TIMESTAMP_ATSNS_MASK,
+ readl(ioaddr + XGMAC_TIMESTAMP_STATUS));
/* Repeat until the timestamps are from the FIFO last segment */
for (i = 0; i < num_snapshot; i++) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 8aa496ac85cc..c01b86fd64da 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -746,7 +746,7 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv)
v = readl(priv->ioaddr + EMAC_BASIC_CTL1);
writel(v | 0x01, priv->ioaddr + EMAC_BASIC_CTL1);
- /* The timeout was previoulsy set to 10ms, but some board (OrangePI0)
+ /* The timeout was previously set to 10ms, but some board (OrangePI0)
* need more if no cable plugged. 100ms seems OK
*/
err = readl_poll_timeout(priv->ioaddr + EMAC_BASIC_CTL1, v,
@@ -821,7 +821,7 @@ static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
return ret;
}
- /* Make sure the EPHY is properly reseted, as U-Boot may leave
+ /* Make sure the EPHY is properly reset, as U-Boot may leave
* it at deasserted state, and thus it may fail to reset EMAC.
*
* This assumes the driver has exclusive access to the EPHY reset.
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
index e291028ba56e..0d46a6c3f077 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -37,9 +37,9 @@
#define GMAC_GTXCLK_SEL 0x18
#define GMAC_GTXCLK_SEL_PLL BIT(0)
#define GMAC_INTF_CTRL 0x1c
-#define PHY_INTF_MASK BIT(0)
-#define PHY_INTF_RGMII FIELD_PREP(PHY_INTF_MASK, 1)
-#define PHY_INTF_MII_GMII FIELD_PREP(PHY_INTF_MASK, 0)
+#define GMAC_INTF_MASK BIT(0)
+#define GMAC_INTF_RGMII FIELD_PREP(GMAC_INTF_MASK, 1)
+#define GMAC_INTF_MII_GMII FIELD_PREP(GMAC_INTF_MASK, 0)
#define GMAC_TXCLK_OEN 0x20
#define TXCLK_DIR_MASK BIT(0)
#define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0)
@@ -58,13 +58,13 @@ static int thead_dwmac_set_phy_if(struct plat_stmmacenet_data *plat)
switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
- phyif = PHY_INTF_MII_GMII;
+ phyif = GMAC_INTF_MII_GMII;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
- phyif = PHY_INTF_RGMII;
+ phyif = GMAC_INTF_RGMII;
break;
default:
dev_err(dwmac->dev, "unsupported phy interface %s\n",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 7ab791c8d355..547863cb982f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -30,62 +30,30 @@
#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */
/* MAC CTRL defines */
-#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */
-#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */
#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */
#define MAC_CONTROL_PS 0x08000000 /* Port Select */
-#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */
-#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */
#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */
#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */
#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */
#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */
#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */
-#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */
#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */
#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */
-#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */
-#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */
-#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */
-#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */
-#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */
-#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */
-#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */
-#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */
-#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */
-#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
-#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define MAC_CORE_INIT (MAC_CONTROL_HBD)
/* MAC FLOW CTRL defines */
-#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
-#define MAC_FLOW_CTRL_PT_SHIFT 16
-#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */
+#define MAC_FLOW_CTRL_PT_MASK GENMASK(31, 16) /* Pause Time Mask */
#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */
-#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */
-
-/* MII ADDR defines */
-#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
-#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
/*----------------------------------------------------------------------------
* DMA BLOCK defines
*---------------------------------------------------------------------------*/
/* DMA Bus Mode register defines */
-#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */
-#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */
-#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
-#define DMA_BUS_MODE_PBL_SHIFT 8
-#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
-#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
-#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
+#define DMA_BUS_MODE_PBL_MASK GENMASK(13, 8) /* Programmable Burst Len */
#define DMA_BUS_MODE_DEFAULT 0x00000000
-/* DMA Control register defines */
-#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */
-
/* Transmit Threshold Control */
enum ttc_control {
DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 697bba641e05..9fe639fb06bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -20,15 +20,11 @@
#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
#define GMAC_DEBUG 0x00000024 /* GMAC debug register */
-#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
-#define GMAC_INT_STATUS_PMT BIT(3)
-#define GMAC_INT_STATUS_MMCIS BIT(4)
#define GMAC_INT_STATUS_MMCRIS BIT(5)
#define GMAC_INT_STATUS_MMCTIS BIT(6)
#define GMAC_INT_STATUS_MMCCSUM BIT(7)
-#define GMAC_INT_STATUS_TSTAMP BIT(9)
#define GMAC_INT_STATUS_LPIIS BIT(10)
/* interrupt mask register */
@@ -76,7 +72,6 @@ enum power_event {
/* SGMII/RGMII status register */
#define GMAC_RGSMIIIS_LNKMODE BIT(0)
#define GMAC_RGSMIIIS_SPEED GENMASK(2, 1)
-#define GMAC_RGSMIIIS_SPEED_SHIFT 1
#define GMAC_RGSMIIIS_LNKSTS BIT(3)
#define GMAC_RGSMIIIS_JABTO BIT(4)
#define GMAC_RGSMIIIS_FALSECARDET BIT(5)
@@ -90,8 +85,6 @@ enum power_event {
/* GMAC Configuration defines */
#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */
-#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
-#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */
#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
@@ -103,42 +96,25 @@ enum inter_frame_gap {
#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */
#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */
#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
-#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
-#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
-#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
-#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */
-#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
-#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
-#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | \
GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
/* GMAC Frame Filter defines */
#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
-#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
-#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
-#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
#define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */
-#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
-#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
-/* GMII ADDR defines */
-#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
-#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
/* GMAC FLOW CTRL defines */
-#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
-#define GMAC_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_FLOW_CTRL_PT_MASK GENMASK(31, 16) /* Pause Time Mask */
#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */
#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
-#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
/* DEBUG Register defines */
/* MTL TxStatus FIFO */
@@ -147,29 +123,23 @@ enum inter_frame_gap {
#define GMAC_DEBUG_TWCSTS BIT(22) /* MTL Tx FIFO Write Controller */
/* MTL Tx FIFO Read Controller Status */
#define GMAC_DEBUG_TRCSTS_MASK GENMASK(21, 20)
-#define GMAC_DEBUG_TRCSTS_SHIFT 20
-#define GMAC_DEBUG_TRCSTS_IDLE 0
#define GMAC_DEBUG_TRCSTS_READ 1
#define GMAC_DEBUG_TRCSTS_TXW 2
#define GMAC_DEBUG_TRCSTS_WRITE 3
#define GMAC_DEBUG_TXPAUSED BIT(19) /* MAC Transmitter in PAUSE */
/* MAC Transmit Frame Controller Status */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
-#define GMAC_DEBUG_TFCSTS_SHIFT 17
-#define GMAC_DEBUG_TFCSTS_IDLE 0
#define GMAC_DEBUG_TFCSTS_WAIT 1
#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2
#define GMAC_DEBUG_TFCSTS_XFER 3
/* MAC GMII or MII Transmit Protocol Engine Status */
#define GMAC_DEBUG_TPESTS BIT(16)
#define GMAC_DEBUG_RXFSTS_MASK GENMASK(9, 8) /* MTL Rx FIFO Fill-level */
-#define GMAC_DEBUG_RXFSTS_SHIFT 8
#define GMAC_DEBUG_RXFSTS_EMPTY 0
#define GMAC_DEBUG_RXFSTS_BT 1
#define GMAC_DEBUG_RXFSTS_AT 2
#define GMAC_DEBUG_RXFSTS_FULL 3
#define GMAC_DEBUG_RRCSTS_MASK GENMASK(6, 5) /* MTL Rx FIFO Read Controller */
-#define GMAC_DEBUG_RRCSTS_SHIFT 5
#define GMAC_DEBUG_RRCSTS_IDLE 0
#define GMAC_DEBUG_RRCSTS_RDATA 1
#define GMAC_DEBUG_RRCSTS_RSTAT 2
@@ -177,18 +147,13 @@ enum inter_frame_gap {
#define GMAC_DEBUG_RWCSTS BIT(4) /* MTL Rx FIFO Write Controller Active */
/* MAC Receive Frame Controller FIFO Status */
#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1)
-#define GMAC_DEBUG_RFCFCSTS_SHIFT 1
/* MAC GMII or MII Receive Protocol Engine Status */
#define GMAC_DEBUG_RPESTS BIT(0)
/*--- DMA BLOCK defines ---*/
/* DMA Bus Mode register defines */
-#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
-#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
-#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
-/* Programmable burst length (passed thorugh platform)*/
-#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
-#define DMA_BUS_MODE_PBL_SHIFT 8
+/* Programmable burst length (passed through platform) */
+#define DMA_BUS_MODE_PBL_MASK GENMASK(13, 8) /* Programmable Burst Len */
#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
enum rx_tx_priority_ratio {
@@ -199,23 +164,15 @@ enum rx_tx_priority_ratio {
#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
#define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */
-#define DMA_BUS_MODE_RPBL_MASK 0x007e0000 /* Rx-Programmable Burst Len */
-#define DMA_BUS_MODE_RPBL_SHIFT 17
+#define DMA_BUS_MODE_RPBL_MASK GENMASK(22, 17) /* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_USP 0x00800000
#define DMA_BUS_MODE_MAXPBL 0x01000000
#define DMA_BUS_MODE_AAL 0x02000000
/* DMA CRS Control and Status Register Mapping */
-#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */
-#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */
-/* DMA Bus Mode register defines */
-#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
-#define DMA_BUS_PR_RATIO_SHIFT 14
-#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
/* Disable Drop TCP/IP csum error */
-#define DMA_CONTROL_DT 0x04000000
#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
#define DMA_CONTROL_DFF 0x01000000 /* Disable flushing */
/* Threshold for Activating the FC */
@@ -247,8 +204,6 @@ enum ttc_control {
#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff
#define DMA_CONTROL_EFC 0x00000100
-#define DMA_CONTROL_FEF 0x00000080
-#define DMA_CONTROL_FUF 0x00000040
/* Receive flow control activation field
* RFA field in DMA control register, bits 23,10:9
@@ -285,20 +240,8 @@ enum ttc_control {
*/
#define RFA_FULL_MINUS_1K 0x00000000
-#define RFA_FULL_MINUS_2K 0x00000200
-#define RFA_FULL_MINUS_3K 0x00000400
-#define RFA_FULL_MINUS_4K 0x00000600
-#define RFA_FULL_MINUS_5K 0x00800000
-#define RFA_FULL_MINUS_6K 0x00800200
-#define RFA_FULL_MINUS_7K 0x00800400
-
-#define RFD_FULL_MINUS_1K 0x00000000
+
#define RFD_FULL_MINUS_2K 0x00000800
-#define RFD_FULL_MINUS_3K 0x00001000
-#define RFD_FULL_MINUS_4K 0x00001800
-#define RFD_FULL_MINUS_5K 0x00400000
-#define RFD_FULL_MINUS_6K 0x00400800
-#define RFD_FULL_MINUS_7K 0x00401000
enum rtc_control {
DMA_CONTROL_RTC_64 = 0x00000000,
@@ -311,16 +254,11 @@ enum rtc_control {
#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */
/* MMC registers offset */
-#define GMAC_MMC_CTRL 0x100
-#define GMAC_MMC_RX_INTR 0x104
-#define GMAC_MMC_TX_INTR 0x108
-#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
#define GMAC_EXTHASH_BASE 0x500
/* PTP and timestamping registers */
#define GMAC3_X_ATSNS GENMASK(29, 25)
-#define GMAC3_X_ATSNS_SHIFT 25
#define GMAC_PTP_TCR_ATSFC BIT(24)
#define GMAC_PTP_TCR_ATSEN0 BIT(25)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index a2ae136d2c0e..af566636fad9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -242,7 +242,7 @@ static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
if (duplex) {
pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
- flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+ flow |= FIELD_PREP(GMAC_FLOW_CTRL_PT_MASK, pause_time);
}
writel(flow, ioaddr + GMAC_FLOW_CTRL);
@@ -265,10 +265,10 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
writel(pmt, ioaddr + GMAC_PMT);
}
-static int dwmac1000_irq_status(struct mac_device_info *hw,
+static int dwmac1000_irq_status(struct stmmac_priv *priv,
struct stmmac_extra_stats *x)
{
- void __iomem *ioaddr = hw->pcsr;
+ void __iomem *ioaddr = priv->hw->pcsr;
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
int ret = 0;
@@ -304,7 +304,8 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
x->irq_rx_path_exit_lpi_mode_n++;
}
- dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
+ if (intr_status & (PCS_ANE_IRQ | PCS_LINK_IRQ))
+ stmmac_integrated_pcs_irq(priv, intr_status, x);
return ret;
}
@@ -378,8 +379,8 @@ static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
if (value & GMAC_DEBUG_TWCSTS)
x->mmtl_fifo_ctrl++;
if (value & GMAC_DEBUG_TRCSTS_MASK) {
- u32 trcsts = (value & GMAC_DEBUG_TRCSTS_MASK)
- >> GMAC_DEBUG_TRCSTS_SHIFT;
+ u32 trcsts = FIELD_GET(GMAC_DEBUG_TRCSTS_MASK, value);
+
if (trcsts == GMAC_DEBUG_TRCSTS_WRITE)
x->mtl_tx_fifo_read_ctrl_write++;
else if (trcsts == GMAC_DEBUG_TRCSTS_TXW)
@@ -392,8 +393,7 @@ static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
if (value & GMAC_DEBUG_TXPAUSED)
x->mac_tx_in_pause++;
if (value & GMAC_DEBUG_TFCSTS_MASK) {
- u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
- >> GMAC_DEBUG_TFCSTS_SHIFT;
+ u32 tfcsts = FIELD_GET(GMAC_DEBUG_TFCSTS_MASK, value);
if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
x->mac_tx_frame_ctrl_xfer++;
@@ -407,8 +407,7 @@ static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
if (value & GMAC_DEBUG_TPESTS)
x->mac_gmii_tx_proto_engine++;
if (value & GMAC_DEBUG_RXFSTS_MASK) {
- u32 rxfsts = (value & GMAC_DEBUG_RXFSTS_MASK)
- >> GMAC_DEBUG_RRCSTS_SHIFT;
+ u32 rxfsts = FIELD_GET(GMAC_DEBUG_RXFSTS_MASK, value);
if (rxfsts == GMAC_DEBUG_RXFSTS_FULL)
x->mtl_rx_fifo_fill_level_full++;
@@ -420,8 +419,7 @@ static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
x->mtl_rx_fifo_fill_level_empty++;
}
if (value & GMAC_DEBUG_RRCSTS_MASK) {
- u32 rrcsts = (value & GMAC_DEBUG_RRCSTS_MASK) >>
- GMAC_DEBUG_RRCSTS_SHIFT;
+ u32 rrcsts = FIELD_GET(GMAC_DEBUG_RRCSTS_MASK, value);
if (rrcsts == GMAC_DEBUG_RRCSTS_FLUSH)
x->mtl_rx_fifo_read_ctrl_flush++;
@@ -435,8 +433,8 @@ static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
if (value & GMAC_DEBUG_RWCSTS)
x->mtl_rx_fifo_ctrl_active++;
if (value & GMAC_DEBUG_RFCFCSTS_MASK)
- x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
- >> GMAC_DEBUG_RFCFCSTS_SHIFT;
+ x->mac_rx_frame_ctrl_fifo = FIELD_GET(GMAC_DEBUG_RFCFCSTS_MASK,
+ value);
if (value & GMAC_DEBUG_RPESTS)
x->mac_gmii_rx_proto_engine++;
}
@@ -534,7 +532,7 @@ void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv)
if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN))
return;
- num_snapshot = (ts_status & GMAC3_X_ATSNS) >> GMAC3_X_ATSNS_SHIFT;
+ num_snapshot = FIELD_GET(GMAC3_X_ATSNS, ts_status);
for (i = 0; i < num_snapshot; i++) {
read_lock_irqsave(&priv->ptp_lock, flags);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 5877fec9f6c3..3ac7a7949529 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -28,13 +28,10 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (axi->axi_xit_frm)
value |= DMA_AXI_LPI_XIT_FRM;
- value &= ~DMA_AXI_WR_OSR_LMT;
- value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) <<
- DMA_AXI_WR_OSR_LMT_SHIFT;
-
- value &= ~DMA_AXI_RD_OSR_LMT;
- value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) <<
- DMA_AXI_RD_OSR_LMT_SHIFT;
+ value = u32_replace_bits(value, axi->axi_wr_osr_lmt,
+ DMA_AXI_WR_OSR_LMT);
+ value = u32_replace_bits(value, axi->axi_rd_osr_lmt,
+ DMA_AXI_RD_OSR_LMT);
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
@@ -64,9 +61,8 @@ static void dwmac1000_dma_init_channel(struct stmmac_priv *priv,
if (dma_cfg->pblx8)
value |= DMA_BUS_MODE_MAXPBL;
value |= DMA_BUS_MODE_USP;
- value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
- value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
- value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+ value = u32_replace_bits(value, txpbl, DMA_BUS_MODE_PBL_MASK);
+ value = u32_replace_bits(value, rxpbl, DMA_BUS_MODE_RPBL_MASK);
/* Set the Fixed burst mode */
if (dma_cfg->fixed_burst)
@@ -243,6 +239,8 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
/* Alternate (enhanced) DESC mode */
dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+ dma_cap->actphyif = FIELD_GET(DMA_HW_FEAT_ACTPHYIF, hw_cap);
+
return 0;
}
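
u32_replace_bits() collapses the old clear-then-shift-then-or sequences for the AXI outstanding-request limits and the PBL/RPBL fields into a single expression. A hypothetical userspace model (the real helper is generated in <linux/bitfield.h>):

    #include <stdint.h>

    static inline uint32_t u32_replace_bits(uint32_t old, uint32_t val,
                                            uint32_t mask)
    {
            /* clear the field, then insert the new value shifted into place */
            return (old & ~mask) | ((val << __builtin_ctz(mask)) & mask);
    }

Because the same mask both clears and bounds the inserted value, the clear and the write can no longer drift apart the way a separate *_MASK/*_SHIFT pair could.
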
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 14e847c0e1a9..db4fbe64a38a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -53,7 +53,7 @@ static int dwmac100_rx_ipc_enable(struct mac_device_info *hw)
return 0;
}
-static int dwmac100_irq_status(struct mac_device_info *hw,
+static int dwmac100_irq_status(struct stmmac_priv *priv,
struct stmmac_extra_stats *x)
{
return 0;
@@ -108,7 +108,7 @@ static void dwmac100_set_filter(struct mac_device_info *hw,
memset(mc_filter, 0, sizeof(mc_filter));
netdev_for_each_mc_addr(ha, dev) {
/* The upper 6 bits of the calculated CRC are used to
- * index the contens of the hash table
+ * index the contents of the hash table
*/
int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
/* The most significant bit determines the register to
@@ -132,7 +132,7 @@ static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
if (duplex)
- flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+ flow |= FIELD_PREP(MAC_FLOW_CTRL_PT_MASK, pause_time);
writel(flow, ioaddr + MAC_FLOW_CTRL);
}
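
The comment corrected above describes the usual dwmac hash scheme: the top 6 bits of the frame's CRC-32 index one of 64 hash bits, and bit 5 of that index selects the high or low 32-bit filter register. A sketch of the indexing, with the crc argument standing in for the kernel's ether_crc() result:

    #include <stdint.h>

    static void hash_set_bit(uint32_t mc_filter[2], uint32_t crc)
    {
            unsigned int bit_nr = crc >> 26;        /* upper 6 bits */

            /* bit 5 picks the register, bits 4:0 the bit within it */
            mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
    }
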
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 82957db47c99..12b2bf2d739a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -22,7 +22,8 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg)
{
/* Enable Application Access by writing to DMA CSR0 */
- writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
+ writel(DMA_BUS_MODE_DEFAULT |
+ FIELD_PREP(DMA_BUS_MODE_PBL_MASK, dma_cfg->pbl),
ioaddr + DMA_BUS_MODE);
/* Mask interrupts by writing to CSR7 */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 3cb733781e1e..d797d936aee1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -95,7 +95,7 @@
/* MAC Flow Control TX */
#define GMAC_TX_FLOW_CTRL_TFE BIT(1)
-#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_TX_FLOW_CTRL_PT_MASK GENMASK(31, 16)
/* MAC Interrupt bitmap*/
#define GMAC_INT_RGSMIIS BIT(0)
@@ -142,23 +142,19 @@ enum power_event {
/* MAC Debug bitmap */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
-#define GMAC_DEBUG_TFCSTS_SHIFT 17
#define GMAC_DEBUG_TFCSTS_IDLE 0
#define GMAC_DEBUG_TFCSTS_WAIT 1
#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2
#define GMAC_DEBUG_TFCSTS_XFER 3
#define GMAC_DEBUG_TPESTS BIT(16)
#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1)
-#define GMAC_DEBUG_RFCFCSTS_SHIFT 1
#define GMAC_DEBUG_RPESTS BIT(0)
/* MAC config */
#define GMAC_CONFIG_ARPEN BIT(31)
#define GMAC_CONFIG_SARC GENMASK(30, 28)
-#define GMAC_CONFIG_SARC_SHIFT 28
#define GMAC_CONFIG_IPC BIT(27)
#define GMAC_CONFIG_IPG GENMASK(26, 24)
-#define GMAC_CONFIG_IPG_SHIFT 24
#define GMAC_CONFIG_2K BIT(22)
#define GMAC_CONFIG_ACS BIT(20)
#define GMAC_CONFIG_BE BIT(18)
@@ -166,7 +162,6 @@ enum power_event {
#define GMAC_CONFIG_JE BIT(16)
#define GMAC_CONFIG_PS BIT(15)
#define GMAC_CONFIG_FES BIT(14)
-#define GMAC_CONFIG_FES_SHIFT 14
#define GMAC_CONFIG_DM BIT(13)
#define GMAC_CONFIG_LM BIT(12)
#define GMAC_CONFIG_DCRS BIT(9)
@@ -175,11 +170,9 @@ enum power_event {
/* MAC extended config */
#define GMAC_CONFIG_EIPG GENMASK(29, 25)
-#define GMAC_CONFIG_EIPG_SHIFT 25
#define GMAC_CONFIG_EIPG_EN BIT(24)
#define GMAC_CONFIG_HDSMS GENMASK(22, 20)
-#define GMAC_CONFIG_HDSMS_SHIFT 20
-#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT)
+#define GMAC_CONFIG_HDSMS_256 FIELD_PREP_CONST(GMAC_CONFIG_HDSMS, 0x2)
/* MAC HW features0 bitmap */
#define GMAC_HW_FEAT_SAVLANINS BIT(27)
@@ -242,7 +235,6 @@ enum power_event {
/* MAC HW ADDR regs */
#define GMAC_HI_DCS GENMASK(18, 16)
-#define GMAC_HI_DCS_SHIFT 16
#define GMAC_HI_REG_AE BIT(31)
/* L3/L4 Filters regs */
@@ -257,7 +249,6 @@ enum power_event {
#define GMAC_L3SAM0 BIT(2)
#define GMAC_L3PEN0 BIT(0)
#define GMAC_L4DP0 GENMASK(31, 16)
-#define GMAC_L4DP0_SHIFT 16
#define GMAC_L4SP0 GENMASK(15, 0)
/* MAC Timestamp Status */
@@ -314,39 +305,32 @@ static inline u32 mtl_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define MTL_OP_MODE_TSF BIT(1)
#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
-#define MTL_OP_MODE_TQS_SHIFT 16
-#define MTL_OP_MODE_TTC_MASK 0x70
-#define MTL_OP_MODE_TTC_SHIFT 4
-
-#define MTL_OP_MODE_TTC_32 0
-#define MTL_OP_MODE_TTC_64 (1 << MTL_OP_MODE_TTC_SHIFT)
-#define MTL_OP_MODE_TTC_96 (2 << MTL_OP_MODE_TTC_SHIFT)
-#define MTL_OP_MODE_TTC_128 (3 << MTL_OP_MODE_TTC_SHIFT)
-#define MTL_OP_MODE_TTC_192 (4 << MTL_OP_MODE_TTC_SHIFT)
-#define MTL_OP_MODE_TTC_256 (5 << MTL_OP_MODE_TTC_SHIFT)
-#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
-#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_MASK GENMASK(6, 4)
+#define MTL_OP_MODE_TTC_32 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 0)
+#define MTL_OP_MODE_TTC_64 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 1)
+#define MTL_OP_MODE_TTC_96 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 2)
+#define MTL_OP_MODE_TTC_128 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 3)
+#define MTL_OP_MODE_TTC_192 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 4)
+#define MTL_OP_MODE_TTC_256 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 5)
+#define MTL_OP_MODE_TTC_384 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 6)
+#define MTL_OP_MODE_TTC_512 FIELD_PREP(MTL_OP_MODE_TTC_MASK, 7)
#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20)
-#define MTL_OP_MODE_RQS_SHIFT 20
#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14)
-#define MTL_OP_MODE_RFD_SHIFT 14
#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8)
-#define MTL_OP_MODE_RFA_SHIFT 8
#define MTL_OP_MODE_EHFC BIT(7)
#define MTL_OP_MODE_DIS_TCP_EF BIT(6)
#define MTL_OP_MODE_RTC_MASK GENMASK(1, 0)
-#define MTL_OP_MODE_RTC_SHIFT 0
-#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT)
-#define MTL_OP_MODE_RTC_64 0
-#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
-#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_32 FIELD_PREP(MTL_OP_MODE_RTC_MASK, 1)
+#define MTL_OP_MODE_RTC_64 FIELD_PREP(MTL_OP_MODE_RTC_MASK, 0)
+#define MTL_OP_MODE_RTC_96 FIELD_PREP(MTL_OP_MODE_RTC_MASK, 2)
+#define MTL_OP_MODE_RTC_128 FIELD_PREP(MTL_OP_MODE_RTC_MASK, 3)
/* MTL ETS Control register */
#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10
@@ -451,7 +435,6 @@ static inline u32 mtl_low_credx_base_addr(const struct dwmac4_addrs *addrs,
/* MTL debug: Tx FIFO Read Controller Status */
#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
-#define MTL_DEBUG_TRCSTS_SHIFT 1
#define MTL_DEBUG_TRCSTS_IDLE 0
#define MTL_DEBUG_TRCSTS_READ 1
#define MTL_DEBUG_TRCSTS_TXW 2
@@ -460,13 +443,11 @@ static inline u32 mtl_low_credx_base_addr(const struct dwmac4_addrs *addrs,
/* MAC debug: GMII or MII Transmit Protocol Engine Status */
#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
-#define MTL_DEBUG_RXFSTS_SHIFT 4
#define MTL_DEBUG_RXFSTS_EMPTY 0
#define MTL_DEBUG_RXFSTS_BT 1
#define MTL_DEBUG_RXFSTS_AT 2
#define MTL_DEBUG_RXFSTS_FULL 3
#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
-#define MTL_DEBUG_RRCSTS_SHIFT 1
#define MTL_DEBUG_RRCSTS_IDLE 0
#define MTL_DEBUG_RRCSTS_RDATA 1
#define MTL_DEBUG_RRCSTS_RSTAT 2
@@ -485,42 +466,12 @@ static inline u32 mtl_low_credx_base_addr(const struct dwmac4_addrs *addrs,
/* To dump the core regs excluding the Address Registers */
#define GMAC_REG_NUM 132
-/* MTL debug */
-#define MTL_DEBUG_TXSTSFSTS BIT(5)
-#define MTL_DEBUG_TXFSTS BIT(4)
-#define MTL_DEBUG_TWCSTS BIT(3)
-
-/* MTL debug: Tx FIFO Read Controller Status */
-#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
-#define MTL_DEBUG_TRCSTS_SHIFT 1
-#define MTL_DEBUG_TRCSTS_IDLE 0
-#define MTL_DEBUG_TRCSTS_READ 1
-#define MTL_DEBUG_TRCSTS_TXW 2
-#define MTL_DEBUG_TRCSTS_WRITE 3
-#define MTL_DEBUG_TXPAUSED BIT(0)
-
-/* MAC debug: GMII or MII Transmit Protocol Engine Status */
-#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
-#define MTL_DEBUG_RXFSTS_SHIFT 4
-#define MTL_DEBUG_RXFSTS_EMPTY 0
-#define MTL_DEBUG_RXFSTS_BT 1
-#define MTL_DEBUG_RXFSTS_AT 2
-#define MTL_DEBUG_RXFSTS_FULL 3
-#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
-#define MTL_DEBUG_RRCSTS_SHIFT 1
-#define MTL_DEBUG_RRCSTS_IDLE 0
-#define MTL_DEBUG_RRCSTS_RDATA 1
-#define MTL_DEBUG_RRCSTS_RSTAT 2
-#define MTL_DEBUG_RRCSTS_FLUSH 3
-#define MTL_DEBUG_RWCSTS BIT(0)
-
/* SGMII/RGMII status register */
#define GMAC_PHYIF_CTRLSTATUS_TC BIT(0)
#define GMAC_PHYIF_CTRLSTATUS_LUD BIT(1)
#define GMAC_PHYIF_CTRLSTATUS_SMIDRXS BIT(4)
#define GMAC_PHYIF_CTRLSTATUS_LNKMOD BIT(16)
#define GMAC_PHYIF_CTRLSTATUS_SPEED GENMASK(18, 17)
-#define GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT 17
#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19)
#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20)
#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21)
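
GMAC_CONFIG_HDSMS_256 uses FIELD_PREP_CONST() rather than FIELD_PREP() because the definition must remain an integer constant expression, usable in static initializers and case labels. A userspace model of the constant-expression trick, assuming a multiply-by-lowest-set-bit formulation (the kernel's actual macro differs in detail):

    #include <stdio.h>

    #define GENMASK(h, l)          (((~0u) >> (31 - (h))) & ((~0u) << (l)))
    /* the lowest set bit of the mask equals 1 << shift, so * acts as << */
    #define FIELD_PREP_CONST(m, v) (((v) * ((m) & ~((m) - 1))) & (m))

    #define GMAC_CONFIG_HDSMS GENMASK(22, 20)
    enum { GMAC_CONFIG_HDSMS_256 = FIELD_PREP_CONST(GMAC_CONFIG_HDSMS, 0x2) };

    int main(void)
    {
            printf("0x%08x\n", (unsigned)GMAC_CONFIG_HDSMS_256); /* 0x00200000 */
            return 0;
    }
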
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index a4282fd7c3c7..623868afe93d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -572,8 +572,8 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
flow = GMAC_TX_FLOW_CTRL_TFE;
if (duplex)
- flow |=
- (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+ flow |= FIELD_PREP(GMAC_TX_FLOW_CTRL_PT_MASK,
+ pause_time);
writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
}
@@ -615,10 +615,10 @@ static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
return ret;
}
-static int dwmac4_irq_status(struct mac_device_info *hw,
+static int dwmac4_irq_status(struct stmmac_priv *priv,
struct stmmac_extra_stats *x)
{
- void __iomem *ioaddr = hw->pcsr;
+ void __iomem *ioaddr = priv->hw->pcsr;
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
int ret = 0;
@@ -658,7 +658,8 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
x->irq_rx_path_exit_lpi_mode_n++;
}
- dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
+ if (intr_status & (PCS_ANE_IRQ | PCS_LINK_IRQ))
+ stmmac_integrated_pcs_irq(priv, intr_status, x);
return ret;
}
@@ -681,8 +682,8 @@ static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
if (value & MTL_DEBUG_TWCSTS)
x->mmtl_fifo_ctrl++;
if (value & MTL_DEBUG_TRCSTS_MASK) {
- u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
- >> MTL_DEBUG_TRCSTS_SHIFT;
+ u32 trcsts = FIELD_GET(MTL_DEBUG_TRCSTS_MASK, value);
+
if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
x->mtl_tx_fifo_read_ctrl_write++;
else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
@@ -700,8 +701,7 @@ static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
value = readl(ioaddr + MTL_CHAN_RX_DEBUG(dwmac4_addrs, queue));
if (value & MTL_DEBUG_RXFSTS_MASK) {
- u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
- >> MTL_DEBUG_RRCSTS_SHIFT;
+ u32 rxfsts = FIELD_GET(MTL_DEBUG_RXFSTS_MASK, value);
if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
x->mtl_rx_fifo_fill_level_full++;
@@ -713,8 +713,7 @@ static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
x->mtl_rx_fifo_fill_level_empty++;
}
if (value & MTL_DEBUG_RRCSTS_MASK) {
- u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
- MTL_DEBUG_RRCSTS_SHIFT;
+ u32 rrcsts = FIELD_GET(MTL_DEBUG_RRCSTS_MASK, value);
if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
x->mtl_rx_fifo_read_ctrl_flush++;
@@ -733,8 +732,7 @@ static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
value = readl(ioaddr + GMAC_DEBUG);
if (value & GMAC_DEBUG_TFCSTS_MASK) {
- u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
- >> GMAC_DEBUG_TFCSTS_SHIFT;
+ u32 tfcsts = FIELD_GET(GMAC_DEBUG_TFCSTS_MASK, value);
if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
x->mac_tx_frame_ctrl_xfer++;
@@ -748,8 +746,8 @@ static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
if (value & GMAC_DEBUG_TPESTS)
x->mac_gmii_tx_proto_engine++;
if (value & GMAC_DEBUG_RFCFCSTS_MASK)
- x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
- >> GMAC_DEBUG_RFCFCSTS_SHIFT;
+ x->mac_rx_frame_ctrl_fifo = FIELD_GET(GMAC_DEBUG_RFCFCSTS_MASK,
+ value);
if (value & GMAC_DEBUG_RPESTS)
x->mac_gmii_rx_proto_engine++;
}
@@ -770,8 +768,7 @@ static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
u32 value = readl(ioaddr + GMAC_CONFIG);
- value &= ~GMAC_CONFIG_SARC;
- value |= val << GMAC_CONFIG_SARC_SHIFT;
+ value = u32_replace_bits(value, val, GMAC_CONFIG_SARC);
writel(value, ioaddr + GMAC_CONFIG);
}
@@ -879,9 +876,9 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
if (sa) {
- value = match & GMAC_L4SP0;
+ value = FIELD_PREP(GMAC_L4SP0, match);
} else {
- value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
+ value = FIELD_PREP(GMAC_L4DP0, match);
}
writel(value, ioaddr + GMAC_L4_ADDR(filter_no));
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index aac68dc28dc1..e226dc6a1b17 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -17,11 +17,9 @@ static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p,
void __iomem *ioaddr)
{
- unsigned int tdes3;
+ u32 tdes3 = le32_to_cpu(p->des3);
int ret = tx_done;
- tdes3 = le32_to_cpu(p->des3);
-
/* Get tx owner first */
if (unlikely(tdes3 & TDES3_OWN))
return tx_dma_own;
@@ -46,8 +44,7 @@ static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x,
if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
(tdes3 & TDES3_EXCESSIVE_COLLISION)))
x->tx_collision +=
- (tdes3 & TDES3_COLLISION_COUNT_MASK)
- >> TDES3_COLLISION_COUNT_SHIFT;
+ FIELD_GET(TDES3_COLLISION_COUNT_MASK, tdes3);
if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
x->tx_deferred++;
@@ -73,9 +70,9 @@ static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x,
static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
- unsigned int rdes1 = le32_to_cpu(p->des1);
- unsigned int rdes2 = le32_to_cpu(p->des2);
- unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 rdes1 = le32_to_cpu(p->des1);
+ u32 rdes2 = le32_to_cpu(p->des2);
+ u32 rdes3 = le32_to_cpu(p->des3);
int message_type;
int ret = good_frame;
@@ -108,7 +105,7 @@ static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
ret = discard_frame;
}
- message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;
+ message_type = FIELD_GET(RDES1_PTP_MSG_TYPE_MASK, rdes1);
if (rdes1 & RDES1_IP_HDR_ERROR) {
x->ip_hdr_err++;
@@ -168,8 +165,7 @@ static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
x->l3_filter_match++;
if (rdes2 & RDES2_L4_FILTER_MATCH)
x->l4_filter_match++;
- if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
- >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
+ if (rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
x->l3_l4_filter_no_match++;
return ret;
@@ -255,15 +251,14 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
static int dwmac4_rx_check_timestamp(void *desc)
{
struct dma_desc *p = (struct dma_desc *)desc;
- unsigned int rdes0 = le32_to_cpu(p->des0);
- unsigned int rdes1 = le32_to_cpu(p->des1);
- unsigned int rdes3 = le32_to_cpu(p->des3);
- u32 own, ctxt;
+ u32 rdes0 = le32_to_cpu(p->des0);
+ u32 rdes1 = le32_to_cpu(p->des1);
+ u32 rdes3 = le32_to_cpu(p->des3);
+ bool own, ctxt;
int ret = 1;
own = rdes3 & RDES3_OWN;
- ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
- >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
+ ctxt = rdes3 & RDES3_CONTEXT_DESCRIPTOR;
if (likely(!own && ctxt)) {
if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
@@ -327,7 +322,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls, unsigned int tot_pkt_len)
{
- unsigned int tdes3 = le32_to_cpu(p->des3);
+ u32 tdes3 = le32_to_cpu(p->des3);
p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
@@ -337,10 +332,8 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
else
tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
- if (likely(csum_flag))
- tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
- else
- tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+ tdes3 = u32_replace_bits(tdes3, csum_flag ? TX_CIC_FULL : 0,
+ TDES3_CHECKSUM_INSERTION_MASK);
if (ls)
tdes3 |= TDES3_LAST_DESCRIPTOR;
@@ -366,21 +359,21 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
bool ls, unsigned int tcphdrlen,
unsigned int tcppayloadlen)
{
- unsigned int tdes3 = le32_to_cpu(p->des3);
+ u32 tdes3 = le32_to_cpu(p->des3);
if (len1)
- p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));
+ p->des2 |= cpu_to_le32(FIELD_PREP(TDES2_BUFFER1_SIZE_MASK,
+ len1));
if (len2)
- p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
- & TDES2_BUFFER2_SIZE_MASK);
+ p->des2 |= cpu_to_le32(FIELD_PREP(TDES2_BUFFER2_SIZE_MASK,
+ len2));
if (is_fs) {
tdes3 |= TDES3_FIRST_DESCRIPTOR |
TDES3_TCP_SEGMENTATION_ENABLE |
- ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
- TDES3_SLOT_NUMBER_MASK) |
- ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
+ FIELD_PREP(TDES3_SLOT_NUMBER_MASK, tcphdrlen) |
+ FIELD_PREP(TDES3_TCP_PKT_PAYLOAD_MASK, tcppayloadlen);
} else {
tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
}
@@ -491,9 +484,8 @@ static void dwmac4_clear(struct dma_desc *p)
static void dwmac4_set_sarc(struct dma_desc *p, u32 sarc_type)
{
- sarc_type <<= TDES3_SA_INSERT_CTRL_SHIFT;
-
- p->des3 |= cpu_to_le32(sarc_type & TDES3_SA_INSERT_CTRL_MASK);
+ p->des3 |= cpu_to_le32(FIELD_PREP(TDES3_SA_INSERT_CTRL_MASK,
+ sarc_type));
}
static int set_16kib_bfsize(int mtu)
@@ -515,14 +507,9 @@ static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
/* Inner VLAN */
if (inner_type) {
- u32 des = inner_tag << TDES2_IVT_SHIFT;
-
- des &= TDES2_IVT_MASK;
- p->des2 = cpu_to_le32(des);
-
- des = inner_type << TDES3_IVTIR_SHIFT;
- des &= TDES3_IVTIR_MASK;
- p->des3 = cpu_to_le32(des | TDES3_IVLTV);
+ p->des2 = cpu_to_le32(FIELD_PREP(TDES2_IVT_MASK, inner_tag));
+ p->des3 = cpu_to_le32(FIELD_PREP(TDES3_IVTIR_MASK, inner_type) |
+ TDES3_IVLTV);
}
/* Outer VLAN */
@@ -534,8 +521,7 @@ static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
{
- type <<= TDES2_VLAN_TAG_SHIFT;
- p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
+ p->des2 |= cpu_to_le32(FIELD_PREP(TDES2_VLAN_TAG_MASK, type));
}
static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
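
Alongside the FIELD_GET() conversions, the descriptor locals change from unsigned int to u32: des0..des3 live in DMA memory as little-endian words, so each load passes through le32_to_cpu() and is then handled as a fixed-width 32-bit value. A userspace model with the same GENMASK()/FIELD_GET() stand-ins as the earlier sketch, reading the collision count from a hypothetical write-back word:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK(h, l)   (((~0u) >> (31 - (h))) & ((~0u) << (l)))
    #define FIELD_GET(m, v) (((v) & (m)) >> __builtin_ctz(m))

    #define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4)

    static inline uint32_t le32_to_cpu_model(uint32_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            return __builtin_bswap32(v);    /* byte-swap on big-endian hosts */
    #else
            return v;                       /* little-endian: identity */
    #endif
    }

    int main(void)
    {
            uint32_t tdes3 = le32_to_cpu_model(0x00000050); /* bits 7:4 = 5 */

            printf("collisions=%u\n",
                   (unsigned)FIELD_GET(TDES3_COLLISION_COUNT_MASK, tdes3));
            return 0;
    }
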
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 806555976496..fb1fea5b0e6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -18,15 +18,11 @@
/* TDES2 (read format) */
#define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0)
#define TDES2_VLAN_TAG_MASK GENMASK(15, 14)
-#define TDES2_VLAN_TAG_SHIFT 14
#define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16)
-#define TDES2_BUFFER2_SIZE_MASK_SHIFT 16
#define TDES3_IVTIR_MASK GENMASK(19, 18)
-#define TDES3_IVTIR_SHIFT 18
#define TDES3_IVLTV BIT(17)
#define TDES2_TIMESTAMP_ENABLE BIT(30)
#define TDES2_IVT_MASK GENMASK(31, 16)
-#define TDES2_IVT_SHIFT 16
#define TDES2_INTERRUPT_ON_COMPLETION BIT(31)
/* TDES3 (read format) */
@@ -34,13 +30,10 @@
#define TDES3_VLAN_TAG GENMASK(15, 0)
#define TDES3_VLTV BIT(16)
#define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16)
-#define TDES3_CHECKSUM_INSERTION_SHIFT 16
#define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0)
#define TDES3_TCP_SEGMENTATION_ENABLE BIT(18)
-#define TDES3_HDR_LEN_SHIFT 19
#define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19)
#define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23)
-#define TDES3_SA_INSERT_CTRL_SHIFT 23
#define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26)
/* TDES3 (write back format) */
@@ -49,7 +42,6 @@
#define TDES3_UNDERFLOW_ERROR BIT(2)
#define TDES3_EXCESSIVE_DEFERRAL BIT(3)
#define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4)
-#define TDES3_COLLISION_COUNT_SHIFT 4
#define TDES3_EXCESSIVE_COLLISION BIT(8)
#define TDES3_LATE_COLLISION BIT(9)
#define TDES3_NO_CARRIER BIT(10)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 7b513324cfb0..60b880cdd9da 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -27,13 +27,10 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (axi->axi_xit_frm)
value |= DMA_AXI_LPI_XIT_FRM;
- value &= ~DMA_AXI_WR_OSR_LMT;
- value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
- DMA_AXI_WR_OSR_LMT_SHIFT;
-
- value &= ~DMA_AXI_RD_OSR_LMT;
- value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
- DMA_AXI_RD_OSR_LMT_SHIFT;
+ value = u32_replace_bits(value, axi->axi_wr_osr_lmt,
+ DMA_AXI_WR_OSR_LMT);
+ value = u32_replace_bits(value, axi->axi_rd_osr_lmt,
+ DMA_AXI_RD_OSR_LMT);
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
@@ -55,7 +52,7 @@ static void dwmac4_dma_init_rx_chan(struct stmmac_priv *priv,
u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
- value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+ value = value | FIELD_PREP(DMA_CHAN_RX_CTRL_RXPBL_MASK, rxpbl);
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
@@ -76,7 +73,7 @@ static void dwmac4_dma_init_tx_chan(struct stmmac_priv *priv,
u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
- value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+ value = value | FIELD_PREP(DMA_CHAN_TX_CTRL_TXPBL_MASK, txpbl);
/* Enable OSP to get best performance */
value |= DMA_CONTROL_OSP;
@@ -101,7 +98,7 @@ static void dwmac4_dma_init_channel(struct stmmac_priv *priv,
/* common channel control register config */
value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (dma_cfg->pblx8)
- value = value | DMA_BUS_MODE_PBL;
+ value = value | DMA_CHAN_CTRL_PBLX8;
writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
/* Mask interrupts by writing to CSR7 */
@@ -119,7 +116,7 @@ static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
/* common channel control register config */
value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (dma_cfg->pblx8)
- value = value | DMA_BUS_MODE_PBL;
+ value = value | DMA_CHAN_CTRL_PBLX8;
writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
@@ -151,10 +148,9 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
value = readl(ioaddr + DMA_BUS_MODE);
- if (dma_cfg->multi_msi_en) {
- value &= ~DMA_BUS_MODE_INTM_MASK;
- value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT);
- }
+ if (dma_cfg->multi_msi_en)
+ value = u32_replace_bits(value, DMA_BUS_MODE_INTM_MODE1,
+ DMA_BUS_MODE_INTM_MASK);
if (dma_cfg->dche)
value |= DMA_BUS_MODE_DCHE;
@@ -264,7 +260,7 @@ static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
}
mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
- mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
+ mtl_rx_op |= FIELD_PREP(MTL_OP_MODE_RQS_MASK, rqs);
/* Enable flow control only if each channel gets 4 KiB or more FIFO and
* only if channel is not an AVB channel.
@@ -295,11 +291,10 @@ static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
break;
}
- mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
- mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
-
- mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
- mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
+ mtl_rx_op = u32_replace_bits(mtl_rx_op, rfd,
+ MTL_OP_MODE_RFD_MASK);
+ mtl_rx_op = u32_replace_bits(mtl_rx_op, rfa,
+ MTL_OP_MODE_RFA_MASK);
}
writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
@@ -354,8 +349,8 @@ static void dwmac4_dma_tx_chan_op_mode(struct stmmac_priv *priv,
mtl_tx_op |= MTL_OP_MODE_TXQEN;
else
mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
- mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
- mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;
+
+ mtl_tx_op = u32_replace_bits(mtl_tx_op, tqs, MTL_OP_MODE_TQS_MASK);
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs, channel));
}
@@ -387,6 +382,8 @@ static int dwmac4_get_hw_feature(void __iomem *ioaddr,
dma_cap->vlins = (hw_cap & GMAC_HW_FEAT_SAVLANINS) >> 27;
dma_cap->arpoffsel = (hw_cap & GMAC_HW_FEAT_ARPOFFSEL) >> 9;
+ dma_cap->actphyif = FIELD_GET(DMA_HW_FEAT_ACTPHYIF, hw_cap);
+
/* MAC HW feature1 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27;
@@ -496,8 +493,7 @@ static void dwmac4_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
- value &= ~DMA_RBSZ_MASK;
- value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
+ value = u32_replace_bits(value, bfsize, DMA_RBSZ_MASK);
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index f27126f05551..9d9077a4ac9f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -16,69 +16,33 @@
#define DMA_CHANNEL_NB_MAX 1
#define DMA_BUS_MODE 0x00001000
-#define DMA_SYS_BUS_MODE 0x00001004
-#define DMA_STATUS 0x00001008
-#define DMA_DEBUG_STATUS_0 0x0000100c
-#define DMA_DEBUG_STATUS_1 0x00001010
-#define DMA_DEBUG_STATUS_2 0x00001014
-#define DMA_AXI_BUS_MODE 0x00001028
-#define DMA_TBS_CTRL 0x00001050
-/* DMA Bus Mode bitmap */
#define DMA_BUS_MODE_DCHE BIT(19)
#define DMA_BUS_MODE_INTM_MASK GENMASK(17, 16)
-#define DMA_BUS_MODE_INTM_SHIFT 16
#define DMA_BUS_MODE_INTM_MODE1 0x1
#define DMA_BUS_MODE_SFT_RESET BIT(0)
-/* DMA SYS Bus Mode bitmap */
-#define DMA_BUS_MODE_SPH BIT(24)
-#define DMA_BUS_MODE_PBL BIT(16)
-#define DMA_BUS_MODE_PBL_SHIFT 16
-#define DMA_BUS_MODE_RPBL_SHIFT 16
+#define DMA_SYS_BUS_MODE 0x00001004
+
#define DMA_BUS_MODE_MB BIT(14)
#define DMA_BUS_MODE_FB BIT(0)
-/* DMA Interrupt top status */
-#define DMA_STATUS_MAC BIT(17)
-#define DMA_STATUS_MTL BIT(16)
-#define DMA_STATUS_CHAN7 BIT(7)
-#define DMA_STATUS_CHAN6 BIT(6)
-#define DMA_STATUS_CHAN5 BIT(5)
-#define DMA_STATUS_CHAN4 BIT(4)
-#define DMA_STATUS_CHAN3 BIT(3)
-#define DMA_STATUS_CHAN2 BIT(2)
-#define DMA_STATUS_CHAN1 BIT(1)
-#define DMA_STATUS_CHAN0 BIT(0)
-
-/* DMA debug status bitmap */
-#define DMA_DEBUG_STATUS_TS_MASK 0xf
-#define DMA_DEBUG_STATUS_RS_MASK 0xf
-
-/* DMA AXI bitmap */
+#define DMA_STATUS 0x00001008
+
+#define DMA_AXI_BUS_MODE 0x00001028
+
#define DMA_AXI_EN_LPI BIT(31)
#define DMA_AXI_LPI_XIT_FRM BIT(30)
#define DMA_AXI_WR_OSR_LMT GENMASK(27, 24)
-#define DMA_AXI_WR_OSR_LMT_SHIFT 24
#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
-#define DMA_AXI_RD_OSR_LMT_SHIFT 16
-
-#define DMA_AXI_OSR_MAX 0xf
-#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
- (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
#define DMA_SYS_BUS_MB BIT(14)
-#define DMA_AXI_1KBBE BIT(13)
#define DMA_SYS_BUS_AAL DMA_AXI_AAL
#define DMA_SYS_BUS_EAME BIT(11)
#define DMA_SYS_BUS_FB BIT(0)
-#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
- DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
- DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
- DMA_AXI_BLEN4)
+#define DMA_TBS_CTRL 0x00001050
-/* DMA TBS Control */
#define DMA_TBS_FTOS GENMASK(31, 8)
#define DMA_TBS_FTOV BIT(0)
#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV)
@@ -100,11 +64,25 @@ static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
return addr;
}
-#define DMA_CHAN_REG_NUMBER 17
-
#define DMA_CHAN_CONTROL(addrs, x) dma_chanx_base_addr(addrs, x)
+
+#define DMA_CHAN_CTRL_PBLX8 BIT(16)
+#define DMA_CONTROL_SPH BIT(24)
+
#define DMA_CHAN_TX_CONTROL(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x4)
+
+#define DMA_CONTROL_EDSE BIT(28)
+#define DMA_CHAN_TX_CTRL_TXPBL_MASK GENMASK(21, 16)
+#define DMA_CONTROL_TSE BIT(12)
+#define DMA_CONTROL_OSP BIT(4)
+#define DMA_CONTROL_ST BIT(0)
+
#define DMA_CHAN_RX_CONTROL(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x8)
+
+#define DMA_CHAN_RX_CTRL_RXPBL_MASK GENMASK(21, 16)
+#define DMA_RBSZ_MASK GENMASK(14, 1)
+#define DMA_CONTROL_SR BIT(0)
+
#define DMA_CHAN_TX_BASE_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x10)
#define DMA_CHAN_TX_BASE_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x14)
#define DMA_CHAN_RX_BASE_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x18)
@@ -113,7 +91,41 @@ static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define DMA_CHAN_RX_END_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x28)
#define DMA_CHAN_TX_RING_LEN(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x2c)
#define DMA_CHAN_RX_RING_LEN(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x30)
+
#define DMA_CHAN_INTR_ENA(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x34)
+
+#define DMA_CHAN_INTR_ENA_NIE BIT(16)
+#define DMA_CHAN_INTR_ENA_AIE BIT(15)
+#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15)
+#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14)
+#define DMA_CHAN_INTR_ENA_FBE BIT(12)
+#define DMA_CHAN_INTR_ENA_RIE BIT(6)
+#define DMA_CHAN_INTR_ENA_TIE BIT(0)
+
+#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \
+ DMA_CHAN_INTR_ENA_RIE | \
+ DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \
+ DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.00 */
+#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
+ DMA_CHAN_INTR_ABNORMAL)
+#define DMA_CHAN_INTR_DEFAULT_RX (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX (DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
+ DMA_CHAN_INTR_ENA_RIE | \
+ DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \
+ DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.10a */
+#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
+ DMA_CHAN_INTR_ABNORMAL_4_10)
+#define DMA_CHAN_INTR_DEFAULT_RX_4_10 (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX_4_10 (DMA_CHAN_INTR_ENA_TIE)
+
#define DMA_CHAN_RX_WATCHDOG(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x38)
#define DMA_CHAN_SLOT_CTRL_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x3c)
#define DMA_CHAN_CUR_TX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x44)
@@ -124,26 +136,8 @@ static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define DMA_CHAN_CUR_RX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x5c)
#define DMA_CHAN_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x60)
-/* DMA Control X */
-#define DMA_CONTROL_SPH BIT(24)
-#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
-
-/* DMA Tx Channel X Control register defines */
-#define DMA_CONTROL_EDSE BIT(28)
-#define DMA_CONTROL_TSE BIT(12)
-#define DMA_CONTROL_OSP BIT(4)
-#define DMA_CONTROL_ST BIT(0)
-
-/* DMA Rx Channel X Control register defines */
-#define DMA_CONTROL_SR BIT(0)
-#define DMA_RBSZ_MASK GENMASK(14, 1)
-#define DMA_RBSZ_SHIFT 1
-
/* Interrupt status per channel */
#define DMA_CHAN_STATUS_REB GENMASK(21, 19)
-#define DMA_CHAN_STATUS_REB_SHIFT 19
-#define DMA_CHAN_STATUS_TEB GENMASK(18, 16)
-#define DMA_CHAN_STATUS_TEB_SHIFT 16
#define DMA_CHAN_STATUS_NIS BIT(15)
#define DMA_CHAN_STATUS_AIS BIT(14)
#define DMA_CHAN_STATUS_CDE BIT(13)
@@ -177,53 +171,6 @@ static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
DMA_CHAN_STATUS_TI | \
DMA_CHAN_STATUS_MSK_COMMON)
-/* Interrupt enable bits per channel */
-#define DMA_CHAN_INTR_ENA_NIE BIT(16)
-#define DMA_CHAN_INTR_ENA_AIE BIT(15)
-#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15)
-#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14)
-#define DMA_CHAN_INTR_ENA_CDE BIT(13)
-#define DMA_CHAN_INTR_ENA_FBE BIT(12)
-#define DMA_CHAN_INTR_ENA_ERE BIT(11)
-#define DMA_CHAN_INTR_ENA_ETE BIT(10)
-#define DMA_CHAN_INTR_ENA_RWE BIT(9)
-#define DMA_CHAN_INTR_ENA_RSE BIT(8)
-#define DMA_CHAN_INTR_ENA_RBUE BIT(7)
-#define DMA_CHAN_INTR_ENA_RIE BIT(6)
-#define DMA_CHAN_INTR_ENA_TBUE BIT(2)
-#define DMA_CHAN_INTR_ENA_TSE BIT(1)
-#define DMA_CHAN_INTR_ENA_TIE BIT(0)
-
-#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \
- DMA_CHAN_INTR_ENA_RIE | \
- DMA_CHAN_INTR_ENA_TIE)
-
-#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \
- DMA_CHAN_INTR_ENA_FBE)
-/* DMA default interrupt mask for 4.00 */
-#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
- DMA_CHAN_INTR_ABNORMAL)
-#define DMA_CHAN_INTR_DEFAULT_RX (DMA_CHAN_INTR_ENA_RIE)
-#define DMA_CHAN_INTR_DEFAULT_TX (DMA_CHAN_INTR_ENA_TIE)
-
-#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
- DMA_CHAN_INTR_ENA_RIE | \
- DMA_CHAN_INTR_ENA_TIE)
-
-#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \
- DMA_CHAN_INTR_ENA_FBE)
-/* DMA default interrupt mask for 4.10a */
-#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
- DMA_CHAN_INTR_ABNORMAL_4_10)
-#define DMA_CHAN_INTR_DEFAULT_RX_4_10 (DMA_CHAN_INTR_ENA_RIE)
-#define DMA_CHAN_INTR_DEFAULT_TX_4_10 (DMA_CHAN_INTR_ENA_TIE)
-
-/* channel 0 specific fields */
-#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12)
-#define DMA_CHAN0_DBG_STAT_TPS_SHIFT 12
-#define DMA_CHAN0_DBG_STAT_RPS GENMASK(11, 8)
-#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
-
int dwmac4_dma_reset(void __iomem *ioaddr);
void dwmac4_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 57c03d491774..c098047a3bff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -234,7 +234,7 @@ void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
* bit that has no effect on the High Reg 0 where the bit 31 (MO)
* is RO.
*/
- data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT);
+ data |= FIELD_PREP(GMAC_HI_DCS, STMMAC_CHAN0);
writel(data | GMAC_HI_REG_AE, ioaddr + high);
data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
writel(data, ioaddr + low);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 054ecb20ce3f..e1c37ac2c99d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -13,13 +13,86 @@
/* DMA CRS Control and Status Register Mapping */
#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+
#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+
#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_TS_MASK GENMASK(22, 20) /* Transmit Process State */
+#define DMA_STATUS_RS_MASK GENMASK(19, 17) /* Receive Process State */
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+#define DMA_STATUS_MSK_COMMON (DMA_STATUS_NIS | \
+ DMA_STATUS_AIS | \
+ DMA_STATUS_FBI)
+
+#define DMA_STATUS_MSK_RX (DMA_STATUS_ERI | \
+ DMA_STATUS_RWT | \
+ DMA_STATUS_RPS | \
+ DMA_STATUS_RU | \
+ DMA_STATUS_RI | \
+ DMA_STATUS_OVF | \
+ DMA_STATUS_MSK_COMMON)
+
+#define DMA_STATUS_MSK_TX (DMA_STATUS_ETI | \
+ DMA_STATUS_UNF | \
+ DMA_STATUS_TJT | \
+ DMA_STATUS_TU | \
+ DMA_STATUS_TPS | \
+ DMA_STATUS_TI | \
+ DMA_STATUS_MSK_COMMON)
+
#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/* DMA Abnormal interrupt */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+#define DMA_INTR_DEFAULT_RX (DMA_INTR_ENA_RIE)
+#define DMA_INTR_DEFAULT_TX (DMA_INTR_ENA_TIE)
+
#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
/* Following DMA defines are channels oriented */
@@ -42,13 +115,9 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan)
#define DMA_CHAN_STATUS(chan) dma_chan_base_addr(DMA_STATUS, chan)
#define DMA_CHAN_CONTROL(chan) dma_chan_base_addr(DMA_CONTROL, chan)
#define DMA_CHAN_INTR_ENA(chan) dma_chan_base_addr(DMA_INTR_ENA, chan)
-#define DMA_CHAN_MISSED_FRAME_CTR(chan) \
- dma_chan_base_addr(DMA_MISSED_FRAME_CTR, chan)
#define DMA_CHAN_RX_WATCHDOG(chan) \
dma_chan_base_addr(DMA_RX_WATCHDOG, chan)
-/* SW Reset */
-#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
/* Rx watchdog register */
#define DMA_RX_WATCHDOG 0x00001024
@@ -59,19 +128,7 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan)
#define DMA_AXI_EN_LPI BIT(31)
#define DMA_AXI_LPI_XIT_FRM BIT(30)
#define DMA_AXI_WR_OSR_LMT GENMASK(23, 20)
-#define DMA_AXI_WR_OSR_LMT_SHIFT 20
-#define DMA_AXI_WR_OSR_LMT_MASK 0xf
#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
-#define DMA_AXI_RD_OSR_LMT_SHIFT 16
-#define DMA_AXI_RD_OSR_LMT_MASK 0xf
-
-#define DMA_AXI_OSR_MAX 0xf
-#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
- (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
-#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
- DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
- DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
- DMA_AXI_BLEN4)
#define DMA_AXI_1KBBE BIT(13)
@@ -81,89 +138,6 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan)
#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
-/* DMA Control register defines */
-#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
-#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
-
-/* DMA Normal interrupt */
-#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
-#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
-#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
-#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
-
-#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
- DMA_INTR_ENA_TIE)
-
-/* DMA Abnormal interrupt */
-#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
-#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
-#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
-#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
-#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
-#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
-#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
-#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
-#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
-
-#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
- DMA_INTR_ENA_UNE)
-
-/* DMA default interrupt mask */
-#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
-#define DMA_INTR_DEFAULT_RX (DMA_INTR_ENA_RIE)
-#define DMA_INTR_DEFAULT_TX (DMA_INTR_ENA_TIE)
-
-/* DMA Status register defines */
-#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
-#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
-#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
-#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
-#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
-#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
-#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
-#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
-#define DMA_STATUS_TS_SHIFT 20
-#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
-#define DMA_STATUS_RS_SHIFT 17
-#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
-#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
-#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
-#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
-#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
-#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
-#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
-#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
-#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
-#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
-#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
-#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
-#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
-#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
-
-#define DMA_STATUS_MSK_COMMON (DMA_STATUS_NIS | \
- DMA_STATUS_AIS | \
- DMA_STATUS_FBI)
-
-#define DMA_STATUS_MSK_RX (DMA_STATUS_ERI | \
- DMA_STATUS_RWT | \
- DMA_STATUS_RPS | \
- DMA_STATUS_RU | \
- DMA_STATUS_RI | \
- DMA_STATUS_OVF | \
- DMA_STATUS_MSK_COMMON)
-
-#define DMA_STATUS_MSK_TX (DMA_STATUS_ETI | \
- DMA_STATUS_UNF | \
- DMA_STATUS_TJT | \
- DMA_STATUS_TU | \
- DMA_STATUS_TPS | \
- DMA_STATUS_TI | \
- DMA_STATUS_MSK_COMMON)
-
#define NUM_DWMAC100_DMA_REGS 9
#define NUM_DWMAC1000_DMA_REGS 23
#define NUM_DWMAC4_DMA_REGS 27
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 97a803d68e3a..a0383f9486c2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -97,10 +97,7 @@ void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
#ifdef DWMAC_DMA_DEBUG
static void show_tx_process_state(unsigned int status)
{
- unsigned int state;
- state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
-
- switch (state) {
+ switch (FIELD_GET(DMA_STATUS_TS_MASK, status)) {
case 0:
pr_debug("- TX (Stopped): Reset or Stop command\n");
break;
@@ -128,10 +125,7 @@ static void show_tx_process_state(unsigned int status)
static void show_rx_process_state(unsigned int status)
{
- unsigned int state;
- state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
-
- switch (state) {
+ switch (FIELD_GET(DMA_STATUS_RS_MASK, status)) {
case 0:
pr_debug("- RX (Stopped): Reset or Stop command\n");
break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index fecda3034d36..51943705a2b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -24,17 +24,15 @@
#define XGMAC_CONFIG_SS_2500 (0x6 << XGMAC_CONFIG_SS_OFF)
#define XGMAC_CONFIG_SS_10_MII (0x7 << XGMAC_CONFIG_SS_OFF)
#define XGMAC_CONFIG_SARC GENMASK(22, 20)
-#define XGMAC_CONFIG_SARC_SHIFT 20
#define XGMAC_CONFIG_JD BIT(16)
#define XGMAC_CONFIG_TE BIT(0)
#define XGMAC_CORE_INIT_TX (XGMAC_CONFIG_JD)
#define XGMAC_RX_CONFIG 0x00000004
#define XGMAC_CONFIG_ARPEN BIT(31)
#define XGMAC_CONFIG_GPSL GENMASK(29, 16)
-#define XGMAC_CONFIG_GPSL_SHIFT 16
#define XGMAC_CONFIG_HDSMS GENMASK(14, 12)
#define XGMAC_CONFIG_HDSMS_SHIFT 12
-#define XGMAC_CONFIG_HDSMS_256 (0x2 << XGMAC_CONFIG_HDSMS_SHIFT)
+#define XGMAC_CONFIG_HDSMS_256 FIELD_PREP(XGMAC_CONFIG_HDSMS, 0x2)
#define XGMAC_CONFIG_S2KP BIT(11)
#define XGMAC_CONFIG_LM BIT(10)
#define XGMAC_CONFIG_IPC BIT(9)
@@ -44,8 +42,10 @@
#define XGMAC_CONFIG_CST BIT(2)
#define XGMAC_CONFIG_ACS BIT(1)
#define XGMAC_CONFIG_RE BIT(0)
-#define XGMAC_CORE_INIT_RX (XGMAC_CONFIG_GPSLCE | XGMAC_CONFIG_WD | \
- (XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT))
+#define XGMAC_CORE_INIT_RX (XGMAC_CONFIG_GPSLCE | \
+ XGMAC_CONFIG_WD | \
+ FIELD_PREP(XGMAC_CONFIG_GPSL, \
+ XGMAC_JUMBO_LEN))
#define XGMAC_PACKET_FILTER 0x00000008
#define XGMAC_FILTER_RA BIT(31)
#define XGMAC_FILTER_IPFE BIT(20)
@@ -90,7 +90,6 @@
#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE)
#define XGMAC_Qx_TX_FLOW_CTRL(x) (0x00000070 + (x) * 4)
#define XGMAC_PT GENMASK(31, 16)
-#define XGMAC_PT_SHIFT 16
#define XGMAC_TFE BIT(1)
#define XGMAC_RX_FLOW_CTRL 0x00000090
#define XGMAC_RFE BIT(0)
@@ -108,6 +107,7 @@
#define XGMAC_HWFEAT_VXN BIT(29)
#define XGMAC_HWFEAT_SAVLANINS BIT(27)
#define XGMAC_HWFEAT_TSSTSSEL GENMASK(26, 25)
+#define XGMAC_HWFEAT_PHYSEL GENMASK(24, 23)
#define XGMAC_HWFEAT_ADDMACADRSEL GENMASK(22, 18)
#define XGMAC_HWFEAT_RXCOESEL BIT(16)
#define XGMAC_HWFEAT_TXCOESEL BIT(14)
@@ -180,12 +180,11 @@
#define XGMAC_ADDR_MAX 32
#define XGMAC_AE BIT(31)
#define XGMAC_DCS GENMASK(19, 16)
-#define XGMAC_DCS_SHIFT 16
#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
#define XGMAC_L3L4_ADDR_CTRL 0x00000c00
#define XGMAC_IDDR GENMASK(16, 8)
-#define XGMAC_IDDR_SHIFT 8
-#define XGMAC_IDDR_FNUM 4
+#define XGMAC_IDDR_FNUM_MASK GENMASK(7, 4) /* FNUM within IDDR */
+#define XGMAC_IDDR_REG_MASK GENMASK(3, 0) /* REG within IDDR */
#define XGMAC_TT BIT(1)
#define XGMAC_XB BIT(0)
#define XGMAC_L3L4_DATA 0x00000c04
@@ -204,7 +203,6 @@
#define XGMAC_L3PEN0 BIT(0)
#define XGMAC_L4_ADDR 0x1
#define XGMAC_L4DP0 GENMASK(31, 16)
-#define XGMAC_L4DP0_SHIFT 16
#define XGMAC_L4SP0 GENMASK(15, 0)
#define XGMAC_L3_ADDR0 0x4
#define XGMAC_L3_ADDR1 0x5
@@ -224,7 +222,6 @@
#define XGMAC_RSS_DATA 0x00000c8c
#define XGMAC_TIMESTAMP_STATUS 0x00000d20
#define XGMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25)
-#define XGMAC_TIMESTAMP_ATSNS_SHIFT 25
#define XGMAC_TXTSC BIT(15)
#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
#define XGMAC_TXTSSTSLO GENMASK(30, 0)
@@ -290,13 +287,9 @@
#define XGMAC_DPP_DISABLE BIT(0)
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
-#define XGMAC_TQS_SHIFT 16
#define XGMAC_Q2TCMAP GENMASK(10, 8)
-#define XGMAC_Q2TCMAP_SHIFT 8
#define XGMAC_TTC GENMASK(6, 4)
-#define XGMAC_TTC_SHIFT 4
#define XGMAC_TXQEN GENMASK(3, 2)
-#define XGMAC_TXQEN_SHIFT 2
#define XGMAC_TSF BIT(1)
#define XGMAC_MTL_TCx_ETS_CONTROL(x) (0x00001110 + (0x80 * (x)))
#define XGMAC_MTL_TCx_QUANTUM_WEIGHT(x) (0x00001118 + (0x80 * (x)))
@@ -310,16 +303,12 @@
#define XGMAC_ETS (0x2 << 0)
#define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x)))
#define XGMAC_RQS GENMASK(25, 16)
-#define XGMAC_RQS_SHIFT 16
#define XGMAC_EHFC BIT(7)
#define XGMAC_RSF BIT(5)
#define XGMAC_RTC GENMASK(1, 0)
-#define XGMAC_RTC_SHIFT 0
#define XGMAC_MTL_RXQ_FLOW_CONTROL(x) (0x00001150 + (0x80 * (x)))
#define XGMAC_RFD GENMASK(31, 17)
-#define XGMAC_RFD_SHIFT 17
#define XGMAC_RFA GENMASK(15, 1)
-#define XGMAC_RFA_SHIFT 1
#define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x)))
#define XGMAC_RXOIE BIT(16)
#define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x)))
@@ -333,9 +322,7 @@
#define XGMAC_SWR BIT(0)
#define XGMAC_DMA_SYSBUS_MODE 0x00003004
#define XGMAC_WR_OSR_LMT GENMASK(29, 24)
-#define XGMAC_WR_OSR_LMT_SHIFT 24
#define XGMAC_RD_OSR_LMT GENMASK(21, 16)
-#define XGMAC_RD_OSR_LMT_SHIFT 16
#define XGMAC_EN_LPI BIT(15)
#define XGMAC_LPI_XIT_PKT BIT(14)
#define XGMAC_AAL DMA_AXI_AAL
@@ -370,15 +357,12 @@
#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
#define XGMAC_EDSE BIT(28)
#define XGMAC_TxPBL GENMASK(21, 16)
-#define XGMAC_TxPBL_SHIFT 16
#define XGMAC_TSE BIT(12)
#define XGMAC_OSP BIT(4)
#define XGMAC_TXST BIT(0)
#define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x)))
#define XGMAC_RxPBL GENMASK(21, 16)
-#define XGMAC_RxPBL_SHIFT 16
#define XGMAC_RBSZ GENMASK(14, 1)
-#define XGMAC_RBSZ_SHIFT 1
#define XGMAC_RXST BIT(0)
#define XGMAC_DMA_CH_TxDESC_HADDR(x) (0x00003110 + (0x80 * (x)))
#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x)))
@@ -423,32 +407,24 @@
#define XGMAC_TDES0_LT GENMASK(7, 0)
#define XGMAC_TDES1_LT GENMASK(31, 8)
#define XGMAC_TDES2_IVT GENMASK(31, 16)
-#define XGMAC_TDES2_IVT_SHIFT 16
#define XGMAC_TDES2_IOC BIT(31)
#define XGMAC_TDES2_TTSE BIT(30)
#define XGMAC_TDES2_B2L GENMASK(29, 16)
-#define XGMAC_TDES2_B2L_SHIFT 16
#define XGMAC_TDES2_VTIR GENMASK(15, 14)
-#define XGMAC_TDES2_VTIR_SHIFT 14
#define XGMAC_TDES2_B1L GENMASK(13, 0)
#define XGMAC_TDES3_OWN BIT(31)
#define XGMAC_TDES3_CTXT BIT(30)
#define XGMAC_TDES3_FD BIT(29)
#define XGMAC_TDES3_LD BIT(28)
#define XGMAC_TDES3_CPC GENMASK(27, 26)
-#define XGMAC_TDES3_CPC_SHIFT 26
#define XGMAC_TDES3_TCMSSV BIT(26)
#define XGMAC_TDES3_SAIC GENMASK(25, 23)
-#define XGMAC_TDES3_SAIC_SHIFT 23
#define XGMAC_TDES3_TBSV BIT(24)
#define XGMAC_TDES3_THL GENMASK(22, 19)
-#define XGMAC_TDES3_THL_SHIFT 19
#define XGMAC_TDES3_IVTIR GENMASK(19, 18)
-#define XGMAC_TDES3_IVTIR_SHIFT 18
#define XGMAC_TDES3_TSE BIT(18)
#define XGMAC_TDES3_IVLTV BIT(17)
#define XGMAC_TDES3_CIC GENMASK(17, 16)
-#define XGMAC_TDES3_CIC_SHIFT 16
#define XGMAC_TDES3_TPL GENMASK(17, 0)
#define XGMAC_TDES3_VLTV BIT(16)
#define XGMAC_TDES3_VT GENMASK(15, 0)
@@ -461,7 +437,6 @@
#define XGMAC_RDES3_CDA BIT(27)
#define XGMAC_RDES3_RSV BIT(26)
#define XGMAC_RDES3_L34T GENMASK(23, 20)
-#define XGMAC_RDES3_L34T_SHIFT 20
#define XGMAC_RDES3_ET_LT GENMASK(19, 16)
#define XGMAC_L34T_IP4TCP 0x1
#define XGMAC_L34T_IP4UDP 0x2
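
Splitting XGMAC_IDDR into FNUM and REG sub-masks lets the filter read/write paths compose the indirect address in two FIELD_PREP() steps (pack the sub-fields into the IDDR value, then place that value at its register offset) instead of ((filter_no << 4) | reg) << 8 with bare shift constants. A userspace sketch of the nesting, stand-in macros as before:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK(h, l)    (((~0u) >> (31 - (h))) & ((~0u) << (l)))
    #define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

    #define XGMAC_IDDR           GENMASK(16, 8)
    #define XGMAC_IDDR_FNUM_MASK GENMASK(7, 4)  /* filter number */
    #define XGMAC_IDDR_REG_MASK  GENMASK(3, 0)  /* register within filter */

    int main(void)
    {
            uint32_t iddr = FIELD_PREP(XGMAC_IDDR_FNUM_MASK, 3) |
                            FIELD_PREP(XGMAC_IDDR_REG_MASK, 0x1);

            /* prints ctrl=0x00003100 */
            printf("ctrl=0x%08x\n", FIELD_PREP(XGMAC_IDDR, iddr));
            return 0;
    }
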
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index b40b3ea50e25..49893b9fb88c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -46,8 +46,6 @@ static void dwxgmac2_update_caps(struct stmmac_priv *priv)
{
if (!priv->dma_cap.mbps_10_100)
priv->hw->link.caps &= ~(MAC_10 | MAC_100);
- else if (!priv->dma_cap.half_duplex)
- priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD);
}
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
@@ -298,10 +296,10 @@ static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
reg_space[i] = readl(ioaddr + i * 4);
}
-static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
+static int dwxgmac2_host_irq_status(struct stmmac_priv *priv,
struct stmmac_extra_stats *x)
{
- void __iomem *ioaddr = hw->pcsr;
+ void __iomem *ioaddr = priv->hw->pcsr;
u32 stat, en;
int ret = 0;
@@ -369,7 +367,7 @@ static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
u32 value = XGMAC_TFE;
if (duplex)
- value |= pause_time << XGMAC_PT_SHIFT;
+ value |= FIELD_PREP(XGMAC_PT, pause_time);
writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
}
@@ -1226,8 +1224,7 @@ static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
{
u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
- value &= ~XGMAC_CONFIG_SARC;
- value |= val << XGMAC_CONFIG_SARC_SHIFT;
+ value = u32_replace_bits(value, val, XGMAC_CONFIG_SARC);
writel(value, ioaddr + XGMAC_TX_CONFIG);
}
@@ -1247,14 +1244,16 @@ static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
u8 reg, u32 *data)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ u32 value, iddr;
int ret;
ret = dwxgmac2_filter_wait(hw);
if (ret)
return ret;
- value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
+ iddr = FIELD_PREP(XGMAC_IDDR_FNUM_MASK, filter_no) |
+ FIELD_PREP(XGMAC_IDDR_REG_MASK, reg);
+ value = FIELD_PREP(XGMAC_IDDR, iddr);
value |= XGMAC_TT | XGMAC_XB;
writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
@@ -1270,7 +1269,7 @@ static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
u8 reg, u32 data)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ u32 value, iddr;
int ret;
ret = dwxgmac2_filter_wait(hw);
@@ -1279,7 +1278,9 @@ static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
writel(data, ioaddr + XGMAC_L3L4_DATA);
- value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
+ iddr = FIELD_PREP(XGMAC_IDDR_FNUM_MASK, filter_no) |
+ FIELD_PREP(XGMAC_IDDR_REG_MASK, reg);
+ value = FIELD_PREP(XGMAC_IDDR, iddr);
value |= XGMAC_XB;
writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
@@ -1388,13 +1389,13 @@ static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
return ret;
if (sa) {
- value = match & XGMAC_L4SP0;
+ value = FIELD_PREP(XGMAC_L4SP0, match);
ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
if (ret)
return ret;
} else {
- value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;
+ value = FIELD_PREP(XGMAC_L4DP0, match);
ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index a2980482fcce..41e5b420a215 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -12,7 +12,7 @@
static int dwxgmac2_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
- unsigned int tdes3 = le32_to_cpu(p->des3);
+ u32 tdes3 = le32_to_cpu(p->des3);
int ret = tx_done;
if (unlikely(tdes3 & XGMAC_TDES3_OWN))
@@ -26,7 +26,7 @@ static int dwxgmac2_get_tx_status(struct stmmac_extra_stats *x,
static int dwxgmac2_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
- unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 rdes3 = le32_to_cpu(p->des3);
if (unlikely(rdes3 & XGMAC_RDES3_OWN))
return dma_own;
@@ -114,7 +114,7 @@ static inline void dwxgmac2_get_timestamp(void *desc, u32 ats, u64 *ts)
static int dwxgmac2_rx_check_timestamp(void *desc)
{
struct dma_desc *p = (struct dma_desc *)desc;
- unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 rdes3 = le32_to_cpu(p->des3);
bool desc_valid, ts_valid;
dma_rmb();
@@ -135,7 +135,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
- unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 rdes3 = le32_to_cpu(p->des3);
int ret = -EBUSY;
if (likely(rdes3 & XGMAC_RDES3_CDA))
@@ -162,7 +162,7 @@ static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls, unsigned int tot_pkt_len)
{
- unsigned int tdes3 = le32_to_cpu(p->des3);
+ u32 tdes3 = le32_to_cpu(p->des3);
p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L);
@@ -173,7 +173,7 @@ static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
tdes3 &= ~XGMAC_TDES3_FD;
if (csum_flag)
- tdes3 |= 0x3 << XGMAC_TDES3_CIC_SHIFT;
+ tdes3 |= FIELD_PREP(XGMAC_TDES3_CIC, 0x3);
else
tdes3 &= ~XGMAC_TDES3_CIC;
@@ -201,18 +201,16 @@ static void dwxgmac2_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
bool ls, unsigned int tcphdrlen,
unsigned int tcppayloadlen)
{
- unsigned int tdes3 = le32_to_cpu(p->des3);
+ u32 tdes3 = le32_to_cpu(p->des3);
if (len1)
p->des2 |= cpu_to_le32(len1 & XGMAC_TDES2_B1L);
if (len2)
- p->des2 |= cpu_to_le32((len2 << XGMAC_TDES2_B2L_SHIFT) &
- XGMAC_TDES2_B2L);
+ p->des2 |= cpu_to_le32(FIELD_PREP(XGMAC_TDES2_B2L, len2));
if (is_fs) {
tdes3 |= XGMAC_TDES3_FD | XGMAC_TDES3_TSE;
- tdes3 |= (tcphdrlen << XGMAC_TDES3_THL_SHIFT) &
- XGMAC_TDES3_THL;
- tdes3 |= tcppayloadlen & XGMAC_TDES3_TPL;
+ tdes3 |= FIELD_PREP(XGMAC_TDES3_THL, tcphdrlen);
+ tdes3 |= FIELD_PREP(XGMAC_TDES3_TPL, tcppayloadlen);
} else {
tdes3 &= ~XGMAC_TDES3_FD;
}
@@ -274,11 +272,11 @@ static void dwxgmac2_clear(struct dma_desc *p)
static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
enum pkt_hash_types *type)
{
- unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 rdes3 = le32_to_cpu(p->des3);
u32 ptype;
if (rdes3 & XGMAC_RDES3_RSV) {
- ptype = (rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT;
+ ptype = FIELD_GET(XGMAC_RDES3_L34T, rdes3);
switch (ptype) {
case XGMAC_L34T_IP4TCP:
@@ -313,9 +311,7 @@ static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool is_v
static void dwxgmac2_set_sarc(struct dma_desc *p, u32 sarc_type)
{
- sarc_type <<= XGMAC_TDES3_SAIC_SHIFT;
-
- p->des3 |= cpu_to_le32(sarc_type & XGMAC_TDES3_SAIC);
+ p->des3 |= cpu_to_le32(FIELD_PREP(XGMAC_TDES3_SAIC, sarc_type));
}
static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
@@ -328,13 +324,11 @@ static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
/* Inner VLAN */
if (inner_type) {
- u32 des = inner_tag << XGMAC_TDES2_IVT_SHIFT;
+ u32 des = FIELD_PREP(XGMAC_TDES2_IVT, inner_tag);
- des &= XGMAC_TDES2_IVT;
p->des2 = cpu_to_le32(des);
- des = inner_type << XGMAC_TDES3_IVTIR_SHIFT;
- des &= XGMAC_TDES3_IVTIR;
+ des = FIELD_PREP(XGMAC_TDES3_IVTIR, inner_type);
p->des3 = cpu_to_le32(des | XGMAC_TDES3_IVLTV);
}
@@ -347,8 +341,7 @@ static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
{
- type <<= XGMAC_TDES2_VTIR_SHIFT;
- p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
+ p->des2 |= cpu_to_le32(FIELD_PREP(XGMAC_TDES2_VTIR, type));
}
static void dwxgmac2_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
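
[Editor's note: the descriptor hunks also use FIELD_GET(), the inverse helper, to extract a field, as in the RDES3 L34T lookup above. A minimal sketch of that equivalence; the mask bounds are hypothetical, not the real XGMAC_RDES3_L34T definition.]

#include <assert.h>
#include <stdint.h>

/* Minimal stand-in for the kernel's FIELD_GET() from <linux/bitfield.h> */
#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, reg) \
	(((uint32_t)(reg) & (mask)) >> __builtin_ctz(mask))

#define XGMAC_RDES3_L34T        GENMASK(23, 20)  /* hypothetical bounds */
#define XGMAC_RDES3_L34T_SHIFT  20

int main(void)
{
	uint32_t rdes3 = 0x00300000;

	/* Old: (reg & MASK) >> SHIFT; new: FIELD_GET(MASK, reg) */
	assert(((rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT) ==
	       FIELD_GET(XGMAC_RDES3_L34T, rdes3));
	return 0;
}
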
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index cc1bdc0975d5..03437f1cf3df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -55,8 +55,7 @@ static void dwxgmac2_dma_init_rx_chan(struct stmmac_priv *priv,
u32 value;
value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
- value &= ~XGMAC_RxPBL;
- value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
+ value = u32_replace_bits(value, rxpbl, XGMAC_RxPBL);
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan));
@@ -72,9 +71,7 @@ static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv,
u32 value;
value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
- value &= ~XGMAC_TxPBL;
- value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
- value |= XGMAC_OSP;
+ value = u32_replace_bits(value, txpbl, XGMAC_TxPBL);
writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan));
@@ -90,13 +87,8 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (axi->axi_xit_frm)
value |= XGMAC_LPI_XIT_PKT;
- value &= ~XGMAC_WR_OSR_LMT;
- value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
- XGMAC_WR_OSR_LMT;
-
- value &= ~XGMAC_RD_OSR_LMT;
- value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
- XGMAC_RD_OSR_LMT;
+ value = u32_replace_bits(value, axi->axi_wr_osr_lmt, XGMAC_WR_OSR_LMT);
+ value = u32_replace_bits(value, axi->axi_rd_osr_lmt, XGMAC_RD_OSR_LMT);
if (!axi->axi_fb)
value |= XGMAC_UNDEF;
@@ -127,23 +119,24 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
{
u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
unsigned int rqs = fifosz / 256 - 1;
+ unsigned int rtc;
if (mode == SF_DMA_MODE) {
value |= XGMAC_RSF;
} else {
value &= ~XGMAC_RSF;
- value &= ~XGMAC_RTC;
if (mode <= 64)
- value |= 0x0 << XGMAC_RTC_SHIFT;
+ rtc = 0x0;
else if (mode <= 96)
- value |= 0x2 << XGMAC_RTC_SHIFT;
+ rtc = 0x2;
else
- value |= 0x3 << XGMAC_RTC_SHIFT;
+ rtc = 0x3;
+
+ value = u32_replace_bits(value, rtc, XGMAC_RTC);
}
- value &= ~XGMAC_RQS;
- value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
+ value = u32_replace_bits(value, rqs, XGMAC_RQS);
if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
@@ -172,11 +165,8 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
break;
}
- flow &= ~XGMAC_RFD;
- flow |= rfd << XGMAC_RFD_SHIFT;
-
- flow &= ~XGMAC_RFA;
- flow |= rfa << XGMAC_RFA_SHIFT;
+ flow = u32_replace_bits(flow, rfd, XGMAC_RFD);
+ flow = u32_replace_bits(flow, rfa, XGMAC_RFA);
writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
}
@@ -189,40 +179,41 @@ static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
{
u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
unsigned int tqs = fifosz / 256 - 1;
+ unsigned int ttc, txqen;
if (mode == SF_DMA_MODE) {
value |= XGMAC_TSF;
} else {
value &= ~XGMAC_TSF;
- value &= ~XGMAC_TTC;
if (mode <= 64)
- value |= 0x0 << XGMAC_TTC_SHIFT;
+ ttc = 0x0;
else if (mode <= 96)
- value |= 0x2 << XGMAC_TTC_SHIFT;
+ ttc = 0x2;
else if (mode <= 128)
- value |= 0x3 << XGMAC_TTC_SHIFT;
+ ttc = 0x3;
else if (mode <= 192)
- value |= 0x4 << XGMAC_TTC_SHIFT;
+ ttc = 0x4;
else if (mode <= 256)
- value |= 0x5 << XGMAC_TTC_SHIFT;
+ ttc = 0x5;
else if (mode <= 384)
- value |= 0x6 << XGMAC_TTC_SHIFT;
+ ttc = 0x6;
else
- value |= 0x7 << XGMAC_TTC_SHIFT;
+ ttc = 0x7;
+
+ value = u32_replace_bits(value, ttc, XGMAC_TTC);
}
/* Use static TC to Queue mapping */
- value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP;
+ value |= FIELD_PREP(XGMAC_Q2TCMAP, channel);
- value &= ~XGMAC_TXQEN;
if (qmode != MTL_QUEUE_AVB)
- value |= 0x2 << XGMAC_TXQEN_SHIFT;
+ txqen = 0x2;
else
- value |= 0x1 << XGMAC_TXQEN_SHIFT;
+ txqen = 0x1;
- value &= ~XGMAC_TQS;
- value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;
+ value = u32_replace_bits(value, txqen, XGMAC_TXQEN);
+ value = u32_replace_bits(value, tqs, XGMAC_TQS);
writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}
@@ -373,6 +364,7 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->vxn = (hw_cap & XGMAC_HWFEAT_VXN) >> 29;
dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
dma_cap->tssrc = (hw_cap & XGMAC_HWFEAT_TSSTSSEL) >> 25;
+ dma_cap->actphyif = FIELD_GET(XGMAC_HWFEAT_PHYSEL, hw_cap);
dma_cap->multi_addr = (hw_cap & XGMAC_HWFEAT_ADDMACADRSEL) >> 18;
dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
@@ -526,16 +518,17 @@ static void dwxgmac2_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
{
u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
+ unsigned int txqen;
- value &= ~XGMAC_TXQEN;
if (qmode != MTL_QUEUE_AVB) {
- value |= 0x2 << XGMAC_TXQEN_SHIFT;
+ txqen = 0x2;
writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
} else {
- value |= 0x1 << XGMAC_TXQEN_SHIFT;
+ txqen = 0x1;
writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
}
+ value = u32_replace_bits(value, txqen, XGMAC_TXQEN);
writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}
@@ -545,8 +538,7 @@ static void dwxgmac2_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 value;
value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
- value &= ~XGMAC_RBSZ;
- value |= bfsize << XGMAC_RBSZ_SHIFT;
+ value = u32_replace_bits(value, bfsize, XGMAC_RBSZ);
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}
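
[Editor's note: the DMA hunks collapse the clear-then-set pattern (value &= ~MASK; value |= x << SHIFT;) into a single u32_replace_bits() call. A sketch of its semantics, assuming a hypothetical XGMAC_RBSZ layout; the real helper is generated in <linux/bitfield.h>.]

#include <assert.h>
#include <stdint.h>

#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

/* Sketch of the kernel's u32_replace_bits(): clear the field in @old,
 * then insert @val shifted into place and masked to the field width.
 */
static inline uint32_t u32_replace_bits(uint32_t old, uint32_t val,
					uint32_t mask)
{
	return (old & ~mask) | ((val << __builtin_ctz(mask)) & mask);
}

#define XGMAC_RBSZ        GENMASK(14, 1)   /* hypothetical field */
#define XGMAC_RBSZ_SHIFT  1

int main(void)
{
	uint32_t value = 0xdeadbeef, bfsize = 0x600;

	/* The two-step form from the removed lines, for comparison */
	uint32_t old_form = (value & ~XGMAC_RBSZ) |
			    (bfsize << XGMAC_RBSZ_SHIFT);

	assert(u32_replace_bits(value, bfsize, XGMAC_RBSZ) == old_form);
	return 0;
}
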
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 937b7a0466fc..8f6993c8bcae 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -15,7 +15,7 @@
static int enh_desc_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
- unsigned int tdes0 = le32_to_cpu(p->des0);
+ u32 tdes0 = le32_to_cpu(p->des0);
int ret = tx_done;
/* Get tx owner first */
@@ -44,7 +44,7 @@ static int enh_desc_get_tx_status(struct stmmac_extra_stats *x,
if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
(tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
x->tx_collision +=
- (tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;
+ FIELD_GET(ETDES0_COLLISION_COUNT_MASK, tdes0);
if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
x->tx_deferred++;
@@ -88,7 +88,7 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
/* bits 5 7 0 | Frame status
* ----------------------------------------------------------
- * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
+ * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
* 1 0 0 | IPv4/6 No CSUM errorS.
* 1 0 1 | IPv4/6 CSUM PAYLOAD error
* 1 1 0 | IPv4/6 CSUM IP HR error
@@ -117,11 +117,11 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
static void enh_desc_get_ext_status(struct stmmac_extra_stats *x,
struct dma_extended_desc *p)
{
- unsigned int rdes0 = le32_to_cpu(p->basic.des0);
- unsigned int rdes4 = le32_to_cpu(p->des4);
+ u32 rdes0 = le32_to_cpu(p->basic.des0);
+ u32 rdes4 = le32_to_cpu(p->des4);
if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
- int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;
+ int message_type = FIELD_GET(ERDES4_MSG_TYPE_MASK, rdes4);
if (rdes4 & ERDES4_IP_HDR_ERR)
x->ip_hdr_err++;
@@ -167,13 +167,13 @@ static void enh_desc_get_ext_status(struct stmmac_extra_stats *x,
x->av_pkt_rcvd++;
if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
x->av_tagged_pkt_rcvd++;
- if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
+ if (rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK)
x->vlan_tag_priority_val++;
if (rdes4 & ERDES4_L3_FILTER_MATCH)
x->l3_filter_match++;
if (rdes4 & ERDES4_L4_FILTER_MATCH)
x->l4_filter_match++;
- if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
+ if (rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK)
x->l3_l4_filter_no_match++;
}
}
@@ -181,7 +181,7 @@ static void enh_desc_get_ext_status(struct stmmac_extra_stats *x,
static int enh_desc_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
- unsigned int rdes0 = le32_to_cpu(p->des0);
+ u32 rdes0 = le32_to_cpu(p->des0);
int ret = good_frame;
if (unlikely(rdes0 & RDES0_OWN))
@@ -312,7 +312,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls, unsigned int tot_pkt_len)
{
- unsigned int tdes0 = le32_to_cpu(p->des0);
+ u32 tdes0 = le32_to_cpu(p->des0);
if (mode == STMMAC_CHAIN_MODE)
enh_set_tx_desc_len_on_chain(p, len);
@@ -324,10 +324,8 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
else
tdes0 &= ~ETDES0_FIRST_SEGMENT;
- if (likely(csum_flag))
- tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
- else
- tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
+ tdes0 = u32_replace_bits(tdes0, csum_flag ? TX_CIC_FULL : 0,
+ ETDES0_CHECKSUM_INSERTION_MASK);
if (ls)
tdes0 |= ETDES0_LAST_SEGMENT;
@@ -363,8 +361,7 @@ static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
if (rx_coe_type == STMMAC_RX_COE_TYPE1)
csum = 2;
- return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
- >> RDES0_FRAME_LEN_SHIFT) - csum);
+ return FIELD_GET(RDES0_FRAME_LEN_MASK, le32_to_cpu(p->des0)) - csum;
}
static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 014f7cd79a3c..7e69ff4b9a98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -109,7 +109,7 @@ int stmmac_reset(struct stmmac_priv *priv)
void __iomem *ioaddr = priv->ioaddr;
if (plat && plat->fix_soc_reset)
- return plat->fix_soc_reset(priv, ioaddr);
+ return plat->fix_soc_reset(priv);
return stmmac_do_callback(priv, dma, reset, ioaddr);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index df6e8a567b1f..0db96a387259 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -354,7 +354,7 @@ struct stmmac_ops {
/* Dump MAC registers */
void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
/* Handle extra events on specific interrupts hw dependent */
- int (*host_irq_status)(struct mac_device_info *hw,
+ int (*host_irq_status)(struct stmmac_priv *priv,
struct stmmac_extra_stats *x);
/* Handle MTL interrupts */
int (*host_mtl_irq_status)(struct stmmac_priv *priv,
@@ -453,7 +453,7 @@ struct stmmac_ops {
#define stmmac_dump_mac_regs(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, dump_regs, __args)
#define stmmac_host_irq_status(__priv, __args...) \
- stmmac_do_callback(__priv, mac, host_irq_status, __args)
+ stmmac_do_callback(__priv, mac, host_irq_status, __priv, __args)
#define stmmac_host_mtl_irq_status(__priv, __args...) \
stmmac_do_callback(__priv, mac, host_mtl_irq_status, __priv, __args)
#define stmmac_set_filter(__priv, __args...) \
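
[Editor's note: the hwif.h change moves host_irq_status() onto the common convention where the dispatch macro injects the driver private structure as the first callback argument. A simplified userspace sketch of that pattern; the structures and the -22 (-EINVAL) fallback are illustrative, not the real stmmac definitions.]

#include <stdio.h>

struct stats { int irq_count; };
struct priv;

struct mac_ops {
	int (*host_irq_status)(struct priv *priv, struct stats *x);
};

struct priv {
	const struct mac_ops *ops;
	struct stats xstats;
};

/* The wrapper checks the ops pointer and forwards, prepending the private
 * structure -- mirroring the __priv argument added to the macro above.
 */
#define stmmac_do_callback(__priv, __op, ...) \
	((__priv)->ops && (__priv)->ops->__op ? \
	 (__priv)->ops->__op(__VA_ARGS__) : -22 /* -EINVAL */)
#define stmmac_host_irq_status(__priv, ...) \
	stmmac_do_callback(__priv, host_irq_status, __priv, __VA_ARGS__)

static int demo_irq_status(struct priv *priv, struct stats *x)
{
	(void)priv;
	x->irq_count++;
	return 0;
}

static const struct mac_ops demo_ops = { .host_irq_status = demo_irq_status };

int main(void)
{
	struct priv p = { .ops = &demo_ops };
	int ret = stmmac_host_irq_status(&p, &p.xstats);

	printf("ret=%d count=%d\n", ret, p.xstats.irq_count);
	return 0;
}
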
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 0fab842902a8..1b3b114e7bec 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -252,7 +252,7 @@ static void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK);
}
-/* This reads the MAC core counters (if actaully supported).
+/* This reads the MAC core counters (if actually supported).
* by default the MMC core is programmed to reset each
* counter after a read. So all the field of the mmc struct
* have to be incremented.
@@ -420,7 +420,7 @@ static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
*dest = *dest + tmp;
}
-/* This reads the MAC core counters (if actaully supported).
+/* This reads the MAC core counters (if actually supported).
* by default the MMC core is programmed to reset each
* counter after a read. So all the field of the mmc struct
* have to be incremented.
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 68a7cfcb1d8f..859cb9242a52 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -15,8 +15,8 @@
static int ndesc_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
- unsigned int tdes0 = le32_to_cpu(p->des0);
- unsigned int tdes1 = le32_to_cpu(p->des1);
+ u32 tdes0 = le32_to_cpu(p->des0);
+ u32 tdes1 = le32_to_cpu(p->des1);
int ret = tx_done;
/* Get tx owner first */
@@ -40,10 +40,8 @@ static int ndesc_get_tx_status(struct stmmac_extra_stats *x,
if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
(tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
(tdes0 & TDES0_LATE_COLLISION))) {
- unsigned int collisions;
-
- collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
- x->tx_collision += collisions;
+ x->tx_collision +=
+ FIELD_GET(TDES0_COLLISION_COUNT_MASK, tdes0);
}
ret = tx_err;
}
@@ -69,8 +67,8 @@ static int ndesc_get_tx_len(struct dma_desc *p)
static int ndesc_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
+ u32 rdes0 = le32_to_cpu(p->des0);
int ret = good_frame;
- unsigned int rdes0 = le32_to_cpu(p->des0);
if (unlikely(rdes0 & RDES0_OWN))
return dma_own;
@@ -178,17 +176,15 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls, unsigned int tot_pkt_len)
{
- unsigned int tdes1 = le32_to_cpu(p->des1);
+ u32 tdes1 = le32_to_cpu(p->des1);
if (is_fs)
tdes1 |= TDES1_FIRST_SEGMENT;
else
tdes1 &= ~TDES1_FIRST_SEGMENT;
- if (likely(csum_flag))
- tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT;
- else
- tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT);
+ tdes1 = u32_replace_bits(tdes1, csum_flag ? TX_CIC_FULL : 0,
+ TDES1_CHECKSUM_INSERTION_MASK);
if (ls)
tdes1 |= TDES1_LAST_SEGMENT;
@@ -222,10 +218,7 @@ static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
if (rx_coe_type == STMMAC_RX_COE_TYPE1)
csum = 2;
- return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
- >> RDES0_FRAME_LEN_SHIFT) -
- csum);
-
+ return FIELD_GET(RDES0_FRAME_LEN_MASK, le32_to_cpu(p->des0)) - csum;
}
static void ndesc_enable_tx_timestamp(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 012b0a477255..51c96a738151 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -31,7 +31,6 @@ struct stmmac_resources {
void __iomem *addr;
u8 mac[ETH_ALEN];
int wol_irq;
- int lpi_irq;
int irq;
int sfty_irq;
int sfty_ce_irq;
@@ -297,12 +296,12 @@ struct stmmac_priv {
int wol_irq;
u32 gmii_address_bus_config;
struct timer_list eee_ctrl_timer;
- int lpi_irq;
u32 tx_lpi_timer;
bool tx_lpi_clk_stop;
bool eee_enabled;
bool eee_active;
bool eee_sw_timer_en;
+ bool legacy_serdes_is_powered;
unsigned int mode;
unsigned int chain_mode;
int extend_desc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index b155e71aac51..c1e26965d9b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -914,20 +914,11 @@ static int stmmac_set_per_queue_coalesce(struct net_device *dev, u32 queue,
return __stmmac_set_coalesce(dev, ec, queue);
}
-static int stmmac_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+static u32 stmmac_get_rx_ring_count(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- switch (rxnfc->cmd) {
- case ETHTOOL_GRXRINGS:
- rxnfc->data = priv->plat->rx_queues_to_use;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
+ return priv->plat->rx_queues_to_use;
}
static u32 stmmac_get_rxfh_key_size(struct net_device *dev)
@@ -1121,7 +1112,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_eee = stmmac_ethtool_op_get_eee,
.set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
- .get_rxnfc = stmmac_get_rxnfc,
+ .get_rx_ring_count = stmmac_get_rx_ring_count,
.get_rxfh_key_size = stmmac_get_rxfh_key_size,
.get_rxfh_indir_size = stmmac_get_rxfh_indir_size,
.get_rxfh = stmmac_get_rxfh,
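
[Editor's note: both stmmac here and niu later in this series convert ETHTOOL_GRXRINGS handling to the dedicated get_rx_ring_count ethtool op, so drivers with no other rxnfc commands can drop get_rxnfc entirely. A userspace sketch of the callback shape; the structures below are stand-ins, not the real ethtool API.]

#include <stdio.h>

/* Instead of decoding ETHTOOL_GRXRINGS inside a general get_rxnfc()
 * switch, the core calls a dedicated hook that just returns the count.
 */
struct net_device { unsigned int rx_queues_to_use; };

struct ethtool_ops {
	unsigned int (*get_rx_ring_count)(struct net_device *dev);
};

static unsigned int demo_get_rx_ring_count(struct net_device *dev)
{
	return dev->rx_queues_to_use;
}

int main(void)
{
	struct net_device dev = { .rx_queues_to_use = 8 };
	const struct ethtool_ops ops = { demo_get_rx_ring_count };

	printf("rx rings: %u\n", ops.get_rx_ring_count(&dev));
	return 0;
}
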
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index bb110124f21e..b9a985fa772c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -43,7 +43,7 @@ static void config_sub_second_increment(void __iomem *ioaddr,
unsigned long data;
u32 reg_value;
- /* For GMAC3.x, 4.x versions, in "fine adjustement mode" set sub-second
+ /* For GMAC3.x, 4.x versions, in "fine adjustment mode" set sub-second
* increment to twice the number of nanoseconds of a clock cycle.
* The calculation of the default_addend value by the caller will set it
* to mid-range = 2^31 when the remainder of this division is zero,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a379221b96a3..c63099a77cc0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -127,6 +127,22 @@ static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
+static const char *stmmac_dwmac_actphyif[8] = {
+ [PHY_INTF_SEL_GMII_MII] = "GMII/MII",
+ [PHY_INTF_SEL_RGMII] = "RGMII",
+ [PHY_INTF_SEL_SGMII] = "SGMII",
+ [PHY_INTF_SEL_TBI] = "TBI",
+ [PHY_INTF_SEL_RMII] = "RMII",
+ [PHY_INTF_SEL_RTBI] = "RTBI",
+ [PHY_INTF_SEL_SMII] = "SMII",
+ [PHY_INTF_SEL_REVMII] = "REVMII",
+};
+
+static const char *stmmac_dwxgmac_phyif[4] = {
+ [PHY_INTF_GMII] = "GMII",
+ [PHY_INTF_RGMII] = "RGMII",
+};
+
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
@@ -866,6 +882,30 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
+static void stmmac_legacy_serdes_power_down(struct stmmac_priv *priv)
+{
+ if (priv->plat->serdes_powerdown && priv->legacy_serdes_is_powered)
+ priv->plat->serdes_powerdown(priv->dev, priv->plat->bsp_priv);
+
+ priv->legacy_serdes_is_powered = false;
+}
+
+static int stmmac_legacy_serdes_power_up(struct stmmac_priv *priv)
+{
+ int ret;
+
+ if (!priv->plat->serdes_powerup)
+ return 0;
+
+ ret = priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
+ if (ret < 0)
+ netdev_err(priv->dev, "SerDes powerup failed\n");
+ else
+ priv->legacy_serdes_is_powered = true;
+
+ return ret;
+}
+
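
[Editor's note: the new helpers make SerDes power handling idempotent by tracking state in legacy_serdes_is_powered, so suspend, release, and error paths can all call power-down unconditionally without double-invoking the platform hook. A sketch of the guard pattern under illustrative names.]

#include <stdbool.h>
#include <stdio.h>

/* The callback and fields are stand-ins for the platform
 * serdes_powerup/serdes_powerdown hooks used above.
 */
struct dev_state {
	bool powered;
	void (*power_down)(void);
};

static void guard_power_down(struct dev_state *s)
{
	/* Safe to call from several teardown paths: only acts once */
	if (s->power_down && s->powered)
		s->power_down();
	s->powered = false;
}

static void hw_power_down(void) { puts("powered down"); }

int main(void)
{
	struct dev_state s = { .powered = true, .power_down = hw_power_down };

	guard_power_down(&s);  /* performs the power-down */
	guard_power_down(&s);  /* second call is a no-op */
	return 0;
}
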
/**
* stmmac_mac_flow_ctrl - Configure flow control in all queues
* @priv: driver private structure
@@ -890,6 +930,9 @@ static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
/* Refresh the MAC-specific capabilities */
stmmac_mac_update_caps(priv);
+ if (priv->hw_cap_support && !priv->dma_cap.half_duplex)
+ priv->hw->link.caps &= ~(MAC_1000HD | MAC_100HD | MAC_10HD);
+
config->mac_capabilities = priv->hw->link.caps;
if (priv->plat->max_speed)
@@ -962,9 +1005,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
u32 old_ctrl, ctrl;
int ret;
- if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
- priv->plat->serdes_powerup)
- priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
+ if (priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)
+ stmmac_legacy_serdes_power_up(priv);
old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
ctrl = old_ctrl & ~priv->hw->link.speed_mask;
@@ -1114,7 +1156,7 @@ static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
STMMAC_DEFAULT_TWT_LS);
- /* Try to cnfigure the hardware timer. */
+ /* Try to configure the hardware timer. */
ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
@@ -1206,6 +1248,7 @@ static int stmmac_init_phy(struct net_device *dev)
struct fwnode_handle *phy_fwnode;
struct fwnode_handle *fwnode;
struct ethtool_keee eee;
+ u32 dev_flags = 0;
int ret;
if (!phylink_expects_phy(priv->phylink))
@@ -1224,6 +1267,9 @@ static int stmmac_init_phy(struct net_device *dev)
else
phy_fwnode = NULL;
+ if (priv->plat->flags & STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD)
+ dev_flags |= PHY_F_KEEP_PREAMBLE_BEFORE_SFD;
+
/* Some DT bindings do not set-up the PHY handle. Let's try to
* manually parse it
*/
@@ -1242,10 +1288,12 @@ static int stmmac_init_phy(struct net_device *dev)
return -ENODEV;
}
+ phydev->dev_flags |= dev_flags;
+
ret = phylink_connect_phy(priv->phylink, phydev);
} else {
fwnode_handle_put(phy_fwnode);
- ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
+ ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, dev_flags);
}
if (ret) {
@@ -3134,8 +3182,6 @@ int stmmac_get_phy_intf_sel(phy_interface_t interface)
phy_intf_sel = PHY_INTF_SEL_GMII_MII;
else if (phy_interface_mode_is_rgmii(interface))
phy_intf_sel = PHY_INTF_SEL_RGMII;
- else if (interface == PHY_INTERFACE_MODE_SGMII)
- phy_intf_sel = PHY_INTF_SEL_SGMII;
else if (interface == PHY_INTERFACE_MODE_RMII)
phy_intf_sel = PHY_INTF_SEL_RMII;
else if (interface == PHY_INTERFACE_MODE_REVMII)
@@ -3149,13 +3195,24 @@ static int stmmac_prereset_configure(struct stmmac_priv *priv)
{
struct plat_stmmacenet_data *plat_dat = priv->plat;
phy_interface_t interface;
+ struct phylink_pcs *pcs;
int phy_intf_sel, ret;
if (!plat_dat->set_phy_intf_sel)
return 0;
interface = plat_dat->phy_interface;
- phy_intf_sel = stmmac_get_phy_intf_sel(interface);
+
+ /* Check whether this mode uses a PCS */
+ pcs = stmmac_mac_select_pcs(&priv->phylink_config, interface);
+ if (priv->integrated_pcs && pcs == &priv->integrated_pcs->pcs) {
+ /* Request the phy_intf_sel from the integrated PCS */
+ phy_intf_sel = stmmac_integrated_pcs_get_phy_intf_sel(pcs,
+ interface);
+ } else {
+ phy_intf_sel = stmmac_get_phy_intf_sel(interface);
+ }
+
if (phy_intf_sel < 0) {
netdev_err(priv->dev,
"failed to get phy_intf_sel for %s: %pe\n",
@@ -3489,7 +3546,7 @@ static void stmmac_mac_config_rss(struct stmmac_priv *priv)
/**
* stmmac_mtl_configuration - Configure MTL
* @priv: driver private structure
- * Description: It is used for configurring MTL
+ * Description: It is used for configuring MTL
*/
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
@@ -3712,10 +3769,6 @@ static void stmmac_free_irq(struct net_device *dev,
free_irq(priv->sfty_ce_irq, dev);
fallthrough;
case REQ_IRQ_ERR_SFTY_CE:
- if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
- free_irq(priv->lpi_irq, dev);
- fallthrough;
- case REQ_IRQ_ERR_LPI:
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
fallthrough;
@@ -3773,24 +3826,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
}
}
- /* Request the LPI IRQ in case of another line
- * is used for LPI
- */
- if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
- int_name = priv->int_name_lpi;
- sprintf(int_name, "%s:%s", dev->name, "lpi");
- ret = request_irq(priv->lpi_irq,
- stmmac_mac_interrupt,
- 0, int_name, dev);
- if (unlikely(ret < 0)) {
- netdev_err(priv->dev,
- "%s: alloc lpi MSI %d (error: %d)\n",
- __func__, priv->lpi_irq, ret);
- irq_err = REQ_IRQ_ERR_LPI;
- goto irq_error;
- }
- }
-
/* Request the common Safety Feature Correctible/Uncorrectible
* Error line in case of another line is used
*/
@@ -3930,19 +3965,6 @@ static int stmmac_request_irq_single(struct net_device *dev)
}
}
- /* Request the IRQ lines */
- if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
- ret = request_irq(priv->lpi_irq, stmmac_interrupt,
- IRQF_SHARED, dev->name, dev);
- if (unlikely(ret < 0)) {
- netdev_err(priv->dev,
- "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
- __func__, priv->lpi_irq, ret);
- irq_err = REQ_IRQ_ERR_LPI;
- goto irq_error;
- }
- }
-
/* Request the common Safety Feature Correctible/Uncorrectible
* Error line in case of another line is used
*/
@@ -4077,16 +4099,6 @@ static int __stmmac_open(struct net_device *dev,
stmmac_reset_queues_param(priv);
- if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
- priv->plat->serdes_powerup) {
- ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
- if (ret < 0) {
- netdev_err(priv->dev, "%s: Serdes powerup failed\n",
- __func__);
- goto init_error;
- }
- }
-
ret = stmmac_hw_setup(dev);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
@@ -4142,9 +4154,15 @@ static int stmmac_open(struct net_device *dev)
if (ret)
goto err_runtime_pm;
+ if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)) {
+ ret = stmmac_legacy_serdes_power_up(priv);
+ if (ret < 0)
+ goto err_disconnect_phy;
+ }
+
ret = __stmmac_open(dev, dma_conf);
if (ret)
- goto err_disconnect_phy;
+ goto err_serdes;
kfree(dma_conf);
@@ -4153,6 +4171,8 @@ static int stmmac_open(struct net_device *dev)
return ret;
+err_serdes:
+ stmmac_legacy_serdes_power_down(priv);
err_disconnect_phy:
phylink_disconnect_phy(priv->phylink);
err_runtime_pm:
@@ -4187,10 +4207,6 @@ static void __stmmac_release(struct net_device *dev)
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv, &priv->dma_conf);
- /* Powerdown Serdes if there is */
- if (priv->plat->serdes_powerdown)
- priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
-
stmmac_release_ptp(priv);
if (stmmac_fpe_supported(priv))
@@ -4216,6 +4232,7 @@ static int stmmac_release(struct net_device *dev)
__stmmac_release(dev);
+ stmmac_legacy_serdes_power_down(priv);
phylink_disconnect_phy(priv->phylink);
pm_runtime_put(priv->device);
@@ -4367,7 +4384,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* Always insert VLAN tag to SKB payload for TSO frames.
*
- * Never insert VLAN tag by HW, since segments splited by
+ * Never insert VLAN tag by HW, since segments split by
* TSO engine will be un-tagged by mistake.
*/
if (skb_vlan_tag_present(skb)) {
@@ -5940,7 +5957,7 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
unsigned long flags;
spin_lock_irqsave(&ch->lock, flags);
- /* Both RX and TX work done are compelte,
+ /* Both RX and TX work done are complete,
* so enable both RX & TX IRQs.
*/
stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
@@ -6143,7 +6160,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
/* To handle GMAC own interrupts */
if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
- int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
+ int status = stmmac_host_irq_status(priv, &priv->xstats);
if (unlikely(status)) {
/* For LPI we need to save the tx status */
@@ -6272,7 +6289,7 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
/**
* stmmac_ioctl - Entry point for the Ioctl
* @dev: Device pointer.
- * @rq: An IOCTL specefic structure, that can contain a pointer to
+ * @rq: An IOCTL specific structure, that can contain a pointer to
* a proprietary structure used to pass information to the driver.
* @cmd: IOCTL command
* Description:
@@ -7264,6 +7281,40 @@ static void stmmac_service_task(struct work_struct *work)
clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}
+static void stmmac_print_actphyif(struct stmmac_priv *priv)
+{
+ const char **phyif_table;
+ const char *actphyif_str;
+ size_t phyif_table_size;
+
+ switch (priv->plat->core_type) {
+ case DWMAC_CORE_MAC100:
+ return;
+
+ case DWMAC_CORE_GMAC:
+ case DWMAC_CORE_GMAC4:
+ phyif_table = stmmac_dwmac_actphyif;
+ phyif_table_size = ARRAY_SIZE(stmmac_dwmac_actphyif);
+ break;
+
+ case DWMAC_CORE_XGMAC:
+ phyif_table = stmmac_dwxgmac_phyif;
+ phyif_table_size = ARRAY_SIZE(stmmac_dwxgmac_phyif);
+ break;
+ }
+
+ if (priv->dma_cap.actphyif < phyif_table_size)
+ actphyif_str = phyif_table[priv->dma_cap.actphyif];
+ else
+ actphyif_str = NULL;
+
+ if (!actphyif_str)
+ actphyif_str = "unknown";
+
+ dev_info(priv->device, "Active PHY interface: %s (%u)\n",
+ actphyif_str, priv->dma_cap.actphyif);
+}
+
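
[Editor's note: stmmac_print_actphyif() above does a bounded lookup into a sparse string table, falling back to "unknown" both for out-of-range indexes and for unpopulated slots. A self-contained sketch of that lookup; the table contents are illustrative.]

#include <stdio.h>

static const char *phyif_names[8] = {
	[0] = "GMII/MII",
	[1] = "RGMII",
	[4] = "RMII",
};

static const char *phyif_name(unsigned int idx)
{
	const char *name = NULL;

	/* Bounds check first, then tolerate NULL holes in the table */
	if (idx < sizeof(phyif_names) / sizeof(phyif_names[0]))
		name = phyif_names[idx];

	return name ? name : "unknown";
}

int main(void)
{
	/* Prints: RGMII unknown unknown */
	printf("%s %s %s\n", phyif_name(1), phyif_name(3), phyif_name(42));
	return 0;
}
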
/**
* stmmac_hw_init - Init the MAC device
* @priv: driver private structure
@@ -7320,6 +7371,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
else if (priv->dma_cap.rx_coe_type1)
priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
+ stmmac_print_actphyif(priv);
} else {
dev_info(priv->device, "No HW DMA feature register supported\n");
}
@@ -7695,7 +7747,6 @@ static int __stmmac_dvr_probe(struct device *device,
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
- priv->lpi_irq = res->lpi_irq;
priv->sfty_irq = res->sfty_irq;
priv->sfty_ce_irq = res->sfty_ce_irq;
priv->sfty_ue_irq = res->sfty_ue_irq;
@@ -8061,8 +8112,7 @@ int stmmac_suspend(struct device *dev)
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
- if (priv->plat->serdes_powerdown)
- priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+ stmmac_legacy_serdes_power_down(priv);
/* Enable Power down mode by programming the PMT regs */
if (priv->wolopts) {
@@ -8165,11 +8215,8 @@ int stmmac_resume(struct device *dev)
stmmac_mdio_reset(priv->mii);
}
- if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
- priv->plat->serdes_powerup) {
- ret = priv->plat->serdes_powerup(ndev,
- priv->plat->bsp_priv);
-
+ if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)) {
+ ret = stmmac_legacy_serdes_power_up(priv);
if (ret < 0)
return ret;
}
@@ -8191,6 +8238,7 @@ int stmmac_resume(struct device *dev)
ret = stmmac_hw_setup(ndev);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+ stmmac_legacy_serdes_power_down(priv);
mutex_unlock(&priv->lock);
rtnl_unlock();
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 1e82850f2a25..a7c2496b39f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -483,7 +483,7 @@ void stmmac_pcs_clean(struct net_device *ndev)
* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and it is fixed (as reported in the driver
- * documentation). Viceversa the driver will try to set the MDC
+ * documentation). Vice versa the driver will try to set the MDC
* clock dynamically according to the actual clock input.
*/
static u32 stmmac_clk_csr_set(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c
index e2f531c11986..88fa359ea716 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c
@@ -2,6 +2,20 @@
#include "stmmac.h"
#include "stmmac_pcs.h"
+/*
+ * GMAC_AN_STATUS is equivalent to MII_BMSR
+ * GMAC_ANE_ADV is equivalent to 802.3z MII_ADVERTISE
+ * GMAC_ANE_LPA is equivalent to 802.3z MII_LPA
+ * GMAC_ANE_EXP is equivalent to MII_EXPANSION
+ * GMAC_TBI is equivalent to MII_ESTATUS
+ *
+ * ADV, LPA and EXP are only available for the TBI and RTBI modes.
+ */
+#define GMAC_AN_STATUS 0x04 /* AN status */
+#define GMAC_ANE_ADV 0x08 /* ANE Advertisement */
+#define GMAC_ANE_LPA 0x0c /* ANE link partner ability */
+#define GMAC_TBI 0x14 /* TBI extend status */
+
static int dwmac_integrated_pcs_enable(struct phylink_pcs *pcs)
{
struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs);
@@ -45,6 +59,37 @@ static const struct phylink_pcs_ops dwmac_integrated_pcs_ops = {
.pcs_config = dwmac_integrated_pcs_config,
};
+void stmmac_integrated_pcs_irq(struct stmmac_priv *priv, u32 status,
+ struct stmmac_extra_stats *x)
+{
+ struct stmmac_pcs *spcs = priv->integrated_pcs;
+ u32 val = readl(spcs->base + GMAC_AN_STATUS);
+
+ if (status & PCS_ANE_IRQ) {
+ x->irq_pcs_ane_n++;
+ if (val & BMSR_ANEGCOMPLETE)
+ dev_info(priv->device,
+ "PCS ANE process completed\n");
+ }
+
+ if (status & PCS_LINK_IRQ) {
+ x->irq_pcs_link_n++;
+ dev_info(priv->device, "PCS Link %s\n",
+ val & BMSR_LSTATUS ? "Up" : "Down");
+
+ phylink_pcs_change(&spcs->pcs, val & BMSR_LSTATUS);
+ }
+}
+
+int stmmac_integrated_pcs_get_phy_intf_sel(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ return PHY_INTF_SEL_SGMII;
+
+ return -EINVAL;
+}
+
int stmmac_integrated_pcs_init(struct stmmac_priv *priv, unsigned int offset,
u32 int_mask)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
index cda93894168e..23bbd4f10bf8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -16,36 +16,14 @@
/* PCS registers (AN/TBI/SGMII/RGMII) offsets */
#define GMAC_AN_CTRL(x) (x) /* AN control */
-#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */
-
-/* ADV, LPA and EXP are only available for the TBI and RTBI interfaces */
-#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */
-#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partener ability */
-#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */
-#define GMAC_TBI(x) (x + 0x14) /* TBI extend status */
/* AN Configuration defines */
-#define GMAC_AN_CTRL_RAN BIT(9) /* Restart Auto-Negotiation */
-#define GMAC_AN_CTRL_ANE BIT(12) /* Auto-Negotiation Enable */
-#define GMAC_AN_CTRL_ELE BIT(14) /* External Loopback Enable */
-#define GMAC_AN_CTRL_ECD BIT(16) /* Enable Comma Detect */
-#define GMAC_AN_CTRL_LR BIT(17) /* Lock to Reference */
-#define GMAC_AN_CTRL_SGMRAL BIT(18) /* SGMII RAL Control */
-
-/* AN Status defines */
-#define GMAC_AN_STATUS_LS BIT(2) /* Link Status 0:down 1:up */
-#define GMAC_AN_STATUS_ANA BIT(3) /* Auto-Negotiation Ability */
-#define GMAC_AN_STATUS_ANC BIT(5) /* Auto-Negotiation Complete */
-#define GMAC_AN_STATUS_ES BIT(8) /* Extended Status */
-
-/* ADV and LPA defines */
-#define GMAC_ANE_FD BIT(5)
-#define GMAC_ANE_HD BIT(6)
-#define GMAC_ANE_PSE GENMASK(8, 7)
-#define GMAC_ANE_PSE_SHIFT 7
-#define GMAC_ANE_RFE GENMASK(13, 12)
-#define GMAC_ANE_RFE_SHIFT 12
-#define GMAC_ANE_ACK BIT(14)
+#define GMAC_AN_CTRL_RAN BIT_U32(9) /* Restart Auto-Negotiation */
+#define GMAC_AN_CTRL_ANE BIT_U32(12) /* Auto-Negotiation Enable */
+#define GMAC_AN_CTRL_ELE BIT_U32(14) /* External Loopback Enable */
+#define GMAC_AN_CTRL_ECD BIT_U32(16) /* Enable Comma Detect */
+#define GMAC_AN_CTRL_LR BIT_U32(17) /* Lock to Reference */
+#define GMAC_AN_CTRL_SGMRAL BIT_U32(18) /* SGMII RAL Control */
struct stmmac_priv;
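
[Editor's note: the define block above switches BIT() to BIT_U32(). The difference is the type of the result: BIT() yields unsigned long (64-bit on most kernel targets), while BIT_U32() yields a u32, avoiding implicit widening in 32-bit register expressions. A sketch with illustrative stand-ins for the two macros.]

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's BIT()/BIT_U32() from <linux/bits.h> */
#define BIT(n)      (1UL << (n))
#define BIT_U32(n)  ((uint32_t)1 << (n))

int main(void)
{
	/* On LP64 this prints 8 vs 4: the u32 variant stays register-sized */
	printf("sizeof(BIT(12))=%zu sizeof(BIT_U32(12))=%zu\n",
	       sizeof(BIT(12)), sizeof(BIT_U32(12)));
	return 0;
}
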
@@ -62,40 +40,14 @@ phylink_pcs_to_stmmac_pcs(struct phylink_pcs *pcs)
return container_of(pcs, struct stmmac_pcs, pcs);
}
+void stmmac_integrated_pcs_irq(struct stmmac_priv *priv, u32 status,
+ struct stmmac_extra_stats *x);
+int stmmac_integrated_pcs_get_phy_intf_sel(struct phylink_pcs *pcs,
+ phy_interface_t interface);
int stmmac_integrated_pcs_init(struct stmmac_priv *priv, unsigned int offset,
u32 int_mask);
/**
- * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR
- * @ioaddr: IO registers pointer
- * @reg: Base address of the AN Control Register.
- * @intr_status: GMAC core interrupt status
- * @x: pointer to log these events as stats
- * Description: it is the ISR for PCS events: Auto-Negotiation Completed and
- * Link status.
- */
-static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg,
- unsigned int intr_status,
- struct stmmac_extra_stats *x)
-{
- u32 val = readl(ioaddr + GMAC_AN_STATUS(reg));
-
- if (intr_status & PCS_ANE_IRQ) {
- x->irq_pcs_ane_n++;
- if (val & GMAC_AN_STATUS_ANC)
- pr_info("stmmac_pcs: ANE process completed\n");
- }
-
- if (intr_status & PCS_LINK_IRQ) {
- x->irq_pcs_link_n++;
- if (val & GMAC_AN_STATUS_LS)
- pr_info("stmmac_pcs: Link Up\n");
- else
- pr_info("stmmac_pcs: Link Down\n");
- }
-}
-
-/**
* dwmac_ctrl_ane - To program the AN Control Register.
* @ioaddr: IO registers pointer
* @reg: Base address of the AN Control Register.
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 8979a50b5507..5c9fd91a1db9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -725,14 +725,6 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
stmmac_res->wol_irq = stmmac_res->irq;
}
- stmmac_res->lpi_irq =
- platform_get_irq_byname_optional(pdev, "eth_lpi");
- if (stmmac_res->lpi_irq < 0) {
- if (stmmac_res->lpi_irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
- }
-
stmmac_res->sfty_irq =
platform_get_irq_byname_optional(pdev, "sfty");
if (stmmac_res->sfty_irq < 0) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index e90a2c469b9a..08b60b7d5fd6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -2000,7 +2000,7 @@ void stmmac_selftest_run(struct net_device *dev,
}
/*
- * First tests will always be MAC / PHY loobpack. If any of
+ * First tests will always be MAC / PHY loopback. If any of
* them is not supported we abort earlier.
*/
if (ret) {
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 893216b0e08d..f035e3bbbef8 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -7302,6 +7302,13 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
return ret;
}
+static u32 niu_get_rx_ring_count(struct net_device *dev)
+{
+ struct niu *np = netdev_priv(dev);
+
+ return np->num_rx_rings;
+}
+
static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -7309,9 +7316,6 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = np->num_rx_rings;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
break;
@@ -7928,6 +7932,7 @@ static const struct ethtool_ops niu_ethtool_ops = {
.set_phys_id = niu_set_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .get_rx_ring_count = niu_get_rx_ring_count,
.get_rxfh_fields = niu_get_rxfh_fields,
.set_rxfh_fields = niu_set_rxfh_fields,
.get_link_ksettings = niu_get_link_ksettings,
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 48f0a96c0e9e..666998082998 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2551,6 +2551,9 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
goto err_out_clear_quattro;
}
+ /* The BIGMAC resource may report a bogus size; clamp it */
+ if ((op->resource[3].end - op->resource[3].start) >= BMAC_REG_SIZE)
+ op->resource[3].end = op->resource[3].start + BMAC_REG_SIZE - 1;
hp->bigmacregs = devm_platform_ioremap_resource(op, 3);
if (IS_ERR(hp->bigmacregs)) {
dev_err(&op->dev, "Cannot map BIGMAC registers.\n");
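
[Editor's note: the sunhme hunk clamps a firmware-reported register window that may be larger than the real BIGMAC block before mapping it. A sketch of the clamp; BMAC_REG_SIZE's value and the resource layout here are illustrative.]

#include <stdio.h>

#define BMAC_REG_SIZE 0x1000u  /* assumed register block size */

struct resource { unsigned long start, end; };

static void clamp_resource(struct resource *r, unsigned long size)
{
	/* Trim an over-sized window to the known block size before ioremap */
	if ((r->end - r->start) >= size)
		r->end = r->start + size - 1;
}

int main(void)
{
	struct resource r = { .start = 0x8000, .end = 0xffff }; /* bogus */

	clamp_resource(&r, BMAC_REG_SIZE);
	printf("mapped [%#lx-%#lx]\n", r.start, r.end); /* 0x8000-0x8fff */
	return 0;
}
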
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index fe5b2926d8ab..c60b04921c62 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -192,6 +192,7 @@ config TI_ICSSG_PRUETH
depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on HSR || !HSR
help
Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
This subsystem is available starting with the AM65 platform.
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 93c0a4d0e33a..6da50f4b7c2e 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_TI_PRUETH) += icssm-prueth.o
-icssm-prueth-y := icssm/icssm_prueth.o
+icssm-prueth-y := icssm/icssm_prueth.o icssm/icssm_prueth_switch.o icssm/icssm_switchdev.o
obj-$(CONFIG_TI_CPSW) += cpsw-common.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index c57497074ae6..98d60da7cc3b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -391,11 +391,8 @@ static int am65_cpsw_ethtool_op_begin(struct net_device *ndev)
static void am65_cpsw_ethtool_op_complete(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- int ret;
- ret = pm_runtime_put(common->dev);
- if (ret < 0 && ret != -EBUSY)
- dev_err(common->dev, "ethtool complete failed %d\n", ret);
+ pm_runtime_put(common->dev);
}
static void am65_cpsw_get_drvinfo(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index fbe35af615a6..bb969dd435b4 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -23,11 +23,6 @@
#define BITMASK(bits) (BIT(bits) - 1)
-#define ALE_VERSION_MAJOR(rev, mask) (((rev) >> 8) & (mask))
-#define ALE_VERSION_MINOR(rev) (rev & 0xff)
-#define ALE_VERSION_1R3 0x0103
-#define ALE_VERSION_1R4 0x0104
-
/* ALE Registers */
#define ALE_IDVER 0x00
#define ALE_STATUS 0x04
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index bdc4db0d169c..a43f75ee269e 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -374,11 +374,8 @@ int cpsw_ethtool_op_begin(struct net_device *ndev)
void cpsw_ethtool_op_complete(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int ret;
- ret = pm_runtime_put(priv->cpsw->dev);
- if (ret < 0)
- cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
+ pm_runtime_put(priv->cpsw->dev);
}
void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch)
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 21af0a10626a..7f42f58a4b03 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1472,7 +1472,7 @@ static void cpsw_unregister_ports(struct cpsw_common *cpsw)
for (i = 0; i < cpsw->data.slaves; i++) {
ndev = cpsw->slaves[i].ndev;
- if (!ndev)
+ if (!ndev || ndev->reg_state != NETREG_REGISTERED)
continue;
priv = netdev_priv(ndev);
@@ -1494,7 +1494,6 @@ static int cpsw_register_ports(struct cpsw_common *cpsw)
if (ret) {
dev_err(cpsw->dev,
"cpsw: err registering net device%d\n", i);
- cpsw->slaves[i].ndev = NULL;
break;
}
}
@@ -2003,7 +2002,7 @@ static int cpsw_probe(struct platform_device *pdev)
/* setup netdevs */
ret = cpsw_create_ports(cpsw);
if (ret)
- goto clean_unregister_netdev;
+ goto clean_cpts;
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
* MISC IRQs which are always kept disabled with this driver so
@@ -2017,14 +2016,14 @@ static int cpsw_probe(struct platform_device *pdev)
0, dev_name(dev), cpsw);
if (ret < 0) {
dev_err(dev, "error attaching irq (%d)\n", ret);
- goto clean_unregister_netdev;
+ goto clean_cpts;
}
ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
0, dev_name(dev), cpsw);
if (ret < 0) {
dev_err(dev, "error attaching irq (%d)\n", ret);
- goto clean_unregister_netdev;
+ goto clean_cpts;
}
if (!cpsw->cpts)
@@ -2034,7 +2033,7 @@ static int cpsw_probe(struct platform_device *pdev)
0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(dev, "error attaching misc irq (%d)\n", ret);
- goto clean_unregister_netdev;
+ goto clean_cpts;
}
/* Enable misc CPTS evnt_pend IRQ */
@@ -2043,7 +2042,7 @@ static int cpsw_probe(struct platform_device *pdev)
skip_cpts:
ret = cpsw_register_notifiers(cpsw);
if (ret)
- goto clean_unregister_netdev;
+ goto clean_cpts;
ret = cpsw_register_devlink(cpsw);
if (ret)
@@ -2065,8 +2064,6 @@ skip_cpts:
clean_unregister_notifiers:
cpsw_unregister_notifiers(cpsw);
-clean_unregister_netdev:
- cpsw_unregister_ports(cpsw);
clean_cpts:
cpts_release(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 090aa74d3ce7..0cf9dfe0fa36 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -1720,7 +1720,6 @@ void prueth_netdev_exit(struct prueth *prueth,
netif_napi_del(&emac->napi_rx);
pruss_release_mem_region(prueth->pruss, &emac->dram);
- destroy_workqueue(emac->cmd_wq);
free_netdev(emac->ndev);
prueth->emac[mac] = NULL;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index f65041662173..0939994c932f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1099,7 +1099,7 @@ static void emac_ndo_set_rx_mode(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
- queue_work(emac->cmd_wq, &emac->rx_mode_work);
+ schedule_work(&emac->rx_mode_work);
}
static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
@@ -1451,11 +1451,6 @@ static int prueth_netdev_init(struct prueth *prueth,
emac->port_id = port;
emac->xdp_prog = NULL;
emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
- emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
- if (!emac->cmd_wq) {
- ret = -ENOMEM;
- goto free_ndev;
- }
INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
@@ -1467,7 +1462,7 @@ static int prueth_netdev_init(struct prueth *prueth,
if (ret) {
dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
ret = -ENOMEM;
- goto free_wq;
+ goto free_ndev;
}
emac->tx_ch_num = 1;
@@ -1566,8 +1561,6 @@ static int prueth_netdev_init(struct prueth *prueth,
free:
pruss_release_mem_region(prueth->pruss, &emac->dram);
-free_wq:
- destroy_workqueue(emac->cmd_wq);
free_ndev:
emac->ndev = NULL;
prueth->emac[mac] = NULL;
@@ -2236,6 +2229,7 @@ netdev_unregister:
prueth->emac[i]->ndev->phydev = NULL;
}
unregister_netdev(prueth->registered_netdevs[i]);
+ disable_work_sync(&prueth->emac[i]->rx_mode_work);
}
netdev_exit:
@@ -2295,6 +2289,7 @@ static void prueth_remove(struct platform_device *pdev)
phy_disconnect(prueth->emac[i]->ndev->phydev);
prueth->emac[i]->ndev->phydev = NULL;
unregister_netdev(prueth->registered_netdevs[i]);
+ disable_work_sync(&prueth->emac[i]->rx_mode_work);
}
for (i = 0; i < PRUETH_NUM_MACS; i++) {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 10eadd356650..3d94fa5a7ac1 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -236,7 +236,6 @@ struct prueth_emac {
/* Mutex to serialize access to firmware command interface */
struct mutex cmd_lock;
struct work_struct rx_mode_work;
- struct workqueue_struct *cmd_wq;
struct pruss_mem_region dram;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 7bb4f0d850cc..b8115ca47082 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -783,11 +783,6 @@ static int prueth_netdev_init(struct prueth *prueth,
emac->prueth = prueth;
emac->ndev = ndev;
emac->port_id = port;
- emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
- if (!emac->cmd_wq) {
- ret = -ENOMEM;
- goto free_ndev;
- }
INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
@@ -798,7 +793,7 @@ static int prueth_netdev_init(struct prueth *prueth,
if (ret) {
dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
ret = -ENOMEM;
- goto free_wq;
+ goto free_ndev;
}
/* SR1.0 uses a dedicated high priority channel
@@ -883,8 +878,6 @@ static int prueth_netdev_init(struct prueth *prueth,
free:
pruss_release_mem_region(prueth->pruss, &emac->dram);
-free_wq:
- destroy_workqueue(emac->cmd_wq);
free_ndev:
emac->ndev = NULL;
prueth->emac[mac] = NULL;
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth.c b/drivers/net/ethernet/ti/icssm/icssm_prueth.c
index 293b7af04263..53bbd9290904 100644
--- a/drivers/net/ethernet/ti/icssm/icssm_prueth.c
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth.c
@@ -29,6 +29,8 @@
#include <net/pkt_cls.h>
#include "icssm_prueth.h"
+#include "icssm_prueth_switch.h"
+#include "icssm_vlan_mcast_filter_mmap.h"
#include "../icssg/icssg_mii_rt.h"
#include "../icssg/icss_iep.h"
@@ -145,7 +147,7 @@ static const struct prueth_queue_info queue_infos[][NUM_QUEUES] = {
},
};
-static const struct prueth_queue_desc queue_descs[][NUM_QUEUES] = {
+const struct prueth_queue_desc queue_descs[][NUM_QUEUES] = {
[PRUETH_PORT_QUEUE_HOST] = {
{ .rd_ptr = P0_Q1_BD_OFFSET, .wr_ptr = P0_Q1_BD_OFFSET, },
{ .rd_ptr = P0_Q2_BD_OFFSET, .wr_ptr = P0_Q2_BD_OFFSET, },
@@ -205,9 +207,9 @@ static void icssm_prueth_hostconfig(struct prueth *prueth)
static void icssm_prueth_mii_init(struct prueth *prueth)
{
+ u32 txcfg_reg, txcfg, txcfg2;
struct regmap *mii_rt;
u32 rxcfg_reg, rxcfg;
- u32 txcfg_reg, txcfg;
mii_rt = prueth->mii_rt;
@@ -235,17 +237,23 @@ static void icssm_prueth_mii_init(struct prueth *prueth)
(TX_START_DELAY << PRUSS_MII_RT_TXCFG_TX_START_DELAY_SHIFT) |
(TX_CLK_DELAY_100M << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT);
+ txcfg2 = txcfg;
+ if (!PRUETH_IS_EMAC(prueth))
+ txcfg2 |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+
/* Configuration of Port 0 Tx */
txcfg_reg = PRUSS_MII_RT_TXCFG0;
- regmap_write(mii_rt, txcfg_reg, txcfg);
+ regmap_write(mii_rt, txcfg_reg, txcfg2);
- txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+ txcfg2 = txcfg;
+ if (PRUETH_IS_EMAC(prueth))
+ txcfg2 |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
/* Configuration of Port 1 Tx */
txcfg_reg = PRUSS_MII_RT_TXCFG1;
- regmap_write(mii_rt, txcfg_reg, txcfg);
+ regmap_write(mii_rt, txcfg_reg, txcfg2);
txcfg_reg = PRUSS_MII_RT_RX_FRMS0;
@@ -292,7 +300,10 @@ static void icssm_prueth_hostinit(struct prueth *prueth)
icssm_prueth_clearmem(prueth, PRUETH_MEM_DRAM1);
/* Initialize host queues in shared RAM */
- icssm_prueth_hostconfig(prueth);
+ if (!PRUETH_IS_EMAC(prueth))
+ icssm_prueth_sw_hostconfig(prueth);
+ else
+ icssm_prueth_hostconfig(prueth);
/* Configure MII_RT */
icssm_prueth_mii_init(prueth);
@@ -499,19 +510,24 @@ static int icssm_prueth_tx_enqueue(struct prueth_emac *emac,
struct prueth_queue_desc __iomem *queue_desc;
const struct prueth_queue_info *txqueue;
struct net_device *ndev = emac->ndev;
+ struct prueth *prueth = emac->prueth;
unsigned int buffer_desc_count;
int free_blocks, update_block;
bool buffer_wrapped = false;
int write_block, read_block;
void *src_addr, *dst_addr;
int pkt_block_size;
+ void __iomem *sram;
void __iomem *dram;
int txport, pktlen;
u16 update_wr_ptr;
u32 wr_buf_desc;
void *ocmc_ram;
- dram = emac->prueth->mem[emac->dram].va;
+ if (!PRUETH_IS_EMAC(prueth))
+ dram = prueth->mem[PRUETH_MEM_DRAM1].va;
+ else
+ dram = emac->prueth->mem[emac->dram].va;
if (eth_skb_pad(skb)) {
if (netif_msg_tx_err(emac) && net_ratelimit())
netdev_err(ndev, "packet pad failed\n");
@@ -524,7 +540,10 @@ static int icssm_prueth_tx_enqueue(struct prueth_emac *emac,
pktlen = skb->len;
/* Get the tx queue */
queue_desc = emac->tx_queue_descs + queue_id;
- txqueue = &queue_infos[txport][queue_id];
+ if (!PRUETH_IS_EMAC(prueth))
+ txqueue = &sw_queue_infos[txport][queue_id];
+ else
+ txqueue = &queue_infos[txport][queue_id];
buffer_desc_count = icssm_get_buff_desc_count(txqueue);
@@ -590,7 +609,11 @@ static int icssm_prueth_tx_enqueue(struct prueth_emac *emac,
/* update first buffer descriptor */
wr_buf_desc = (pktlen << PRUETH_BD_LENGTH_SHIFT) &
PRUETH_BD_LENGTH_MASK;
- writel(wr_buf_desc, dram + readw(&queue_desc->wr_ptr));
+ sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ if (!PRUETH_IS_EMAC(prueth))
+ writel(wr_buf_desc, sram + readw(&queue_desc->wr_ptr));
+ else
+ writel(wr_buf_desc, dram + readw(&queue_desc->wr_ptr));
/* update the write pointer in this queue descriptor, the firmware
* polls for this change so this will signal the start of transmission
@@ -604,7 +627,6 @@ static int icssm_prueth_tx_enqueue(struct prueth_emac *emac,
void icssm_parse_packet_info(struct prueth *prueth, u32 buffer_descriptor,
struct prueth_packet_info *pkt_info)
{
- pkt_info->shadow = !!(buffer_descriptor & PRUETH_BD_SHADOW_MASK);
pkt_info->port = (buffer_descriptor & PRUETH_BD_PORT_MASK) >>
PRUETH_BD_PORT_SHIFT;
pkt_info->length = (buffer_descriptor & PRUETH_BD_LENGTH_MASK) >>
@@ -713,11 +735,19 @@ int icssm_emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr,
src_addr += actual_pkt_len;
}
+ if (PRUETH_IS_SWITCH(emac->prueth)) {
+ skb->offload_fwd_mark = READ_ONCE(emac->offload_fwd_mark);
+ if (!pkt_info->lookup_success)
+ icssm_prueth_sw_learn_fdb(emac, skb->data + ETH_ALEN);
+ }
+
skb_put(skb, actual_pkt_len);
/* send packet up the stack */
skb->protocol = eth_type_trans(skb, ndev);
+ local_bh_disable();
netif_receive_skb(skb);
+ local_bh_enable();
/* update stats */
emac->stats.rx_bytes += actual_pkt_len;
@@ -743,6 +773,7 @@ static int icssm_emac_rx_packets(struct prueth_emac *emac, int budget)
shared_ram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ /* Start and end queues are common to EMAC and RSTP */
start_queue = emac->rx_queue_start;
end_queue = emac->rx_queue_end;
@@ -753,8 +784,10 @@ static int icssm_emac_rx_packets(struct prueth_emac *emac, int budget)
/* search host queues for packets */
for (i = start_queue; i <= end_queue; i++) {
queue_desc = emac->rx_queue_descs + i;
- rxqueue = &queue_infos[PRUETH_PORT_HOST][i];
-
+ if (PRUETH_IS_SWITCH(emac->prueth))
+ rxqueue = &sw_queue_infos[PRUETH_PORT_HOST][i];
+ else
+ rxqueue = &queue_infos[PRUETH_PORT_HOST][i];
overflow_cnt = readb(&queue_desc->overflow_cnt);
if (overflow_cnt > 0) {
emac->stats.rx_over_errors += overflow_cnt;
@@ -879,6 +912,13 @@ static int icssm_emac_request_irqs(struct prueth_emac *emac)
return ret;
}
+/* Free switch-specific memory (the FDB table) */
+static void icssm_prueth_free_memory(struct prueth *prueth)
+{
+ if (PRUETH_IS_SWITCH(prueth))
+ icssm_prueth_sw_free_fdb_table(prueth);
+}
+
static void icssm_ptp_dram_init(struct prueth_emac *emac)
{
void __iomem *sram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
@@ -941,20 +981,38 @@ static int icssm_emac_ndo_open(struct net_device *ndev)
if (!prueth->emac_configured)
icssm_prueth_init_ethernet_mode(prueth);
- icssm_prueth_emac_config(emac);
+ /* configure the firmware for the current mode */
+ if (PRUETH_IS_SWITCH(prueth)) {
+ ret = icssm_prueth_sw_emac_config(emac);
+ if (ret)
+ return ret;
+
+ ret = icssm_prueth_sw_init_fdb_table(prueth);
+ if (ret)
+ return ret;
+ } else {
+ icssm_prueth_emac_config(emac);
+ }
if (!prueth->emac_configured) {
icssm_ptp_dram_init(emac);
ret = icss_iep_init(prueth->iep, NULL, NULL, 0);
if (ret) {
netdev_err(ndev, "Failed to initialize iep: %d\n", ret);
- goto iep_exit;
+ goto free_mem;
}
}
- ret = icssm_emac_set_boot_pru(emac, ndev);
- if (ret)
- goto iep_exit;
+ if (!PRUETH_IS_EMAC(prueth)) {
+ ret = icssm_prueth_sw_boot_prus(prueth, ndev);
+ if (ret)
+ goto iep_exit;
+ } else {
+ /* boot the PRU */
+ ret = icssm_emac_set_boot_pru(emac, ndev);
+ if (ret)
+ goto iep_exit;
+ }
ret = icssm_emac_request_irqs(emac);
if (ret)
@@ -969,19 +1027,25 @@ static int icssm_emac_ndo_open(struct net_device *ndev)
icssm_prueth_port_enable(emac, true);
prueth->emac_configured |= BIT(emac->port_id);
-
+ if (PRUETH_IS_SWITCH(prueth))
+ icssm_prueth_sw_set_stp_state(prueth, emac->port_id,
+ BR_STATE_LEARNING);
if (netif_msg_drv(emac))
dev_notice(&ndev->dev, "started\n");
return 0;
rproc_shutdown:
- rproc_shutdown(emac->pru);
+ if (!PRUETH_IS_EMAC(prueth))
+ icssm_prueth_sw_shutdown_prus(emac, ndev);
+ else
+ rproc_shutdown(emac->pru);
iep_exit:
if (!prueth->emac_configured)
icss_iep_exit(prueth->iep);
-
+free_mem:
+ icssm_prueth_free_memory(prueth);
return ret;
}
@@ -1010,17 +1074,83 @@ static int icssm_emac_ndo_stop(struct net_device *ndev)
hrtimer_cancel(&emac->tx_hrtimer);
/* stop the PRU */
- rproc_shutdown(emac->pru);
+ if (!PRUETH_IS_EMAC(prueth))
+ icssm_prueth_sw_shutdown_prus(emac, ndev);
+ else
+ rproc_shutdown(emac->pru);
/* free rx interrupts */
free_irq(emac->rx_irq, ndev);
+ /* free switch-specific memory (FDB table) */
+ icssm_prueth_free_memory(prueth);
+
+ if (!prueth->emac_configured)
+ icss_iep_exit(prueth->iep);
+
if (netif_msg_drv(emac))
dev_notice(&ndev->dev, "stopped\n");
return 0;
}
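+/* Stop every running port, switch the firmware type (Dual EMAC <-> Switch)
+ * and bring the previously running ports back up. Called when bridge
+ * membership changes.
+ */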
+static int icssm_prueth_change_mode(struct prueth *prueth,
+ enum pruss_ethtype mode)
+{
+ bool portstatus[PRUETH_NUM_MACS];
+ struct prueth_emac *emac;
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->emac[i]) {
+ dev_err(prueth->dev, "Unknown MAC port\n");
+ return -EINVAL;
+ }
+
+ emac = prueth->emac[i];
+ ndev = emac->ndev;
+
+ portstatus[i] = netif_running(ndev);
+ if (!portstatus[i])
+ continue;
+
+ ret = ndev->netdev_ops->ndo_stop(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d", ret);
+ return ret;
+ }
+ }
+
+ if (mode == PRUSS_ETHTYPE_EMAC || mode == PRUSS_ETHTYPE_SWITCH) {
+ prueth->eth_type = mode;
+ } else {
+ dev_err(prueth->dev, "unknown mode\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->emac[i]) {
+ dev_err(prueth->dev, "Unknown MAC port\n");
+ return -EINVAL;
+ }
+
+ emac = prueth->emac[i];
+ ndev = emac->ndev;
+
+ if (!portstatus[i])
+ continue;
+
+ ret = ndev->netdev_ops->ndo_open(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/* VLAN-tag PCP to priority queue map for EMAC/Switch/HSR/PRP used by driver
* Index is PCP val / 2.
* low - pcp 0..3 maps to Q4 for Host
@@ -1131,11 +1261,183 @@ static void icssm_emac_ndo_get_stats64(struct net_device *ndev,
stats->rx_length_errors = emac->stats.rx_length_errors;
}
+/* enable/disable MC filter */
+static void icssm_emac_mc_filter_ctrl(struct prueth_emac *emac, bool enable)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *mc_filter_ctrl;
+ void __iomem *ram;
+ u32 reg;
+
+ ram = prueth->mem[emac->dram].va;
+ mc_filter_ctrl = ram + ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_OFFSET;
+
+ if (enable)
+ reg = ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_ENABLED;
+ else
+ reg = ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_DISABLED;
+
+ writeb(reg, mc_filter_ctrl);
+}
+
+/* reset MC filter bins */
+static void icssm_emac_mc_filter_reset(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *mc_filter_tbl;
+ u32 mc_filter_tbl_base;
+ void __iomem *ram;
+
+ ram = prueth->mem[emac->dram].va;
+ mc_filter_tbl_base = ICSS_EMAC_FW_MULTICAST_FILTER_TABLE;
+
+ mc_filter_tbl = ram + mc_filter_tbl_base;
+ memset_io(mc_filter_tbl, 0, ICSS_EMAC_FW_MULTICAST_TABLE_SIZE_BYTES);
+}
+
+/* set MC filter hashmask */
+static void icssm_emac_mc_filter_hashmask
+ (struct prueth_emac *emac,
+ u8 mask[ICSS_EMAC_FW_MULTICAST_FILTER_MASK_SIZE_BYTES])
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *mc_filter_mask;
+ void __iomem *ram;
+
+ ram = prueth->mem[emac->dram].va;
+
+ mc_filter_mask = ram + ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OFFSET;
+ memcpy_toio(mc_filter_mask, mask,
+ ICSS_EMAC_FW_MULTICAST_FILTER_MASK_SIZE_BYTES);
+}
+
+static void icssm_emac_mc_filter_bin_update(struct prueth_emac *emac, u8 hash,
+ u8 val)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *mc_filter_tbl;
+ void __iomem *ram;
+
+ ram = prueth->mem[emac->dram].va;
+
+ mc_filter_tbl = ram + ICSS_EMAC_FW_MULTICAST_FILTER_TABLE;
+ writeb(val, mc_filter_tbl + hash);
+}
+
+void icssm_emac_mc_filter_bin_allow(struct prueth_emac *emac, u8 hash)
+{
+ icssm_emac_mc_filter_bin_update
+ (emac, hash,
+ ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_ALLOWED);
+}
+
+void icssm_emac_mc_filter_bin_disallow(struct prueth_emac *emac, u8 hash)
+{
+ icssm_emac_mc_filter_bin_update
+ (emac, hash,
+ ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_NOT_ALLOWED);
+}
+
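+/* MC filter hash: XOR of the masked MAC octets, selecting one of 256 bins.
+ * e.g. with the default ff:ff:ff:ff:ff:ff mask, the mDNS address
+ * 01:00:5e:00:00:fb hashes to bin 0xa4.
+ */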
+u8 icssm_emac_get_mc_hash(u8 *mac, u8 *mask)
+{
+ u8 hash;
+ int j;
+
+ for (j = 0, hash = 0; j < ETH_ALEN; j++)
+ hash ^= (mac[j] & mask[j]);
+
+ return hash;
+}
+
+/**
+ * icssm_emac_ndo_set_rx_mode - EMAC set receive mode function
+ * @ndev: The EMAC network adapter
+ *
+ * Called when the system wants to set the receive mode of the device.
+ */
+static void icssm_emac_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ bool promisc = ndev->flags & IFF_PROMISC;
+ struct netdev_hw_addr *ha;
+ struct prueth *prueth;
+ unsigned long flags;
+ void __iomem *sram;
+ u32 mask, reg;
+ u8 hash;
+
+ prueth = emac->prueth;
+ sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ reg = readl(sram + EMAC_PROMISCUOUS_MODE_OFFSET);
+
+ /* It is a shared table. So lock the access */
+ spin_lock_irqsave(&emac->addr_lock, flags);
+
+ /* Disable and reset the multicast filter; this allows all multicast */
+ icssm_emac_mc_filter_ctrl(emac, false);
+ icssm_emac_mc_filter_reset(emac);
+ icssm_emac_mc_filter_hashmask(emac, emac->mc_filter_mask);
+
+ if (PRUETH_IS_EMAC(prueth)) {
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ mask = EMAC_P1_PROMISCUOUS_BIT;
+ break;
+ case PRUETH_PORT_MII1:
+ mask = EMAC_P2_PROMISCUOUS_BIT;
+ break;
+ default:
+ netdev_err(ndev, "%s: invalid port\n", __func__);
+ goto unlock;
+ }
+
+ if (promisc) {
+ /* Enable promiscuous mode */
+ reg |= mask;
+ } else {
+ /* Disable promiscuous mode */
+ reg &= ~mask;
+ }
+
+ writel(reg, sram + EMAC_PROMISCUOUS_MODE_OFFSET);
+
+ if (promisc)
+ goto unlock;
+ }
+
+ if ((ndev->flags & IFF_ALLMULTI) && !PRUETH_IS_SWITCH(prueth))
+ goto unlock;
+
+ icssm_emac_mc_filter_ctrl(emac, true); /* all multicast blocked */
+
+ if (netdev_mc_empty(ndev))
+ goto unlock;
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ hash = icssm_emac_get_mc_hash(ha->addr, emac->mc_filter_mask);
+ icssm_emac_mc_filter_bin_allow(emac, hash);
+ }
+
+ /* Add bridge device's MC addresses as well */
+ if (prueth->hw_bridge_dev) {
+ netdev_for_each_mc_addr(ha, prueth->hw_bridge_dev) {
+ hash = icssm_emac_get_mc_hash(ha->addr,
+ emac->mc_filter_mask);
+ icssm_emac_mc_filter_bin_allow(emac, hash);
+ }
+ }
+
+unlock:
+ spin_unlock_irqrestore(&emac->addr_lock, flags);
+}
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = icssm_emac_ndo_open,
.ndo_stop = icssm_emac_ndo_stop,
.ndo_start_xmit = icssm_emac_ndo_start_xmit,
.ndo_get_stats64 = icssm_emac_ndo_get_stats64,
+ .ndo_set_rx_mode = icssm_emac_ndo_set_rx_mode,
};
/* get emac_port corresponding to eth_node name */
@@ -1188,6 +1490,7 @@ static enum hrtimer_restart icssm_emac_tx_timer_callback(struct hrtimer *timer)
static int icssm_prueth_netdev_init(struct prueth *prueth,
struct device_node *eth_node)
{
+ const struct prueth_private_data *fw_data = prueth->fw_data;
struct prueth_emac *emac;
struct net_device *ndev;
enum prueth_port port;
@@ -1212,6 +1515,7 @@ static int icssm_prueth_netdev_init(struct prueth *prueth,
emac->prueth = prueth;
emac->ndev = ndev;
emac->port_id = port;
+ memset(&emac->mc_filter_mask[0], 0xff, ETH_ALEN);
/* by default eth_type is EMAC */
switch (port) {
@@ -1247,6 +1551,9 @@ static int icssm_prueth_netdev_init(struct prueth *prueth,
goto free;
}
+ spin_lock_init(&emac->lock);
+ spin_lock_init(&emac->addr_lock);
+
/* get mac address from DT and set private and netdev addr */
ret = of_get_ethdev_address(eth_node, ndev);
if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -1274,6 +1581,14 @@ static int icssm_prueth_netdev_init(struct prueth *prueth,
phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+ /* Protocol switching:
+ * advertise L2 firmware offload so the port can join a bridge
+ */
+ if (fw_data->support_switch) {
+ ndev->features |= NETIF_F_HW_L2FW_DOFFLOAD;
+ ndev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
+ }
+
ndev->dev.of_node = eth_node;
ndev->netdev_ops = &emac_netdev_ops;
@@ -1310,6 +1625,169 @@ static void icssm_prueth_netdev_exit(struct prueth *prueth,
prueth->emac[mac] = NULL;
}
+bool icssm_prueth_sw_port_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops != &emac_netdev_ops)
+ return false;
+
+ if (ndev->features & NETIF_F_HW_L2FW_DOFFLOAD)
+ return true;
+
+ return false;
+}
+
+static int icssm_prueth_port_offload_fwd_mark_update(struct prueth *prueth)
+{
+ int set_val = 0;
+ int i, ret = 0;
+ u8 all_slaves;
+
+ all_slaves = BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1);
+
+ if (prueth->br_members == all_slaves)
+ set_val = 1;
+
+ dev_dbg(prueth->dev, "set offload_fwd_mark %d, mbrs=0x%x\n",
+ set_val, prueth->br_members);
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (prueth->emac[i])
+ WRITE_ONCE(prueth->emac[i]->offload_fwd_mark, set_val);
+ }
+
+ /* A bridge was created; load the switch firmware
+ * if we are not already in that mode
+ */
+ if (set_val && !PRUETH_IS_SWITCH(prueth)) {
+ ret = icssm_prueth_change_mode(prueth, PRUSS_ETHTYPE_SWITCH);
+ if (ret < 0)
+ dev_err(prueth->dev, "Failed to enable Switch mode\n");
+ else
+ dev_info(prueth->dev,
+ "TI PRU ethernet now in Switch mode\n");
+ }
+
+ /* The bridge was deleted; switch back to Dual EMAC mode */
+ if (!prueth->br_members && !PRUETH_IS_EMAC(prueth)) {
+ ret = icssm_prueth_change_mode(prueth, PRUSS_ETHTYPE_EMAC);
+ if (ret < 0)
+ dev_err(prueth->dev, "Failed to enable Dual EMAC mode\n");
+ else
+ dev_info(prueth->dev,
+ "TI PRU ethernet now in Dual EMAC mode\n");
+ }
+
+ return ret;
+}
+
+static int icssm_prueth_ndev_port_link(struct net_device *ndev,
+ struct net_device *br_ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ unsigned long flags;
+ int ret = 0;
+
+ dev_dbg(prueth->dev, "%s: br_mbrs=0x%x %s\n",
+ __func__, prueth->br_members, ndev->name);
+
+ spin_lock_irqsave(&emac->addr_lock, flags);
+
+ if (!prueth->br_members) {
+ prueth->hw_bridge_dev = br_ndev;
+ } else {
+ /* This is adding the port to a second bridge,
+ * which is unsupported
+ */
+ if (prueth->hw_bridge_dev != br_ndev) {
+ spin_unlock_irqrestore(&emac->addr_lock, flags);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ prueth->br_members |= BIT(emac->port_id);
+
+ spin_unlock_irqrestore(&emac->addr_lock, flags);
+
+ ret = icssm_prueth_port_offload_fwd_mark_update(prueth);
+
+ return ret;
+}
+
+static int icssm_prueth_ndev_port_unlink(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ unsigned long flags;
+ int ret = 0;
+
+ dev_dbg(prueth->dev, "emac_sw_ndev_port_unlink\n");
+
+ spin_lock_irqsave(&emac->addr_lock, flags);
+
+ prueth->br_members &= ~BIT(emac->port_id);
+
+ spin_unlock_irqrestore(&emac->addr_lock, flags);
+
+ ret = icssm_prueth_port_offload_fwd_mark_update(prueth);
+
+ spin_lock_irqsave(&emac->addr_lock, flags);
+
+ if (!prueth->br_members)
+ prueth->hw_bridge_dev = NULL;
+
+ spin_unlock_irqrestore(&emac->addr_lock, flags);
+
+ return ret;
+}
+
+static int icssm_prueth_ndev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (!icssm_prueth_sw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = icssm_prueth_ndev_port_link
+ (ndev, info->upper_dev);
+ else
+ ret = icssm_prueth_ndev_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static int icssm_prueth_register_notifiers(struct prueth *prueth)
+{
+ int ret = 0;
+
+ prueth->prueth_netdevice_nb.notifier_call = icssm_prueth_ndev_event;
+ ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
+ if (ret) {
+ dev_err(prueth->dev,
+ "register netdevice notifier failed ret: %d\n", ret);
+ return ret;
+ }
+
+ ret = icssm_prueth_sw_register_notifiers(prueth);
+ if (ret)
+ unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
+
+ return ret;
+}
+
static int icssm_prueth_probe(struct platform_device *pdev)
{
struct device_node *eth0_node = NULL, *eth1_node = NULL;
@@ -1529,6 +2007,12 @@ static int icssm_prueth_probe(struct platform_device *pdev)
prueth->emac[PRUETH_MAC1]->ndev;
}
+ ret = icssm_prueth_register_notifiers(prueth);
+ if (ret) {
+ dev_err(dev, "can't register switchdev notifiers");
+ goto netdev_unregister;
+ }
+
dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
(!eth0_node || !eth1_node) ? "single" : "dual");
@@ -1589,6 +2073,9 @@ static void icssm_prueth_remove(struct platform_device *pdev)
struct device_node *eth_node;
int i;
+ unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
+ icssm_prueth_sw_unregister_notifiers(prueth);
+
for (i = 0; i < PRUETH_NUM_MACS; i++) {
if (!prueth->registered_netdevs[i])
continue;
@@ -1688,11 +2175,16 @@ static struct prueth_private_data am335x_prueth_pdata = {
.fw_pru[PRUSS_PRU0] = {
.fw_name[PRUSS_ETHTYPE_EMAC] =
"ti-pruss/am335x-pru0-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am335x-pru0-prusw-fw.elf",
},
.fw_pru[PRUSS_PRU1] = {
.fw_name[PRUSS_ETHTYPE_EMAC] =
"ti-pruss/am335x-pru1-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am335x-pru1-prusw-fw.elf",
},
+ .support_switch = true,
};
/* AM437x SoC-specific firmware data */
@@ -1701,11 +2193,16 @@ static struct prueth_private_data am437x_prueth_pdata = {
.fw_pru[PRUSS_PRU0] = {
.fw_name[PRUSS_ETHTYPE_EMAC] =
"ti-pruss/am437x-pru0-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am437x-pru0-prusw-fw.elf",
},
.fw_pru[PRUSS_PRU1] = {
.fw_name[PRUSS_ETHTYPE_EMAC] =
"ti-pruss/am437x-pru1-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am437x-pru1-prusw-fw.elf",
},
+ .support_switch = true,
};
/* AM57xx SoC-specific firmware data */
@@ -1714,11 +2211,17 @@ static struct prueth_private_data am57xx_prueth_pdata = {
.fw_pru[PRUSS_PRU0] = {
.fw_name[PRUSS_ETHTYPE_EMAC] =
"ti-pruss/am57xx-pru0-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am57xx-pru0-prusw-fw.elf",
},
.fw_pru[PRUSS_PRU1] = {
.fw_name[PRUSS_ETHTYPE_EMAC] =
"ti-pruss/am57xx-pru1-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am57xx-pru1-prusw-fw.elf",
},
+ .support_switch = true,
};
static const struct of_device_id prueth_dt_match[] = {
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth.h b/drivers/net/ethernet/ti/icssm/icssm_prueth.h
index 8e7e0af08144..d5b49b462c24 100644
--- a/drivers/net/ethernet/ti/icssm/icssm_prueth.h
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth.h
@@ -15,6 +15,7 @@
#include "icssm_switch.h"
#include "icssm_prueth_ptp.h"
+#include "icssm_prueth_fdb_tbl.h"
/* ICSSM size of redundancy tag */
#define ICSSM_LRE_TAG_SIZE 6
@@ -181,10 +182,12 @@ enum pruss_device {
* struct prueth_private_data - PRU Ethernet private data
* @driver_data: PRU Ethernet device name
* @fw_pru: firmware names to be used for PRUSS ethernet usecases
+ * @support_switch: boolean to indicate if switch is enabled
*/
struct prueth_private_data {
enum pruss_device driver_data;
const struct prueth_firmware fw_pru[PRUSS_NUM_PRUS];
+ bool support_switch;
};
struct prueth_emac_stats {
@@ -221,15 +224,18 @@ struct prueth_emac {
const char *phy_id;
u32 msg_enable;
u8 mac_addr[6];
+ unsigned char mc_filter_mask[ETH_ALEN]; /* for multicast filtering */
phy_interface_t phy_if;
/* spin lock used to protect
* during link configuration
*/
spinlock_t lock;
+ spinlock_t addr_lock; /* serialize access to VLAN/MC filter table */
struct hrtimer tx_hrtimer;
struct prueth_emac_stats stats;
+ int offload_fwd_mark;
};
struct prueth {
@@ -248,15 +254,27 @@ struct prueth {
struct prueth_emac *emac[PRUETH_NUM_MACS];
struct net_device *registered_netdevs[PRUETH_NUM_MACS];
+ struct net_device *hw_bridge_dev;
+ struct fdb_tbl *fdb_tbl;
+
+ struct notifier_block prueth_netdevice_nb;
+ struct notifier_block prueth_switchdev_nb;
+ struct notifier_block prueth_switchdev_bl_nb;
+
unsigned int eth_type;
size_t ocmc_ram_size;
u8 emac_configured;
+ u8 br_members;
};
+extern const struct prueth_queue_desc queue_descs[][NUM_QUEUES];
+
void icssm_parse_packet_info(struct prueth *prueth, u32 buffer_descriptor,
struct prueth_packet_info *pkt_info);
int icssm_emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr,
struct prueth_packet_info *pkt_info,
const struct prueth_queue_info *rxqueue);
-
+void icssm_emac_mc_filter_bin_allow(struct prueth_emac *emac, u8 hash);
+void icssm_emac_mc_filter_bin_disallow(struct prueth_emac *emac, u8 hash);
+u8 icssm_emac_get_mc_hash(u8 *mac, u8 *mask);
#endif /* __NET_TI_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth_fdb_tbl.h b/drivers/net/ethernet/ti/icssm/icssm_prueth_fdb_tbl.h
new file mode 100644
index 000000000000..9089259d96ea
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth_fdb_tbl.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021 Texas Instruments Incorporated - https://www.ti.com */
+#ifndef __NET_TI_PRUSS_FDB_TBL_H
+#define __NET_TI_PRUSS_FDB_TBL_H
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include "icssm_prueth.h"
+
+/* 4 bytes */
+struct fdb_index_tbl_entry {
+ /* Bucket Table index of first Bucket with this MAC address */
+ u16 bucket_idx;
+ u16 bucket_entries; /* Number of entries in this bucket */
+};
+
+/* 4 * 256 = 1024 = 0x400 bytes */
+struct fdb_index_array {
+ struct fdb_index_tbl_entry index_tbl_entry[FDB_INDEX_TBL_MAX_ENTRIES];
+};
+
+/* 10 bytes */
+struct fdb_mac_tbl_entry {
+ u8 mac[ETH_ALEN];
+ u16 age;
+ u8 port; /* 0 based: 0=port1, 1=port2 */
+ union {
+ struct {
+ u8 is_static:1;
+ u8 active:1;
+ };
+ u8 flags;
+ };
+};
+
+/* 10 * 256 = 2560 = 0xa00 bytes */
+struct fdb_mac_tbl_array {
+ struct fdb_mac_tbl_entry mac_tbl_entry[FDB_MAC_TBL_MAX_ENTRIES];
+};
+
+/* 1 byte */
+struct fdb_stp_config {
+ u8 state; /* per-port STP state (defined in FW header) */
+};
+
+/* 1 byte */
+struct fdb_flood_config {
+ u8 host_flood_enable:1;
+ u8 port1_flood_enable:1;
+ u8 port2_flood_enable:1;
+};
+
+/* 2 bytes */
+struct fdb_arbitration {
+ u8 host_lock;
+ u8 pru_locks;
+};
+
+struct fdb_tbl {
+ /* fdb index table */
+ struct fdb_index_array __iomem *index_a;
+ /* fdb MAC table */
+ struct fdb_mac_tbl_array __iomem *mac_tbl_a;
+ /* port 1 stp config */
+ struct fdb_stp_config __iomem *port1_stp_cfg;
+ /* port 2 stp config */
+ struct fdb_stp_config __iomem *port2_stp_cfg;
+ /* per-port flood enable */
+ struct fdb_flood_config __iomem *flood_enable_flags;
+ /* fdb locking mechanism */
+ struct fdb_arbitration __iomem *locks;
+ /* total number of entries in hash table */
+ u16 total_entries;
+};
+
+#endif
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c b/drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c
new file mode 100644
index 000000000000..07c08564386e
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c
@@ -0,0 +1,1065 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments PRUETH Switch Driver
+ *
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/remoteproc.h>
+#include <net/switchdev.h>
+#include "icssm_prueth.h"
+#include "icssm_prueth_switch.h"
+#include "icssm_prueth_fdb_tbl.h"
+
+#define FDB_IDX_TBL_ENTRY(n) (&prueth->fdb_tbl->index_a->index_tbl_entry[n])
+
+#define FDB_MAC_TBL_ENTRY(n) (&prueth->fdb_tbl->mac_tbl_a->mac_tbl_entry[n])
+
+#define FLAG_IS_STATIC BIT(0)
+#define FLAG_ACTIVE BIT(1)
+
+#define FDB_LEARN 1
+#define FDB_PURGE 2
+
+struct icssm_prueth_sw_fdb_work {
+ netdevice_tracker ndev_tracker;
+ struct work_struct work;
+ struct prueth_emac *emac;
+ u8 addr[ETH_ALEN];
+ int event;
+};
+
+const struct prueth_queue_info sw_queue_infos[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ [PRUETH_QUEUE1] = {
+ P0_Q1_BUFFER_OFFSET,
+ P0_QUEUE_DESC_OFFSET,
+ P0_Q1_BD_OFFSET,
+ P0_Q1_BD_OFFSET + ((HOST_QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P0_Q2_BUFFER_OFFSET,
+ P0_QUEUE_DESC_OFFSET + 8,
+ P0_Q2_BD_OFFSET,
+ P0_Q2_BD_OFFSET + ((HOST_QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P0_Q3_BUFFER_OFFSET,
+ P0_QUEUE_DESC_OFFSET + 16,
+ P0_Q3_BD_OFFSET,
+ P0_Q3_BD_OFFSET + ((HOST_QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P0_Q4_BUFFER_OFFSET,
+ P0_QUEUE_DESC_OFFSET + 24,
+ P0_Q4_BD_OFFSET,
+ P0_Q4_BD_OFFSET + ((HOST_QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ [PRUETH_QUEUE1] = {
+ P1_Q1_BUFFER_OFFSET,
+ P1_Q1_BUFFER_OFFSET +
+ ((QUEUE_1_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q1_BD_OFFSET,
+ P1_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P1_Q2_BUFFER_OFFSET,
+ P1_Q2_BUFFER_OFFSET +
+ ((QUEUE_2_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q2_BD_OFFSET,
+ P1_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P1_Q3_BUFFER_OFFSET,
+ P1_Q3_BUFFER_OFFSET +
+ ((QUEUE_3_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q3_BD_OFFSET,
+ P1_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P1_Q4_BUFFER_OFFSET,
+ P1_Q4_BUFFER_OFFSET +
+ ((QUEUE_4_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q4_BD_OFFSET,
+ P1_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ [PRUETH_QUEUE1] = {
+ P2_Q1_BUFFER_OFFSET,
+ P2_Q1_BUFFER_OFFSET +
+ ((QUEUE_1_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q1_BD_OFFSET,
+ P2_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P2_Q2_BUFFER_OFFSET,
+ P2_Q2_BUFFER_OFFSET +
+ ((QUEUE_2_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q2_BD_OFFSET,
+ P2_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P2_Q3_BUFFER_OFFSET,
+ P2_Q3_BUFFER_OFFSET +
+ ((QUEUE_3_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q3_BD_OFFSET,
+ P2_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P2_Q4_BUFFER_OFFSET,
+ P2_Q4_BUFFER_OFFSET +
+ ((QUEUE_4_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q4_BD_OFFSET,
+ P2_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+};
+
+static const struct prueth_queue_info rx_queue_infos[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ [PRUETH_QUEUE1] = {
+ P0_Q1_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET,
+ P0_Q1_BD_OFFSET,
+ P0_Q1_BD_OFFSET + ((HOST_QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P0_Q2_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 8,
+ P0_Q2_BD_OFFSET,
+ P0_Q2_BD_OFFSET + ((HOST_QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P0_Q3_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 16,
+ P0_Q3_BD_OFFSET,
+ P0_Q3_BD_OFFSET + ((HOST_QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P0_Q4_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 24,
+ P0_Q4_BD_OFFSET,
+ P0_Q4_BD_OFFSET + ((HOST_QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ [PRUETH_QUEUE1] = {
+ P1_Q1_BUFFER_OFFSET,
+ P1_QUEUE_DESC_OFFSET,
+ P1_Q1_BD_OFFSET,
+ P1_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P1_Q2_BUFFER_OFFSET,
+ P1_QUEUE_DESC_OFFSET + 8,
+ P1_Q2_BD_OFFSET,
+ P1_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P1_Q3_BUFFER_OFFSET,
+ P1_QUEUE_DESC_OFFSET + 16,
+ P1_Q3_BD_OFFSET,
+ P1_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P1_Q4_BUFFER_OFFSET,
+ P1_QUEUE_DESC_OFFSET + 24,
+ P1_Q4_BD_OFFSET,
+ P1_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ [PRUETH_QUEUE1] = {
+ P2_Q1_BUFFER_OFFSET,
+ P2_QUEUE_DESC_OFFSET,
+ P2_Q1_BD_OFFSET,
+ P2_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P2_Q2_BUFFER_OFFSET,
+ P2_QUEUE_DESC_OFFSET + 8,
+ P2_Q2_BD_OFFSET,
+ P2_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P2_Q3_BUFFER_OFFSET,
+ P2_QUEUE_DESC_OFFSET + 16,
+ P2_Q3_BD_OFFSET,
+ P2_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P2_Q4_BUFFER_OFFSET,
+ P2_QUEUE_DESC_OFFSET + 24,
+ P2_Q4_BD_OFFSET,
+ P2_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+};
+
+void icssm_prueth_sw_free_fdb_table(struct prueth *prueth)
+{
+ if (prueth->emac_configured)
+ return;
+
+ kfree(prueth->fdb_tbl);
+ prueth->fdb_tbl = NULL;
+}
+
+void icssm_prueth_sw_fdb_tbl_init(struct prueth *prueth)
+{
+ struct fdb_tbl *t = prueth->fdb_tbl;
+ void __iomem *sram_base;
+ u8 val;
+
+ sram_base = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ t->index_a = sram_base + V2_1_FDB_TBL_OFFSET;
+ t->mac_tbl_a = sram_base + FDB_MAC_TBL_OFFSET;
+ t->port1_stp_cfg = sram_base + FDB_PORT1_STP_CFG_OFFSET;
+ t->port2_stp_cfg = sram_base + FDB_PORT2_STP_CFG_OFFSET;
+ t->flood_enable_flags = sram_base + FDB_FLOOD_ENABLE_FLAGS_OFFSET;
+ t->locks = sram_base + FDB_LOCKS_OFFSET;
+
+ val = readb(t->flood_enable_flags);
+ /* host_flood_enable = 1 */
+ val |= BIT(0);
+ /* port1_flood_enable = 1 */
+ val |= BIT(1);
+ /* port2_flood_enable = 1 */
+ val |= BIT(2);
+ writeb(val, t->flood_enable_flags);
+
+ writeb(0, &t->locks->host_lock);
+ t->total_entries = 0;
+}
+
+static u8 icssm_pru_lock_done(struct fdb_tbl *fdb_tbl)
+{
+ return readb(&fdb_tbl->locks->pru_locks);
+}
+
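+/* FDB arbitration: the host takes host_lock and then polls until both PRUs
+ * have dropped their locks (pru_locks reads 0). On timeout the host lock is
+ * released again so the firmware is never blocked indefinitely.
+ */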
+static int icssm_prueth_sw_fdb_spin_lock(struct fdb_tbl *fdb_tbl)
+{
+ u8 done;
+ int ret;
+
+ /* Take the host lock */
+ writeb(1, &fdb_tbl->locks->host_lock);
+
+ /* Wait for the PRUs to release their locks */
+ ret = read_poll_timeout(icssm_pru_lock_done, done, done == 0,
+ 1, 10, false, fdb_tbl);
+ if (ret == -ETIMEDOUT)
+ writeb(0, &fdb_tbl->locks->host_lock);
+
+ return ret;
+}
+
+static void icssm_prueth_sw_fdb_spin_unlock(struct fdb_tbl *fdb_tbl)
+{
+ writeb(0, &fdb_tbl->locks->host_lock);
+}
+
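+/* 8-bit FDB hash: XOR of all six MAC octets, indexing the 256-entry
+ * bucket table
+ */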
+static u8 icssm_prueth_sw_fdb_hash(const u8 *mac)
+{
+ return (mac[0] ^ mac[1] ^ mac[2] ^ mac[3] ^ mac[4] ^ mac[5]);
+}
+
+static int
+icssm_prueth_sw_fdb_search(struct fdb_mac_tbl_array __iomem *mac_tbl,
+ struct fdb_index_tbl_entry __iomem *bucket_info,
+ const u8 *mac)
+{
+ unsigned int bucket_entries, mac_tbl_idx;
+ u8 tmp_mac[ETH_ALEN];
+ int i;
+
+ mac_tbl_idx = readw(&bucket_info->bucket_idx);
+ bucket_entries = readw(&bucket_info->bucket_entries);
+ for (i = 0; i < bucket_entries; i++, mac_tbl_idx++) {
+ memcpy_fromio(tmp_mac, mac_tbl->mac_tbl_entry[mac_tbl_idx].mac,
+ ETH_ALEN);
+ if (ether_addr_equal(mac, tmp_mac))
+ return mac_tbl_idx;
+ }
+
+ return -ENODATA;
+}
+
+static int icssm_prueth_sw_fdb_find_open_slot(struct fdb_tbl *fdb_tbl)
+{
+ unsigned int i;
+ u8 flags;
+
+ for (i = 0; i < FDB_MAC_TBL_MAX_ENTRIES; i++) {
+ flags = readb(&fdb_tbl->mac_tbl_a->mac_tbl_entry[i].flags);
+ if (!(flags & FLAG_ACTIVE))
+ break;
+ }
+
+ return i;
+}
+
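+/* Bucket entries are stored contiguously in ascending MAC order. Return the
+ * MAC table index at which @mac must be inserted to keep that order, or
+ * -EEXIST if the address is already present (port/age are refreshed instead).
+ */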
+static int
+icssm_prueth_sw_find_fdb_insert(struct fdb_tbl *fdb, struct prueth *prueth,
+ struct fdb_index_tbl_entry __iomem *bkt_info,
+ const u8 *mac, const u8 port)
+{
+ struct fdb_mac_tbl_array __iomem *mac_tbl = fdb->mac_tbl_a;
+ unsigned int bucket_entries, mac_tbl_idx;
+ struct fdb_mac_tbl_entry __iomem *e;
+ u8 mac_from_hw[ETH_ALEN];
+ s8 cmp;
+ int i;
+
+ mac_tbl_idx = readw(&bkt_info->bucket_idx);
+ bucket_entries = readw(&bkt_info->bucket_entries);
+
+ for (i = 0; i < bucket_entries; i++, mac_tbl_idx++) {
+ e = &mac_tbl->mac_tbl_entry[mac_tbl_idx];
+ memcpy_fromio(mac_from_hw, e->mac, ETH_ALEN);
+ cmp = memcmp(mac, mac_from_hw, ETH_ALEN);
+ if (cmp < 0) {
+ return mac_tbl_idx;
+ } else if (cmp == 0) {
+ if (readb(&e->port) != port) {
+ /* MAC is already in FDB, only port is
+ * different. So just update the port.
+ * Note: total_entries and bucket_entries
+ * remain the same.
+ */
+ writeb(port, &e->port);
+ }
+
+ /* MAC and port are the same, touch the fdb */
+ writew(0, &e->age);
+ return -EEXIST;
+ }
+ }
+
+ return mac_tbl_idx;
+}
+
+static int
+icssm_prueth_sw_fdb_empty_slot_left(struct fdb_mac_tbl_array __iomem *mac_tbl,
+ unsigned int mac_tbl_idx)
+{
+ u8 flags;
+ int i;
+
+ for (i = mac_tbl_idx - 1; i >= 0; i--) {
+ flags = readb(&mac_tbl->mac_tbl_entry[i].flags);
+ if (!(flags & FLAG_ACTIVE))
+ break;
+ }
+
+ return i;
+}
+
+static int
+icssm_prueth_sw_fdb_empty_slot_right(struct fdb_mac_tbl_array __iomem *mac_tbl,
+ unsigned int mac_tbl_idx)
+{
+ u8 flags;
+ int i;
+
+ for (i = mac_tbl_idx; i < FDB_MAC_TBL_MAX_ENTRIES; i++) {
+ flags = readb(&mac_tbl->mac_tbl_entry[i].flags);
+ if (!(flags & FLAG_ACTIVE))
+ return i;
+ }
+
+ return -1;
+}
+
+static void icssm_prueth_sw_fdb_move_range_left(struct prueth *prueth,
+ u16 left, u16 right)
+{
+ struct fdb_mac_tbl_entry entry;
+ u32 sz = 0;
+ u16 i;
+
+ sz = sizeof(struct fdb_mac_tbl_entry);
+ for (i = left; i < right; i++) {
+ memcpy_fromio(&entry, FDB_MAC_TBL_ENTRY(i + 1), sz);
+ memcpy_toio(FDB_MAC_TBL_ENTRY(i), &entry, sz);
+ }
+}
+
+static void icssm_prueth_sw_fdb_move_range_right(struct prueth *prueth,
+ u16 left, u16 right)
+{
+ struct fdb_mac_tbl_entry entry;
+ u32 sz = 0;
+ u16 i;
+
+ sz = sizeof(struct fdb_mac_tbl_entry);
+ for (i = right; i > left; i--) {
+ memcpy_fromio(&entry, FDB_MAC_TBL_ENTRY(i - 1), sz);
+ memcpy_toio(FDB_MAC_TBL_ENTRY(i), &entry, sz);
+ }
+}
+
+static void icssm_prueth_sw_fdb_update_index_tbl(struct prueth *prueth,
+ u16 left, u16 right)
+{
+ unsigned int hash, hash_prev;
+ u8 mac[ETH_ALEN];
+ unsigned int i;
+
+ /* To ensure we don't improperly update the
+ * bucket index, initialize with an invalid
+ * hash in case we are in leftmost slot
+ */
+ hash_prev = 0xff;
+
+ if (left > 0) {
+ memcpy_fromio(mac, FDB_MAC_TBL_ENTRY(left - 1)->mac, ETH_ALEN);
+ hash_prev = icssm_prueth_sw_fdb_hash(mac);
+ }
+
+ /* For each moved element, update the bucket index */
+ for (i = left; i <= right; i++) {
+ memcpy_fromio(mac, FDB_MAC_TBL_ENTRY(i)->mac, ETH_ALEN);
+ hash = icssm_prueth_sw_fdb_hash(mac);
+
+ /* Only need to update buckets once */
+ if (hash != hash_prev)
+ writew(i, &FDB_IDX_TBL_ENTRY(hash)->bucket_idx);
+
+ hash_prev = hash;
+ }
+}
+
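+/* Claim a free MAC table slot for @mac at or near @suggested_mac_tbl_idx.
+ * If that slot is taken, shift entries towards the nearest free slot
+ * (preferring the left) and fix up the bucket index of every bucket whose
+ * entries moved. Returns NULL only if the table is full.
+ */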
+static struct fdb_mac_tbl_entry __iomem *
+icssm_prueth_sw_find_free_mac(struct prueth *prueth, struct fdb_index_tbl_entry
+ __iomem *bucket_info, u8 suggested_mac_tbl_idx,
+ bool *update_indexes, const u8 *mac)
+{
+ s16 empty_slot_idx = 0, left = 0, right = 0;
+ unsigned int mti = suggested_mac_tbl_idx;
+ struct fdb_mac_tbl_array __iomem *mt;
+ struct fdb_tbl *fdb;
+ u8 flags;
+
+ fdb = prueth->fdb_tbl;
+ mt = fdb->mac_tbl_a;
+
+ flags = readb(&FDB_MAC_TBL_ENTRY(mti)->flags);
+ if (!(flags & FLAG_ACTIVE)) {
+ /* Claim the entry */
+ flags |= FLAG_ACTIVE;
+ writeb(flags, &FDB_MAC_TBL_ENTRY(mti)->flags);
+
+ return FDB_MAC_TBL_ENTRY(mti);
+ }
+
+ if (fdb->total_entries == FDB_MAC_TBL_MAX_ENTRIES)
+ return NULL;
+
+ empty_slot_idx = icssm_prueth_sw_fdb_empty_slot_left(mt, mti);
+ if (empty_slot_idx == -1) {
+ /* Nothing available on the left. But table isn't full
+ * so there must be space to the right,
+ */
+ empty_slot_idx = icssm_prueth_sw_fdb_empty_slot_right(mt, mti);
+
+ /* Shift right */
+ left = mti;
+ right = empty_slot_idx;
+ icssm_prueth_sw_fdb_move_range_right(prueth, left, right);
+
+ /* Claim the entry */
+ flags = readb(&FDB_MAC_TBL_ENTRY(mti)->flags);
+ flags |= FLAG_ACTIVE;
+ writeb(flags, &FDB_MAC_TBL_ENTRY(mti)->flags);
+
+ memcpy_toio(FDB_MAC_TBL_ENTRY(mti)->mac, mac, ETH_ALEN);
+
+ /* There is a chance we moved something in a
+ * different bucket, update index table
+ */
+ icssm_prueth_sw_fdb_update_index_tbl(prueth, left, right);
+
+ return FDB_MAC_TBL_ENTRY(mti);
+ }
+
+ if (empty_slot_idx == mti - 1) {
+ /* There is space immediately left of the open slot,
+ * which means the inserted MAC address
+ * must be the lowest-valued MAC address in the bucket.
+ * Update bucket pointer accordingly.
+ */
+ writew(empty_slot_idx, &bucket_info->bucket_idx);
+
+ /* Claim the entry */
+ flags = readb(&FDB_MAC_TBL_ENTRY(empty_slot_idx)->flags);
+ flags |= FLAG_ACTIVE;
+ writeb(flags, &FDB_MAC_TBL_ENTRY(empty_slot_idx)->flags);
+
+ return FDB_MAC_TBL_ENTRY(empty_slot_idx);
+ }
+
+ /* There is empty space to the left, shift MAC table entries left */
+ left = empty_slot_idx;
+ right = mti - 1;
+ icssm_prueth_sw_fdb_move_range_left(prueth, left, right);
+
+ /* Claim the entry */
+ flags = readb(&FDB_MAC_TBL_ENTRY(mti - 1)->flags);
+ flags |= FLAG_ACTIVE;
+ writeb(flags, &FDB_MAC_TBL_ENTRY(mti - 1)->flags);
+
+ memcpy_toio(FDB_MAC_TBL_ENTRY(mti - 1)->mac, mac, ETH_ALEN);
+
+ /* There is a chance we moved something in a
+ * different bucket, update index table
+ */
+ icssm_prueth_sw_fdb_update_index_tbl(prueth, left, right);
+
+ return FDB_MAC_TBL_ENTRY(mti - 1);
+}
+
+static int icssm_prueth_sw_insert_fdb_entry(struct prueth_emac *emac,
+ const u8 *mac, u8 is_static)
+{
+ struct fdb_index_tbl_entry __iomem *bucket_info;
+ struct fdb_mac_tbl_entry __iomem *mac_info;
+ struct prueth *prueth = emac->prueth;
+ unsigned int hash_val, mac_tbl_idx;
+ struct prueth_emac *other_emac;
+ enum prueth_port other_port_id;
+ int total_fdb_entries;
+ struct fdb_tbl *fdb;
+ u8 flags;
+ s16 ret;
+ int err;
+ u16 val;
+
+ fdb = prueth->fdb_tbl;
+ other_port_id = (emac->port_id == PRUETH_PORT_MII0) ?
+ PRUETH_PORT_MII1 : PRUETH_PORT_MII0;
+
+ other_emac = prueth->emac[other_port_id - 1];
+ if (!other_emac)
+ return -EINVAL;
+
+ err = icssm_prueth_sw_fdb_spin_lock(fdb);
+ if (err) {
+ dev_err(prueth->dev, "PRU lock timeout %d\n", err);
+ return err;
+ }
+
+ if (fdb->total_entries == FDB_MAC_TBL_MAX_ENTRIES) {
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ return -ENOMEM;
+ }
+
+ if (ether_addr_equal(mac, emac->mac_addr) ||
+ (ether_addr_equal(mac, other_emac->mac_addr))) {
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ /* Don't insert fdb of own mac addr */
+ return -EINVAL;
+ }
+
+ /* Get the bucket that the mac belongs to */
+ hash_val = icssm_prueth_sw_fdb_hash(mac);
+ bucket_info = FDB_IDX_TBL_ENTRY(hash_val);
+
+ if (!readw(&bucket_info->bucket_entries)) {
+ mac_tbl_idx = icssm_prueth_sw_fdb_find_open_slot(fdb);
+ writew(mac_tbl_idx, &bucket_info->bucket_idx);
+ }
+
+ ret = icssm_prueth_sw_find_fdb_insert(fdb, prueth, bucket_info, mac,
+ emac->port_id - 1);
+ if (ret < 0) {
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ /* mac is already in fdb table */
+ return 0;
+ }
+
+ mac_tbl_idx = ret;
+
+ mac_info = icssm_prueth_sw_find_free_mac(prueth, bucket_info,
+ mac_tbl_idx, NULL,
+ mac);
+ if (!mac_info) {
+ /* Should not happen */
+ dev_warn(prueth->dev, "OUT of FDB MEM\n");
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ return -ENOMEM;
+ }
+
+ memcpy_toio(mac_info->mac, mac, ETH_ALEN);
+ writew(0, &mac_info->age);
+ writeb(emac->port_id - 1, &mac_info->port);
+
+ flags = readb(&mac_info->flags);
+ if (is_static)
+ flags |= FLAG_IS_STATIC;
+ else
+ flags &= ~FLAG_IS_STATIC;
+
+ /* bit 1 - active */
+ flags |= FLAG_ACTIVE;
+ writeb(flags, &mac_info->flags);
+
+ val = readw(&bucket_info->bucket_entries);
+ val++;
+ writew(val, &bucket_info->bucket_entries);
+
+ fdb->total_entries++;
+
+ total_fdb_entries = fdb->total_entries;
+
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+
+ dev_dbg(prueth->dev, "added fdb: %pM port=%d total_entries=%u\n",
+ mac, emac->port_id, total_fdb_entries);
+
+ return 0;
+}
+
+static int icssm_prueth_sw_delete_fdb_entry(struct prueth_emac *emac,
+ const u8 *mac, u8 is_static)
+{
+ struct fdb_index_tbl_entry __iomem *bucket_info;
+ struct fdb_mac_tbl_entry __iomem *mac_info;
+ struct fdb_mac_tbl_array __iomem *mt;
+ unsigned int hash_val, mac_tbl_idx;
+ unsigned int idx, entries;
+ struct prueth *prueth;
+ int total_fdb_entries;
+ s16 ret, left, right;
+ struct fdb_tbl *fdb;
+ u8 flags;
+ int err;
+ u16 val;
+
+ prueth = emac->prueth;
+ fdb = prueth->fdb_tbl;
+ mt = fdb->mac_tbl_a;
+
+ err = icssm_prueth_sw_fdb_spin_lock(fdb);
+ if (err) {
+ dev_err(prueth->dev, "PRU lock timeout %d\n", err);
+ return err;
+ }
+
+ if (fdb->total_entries == 0) {
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ return 0;
+ }
+
+ /* Get the bucket that the mac belongs to */
+ hash_val = icssm_prueth_sw_fdb_hash(mac);
+ bucket_info = FDB_IDX_TBL_ENTRY(hash_val);
+
+ ret = icssm_prueth_sw_fdb_search(mt, bucket_info, mac);
+ if (ret < 0) {
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ return ret;
+ }
+
+ mac_tbl_idx = ret;
+ mac_info = FDB_MAC_TBL_ENTRY(mac_tbl_idx);
+
+ /* Shift all elements in bucket to the left. No need to
+ * update index table since only shifting within bucket.
+ */
+ left = mac_tbl_idx;
+ idx = readw(&bucket_info->bucket_idx);
+ entries = readw(&bucket_info->bucket_entries);
+ right = idx + entries - 1;
+ icssm_prueth_sw_fdb_move_range_left(prueth, left, right);
+
+ /* Remove end of bucket from table */
+ mac_info = FDB_MAC_TBL_ENTRY(right);
+ flags = readb(&mac_info->flags);
+ /* active = 0 */
+ flags &= ~FLAG_ACTIVE;
+ writeb(flags, &mac_info->flags);
+ val = readw(&bucket_info->bucket_entries);
+ val--;
+ writew(val, &bucket_info->bucket_entries);
+ fdb->total_entries--;
+
+ total_fdb_entries = fdb->total_entries;
+
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+
+ dev_dbg(prueth->dev, "del fdb: %pM total_entries=%u\n",
+ mac, total_fdb_entries);
+
+ return 0;
+}
+
+int icssm_prueth_sw_do_purge_fdb(struct prueth_emac *emac)
+{
+ struct fdb_index_tbl_entry __iomem *bucket_info;
+ struct prueth *prueth = emac->prueth;
+ u8 flags, mac[ETH_ALEN];
+ unsigned int hash_val;
+ struct fdb_tbl *fdb;
+ int ret, i;
+ u16 val;
+
+ fdb = prueth->fdb_tbl;
+
+ ret = icssm_prueth_sw_fdb_spin_lock(fdb);
+ if (ret) {
+ dev_err(prueth->dev, "PRU lock timeout %d\n", ret);
+ return ret;
+ }
+
+ if (fdb->total_entries == 0) {
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ return 0;
+ }
+
+ for (i = 0; i < FDB_MAC_TBL_MAX_ENTRIES; i++) {
+ flags = readb(&fdb->mac_tbl_a->mac_tbl_entry[i].flags);
+ if ((flags & FLAG_ACTIVE) && !(flags & FLAG_IS_STATIC)) {
+ /* Get the bucket that the mac belongs to */
+ memcpy_fromio(mac, FDB_MAC_TBL_ENTRY(i)->mac,
+ ETH_ALEN);
+ hash_val = icssm_prueth_sw_fdb_hash(mac);
+ bucket_info = FDB_IDX_TBL_ENTRY(hash_val);
+ flags &= ~FLAG_ACTIVE;
+ writeb(flags,
+ &fdb->mac_tbl_a->mac_tbl_entry[i].flags);
+ val = readw(&bucket_info->bucket_entries);
+ val--;
+ writew(val, &bucket_info->bucket_entries);
+ fdb->total_entries--;
+ }
+ }
+
+ icssm_prueth_sw_fdb_spin_unlock(fdb);
+ return 0;
+}
+
+int icssm_prueth_sw_init_fdb_table(struct prueth *prueth)
+{
+ if (prueth->emac_configured)
+ return 0;
+
+ prueth->fdb_tbl = kmalloc(sizeof(*prueth->fdb_tbl), GFP_KERNEL);
+ if (!prueth->fdb_tbl)
+ return -ENOMEM;
+
+ icssm_prueth_sw_fdb_tbl_init(prueth);
+
+ return 0;
+}
+
+/**
+ * icssm_prueth_sw_fdb_add - insert fdb entry
+ * @emac: EMAC data structure
+ * @fdb: fdb info
+ */
+void icssm_prueth_sw_fdb_add(struct prueth_emac *emac,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ icssm_prueth_sw_insert_fdb_entry(emac, fdb->addr, 1);
+}
+
+/**
+ * icssm_prueth_sw_fdb_del - delete fdb entry
+ * @emac: EMAC data structure
+ * @fdb: fdb info
+ */
+void icssm_prueth_sw_fdb_del(struct prueth_emac *emac,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ icssm_prueth_sw_delete_fdb_entry(emac, fdb->addr, 1);
+}
+
+static void icssm_prueth_sw_fdb_work(struct work_struct *work)
+{
+ struct icssm_prueth_sw_fdb_work *fdb_work =
+ container_of(work, struct icssm_prueth_sw_fdb_work, work);
+ struct prueth_emac *emac = fdb_work->emac;
+
+ rtnl_lock();
+
+ /* Interface is not up */
+ if (!emac->prueth->fdb_tbl)
+ goto free;
+
+ switch (fdb_work->event) {
+ case FDB_LEARN:
+ icssm_prueth_sw_insert_fdb_entry(emac, fdb_work->addr, 0);
+ break;
+ case FDB_PURGE:
+ icssm_prueth_sw_do_purge_fdb(emac);
+ break;
+ default:
+ break;
+ }
+
+free:
+ rtnl_unlock();
+ netdev_put(emac->ndev, &fdb_work->ndev_tracker);
+ kfree(fdb_work);
+}
+
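+/* Called from the RX path in atomic context: hold the netdev and defer the
+ * shared-RAM FDB update to process context via system_long_wq.
+ */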
+int icssm_prueth_sw_learn_fdb(struct prueth_emac *emac, u8 *src_mac)
+{
+ struct icssm_prueth_sw_fdb_work *fdb_work;
+
+ fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+ if (WARN_ON(!fdb_work))
+ return -ENOMEM;
+
+ INIT_WORK(&fdb_work->work, icssm_prueth_sw_fdb_work);
+
+ fdb_work->event = FDB_LEARN;
+ fdb_work->emac = emac;
+ ether_addr_copy(fdb_work->addr, src_mac);
+
+ netdev_hold(emac->ndev, &fdb_work->ndev_tracker, GFP_ATOMIC);
+ queue_work(system_long_wq, &fdb_work->work);
+ return 0;
+}
+
+int icssm_prueth_sw_purge_fdb(struct prueth_emac *emac)
+{
+ struct icssm_prueth_sw_fdb_work *fdb_work;
+
+ fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+ if (WARN_ON(!fdb_work))
+ return -ENOMEM;
+
+ INIT_WORK(&fdb_work->work, icssm_prueth_sw_fdb_work);
+
+ fdb_work->event = FDB_PURGE;
+ fdb_work->emac = emac;
+
+ netdev_hold(emac->ndev, &fdb_work->ndev_tracker, GFP_ATOMIC);
+ queue_work(system_long_wq, &fdb_work->work);
+ return 0;
+}
+
+void icssm_prueth_sw_hostconfig(struct prueth *prueth)
+{
+ void __iomem *dram1_base = prueth->mem[PRUETH_MEM_DRAM1].va;
+ void __iomem *dram;
+
+ /* queue information table */
+ dram = dram1_base + P0_Q1_RX_CONTEXT_OFFSET;
+ memcpy_toio(dram, sw_queue_infos[PRUETH_PORT_QUEUE_HOST],
+ sizeof(sw_queue_infos[PRUETH_PORT_QUEUE_HOST]));
+
+ /* buffer descriptor offset table */
+ dram = dram1_base + QUEUE_DESCRIPTOR_OFFSET_ADDR;
+ writew(P0_Q1_BD_OFFSET, dram);
+ writew(P0_Q2_BD_OFFSET, dram + 2);
+ writew(P0_Q3_BD_OFFSET, dram + 4);
+ writew(P0_Q4_BD_OFFSET, dram + 6);
+
+ /* buffer offset table */
+ dram = dram1_base + QUEUE_OFFSET_ADDR;
+ writew(P0_Q1_BUFFER_OFFSET, dram);
+ writew(P0_Q2_BUFFER_OFFSET, dram + 2);
+ writew(P0_Q3_BUFFER_OFFSET, dram + 4);
+ writew(P0_Q4_BUFFER_OFFSET, dram + 6);
+
+ /* queue size lookup table */
+ dram = dram1_base + QUEUE_SIZE_ADDR;
+ writew(HOST_QUEUE_1_SIZE, dram);
+ writew(HOST_QUEUE_1_SIZE, dram + 2);
+ writew(HOST_QUEUE_1_SIZE, dram + 4);
+ writew(HOST_QUEUE_1_SIZE, dram + 6);
+
+ /* queue table */
+ dram = dram1_base + P0_QUEUE_DESC_OFFSET;
+ memcpy_toio(dram, queue_descs[PRUETH_PORT_QUEUE_HOST],
+ sizeof(queue_descs[PRUETH_PORT_QUEUE_HOST]));
+}
+
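+/* Program one port's queue/context tables into DRAM1 (the port MAC address
+ * goes to the respective PRU data RAM) and cache the queue descriptor
+ * pointers used by the TX/RX paths.
+ */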
+static int icssm_prueth_sw_port_config(struct prueth *prueth,
+ enum prueth_port port_id)
+{
+ unsigned int tx_context_ofs_addr, rx_context_ofs, queue_desc_ofs;
+ void __iomem *dram, *dram_base, *dram_mac;
+ struct prueth_emac *emac;
+ void __iomem *dram1_base;
+
+ dram1_base = prueth->mem[PRUETH_MEM_DRAM1].va;
+ emac = prueth->emac[port_id - 1];
+ switch (port_id) {
+ case PRUETH_PORT_MII0:
+ tx_context_ofs_addr = TX_CONTEXT_P1_Q1_OFFSET_ADDR;
+ rx_context_ofs = P1_Q1_RX_CONTEXT_OFFSET;
+ queue_desc_ofs = P1_QUEUE_DESC_OFFSET;
+
+ /* in switch mode, the port MII0 MAC address lives in DRAM0 */
+ dram_mac = prueth->mem[PRUETH_MEM_DRAM0].va;
+ break;
+ case PRUETH_PORT_MII1:
+ tx_context_ofs_addr = TX_CONTEXT_P2_Q1_OFFSET_ADDR;
+ rx_context_ofs = P2_Q1_RX_CONTEXT_OFFSET;
+ queue_desc_ofs = P2_QUEUE_DESC_OFFSET;
+
+ /* in switch mode, the port MII1 MAC address lives in DRAM1 */
+ dram_mac = prueth->mem[PRUETH_MEM_DRAM1].va;
+ break;
+ default:
+ netdev_err(emac->ndev, "invalid port\n");
+ return -EINVAL;
+ }
+
+ /* setup mac address */
+ memcpy_toio(dram_mac + PORT_MAC_ADDR, emac->mac_addr, 6);
+
+ /* Remaining switch port configs are in DRAM1 */
+ dram_base = prueth->mem[PRUETH_MEM_DRAM1].va;
+
+ /* queue information table */
+ memcpy_toio(dram_base + tx_context_ofs_addr,
+ sw_queue_infos[port_id],
+ sizeof(sw_queue_infos[port_id]));
+
+ memcpy_toio(dram_base + rx_context_ofs,
+ rx_queue_infos[port_id],
+ sizeof(rx_queue_infos[port_id]));
+
+ /* buffer descriptor offset table */
+ dram = dram_base + QUEUE_DESCRIPTOR_OFFSET_ADDR +
+ (port_id * NUM_QUEUES * sizeof(u16));
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE1].buffer_desc_offset, dram);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE2].buffer_desc_offset,
+ dram + 2);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE3].buffer_desc_offset,
+ dram + 4);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE4].buffer_desc_offset,
+ dram + 6);
+
+ /* buffer offset table */
+ dram = dram_base + QUEUE_OFFSET_ADDR +
+ port_id * NUM_QUEUES * sizeof(u16);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE1].buffer_offset, dram);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE2].buffer_offset,
+ dram + 2);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE3].buffer_offset,
+ dram + 4);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE4].buffer_offset,
+ dram + 6);
+
+ /* queue size lookup table */
+ dram = dram_base + QUEUE_SIZE_ADDR +
+ port_id * NUM_QUEUES * sizeof(u16);
+ writew(QUEUE_1_SIZE, dram);
+ writew(QUEUE_2_SIZE, dram + 2);
+ writew(QUEUE_3_SIZE, dram + 4);
+ writew(QUEUE_4_SIZE, dram + 6);
+
+ /* queue table */
+ memcpy_toio(dram_base + queue_desc_ofs,
+ &queue_descs[port_id][0],
+ 4 * sizeof(queue_descs[port_id][0]));
+
+ emac->rx_queue_descs = dram1_base + P0_QUEUE_DESC_OFFSET;
+ emac->tx_queue_descs = dram1_base +
+ rx_queue_infos[port_id][PRUETH_QUEUE1].queue_desc_offset;
+
+ return 0;
+}
+
+int icssm_prueth_sw_emac_config(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ u32 sharedramaddr, ocmcaddr;
+ int ret;
+
+ /* PRU needs local shared RAM address for C28 */
+ sharedramaddr = ICSS_LOCAL_SHARED_RAM;
+ /* PRU needs the real global OCMC address for C30 */
+ ocmcaddr = (u32)prueth->mem[PRUETH_MEM_OCMC].pa;
+
+ if (prueth->emac_configured & BIT(emac->port_id))
+ return 0;
+
+ ret = icssm_prueth_sw_port_config(prueth, emac->port_id);
+ if (ret)
+ return ret;
+
+ if (!prueth->emac_configured) {
+ /* Set in constant table C28 of PRUn to ICSS Shared memory */
+ pru_rproc_set_ctable(prueth->pru0, PRU_C28, sharedramaddr);
+ pru_rproc_set_ctable(prueth->pru1, PRU_C28, sharedramaddr);
+
+ /* Set in constant table C30 of PRUn to OCMC memory */
+ pru_rproc_set_ctable(prueth->pru0, PRU_C30, ocmcaddr);
+ pru_rproc_set_ctable(prueth->pru1, PRU_C30, ocmcaddr);
+ }
+ return 0;
+}
+
+int icssm_prueth_sw_boot_prus(struct prueth *prueth, struct net_device *ndev)
+{
+ const struct prueth_firmware *pru_firmwares;
+ const char *fw_name, *fw_name1;
+ int ret;
+
+ if (prueth->emac_configured)
+ return 0;
+
+ pru_firmwares = &prueth->fw_data->fw_pru[PRUSS_PRU0];
+ fw_name = pru_firmwares->fw_name[prueth->eth_type];
+ pru_firmwares = &prueth->fw_data->fw_pru[PRUSS_PRU1];
+ fw_name1 = pru_firmwares->fw_name[prueth->eth_type];
+
+ ret = rproc_set_firmware(prueth->pru0, fw_name);
+ if (ret) {
+ netdev_err(ndev, "failed to set PRU0 firmware %s: %d\n",
+ fw_name, ret);
+ return ret;
+ }
+ ret = rproc_boot(prueth->pru0);
+ if (ret) {
+ netdev_err(ndev, "failed to boot PRU0: %d\n", ret);
+ return ret;
+ }
+
+ ret = rproc_set_firmware(prueth->pru1, fw_name1);
+ if (ret) {
+ netdev_err(ndev, "failed to set PRU1 firmware %s: %d\n",
+ fw_name1, ret);
+ goto rproc0_shutdown;
+ }
+ ret = rproc_boot(prueth->pru1);
+ if (ret) {
+ netdev_err(ndev, "failed to boot PRU1: %d\n", ret);
+ goto rproc0_shutdown;
+ }
+
+ return 0;
+
+rproc0_shutdown:
+ rproc_shutdown(prueth->pru0);
+ return ret;
+}
+
+int icssm_prueth_sw_shutdown_prus(struct prueth_emac *emac,
+ struct net_device *ndev)
+{
+ struct prueth *prueth = emac->prueth;
+
+ if (prueth->emac_configured)
+ return 0;
+
+ rproc_shutdown(prueth->pru0);
+ rproc_shutdown(prueth->pru1);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth_switch.h b/drivers/net/ethernet/ti/icssm/icssm_prueth_switch.h
new file mode 100644
index 000000000000..e6111bba166e
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth_switch.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __NET_TI_PRUETH_SWITCH_H
+#define __NET_TI_PRUETH_SWITCH_H
+
+#include <net/switchdev.h>
+
+#include "icssm_prueth.h"
+#include "icssm_prueth_fdb_tbl.h"
+#include "icssm_switchdev.h"
+
+void icssm_prueth_sw_set_stp_state(struct prueth *prueth,
+ enum prueth_port port, u8 state);
+u8 icssm_prueth_sw_get_stp_state(struct prueth *prueth,
+ enum prueth_port port);
+
+extern const struct prueth_queue_info sw_queue_infos[][4];
+
+void icssm_prueth_sw_fdb_tbl_init(struct prueth *prueth);
+int icssm_prueth_sw_init_fdb_table(struct prueth *prueth);
+void icssm_prueth_sw_free_fdb_table(struct prueth *prueth);
+int icssm_prueth_sw_do_purge_fdb(struct prueth_emac *emac);
+void icssm_prueth_sw_fdb_add(struct prueth_emac *emac,
+ struct switchdev_notifier_fdb_info *fdb);
+void icssm_prueth_sw_fdb_del(struct prueth_emac *emac,
+ struct switchdev_notifier_fdb_info *fdb);
+int icssm_prueth_sw_learn_fdb(struct prueth_emac *emac, u8 *src_mac);
+int icssm_prueth_sw_purge_fdb(struct prueth_emac *emac);
+void icssm_prueth_sw_hostconfig(struct prueth *prueth);
+int icssm_prueth_sw_emac_config(struct prueth_emac *emac);
+int icssm_prueth_sw_boot_prus(struct prueth *prueth, struct net_device *ndev);
+int icssm_prueth_sw_shutdown_prus(struct prueth_emac *emac,
+ struct net_device *ndev);
+
+#endif /* __NET_TI_PRUETH_SWITCH_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_switch.h b/drivers/net/ethernet/ti/icssm/icssm_switch.h
index 8b494ffdcde7..5ba9ce14da44 100644
--- a/drivers/net/ethernet/ti/icssm/icssm_switch.h
+++ b/drivers/net/ethernet/ti/icssm/icssm_switch.h
@@ -117,6 +117,15 @@
#define STATISTICS_OFFSET 0x1F00
#define STAT_SIZE 0x98
+/* The following offsets indicate which sections of the memory are used
+ * for switch internal tasks
+ */
+#define SWITCH_SPECIFIC_DRAM0_START_SIZE 0x100
+#define SWITCH_SPECIFIC_DRAM0_START_OFFSET 0x1F00
+
+#define SWITCH_SPECIFIC_DRAM1_START_SIZE 0x300
+#define SWITCH_SPECIFIC_DRAM1_START_OFFSET 0x1D00
+
/* Offset for storing
* 1. Storm Prevention Params
* 2. PHY Speed Offset
@@ -146,6 +155,74 @@
/* 4 bytes ? */
#define STP_INVALID_STATE_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 33)
+/* DRAM1 Offsets for Switch */
+/* 4 queue descriptors for port 0 (host receive) */
+#define P0_QUEUE_DESC_OFFSET 0x1E7C
+#define P1_QUEUE_DESC_OFFSET 0x1E9C
+#define P2_QUEUE_DESC_OFFSET 0x1EBC
+/* collision descriptor of port 0 */
+#define P0_COL_QUEUE_DESC_OFFSET 0x1E64
+#define P1_COL_QUEUE_DESC_OFFSET 0x1E6C
+#define P2_COL_QUEUE_DESC_OFFSET 0x1E74
+/* Collision Status Register
+ * P0: bit 0 is pending flag, bit 1..2 indicates which queue,
+ * P1: bit 8 is pending flag, 9..10 is queue number
+ * P2: bit 16 is pending flag, 17..18 is queue number, remaining bits are 0.
+ */
+#define COLLISION_STATUS_ADDR 0x1E60
+
+#define INTERFACE_MAC_ADDR 0x1E58
+#define P2_MAC_ADDR 0x1E50
+#define P1_MAC_ADDR 0x1E48
+
+#define QUEUE_SIZE_ADDR 0x1E30
+#define QUEUE_OFFSET_ADDR 0x1E18
+#define QUEUE_DESCRIPTOR_OFFSET_ADDR 0x1E00
+
+#define COL_RX_CONTEXT_P2_OFFSET_ADDR (COL_RX_CONTEXT_P1_OFFSET_ADDR + 12)
+#define COL_RX_CONTEXT_P1_OFFSET_ADDR (COL_RX_CONTEXT_P0_OFFSET_ADDR + 12)
+#define COL_RX_CONTEXT_P0_OFFSET_ADDR (P2_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Port 2 Rx Context */
+#define P2_Q4_RX_CONTEXT_OFFSET (P2_Q3_RX_CONTEXT_OFFSET + 8)
+#define P2_Q3_RX_CONTEXT_OFFSET (P2_Q2_RX_CONTEXT_OFFSET + 8)
+#define P2_Q2_RX_CONTEXT_OFFSET (P2_Q1_RX_CONTEXT_OFFSET + 8)
+#define P2_Q1_RX_CONTEXT_OFFSET RX_CONTEXT_P2_Q1_OFFSET_ADDR
+#define RX_CONTEXT_P2_Q1_OFFSET_ADDR (P1_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Port 1 Rx Context */
+#define P1_Q4_RX_CONTEXT_OFFSET (P1_Q3_RX_CONTEXT_OFFSET + 8)
+#define P1_Q3_RX_CONTEXT_OFFSET (P1_Q2_RX_CONTEXT_OFFSET + 8)
+#define P1_Q2_RX_CONTEXT_OFFSET (P1_Q1_RX_CONTEXT_OFFSET + 8)
+#define P1_Q1_RX_CONTEXT_OFFSET (RX_CONTEXT_P1_Q1_OFFSET_ADDR)
+#define RX_CONTEXT_P1_Q1_OFFSET_ADDR (P0_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Host Port Rx Context */
+#define P0_Q4_RX_CONTEXT_OFFSET (P0_Q3_RX_CONTEXT_OFFSET + 8)
+#define P0_Q3_RX_CONTEXT_OFFSET (P0_Q2_RX_CONTEXT_OFFSET + 8)
+#define P0_Q2_RX_CONTEXT_OFFSET (P0_Q1_RX_CONTEXT_OFFSET + 8)
+#define P0_Q1_RX_CONTEXT_OFFSET RX_CONTEXT_P0_Q1_OFFSET_ADDR
+#define RX_CONTEXT_P0_Q1_OFFSET_ADDR (COL_TX_CONTEXT_P2_Q1_OFFSET_ADDR + 8)
+
+/* Port 2 Tx Collision Context */
+#define COL_TX_CONTEXT_P2_Q1_OFFSET_ADDR (COL_TX_CONTEXT_P1_Q1_OFFSET_ADDR + 8)
+/* Port 1 Tx Collision Context */
+#define COL_TX_CONTEXT_P1_Q1_OFFSET_ADDR (P2_Q4_TX_CONTEXT_OFFSET + 8)
+
+/* Port 2 */
+#define P2_Q4_TX_CONTEXT_OFFSET (P2_Q3_TX_CONTEXT_OFFSET + 8)
+#define P2_Q3_TX_CONTEXT_OFFSET (P2_Q2_TX_CONTEXT_OFFSET + 8)
+#define P2_Q2_TX_CONTEXT_OFFSET (P2_Q1_TX_CONTEXT_OFFSET + 8)
+#define P2_Q1_TX_CONTEXT_OFFSET TX_CONTEXT_P2_Q1_OFFSET_ADDR
+#define TX_CONTEXT_P2_Q1_OFFSET_ADDR (P1_Q4_TX_CONTEXT_OFFSET + 8)
+
+/* Port 1 */
+#define P1_Q4_TX_CONTEXT_OFFSET (P1_Q3_TX_CONTEXT_OFFSET + 8)
+#define P1_Q3_TX_CONTEXT_OFFSET (P1_Q2_TX_CONTEXT_OFFSET + 8)
+#define P1_Q2_TX_CONTEXT_OFFSET (P1_Q1_TX_CONTEXT_OFFSET + 8)
+#define P1_Q1_TX_CONTEXT_OFFSET TX_CONTEXT_P1_Q1_OFFSET_ADDR
+#define TX_CONTEXT_P1_Q1_OFFSET_ADDR SWITCH_SPECIFIC_DRAM1_START_OFFSET
+
/* DRAM Offsets for EMAC
* Present on Both DRAM0 and DRAM1
*/
@@ -254,4 +331,30 @@
#define P0_COL_BUFFER_OFFSET 0xEE00
#define P0_Q1_BUFFER_OFFSET 0x0000
+#define V2_1_FDB_TBL_LOC PRUETH_MEM_SHARED_RAM
+#define V2_1_FDB_TBL_OFFSET 0x2000
+
+#define FDB_INDEX_TBL_MAX_ENTRIES 256
+#define FDB_MAC_TBL_MAX_ENTRIES 256
+
+#define FDB_INDEX_TBL_OFFSET V2_1_FDB_TBL_OFFSET
+#define FDB_INDEX_TBL_SIZE (FDB_INDEX_TBL_MAX_ENTRIES * \
+ sizeof(struct fdb_index_tbl_entry))
+
+#define FDB_MAC_TBL_OFFSET (FDB_INDEX_TBL_OFFSET + FDB_INDEX_TBL_SIZE)
+#define FDB_MAC_TBL_SIZE (FDB_MAC_TBL_MAX_ENTRIES * \
+ sizeof(struct fdb_mac_tbl_entry))
+
+#define FDB_PORT1_STP_CFG_OFFSET (FDB_MAC_TBL_OFFSET + FDB_MAC_TBL_SIZE)
+#define FDB_PORT_STP_CFG_SIZE sizeof(struct fdb_stp_config)
+#define FDB_PORT2_STP_CFG_OFFSET (FDB_PORT1_STP_CFG_OFFSET + \
+ FDB_PORT_STP_CFG_SIZE)
+
+#define FDB_FLOOD_ENABLE_FLAGS_OFFSET (FDB_PORT2_STP_CFG_OFFSET + \
+ FDB_PORT_STP_CFG_SIZE)
+#define FDB_FLOOD_ENABLE_FLAGS_SIZE sizeof(struct fdb_flood_config)
+
+#define FDB_LOCKS_OFFSET (FDB_FLOOD_ENABLE_FLAGS_OFFSET + \
+ FDB_FLOOD_ENABLE_FLAGS_SIZE)
+
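+/* Resulting shared-RAM layout, assuming the structure sizes noted in
+ * icssm_prueth_fdb_tbl.h (4-byte index entries, 10-byte MAC entries,
+ * 1-byte STP and flood configs):
+ *   0x2000 index table (0x400 bytes)
+ *   0x2400 MAC table   (0xa00 bytes)
+ *   0x2e00 port 1 STP, 0x2e01 port 2 STP
+ *   0x2e02 flood flags, 0x2e03 locks
+ */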
#endif /* __ICSS_SWITCH_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_switchdev.c b/drivers/net/ethernet/ti/icssm/icssm_switchdev.c
new file mode 100644
index 000000000000..414ec9fc02a0
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_switchdev.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSM Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/remoteproc.h>
+#include <net/switchdev.h>
+
+#include "icssm_prueth.h"
+#include "icssm_prueth_switch.h"
+#include "icssm_prueth_fdb_tbl.h"
+
+/* switchdev event work */
+struct icssm_sw_event_work {
+ netdevice_tracker ndev_tracker;
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct prueth_emac *emac;
+ unsigned long event;
+};
+
+void icssm_prueth_sw_set_stp_state(struct prueth *prueth,
+ enum prueth_port port, u8 state)
+{
+ struct fdb_tbl *t = prueth->fdb_tbl;
+
+ writeb(state, port - 1 ? (void __iomem *)&t->port2_stp_cfg->state :
+ (void __iomem *)&t->port1_stp_cfg->state);
+}
+
+u8 icssm_prueth_sw_get_stp_state(struct prueth *prueth, enum prueth_port port)
+{
+ struct fdb_tbl *t = prueth->fdb_tbl;
+ u8 state;
+
+ state = readb(port - 1 ? (void __iomem *)&t->port2_stp_cfg->state :
+ (void __iomem *)&t->port1_stp_cfg->state);
+ return state;
+}
+
+static int icssm_prueth_sw_attr_set(struct net_device *ndev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int err = 0;
+ u8 o_state;
+
+ /* Interface is not up */
+ if (!prueth->fdb_tbl)
+ return 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ o_state = icssm_prueth_sw_get_stp_state(prueth, emac->port_id);
+ icssm_prueth_sw_set_stp_state(prueth, emac->port_id,
+ attr->u.stp_state);
+
+ if (o_state != attr->u.stp_state)
+ icssm_prueth_sw_purge_fdb(emac);
+
+ dev_dbg(prueth->dev, "attr set: stp state:%u port:%u\n",
+ attr->u.stp_state, emac->port_id);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static void icssm_prueth_sw_fdb_offload(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, ndev, &info.info,
+ NULL);
+}
+
+/**
+ * icssm_sw_event_work - deferred work handler to insert/delete an FDB entry
+ *
+ * @work: work structure
+ */
+static void icssm_sw_event_work(struct work_struct *work)
+{
+ struct icssm_sw_event_work *switchdev_work =
+ container_of(work, struct icssm_sw_event_work, work);
+ struct prueth_emac *emac = switchdev_work->emac;
+ struct switchdev_notifier_fdb_info *fdb;
+ struct prueth *prueth = emac->prueth;
+ int port = emac->port_id;
+
+ rtnl_lock();
+
+ /* Interface is not up */
+ if (!emac->prueth->fdb_tbl)
+ goto free;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+ dev_dbg(prueth->dev,
+ "prueth fdb add: MACID = %pM vid = %u flags = %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user, port);
+
+ if (!fdb->added_by_user)
+ break;
+
+ if (fdb->is_local)
+ break;
+
+ icssm_prueth_sw_fdb_add(emac, fdb);
+ icssm_prueth_sw_fdb_offload(emac->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+ dev_dbg(prueth->dev,
+ "prueth fdb del: MACID = %pM vid = %u flags = %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user, port);
+
+ if (fdb->is_local)
+ break;
+
+ icssm_prueth_sw_fdb_del(emac, fdb);
+ break;
+ default:
+ break;
+ }
+
+free:
+ rtnl_unlock();
+
+ netdev_put(emac->ndev, &switchdev_work->ndev_tracker);
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+}
+
+/* called under rcu_read_lock() */
+static int icssm_prueth_sw_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct icssm_sw_event_work *switchdev_work;
+ int err;
+
+ if (!icssm_prueth_sw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set
+ (ndev, ptr, icssm_prueth_sw_port_dev_check,
+ icssm_prueth_sw_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, icssm_sw_event_work);
+ switchdev_work->emac = emac;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ netdev_hold(ndev, &switchdev_work->ndev_tracker, GFP_ATOMIC);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static int icssm_prueth_switchdev_obj_add(struct net_device *ndev,
+ const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int ret = 0;
+ u8 hash;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ dev_dbg(prueth->dev, "MDB add: %s: vid %u:%pM port: %x\n",
+ ndev->name, mdb->vid, mdb->addr, emac->port_id);
+ hash = icssm_emac_get_mc_hash(mdb->addr, emac->mc_filter_mask);
+ icssm_emac_mc_filter_bin_allow(emac, hash);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int icssm_prueth_switchdev_obj_del(struct net_device *ndev,
+ const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct netdev_hw_addr *ha;
+ u8 hash, tmp_hash;
+ int ret = 0;
+ u8 *mask;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ dev_dbg(prueth->dev, "MDB del: %s: vid %u:%pM port: %x\n",
+ ndev->name, mdb->vid, mdb->addr, emac->port_id);
+ if (prueth->hw_bridge_dev) {
+ mask = emac->mc_filter_mask;
+ hash = icssm_emac_get_mc_hash(mdb->addr, mask);
+ netdev_for_each_mc_addr(ha, prueth->hw_bridge_dev) {
+ tmp_hash = icssm_emac_get_mc_hash(ha->addr,
+ mask);
+ /* Another MC address is in the bin.
+ * Don't disable.
+ */
+ if (tmp_hash == hash)
+ return 0;
+ }
+ icssm_emac_mc_filter_bin_disallow(emac, hash);
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/* switchdev notifiers */
+static int icssm_prueth_sw_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add
+ (ndev, ptr, icssm_prueth_sw_port_dev_check,
+ icssm_prueth_switchdev_obj_add);
+ return notifier_from_errno(err);
+
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del
+ (ndev, ptr, icssm_prueth_sw_port_dev_check,
+ icssm_prueth_switchdev_obj_del);
+ return notifier_from_errno(err);
+
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set
+ (ndev, ptr, icssm_prueth_sw_port_dev_check,
+ icssm_prueth_sw_attr_set);
+ return notifier_from_errno(err);
+
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+int icssm_prueth_sw_register_notifiers(struct prueth *prueth)
+{
+ int ret = 0;
+
+ prueth->prueth_switchdev_nb.notifier_call =
+ &icssm_prueth_sw_switchdev_event;
+ ret = register_switchdev_notifier(&prueth->prueth_switchdev_nb);
+ if (ret) {
+ dev_err(prueth->dev,
+ "register switchdev notifier failed ret:%d\n", ret);
+ return ret;
+ }
+
+ prueth->prueth_switchdev_bl_nb.notifier_call =
+ &icssm_prueth_sw_blocking_event;
+ ret = register_switchdev_blocking_notifier
+ (&prueth->prueth_switchdev_bl_nb);
+ if (ret) {
+ dev_err(prueth->dev,
+ "register switchdev blocking notifier failed ret:%d\n",
+ ret);
+ unregister_switchdev_notifier(&prueth->prueth_switchdev_nb);
+ }
+
+ return ret;
+}
+
+void icssm_prueth_sw_unregister_notifiers(struct prueth *prueth)
+{
+ unregister_switchdev_blocking_notifier(&prueth->prueth_switchdev_bl_nb);
+ unregister_switchdev_notifier(&prueth->prueth_switchdev_nb);
+}
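
A minimal sketch of how these two entry points are expected to be wired from the driver's setup/teardown paths; the function names below are hypothetical, as the real call sites live elsewhere in the icssm driver:

/* Hypothetical call sites, for illustration only. */
static int icssm_example_setup_switchdev(struct prueth *prueth)
{
	/* Registers the atomic (FDB) notifier and the blocking
	 * (attr/obj) notifier; unwinds the former internally if
	 * the latter fails to register.
	 */
	return icssm_prueth_sw_register_notifiers(prueth);
}

static void icssm_example_teardown_switchdev(struct prueth *prueth)
{
	/* Run this before the ports go away so no new deferred FDB
	 * work gets queued against a vanishing net_device.
	 */
	icssm_prueth_sw_unregister_notifiers(prueth);
}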
diff --git a/drivers/net/ethernet/ti/icssm/icssm_switchdev.h b/drivers/net/ethernet/ti/icssm/icssm_switchdev.h
new file mode 100644
index 000000000000..b03a98e3472e
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_switchdev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __NET_TI_ICSSM_SWITCHDEV_H
+#define __NET_TI_ICSSM_SWITCHDEV_H
+
+#include "icssm_prueth.h"
+
+int icssm_prueth_sw_register_notifiers(struct prueth *prueth);
+void icssm_prueth_sw_unregister_notifiers(struct prueth *prueth);
+bool icssm_prueth_sw_port_dev_check(const struct net_device *ndev);
+#endif /* __NET_TI_ICSSM_SWITCHDEV_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_vlan_mcast_filter_mmap.h b/drivers/net/ethernet/ti/icssm/icssm_vlan_mcast_filter_mmap.h
new file mode 100644
index 000000000000..c177c19a36ef
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_vlan_mcast_filter_mmap.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * This file contains VLAN/Multicast filtering feature memory map
+ *
+ */
+
+#ifndef ICSS_VLAN_MULTICAST_FILTER_MM_H
+#define ICSS_VLAN_MULTICAST_FILTER_MM_H
+
+/* VLAN/Multicast filter defines & offsets,
+ * present on both PRU0 and PRU1 DRAM
+ */
+
+/* Feature enable/disable values for multicast filtering */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_DISABLED 0x00
+#define ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_ENABLED 0x01
+
+/* Feature enable/disable values for VLAN filtering */
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_DISABLED 0x00
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_ENABLED 0x01
+
+/* Add/remove multicast mac id for filtering bin */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_ALLOWED 0x01
+#define ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_NOT_ALLOWED 0x00
+
+/* Default HASH value for the multicast filtering Mask */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_INIT_VAL 0xFF
+
+/* Size requirements for Multicast filtering feature */
+#define ICSS_EMAC_FW_MULTICAST_TABLE_SIZE_BYTES 256
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_SIZE_BYTES 6
+#define ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_SIZE_BYTES 1
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OVERRIDE_STATUS_SIZE_BYTES 1
+#define ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_SIZE_BYTES 4
+
+/* Size requirements for VLAN filtering feature : 4096 bits = 512 bytes */
+#define ICSS_EMAC_FW_VLAN_FILTER_TABLE_SIZE_BYTES 512
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_SIZE_BYTES 1
+#define ICSS_EMAC_FW_VLAN_FILTER_DROP_CNT_SIZE_BYTES 4
+
+/* Mask override set status */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OVERRIDE_SET 1
+/* Mask override not set status */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OVERRIDE_NOT_SET 0
+/* 6 bytes HASH Mask for the MAC */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OFFSET 0xF4
+/* 0 -> multicast filtering disabled | 1 -> multicast filtering enabled */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_OFFSET \
+ (ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OFFSET + \
+ ICSS_EMAC_FW_MULTICAST_FILTER_MASK_SIZE_BYTES)
+/* Status indicating if the HASH override is done or not: 0: no, 1: yes */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_OVERRIDE_STATUS \
+ (ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_OFFSET + \
+ ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_SIZE_BYTES)
+/* Multicast drop statistics */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_OFFSET \
+ (ICSS_EMAC_FW_MULTICAST_FILTER_OVERRIDE_STATUS +\
+ ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OVERRIDE_STATUS_SIZE_BYTES)
+/* Multicast table */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_TABLE \
+ (ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_OFFSET +\
+ ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_SIZE_BYTES)
+
+/* Multicast filter defines & offsets for LRE
+ */
+#define ICSS_LRE_FW_MULTICAST_TABLE_SEARCH_OP_CONTROL_BIT 0xE0
+/* one byte field :
+ * 0 -> multicast filtering disabled
+ * 1 -> multicast filtering enabled
+ */
+#define ICSS_LRE_FW_MULTICAST_FILTER_MASK 0xE4
+#define ICSS_LRE_FW_MULTICAST_FILTER_TABLE 0x100
+
+/* VLAN table Offsets */
+#define ICSS_EMAC_FW_VLAN_FLTR_TBL_BASE_ADDR 0x200
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_BITMAP_OFFSET 0xEF
+#define ICSS_EMAC_FW_VLAN_FILTER_DROP_CNT_OFFSET \
+ (ICSS_EMAC_FW_VLAN_FILTER_CTRL_BITMAP_OFFSET + \
+ ICSS_EMAC_FW_VLAN_FILTER_CTRL_SIZE_BYTES)
+
+/* VLAN filter Control Bit maps */
+/* one bit field, bit 0: | 0 : VLAN filter disabled (default),
+ * 1: VLAN filter enabled
+ */
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_ENABLE_BIT 0
+/* one bit field, bit 1: | 0 : untagged host rcv allowed (default),
+ * 1: untagged host rcv not allowed
+ */
+#define ICSS_EMAC_FW_VLAN_FILTER_UNTAG_HOST_RCV_ALLOW_CTRL_BIT 1
+/* one bit field, bit 2: | 0 : priotag host rcv allowed (default),
+ * 1: priotag host rcv not allowed
+ */
+#define ICSS_EMAC_FW_VLAN_FILTER_PRIOTAG_HOST_RCV_ALLOW_CTRL_BIT 2
+/* one bit field, bit 3: | 0 : skip sv vlan flow,
+ * 1 : take sv vlan flow (not applicable for dual emac)
+ */
+#define ICSS_EMAC_FW_VLAN_FILTER_SV_VLAN_FLOW_HOST_RCV_ALLOW_CTRL_BIT 3
+
+/* VLAN IDs */
+#define ICSS_EMAC_FW_VLAN_FILTER_PRIOTAG_VID 0
+#define ICSS_EMAC_FW_VLAN_FILTER_VID_MIN 0x0000
+#define ICSS_EMAC_FW_VLAN_FILTER_VID_MAX 0x0FFF
+
+/* VLAN Filtering Commands */
+#define ICSS_EMAC_FW_VLAN_FILTER_ADD_VLAN_VID_CMD 0x00
+#define ICSS_EMAC_FW_VLAN_FILTER_REMOVE_VLAN_VID_CMD 0x01
+
+/* Switch defines for VLAN/MC filtering */
+/* SRAM
+ * VLAN filter defines & offsets
+ */
+#define ICSS_LRE_FW_VLAN_FLTR_CTRL_BYTE 0x1FE
+/* one bit field | 0 : VLAN filter disabled
+ * | 1 : VLAN filter enabled
+ */
+#define ICSS_LRE_FW_VLAN_FLTR_TBL_BASE_ADDR 0x200
+
+#endif /* ICSS_VLAN_MULTICAST_FILTER_MM_H */
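
As a usage illustration of the offsets above: a hedged sketch of enabling multicast filtering and opening one hash bin, assuming dram is the ioremapped PRU data RAM base (the actual mapping and locking are driver-specific):

#include <linux/io.h>

/* Hypothetical helper, for illustration only. */
static void icssm_example_mc_allow_bin(void __iomem *dram, u8 hash)
{
	/* Turn the firmware multicast filter on... */
	writeb(ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_ENABLED,
	       dram + ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_OFFSET);

	/* ...and mark one bin of the 256-entry table as allowed. */
	writeb(ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_ALLOWED,
	       dram + ICSS_EMAC_FW_MULTICAST_FILTER_TABLE + hash);
}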
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index b9cbd3b4a8a2..9cfddaa807e2 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -65,14 +65,14 @@ struct netcp_addr {
struct netcp_stats {
struct u64_stats_sync syncp_rx ____cacheline_aligned_in_smp;
- u64 rx_packets;
- u64 rx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
u32 rx_errors;
u32 rx_dropped;
struct u64_stats_sync syncp_tx ____cacheline_aligned_in_smp;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
u32 tx_errors;
u32 tx_dropped;
};
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 5ed1c46bbcb1..eb8fc2ed05f4 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -759,8 +759,8 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
knav_pool_desc_put(netcp->rx_pool, desc);
u64_stats_update_begin(&rx_stats->syncp_rx);
- rx_stats->rx_packets++;
- rx_stats->rx_bytes += skb->len;
+ u64_stats_inc(&rx_stats->rx_packets);
+ u64_stats_add(&rx_stats->rx_bytes, skb->len);
u64_stats_update_end(&rx_stats->syncp_rx);
/* push skb up the stack */
@@ -1045,8 +1045,8 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
}
u64_stats_update_begin(&tx_stats->syncp_tx);
- tx_stats->tx_packets++;
- tx_stats->tx_bytes += skb->len;
+ u64_stats_inc(&tx_stats->tx_packets);
+ u64_stats_add(&tx_stats->tx_bytes, skb->len);
u64_stats_update_end(&tx_stats->syncp_tx);
dev_kfree_skb(skb);
pkts++;
@@ -1973,14 +1973,14 @@ netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
do {
start = u64_stats_fetch_begin(&p->syncp_rx);
- rxpackets = p->rx_packets;
- rxbytes = p->rx_bytes;
+ rxpackets = u64_stats_read(&p->rx_packets);
+ rxbytes = u64_stats_read(&p->rx_bytes);
} while (u64_stats_fetch_retry(&p->syncp_rx, start));
do {
start = u64_stats_fetch_begin(&p->syncp_tx);
- txpackets = p->tx_packets;
- txbytes = p->tx_bytes;
+ txpackets = u64_stats_read(&p->tx_packets);
+ txbytes = u64_stats_read(&p->tx_bytes);
} while (u64_stats_fetch_retry(&p->syncp_tx, start));
stats->rx_packets = rxpackets;
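
The conversion above is the standard u64_stats_t idiom: writers wrap updates in update_begin/end, and readers retry (on 32-bit) if they raced with a writer. A condensed, self-contained sketch of the pairing (not netcp-specific):

#include <linux/u64_stats_sync.h>

struct demo_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
};

/* Writer side: the datapath, serialized per queue/CPU. */
static void demo_stats_update(struct demo_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_add(&s->bytes, len);
	u64_stats_update_end(&s->syncp);
}

/* Reader side: loops until it observes a consistent snapshot. */
static void demo_stats_read(struct demo_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = u64_stats_read(&s->packets);
		*bytes = u64_stats_read(&s->bytes);
	} while (u64_stats_fetch_retry(&s->syncp, start));
}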
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 32cadafa4b3b..b31b48d26575 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -179,8 +179,7 @@ static void wx_dma_sync_frag(struct wx_ring *rx_ring,
static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
union wx_rx_desc *rx_desc,
- struct sk_buff **skb,
- int *rx_buffer_pgcnt)
+ struct sk_buff **skb)
{
struct wx_rx_buffer *rx_buffer;
unsigned int size;
@@ -188,12 +187,6 @@ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
- *rx_buffer_pgcnt = page_count(rx_buffer->page);
-#else
- *rx_buffer_pgcnt = 0;
-#endif
-
prefetchw(rx_buffer->page);
*skb = rx_buffer->skb;
@@ -221,8 +214,7 @@ skip_sync:
static void wx_put_rx_buffer(struct wx_ring *rx_ring,
struct wx_rx_buffer *rx_buffer,
- struct sk_buff *skb,
- int rx_buffer_pgcnt)
+ struct sk_buff *skb)
{
/* clear contents of rx_buffer */
rx_buffer->page = NULL;
@@ -685,7 +677,6 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
struct wx_rx_buffer *rx_buffer;
union wx_rx_desc *rx_desc;
struct sk_buff *skb;
- int rx_buffer_pgcnt;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= WX_RX_BUFFER_WRITE) {
@@ -703,7 +694,7 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
*/
dma_rmb();
- rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt);
+ rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb);
/* retrieve a buffer from the ring */
skb = wx_build_skb(rx_ring, rx_buffer, rx_desc);
@@ -714,7 +705,7 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
break;
}
- wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
+ wx_put_rx_buffer(rx_ring, rx_buffer, skb);
cleaned_count++;
/* place incomplete frames back on ring for completion */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index f3cb00109529..59d758acccf0 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -193,6 +193,13 @@ static int txgbe_get_ethtool_fdir_all(struct txgbe *txgbe,
return 0;
}
+static u32 txgbe_get_rx_ring_count(struct net_device *dev)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ return wx->num_rx_queues;
+}
+
static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -201,10 +208,6 @@ static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = wx->num_rx_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = txgbe->fdir_filter_count;
ret = 0;
@@ -587,6 +590,7 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_channels = txgbe_set_channels,
.get_rxnfc = txgbe_get_rxnfc,
.set_rxnfc = txgbe_set_rxnfc,
+ .get_rx_ring_count = txgbe_get_rx_ring_count,
.get_rxfh_fields = wx_get_rxfh_fields,
.set_rxfh_fields = wx_set_rxfh_fields,
.get_rxfh_indir_size = wx_rss_indir_size,
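
Moving the ring count into a dedicated .get_rx_ring_count callback lets the ethtool core answer ETHTOOL_GRXRINGS itself, so drivers no longer special-case it in get_rxnfc. A sketch of the driver-side contract, with a hypothetical example_priv for illustration:

/* Report how many RX rings exist; the ethtool core uses this to
 * answer the ETHTOOL_GRXRINGS query.
 */
static u32 example_get_rx_ring_count(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	return priv->num_rx_queues;
}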
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 284031fb2e2c..998bacd508b8 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2787,7 +2787,7 @@ static int axienet_probe(struct platform_device *pdev)
int addr_width = 32;
u32 value;
- ndev = alloc_etherdev(sizeof(*lp));
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
if (!ndev)
return -ENOMEM;
@@ -2815,41 +2815,32 @@ static int axienet_probe(struct platform_device *pdev)
seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
- lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
+ lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev,
+ "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
* treat the first clock specified as the AXI clock.
*/
- lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
- }
- if (IS_ERR(lp->axi_clk)) {
- ret = PTR_ERR(lp->axi_clk);
- goto free_netdev;
- }
- ret = clk_prepare_enable(lp->axi_clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
- goto free_netdev;
+ lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
}
+ if (IS_ERR(lp->axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(lp->axi_clk),
+ "could not get AXI clock\n");
lp->misc_clks[0].id = "axis_clk";
lp->misc_clks[1].id = "ref_clk";
lp->misc_clks[2].id = "mgt_clk";
- ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
- if (ret)
- goto cleanup_clk;
-
- ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+ ret = devm_clk_bulk_get_optional_enable(&pdev->dev, XAE_NUM_MISC_CLOCKS,
+ lp->misc_clks);
if (ret)
- goto cleanup_clk;
+ return dev_err_probe(&pdev->dev, ret,
+ "could not get/enable misc. clocks\n");
/* Map device registers */
lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
- if (IS_ERR(lp->regs)) {
- ret = PTR_ERR(lp->regs);
- goto cleanup_clk;
- }
+ if (IS_ERR(lp->regs))
+ return PTR_ERR(lp->regs);
lp->regs_start = ethres->start;
/* Setup checksum offload, but default to off if not specified */
@@ -2918,19 +2909,17 @@ static int axienet_probe(struct platform_device *pdev)
lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
break;
default:
- ret = -EINVAL;
- goto cleanup_clk;
+ return -EINVAL;
}
} else {
ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
if (ret)
- goto cleanup_clk;
+ return ret;
}
if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
- ret = -EINVAL;
- goto cleanup_clk;
+ return -EINVAL;
}
if (!of_property_present(pdev->dev.of_node, "dmas")) {
@@ -2945,7 +2934,7 @@ static int axienet_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"unable to get DMA resource\n");
of_node_put(np);
- goto cleanup_clk;
+ return ret;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev,
&dmares);
@@ -2962,19 +2951,17 @@ static int axienet_probe(struct platform_device *pdev)
}
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
- ret = PTR_ERR(lp->dma_regs);
- goto cleanup_clk;
+ return PTR_ERR(lp->dma_regs);
}
if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
dev_err(&pdev->dev, "could not determine irqs\n");
- ret = -ENOMEM;
- goto cleanup_clk;
+ return -ENOMEM;
}
/* Reset core now that clocks are enabled, prior to accessing MDIO */
ret = __axienet_device_reset(lp);
if (ret)
- goto cleanup_clk;
+ return ret;
/* Autodetect the need for 64-bit DMA pointers.
* When the IP is configured for a bus width bigger than 32 bits,
@@ -3001,14 +2988,13 @@ static int axienet_probe(struct platform_device *pdev)
}
if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
- ret = -EINVAL;
- goto cleanup_clk;
+ return -EINVAL;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
- goto cleanup_clk;
+ return ret;
}
netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
@@ -3018,15 +3004,12 @@ static int axienet_probe(struct platform_device *pdev)
lp->eth_irq = platform_get_irq_optional(pdev, 0);
if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
- ret = lp->eth_irq;
- goto cleanup_clk;
+ return lp->eth_irq;
}
tx_chan = dma_request_chan(lp->dev, "tx_chan0");
- if (IS_ERR(tx_chan)) {
- ret = PTR_ERR(tx_chan);
- dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
- goto cleanup_clk;
- }
+ if (IS_ERR(tx_chan))
+ return dev_err_probe(lp->dev, PTR_ERR(tx_chan),
+ "No Ethernet DMA (TX) channel found\n");
cfg.reset = 1;
/* As name says VDMA but it has support for DMA channel reset */
@@ -3034,7 +3017,7 @@ static int axienet_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Reset channel failed\n");
dma_release_channel(tx_chan);
- goto cleanup_clk;
+ return ret;
}
dma_release_channel(tx_chan);
@@ -3139,13 +3122,6 @@ cleanup_mdio:
put_device(&lp->pcs_phy->dev);
if (lp->mii_bus)
axienet_mdio_teardown(lp);
-cleanup_clk:
- clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
- clk_disable_unprepare(lp->axi_clk);
-
-free_netdev:
- free_netdev(ndev);
-
return ret;
}
@@ -3163,11 +3139,6 @@ static void axienet_remove(struct platform_device *pdev)
put_device(&lp->pcs_phy->dev);
axienet_mdio_teardown(lp);
-
- clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
- clk_disable_unprepare(lp->axi_clk);
-
- free_netdev(ndev);
}
static void axienet_shutdown(struct platform_device *pdev)
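
The axienet rework above leans on device-managed clock helpers, which is what lets every failure path collapse into a plain return. A minimal sketch of the pattern, assuming a hypothetical driver with one optional named clock:

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Managed get + prepare + enable: the clock is disabled,
	 * unprepared and released automatically on driver unbind,
	 * so probe needs no unwind labels for it.
	 */
	clk = devm_clk_get_optional_enabled(&pdev->dev, "bus_clk");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "could not get bus clock\n");

	return 0;
}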
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 77b0c3d52041..0949d4579171 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -38,6 +38,26 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
#define GENEVE_IPV4_HLEN (ETH_HLEN + sizeof(struct iphdr) + GENEVE_BASE_HLEN)
#define GENEVE_IPV6_HLEN (ETH_HLEN + sizeof(struct ipv6hdr) + GENEVE_BASE_HLEN)
+#define GENEVE_OPT_NETDEV_CLASS 0x100
+#define GENEVE_OPT_GRO_HINT_SIZE 8
+#define GENEVE_OPT_GRO_HINT_TYPE 1
+#define GENEVE_OPT_GRO_HINT_LEN 1
+
+struct geneve_opt_gro_hint {
+ u8 inner_proto_id:2,
+ nested_is_v6:1;
+ u8 nested_nh_offset;
+ u8 nested_tp_offset;
+ u8 nested_hdr_len;
+};
+
+struct geneve_skb_cb {
+ unsigned int gro_hint_len;
+ struct geneve_opt_gro_hint gro_hint;
+};
+
+#define GENEVE_SKB_CB(__skb) ((struct geneve_skb_cb *)&((__skb)->cb[0]))
+
/* per-network namespace private data for this module */
struct geneve_net {
struct list_head geneve_list;
@@ -56,6 +76,7 @@ struct geneve_config {
bool collect_md;
bool use_udp6_rx_checksums;
bool ttl_inherit;
+ bool gro_hint;
enum ifla_geneve_df df;
bool inner_proto_inherit;
u16 port_min;
@@ -84,6 +105,7 @@ struct geneve_dev {
struct geneve_sock {
bool collect_md;
+ bool gro_hint;
struct list_head list;
struct socket *sock;
struct rcu_head rcu;
@@ -91,6 +113,21 @@ struct geneve_sock {
struct hlist_head vni_list[VNI_HASH_SIZE];
};
+static const __be16 proto_id_map[] = { htons(ETH_P_TEB),
+ htons(ETH_P_IPV6),
+ htons(ETH_P_IP) };
+
+static int proto_to_id(__be16 proto)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(proto_id_map); i++)
+ if (proto_id_map[i] == proto)
+ return i;
+
+ return -1;
+}
+
static inline __u32 geneve_net_vni_hash(u8 vni[3])
{
__u32 vnid;
@@ -222,9 +259,8 @@ static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs,
/* geneve receive/decap routine */
static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
- struct sk_buff *skb)
+ struct sk_buff *skb, const struct genevehdr *gnvh)
{
- struct genevehdr *gnvh = geneve_hdr(skb);
struct metadata_dst *tun_dst = NULL;
unsigned int len;
int nh, err = 0;
@@ -325,8 +361,12 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
}
}
+ /* Skip the additional GRO stage when hints are in use. */
len = skb->len;
- err = gro_cells_receive(&geneve->gro_cells, skb);
+ if (skb->encapsulation)
+ err = netif_rx(skb);
+ else
+ err = gro_cells_receive(&geneve->gro_cells, skb);
if (likely(err == NET_RX_SUCCESS))
dev_dstats_rx_add(geneve->dev, len);
@@ -363,6 +403,250 @@ static void geneve_uninit(struct net_device *dev)
gro_cells_destroy(&geneve->gro_cells);
}
+static int geneve_hlen(const struct genevehdr *gh)
+{
+ return sizeof(*gh) + gh->opt_len * 4;
+}
+
+/*
+ * Look for the GRO hint in the geneve options; if not found, or if it does
+ * not pass basic sanitization, return 0; otherwise return its offset WRT
+ * the geneve hdr start.
+ */
+static unsigned int
+geneve_opt_gro_hint_off(const struct genevehdr *gh, __be16 *type,
+ unsigned int *gh_len)
+{
+ struct geneve_opt *opt = (void *)(gh + 1);
+ unsigned int id, opt_len = gh->opt_len;
+ struct geneve_opt_gro_hint *gro_hint;
+
+ while (opt_len >= (GENEVE_OPT_GRO_HINT_SIZE >> 2)) {
+ if (opt->opt_class == htons(GENEVE_OPT_NETDEV_CLASS) &&
+ opt->type == GENEVE_OPT_GRO_HINT_TYPE &&
+ opt->length == GENEVE_OPT_GRO_HINT_LEN)
+ goto found;
+
+ /* check for bad opt len */
+ if (opt->length + 1 >= opt_len)
+ return 0;
+
+ /* next opt */
+ opt_len -= opt->length + 1;
+ opt = ((void *)opt) + ((opt->length + 1) << 2);
+ }
+ return 0;
+
+found:
+ gro_hint = (struct geneve_opt_gro_hint *)opt->opt_data;
+
+ /*
+	 * Sanitize the hinted hdrs: the nested transport (UDP) header must
+	 * fit within the overall hinted hdr size.
+ */
+ if (gro_hint->nested_tp_offset + sizeof(struct udphdr) >
+ gro_hint->nested_hdr_len)
+ return 0;
+
+ if (gro_hint->nested_nh_offset +
+ (gro_hint->nested_is_v6 ? sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr)) >
+ gro_hint->nested_tp_offset)
+ return 0;
+
+ /* Allow only supported L2. */
+ id = gro_hint->inner_proto_id;
+ if (id >= ARRAY_SIZE(proto_id_map))
+ return 0;
+
+ *type = proto_id_map[id];
+ *gh_len += gro_hint->nested_hdr_len;
+
+ return (void *)gro_hint - (void *)gh;
+}
+
+static const struct geneve_opt_gro_hint *
+geneve_opt_gro_hint(const struct genevehdr *gh, unsigned int hint_off)
+{
+ return (const struct geneve_opt_gro_hint *)((void *)gh + hint_off);
+}
+
+static unsigned int
+geneve_sk_gro_hint_off(const struct sock *sk, const struct genevehdr *gh,
+ __be16 *type, unsigned int *gh_len)
+{
+ const struct geneve_sock *gs = rcu_dereference_sk_user_data(sk);
+
+ if (!gs || !gs->gro_hint)
+ return 0;
+ return geneve_opt_gro_hint_off(gh, type, gh_len);
+}
+
+/* Validate the packet headers pointed to by @data WRT the provided hint */
+static bool
+geneve_opt_gro_hint_validate(void *data,
+ const struct geneve_opt_gro_hint *gro_hint)
+{
+ void *nested_nh = data + gro_hint->nested_nh_offset;
+ struct iphdr *iph;
+
+ if (gro_hint->nested_is_v6) {
+ struct ipv6hdr *ipv6h = nested_nh;
+ struct ipv6_opt_hdr *opth;
+ int offset, len;
+
+ if (ipv6h->nexthdr == IPPROTO_UDP)
+ return true;
+
+ offset = sizeof(*ipv6h) + gro_hint->nested_nh_offset;
+ while (offset + sizeof(*opth) <= gro_hint->nested_tp_offset) {
+ opth = data + offset;
+
+ len = ipv6_optlen(opth);
+ if (len + offset > gro_hint->nested_tp_offset)
+ return false;
+ if (opth->nexthdr == IPPROTO_UDP)
+ return true;
+
+ offset += len;
+ }
+ return false;
+ }
+
+ iph = nested_nh;
+ if (*(u8 *)iph != 0x45 || ip_is_fragment(iph) ||
+ iph->protocol != IPPROTO_UDP || ip_fast_csum((u8 *)iph, 5))
+ return false;
+
+ return true;
+}
+
+/*
+ * Validate the skb headers following the specified geneve hdr against the
+ * provided hint, including the nested L4 checksum.
+ * The caller has already ensured that the relevant amount of data is
+ * available in the linear part.
+ */
+static bool
+geneve_opt_gro_hint_validate_csum(const struct sk_buff *skb,
+ const struct genevehdr *gh,
+ const struct geneve_opt_gro_hint *gro_hint)
+{
+ unsigned int plen, gh_len = geneve_hlen(gh);
+ void *nested = (void *)gh + gh_len;
+ struct udphdr *nested_uh;
+ unsigned int nested_len;
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ __wsum csum, psum;
+
+ if (!geneve_opt_gro_hint_validate(nested, gro_hint))
+ return false;
+
+ /* Use GRO hints with nested csum only if the outer header has csum. */
+ nested_uh = nested + gro_hint->nested_tp_offset;
+ if (!nested_uh->check || skb->ip_summed == CHECKSUM_PARTIAL)
+ return true;
+
+ if (!NAPI_GRO_CB(skb)->csum_valid)
+ return false;
+
+ /* Compute the complete checksum up to the nested transport. */
+ plen = gh_len + gro_hint->nested_tp_offset;
+ csum = csum_sub(NAPI_GRO_CB(skb)->csum, csum_partial(gh, plen, 0));
+ nested_len = skb_gro_len(skb) - plen;
+
+ /* Compute the nested pseudo header csum. */
+ ipv6h = nested + gro_hint->nested_nh_offset;
+ iph = (struct iphdr *)ipv6h;
+ psum = gro_hint->nested_is_v6 ?
+ ~csum_unfold(csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ nested_len, IPPROTO_UDP, 0)) :
+ csum_tcpudp_nofold(iph->saddr, iph->daddr,
+ nested_len, IPPROTO_UDP, 0);
+
+ return !csum_fold(csum_add(psum, csum));
+}
+
+static int geneve_post_decap_hint(const struct sock *sk, struct sk_buff *skb,
+ unsigned int gh_len,
+ struct genevehdr **geneveh)
+{
+ const struct geneve_opt_gro_hint *gro_hint;
+ unsigned int len, total_len, hint_off;
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ __be16 p;
+
+ hint_off = geneve_sk_gro_hint_off(sk, *geneveh, &p, &len);
+ if (!hint_off)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ gro_hint = geneve_opt_gro_hint(*geneveh, hint_off);
+ if (unlikely(!pskb_may_pull(skb, gro_hint->nested_hdr_len)))
+ return -ENOMEM;
+
+ *geneveh = geneve_hdr(skb);
+ gro_hint = geneve_opt_gro_hint(*geneveh, hint_off);
+
+ /*
+	 * Validate hints from an untrusted source before accessing
+	 * the headers; the csum will be checked later by the nested
+ * protocol rx path.
+ */
+ if (unlikely(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY &&
+ !geneve_opt_gro_hint_validate(skb->data, gro_hint)))
+ return -EINVAL;
+
+ ipv6h = (void *)skb->data + gro_hint->nested_nh_offset;
+ iph = (struct iphdr *)ipv6h;
+ total_len = skb->len - gro_hint->nested_nh_offset;
+ if (total_len > GRO_LEGACY_MAX_SIZE)
+ return -E2BIG;
+
+ /*
+ * After stripping the outer encap, the packet still carries a
+ * tunnel encapsulation: the nested one.
+ */
+ skb->encapsulation = 1;
+
+	/* GSO expects a valid transport header; point it at the nested one. */
+ skb_set_transport_header(skb, gro_hint->nested_tp_offset);
+
+	/* Adjust the nested IP{6} hdr to the actual GSO len. */
+ if (gro_hint->nested_is_v6) {
+ ipv6h->payload_len = htons(total_len - sizeof(*ipv6h));
+ } else {
+ __be16 old_len = iph->tot_len;
+
+ iph->tot_len = htons(total_len);
+
+ /* For IPv4 additionally adjust the nested csum. */
+ csum_replace2(&iph->check, old_len, iph->tot_len);
+ ip_send_check(iph);
+ }
+
+ /* Adjust the nested UDP header len and checksum. */
+ uh = udp_hdr(skb);
+ uh->len = htons(skb->len - gro_hint->nested_tp_offset);
+ if (uh->check) {
+ len = skb->len - gro_hint->nested_nh_offset;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ if (gro_hint->nested_is_v6)
+ uh->check = ~udp_v6_check(len, &ipv6h->saddr,
+ &ipv6h->daddr, 0);
+ else
+ uh->check = ~udp_v4_check(len, iph->saddr,
+ iph->daddr, 0);
+ } else {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+ return 0;
+}
+
/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
@@ -404,7 +688,18 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- geneve_rx(geneve, gs, skb);
+ /*
+ * After hint processing, the transport header points to the inner one
+	 * and we can't rely on geneve_hdr() anymore.
+ */
+ geneveh = geneve_hdr(skb);
+ if (geneve_post_decap_hint(sk, skb, sizeof(struct genevehdr) +
+ opts_len, &geneveh)) {
+ DEV_STATS_INC(geneve->dev, rx_errors);
+ goto drop;
+ }
+
+ geneve_rx(geneve, gs, skb, geneveh);
return 0;
drop:
@@ -495,22 +790,93 @@ static struct socket *geneve_create_sock(struct net *net, bool ipv6,
return sock;
}
-static int geneve_hlen(struct genevehdr *gh)
+static bool geneve_hdr_match(struct sk_buff *skb,
+ const struct genevehdr *gh,
+ const struct genevehdr *gh2,
+ unsigned int hint_off)
{
- return sizeof(*gh) + gh->opt_len * 4;
+ const struct geneve_opt_gro_hint *gro_hint;
+ void *nested, *nested2, *nh, *nh2;
+ struct udphdr *udp, *udp2;
+ unsigned int gh_len;
+
+ /* Match the geneve hdr and options */
+ if (gh->opt_len != gh2->opt_len)
+ return false;
+
+ gh_len = geneve_hlen(gh);
+ if (memcmp(gh, gh2, gh_len))
+ return false;
+
+ if (!hint_off)
+ return true;
+
+ /*
+	 * When the GRO hint is present, consider the nested headers
+	 * part of the geneve options.
+ */
+ nested = (void *)gh + gh_len;
+ nested2 = (void *)gh2 + gh_len;
+ gro_hint = geneve_opt_gro_hint(gh, hint_off);
+ if (!memcmp(nested, nested2, gro_hint->nested_hdr_len))
+ return true;
+
+ /*
+ * The nested headers differ; the packets can still belong to
+	 * the same flow when IPs/proto/ports match; if so, flushing
+	 * is required.
+ */
+ nh = nested + gro_hint->nested_nh_offset;
+ nh2 = nested2 + gro_hint->nested_nh_offset;
+ if (gro_hint->nested_is_v6) {
+ struct ipv6hdr *iph = nh, *iph2 = nh2;
+ unsigned int nested_nlen;
+ __be32 first_word;
+
+ first_word = *(__be32 *)iph ^ *(__be32 *)iph2;
+ if ((first_word & htonl(0xF00FFFFF)) ||
+ !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
+ !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
+ iph->nexthdr != iph2->nexthdr)
+ return false;
+
+ nested_nlen = gro_hint->nested_tp_offset -
+ gro_hint->nested_nh_offset;
+ if (nested_nlen > sizeof(struct ipv6hdr) &&
+ (memcmp(iph + 1, iph2 + 1,
+ nested_nlen - sizeof(struct ipv6hdr))))
+ return false;
+ } else {
+ struct iphdr *iph = nh, *iph2 = nh2;
+
+ if ((iph->protocol ^ iph2->protocol) |
+ ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
+ ((__force u32)iph->daddr ^ (__force u32)iph2->daddr))
+ return false;
+ }
+
+ udp = nested + gro_hint->nested_tp_offset;
+ udp2 = nested2 + gro_hint->nested_tp_offset;
+ if (udp->source != udp2->source || udp->dest != udp2->dest ||
+ udp->check != udp2->check)
+ return false;
+
+ NAPI_GRO_CB(skb)->flush = 1;
+ return true;
}
static struct sk_buff *geneve_gro_receive(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
{
+ unsigned int hlen, gh_len, off_gnv, hint_off;
+ const struct geneve_opt_gro_hint *gro_hint;
+ const struct packet_offload *ptype;
+ struct genevehdr *gh, *gh2;
struct sk_buff *pp = NULL;
struct sk_buff *p;
- struct genevehdr *gh, *gh2;
- unsigned int hlen, gh_len, off_gnv;
- const struct packet_offload *ptype;
- __be16 type;
int flush = 1;
+ __be16 type;
off_gnv = skb_gro_offset(skb);
hlen = off_gnv + sizeof(*gh);
@@ -521,6 +887,7 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
if (gh->ver != GENEVE_VER || gh->oam)
goto out;
gh_len = geneve_hlen(gh);
+ type = gh->proto_type;
hlen = off_gnv + gh_len;
if (!skb_gro_may_pull(skb, hlen)) {
@@ -529,13 +896,30 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
goto out;
}
+ /* The GRO hint/nested hdr could use a different ethernet type. */
+ hint_off = geneve_sk_gro_hint_off(sk, gh, &type, &gh_len);
+ if (hint_off) {
+		/*
+		 * If the hint is present but nested hdr validation fails, do
+		 * not attempt plain GRO: it would ignore the inner hdrs and
+		 * cause OoO.
+ */
+ gh = skb_gro_header(skb, off_gnv + gh_len, off_gnv);
+ if (unlikely(!gh))
+ goto out;
+
+ gro_hint = geneve_opt_gro_hint(gh, hint_off);
+ if (!geneve_opt_gro_hint_validate_csum(skb, gh, gro_hint))
+ goto out;
+ }
+
list_for_each_entry(p, head, list) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
gh2 = (struct genevehdr *)(p->data + off_gnv);
- if (gh->opt_len != gh2->opt_len ||
- memcmp(gh, gh2, gh_len)) {
+ if (!geneve_hdr_match(skb, gh, gh2, hint_off)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
@@ -543,7 +927,6 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
skb_gro_pull(skb, gh_len);
skb_gro_postpull_rcsum(skb, gh, gh_len);
- type = gh->proto_type;
if (likely(type == htons(ETH_P_TEB)))
return call_gro_receive(eth_gro_receive, head, skb);
@@ -572,6 +955,7 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
gh = (struct genevehdr *)(skb->data + nhoff);
gh_len = geneve_hlen(gh);
type = gh->proto_type;
+ geneve_opt_gro_hint_off(gh, &type, &gh_len);
/* since skb->encapsulation is set, eth_gro_complete() sets the inner mac header */
if (likely(type == htons(ETH_P_TEB)))
@@ -659,13 +1043,15 @@ static void geneve_sock_release(struct geneve_dev *geneve)
static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
sa_family_t family,
- __be16 dst_port)
+ __be16 dst_port,
+ bool gro_hint)
{
struct geneve_sock *gs;
list_for_each_entry(gs, &gn->sock_list, list) {
if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
- geneve_get_sk_family(gs) == family) {
+ geneve_get_sk_family(gs) == family &&
+ gs->gro_hint == gro_hint) {
return gs;
}
}
@@ -676,12 +1062,14 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
{
struct net *net = geneve->net;
struct geneve_net *gn = net_generic(net, geneve_net_id);
+ bool gro_hint = geneve->cfg.gro_hint;
struct geneve_dev_node *node;
struct geneve_sock *gs;
__u8 vni[3];
__u32 hash;
- gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->cfg.info.key.tp_dst);
+ gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET,
+ geneve->cfg.info.key.tp_dst, gro_hint);
if (gs) {
gs->refcnt++;
goto out;
@@ -694,6 +1082,7 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
out:
gs->collect_md = geneve->cfg.collect_md;
+ gs->gro_hint = gro_hint;
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6) {
rcu_assign_pointer(geneve->sock6, gs);
@@ -766,34 +1155,116 @@ static void geneve_build_header(struct genevehdr *geneveh,
ip_tunnel_info_opts_get(geneveh->options, info);
}
+static int geneve_build_gro_hint_opt(const struct geneve_dev *geneve,
+ struct sk_buff *skb)
+{
+ struct geneve_skb_cb *cb = GENEVE_SKB_CB(skb);
+ struct geneve_opt_gro_hint *hint;
+ unsigned int nhlen;
+ bool nested_is_v6;
+ int id;
+
+ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct geneve_skb_cb));
+ cb->gro_hint_len = 0;
+
+ /* Try to add the GRO hint only in case of double encap. */
+ if (!geneve->cfg.gro_hint || !skb->encapsulation)
+ return 0;
+
+ /*
+	 * The nested headers must fit in the geneve opt len fields, and the
+ * nested encap must carry a nested transport (UDP) header.
+ */
+ nhlen = skb_inner_mac_header(skb) - skb->data;
+ if (nhlen > 255 || !skb_transport_header_was_set(skb) ||
+ skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ (skb_transport_offset(skb) + sizeof(struct udphdr) > nhlen))
+ return 0;
+
+ id = proto_to_id(skb->inner_protocol);
+ if (id < 0)
+ return 0;
+
+ nested_is_v6 = skb->protocol == htons(ETH_P_IPV6);
+ if (nested_is_v6) {
+ int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
+ u8 proto = ipv6_hdr(skb)->nexthdr;
+ __be16 foff;
+
+ if (ipv6_skip_exthdr(skb, start, &proto, &foff) < 0 ||
+ proto != IPPROTO_UDP)
+ return 0;
+ } else {
+ if (ip_hdr(skb)->protocol != IPPROTO_UDP)
+ return 0;
+ }
+
+ hint = &cb->gro_hint;
+ memset(hint, 0, sizeof(*hint));
+ hint->inner_proto_id = id;
+ hint->nested_is_v6 = skb->protocol == htons(ETH_P_IPV6);
+ hint->nested_nh_offset = skb_network_offset(skb);
+ hint->nested_tp_offset = skb_transport_offset(skb);
+ hint->nested_hdr_len = nhlen;
+ cb->gro_hint_len = GENEVE_OPT_GRO_HINT_SIZE;
+ return GENEVE_OPT_GRO_HINT_SIZE;
+}
+
+static void geneve_put_gro_hint_opt(struct genevehdr *gnvh, int opt_size,
+ const struct geneve_opt_gro_hint *hint)
+{
+ struct geneve_opt *gro_opt;
+
+	/* geneve_build_header() did not take the GRO hint into account. */
+ gnvh->opt_len = (opt_size + GENEVE_OPT_GRO_HINT_SIZE) >> 2;
+
+ gro_opt = (void *)(gnvh + 1) + opt_size;
+ memset(gro_opt, 0, sizeof(*gro_opt));
+
+ gro_opt->opt_class = htons(GENEVE_OPT_NETDEV_CLASS);
+ gro_opt->type = GENEVE_OPT_GRO_HINT_TYPE;
+ gro_opt->length = GENEVE_OPT_GRO_HINT_LEN;
+ memcpy(gro_opt + 1, hint, sizeof(*hint));
+}
+
static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
const struct ip_tunnel_info *info,
- bool xnet, int ip_hdr_len,
- bool inner_proto_inherit)
+ const struct geneve_dev *geneve, int ip_hdr_len)
{
bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
+ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
+ bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+ struct geneve_skb_cb *cb = GENEVE_SKB_CB(skb);
struct genevehdr *gnvh;
__be16 inner_proto;
+ bool double_encap;
int min_headroom;
+ int opt_size;
int err;
skb_reset_mac_header(skb);
skb_scrub_packet(skb, xnet);
+ opt_size = info->options_len + cb->gro_hint_len;
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
- GENEVE_BASE_HLEN + info->options_len + ip_hdr_len;
+ GENEVE_BASE_HLEN + opt_size + ip_hdr_len;
err = skb_cow_head(skb, min_headroom);
if (unlikely(err))
goto free_dst;
+ double_encap = udp_tunnel_handle_partial(skb);
err = udp_tunnel_handle_offloads(skb, udp_sum);
if (err)
goto free_dst;
- gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len);
+ gnvh = __skb_push(skb, sizeof(*gnvh) + opt_size);
inner_proto = inner_proto_inherit ? skb->protocol : htons(ETH_P_TEB);
geneve_build_header(gnvh, info, inner_proto);
- skb_set_inner_protocol(skb, inner_proto);
+
+ if (cb->gro_hint_len)
+ geneve_put_gro_hint_opt(gnvh, info->options_len, &cb->gro_hint);
+
+ udp_tunnel_set_inner_protocol(skb, double_encap, inner_proto);
return 0;
free_dst:
@@ -821,8 +1292,6 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
- bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
- bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
@@ -833,7 +1302,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (skb_vlan_inet_prepare(skb, inner_proto_inherit))
+ if (skb_vlan_inet_prepare(skb, geneve->cfg.inner_proto_inherit))
return -EINVAL;
if (!gs4)
@@ -854,7 +1323,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
return PTR_ERR(rt);
err = skb_tunnel_check_pmtu(skb, &rt->dst,
- GENEVE_IPV4_HLEN + info->options_len,
+ GENEVE_IPV4_HLEN + info->options_len +
+ geneve_build_gro_hint_opt(geneve, skb),
netif_is_any_bridge_port(dev));
if (err < 0) {
dst_release(&rt->dst);
@@ -916,8 +1386,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
}
- err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
- inner_proto_inherit);
+ err = geneve_build_skb(&rt->dst, skb, info, geneve,
+ sizeof(struct iphdr));
if (unlikely(err))
return err;
@@ -934,8 +1404,6 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
- bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
- bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
const struct ip_tunnel_key *key = &info->key;
struct dst_entry *dst = NULL;
@@ -945,7 +1413,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (skb_vlan_inet_prepare(skb, inner_proto_inherit))
+ if (skb_vlan_inet_prepare(skb, geneve->cfg.inner_proto_inherit))
return -EINVAL;
if (!gs6)
@@ -966,7 +1434,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
return PTR_ERR(dst);
err = skb_tunnel_check_pmtu(skb, dst,
- GENEVE_IPV6_HLEN + info->options_len,
+ GENEVE_IPV6_HLEN + info->options_len +
+ geneve_build_gro_hint_opt(geneve, skb),
netif_is_any_bridge_port(dev));
if (err < 0) {
dst_release(dst);
@@ -1008,8 +1477,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = key->ttl;
ttl = ttl ? : ip6_dst_hoplimit(dst);
}
- err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
- inner_proto_inherit);
+ err = geneve_build_skb(dst, skb, info, geneve, sizeof(struct ipv6hdr));
if (unlikely(err))
return err;
@@ -1211,9 +1679,16 @@ static void geneve_setup(struct net_device *dev)
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
+ /* Partial features are disabled by default. */
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= UDP_TUNNEL_PARTIAL_FEATURES;
+ dev->hw_features |= NETIF_F_GSO_PARTIAL;
+
+ dev->hw_enc_features = dev->hw_features;
+ dev->gso_partial_features = UDP_TUNNEL_PARTIAL_FEATURES;
+ dev->mangleid_features = NETIF_F_GSO_PARTIAL;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
/* MTU range: 68 - (something less than 65535) */
@@ -1248,6 +1723,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_DF] = { .type = NLA_U8 },
[IFLA_GENEVE_INNER_PROTO_INHERIT] = { .type = NLA_FLAG },
[IFLA_GENEVE_PORT_RANGE] = NLA_POLICY_EXACT_LEN(sizeof(struct ifla_geneve_port_range)),
+ [IFLA_GENEVE_GRO_HINT] = { .type = NLA_FLAG },
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1598,10 +2074,18 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
cfg->inner_proto_inherit = true;
}
+ if (data[IFLA_GENEVE_GRO_HINT]) {
+ if (changelink) {
+ attrtype = IFLA_GENEVE_GRO_HINT;
+ goto change_notsup;
+ }
+ cfg->gro_hint = true;
+ }
+
return 0;
change_notsup:
NL_SET_ERR_MSG_ATTR(extack, data[attrtype],
- "Changing VNI, Port, endpoint IP address family, external, inner_proto_inherit, and UDP checksum attributes are not supported");
+ "Changing VNI, Port, endpoint IP address family, external, inner_proto_inherit, gro_hint and UDP checksum attributes are not supported");
return -EOPNOTSUPP;
}
@@ -1784,6 +2268,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */
nla_total_size(0) + /* IFLA_GENEVE_INNER_PROTO_INHERIT */
nla_total_size(sizeof(struct ifla_geneve_port_range)) + /* IFLA_GENEVE_PORT_RANGE */
+ nla_total_size(0) + /* IFLA_GENEVE_GRO_HINT */
0;
}
@@ -1856,6 +2341,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put(skb, IFLA_GENEVE_PORT_RANGE, sizeof(ports), &ports))
goto nla_put_failure;
+ if (geneve->cfg.gro_hint &&
+ nla_put_flag(skb, IFLA_GENEVE_GRO_HINT))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
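
For reference, the hint option added above occupies exactly one 8-byte geneve TLV: a 4-byte struct geneve_opt header (class 0x100, type 1, length 1, i.e. one 4-byte data word) followed by the 4-byte struct geneve_opt_gro_hint payload. A compile-time sketch of that accounting, assuming struct geneve_opt from <net/geneve.h>:

#include <linux/build_bug.h>
#include <net/geneve.h>

/* Sanity-check the on-wire layout of the GRO hint option. */
static inline void geneve_gro_hint_layout_check(void)
{
	/* The option length field counts 4-byte data words. */
	BUILD_BUG_ON(sizeof(struct geneve_opt_gro_hint) !=
		     GENEVE_OPT_GRO_HINT_LEN * 4);
	/* 4-byte TLV header + 4-byte payload = 8 bytes total. */
	BUILD_BUG_ON(sizeof(struct geneve_opt) +
		     sizeof(struct geneve_opt_gro_hint) !=
		     GENEVE_OPT_GRO_HINT_SIZE);
}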
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 2263029d1a20..3b88e465d08f 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -742,26 +742,6 @@ EXPORT_SYMBOL(hdlcdrv_unregister);
/* --------------------------------------------------------------------- */
-static int __init hdlcdrv_init_driver(void)
-{
- printk(KERN_INFO "hdlcdrv: (C) 1996-2000 Thomas Sailer HB9JNX/AE4WA\n");
- printk(KERN_INFO "hdlcdrv: version 0.8\n");
- return 0;
-}
-
-/* --------------------------------------------------------------------- */
-
-static void __exit hdlcdrv_cleanup_driver(void)
-{
- printk(KERN_INFO "hdlcdrv: cleanup\n");
-}
-
-/* --------------------------------------------------------------------- */
-
MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu");
MODULE_DESCRIPTION("Packet Radio network interface HDLC encoder/decoder");
MODULE_LICENSE("GPL");
-module_init(hdlcdrv_init_driver);
-module_exit(hdlcdrv_cleanup_driver);
-
-/* --------------------------------------------------------------------- */
diff --git a/drivers/net/hippi/Kconfig b/drivers/net/hippi/Kconfig
deleted file mode 100644
index 46b911bb8493..000000000000
--- a/drivers/net/hippi/Kconfig
+++ /dev/null
@@ -1,40 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# HIPPI network device configuration
-#
-
-config HIPPI
- bool "HIPPI driver support"
- depends on INET && PCI
- help
- HIgh Performance Parallel Interface (HIPPI) is a 800Mbit/sec and
- 1600Mbit/sec dual-simplex switched or point-to-point network. HIPPI
- can run over copper (25m) or fiber (300m on multi-mode or 10km on
- single-mode). HIPPI networks are commonly used for clusters and to
- connect to super computers. If you are connected to a HIPPI network
- and have a HIPPI network card in your computer that you want to use
- under Linux, say Y here (you must also remember to enable the driver
- for your HIPPI card below). Most people will say N here.
-
-if HIPPI
-
-config ROADRUNNER
- tristate "Essential RoadRunner HIPPI PCI adapter support"
- depends on PCI
- help
- Say Y here if this is your PCI HIPPI network card.
-
- To compile this driver as a module, choose M here: the module
- will be called rrunner. If unsure, say N.
-
-config ROADRUNNER_LARGE_RINGS
- bool "Use large TX/RX rings"
- depends on ROADRUNNER
- help
- If you say Y here, the RoadRunner driver will preallocate up to 2 MB
- of additional memory to allow for fastest operation, both for
- transmitting and receiving. This memory cannot be used by any other
- kernel code or by user space programs. Say Y here only if you have
- the memory.
-
-endif # HIPPI
diff --git a/drivers/net/hippi/Makefile b/drivers/net/hippi/Makefile
deleted file mode 100644
index 409dd47f3e0a..000000000000
--- a/drivers/net/hippi/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the HIPPI network device drivers.
-#
-
-obj-$(CONFIG_ROADRUNNER) += rrunner.o
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
deleted file mode 100644
index 7b7e7a47a75e..000000000000
--- a/drivers/net/hippi/rrunner.c
+++ /dev/null
@@ -1,1687 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
- *
- * Copyright (C) 1998-2002 by Jes Sorensen, <jes@wildopensource.com>.
- *
- * Thanks to Essential Communication for providing us with hardware
- * and very comprehensive documentation without which I would not have
- * been able to write this driver. A special thank you to John Gibbon
- * for sorting out the legal issues, with the NDA, allowing the code to
- * be released under the GPL.
- *
- * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
- * stupid bugs in my code.
- *
- * Softnet support and various other patches from Val Henson of
- * ODS/Essential.
- *
- * PCI DMA mapping code partly based on work by Francois Romieu.
- */
-
-
-#define DEBUG 1
-#define RX_DMA_SKBUFF 1
-#define PKT_COPY_THRESHOLD 512
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/hippidevice.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <net/sock.h>
-
-#include <asm/cache.h>
-#include <asm/byteorder.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-
-#define rr_if_busy(dev) netif_queue_stopped(dev)
-#define rr_if_running(dev) netif_running(dev)
-
-#include "rrunner.h"
-
-#define RUN_AT(x) (jiffies + (x))
-
-
-MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
-MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
-MODULE_LICENSE("GPL");
-
-static const char version[] =
-"rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
-
-
-static const struct net_device_ops rr_netdev_ops = {
- .ndo_open = rr_open,
- .ndo_stop = rr_close,
- .ndo_siocdevprivate = rr_siocdevprivate,
- .ndo_start_xmit = rr_start_xmit,
- .ndo_set_mac_address = hippi_mac_addr,
-};
-
-/*
- * Implementation notes:
- *
- * The DMA engine only allows for DMA within physical 64KB chunks of
- * memory. The current approach of the driver (and stack) is to use
- * linear blocks of memory for the skbuffs. However, as the data block
- * is always the first part of the skb and skbs are 2^n aligned so we
- * are guarantted to get the whole block within one 64KB align 64KB
- * chunk.
- *
- * On the long term, relying on being able to allocate 64KB linear
- * chunks of memory is not feasible and the skb handling code and the
- * stack will need to know about I/O vectors or something similar.
- */
-
-static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct net_device *dev;
- static int version_disp;
- u8 pci_latency;
- struct rr_private *rrpriv;
- void *tmpptr;
- dma_addr_t ring_dma;
- int ret = -ENOMEM;
-
- dev = alloc_hippi_dev(sizeof(struct rr_private));
- if (!dev)
- goto out3;
-
- ret = pci_enable_device(pdev);
- if (ret) {
- ret = -ENODEV;
- goto out2;
- }
-
- rrpriv = netdev_priv(dev);
-
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- ret = pci_request_regions(pdev, "rrunner");
- if (ret < 0)
- goto out;
-
- pci_set_drvdata(pdev, dev);
-
- rrpriv->pci_dev = pdev;
-
- spin_lock_init(&rrpriv->lock);
-
- dev->netdev_ops = &rr_netdev_ops;
-
- /* display version info if adapter is found */
- if (!version_disp) {
- /* set display flag to TRUE so that */
- /* we only display this string ONCE */
- version_disp = 1;
- printk(version);
- }
-
- pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
- if (pci_latency <= 0x58){
- pci_latency = 0x58;
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency);
- }
-
- pci_set_master(pdev);
-
- printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
- "at 0x%llx, irq %i, PCI latency %i\n", dev->name,
- (unsigned long long)pci_resource_start(pdev, 0),
- pdev->irq, pci_latency);
-
- /*
- * Remap the MMIO regs into kernel space.
- */
- rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
- if (!rrpriv->regs) {
- printk(KERN_ERR "%s: Unable to map I/O register, "
- "RoadRunner will be disabled.\n", dev->name);
- ret = -EIO;
- goto out;
- }
-
- tmpptr = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
- GFP_KERNEL);
- rrpriv->tx_ring = tmpptr;
- rrpriv->tx_ring_dma = ring_dma;
-
- if (!tmpptr) {
- ret = -ENOMEM;
- goto out;
- }
-
- tmpptr = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
- GFP_KERNEL);
- rrpriv->rx_ring = tmpptr;
- rrpriv->rx_ring_dma = ring_dma;
-
- if (!tmpptr) {
- ret = -ENOMEM;
- goto out;
- }
-
- tmpptr = dma_alloc_coherent(&pdev->dev, EVT_RING_SIZE, &ring_dma,
- GFP_KERNEL);
- rrpriv->evt_ring = tmpptr;
- rrpriv->evt_ring_dma = ring_dma;
-
- if (!tmpptr) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * Don't access any register before this point!
- */
-#ifdef __BIG_ENDIAN
- writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP,
- &rrpriv->regs->HostCtrl);
-#endif
- /*
- * Need to add a case for little-endian 64-bit hosts here.
- */
-
- rr_init(dev);
-
- ret = register_netdev(dev);
- if (ret)
- goto out;
- return 0;
-
- out:
- if (rrpriv->evt_ring)
- dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rrpriv->evt_ring,
- rrpriv->evt_ring_dma);
- if (rrpriv->rx_ring)
- dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rrpriv->rx_ring,
- rrpriv->rx_ring_dma);
- if (rrpriv->tx_ring)
- dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rrpriv->tx_ring,
- rrpriv->tx_ring_dma);
- if (rrpriv->regs)
- pci_iounmap(pdev, rrpriv->regs);
- if (pdev)
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- out2:
- free_netdev(dev);
- out3:
- return ret;
-}
-
-static void rr_remove_one(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rr_private *rr = netdev_priv(dev);
-
- if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
- printk(KERN_ERR "%s: trying to unload running NIC\n",
- dev->name);
- writel(HALT_NIC, &rr->regs->HostCtrl);
- }
-
- unregister_netdev(dev);
- dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rr->evt_ring,
- rr->evt_ring_dma);
- dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rr->rx_ring,
- rr->rx_ring_dma);
- dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rr->tx_ring,
- rr->tx_ring_dma);
- pci_iounmap(pdev, rr->regs);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- free_netdev(dev);
-}
-
-
-/*
- * Commands are considered to be slow, thus there is no reason to
- * inline this.
- */
-static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
-{
- struct rr_regs __iomem *regs;
- u32 idx;
-
- regs = rrpriv->regs;
- /*
- * This is temporary - it will go away in the final version.
- * We probably also want to make this function inline.
- */
- if (readl(&regs->HostCtrl) & NIC_HALTED){
- printk("issuing command for halted NIC, code 0x%x, "
- "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl));
- if (readl(&regs->Mode) & FATAL_ERR)
- printk("error codes Fail1 %02x, Fail2 %02x\n",
- readl(&regs->Fail1), readl(&regs->Fail2));
- }
-
- idx = rrpriv->info->cmd_ctrl.pi;
-
- writel(*(u32*)(cmd), &regs->CmdRing[idx]);
- wmb();
-
- idx = (idx - 1) % CMD_RING_ENTRIES;
- rrpriv->info->cmd_ctrl.pi = idx;
- wmb();
-
- if (readl(&regs->Mode) & FATAL_ERR)
- printk("error code %02x\n", readl(&regs->Fail1));
-}
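For reference, a command is posted by filling in a struct cmd and handing it to rr_issue_cmd(), which stores it at the current producer index and then steps the index backwards through the 16-entry ring. A sketch of the calling pattern, mirroring what rr_init1() below does to start the firmware (rrpriv assumed initialized):

	struct cmd cmd;

	cmd.code  = C_START_FW;   /* command opcode                  */
	cmd.ring  = 0;            /* no ring is tied to this command */
	cmd.index = 0;

	rr_issue_cmd(rrpriv, &cmd);  /* writes CmdRing, updates cmd_ctrl.pi */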
-
-
-/*
- * Reset the board in a sensible manner. The NIC is already halted
- * when we get here and a spin-lock is held.
- */
-static int rr_reset(struct net_device *dev)
-{
- struct rr_private *rrpriv;
- struct rr_regs __iomem *regs;
- u32 start_pc;
- int i;
-
- rrpriv = netdev_priv(dev);
- regs = rrpriv->regs;
-
- rr_load_firmware(dev);
-
- writel(0x01000000, &regs->TX_state);
- writel(0xff800000, &regs->RX_state);
- writel(0, &regs->AssistState);
- writel(CLEAR_INTA, &regs->LocalCtrl);
- writel(0x01, &regs->BrkPt);
- writel(0, &regs->Timer);
- writel(0, &regs->TimerRef);
- writel(RESET_DMA, &regs->DmaReadState);
- writel(RESET_DMA, &regs->DmaWriteState);
- writel(0, &regs->DmaWriteHostHi);
- writel(0, &regs->DmaWriteHostLo);
- writel(0, &regs->DmaReadHostHi);
- writel(0, &regs->DmaReadHostLo);
- writel(0, &regs->DmaReadLen);
- writel(0, &regs->DmaWriteLen);
- writel(0, &regs->DmaWriteLcl);
- writel(0, &regs->DmaWriteIPchecksum);
- writel(0, &regs->DmaReadLcl);
- writel(0, &regs->DmaReadIPchecksum);
- writel(0, &regs->PciState);
-#if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
- writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode);
-#elif (BITS_PER_LONG == 64)
- writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode);
-#else
- writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode);
-#endif
-
-#if 0
- /*
- * Don't worry, this is just black magic.
- */
- writel(0xdf000, &regs->RxBase);
- writel(0xdf000, &regs->RxPrd);
- writel(0xdf000, &regs->RxCon);
- writel(0xce000, &regs->TxBase);
- writel(0xce000, &regs->TxPrd);
- writel(0xce000, &regs->TxCon);
- writel(0, &regs->RxIndPro);
- writel(0, &regs->RxIndCon);
- writel(0, &regs->RxIndRef);
- writel(0, &regs->TxIndPro);
- writel(0, &regs->TxIndCon);
- writel(0, &regs->TxIndRef);
- writel(0xcc000, &regs->pad10[0]);
- writel(0, &regs->DrCmndPro);
- writel(0, &regs->DrCmndCon);
- writel(0, &regs->DwCmndPro);
- writel(0, &regs->DwCmndCon);
- writel(0, &regs->DwCmndRef);
- writel(0, &regs->DrDataPro);
- writel(0, &regs->DrDataCon);
- writel(0, &regs->DrDataRef);
- writel(0, &regs->DwDataPro);
- writel(0, &regs->DwDataCon);
- writel(0, &regs->DwDataRef);
-#endif
-
- writel(0xffffffff, &regs->MbEvent);
- writel(0, &regs->Event);
-
- writel(0, &regs->TxPi);
- writel(0, &regs->IpRxPi);
-
- writel(0, &regs->EvtCon);
- writel(0, &regs->EvtPrd);
-
- rrpriv->info->evt_ctrl.pi = 0;
-
- for (i = 0; i < CMD_RING_ENTRIES; i++)
- writel(0, &regs->CmdRing[i]);
-
-/*
- * Why 32? Is this not cache line size dependent?
- */
- writel(RBURST_64|WBURST_64, &regs->PciState);
- wmb();
-
- start_pc = rr_read_eeprom_word(rrpriv,
- offsetof(struct eeprom, rncd_info.FwStart));
-
-#if (DEBUG > 1)
- printk("%s: Executing firmware at address 0x%06x\n",
- dev->name, start_pc);
-#endif
-
- writel(start_pc + 0x800, &regs->Pc);
- wmb();
- udelay(5);
-
- writel(start_pc, &regs->Pc);
- wmb();
-
- return 0;
-}
-
-
-/*
- * Read a string from the EEPROM.
- */
-static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
- unsigned long offset,
- unsigned char *buf,
- unsigned long length)
-{
- struct rr_regs __iomem *regs = rrpriv->regs;
- u32 misc, io, host, i;
-
- io = readl(&regs->ExtIo);
- writel(0, &regs->ExtIo);
- misc = readl(&regs->LocalCtrl);
- writel(0, &regs->LocalCtrl);
- host = readl(&regs->HostCtrl);
- writel(host | HALT_NIC, &regs->HostCtrl);
- mb();
-
- for (i = 0; i < length; i++){
- writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
- mb();
- buf[i] = (readl(&regs->WinData) >> 24) & 0xff;
- mb();
- }
-
- writel(host, &regs->HostCtrl);
- writel(misc, &regs->LocalCtrl);
- writel(io, &regs->ExtIo);
- mb();
- return i;
-}
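The loop above shows the windowed access scheme: each EEPROM byte lives in the top byte of a 32-bit word, with words spaced 8 bytes apart in the window address space, so byte `offset` is reached by pointing WinBase at EEPROM_BASE + (offset << 3) and taking bits 31:24 of WinData. A sketch of a single-byte read under those assumptions (helper name hypothetical):

	static u8 rr_eeprom_read_byte(struct rr_regs __iomem *regs,
				      unsigned long offset)
	{
		writel(EEPROM_BASE + (offset << 3), &regs->WinBase);
		mb();
		return (readl(&regs->WinData) >> 24) & 0xff;
	}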
-
-
-/*
- * Shortcut to read one word (4 bytes) out of the EEPROM and convert
- * it to our CPU byte-order.
- */
-static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
- size_t offset)
-{
- __be32 word;
-
- if ((rr_read_eeprom(rrpriv, offset,
- (unsigned char *)&word, 4) == 4))
- return be32_to_cpu(word);
- return 0;
-}
-
-
-/*
- * Write a string to the EEPROM.
- *
- * This is only called when the firmware is not running.
- */
-static unsigned int write_eeprom(struct rr_private *rrpriv,
- unsigned long offset,
- unsigned char *buf,
- unsigned long length)
-{
- struct rr_regs __iomem *regs = rrpriv->regs;
- u32 misc, io, data, i, j, ready, error = 0;
-
- io = readl(&regs->ExtIo);
- writel(0, &regs->ExtIo);
- misc = readl(&regs->LocalCtrl);
- writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl);
- mb();
-
- for (i = 0; i < length; i++){
- writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
- mb();
- data = buf[i] << 24;
- /*
- * Only try to write the data if it is not the same
- * value already.
- */
- if ((readl(&regs->WinData) & 0xff000000) != data){
- writel(data, &regs->WinData);
- ready = 0;
- j = 0;
- mb();
- while(!ready){
- udelay(20);
- if ((readl(&regs->WinData) & 0xff000000) ==
- data)
- ready = 1;
- mb();
- if (j++ > 5000){
- printk("data mismatch: %08x, "
- "WinData %08x\n", data,
- readl(&regs->WinData));
- ready = 1;
- error = 1;
- }
- }
- }
- }
-
- writel(misc, &regs->LocalCtrl);
- writel(io, &regs->ExtIo);
- mb();
-
- return error;
-}
-
-
-static int rr_init(struct net_device *dev)
-{
- u8 addr[HIPPI_ALEN] __aligned(4);
- struct rr_private *rrpriv;
- struct rr_regs __iomem *regs;
- u32 sram_size, rev;
-
- rrpriv = netdev_priv(dev);
- regs = rrpriv->regs;
-
- rev = readl(&regs->FwRev);
- rrpriv->fw_rev = rev;
- if (rev > 0x00020024)
- printk(" Firmware revision: %i.%i.%i\n", (rev >> 16),
- ((rev >> 8) & 0xff), (rev & 0xff));
- else if (rev >= 0x00020000) {
- printk(" Firmware revision: %i.%i.%i (2.0.37 or "
- "later is recommended)\n", (rev >> 16),
- ((rev >> 8) & 0xff), (rev & 0xff));
- }else{
- printk(" Firmware revision too old: %i.%i.%i, please "
- "upgrade to 2.0.37 or later.\n",
- (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
- }
-
-#if (DEBUG > 2)
- printk(" Maximum receive rings %i\n", readl(&regs->MaxRxRng));
-#endif
-
- /*
- * Read the hardware address from the eeprom. The HW address
- * is not really necessary for HIPPI but awfully convenient.
- * The pointer arithmetic to put it in dev_addr is ugly, but
- * Donald Becker does it this way for the GigE version of this
- * card and it's shorter and more portable than any
- * other method I've seen. -VAL
- */
-
- *(__be16 *)(addr) =
- htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA)));
- *(__be32 *)(addr+2) =
- htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));
- dev_addr_set(dev, addr);
-
- printk(" MAC: %pM\n", dev->dev_addr);
-
- sram_size = rr_read_eeprom_word(rrpriv, 8);
- printk(" SRAM size 0x%06x\n", sram_size);
-
- return 0;
-}
-
-
-static int rr_init1(struct net_device *dev)
-{
- struct rr_private *rrpriv;
- struct rr_regs __iomem *regs;
- unsigned long myjif, flags;
- struct cmd cmd;
- u32 hostctrl;
- int ecode = 0;
- short i;
-
- rrpriv = netdev_priv(dev);
- regs = rrpriv->regs;
-
- spin_lock_irqsave(&rrpriv->lock, flags);
-
- hostctrl = readl(&regs->HostCtrl);
- writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl);
- wmb();
-
- if (hostctrl & PARITY_ERR){
- printk("%s: Parity error halting NIC - this is serious!\n",
- dev->name);
- spin_unlock_irqrestore(&rrpriv->lock, flags);
- ecode = -EFAULT;
- goto error;
- }
-
- set_rxaddr(regs, rrpriv->rx_ctrl_dma);
- set_infoaddr(regs, rrpriv->info_dma);
-
- rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
- rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
- rrpriv->info->evt_ctrl.mode = 0;
- rrpriv->info->evt_ctrl.pi = 0;
- set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma);
-
- rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
- rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
- rrpriv->info->cmd_ctrl.mode = 0;
- rrpriv->info->cmd_ctrl.pi = 15;
-
- for (i = 0; i < CMD_RING_ENTRIES; i++) {
- writel(0, &regs->CmdRing[i]);
- }
-
- for (i = 0; i < TX_RING_ENTRIES; i++) {
- rrpriv->tx_ring[i].size = 0;
- set_rraddr(&rrpriv->tx_ring[i].addr, 0);
- rrpriv->tx_skbuff[i] = NULL;
- }
- rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
- rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
- rrpriv->info->tx_ctrl.mode = 0;
- rrpriv->info->tx_ctrl.pi = 0;
- set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma);
-
- /*
- * Set dirty_tx before we start receiving interrupts, otherwise
- * the interrupt handler might think it is supposed to process
- * tx ints before we are up and running, which may cause a null
- * pointer access in the int handler.
- */
- rrpriv->tx_full = 0;
- rrpriv->cur_rx = 0;
- rrpriv->dirty_rx = rrpriv->dirty_tx = 0;
-
- rr_reset(dev);
-
- /* Tuning values */
- writel(0x5000, &regs->ConRetry);
- writel(0x100, &regs->ConRetryTmr);
- writel(0x500000, &regs->ConTmout);
- writel(0x60, &regs->IntrTmr);
- writel(0x500000, &regs->TxDataMvTimeout);
- writel(0x200000, &regs->RxDataMvTimeout);
- writel(0x80, &regs->WriteDmaThresh);
- writel(0x80, &regs->ReadDmaThresh);
-
- rrpriv->fw_running = 0;
- wmb();
-
- hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
- writel(hostctrl, &regs->HostCtrl);
- wmb();
-
- spin_unlock_irqrestore(&rrpriv->lock, flags);
-
- for (i = 0; i < RX_RING_ENTRIES; i++) {
- struct sk_buff *skb;
- dma_addr_t addr;
-
- rrpriv->rx_ring[i].mode = 0;
- skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_WARNING "%s: Unable to allocate memory "
- "for receive ring - halting NIC\n", dev->name);
- ecode = -ENOMEM;
- goto error;
- }
- rrpriv->rx_skbuff[i] = skb;
- addr = dma_map_single(&rrpriv->pci_dev->dev, skb->data,
- dev->mtu + HIPPI_HLEN, DMA_FROM_DEVICE);
- /*
- * Sanity test to see if we conflict with the DMA
- * limitations of the Roadrunner.
- */
- if ((((unsigned long)skb->data) & 0xffff) +
- dev->mtu + HIPPI_HLEN > 0x10000)
- printk("skb alloc error\n");
-
- set_rraddr(&rrpriv->rx_ring[i].addr, addr);
- rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
- }
-
- rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
- rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
- rrpriv->rx_ctrl[4].mode = 8;
- rrpriv->rx_ctrl[4].pi = 0;
- wmb();
- set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma);
-
- udelay(1000);
-
- /*
- * Now start the firmware.
- */
- cmd.code = C_START_FW;
- cmd.ring = 0;
- cmd.index = 0;
-
- rr_issue_cmd(rrpriv, &cmd);
-
- /*
- * Give the firmware time to chew on the C_START_FW command.
- */
- myjif = jiffies + 5 * HZ;
- while (time_before(jiffies, myjif) && !rrpriv->fw_running)
- cpu_relax();
-
- netif_start_queue(dev);
-
- return ecode;
-
- error:
- /*
- * We might have gotten here because we are out of memory;
- * make sure we release everything we allocated before failing.
- */
- for (i = 0; i < RX_RING_ENTRIES; i++) {
- struct sk_buff *skb = rrpriv->rx_skbuff[i];
-
- if (skb) {
- dma_unmap_single(&rrpriv->pci_dev->dev,
- rrpriv->rx_ring[i].addr.addrlo,
- dev->mtu + HIPPI_HLEN,
- DMA_FROM_DEVICE);
- rrpriv->rx_ring[i].size = 0;
- set_rraddr(&rrpriv->rx_ring[i].addr, 0);
- dev_kfree_skb(skb);
- rrpriv->rx_skbuff[i] = NULL;
- }
- }
- return ecode;
-}
-
-
-/*
- * All events are considered to be slow (RX/TX ints do not generate
- * events) and are handled here, outside the main interrupt handler,
- * to reduce the size of the handler.
- */
-static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
-{
- struct rr_private *rrpriv;
- struct rr_regs __iomem *regs;
- u32 tmp;
-
- rrpriv = netdev_priv(dev);
- regs = rrpriv->regs;
-
- while (prodidx != eidx){
- switch (rrpriv->evt_ring[eidx].code){
- case E_NIC_UP:
- tmp = readl(&regs->FwRev);
- printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
- "up and running\n", dev->name,
- (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff));
- rrpriv->fw_running = 1;
- writel(RX_RING_ENTRIES - 1, &regs->IpRxPi);
- wmb();
- break;
- case E_LINK_ON:
- printk(KERN_INFO "%s: Optical link ON\n", dev->name);
- break;
- case E_LINK_OFF:
- printk(KERN_INFO "%s: Optical link OFF\n", dev->name);
- break;
- case E_RX_IDLE:
- printk(KERN_WARNING "%s: RX data not moving\n",
- dev->name);
- goto drop;
- case E_WATCHDOG:
- printk(KERN_INFO "%s: The watchdog is here to see "
- "us\n", dev->name);
- break;
- case E_INTERN_ERR:
- printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
- dev->name);
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
- &regs->HostCtrl);
- wmb();
- break;
- case E_HOST_ERR:
- printk(KERN_ERR "%s: Host software error\n",
- dev->name);
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
- &regs->HostCtrl);
- wmb();
- break;
- /*
- * TX events.
- */
- case E_CON_REJ:
- printk(KERN_WARNING "%s: Connection rejected\n",
- dev->name);
- dev->stats.tx_aborted_errors++;
- break;
- case E_CON_TMOUT:
- printk(KERN_WARNING "%s: Connection timeout\n",
- dev->name);
- break;
- case E_DISC_ERR:
- printk(KERN_WARNING "%s: HIPPI disconnect error\n",
- dev->name);
- dev->stats.tx_aborted_errors++;
- break;
- case E_INT_PRTY:
- printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
- dev->name);
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
- &regs->HostCtrl);
- wmb();
- break;
- case E_TX_IDLE:
- printk(KERN_WARNING "%s: Transmitter idle\n",
- dev->name);
- break;
- case E_TX_LINK_DROP:
- printk(KERN_WARNING "%s: Link lost during transmit\n",
- dev->name);
- dev->stats.tx_aborted_errors++;
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
- &regs->HostCtrl);
- wmb();
- break;
- case E_TX_INV_RNG:
- printk(KERN_ERR "%s: Invalid send ring block\n",
- dev->name);
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
- &regs->HostCtrl);
- wmb();
- break;
- case E_TX_INV_BUF:
- printk(KERN_ERR "%s: Invalid send buffer address\n",
- dev->name);
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
- &regs->HostCtrl);
- wmb();
- break;
- case E_TX_INV_DSC:
- printk(KERN_ERR "%s: Invalid descriptor address\n",
- dev->name);
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
- &regs->HostCtrl);
- wmb();
- break;
- /*
- * RX events.
- */
- case E_RX_RNG_OUT:
- printk(KERN_INFO "%s: Receive ring full\n", dev->name);
- break;
-
- case E_RX_PAR_ERR:
- printk(KERN_WARNING "%s: Receive parity error\n",
- dev->name);
- goto drop;
- case E_RX_LLRC_ERR:
- printk(KERN_WARNING "%s: Receive LLRC error\n",
- dev->name);
- goto drop;
- case E_PKT_LN_ERR:
- printk(KERN_WARNING "%s: Receive packet length "
- "error\n", dev->name);
- goto drop;
- case E_DTA_CKSM_ERR:
- printk(KERN_WARNING "%s: Data checksum error\n",
- dev->name);
- goto drop;
- case E_SHT_BST:
- printk(KERN_WARNING "%s: Unexpected short burst "
- "error\n", dev->name);
- goto drop;
- case E_STATE_ERR:
- printk(KERN_WARNING "%s: Recv. state transition"
- " error\n", dev->name);
- goto drop;
- case E_UNEXP_DATA:
- printk(KERN_WARNING "%s: Unexpected data error\n",
- dev->name);
- goto drop;
- case E_LST_LNK_ERR:
- printk(KERN_WARNING "%s: Link lost error\n",
- dev->name);
- goto drop;
- case E_FRM_ERR:
- printk(KERN_WARNING "%s: Framing Error\n",
- dev->name);
- goto drop;
- case E_FLG_SYN_ERR:
- printk(KERN_WARNING "%s: Flag sync. lost during "
- "packet\n", dev->name);
- goto drop;
- case E_RX_INV_BUF:
- printk(KERN_ERR "%s: Invalid receive buffer "
- "address\n", dev->name);
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
- &regs->HostCtrl);
- wmb();
- break;
- case E_RX_INV_DSC:
- printk(KERN_ERR "%s: Invalid receive descriptor "
- "address\n", dev->name);
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
- &regs->HostCtrl);
- wmb();
- break;
- case E_RNG_BLK:
- printk(KERN_ERR "%s: Invalid ring block\n",
- dev->name);
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
- &regs->HostCtrl);
- wmb();
- break;
- drop:
- /* Label packet to be dropped.
- * Actual dropping occurs in rx
- * handling.
- *
- * The index of packet we get to drop is
- * the index of the packet following
- * the bad packet. -kbf
- */
- {
- u16 index = rrpriv->evt_ring[eidx].index;
- index = (index + (RX_RING_ENTRIES - 1)) %
- RX_RING_ENTRIES;
- rrpriv->rx_ring[index].mode |=
- (PACKET_BAD | PACKET_END);
- }
- break;
- default:
- printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
- dev->name, rrpriv->evt_ring[eidx].code);
- }
- eidx = (eidx + 1) % EVT_RING_ENTRIES;
- }
-
- rrpriv->info->evt_ctrl.pi = eidx;
- wmb();
- return eidx;
-}
-
-
-static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
-{
- struct rr_private *rrpriv = netdev_priv(dev);
- struct rr_regs __iomem *regs = rrpriv->regs;
-
- do {
- struct rx_desc *desc;
- u32 pkt_len;
-
- desc = &(rrpriv->rx_ring[index]);
- pkt_len = desc->size;
-#if (DEBUG > 2)
- printk("index %i, rxlimit %i\n", index, rxlimit);
- printk("len %x, mode %x\n", pkt_len, desc->mode);
-#endif
- if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){
- dev->stats.rx_dropped++;
- goto defer;
- }
-
- if (pkt_len > 0){
- struct sk_buff *skb, *rx_skb;
-
- rx_skb = rrpriv->rx_skbuff[index];
-
- if (pkt_len < PKT_COPY_THRESHOLD) {
- skb = alloc_skb(pkt_len, GFP_ATOMIC);
- if (skb == NULL){
- printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len);
- dev->stats.rx_dropped++;
- goto defer;
- } else {
- dma_sync_single_for_cpu(&rrpriv->pci_dev->dev,
- desc->addr.addrlo,
- pkt_len,
- DMA_FROM_DEVICE);
-
- skb_put_data(skb, rx_skb->data,
- pkt_len);
-
- dma_sync_single_for_device(&rrpriv->pci_dev->dev,
- desc->addr.addrlo,
- pkt_len,
- DMA_FROM_DEVICE);
- }
- }else{
- struct sk_buff *newskb;
-
- newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
- GFP_ATOMIC);
- if (newskb){
- dma_addr_t addr;
-
- dma_unmap_single(&rrpriv->pci_dev->dev,
- desc->addr.addrlo,
- dev->mtu + HIPPI_HLEN,
- DMA_FROM_DEVICE);
- skb = rx_skb;
- skb_put(skb, pkt_len);
- rrpriv->rx_skbuff[index] = newskb;
- addr = dma_map_single(&rrpriv->pci_dev->dev,
- newskb->data,
- dev->mtu + HIPPI_HLEN,
- DMA_FROM_DEVICE);
- set_rraddr(&desc->addr, addr);
- } else {
- printk("%s: Out of memory, deferring "
- "packet\n", dev->name);
- dev->stats.rx_dropped++;
- goto defer;
- }
- }
- skb->protocol = hippi_type_trans(skb, dev);
-
- netif_rx(skb); /* send it up */
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- defer:
- desc->mode = 0;
- desc->size = dev->mtu + HIPPI_HLEN;
-
- if ((index & 7) == 7)
- writel(index, &regs->IpRxPi);
-
- index = (index + 1) % RX_RING_ENTRIES;
- } while(index != rxlimit);
-
- rrpriv->cur_rx = index;
- wmb();
-}
-
-
-static irqreturn_t rr_interrupt(int irq, void *dev_id)
-{
- struct rr_private *rrpriv;
- struct rr_regs __iomem *regs;
- struct net_device *dev = (struct net_device *)dev_id;
- u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;
-
- rrpriv = netdev_priv(dev);
- regs = rrpriv->regs;
-
- if (!(readl(&regs->HostCtrl) & RR_INT))
- return IRQ_NONE;
-
- spin_lock(&rrpriv->lock);
-
- prodidx = readl(&regs->EvtPrd);
- txcsmr = (prodidx >> 8) & 0xff;
- rxlimit = (prodidx >> 16) & 0xff;
- prodidx &= 0xff;
-
-#if (DEBUG > 2)
- printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
- prodidx, rrpriv->info->evt_ctrl.pi);
-#endif
- /*
- * Order here is important. We must handle events
- * before doing anything else in order to catch
- * such things as LLRC errors, etc -kbf
- */
-
- eidx = rrpriv->info->evt_ctrl.pi;
- if (prodidx != eidx)
- eidx = rr_handle_event(dev, prodidx, eidx);
-
- rxindex = rrpriv->cur_rx;
- if (rxindex != rxlimit)
- rx_int(dev, rxlimit, rxindex);
-
- txcon = rrpriv->dirty_tx;
- if (txcsmr != txcon) {
- do {
- /* Due to occasional firmware TX producer/consumer
- * out-of-sync errors we need to check each entry
- * in the ring. -kbf
- */
- if(rrpriv->tx_skbuff[txcon]){
- struct tx_desc *desc;
- struct sk_buff *skb;
-
- desc = &(rrpriv->tx_ring[txcon]);
- skb = rrpriv->tx_skbuff[txcon];
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- dma_unmap_single(&rrpriv->pci_dev->dev,
- desc->addr.addrlo, skb->len,
- DMA_TO_DEVICE);
- dev_kfree_skb_irq(skb);
-
- rrpriv->tx_skbuff[txcon] = NULL;
- desc->size = 0;
- set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
- desc->mode = 0;
- }
- txcon = (txcon + 1) % TX_RING_ENTRIES;
- } while (txcsmr != txcon);
- wmb();
-
- rrpriv->dirty_tx = txcon;
- if (rrpriv->tx_full && rr_if_busy(dev) &&
- (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
- != rrpriv->dirty_tx)){
- rrpriv->tx_full = 0;
- netif_wake_queue(dev);
- }
- }
-
- eidx |= ((txcsmr << 8) | (rxlimit << 16));
- writel(eidx, &regs->EvtCon);
- wmb();
-
- spin_unlock(&rrpriv->lock);
- return IRQ_HANDLED;
-}
-
-static inline void rr_raz_tx(struct rr_private *rrpriv,
- struct net_device *dev)
-{
- int i;
-
- for (i = 0; i < TX_RING_ENTRIES; i++) {
- struct sk_buff *skb = rrpriv->tx_skbuff[i];
-
- if (skb) {
- struct tx_desc *desc = &(rrpriv->tx_ring[i]);
-
- dma_unmap_single(&rrpriv->pci_dev->dev,
- desc->addr.addrlo, skb->len,
- DMA_TO_DEVICE);
- desc->size = 0;
- set_rraddr(&desc->addr, 0);
- dev_kfree_skb(skb);
- rrpriv->tx_skbuff[i] = NULL;
- }
- }
-}
-
-
-static inline void rr_raz_rx(struct rr_private *rrpriv,
- struct net_device *dev)
-{
- int i;
-
- for (i = 0; i < RX_RING_ENTRIES; i++) {
- struct sk_buff *skb = rrpriv->rx_skbuff[i];
-
- if (skb) {
- struct rx_desc *desc = &(rrpriv->rx_ring[i]);
-
- dma_unmap_single(&rrpriv->pci_dev->dev,
- desc->addr.addrlo,
- dev->mtu + HIPPI_HLEN,
- DMA_FROM_DEVICE);
- desc->size = 0;
- set_rraddr(&desc->addr, 0);
- dev_kfree_skb(skb);
- rrpriv->rx_skbuff[i] = NULL;
- }
- }
-}
-
-static void rr_timer(struct timer_list *t)
-{
- struct rr_private *rrpriv = timer_container_of(rrpriv, t, timer);
- struct net_device *dev = pci_get_drvdata(rrpriv->pci_dev);
- struct rr_regs __iomem *regs = rrpriv->regs;
- unsigned long flags;
-
- if (readl(&regs->HostCtrl) & NIC_HALTED){
- printk("%s: Restarting nic\n", dev->name);
- memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
- memset(rrpriv->info, 0, sizeof(struct rr_info));
- wmb();
-
- rr_raz_tx(rrpriv, dev);
- rr_raz_rx(rrpriv, dev);
-
- if (rr_init1(dev)) {
- spin_lock_irqsave(&rrpriv->lock, flags);
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
- &regs->HostCtrl);
- spin_unlock_irqrestore(&rrpriv->lock, flags);
- }
- }
- rrpriv->timer.expires = RUN_AT(5*HZ);
- add_timer(&rrpriv->timer);
-}
-
-
-static int rr_open(struct net_device *dev)
-{
- struct rr_private *rrpriv = netdev_priv(dev);
- struct pci_dev *pdev = rrpriv->pci_dev;
- struct rr_regs __iomem *regs;
- int ecode = 0;
- unsigned long flags;
- dma_addr_t dma_addr;
-
- regs = rrpriv->regs;
-
- if (rrpriv->fw_rev < 0x00020000) {
- printk(KERN_WARNING "%s: trying to configure device with "
- "obsolete firmware\n", dev->name);
- ecode = -EBUSY;
- goto error;
- }
-
- rrpriv->rx_ctrl = dma_alloc_coherent(&pdev->dev,
- 256 * sizeof(struct ring_ctrl),
- &dma_addr, GFP_KERNEL);
- if (!rrpriv->rx_ctrl) {
- ecode = -ENOMEM;
- goto error;
- }
- rrpriv->rx_ctrl_dma = dma_addr;
-
- rrpriv->info = dma_alloc_coherent(&pdev->dev, sizeof(struct rr_info),
- &dma_addr, GFP_KERNEL);
- if (!rrpriv->info) {
- ecode = -ENOMEM;
- goto error;
- }
- rrpriv->info_dma = dma_addr;
- wmb();
-
- spin_lock_irqsave(&rrpriv->lock, flags);
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
- readl(&regs->HostCtrl);
- spin_unlock_irqrestore(&rrpriv->lock, flags);
-
- if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
- printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
- dev->name, pdev->irq);
- ecode = -EAGAIN;
- goto error;
- }
-
- if ((ecode = rr_init1(dev)))
- goto error;
-
- /* Set up the timer to periodically check whether the NIC has
- halted, and restart it if necessary. */
- timer_setup(&rrpriv->timer, rr_timer, 0);
- rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */
- add_timer(&rrpriv->timer);
-
- netif_start_queue(dev);
-
- return ecode;
-
- error:
- spin_lock_irqsave(&rrpriv->lock, flags);
- writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
- spin_unlock_irqrestore(&rrpriv->lock, flags);
-
- if (rrpriv->info) {
- dma_free_coherent(&pdev->dev, sizeof(struct rr_info),
- rrpriv->info, rrpriv->info_dma);
- rrpriv->info = NULL;
- }
- if (rrpriv->rx_ctrl) {
- dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
- rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
- rrpriv->rx_ctrl = NULL;
- }
-
- netif_stop_queue(dev);
-
- return ecode;
-}
-
-
-static void rr_dump(struct net_device *dev)
-{
- struct rr_private *rrpriv;
- struct rr_regs __iomem *regs;
- u32 index, cons;
- short i;
- int len;
-
- rrpriv = netdev_priv(dev);
- regs = rrpriv->regs;
-
- printk("%s: dumping NIC TX rings\n", dev->name);
-
- printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
- readl(&regs->RxPrd), readl(&regs->TxPrd),
- readl(&regs->EvtPrd), readl(&regs->TxPi),
- rrpriv->info->tx_ctrl.pi);
-
- printk("Error code 0x%x\n", readl(&regs->Fail1));
-
- index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
- cons = rrpriv->dirty_tx;
- printk("TX ring index %i, TX consumer %i\n",
- index, cons);
-
- if (rrpriv->tx_skbuff[index]){
- len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
- printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
- for (i = 0; i < len; i++){
- if (!(i & 7))
- printk("\n");
- printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]);
- }
- printk("\n");
- }
-
- if (rrpriv->tx_skbuff[cons]){
- len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
- printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
- printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %p, truesize 0x%x\n",
- rrpriv->tx_ring[cons].mode,
- rrpriv->tx_ring[cons].size,
- (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
- rrpriv->tx_skbuff[cons]->data,
- (unsigned int)rrpriv->tx_skbuff[cons]->truesize);
- for (i = 0; i < len; i++){
- if (!(i & 7))
- printk("\n");
- printk("%02x ", (unsigned char)rrpriv->tx_ring[cons].size);
- }
- printk("\n");
- }
-
- printk("dumping TX ring info:\n");
- for (i = 0; i < TX_RING_ENTRIES; i++)
- printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n",
- rrpriv->tx_ring[i].mode,
- rrpriv->tx_ring[i].size,
- (unsigned long long) rrpriv->tx_ring[i].addr.addrlo);
-
-}
-
-
-static int rr_close(struct net_device *dev)
-{
- struct rr_private *rrpriv = netdev_priv(dev);
- struct rr_regs __iomem *regs = rrpriv->regs;
- struct pci_dev *pdev = rrpriv->pci_dev;
- unsigned long flags;
- u32 tmp;
- short i;
-
- netif_stop_queue(dev);
-
-
- /*
- * Lock to make sure we are not cleaning up while another CPU
- * is handling interrupts.
- */
- spin_lock_irqsave(&rrpriv->lock, flags);
-
- tmp = readl(&regs->HostCtrl);
- if (tmp & NIC_HALTED){
- printk("%s: NIC already halted\n", dev->name);
- rr_dump(dev);
- }else{
- tmp |= HALT_NIC | RR_CLEAR_INT;
- writel(tmp, &regs->HostCtrl);
- readl(&regs->HostCtrl);
- }
-
- rrpriv->fw_running = 0;
-
- spin_unlock_irqrestore(&rrpriv->lock, flags);
- timer_delete_sync(&rrpriv->timer);
- spin_lock_irqsave(&rrpriv->lock, flags);
-
- writel(0, &regs->TxPi);
- writel(0, &regs->IpRxPi);
-
- writel(0, &regs->EvtCon);
- writel(0, &regs->EvtPrd);
-
- for (i = 0; i < CMD_RING_ENTRIES; i++)
- writel(0, &regs->CmdRing[i]);
-
- rrpriv->info->tx_ctrl.entries = 0;
- rrpriv->info->cmd_ctrl.pi = 0;
- rrpriv->info->evt_ctrl.pi = 0;
- rrpriv->rx_ctrl[4].entries = 0;
-
- rr_raz_tx(rrpriv, dev);
- rr_raz_rx(rrpriv, dev);
-
- dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
- rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
- rrpriv->rx_ctrl = NULL;
-
- dma_free_coherent(&pdev->dev, sizeof(struct rr_info), rrpriv->info,
- rrpriv->info_dma);
- rrpriv->info = NULL;
-
- spin_unlock_irqrestore(&rrpriv->lock, flags);
- free_irq(pdev->irq, dev);
-
- return 0;
-}
-
-
-static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct rr_private *rrpriv = netdev_priv(dev);
- struct rr_regs __iomem *regs = rrpriv->regs;
- struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
- struct ring_ctrl *txctrl;
- unsigned long flags;
- u32 index, len = skb->len;
- u32 *ifield;
- struct sk_buff *new_skb;
-
- if (readl(&regs->Mode) & FATAL_ERR)
- printk("error codes Fail1 %02x, Fail2 %02x\n",
- readl(&regs->Fail1), readl(&regs->Fail2));
-
- /*
- * We probably need to deal with tbusy here to prevent overruns.
- */
-
- if (skb_headroom(skb) < 8){
- printk("incoming skb too small - reallocating\n");
- if (!(new_skb = dev_alloc_skb(len + 8))) {
- dev_kfree_skb(skb);
- netif_wake_queue(dev);
- return NETDEV_TX_OK;
- }
- skb_reserve(new_skb, 8);
- skb_put(new_skb, len);
- skb_copy_from_linear_data(skb, new_skb->data, len);
- dev_kfree_skb(skb);
- skb = new_skb;
- }
-
- ifield = skb_push(skb, 8);
-
- ifield[0] = 0;
- ifield[1] = hcb->ifield;
-
- /*
- * We don't need the lock before we are actually going to start
- * fiddling with the control blocks.
- */
- spin_lock_irqsave(&rrpriv->lock, flags);
-
- txctrl = &rrpriv->info->tx_ctrl;
-
- index = txctrl->pi;
-
- rrpriv->tx_skbuff[index] = skb;
- set_rraddr(&rrpriv->tx_ring[index].addr,
- dma_map_single(&rrpriv->pci_dev->dev, skb->data, len + 8, DMA_TO_DEVICE));
- rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
- rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
- txctrl->pi = (index + 1) % TX_RING_ENTRIES;
- wmb();
- writel(txctrl->pi, &regs->TxPi);
-
- if (txctrl->pi == rrpriv->dirty_tx){
- rrpriv->tx_full = 1;
- netif_stop_queue(dev);
- }
-
- spin_unlock_irqrestore(&rrpriv->lock, flags);
-
- return NETDEV_TX_OK;
-}
-
-
-/*
- * Read the firmware out of the EEPROM and put it into the SRAM
- * (or from user space - later)
- *
- * This operation requires the NIC to be halted and is performed with
- * interrupts disabled and with the spinlock held.
- */
-static int rr_load_firmware(struct net_device *dev)
-{
- struct rr_private *rrpriv;
- struct rr_regs __iomem *regs;
- size_t eptr, segptr;
- int i, j;
- u32 localctrl, sptr, len, tmp;
- u32 p2len, p2size, nr_seg, revision, io, sram_size;
-
- rrpriv = netdev_priv(dev);
- regs = rrpriv->regs;
-
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- if (!(readl(&regs->HostCtrl) & NIC_HALTED)){
- printk("%s: Trying to load firmware to a running NIC.\n",
- dev->name);
- return -EBUSY;
- }
-
- localctrl = readl(&regs->LocalCtrl);
- writel(0, &regs->LocalCtrl);
-
- writel(0, &regs->EvtPrd);
- writel(0, &regs->RxPrd);
- writel(0, &regs->TxPrd);
-
- /*
- * First wipe the entire SRAM, otherwise we might run into all
- * kinds of trouble ... sigh, this took almost all afternoon
- * to track down ;-(
- */
- io = readl(&regs->ExtIo);
- writel(0, &regs->ExtIo);
- sram_size = rr_read_eeprom_word(rrpriv, 8);
-
- for (i = 200; i < sram_size / 4; i++){
- writel(i * 4, &regs->WinBase);
- mb();
- writel(0, &regs->WinData);
- mb();
- }
- writel(io, &regs->ExtIo);
- mb();
-
- eptr = rr_read_eeprom_word(rrpriv,
- offsetof(struct eeprom, rncd_info.AddrRunCodeSegs));
- eptr = ((eptr & 0x1fffff) >> 3);
-
- p2len = rr_read_eeprom_word(rrpriv, 0x83*4);
- p2len = (p2len << 2);
- p2size = rr_read_eeprom_word(rrpriv, 0x84*4);
- p2size = ((p2size & 0x1fffff) >> 3);
-
- if ((eptr < p2size) || (eptr > (p2size + p2len))){
- printk("%s: eptr is invalid\n", dev->name);
- goto out;
- }
-
- revision = rr_read_eeprom_word(rrpriv,
- offsetof(struct eeprom, manf.HeaderFmt));
-
- if (revision != 1){
- printk("%s: invalid firmware format (%i)\n",
- dev->name, revision);
- goto out;
- }
-
- nr_seg = rr_read_eeprom_word(rrpriv, eptr);
- eptr += 4;
-#if (DEBUG > 1)
- printk("%s: nr_seg %i\n", dev->name, nr_seg);
-#endif
-
- for (i = 0; i < nr_seg; i++){
- sptr = rr_read_eeprom_word(rrpriv, eptr);
- eptr += 4;
- len = rr_read_eeprom_word(rrpriv, eptr);
- eptr += 4;
- segptr = rr_read_eeprom_word(rrpriv, eptr);
- segptr = ((segptr & 0x1fffff) >> 3);
- eptr += 4;
-#if (DEBUG > 1)
- printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
- dev->name, i, sptr, len, segptr);
-#endif
- for (j = 0; j < len; j++){
- tmp = rr_read_eeprom_word(rrpriv, segptr);
- writel(sptr, &regs->WinBase);
- mb();
- writel(tmp, &regs->WinData);
- mb();
- segptr += 4;
- sptr += 4;
- }
- }
-
-out:
- writel(localctrl, &regs->LocalCtrl);
- mb();
- return 0;
-}
-
-
-static int rr_siocdevprivate(struct net_device *dev, struct ifreq *rq,
- void __user *data, int cmd)
-{
- struct rr_private *rrpriv;
- unsigned char *image, *oldimage;
- unsigned long flags;
- unsigned int i;
- int error = -EOPNOTSUPP;
-
- rrpriv = netdev_priv(dev);
-
- switch(cmd){
- case SIOCRRGFW:
- if (!capable(CAP_SYS_RAWIO)){
- return -EPERM;
- }
-
- image = kmalloc_array(EEPROM_WORDS, sizeof(u32), GFP_KERNEL);
- if (!image)
- return -ENOMEM;
-
- if (rrpriv->fw_running){
- printk("%s: Firmware already running\n", dev->name);
- error = -EPERM;
- goto gf_out;
- }
-
- spin_lock_irqsave(&rrpriv->lock, flags);
- i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
- spin_unlock_irqrestore(&rrpriv->lock, flags);
- if (i != EEPROM_BYTES){
- printk(KERN_ERR "%s: Error reading EEPROM\n",
- dev->name);
- error = -EFAULT;
- goto gf_out;
- }
- error = copy_to_user(data, image, EEPROM_BYTES);
- if (error)
- error = -EFAULT;
- gf_out:
- kfree(image);
- return error;
-
- case SIOCRRPFW:
- if (!capable(CAP_SYS_RAWIO)){
- return -EPERM;
- }
-
- image = memdup_user(data, EEPROM_BYTES);
- if (IS_ERR(image))
- return PTR_ERR(image);
-
- oldimage = kmalloc(EEPROM_BYTES, GFP_KERNEL);
- if (!oldimage) {
- kfree(image);
- return -ENOMEM;
- }
-
- if (rrpriv->fw_running){
- printk("%s: Firmware already running\n", dev->name);
- error = -EPERM;
- goto wf_out;
- }
-
- printk("%s: Updating EEPROM firmware\n", dev->name);
-
- spin_lock_irqsave(&rrpriv->lock, flags);
- error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
- if (error)
- printk(KERN_ERR "%s: Error writing EEPROM\n",
- dev->name);
-
- i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
- spin_unlock_irqrestore(&rrpriv->lock, flags);
-
- if (i != EEPROM_BYTES)
- printk(KERN_ERR "%s: Error reading back EEPROM "
- "image\n", dev->name);
-
- error = memcmp(image, oldimage, EEPROM_BYTES);
- if (error){
- printk(KERN_ERR "%s: Error verifying EEPROM image\n",
- dev->name);
- error = -EFAULT;
- }
- wf_out:
- kfree(oldimage);
- kfree(image);
- return error;
-
- case SIOCRRID:
- return put_user(0x52523032, (int __user *)data);
- default:
- return error;
- }
-}
-
-static const struct pci_device_id rr_pci_tbl[] = {
- { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
- PCI_ANY_ID, PCI_ANY_ID, },
- { 0,}
-};
-MODULE_DEVICE_TABLE(pci, rr_pci_tbl);
-
-static struct pci_driver rr_driver = {
- .name = "rrunner",
- .id_table = rr_pci_tbl,
- .probe = rr_init_one,
- .remove = rr_remove_one,
-};
-
-module_pci_driver(rr_driver);
diff --git a/drivers/net/hippi/rrunner.h b/drivers/net/hippi/rrunner.h
deleted file mode 100644
index 55377614e752..000000000000
--- a/drivers/net/hippi/rrunner.h
+++ /dev/null
@@ -1,848 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _RRUNNER_H_
-#define _RRUNNER_H_
-
-#include <linux/interrupt.h>
-
-#if ((BITS_PER_LONG != 32) && (BITS_PER_LONG != 64))
-#error "BITS_PER_LONG not defined or not valid"
-#endif
-
-
-struct rr_regs {
-
- u32 pad0[16];
-
- u32 HostCtrl;
- u32 LocalCtrl;
- u32 Pc;
- u32 BrkPt;
-
-/* Timer increments every 0.97 microseconds (unsigned int) */
- u32 Timer_Hi;
- u32 Timer;
- u32 TimerRef;
- u32 PciState;
-
- u32 Event;
- u32 MbEvent;
-
- u32 WinBase;
- u32 WinData;
- u32 RX_state;
- u32 TX_state;
-
- u32 Overhead;
- u32 ExtIo;
-
- u32 DmaWriteHostHi;
- u32 DmaWriteHostLo;
-
- u32 pad1[2];
-
- u32 DmaReadHostHi;
- u32 DmaReadHostLo;
-
- u32 pad2;
-
- u32 DmaReadLen;
- u32 DmaWriteState;
-
- u32 DmaWriteLcl;
- u32 DmaWriteIPchecksum;
- u32 DmaWriteLen;
- u32 DmaReadState;
- u32 DmaReadLcl;
- u32 DmaReadIPchecksum;
- u32 pad3;
-
- u32 RxBase;
- u32 RxPrd;
- u32 RxCon;
-
- u32 pad4;
-
- u32 TxBase;
- u32 TxPrd;
- u32 TxCon;
-
- u32 pad5;
-
- u32 RxIndPro;
- u32 RxIndCon;
- u32 RxIndRef;
-
- u32 pad6;
-
- u32 TxIndPro;
- u32 TxIndCon;
- u32 TxIndRef;
-
- u32 pad7[17];
-
- u32 DrCmndPro;
- u32 DrCmndCon;
- u32 DrCmndRef;
-
- u32 pad8;
-
- u32 DwCmndPro;
- u32 DwCmndCon;
- u32 DwCmndRef;
-
- u32 AssistState;
-
- u32 DrDataPro;
- u32 DrDataCon;
- u32 DrDataRef;
-
- u32 pad9;
-
- u32 DwDataPro;
- u32 DwDataCon;
- u32 DwDataRef;
-
- u32 pad10[33];
-
- u32 EvtCon;
-
- u32 pad11[5];
-
- u32 TxPi;
- u32 IpRxPi;
-
- u32 pad11a[8];
-
- u32 CmdRing[16];
-
-/* The ULA is in two registers; the high-order two bytes of the first
- * word contain the RunCode features.
- * ula0 res res byte0 byte1
- * ula1 byte2 byte3 byte4 byte5
- */
- u32 Ula0;
- u32 Ula1;
-
- u32 RxRingHi;
- u32 RxRingLo;
-
- u32 InfoPtrHi;
- u32 InfoPtrLo;
-
- u32 Mode;
-
- u32 ConRetry;
- u32 ConRetryTmr;
-
- u32 ConTmout;
- u32 CtatTmr;
-
- u32 MaxRxRng;
-
- u32 IntrTmr;
- u32 TxDataMvTimeout;
- u32 RxDataMvTimeout;
-
- u32 EvtPrd;
- u32 TraceIdx;
-
- u32 Fail1;
- u32 Fail2;
-
- u32 DrvPrm;
-
- u32 FilterLA;
-
- u32 FwRev;
- u32 FwRes1;
- u32 FwRes2;
- u32 FwRes3;
-
- u32 WriteDmaThresh;
- u32 ReadDmaThresh;
-
- u32 pad12[325];
- u32 Window[512];
-};
-
-/*
- * Host control register bits.
- */
-
-#define RR_INT 0x01
-#define RR_CLEAR_INT 0x02
-#define NO_SWAP 0x04000004
-#define NO_SWAP1 0x00000004
-#define PCI_RESET_NIC 0x08
-#define HALT_NIC 0x10
-#define SSTEP_NIC 0x20
-#define MEM_READ_MULTI 0x40
-#define NIC_HALTED 0x100
-#define HALT_INST 0x200
-#define PARITY_ERR 0x400
-#define INVALID_INST_B 0x800
-#define RR_REV_2 0x20000000
-#define RR_REV_MASK 0xf0000000
-
-/*
- * Local control register bits.
- */
-
-#define INTA_STATE 0x01
-#define CLEAR_INTA 0x02
-#define FAST_EEPROM_ACCESS 0x08
-#define ENABLE_EXTRA_SRAM 0x100
-#define ENABLE_EXTRA_DESC 0x200
-#define ENABLE_PARITY 0x400
-#define FORCE_DMA_PARITY_ERROR 0x800
-#define ENABLE_EEPROM_WRITE 0x1000
-#define ENABLE_DATA_CACHE 0x2000
-#define SRAM_LO_PARITY_ERR 0x4000
-#define SRAM_HI_PARITY_ERR 0x8000
-
-/*
- * PCI state bits.
- */
-
-#define FORCE_PCI_RESET 0x01
-#define PROVIDE_LENGTH 0x02
-#define MASK_DMA_READ_MAX 0x1C
-#define RBURST_DISABLE 0x00
-#define RBURST_4 0x04
-#define RBURST_16 0x08
-#define RBURST_32 0x0C
-#define RBURST_64 0x10
-#define RBURST_128 0x14
-#define RBURST_256 0x18
-#define RBURST_1024 0x1C
-#define MASK_DMA_WRITE_MAX 0xE0
-#define WBURST_DISABLE 0x00
-#define WBURST_4 0x20
-#define WBURST_16 0x40
-#define WBURST_32 0x60
-#define WBURST_64 0x80
-#define WBURST_128 0xa0
-#define WBURST_256 0xc0
-#define WBURST_1024 0xe0
-#define MASK_MIN_DMA 0xFF00
-#define FIFO_RETRY_ENABLE 0x10000
-
-/*
- * Event register
- */
-
-#define DMA_WRITE_DONE 0x10000
-#define DMA_READ_DONE 0x20000
-#define DMA_WRITE_ERR 0x40000
-#define DMA_READ_ERR 0x80000
-
-/*
- * Receive state
- *
- * RoadRunner HIPPI Receive State Register controls and monitors the
- * HIPPI receive interface in the NIC. Look at err bits when a HIPPI
- * receive Error Event occurs.
- */
-
-#define ENABLE_NEW_CON 0x01
-#define RESET_RECV 0x02
-#define RECV_ALL 0x00
-#define RECV_1K 0x20
-#define RECV_2K 0x40
-#define RECV_4K 0x60
-#define RECV_8K 0x80
-#define RECV_16K 0xa0
-#define RECV_32K 0xc0
-#define RECV_64K 0xe0
-
-/*
- * Transmit status.
- */
-
-#define ENA_XMIT 0x01
-#define PERM_CON 0x02
-
-/*
- * DMA write state
- */
-
-#define RESET_DMA 0x01
-#define NO_SWAP_DMA 0x02
-#define DMA_ACTIVE 0x04
-#define THRESH_MASK 0x1F
-#define DMA_ERROR_MASK 0xff000000
-
-/*
- * Goodies stored in the ULA registers.
- */
-
-#define TRACE_ON_WHAT_BIT 0x00020000 /* Traces on */
-#define ONEM_BUF_WHAT_BIT 0x00040000 /* 1Meg vs 256K */
-#define CHAR_API_WHAT_BIT 0x00080000 /* Char API vs network only */
-#define CMD_EVT_WHAT_BIT 0x00200000 /* Command event */
-#define LONG_TX_WHAT_BIT 0x00400000
-#define LONG_RX_WHAT_BIT 0x00800000
-#define WHAT_BIT_MASK 0xFFFD0000 /* Feature bit mask */
-
-/*
- * Mode status
- */
-
-#define EVENT_OVFL 0x80000000
-#define FATAL_ERR 0x40000000
-#define LOOP_BACK 0x01
-#define MODE_PH 0x02
-#define MODE_FP 0x00
-#define PTR64BIT 0x04
-#define PTR32BIT 0x00
-#define PTR_WD_SWAP 0x08
-#define PTR_WD_NOSWAP 0x00
-#define POST_WARN_EVENT 0x10
-#define ERR_TERM 0x20
-#define DIRECT_CONN 0x40
-#define NO_NIC_WATCHDOG 0x80
-#define SWAP_DATA 0x100
-#define SWAP_CONTROL 0x200
-#define NIC_HALT_ON_ERR 0x400
-#define NIC_NO_RESTART 0x800
-#define HALF_DUP_TX 0x1000
-#define HALF_DUP_RX 0x2000
-
-
-/*
- * Error codes
- */
-
-/* Host Error Codes - values of fail1 */
-#define ERR_UNKNOWN_MBOX 0x1001
-#define ERR_UNKNOWN_CMD 0x1002
-#define ERR_MAX_RING 0x1003
-#define ERR_RING_CLOSED 0x1004
-#define ERR_RING_OPEN 0x1005
-/* Firmware internal errors */
-#define ERR_EVENT_RING_FULL 0x01
-#define ERR_DW_PEND_CMND_FULL 0x02
-#define ERR_DR_PEND_CMND_FULL 0x03
-#define ERR_DW_PEND_DATA_FULL 0x04
-#define ERR_DR_PEND_DATA_FULL 0x05
-#define ERR_ILLEGAL_JUMP 0x06
-#define ERR_UNIMPLEMENTED 0x07
-#define ERR_TX_INFO_FULL 0x08
-#define ERR_RX_INFO_FULL 0x09
-#define ERR_ILLEGAL_MODE 0x0A
-#define ERR_MAIN_TIMEOUT 0x0B
-#define ERR_EVENT_BITS 0x0C
-#define ERR_UNPEND_FULL 0x0D
-#define ERR_TIMER_QUEUE_FULL 0x0E
-#define ERR_TIMER_QUEUE_EMPTY 0x0F
-#define ERR_TIMER_NO_FREE 0x10
-#define ERR_INTR_START 0x11
-#define ERR_BAD_STARTUP 0x12
-#define ERR_NO_PKT_END 0x13
-#define ERR_HALTED_ON_ERR 0x14
-/* Hardware NIC Errors */
-#define ERR_WRITE_DMA 0x0101
-#define ERR_READ_DMA 0x0102
-#define ERR_EXT_SERIAL 0x0103
-#define ERR_TX_INT_PARITY 0x0104
-
-
-/*
- * Event definitions
- */
-
-#define EVT_RING_ENTRIES 64
-#define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event))
-
-struct event {
-#ifdef __LITTLE_ENDIAN
- u16 index;
- u8 ring;
- u8 code;
-#else
- u8 code;
- u8 ring;
- u16 index;
-#endif
- u32 timestamp;
-};
-
-/*
- * General Events
- */
-
-#define E_NIC_UP 0x01
-#define E_WATCHDOG 0x02
-
-#define E_STAT_UPD 0x04
-#define E_INVAL_CMD 0x05
-#define E_SET_CMD_CONS 0x06
-#define E_LINK_ON 0x07
-#define E_LINK_OFF 0x08
-#define E_INTERN_ERR 0x09
-#define E_HOST_ERR 0x0A
-#define E_STATS_UPDATE 0x0B
-#define E_REJECTING 0x0C
-
-/*
- * Send Events
- */
-#define E_CON_REJ 0x13
-#define E_CON_TMOUT 0x14
-#define E_CON_NC_TMOUT 0x15 /* Connection No Campon Timeout */
-#define E_DISC_ERR 0x16
-#define E_INT_PRTY 0x17
-#define E_TX_IDLE 0x18
-#define E_TX_LINK_DROP 0x19
-#define E_TX_INV_RNG 0x1A
-#define E_TX_INV_BUF 0x1B
-#define E_TX_INV_DSC 0x1C
-
-/*
- * Destination Events
- */
-/*
- * General Receive events
- */
-#define E_VAL_RNG 0x20
-#define E_RX_RNG_ENER 0x21
-#define E_INV_RNG 0x22
-#define E_RX_RNG_SPC 0x23
-#define E_RX_RNG_OUT 0x24
-#define E_PKT_DISCARD 0x25
-#define E_INFO_EVT 0x27
-
-/*
- * Data corrupted events
- */
-#define E_RX_PAR_ERR 0x2B
-#define E_RX_LLRC_ERR 0x2C
-#define E_IP_CKSM_ERR 0x2D
-#define E_DTA_CKSM_ERR 0x2E
-#define E_SHT_BST 0x2F
-
-/*
- * Data lost events
- */
-#define E_LST_LNK_ERR 0x30
-#define E_FLG_SYN_ERR 0x31
-#define E_FRM_ERR 0x32
-#define E_RX_IDLE 0x33
-#define E_PKT_LN_ERR 0x34
-#define E_STATE_ERR 0x35
-#define E_UNEXP_DATA 0x3C
-
-/*
- * Fatal events
- */
-#define E_RX_INV_BUF 0x36
-#define E_RX_INV_DSC 0x37
-#define E_RNG_BLK 0x38
-
-/*
- * Warning events
- */
-#define E_RX_TO 0x39
-#define E_BFR_SPC 0x3A
-#define E_INV_ULP 0x3B
-
-#define E_NOT_IMPLEMENTED 0x40
-
-
-/*
- * Commands
- */
-
-#define CMD_RING_ENTRIES 16
-
-struct cmd {
-#ifdef __LITTLE_ENDIAN
- u16 index;
- u8 ring;
- u8 code;
-#else
- u8 code;
- u8 ring;
- u16 index;
-#endif
-};
-
-#define C_START_FW 0x01
-#define C_UPD_STAT 0x02
-#define C_WATCHDOG 0x05
-#define C_DEL_RNG 0x09
-#define C_NEW_RNG 0x0A
-#define C_CONN 0x0D
-
-
-/*
- * Mode bits
- */
-
-#define PACKET_BAD 0x01 /* Packet had link-layer error */
-#define INTERRUPT 0x02
-#define TX_IP_CKSUM 0x04
-#define PACKET_END 0x08
-#define PACKET_START 0x10
-#define SAME_IFIELD 0x80
-
-
-typedef struct {
-#if (BITS_PER_LONG == 64)
- u64 addrlo;
-#else
- u32 addrhi;
- u32 addrlo;
-#endif
-} rraddr;
-
-
-static inline void set_rraddr(rraddr *ra, dma_addr_t addr)
-{
- unsigned long baddr = addr;
-#if (BITS_PER_LONG == 64)
- ra->addrlo = baddr;
-#else
- /* addrhi is assumed to be zero already; don't bother clearing it every time */
- ra->addrlo = baddr;
-#endif
- mb();
-}
-
-
-static inline void set_rxaddr(struct rr_regs __iomem *regs, volatile dma_addr_t addr)
-{
- unsigned long baddr = addr;
-#if (BITS_PER_LONG == 64) && defined(__LITTLE_ENDIAN)
- writel(baddr & 0xffffffff, &regs->RxRingHi);
- writel(baddr >> 32, &regs->RxRingLo);
-#elif (BITS_PER_LONG == 64)
- writel(baddr >> 32, &regs->RxRingHi);
- writel(baddr & 0xffffffff, &regs->RxRingLo);
-#else
- writel(0, &regs->RxRingHi);
- writel(baddr, &regs->RxRingLo);
-#endif
- mb();
-}
-
-
-static inline void set_infoaddr(struct rr_regs __iomem *regs, volatile dma_addr_t addr)
-{
- unsigned long baddr = addr;
-#if (BITS_PER_LONG == 64) && defined(__LITTLE_ENDIAN)
- writel(baddr & 0xffffffff, &regs->InfoPtrHi);
- writel(baddr >> 32, &regs->InfoPtrLo);
-#elif (BITS_PER_LONG == 64)
- writel(baddr >> 32, &regs->InfoPtrHi);
- writel(baddr & 0xffffffff, &regs->InfoPtrLo);
-#else
- writel(0, &regs->InfoPtrHi);
- writel(baddr, &regs->InfoPtrLo);
-#endif
- mb();
-}
-
-
-/*
- * TX ring
- */
-
-#ifdef CONFIG_ROADRUNNER_LARGE_RINGS
-#define TX_RING_ENTRIES 32
-#else
-#define TX_RING_ENTRIES 16
-#endif
-#define TX_TOTAL_SIZE (TX_RING_ENTRIES * sizeof(struct tx_desc))
-
-struct tx_desc{
- rraddr addr;
- u32 res;
-#ifdef __LITTLE_ENDIAN
- u16 size;
- u8 pad;
- u8 mode;
-#else
- u8 mode;
- u8 pad;
- u16 size;
-#endif
-};
-
-
-#ifdef CONFIG_ROADRUNNER_LARGE_RINGS
-#define RX_RING_ENTRIES 32
-#else
-#define RX_RING_ENTRIES 16
-#endif
-#define RX_TOTAL_SIZE (RX_RING_ENTRIES * sizeof(struct rx_desc))
-
-struct rx_desc{
- rraddr addr;
- u32 res;
-#ifdef __LITTLE_ENDIAN
- u16 size;
- u8 pad;
- u8 mode;
-#else
- u8 mode;
- u8 pad;
- u16 size;
-#endif
-};
-
-
-/*
- * ioctl's
- */
-
-#define SIOCRRPFW SIOCDEVPRIVATE /* put firmware */
-#define SIOCRRGFW (SIOCDEVPRIVATE + 1) /* get firmware */
-#define SIOCRRID (SIOCDEVPRIVATE + 2) /* identify */
-
-
-struct seg_hdr {
- u32 seg_start;
- u32 seg_len;
- u32 seg_eestart;
-};
-
-
-#define EEPROM_BASE 0x80000000
-#define EEPROM_WORDS 8192
-#define EEPROM_BYTES (EEPROM_WORDS * sizeof(u32))
-
-struct eeprom_boot {
- u32 key1;
- u32 key2;
- u32 sram_size;
- struct seg_hdr loader;
- u32 init_chksum;
- u32 reserved1;
-};
-
-struct eeprom_manf {
- u32 HeaderFmt;
- u32 Firmware;
- u32 BoardRevision;
- u32 RoadrunnerRev;
- char OpticsPart[8];
- u32 OpticsRev;
- u32 pad1;
- char SramPart[8];
- u32 SramRev;
- u32 pad2;
- char EepromPart[8];
- u32 EepromRev;
- u32 EepromSize;
- char PalPart[8];
- u32 PalRev;
- u32 pad3;
- char PalCodeFile[12];
- u32 PalCodeRev;
- char BoardULA[8];
- char SerialNo[8];
- char MfgDate[8];
- char MfgTime[8];
- char ModifyDate[8];
- u32 ModCount;
- u32 pad4[13];
-};
-
-
-struct eeprom_phase_info {
- char phase1File[12];
- u32 phase1Rev;
- char phase1Date[8];
- char phase2File[12];
- u32 phase2Rev;
- char phase2Date[8];
- u32 reserved7[4];
-};
-
-struct eeprom_rncd_info {
- u32 FwStart;
- u32 FwRev;
- char FwDate[8];
- u32 AddrRunCodeSegs;
- u32 FileNames;
- char File[13][8];
-};
-
-
-/* Phase 1 region (starts at word offset 0x80) */
-struct phase1_hdr{
- u32 jump;
- u32 noop;
- struct seg_hdr phase2Seg;
-};
-
-struct eeprom {
- struct eeprom_boot boot;
- u32 pad1[8];
- struct eeprom_manf manf;
- struct eeprom_phase_info phase_info;
- struct eeprom_rncd_info rncd_info;
- u32 pad2[15];
- u32 hdr_checksum;
- struct phase1_hdr phase1;
-};
-
-
-struct rr_stats {
- u32 NicTimeStamp;
- u32 RngCreated;
- u32 RngDeleted;
- u32 IntrGen;
- u32 NEvtOvfl;
- u32 InvCmd;
- u32 DmaReadErrs;
- u32 DmaWriteErrs;
- u32 StatUpdtT;
- u32 StatUpdtC;
- u32 WatchDog;
- u32 Trace;
-
- /* Serial HIPPI */
- u32 LnkRdyEst;
- u32 GLinkErr;
- u32 AltFlgErr;
- u32 OvhdBit8Sync;
- u32 RmtSerPrtyErr;
- u32 RmtParPrtyErr;
- u32 RmtLoopBk;
- u32 pad1;
-
- /* HIPPI tx */
- u32 ConEst;
- u32 ConRejS;
- u32 ConRetry;
- u32 ConTmOut;
- u32 SndConDiscon;
- u32 SndParErr;
- u32 PktSnt;
- u32 pad2[2];
- u32 ShFBstSnt;
- u64 BytSent;
- u32 TxTimeout;
- u32 pad3[3];
-
- /* HIPPI rx */
- u32 ConAcc;
- u32 ConRejdiPrty;
- u32 ConRejd64b;
- u32 ConRejdBuf;
- u32 RxConDiscon;
- u32 RxConNoData;
- u32 PktRx;
- u32 pad4[2];
- u32 ShFBstRx;
- u64 BytRx;
- u32 RxParErr;
- u32 RxLLRCerr;
- u32 RxBstSZerr;
- u32 RxStateErr;
- u32 RxRdyErr;
- u32 RxInvULP;
- u32 RxSpcBuf;
- u32 RxSpcDesc;
- u32 RxRngSpc;
- u32 RxRngFull;
- u32 RxPktLenErr;
- u32 RxCksmErr;
- u32 RxPktDrp;
- u32 RngLowSpc;
- u32 RngDataClose;
- u32 RxTimeout;
- u32 RxIdle;
-};
-
-
-/*
- * This struct is shared with the NIC firmware.
- */
-struct ring_ctrl {
- rraddr rngptr;
-#ifdef __LITTLE_ENDIAN
- u16 entries;
- u8 pad;
- u8 entry_size;
- u16 pi;
- u16 mode;
-#else
- u8 entry_size;
- u8 pad;
- u16 entries;
- u16 mode;
- u16 pi;
-#endif
-};
-
-struct rr_info {
- union {
- struct rr_stats stats;
- u32 stati[128];
- } s;
- struct ring_ctrl evt_ctrl;
- struct ring_ctrl cmd_ctrl;
- struct ring_ctrl tx_ctrl;
- u8 pad[464];
- u8 trace[3072];
-};
-
-/*
- * The linux structure for the RoadRunner.
- *
- * RX/TX descriptors are put first to make sure they are properly
- * aligned and do not cross cache-line boundaries.
- */
-
-struct rr_private
-{
- struct rx_desc *rx_ring;
- struct tx_desc *tx_ring;
- struct event *evt_ring;
- dma_addr_t tx_ring_dma;
- dma_addr_t rx_ring_dma;
- dma_addr_t evt_ring_dma;
- /* Alignment ok ? */
- struct sk_buff *rx_skbuff[RX_RING_ENTRIES];
- struct sk_buff *tx_skbuff[TX_RING_ENTRIES];
- struct rr_regs __iomem *regs; /* Register base */
- struct ring_ctrl *rx_ctrl; /* Receive ring control */
- struct rr_info *info; /* Shared info page */
- dma_addr_t rx_ctrl_dma;
- dma_addr_t info_dma;
- spinlock_t lock;
- struct timer_list timer;
- u32 cur_rx, cur_cmd, cur_evt;
- u32 dirty_rx, dirty_tx;
- u32 tx_full;
- u32 fw_rev;
- volatile short fw_running;
- struct pci_dev *pci_dev;
-};
-
-
-/*
- * Prototypes
- */
-static int rr_init(struct net_device *dev);
-static int rr_init1(struct net_device *dev);
-static irqreturn_t rr_interrupt(int irq, void *dev_id);
-
-static int rr_open(struct net_device *dev);
-static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static int rr_close(struct net_device *dev);
-static int rr_siocdevprivate(struct net_device *dev, struct ifreq *rq,
- void __user *data, int cmd);
-static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
- unsigned long offset,
- unsigned char *buf,
- unsigned long length);
-static u32 rr_read_eeprom_word(struct rr_private *rrpriv, size_t offset);
-static int rr_load_firmware(struct net_device *dev);
-static inline void rr_raz_tx(struct rr_private *, struct net_device *);
-static inline void rr_raz_rx(struct rr_private *, struct net_device *);
-#endif /* _RRUNNER_H_ */
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index d3dc0914450a..6da801748189 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -39,8 +39,8 @@
#define TX_Q_LIMIT 32
struct ifb_q_stats {
- u64 packets;
- u64 bytes;
+ u64_stats_t packets;
+ u64_stats_t bytes;
struct u64_stats_sync sync;
};
@@ -81,8 +81,8 @@ static int ifb_close(struct net_device *dev);
static void ifb_update_q_stats(struct ifb_q_stats *stats, int len)
{
u64_stats_update_begin(&stats->sync);
- stats->packets++;
- stats->bytes += len;
+ u64_stats_inc(&stats->packets);
+ u64_stats_add(&stats->bytes, len);
u64_stats_update_end(&stats->sync);
}
@@ -163,16 +163,16 @@ static void ifb_stats64(struct net_device *dev,
for (i = 0; i < dev->num_tx_queues; i++,txp++) {
do {
start = u64_stats_fetch_begin(&txp->rx_stats.sync);
- packets = txp->rx_stats.packets;
- bytes = txp->rx_stats.bytes;
+ packets = u64_stats_read(&txp->rx_stats.packets);
+ bytes = u64_stats_read(&txp->rx_stats.bytes);
} while (u64_stats_fetch_retry(&txp->rx_stats.sync, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
do {
start = u64_stats_fetch_begin(&txp->tx_stats.sync);
- packets = txp->tx_stats.packets;
- bytes = txp->tx_stats.bytes;
+ packets = u64_stats_read(&txp->tx_stats.packets);
+ bytes = u64_stats_read(&txp->tx_stats.bytes);
} while (u64_stats_fetch_retry(&txp->tx_stats.sync, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
@@ -248,7 +248,7 @@ static void ifb_fill_stats_data(u64 **data,
start = u64_stats_fetch_begin(&q_stats->sync);
for (j = 0; j < IFB_Q_STATS_LEN; j++) {
offset = ifb_q_stats_desc[j].offset;
- (*data)[j] = *(u64 *)(stats_base + offset);
+ (*data)[j] = u64_stats_read((u64_stats_t *)(stats_base + offset));
}
} while (u64_stats_fetch_retry(&q_stats->sync, start));
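The ifb conversion above follows the standard u64_stats pattern: writers update u64_stats_t counters inside an update section, and readers loop until they fetch a snapshot no writer raced with (a no-op on 64-bit, a seqcount on 32-bit). A minimal sketch of both sides (hypothetical struct, kernel context assumed):

	#include <linux/u64_stats_sync.h>

	struct demo_stats {
		u64_stats_t		packets;
		struct u64_stats_sync	sync;
	};

	/* Writer side, e.g. per-CPU from the datapath. */
	static void demo_count_packet(struct demo_stats *s)
	{
		u64_stats_update_begin(&s->sync);
		u64_stats_inc(&s->packets);
		u64_stats_update_end(&s->sync);
	}

	/* Reader side: retry if a writer raced with the snapshot. */
	static u64 demo_read_packets(struct demo_stats *s)
	{
		unsigned int start;
		u64 packets;

		do {
			start = u64_stats_fetch_begin(&s->sync);
			packets = u64_stats_read(&s->packets);
		} while (u64_stats_fetch_retry(&s->sync, start));

		return packets;
	}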
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index bdb3a46b327c..835e048351a9 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -48,11 +48,9 @@ static u8 ipvlan_get_v6_hash(const void *iaddr)
}
#endif
-static u8 ipvlan_get_v4_hash(const void *iaddr)
+static u8 ipvlan_get_v4_hash(__be32 addr)
{
- const struct in_addr *ip4_addr = iaddr;
-
- return jhash_1word((__force u32)ip4_addr->s_addr, ipvlan_jhash_secret) &
+ return jhash_1word((__force u32)addr, ipvlan_jhash_secret) &
IPVLAN_HASH_MASK;
}
@@ -73,16 +71,30 @@ static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
return false;
}
-static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
- const void *iaddr, bool is_v6)
+#if IS_ENABLED(CONFIG_IPV6)
+static struct ipvl_addr *ipvlan_ht_addr_lookup6(const struct ipvl_port *port,
+ const void *iaddr)
{
struct ipvl_addr *addr;
u8 hash;
- hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
- ipvlan_get_v4_hash(iaddr);
+ hash = ipvlan_get_v6_hash(iaddr);
hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
- if (addr_equal(is_v6, addr, iaddr))
+ if (addr_equal(true, addr, iaddr))
+ return addr;
+ return NULL;
+}
+#endif
+
+static struct ipvl_addr *ipvlan_ht_addr_lookup4(const struct ipvl_port *port,
+ __be32 addr4)
+{
+ struct ipvl_addr *addr;
+ u8 hash;
+
+ hash = ipvlan_get_v4_hash(addr4);
+ hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
+ if (addr->atype == IPVL_IPV4 && addr->ip4addr.s_addr == addr4)
return addr;
return NULL;
}
@@ -94,7 +106,7 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
hash = (addr->atype == IPVL_IPV6) ?
ipvlan_get_v6_hash(&addr->ip6addr) :
- ipvlan_get_v4_hash(&addr->ip4addr);
+ ipvlan_get_v4_hash(addr->ip4addr.s_addr);
if (hlist_unhashed(&addr->hlnode))
hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}
@@ -355,21 +367,24 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
int addr_type, bool use_dest)
{
struct ipvl_addr *addr = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr *i6addr;
+#endif
+ __be32 addr4;
switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
case IPVL_IPV6: {
struct ipv6hdr *ip6h;
- struct in6_addr *i6addr;
ip6h = (struct ipv6hdr *)lyr3h;
i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
- addr = ipvlan_ht_addr_lookup(port, i6addr, true);
+lookup6:
+ addr = ipvlan_ht_addr_lookup6(port, i6addr);
break;
}
case IPVL_ICMPV6: {
struct nd_msg *ndmh;
- struct in6_addr *i6addr;
/* Make sure that the NeighborSolicitation ICMPv6 packets
* are handled to avoid DAD issue.
@@ -377,24 +392,23 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
ndmh = (struct nd_msg *)lyr3h;
if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
i6addr = &ndmh->target;
- addr = ipvlan_ht_addr_lookup(port, i6addr, true);
+ goto lookup6;
}
break;
}
#endif
case IPVL_IPV4: {
struct iphdr *ip4h;
- __be32 *i4addr;
ip4h = (struct iphdr *)lyr3h;
- i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
- addr = ipvlan_ht_addr_lookup(port, i4addr, false);
+ addr4 = use_dest ? ip4h->daddr : ip4h->saddr;
+lookup4:
+ addr = ipvlan_ht_addr_lookup4(port, addr4);
break;
}
case IPVL_ARP: {
struct arphdr *arph;
unsigned char *arp_ptr;
- __be32 dip;
arph = (struct arphdr *)lyr3h;
arp_ptr = (unsigned char *)(arph + 1);
@@ -403,9 +417,8 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
else
arp_ptr += port->dev->addr_len;
- memcpy(&dip, arp_ptr, 4);
- addr = ipvlan_ht_addr_lookup(port, &dip, false);
- break;
+ addr4 = get_unaligned((__be32 *)arp_ptr);
+ goto lookup4;
}
}
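The ipvlan rework replaces one by-pointer lookup taking a `const void *` plus an is_v6 flag with two typed helpers, letting the IPv4 path hash and compare a single __be32 word. A self-contained userspace sketch of the IPv4 bucket lookup (the multiplicative hash stands in for jhash_1word(); this is not the driver's exact code):

#include <stdint.h>
#include <stdio.h>

#define HASH_BITS 3
#define HASH_SIZE (1u << HASH_BITS)

struct ipvl_addr {
	uint32_t ip4addr;		/* network byte order in the driver */
	struct ipvl_addr *next;
};

static struct ipvl_addr *htable[HASH_SIZE];

/* Stand-in for jhash_1word(addr, secret) & IPVLAN_HASH_MASK */
static unsigned int hash_v4(uint32_t addr)
{
	return (uint32_t)(addr * 2654435761u) >> (32 - HASH_BITS);
}

static struct ipvl_addr *lookup4(uint32_t addr4)
{
	struct ipvl_addr *a;

	for (a = htable[hash_v4(addr4)]; a; a = a->next)
		if (a->ip4addr == addr4)	/* one word compare, no memcpy */
			return a;
	return NULL;
}

int main(void)
{
	/* 127.0.0.1 in network order, as stored on a little-endian host */
	struct ipvl_addr one = { .ip4addr = 0x0100007f, .next = NULL };
	unsigned int h = hash_v4(one.ip4addr);

	one.next = htable[h];
	htable[h] = &one;
	printf("found: %s\n", lookup4(one.ip4addr) ? "yes" : "no");
	return 0;
}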
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 5200fd5a10e5..c2cb2d20976b 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2806,7 +2806,7 @@ static void get_rx_sc_stats(struct net_device *dev,
stats = per_cpu_ptr(rx_sc->stats, cpu);
do {
start = u64_stats_fetch_begin(&stats->syncp);
- memcpy(&tmp, &stats->stats, sizeof(tmp));
+ u64_stats_copy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry(&stats->syncp, start));
sum->InOctetsValidated += tmp.InOctetsValidated;
@@ -2887,7 +2887,7 @@ static void get_tx_sc_stats(struct net_device *dev,
stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
do {
start = u64_stats_fetch_begin(&stats->syncp);
- memcpy(&tmp, &stats->stats, sizeof(tmp));
+ u64_stats_copy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry(&stats->syncp, start));
sum->OutPktsProtected += tmp.OutPktsProtected;
@@ -2943,7 +2943,7 @@ static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
do {
start = u64_stats_fetch_begin(&stats->syncp);
- memcpy(&tmp, &stats->stats, sizeof(tmp));
+ u64_stats_copy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry(&stats->syncp, start));
sum->OutPktsUntagged += tmp.OutPktsUntagged;
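Once the MACsec counters become u64_stats_t, a raw memcpy() of the whole stats struct would bypass the per-field accessors, which matter on 32-bit builds where each 64-bit read needs tearing protection; hence u64_stats_copy() inside the fetch/retry loop. A hedged userspace sketch of a field-wise snapshot (plain assignment stands in for u64_stats_read(); this illustrates the idea, not the helper's implementation):

#include <stddef.h>
#include <stdint.h>

typedef struct { uint64_t v; } u64_stats_like;

struct rxsc_stats {
	u64_stats_like InOctetsValidated;
	u64_stats_like InPktsLate;
};

/* Snapshot counter by counter so every word gets one tear-free read,
 * instead of byte-copying the whole struct.
 */
static void stats_snapshot(struct rxsc_stats *dst, const struct rxsc_stats *src)
{
	const u64_stats_like *s = (const u64_stats_like *)src;
	u64_stats_like *d = (u64_stats_like *)dst;
	size_t n = sizeof(*src) / sizeof(u64_stats_like);

	for (size_t i = 0; i < n; i++)
		d[i].v = s[i].v;	/* stand-in for u64_stats_read() */
}

int main(void)
{
	struct rxsc_stats live = { { 100 }, { 2 } }, snap;

	stats_snapshot(&snap, &live);
	return (int)snap.InPktsLate.v;	/* 2 */
}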
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index f782d93f826e..8043b57bdf25 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -242,6 +242,12 @@ static int mctp_i2c_slave_cb(struct i2c_client *client,
return 0;
switch (event) {
+ case I2C_SLAVE_READ_REQUESTED:
+ case I2C_SLAVE_READ_PROCESSED:
+ /* MCTP I2C transport only uses writes */
+ midev->rx_pos = 0;
+ *val = 0xff;
+ break;
case I2C_SLAVE_WRITE_RECEIVED:
if (midev->rx_pos < MCTP_I2C_BUFSZ) {
midev->rx_buffer[midev->rx_pos] = *val;
@@ -279,6 +285,9 @@ static int mctp_i2c_recv(struct mctp_i2c_dev *midev)
size_t recvlen;
int status;
+ if (midev->rx_pos == 0)
+ return 0;
+
/* + 1 for the PEC */
if (midev->rx_pos < MCTP_I2C_MINLEN + 1) {
ndev->stats.rx_length_errors++;
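The mctp-i2c change handles master reads, which this write-only transport previously ignored: any read aborts a partially received packet and answers 0xff. A compact userspace model of that callback logic (the event names are simplified stand-ins for the I2C_SLAVE_* enum):

#include <stdint.h>
#include <stdio.h>

enum slave_event { READ_REQUESTED, READ_PROCESSED, WRITE_RECEIVED, STOP };

struct dev_state {
	uint8_t buf[255];
	unsigned int pos;
};

/* A master read is a protocol violation for this transport: reset any
 * partial packet and answer with 0xff so the master reads all-ones.
 */
static int slave_cb(struct dev_state *d, enum slave_event ev, uint8_t *val)
{
	switch (ev) {
	case READ_REQUESTED:
	case READ_PROCESSED:
		d->pos = 0;
		*val = 0xff;
		break;
	case WRITE_RECEIVED:
		if (d->pos < sizeof(d->buf))
			d->buf[d->pos++] = *val;
		break;
	case STOP:
		if (d->pos)
			printf("packet of %u bytes\n", d->pos);
		d->pos = 0;
		break;
	}
	return 0;
}

int main(void)
{
	struct dev_state d = { .pos = 0 };
	uint8_t v = 0x42;

	slave_cb(&d, WRITE_RECEIVED, &v);
	slave_cb(&d, READ_REQUESTED, &v);	/* aborts the partial packet */
	printf("pos=%u val=0x%02x\n", d.pos, v);
	return 0;
}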
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 5b50d9186f12..d0361aaf25ef 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -819,18 +819,5 @@ void net_failover_destroy(struct failover *failover)
}
EXPORT_SYMBOL_GPL(net_failover_destroy);
-static __init int
-net_failover_init(void)
-{
- return 0;
-}
-module_init(net_failover_init);
-
-static __exit
-void net_failover_exit(void)
-{
-}
-module_exit(net_failover_exit);
-
MODULE_DESCRIPTION("Failover driver for Paravirtual drivers");
MODULE_LICENSE("GPL v2");
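Dropping the empty init/exit pair is safe because both hooks are optional for a module that only exports helpers; as far as I recall, the loader treats only init-without-exit as permanent, so removing both keeps the module unloadable. Roughly all that remains is the metadata (kernel-style sketch):

#include <linux/module.h>

/* No module_init()/module_exit(): the module loader accepts a module
 * without either hook, so an empty pair is pure boilerplate.
 */
MODULE_DESCRIPTION("example: library-style module with no init/exit");
MODULE_LICENSE("GPL");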
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 9cb4dfc242f5..0f44ce5ccc0a 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -39,6 +39,7 @@
#include <linux/u64_stats_sync.h>
#include <linux/utsname.h>
#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
MODULE_AUTHOR("Matt Mackall <mpm@selenic.com>");
MODULE_DESCRIPTION("Console driver for network interfaces");
@@ -85,6 +86,8 @@ static DEFINE_SPINLOCK(target_list_lock);
/* This needs to be a mutex because netpoll_cleanup might sleep */
static DEFINE_MUTEX(target_cleanup_list_lock);
+static struct workqueue_struct *netconsole_wq;
+
/*
* Console driver for netconsoles. Register only consoles that have
* an associated target of the same type.
@@ -119,6 +122,12 @@ enum sysdata_feature {
MAX_SYSDATA_ITEMS = 4,
};
+enum target_state {
+ STATE_DISABLED,
+ STATE_ENABLED,
+ STATE_DEACTIVATED,
+};
+
/**
* struct netconsole_target - Represents a configured netconsole target.
* @list: Links this target into the target_list.
@@ -130,12 +139,16 @@ enum sysdata_feature {
* @sysdata_fields: Sysdata features enabled.
* @msgcounter: Message sent counter.
* @stats: Packet send stats for the target. Used for debugging.
- * @enabled: On / off knob to enable / disable target.
+ * @state: State of the target.
* Visible from userspace (read-write).
- * We maintain a strict 1:1 correspondence between this and
- * whether the corresponding netpoll is active or inactive.
+ * From a userspace perspective, the target is either enabled or
+ * disabled. Internally, although both STATE_DISABLED and
+ * STATE_DEACTIVATED correspond to inactive targets, the latter is
+ * due to automatic interface state changes and will try
+ * recover automatically, if the interface comes back
+ * online.
* Also, other parameters of a target may be modified at
- * runtime only when it is disabled (enabled == 0).
+ * runtime only when it is disabled (state != STATE_ENABLED).
* @extended: Denotes whether console is extended or not.
* @release: Denotes whether kernel release version should be prepended
* to the message. Depends on extended console.
@@ -149,6 +162,7 @@ enum sysdata_feature {
* local_mac (read-only)
* remote_mac (read-write)
* @buf: The buffer used to send the full msg to the network stack
+ * @resume_wq: Workqueue to resume deactivated target
*/
struct netconsole_target {
struct list_head list;
@@ -165,12 +179,13 @@ struct netconsole_target {
u32 msgcounter;
#endif
struct netconsole_target_stats stats;
- bool enabled;
+ enum target_state state;
bool extended;
bool release;
struct netpoll np;
/* protected by target_list_lock */
char buf[MAX_PRINT_CHUNK];
+ struct work_struct resume_wq;
};
#ifdef CONFIG_NETCONSOLE_DYNAMIC
@@ -207,6 +222,16 @@ static void netconsole_target_put(struct netconsole_target *nt)
config_group_put(&nt->group);
}
+static void dynamic_netconsole_mutex_lock(void)
+{
+ mutex_lock(&dynamic_netconsole_mutex);
+}
+
+static void dynamic_netconsole_mutex_unlock(void)
+{
+ mutex_unlock(&dynamic_netconsole_mutex);
+}
+
#else /* !CONFIG_NETCONSOLE_DYNAMIC */
static int __init dynamic_netconsole_init(void)
@@ -234,8 +259,87 @@ static void populate_configfs_item(struct netconsole_target *nt,
int cmdline_count)
{
}
+
+static void dynamic_netconsole_mutex_lock(void)
+{
+}
+
+static void dynamic_netconsole_mutex_unlock(void)
+{
+}
+
#endif /* CONFIG_NETCONSOLE_DYNAMIC */
+/* Check if the target was bound by MAC address. */
+static bool bound_by_mac(struct netconsole_target *nt)
+{
+ return is_valid_ether_addr(nt->np.dev_mac);
+}
+
+/* Attempts to resume logging to a deactivated target. */
+static void resume_target(struct netconsole_target *nt)
+{
+ if (netpoll_setup(&nt->np)) {
+ /* netpoll setup failed once; do not try again. */
+ nt->state = STATE_DISABLED;
+ return;
+ }
+
+ nt->state = STATE_ENABLED;
+ pr_info("network logging resumed on interface %s\n", nt->np.dev_name);
+}
+
+/* Checks if a deactivated target matches a device. */
+static bool deactivated_target_match(struct netconsole_target *nt,
+ struct net_device *ndev)
+{
+ if (nt->state != STATE_DEACTIVATED)
+ return false;
+
+ if (bound_by_mac(nt))
+ return !memcmp(nt->np.dev_mac, ndev->dev_addr, ETH_ALEN);
+ return !strncmp(nt->np.dev_name, ndev->name, IFNAMSIZ);
+}
+
+/* Process work scheduled for target resume. */
+static void process_resume_target(struct work_struct *work)
+{
+ struct netconsole_target *nt;
+ unsigned long flags;
+
+ nt = container_of(work, struct netconsole_target, resume_wq);
+
+ dynamic_netconsole_mutex_lock();
+
+ spin_lock_irqsave(&target_list_lock, flags);
+ /* Check if the target is still deactivated, as it may have been
+ * disabled while the resume was being scheduled.
+ */
+ if (nt->state != STATE_DEACTIVATED) {
+ spin_unlock_irqrestore(&target_list_lock, flags);
+ goto out_unlock;
+ }
+
+ /* resume_target() is not IRQ safe; remove the target from
+ * target_list so it can be resumed with IRQs enabled.
+ */
+ list_del_init(&nt->list);
+ spin_unlock_irqrestore(&target_list_lock, flags);
+
+ resume_target(nt);
+
+ /* At this point the target is either enabled or disabled and
+ * was cleaned up before getting deactivated. Either way, add it
+ * back to the target list.
+ */
+ spin_lock_irqsave(&target_list_lock, flags);
+ list_add(&nt->list, &target_list);
+ spin_unlock_irqrestore(&target_list_lock, flags);
+
+out_unlock:
+ dynamic_netconsole_mutex_unlock();
+}
+
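The new enum turns netconsole's old boolean into a small state machine: DEACTIVATED is an inactive state the kernel entered on its own and may leave on its own, while user writes always collapse it back to ENABLED or DISABLED. A runnable sketch of those transitions (condensed and hypothetical; the driver's real logic is spread across the notifier, the worker and enabled_store()):

#include <stdbool.h>
#include <stdio.h>

enum target_state { ST_DISABLED, ST_ENABLED, ST_DEACTIVATED };

/* Only an enabled target is deactivated by interface removal. */
static enum target_state on_iface_unregister(enum target_state s)
{
	return s == ST_ENABLED ? ST_DEACTIVATED : s;
}

/* Only a deactivated target may be auto-resumed; a failed
 * netpoll_setup() drops it to disabled for good.
 */
static enum target_state on_iface_register(enum target_state s, bool resume_ok)
{
	if (s == ST_DEACTIVATED)
		return resume_ok ? ST_ENABLED : ST_DISABLED;
	return s;
}

/* User intent always overrides DEACTIVATED. */
static enum target_state on_user_toggle(enum target_state s, bool enable)
{
	(void)s;
	return enable ? ST_ENABLED : ST_DISABLED;
}

int main(void)
{
	enum target_state s = ST_ENABLED;

	s = on_iface_unregister(s);	/* -> DEACTIVATED */
	s = on_iface_register(s, true);	/* netpoll_setup() ok -> ENABLED */
	s = on_user_toggle(s, false);	/* -> DISABLED */
	printf("final state: %d\n", s);
	return 0;
}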
/* Allocate and initialize with defaults.
* Note that these targets get their config_item fields zeroed-out.
*/
@@ -257,6 +361,8 @@ static struct netconsole_target *alloc_and_init(void)
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
eth_broadcast_addr(nt->np.remote_mac);
+ nt->state = STATE_DISABLED;
+ INIT_WORK(&nt->resume_wq, process_resume_target);
return nt;
}
@@ -275,8 +381,10 @@ static void netconsole_process_cleanups_core(void)
mutex_lock(&target_cleanup_list_lock);
list_for_each_entry_safe(nt, tmp, &target_cleanup_list, list) {
 /* all entries in the cleanup_list need to be disabled */
- WARN_ON_ONCE(nt->enabled);
+ WARN_ON_ONCE(nt->state == STATE_ENABLED);
do_netpoll_cleanup(&nt->np);
+ if (bound_by_mac(nt))
+ memset(&nt->np.dev_name, 0, IFNAMSIZ);
/* moved the cleaned target to target_list. Need to hold both
* locks
*/
@@ -398,7 +506,7 @@ static void trim_newline(char *s, size_t maxlen)
static ssize_t enabled_show(struct config_item *item, char *buf)
{
- return sysfs_emit(buf, "%d\n", to_target(item)->enabled);
+ return sysfs_emit(buf, "%d\n", to_target(item)->state == STATE_ENABLED);
}
static ssize_t extended_show(struct config_item *item, char *buf)
@@ -480,9 +588,9 @@ static ssize_t sysdata_cpu_nr_enabled_show(struct config_item *item, char *buf)
struct netconsole_target *nt = to_target(item->ci_parent);
bool cpu_nr_enabled;
- mutex_lock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_lock();
cpu_nr_enabled = !!(nt->sysdata_fields & SYSDATA_CPU_NR);
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return sysfs_emit(buf, "%d\n", cpu_nr_enabled);
}
@@ -494,9 +602,9 @@ static ssize_t sysdata_taskname_enabled_show(struct config_item *item,
struct netconsole_target *nt = to_target(item->ci_parent);
bool taskname_enabled;
- mutex_lock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_lock();
taskname_enabled = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return sysfs_emit(buf, "%d\n", taskname_enabled);
}
@@ -507,9 +615,9 @@ static ssize_t sysdata_release_enabled_show(struct config_item *item,
struct netconsole_target *nt = to_target(item->ci_parent);
bool release_enabled;
- mutex_lock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_lock();
 release_enabled = !!(nt->sysdata_fields & SYSDATA_RELEASE);
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return sysfs_emit(buf, "%d\n", release_enabled);
}
@@ -547,9 +655,9 @@ static ssize_t sysdata_msgid_enabled_show(struct config_item *item,
struct netconsole_target *nt = to_target(item->ci_parent);
bool msgid_enabled;
- mutex_lock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_lock();
msgid_enabled = !!(nt->sysdata_fields & SYSDATA_MSGID);
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return sysfs_emit(buf, "%d\n", msgid_enabled);
}
@@ -565,19 +673,28 @@ static ssize_t enabled_store(struct config_item *item,
const char *buf, size_t count)
{
struct netconsole_target *nt = to_target(item);
+ bool enabled, current_enabled;
unsigned long flags;
- bool enabled;
ssize_t ret;
- mutex_lock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_lock();
ret = kstrtobool(buf, &enabled);
if (ret)
goto out_unlock;
+ /* When the user explicitly enables or disables a target that is
+ * currently deactivated, reset its state to disabled. The DEACTIVATED
+ * state only tracks interface-driven deactivation and should _not_
+ * persist when the user manually changes the target's enabled state.
+ */
+ if (nt->state == STATE_DEACTIVATED)
+ nt->state = STATE_DISABLED;
+
ret = -EINVAL;
- if (enabled == nt->enabled) {
+ current_enabled = nt->state == STATE_ENABLED;
+ if (enabled == current_enabled) {
pr_info("network logging has already %s\n",
- nt->enabled ? "started" : "stopped");
+ current_enabled ? "started" : "stopped");
goto out_unlock;
}
@@ -610,16 +727,16 @@ static ssize_t enabled_store(struct config_item *item,
if (ret)
goto out_unlock;
- nt->enabled = true;
+ nt->state = STATE_ENABLED;
pr_info("network logging started\n");
} else { /* false */
/* We need to disable the netconsole before cleaning it up
* otherwise we might end up in write_msg() with
- * nt->np.dev == NULL and nt->enabled == true
+ * nt->np.dev == NULL and nt->state == STATE_ENABLED
*/
mutex_lock(&target_cleanup_list_lock);
spin_lock_irqsave(&target_list_lock, flags);
- nt->enabled = false;
+ nt->state = STATE_DISABLED;
/* Remove the target from the list, while holding
* target_list_lock
*/
@@ -636,7 +753,7 @@ static ssize_t enabled_store(struct config_item *item,
/* Deferred cleanup */
netconsole_process_cleanups();
out_unlock:
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return ret;
}
@@ -647,8 +764,8 @@ static ssize_t release_store(struct config_item *item, const char *buf,
bool release;
ssize_t ret;
- mutex_lock(&dynamic_netconsole_mutex);
- if (nt->enabled) {
+ dynamic_netconsole_mutex_lock();
+ if (nt->state == STATE_ENABLED) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
ret = -EINVAL;
@@ -663,7 +780,7 @@ static ssize_t release_store(struct config_item *item, const char *buf,
ret = strnlen(buf, count);
out_unlock:
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return ret;
}
@@ -674,8 +791,8 @@ static ssize_t extended_store(struct config_item *item, const char *buf,
bool extended;
ssize_t ret;
- mutex_lock(&dynamic_netconsole_mutex);
- if (nt->enabled) {
+ dynamic_netconsole_mutex_lock();
+ if (nt->state == STATE_ENABLED) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
ret = -EINVAL;
@@ -689,7 +806,7 @@ static ssize_t extended_store(struct config_item *item, const char *buf,
nt->extended = extended;
ret = strnlen(buf, count);
out_unlock:
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return ret;
}
@@ -698,18 +815,18 @@ static ssize_t dev_name_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
- mutex_lock(&dynamic_netconsole_mutex);
- if (nt->enabled) {
+ dynamic_netconsole_mutex_lock();
+ if (nt->state == STATE_ENABLED) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return -EINVAL;
}
strscpy(nt->np.dev_name, buf, IFNAMSIZ);
trim_newline(nt->np.dev_name, IFNAMSIZ);
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return strnlen(buf, count);
}
@@ -719,8 +836,8 @@ static ssize_t local_port_store(struct config_item *item, const char *buf,
struct netconsole_target *nt = to_target(item);
ssize_t ret = -EINVAL;
- mutex_lock(&dynamic_netconsole_mutex);
- if (nt->enabled) {
+ dynamic_netconsole_mutex_lock();
+ if (nt->state == STATE_ENABLED) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
goto out_unlock;
@@ -731,7 +848,7 @@ static ssize_t local_port_store(struct config_item *item, const char *buf,
goto out_unlock;
ret = strnlen(buf, count);
out_unlock:
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return ret;
}
@@ -741,8 +858,8 @@ static ssize_t remote_port_store(struct config_item *item,
struct netconsole_target *nt = to_target(item);
ssize_t ret = -EINVAL;
- mutex_lock(&dynamic_netconsole_mutex);
- if (nt->enabled) {
+ dynamic_netconsole_mutex_lock();
+ if (nt->state == STATE_ENABLED) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
goto out_unlock;
@@ -753,7 +870,7 @@ static ssize_t remote_port_store(struct config_item *item,
goto out_unlock;
ret = strnlen(buf, count);
out_unlock:
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return ret;
}
@@ -764,8 +881,8 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
ssize_t ret = -EINVAL;
int ipv6;
- mutex_lock(&dynamic_netconsole_mutex);
- if (nt->enabled) {
+ dynamic_netconsole_mutex_lock();
+ if (nt->state == STATE_ENABLED) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
goto out_unlock;
@@ -778,7 +895,7 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
ret = strnlen(buf, count);
out_unlock:
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return ret;
}
@@ -789,8 +906,8 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
ssize_t ret = -EINVAL;
int ipv6;
- mutex_lock(&dynamic_netconsole_mutex);
- if (nt->enabled) {
+ dynamic_netconsole_mutex_lock();
+ if (nt->state == STATE_ENABLED) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
goto out_unlock;
@@ -803,7 +920,7 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
ret = strnlen(buf, count);
out_unlock:
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return ret;
}
@@ -824,8 +941,8 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
u8 remote_mac[ETH_ALEN];
ssize_t ret = -EINVAL;
- mutex_lock(&dynamic_netconsole_mutex);
- if (nt->enabled) {
+ dynamic_netconsole_mutex_lock();
+ if (nt->state == STATE_ENABLED) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
goto out_unlock;
@@ -839,7 +956,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
ret = strnlen(buf, count);
out_unlock:
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
return ret;
}
@@ -960,7 +1077,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
return -EMSGSIZE;
mutex_lock(&netconsole_subsys.su_mutex);
- mutex_lock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_lock();
ret = strscpy(udm->value, buf, sizeof(udm->value));
if (ret < 0)
@@ -974,7 +1091,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
goto out_unlock;
ret = count;
out_unlock:
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -1002,7 +1119,7 @@ static ssize_t sysdata_msgid_enabled_store(struct config_item *item,
return ret;
mutex_lock(&netconsole_subsys.su_mutex);
- mutex_lock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_lock();
curr = !!(nt->sysdata_fields & SYSDATA_MSGID);
if (msgid_enabled == curr)
goto unlock_ok;
@@ -1014,7 +1131,7 @@ static ssize_t sysdata_msgid_enabled_store(struct config_item *item,
unlock_ok:
ret = strnlen(buf, count);
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -1031,7 +1148,7 @@ static ssize_t sysdata_release_enabled_store(struct config_item *item,
return ret;
mutex_lock(&netconsole_subsys.su_mutex);
- mutex_lock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_lock();
curr = !!(nt->sysdata_fields & SYSDATA_RELEASE);
if (release_enabled == curr)
goto unlock_ok;
@@ -1043,7 +1160,7 @@ static ssize_t sysdata_release_enabled_store(struct config_item *item,
unlock_ok:
ret = strnlen(buf, count);
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -1060,7 +1177,7 @@ static ssize_t sysdata_taskname_enabled_store(struct config_item *item,
return ret;
mutex_lock(&netconsole_subsys.su_mutex);
- mutex_lock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_lock();
curr = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
if (taskname_enabled == curr)
goto unlock_ok;
@@ -1072,7 +1189,7 @@ static ssize_t sysdata_taskname_enabled_store(struct config_item *item,
unlock_ok:
ret = strnlen(buf, count);
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -1090,7 +1207,7 @@ static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item,
return ret;
mutex_lock(&netconsole_subsys.su_mutex);
- mutex_lock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_lock();
curr = !!(nt->sysdata_fields & SYSDATA_CPU_NR);
if (cpu_nr_enabled == curr)
/* no change requested */
@@ -1106,7 +1223,7 @@ static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item,
unlock_ok:
ret = strnlen(buf, count);
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -1168,10 +1285,10 @@ static void userdatum_drop(struct config_group *group, struct config_item *item)
ud = to_userdata(&group->cg_item);
nt = userdata_to_target(ud);
- mutex_lock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_lock();
update_userdata(nt);
config_item_put(item);
- mutex_unlock(&dynamic_netconsole_mutex);
+ dynamic_netconsole_mutex_unlock();
}
static struct configfs_attribute *userdata_attrs[] = {
@@ -1310,18 +1427,34 @@ static struct config_group *make_netconsole_target(struct config_group *group,
static void drop_netconsole_target(struct config_group *group,
struct config_item *item)
{
- unsigned long flags;
struct netconsole_target *nt = to_target(item);
+ unsigned long flags;
+
+ dynamic_netconsole_mutex_lock();
spin_lock_irqsave(&target_list_lock, flags);
+ /* Disable a deactivated target to prevent races between a resume
+ * attempt and target removal.
+ */
+ if (nt->state == STATE_DEACTIVATED)
+ nt->state = STATE_DISABLED;
list_del(&nt->list);
spin_unlock_irqrestore(&target_list_lock, flags);
+ dynamic_netconsole_mutex_unlock();
+
+ /* Now that the target has been marked disabled, no further work
+ * can be scheduled. Work already in flight will bail out since the
+ * target is no longer deactivated. Cancel any scheduled resume and
+ * wait for completion.
+ */
+ cancel_work_sync(&nt->resume_wq);
+
/*
* The target may have never been enabled, or was manually disabled
* before being removed so netpoll may have already been cleaned up.
*/
- if (nt->enabled)
+ if (nt->state == STATE_ENABLED)
netpoll_cleanup(&nt->np);
config_item_put(&nt->group.cg_item);
@@ -1357,18 +1490,20 @@ static void populate_configfs_item(struct netconsole_target *nt,
init_target_config_group(nt, target_name);
}
-static int sysdata_append_cpu_nr(struct netconsole_target *nt, int offset)
+static int sysdata_append_cpu_nr(struct netconsole_target *nt, int offset,
+ struct nbcon_write_context *wctxt)
{
return scnprintf(&nt->sysdata[offset],
MAX_EXTRADATA_ENTRY_LEN, " cpu=%u\n",
- raw_smp_processor_id());
+ wctxt->cpu);
}
-static int sysdata_append_taskname(struct netconsole_target *nt, int offset)
+static int sysdata_append_taskname(struct netconsole_target *nt, int offset,
+ struct nbcon_write_context *wctxt)
{
return scnprintf(&nt->sysdata[offset],
MAX_EXTRADATA_ENTRY_LEN, " taskname=%s\n",
- current->comm);
+ wctxt->comm);
}
static int sysdata_append_release(struct netconsole_target *nt, int offset)
@@ -1389,8 +1524,10 @@ static int sysdata_append_msgid(struct netconsole_target *nt, int offset)
/*
* prepare_sysdata - append sysdata in runtime
* @nt: target to send message to
+ * @wctxt: nbcon write context containing message metadata
*/
-static int prepare_sysdata(struct netconsole_target *nt)
+static int prepare_sysdata(struct netconsole_target *nt,
+ struct nbcon_write_context *wctxt)
{
int sysdata_len = 0;
@@ -1398,9 +1535,9 @@ static int prepare_sysdata(struct netconsole_target *nt)
goto out;
if (nt->sysdata_fields & SYSDATA_CPU_NR)
- sysdata_len += sysdata_append_cpu_nr(nt, sysdata_len);
+ sysdata_len += sysdata_append_cpu_nr(nt, sysdata_len, wctxt);
if (nt->sysdata_fields & SYSDATA_TASKNAME)
- sysdata_len += sysdata_append_taskname(nt, sysdata_len);
+ sysdata_len += sysdata_append_taskname(nt, sysdata_len, wctxt);
if (nt->sysdata_fields & SYSDATA_RELEASE)
sysdata_len += sysdata_append_release(nt, sysdata_len);
if (nt->sysdata_fields & SYSDATA_MSGID)
@@ -1418,13 +1555,14 @@ out:
static int netconsole_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
- unsigned long flags;
- struct netconsole_target *nt, *tmp;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netconsole_target *nt, *tmp;
bool stopped = false;
+ unsigned long flags;
if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
- event == NETDEV_RELEASE || event == NETDEV_JOIN))
+ event == NETDEV_RELEASE || event == NETDEV_JOIN ||
+ event == NETDEV_REGISTER))
goto done;
mutex_lock(&target_cleanup_list_lock);
@@ -1438,12 +1576,28 @@ static int netconsole_netdev_event(struct notifier_block *this,
break;
case NETDEV_RELEASE:
case NETDEV_JOIN:
+ /* Transition the target to DISABLED instead of
+ * DEACTIVATED when (de)enslaving devices, as
+ * such targets should not be automatically
+ * resumed when the interface is brought up.
+ */
+ nt->state = STATE_DISABLED;
+ list_move(&nt->list, &target_cleanup_list);
+ stopped = true;
+ break;
case NETDEV_UNREGISTER:
- nt->enabled = false;
+ nt->state = STATE_DEACTIVATED;
list_move(&nt->list, &target_cleanup_list);
stopped = true;
}
}
+ if ((event == NETDEV_REGISTER || event == NETDEV_CHANGENAME) &&
+ deactivated_target_match(nt, dev))
+ /* Schedule the resume on a workqueue, as it will attempt
+ * to bring the device up, which can't be done from this
+ * notifier.
+ */
+ queue_work(netconsole_wq, &nt->resume_wq);
netconsole_target_put(nt);
}
spin_unlock_irqrestore(&target_list_lock, flags);
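Resuming from the notifier itself is impossible because netpoll_setup() sleeps and may bring the interface up, while here target_list_lock is held with IRQs off; hence the hand-off to netconsole_wq. The shape of that defer-to-worker pattern as a runnable userspace analogue (a pthread stands in for the workqueue):

#include <pthread.h>
#include <stdio.h>

/* Minimal stand-in for queue_work(): hand the slow job to another
 * thread so the "notifier" caller never sleeps itself.
 */
static void *resume_worker(void *arg)
{
	const char *ifname = arg;

	/* Here the real driver may sleep: netpoll_setup(), locks... */
	printf("resumed logging on %s\n", ifname);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	/* "Notifier" context: just schedule and return immediately. */
	pthread_create(&worker, NULL, resume_worker, "eth0");
	pthread_join(worker, NULL);
	return 0;
}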
@@ -1681,81 +1835,108 @@ static void send_msg_fragmented(struct netconsole_target *nt,
/**
* send_ext_msg_udp - send extended log message to target
* @nt: target to send message to
- * @msg: extended log message to send
- * @msg_len: length of message
+ * @wctxt: nbcon write context containing message and metadata
*
- * Transfer extended log @msg to @nt. If @msg is longer than
+ * Transfer the extended log message to @nt. If the message is longer than
* MAX_PRINT_CHUNK, it'll be split and transmitted in multiple chunks with
* ncfrag header field added to identify them.
*/
-static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
- int msg_len)
+static void send_ext_msg_udp(struct netconsole_target *nt,
+ struct nbcon_write_context *wctxt)
{
int userdata_len = 0;
int release_len = 0;
int sysdata_len = 0;
+ int len;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- sysdata_len = prepare_sysdata(nt);
+ sysdata_len = prepare_sysdata(nt, wctxt);
userdata_len = nt->userdata_length;
#endif
if (nt->release)
release_len = strlen(init_utsname()->release) + 1;
- if (msg_len + release_len + sysdata_len + userdata_len <= MAX_PRINT_CHUNK)
- return send_msg_no_fragmentation(nt, msg, msg_len, release_len);
+ len = wctxt->len + release_len + sysdata_len + userdata_len;
+ if (len <= MAX_PRINT_CHUNK)
+ return send_msg_no_fragmentation(nt, wctxt->outbuf,
+ wctxt->len, release_len);
- return send_msg_fragmented(nt, msg, msg_len, release_len,
+ return send_msg_fragmented(nt, wctxt->outbuf, wctxt->len, release_len,
sysdata_len);
}
-static void write_ext_msg(struct console *con, const char *msg,
- unsigned int len)
+static void send_msg_udp(struct netconsole_target *nt, const char *msg,
+ unsigned int len)
{
- struct netconsole_target *nt;
- unsigned long flags;
+ const char *tmp = msg;
+ int frag, left = len;
- if ((oops_only && !oops_in_progress) || list_empty(&target_list))
- return;
-
- spin_lock_irqsave(&target_list_lock, flags);
- list_for_each_entry(nt, &target_list, list)
- if (nt->extended && nt->enabled && netif_running(nt->np.dev))
- send_ext_msg_udp(nt, msg, len);
- spin_unlock_irqrestore(&target_list_lock, flags);
+ while (left > 0) {
+ frag = min(left, MAX_PRINT_CHUNK);
+ send_udp(nt, tmp, frag);
+ tmp += frag;
+ left -= frag;
+ }
}
-static void write_msg(struct console *con, const char *msg, unsigned int len)
+/**
+ * netconsole_write - Generic function to send a message to all targets
+ * @wctxt: nbcon write context
+ * @extended: "true" for extended console mode
+ *
+ * Given an nbcon write context, send the message to all netconsole targets.
+ */
+static void netconsole_write(struct nbcon_write_context *wctxt, bool extended)
{
- int frag, left;
- unsigned long flags;
struct netconsole_target *nt;
- const char *tmp;
if (oops_only && !oops_in_progress)
return;
- /* Avoid taking lock and disabling interrupts unnecessarily */
- if (list_empty(&target_list))
- return;
- spin_lock_irqsave(&target_list_lock, flags);
list_for_each_entry(nt, &target_list, list) {
- if (!nt->extended && nt->enabled && netif_running(nt->np.dev)) {
- /*
- * We nest this inside the for-each-target loop above
- * so that we're able to get as much logging out to
- * at least one target if we die inside here, instead
- * of unnecessarily keeping all targets in lock-step.
- */
- tmp = msg;
- for (left = len; left;) {
- frag = min(left, MAX_PRINT_CHUNK);
- send_udp(nt, tmp, frag);
- tmp += frag;
- left -= frag;
- }
- }
+ if (nt->extended != extended || nt->state != STATE_ENABLED ||
+ !netif_running(nt->np.dev))
+ continue;
+
+ /* If nbcon_enter_unsafe() fails, just return: netconsole has
+ * lost console ownership, and continuing to iterate over the
+ * targets cannot re-acquire it.
+ */
+ if (!nbcon_enter_unsafe(wctxt))
+ return;
+
+ if (extended)
+ send_ext_msg_udp(nt, wctxt);
+ else
+ send_msg_udp(nt, wctxt->outbuf, wctxt->len);
+
+ nbcon_exit_unsafe(wctxt);
}
+}
+
+static void netconsole_write_ext(struct console *con __always_unused,
+ struct nbcon_write_context *wctxt)
+{
+ netconsole_write(wctxt, true);
+}
+
+static void netconsole_write_basic(struct console *con __always_unused,
+ struct nbcon_write_context *wctxt)
+{
+ netconsole_write(wctxt, false);
+}
+
+static void netconsole_device_lock(struct console *con __always_unused,
+ unsigned long *flags)
+__acquires(&target_list_lock)
+{
+ spin_lock_irqsave(&target_list_lock, *flags);
+}
+
+static void netconsole_device_unlock(struct console *con __always_unused,
+ unsigned long flags)
+__releases(&target_list_lock)
+{
spin_unlock_irqrestore(&target_list_lock, flags);
}
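send_msg_udp() factors out the chunking loop that write_msg() used to carry inline: walk the buffer in MAX_PRINT_CHUNK pieces so each datagram stays within the payload limit. The same loop in self-contained form (CHUNK and tx_print are illustrative stand-ins):

#include <stdio.h>
#include <string.h>

#define CHUNK 16	/* stand-in for MAX_PRINT_CHUNK */

/* Same shape as send_msg_udp() above: walk the buffer in CHUNK-sized
 * pieces so each datagram fits the target's payload limit.
 */
static void send_chunked(const char *msg, size_t len,
			 void (*tx)(const char *, size_t))
{
	while (len > 0) {
		size_t frag = len < CHUNK ? len : CHUNK;

		tx(msg, frag);
		msg += frag;
		len -= frag;
	}
}

static void tx_print(const char *p, size_t n)
{
	printf("[%.*s]\n", (int)n, p);
}

int main(void)
{
	const char *m = "a fairly long console line to be fragmented";

	send_chunked(m, strlen(m), tx_print);
	return 0;
}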
@@ -1896,7 +2077,7 @@ static struct netconsole_target *alloc_param_target(char *target_config,
*/
goto fail;
} else {
- nt->enabled = true;
+ nt->state = STATE_ENABLED;
}
populate_configfs_item(nt, cmdline_count);
@@ -1910,6 +2091,7 @@ fail:
/* Cleanup netpoll for given target (from boot/module param) and free it */
static void free_param_target(struct netconsole_target *nt)
{
+ cancel_work_sync(&nt->resume_wq);
netpoll_cleanup(&nt->np);
#ifdef CONFIG_NETCONSOLE_DYNAMIC
kfree(nt->userdata);
@@ -1918,15 +2100,21 @@ static void free_param_target(struct netconsole_target *nt)
}
static struct console netconsole_ext = {
- .name = "netcon_ext",
- .flags = CON_ENABLED | CON_EXTENDED,
- .write = write_ext_msg,
+ .name = "netcon_ext",
+ .flags = CON_ENABLED | CON_EXTENDED | CON_NBCON | CON_NBCON_ATOMIC_UNSAFE,
+ .write_thread = netconsole_write_ext,
+ .write_atomic = netconsole_write_ext,
+ .device_lock = netconsole_device_lock,
+ .device_unlock = netconsole_device_unlock,
};
static struct console netconsole = {
- .name = "netcon",
- .flags = CON_ENABLED,
- .write = write_msg,
+ .name = "netcon",
+ .flags = CON_ENABLED | CON_NBCON | CON_NBCON_ATOMIC_UNSAFE,
+ .write_thread = netconsole_write_basic,
+ .write_atomic = netconsole_write_basic,
+ .device_lock = netconsole_device_lock,
+ .device_unlock = netconsole_device_unlock,
};
static int __init init_netconsole(void)
@@ -1964,6 +2152,12 @@ static int __init init_netconsole(void)
}
}
+ netconsole_wq = alloc_workqueue("netconsole", WQ_UNBOUND, 0);
+ if (!netconsole_wq) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
err = register_netdevice_notifier(&netconsole_netdev_notifier);
if (err)
goto fail;
@@ -1986,6 +2180,8 @@ undonotifier:
fail:
pr_err("cleaning up\n");
+ if (netconsole_wq)
+ flush_workqueue(netconsole_wq);
/*
* Remove all targets and destroy them (only targets created
* from the boot/module option exist here). Skipping the list
@@ -1996,6 +2192,9 @@ fail:
free_param_target(nt);
}
+ if (netconsole_wq)
+ destroy_workqueue(netconsole_wq);
+
return err;
}
@@ -2009,6 +2208,7 @@ static void __exit cleanup_netconsole(void)
unregister_console(&netconsole);
dynamic_netconsole_exit();
unregister_netdevice_notifier(&netconsole_netdev_notifier);
+ flush_workqueue(netconsole_wq);
/*
* Targets created via configfs pin references on our module
@@ -2022,6 +2222,8 @@ static void __exit cleanup_netconsole(void)
list_del(&nt->list);
free_param_target(nt);
}
+
+ destroy_workqueue(netconsole_wq);
}
/*
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 6927c1962277..6285fbefe38a 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -758,7 +758,9 @@ struct nsim_queue_mem {
};
static int
-nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
+nsim_queue_mem_alloc(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *per_queue_mem, int idx)
{
struct nsim_queue_mem *qmem = per_queue_mem;
struct netdevsim *ns = netdev_priv(dev);
@@ -807,7 +809,8 @@ static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem)
}
static int
-nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
+nsim_queue_start(struct net_device *dev, struct netdev_queue_config *qcfg,
+ void *per_queue_mem, int idx)
{
struct nsim_queue_mem *qmem = per_queue_mem;
struct netdevsim *ns = netdev_priv(dev);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 46c67983c517..f767fc8a7505 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -109,10 +109,10 @@ struct netdevsim {
int rq_reset_mode;
struct {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
struct psp_dev *dev;
u32 spi;
diff --git a/drivers/net/netdevsim/psp.c b/drivers/net/netdevsim/psp.c
index 727da06101ca..0b4d717253b0 100644
--- a/drivers/net/netdevsim/psp.c
+++ b/drivers/net/netdevsim/psp.c
@@ -72,10 +72,12 @@ nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
skb->decrypted = 1;
u64_stats_update_begin(&ns->psp.syncp);
- ns->psp.tx_packets++;
- ns->psp.rx_packets++;
- ns->psp.tx_bytes += skb->len - skb_inner_transport_offset(skb);
- ns->psp.rx_bytes += skb->len - skb_inner_transport_offset(skb);
+ u64_stats_inc(&ns->psp.tx_packets);
+ u64_stats_inc(&ns->psp.rx_packets);
+ u64_stats_add(&ns->psp.tx_bytes,
+ skb->len - skb_inner_transport_offset(skb));
+ u64_stats_add(&ns->psp.rx_bytes,
+ skb->len - skb_inner_transport_offset(skb));
u64_stats_update_end(&ns->psp.syncp);
} else {
struct ipv6hdr *ip6h __maybe_unused;
@@ -183,10 +185,10 @@ static void nsim_get_stats(struct psp_dev *psd, struct psp_dev_stats *stats)
do {
start = u64_stats_fetch_begin(&ns->psp.syncp);
- stats->rx_bytes = ns->psp.rx_bytes;
- stats->rx_packets = ns->psp.rx_packets;
- stats->tx_bytes = ns->psp.tx_bytes;
- stats->tx_packets = ns->psp.tx_packets;
+ stats->rx_bytes = u64_stats_read(&ns->psp.rx_bytes);
+ stats->rx_packets = u64_stats_read(&ns->psp.rx_packets);
+ stats->tx_bytes = u64_stats_read(&ns->psp.tx_bytes);
+ stats->tx_packets = u64_stats_read(&ns->psp.tx_packets);
} while (u64_stats_fetch_retry(&ns->psp.syncp, start));
}
diff --git a/drivers/net/ovpn/peer.c b/drivers/net/ovpn/peer.c
index 4bfcab0c8652..0463b5b0542f 100644
--- a/drivers/net/ovpn/peer.c
+++ b/drivers/net/ovpn/peer.c
@@ -61,7 +61,7 @@ void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout)
/* now that interval and timeout have been changed, kick
* off the worker so that the next delay can be recomputed
*/
- mod_delayed_work(system_wq, &peer->ovpn->keepalive_work, 0);
+ mod_delayed_work(system_percpu_wq, &peer->ovpn->keepalive_work, 0);
}
/**
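The ovpn change only retargets the queue, but the call relies on mod_delayed_work() semantics: an already-pending work item has its timer moved rather than being queued a second time, which is what lets the keepalive worker recompute its delay immediately. A toy model of that behavior (not the kernel implementation):

#include <stdio.h>

/* mod_delayed_work() semantics in miniature: if the work is already
 * pending, just move its deadline; otherwise queue it fresh.
 */
struct dwork {
	int pending;
	unsigned long expires;
};

static void mod_dwork(struct dwork *w, unsigned long now, unsigned long delay)
{
	w->expires = now + delay;	/* re-arm regardless... */
	w->pending = 1;			/* ...and queue if it was idle */
}

int main(void)
{
	struct dwork keepalive = { 0, 0 };

	mod_dwork(&keepalive, 100, 30);
	mod_dwork(&keepalive, 110, 0);	/* interval changed: fire ASAP */
	printf("pending=%d expires=%lu\n",
	       keepalive.pending, keepalive.expires);
	return 0;
}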
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
index ecbc3530e780..e417fd66f660 100644
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
@@ -20,6 +20,7 @@ config PCS_LYNX
config PCS_MTK_LYNXI
tristate
+ select PHY_COMMON_PROPS
select REGMAP
help
This module provides helpers to phylink for managing the LynxI PCS
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 87df3a9dfc9b..44006bb6ac0b 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -11,6 +11,7 @@
#include <linux/mdio.h>
#include <linux/of.h>
#include <linux/pcs/pcs-mtk-lynxi.h>
+#include <linux/phy/phy-common-props.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
@@ -62,8 +63,9 @@
/* Register to QPHY wrapper control */
#define SGMSYS_QPHY_WRAP_CTRL 0xec
-#define SGMII_PN_SWAP_MASK GENMASK(1, 0)
-#define SGMII_PN_SWAP_TX_RX (BIT(0) | BIT(1))
+#define SGMII_PN_SWAP_RX BIT(1)
+#define SGMII_PN_SWAP_TX BIT(0)
+
/* struct mtk_pcs_lynxi - This structure holds each sgmii regmap and associated
* data
@@ -81,6 +83,7 @@ struct mtk_pcs_lynxi {
phy_interface_t interface;
struct phylink_pcs pcs;
u32 flags;
+ struct fwnode_handle *fwnode;
};
static struct mtk_pcs_lynxi *pcs_to_mtk_pcs_lynxi(struct phylink_pcs *pcs)
@@ -118,6 +121,42 @@ static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs,
FIELD_GET(SGMII_LPA, adv));
}
+static int mtk_pcs_config_polarity(struct mtk_pcs_lynxi *mpcs,
+ phy_interface_t interface)
+{
+ struct fwnode_handle *fwnode = mpcs->fwnode, *pcs_fwnode;
+ unsigned int pol, default_pol = PHY_POL_NORMAL;
+ unsigned int val = 0;
+ int ret;
+
+ if (fwnode_property_read_bool(fwnode, "mediatek,pnswap"))
+ default_pol = PHY_POL_INVERT;
+
+ pcs_fwnode = fwnode_get_named_child_node(fwnode, "pcs");
+
+ ret = phy_get_rx_polarity(pcs_fwnode, phy_modes(interface),
+ BIT(PHY_POL_NORMAL) | BIT(PHY_POL_INVERT),
+ default_pol, &pol);
+ if (ret) {
+ fwnode_handle_put(pcs_fwnode);
+ return ret;
+ }
+ if (pol == PHY_POL_INVERT)
+ val |= SGMII_PN_SWAP_RX;
+
+ ret = phy_get_tx_polarity(pcs_fwnode, phy_modes(interface),
+ BIT(PHY_POL_NORMAL) | BIT(PHY_POL_INVERT),
+ default_pol, &pol);
+ fwnode_handle_put(pcs_fwnode);
+ if (ret)
+ return ret;
+ if (pol == PHY_POL_INVERT)
+ val |= SGMII_PN_SWAP_TX;
+
+ return regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_WRAP_CTRL,
+ SGMII_PN_SWAP_RX | SGMII_PN_SWAP_TX, val);
+}
+
static int mtk_pcs_lynxi_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
@@ -127,6 +166,7 @@ static int mtk_pcs_lynxi_config(struct phylink_pcs *pcs, unsigned int neg_mode,
bool mode_changed = false, changed;
unsigned int rgc3, sgm_mode, bmcr;
int advertise, link_timer;
+ int ret;
advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
advertising);
@@ -166,10 +206,9 @@ static int mtk_pcs_lynxi_config(struct phylink_pcs *pcs, unsigned int neg_mode,
regmap_set_bits(mpcs->regmap, SGMSYS_RESERVED_0,
SGMII_SW_RESET);
- if (mpcs->flags & MTK_SGMII_FLAG_PN_SWAP)
- regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_WRAP_CTRL,
- SGMII_PN_SWAP_MASK,
- SGMII_PN_SWAP_TX_RX);
+ ret = mtk_pcs_config_polarity(mpcs, interface);
+ if (ret)
+ return ret;
if (interface == PHY_INTERFACE_MODE_2500BASEX)
rgc3 = SGMII_PHY_SPEED_3_125G;
@@ -266,8 +305,8 @@ static const struct phylink_pcs_ops mtk_pcs_lynxi_ops = {
};
struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
- struct regmap *regmap, u32 ana_rgc3,
- u32 flags)
+ struct fwnode_handle *fwnode,
+ struct regmap *regmap, u32 ana_rgc3)
{
struct mtk_pcs_lynxi *mpcs;
u32 id, ver;
@@ -301,10 +340,10 @@ struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
mpcs->ana_rgc3 = ana_rgc3;
mpcs->regmap = regmap;
- mpcs->flags = flags;
mpcs->pcs.ops = &mtk_pcs_lynxi_ops;
mpcs->pcs.poll = true;
mpcs->interface = PHY_INTERFACE_MODE_NA;
+ mpcs->fwnode = fwnode_handle_get(fwnode);
__set_bit(PHY_INTERFACE_MODE_SGMII, mpcs->pcs.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX, mpcs->pcs.supported_interfaces);
@@ -316,10 +355,14 @@ EXPORT_SYMBOL(mtk_pcs_lynxi_create);
void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs)
{
+ struct mtk_pcs_lynxi *mpcs;
+
if (!pcs)
return;
- kfree(pcs_to_mtk_pcs_lynxi(pcs));
+ mpcs = pcs_to_mtk_pcs_lynxi(pcs);
+ fwnode_handle_put(mpcs->fwnode);
+ kfree(mpcs);
}
EXPORT_SYMBOL(mtk_pcs_lynxi_destroy);
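The LynxI PCS now derives per-direction P/N polarity from firmware properties instead of a single MTK_SGMII_FLAG_PN_SWAP flag: the legacy "mediatek,pnswap" boolean sets the default, and an optional "pcs" child node may override each direction. A sketch of that resolution order (the int flags stand in for the fwnode property helpers):

#include <stdio.h>

enum pol { POL_NORMAL, POL_INVERT };

/* Per-direction resolution: a per-mode override from the "pcs" child
 * node wins; otherwise fall back to the legacy pnswap default.
 */
static enum pol resolve_pol(int legacy_pnswap, int has_override,
			    enum pol override)
{
	if (has_override)
		return override;
	return legacy_pnswap ? POL_INVERT : POL_NORMAL;
}

int main(void)
{
	printf("rx=%d tx=%d\n",
	       resolve_pol(1, 0, POL_NORMAL),	/* legacy pnswap -> invert */
	       resolve_pol(1, 1, POL_NORMAL));	/* node override -> normal */
	return 0;
}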
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index 885f17c32643..8d7f82c1df2f 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -28,6 +28,8 @@
#define MIIC_MODCTRL 0x8
+#define MIIC_PHY_LINK 0x14
+
#define MIIC_CONVCTRL(port) (0x100 + (port) * 4)
#define MIIC_CONVCTRL_CONV_SPEED GENMASK(1, 0)
@@ -178,12 +180,23 @@ static const char * const rzt2h_reset_ids[] = {
};
/**
+ * struct miic_phy_link_cfg - MIIC PHY_LINK configuration
+ * @mask: Mask of phy_link bits
+ * @val: Value of phy_link bits
+ */
+struct miic_phy_link_cfg {
+ u32 mask;
+ u32 val;
+};
+
+/**
* struct miic - MII converter structure
* @base: base address of the MII converter
* @dev: Device associated to the MII converter
* @lock: Lock used for read-modify-write access
* @rsts: Reset controls for the MII converter
* @of_data: Pointer to OF data
+ * @link_cfg: MIIC PHY_LINK configuration
*/
struct miic {
void __iomem *base;
@@ -191,6 +204,12 @@ struct miic {
spinlock_t lock;
struct reset_control_bulk_data rsts[MIIC_MAX_NUM_RSTS];
const struct miic_of_data *of_data;
+ struct miic_phy_link_cfg link_cfg;
+};
+
+enum miic_type {
+ MIIC_TYPE_RZN1,
+ MIIC_TYPE_RZT2H,
};
/**
@@ -210,6 +229,7 @@ struct miic {
* @init_unlock_lock_regs: Flag to indicate if registers need to be unlocked
* before access.
* @miic_write: Function pointer to write a value to a MIIC register
+ * @type: Type of MIIC
*/
struct miic_of_data {
struct modctrl_match *match_table;
@@ -226,6 +246,7 @@ struct miic_of_data {
u8 reset_count;
bool init_unlock_lock_regs;
void (*miic_write)(struct miic *miic, int offset, u32 value);
+ enum miic_type type;
};
/**
@@ -581,10 +602,79 @@ static int miic_match_dt_conf(struct miic *miic, s8 *dt_val, u32 *mode_cfg)
return -EINVAL;
}
+static void miic_configure_phy_link(struct miic *miic, u32 conf,
+ u32 port, bool active_low)
+{
+ bool polarity_active_high;
+ u32 mask, shift;
+
+ /* determine shift and polarity for this conf */
+ if (miic->of_data->type == MIIC_TYPE_RZN1) {
+ switch (conf) {
+ /* switch ports => bits [3:0] (shift 0), active when low */
+ case MIIC_SWITCH_PORTA:
+ case MIIC_SWITCH_PORTB:
+ case MIIC_SWITCH_PORTC:
+ case MIIC_SWITCH_PORTD:
+ shift = 0;
+ polarity_active_high = false;
+ break;
+
+ /* EtherCAT ports => bits [7:4] (shift 4), active when high */
+ case MIIC_ETHERCAT_PORTA:
+ case MIIC_ETHERCAT_PORTB:
+ case MIIC_ETHERCAT_PORTC:
+ shift = 4;
+ polarity_active_high = true;
+ break;
+
+ /* Sercos ports => bits [11:8] (shift 8), active when high */
+ case MIIC_SERCOS_PORTA:
+ case MIIC_SERCOS_PORTB:
+ shift = 8;
+ polarity_active_high = true;
+ break;
+
+ default:
+ return;
+ }
+ } else {
+ switch (conf) {
+ /* ETHSW ports => bits [3:0] (shift 0), active when low */
+ case ETHSS_ETHSW_PORT0:
+ case ETHSS_ETHSW_PORT1:
+ case ETHSS_ETHSW_PORT2:
+ shift = 0;
+ polarity_active_high = false;
+ break;
+
+ /* ESC ports => bits [7:4] (shift 4), active when high */
+ case ETHSS_ESC_PORT0:
+ case ETHSS_ESC_PORT1:
+ case ETHSS_ESC_PORT2:
+ shift = 4;
+ polarity_active_high = true;
+ break;
+
+ default:
+ return;
+ }
+ }
+
+ mask = BIT(port + shift);
+
+ miic->link_cfg.mask |= mask;
+ if (polarity_active_high != active_low)
+ miic->link_cfg.val |= mask;
+ else
+ miic->link_cfg.val &= ~mask;
+}
+
static int miic_parse_dt(struct miic *miic, u32 *mode_cfg)
{
struct device_node *np = miic->dev->of_node;
struct device_node *conv;
+ bool active_low;
int port, ret;
s8 *dt_val;
u32 conf;
@@ -603,10 +693,15 @@ static int miic_parse_dt(struct miic *miic, u32 *mode_cfg)
if (of_property_read_u32(conv, "reg", &port))
continue;
+ if (of_property_read_u32(conv, "renesas,miic-input", &conf))
+ continue;
+
 /* Adjust for 0-based index */
- port += !miic->of_data->miic_port_start;
- if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0)
- dt_val[port] = conf;
+ dt_val[port + !miic->of_data->miic_port_start] = conf;
+
+ active_low = of_property_read_bool(conv, "renesas,miic-phy-link-active-low");
+
+ miic_configure_phy_link(miic, conf, port, active_low);
}
ret = miic_match_dt_conf(miic, dt_val, mode_cfg);
@@ -696,6 +791,8 @@ static int miic_probe(struct platform_device *pdev)
if (ret)
goto disable_runtime_pm;
+ miic_reg_rmw(miic, MIIC_PHY_LINK, miic->link_cfg.mask, miic->link_cfg.val);
+
/* miic_create() relies on that fact that data are attached to the
* platform device to determine if the driver is ready so this needs to
* be the last thing to be done after everything is initialized
@@ -729,6 +826,7 @@ static struct miic_of_data rzn1_miic_of_data = {
.sw_mode_mask = GENMASK(4, 0),
.init_unlock_lock_regs = true,
.miic_write = miic_reg_writel_unlocked,
+ .type = MIIC_TYPE_RZN1,
};
static struct miic_of_data rzt2h_miic_of_data = {
@@ -745,6 +843,7 @@ static struct miic_of_data rzt2h_miic_of_data = {
.reset_ids = rzt2h_reset_ids,
.reset_count = ARRAY_SIZE(rzt2h_reset_ids),
.miic_write = miic_reg_writel_locked,
+ .type = MIIC_TYPE_RZT2H,
};
static const struct of_device_id miic_of_mtable[] = {
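miic_configure_phy_link() only accumulates a mask/value pair per converter port; the single read-modify-write of MIIC_PHY_LINK happens later in probe. That accumulate-then-apply idiom, runnable in isolation:

#include <stdint.h>
#include <stdio.h>

struct link_cfg { uint32_t mask, val; };

/* Accumulate one bit per converter port, then apply everything with a
 * single read-modify-write, as the probe path above does.
 */
static void cfg_port(struct link_cfg *c, unsigned int bit, int active_high)
{
	c->mask |= 1u << bit;
	if (active_high)
		c->val |= 1u << bit;
	else
		c->val &= ~(1u << bit);
}

static uint32_t apply_rmw(uint32_t reg, const struct link_cfg *c)
{
	return (reg & ~c->mask) | (c->val & c->mask);
}

int main(void)
{
	struct link_cfg c = { 0, 0 };
	uint32_t reg = 0xffffffff;

	cfg_port(&c, 0, 0);	/* switch port 0, active-low link */
	cfg_port(&c, 4, 1);	/* EtherCAT port 0, active-high link */
	printf("PHY_LINK = 0x%08x\n", (unsigned int)apply_rmw(reg, &c));
	return 0;
}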
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a7ade7b95a2e..7b73332a13d9 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -98,6 +98,7 @@ config AS21XXX_PHY
config AIR_EN8811H_PHY
tristate "Airoha EN8811H 2.5 Gigabit PHY"
+ select PHY_COMMON_PROPS
help
Currently supports the Airoha EN8811H PHY.
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 76e0db40f879..3a34917adea7 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -3,7 +3,7 @@
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
linkmode.o phy_link_topology.o \
- phy_caps.o mdio_bus_provider.o
+ phy_caps.o mdio_bus_provider.o phy_port.o
mdio-bus-y += mdio_bus.o mdio_device.o
ifdef CONFIG_PHYLIB
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index 7fa713ca8d45..3a934051b574 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -89,6 +89,9 @@
#define ADIN1300_CLOCK_STOP_REG 0x9400
#define ADIN1300_LPI_WAKE_ERR_CNT_REG 0xa000
+#define ADIN1300_B_100_ZPTM_DIMRX 0xB685
+#define ADIN1300_B_100_ZPTM_EN_DIMRX BIT(0)
+
#define ADIN1300_CDIAG_RUN 0xba1b
#define ADIN1300_CDIAG_RUN_EN BIT(0)
@@ -522,6 +525,19 @@ static int adin_config_clk_out(struct phy_device *phydev)
ADIN1300_GE_CLK_CFG_MASK, sel);
}
+static int adin_config_zptm100(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+
+ if (!(device_property_read_bool(dev, "adi,low-cmode-impedance")))
+ return 0;
+
+ /* clear bit 0 to configure for lowest common-mode impedance */
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_B_100_ZPTM_DIMRX,
+ ADIN1300_B_100_ZPTM_EN_DIMRX);
+}
+
static int adin_config_init(struct phy_device *phydev)
{
int rc;
@@ -548,6 +564,10 @@ static int adin_config_init(struct phy_device *phydev)
if (rc < 0)
return rc;
+ rc = adin_config_zptm100(phydev);
+ if (rc < 0)
+ return rc;
+
phydev_dbg(phydev, "PHY is using mode '%s'\n",
phy_modes(phydev->interface));
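The ADIN1300 addition follows the usual opt-in pattern for board quirks: absent the "adi,low-cmode-impedance" property the register keeps its hardware default; otherwise one enable bit is cleared. Modeled minimally (the register value here is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the opt-in pattern above: do nothing unless the DT property
 * is present, then clear the enable bit (bit 0) of the vendor register.
 */
static uint16_t config_zptm100(int has_low_cmode_prop, uint16_t reg)
{
	if (!has_low_cmode_prop)
		return reg;		/* keep hardware default */
	return reg & ~(uint16_t)1;	/* EN_DIMRX = 0: lowest impedance */
}

int main(void)
{
	printf("0x%04x -> 0x%04x\n", 0x0001, config_zptm100(1, 0x0001));
	return 0;
}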
diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c
index badd65f0ccee..29ae73e65caa 100644
--- a/drivers/net/phy/air_en8811h.c
+++ b/drivers/net/phy/air_en8811h.c
@@ -1,28 +1,33 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Driver for the Airoha EN8811H 2.5 Gigabit PHY.
+ * Driver for the Airoha EN8811H and AN8811HB 2.5 Gigabit PHYs.
*
- * Limitations of the EN8811H:
+ * Limitations:
* - Only full duplex supported
* - Forced speed (AN off) is not supported by hardware (100Mbps)
*
* Source originated from airoha's en8811h.c and en8811h.h v1.2.1
+ * with AN8811HB bits from air_an8811hb.c v0.0.4
*
- * Copyright (C) 2023 Airoha Technology Corp.
+ * Copyright (C) 2023, 2026 Airoha Technology Corp.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/phy.h>
+#include <linux/phy/phy-common-props.h>
#include <linux/firmware.h>
#include <linux/property.h>
#include <linux/wordpart.h>
#include <linux/unaligned.h>
#define EN8811H_PHY_ID 0x03a2a411
+#define AN8811HB_PHY_ID 0xc0ff04a0
#define EN8811H_MD32_DM "airoha/EthMD32.dm.bin"
#define EN8811H_MD32_DSP "airoha/EthMD32.DSP.bin"
+#define AN8811HB_MD32_DM "airoha/an8811hb/EthMD32_CRC.DM.bin"
+#define AN8811HB_MD32_DSP "airoha/an8811hb/EthMD32_CRC.DSP.bin"
#define AIR_FW_ADDR_DM 0x00000000
#define AIR_FW_ADDR_DSP 0x00100000
@@ -30,6 +35,7 @@
/* MII Registers */
#define AIR_AUX_CTRL_STATUS 0x1d
#define AIR_AUX_CTRL_STATUS_SPEED_MASK GENMASK(4, 2)
+#define AIR_AUX_CTRL_STATUS_SPEED_10 0x0
#define AIR_AUX_CTRL_STATUS_SPEED_100 0x4
#define AIR_AUX_CTRL_STATUS_SPEED_1000 0x8
#define AIR_AUX_CTRL_STATUS_SPEED_2500 0xc
@@ -55,6 +61,7 @@
#define EN8811H_PHY_FW_STATUS 0x8009
#define EN8811H_PHY_READY 0x02
+#define AIR_PHY_MCU_CMD_0 0x800b
#define AIR_PHY_MCU_CMD_1 0x800c
#define AIR_PHY_MCU_CMD_1_MODE1 0x0
#define AIR_PHY_MCU_CMD_2 0x800d
@@ -64,6 +71,10 @@
#define AIR_PHY_MCU_CMD_3_DOCMD 0x1100
#define AIR_PHY_MCU_CMD_4 0x800f
#define AIR_PHY_MCU_CMD_4_MODE1 0x0002
+#define AIR_PHY_MCU_CMD_4_CABLE_PAIR_A 0x00d7
+#define AIR_PHY_MCU_CMD_4_CABLE_PAIR_B 0x00d8
+#define AIR_PHY_MCU_CMD_4_CABLE_PAIR_C 0x00d9
+#define AIR_PHY_MCU_CMD_4_CABLE_PAIR_D 0x00da
#define AIR_PHY_MCU_CMD_4_INTCLR 0x00e4
/* Registers on MDIO_MMD_VEND2 */
@@ -105,6 +116,9 @@
#define AIR_PHY_LED_BLINK_2500RX BIT(11)
/* Registers on BUCKPBUS */
+#define AIR_PHY_CONTROL 0x3a9c
+#define AIR_PHY_CONTROL_INTERNAL BIT(11)
+
#define EN8811H_2P5G_LPA 0x3b30
#define EN8811H_2P5G_LPA_2P5G BIT(0)
@@ -128,6 +142,34 @@
#define EN8811H_FW_CTRL_2 0x800000
#define EN8811H_FW_CTRL_2_LOADING BIT(11)
+#define AN8811HB_CRC_PM_SET1 0xf020c
+#define AN8811HB_CRC_PM_MON2 0xf0218
+#define AN8811HB_CRC_PM_MON3 0xf021c
+#define AN8811HB_CRC_DM_SET1 0xf0224
+#define AN8811HB_CRC_DM_MON2 0xf0230
+#define AN8811HB_CRC_DM_MON3 0xf0234
+#define AN8811HB_CRC_RD_EN BIT(0)
+#define AN8811HB_CRC_ST (BIT(0) | BIT(1))
+#define AN8811HB_CRC_CHECK_PASS BIT(0)
+
+#define AN8811HB_TX_POLARITY 0x5ce004
+#define AN8811HB_TX_POLARITY_NORMAL BIT(7)
+#define AN8811HB_RX_POLARITY 0x5ce61c
+#define AN8811HB_RX_POLARITY_NORMAL BIT(7)
+
+#define AN8811HB_GPIO_OUTPUT 0x5cf8b8
+#define AN8811HB_GPIO_OUTPUT_345 (BIT(3) | BIT(4) | BIT(5))
+
+#define AN8811HB_HWTRAP1 0x5cf910
+#define AN8811HB_HWTRAP2 0x5cf914
+#define AN8811HB_HWTRAP2_CKO BIT(28)
+
+#define AN8811HB_CLK_DRV 0x5cf9e4
+#define AN8811HB_CLK_DRV_CKO_MASK GENMASK(14, 12)
+#define AN8811HB_CLK_DRV_CKOPWD BIT(12)
+#define AN8811HB_CLK_DRV_CKO_LDPWD BIT(13)
+#define AN8811HB_CLK_DRV_CKO_LPPWD BIT(14)
+
/* Led definitions */
#define EN8811H_LED_COUNT 3
@@ -447,6 +489,11 @@ static int en8811h_wait_mcu_ready(struct phy_device *phydev)
{
int ret, reg_value;
+ ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1,
+ EN8811H_FW_CTRL_1_FINISH);
+ if (ret)
+ return ret;
+
/* Because of mdio-lock, may have to wait for multiple loads */
ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
EN8811H_PHY_FW_STATUS, reg_value,
@@ -460,9 +507,103 @@ static int en8811h_wait_mcu_ready(struct phy_device *phydev)
return 0;
}
-static int en8811h_load_firmware(struct phy_device *phydev)
+static int an8811hb_check_crc(struct phy_device *phydev, u32 set1,
+ u32 mon2, u32 mon3)
+{
+ u32 pbus_value;
+ int retry = 25;
+ int ret;
+
+ /* Configure CRC */
+ ret = air_buckpbus_reg_modify(phydev, set1,
+ AN8811HB_CRC_RD_EN,
+ AN8811HB_CRC_RD_EN);
+ if (ret < 0)
+ return ret;
+ air_buckpbus_reg_read(phydev, set1, &pbus_value);
+
+ do {
+ msleep(300);
+ air_buckpbus_reg_read(phydev, mon2, &pbus_value);
+
+ /* We do not know what errors this check is supposed to
+ * catch or what to do about a failure. So print the
+ * result and continue like the vendor driver does.
+ */
+ if (pbus_value & AN8811HB_CRC_ST) {
+ air_buckpbus_reg_read(phydev, mon3, &pbus_value);
+ phydev_dbg(phydev, "CRC Check %s!\n",
+ pbus_value & AN8811HB_CRC_CHECK_PASS ?
+ "PASS" : "FAIL");
+ return air_buckpbus_reg_modify(phydev, set1,
+ AN8811HB_CRC_RD_EN, 0);
+ }
+ } while (--retry);
+
+ phydev_err(phydev, "CRC Check is not ready (%u)\n", pbus_value);
+ return -ENODEV;
+}
+
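an8811hb_check_crc() is a bounded poll: up to 25 reads, 300 ms apart, waiting for the status bits to latch before the pass/fail bit means anything. The loop shape as a runnable sketch (fake_status simulates the hardware latching on the third read; the sleep is elided):

#include <stdio.h>

/* Bounded poll: retry up to N times with a delay, mirroring the
 * 25 x 300 ms loop in an8811hb_check_crc().
 */
static int poll_status(int (*read_status)(void), int retries)
{
	do {
		int st = read_status();

		if (st)		/* status latched: done */
			return st;
		/* msleep(300) would go here in the driver */
	} while (--retries);

	return -1;		/* timed out */
}

static int fake_status(void)
{
	static int n;

	return ++n >= 3;
}

int main(void)
{
	printf("status=%d\n", poll_status(fake_status, 25));
	return 0;
}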
+static void en8811h_print_fw_version(struct phy_device *phydev)
{
struct en8811h_priv *priv = phydev->priv;
+
+ air_buckpbus_reg_read(phydev, EN8811H_FW_VERSION,
+ &priv->firmware_version);
+ phydev_info(phydev, "MD32 firmware version: %08x\n",
+ priv->firmware_version);
+}
+
+static int an8811hb_load_file(struct phy_device *phydev, const char *name,
+ u32 address)
+{
+ struct device *dev = &phydev->mdio.dev;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware_direct(&fw, name, dev);
+ if (ret < 0)
+ return ret;
+
+ ret = air_write_buf(phydev, address, fw);
+ release_firmware(fw);
+ return ret;
+}
+
+static int an8811hb_load_firmware(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1,
+ EN8811H_FW_CTRL_1_START);
+ if (ret < 0)
+ return ret;
+
+ ret = an8811hb_load_file(phydev, AN8811HB_MD32_DM, AIR_FW_ADDR_DM);
+ if (ret < 0)
+ return ret;
+
+ ret = an8811hb_check_crc(phydev, AN8811HB_CRC_DM_SET1,
+ AN8811HB_CRC_DM_MON2,
+ AN8811HB_CRC_DM_MON3);
+ if (ret < 0)
+ return ret;
+
+ ret = an8811hb_load_file(phydev, AN8811HB_MD32_DSP, AIR_FW_ADDR_DSP);
+ if (ret < 0)
+ return ret;
+
+ ret = an8811hb_check_crc(phydev, AN8811HB_CRC_PM_SET1,
+ AN8811HB_CRC_PM_MON2,
+ AN8811HB_CRC_PM_MON3);
+ if (ret < 0)
+ return ret;
+
+ return en8811h_wait_mcu_ready(phydev);
+}
+
+static int en8811h_load_firmware(struct phy_device *phydev)
+{
struct device *dev = &phydev->mdio.dev;
const struct firmware *fw1, *fw2;
int ret;
@@ -499,17 +640,11 @@ static int en8811h_load_firmware(struct phy_device *phydev)
if (ret < 0)
goto en8811h_load_firmware_out;
- ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1,
- EN8811H_FW_CTRL_1_FINISH);
+ ret = en8811h_wait_mcu_ready(phydev);
if (ret < 0)
goto en8811h_load_firmware_out;
- ret = en8811h_wait_mcu_ready(phydev);
-
- air_buckpbus_reg_read(phydev, EN8811H_FW_VERSION,
- &priv->firmware_version);
- phydev_info(phydev, "MD32 firmware version: %08x\n",
- priv->firmware_version);
+ en8811h_print_fw_version(phydev);
en8811h_load_firmware_out:
release_firmware(fw2);
@@ -532,11 +667,6 @@ static int en8811h_restart_mcu(struct phy_device *phydev)
if (ret < 0)
return ret;
- ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1,
- EN8811H_FW_CTRL_1_FINISH);
- if (ret < 0)
- return ret;
-
return en8811h_wait_mcu_ready(phydev);
}
@@ -819,6 +949,105 @@ static int en8811h_led_hw_is_supported(struct phy_device *phydev, u8 index,
return 0;
};
+static unsigned long an8811hb_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+ u32 pbus_value;
+ int ret;
+
+ ret = air_buckpbus_reg_read(phydev, AN8811HB_HWTRAP2, &pbus_value);
+ if (ret < 0)
+ return ret;
+
+ return (pbus_value & AN8811HB_HWTRAP2_CKO) ? 50000000 : 25000000;
+}
+
+static int an8811hb_clk_enable(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+
+ return air_buckpbus_reg_modify(phydev, AN8811HB_CLK_DRV,
+ AN8811HB_CLK_DRV_CKO_MASK,
+ AN8811HB_CLK_DRV_CKO_MASK);
+}
+
+static void an8811hb_clk_disable(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+
+ air_buckpbus_reg_modify(phydev, AN8811HB_CLK_DRV,
+ AN8811HB_CLK_DRV_CKO_MASK, 0);
+}
+
+static int an8811hb_clk_is_enabled(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+ u32 pbus_value;
+ int ret;
+
+ ret = air_buckpbus_reg_read(phydev, AN8811HB_CLK_DRV, &pbus_value);
+ if (ret < 0)
+ return ret;
+
+ return (pbus_value & AN8811HB_CLK_DRV_CKO_MASK);
+}
+
+static int an8811hb_clk_save_context(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+
+ priv->cko_is_enabled = an8811hb_clk_is_enabled(hw);
+
+ return 0;
+}
+
+static void an8811hb_clk_restore_context(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+
+ if (!priv->cko_is_enabled)
+ an8811hb_clk_disable(hw);
+}
+
+static const struct clk_ops an8811hb_clk_ops = {
+ .recalc_rate = an8811hb_clk_recalc_rate,
+ .enable = an8811hb_clk_enable,
+ .disable = an8811hb_clk_disable,
+ .is_enabled = an8811hb_clk_is_enabled,
+ .save_context = an8811hb_clk_save_context,
+ .restore_context = an8811hb_clk_restore_context,
+};
+
+static int an8811hb_clk_provider_setup(struct device *dev, struct clk_hw *hw)
+{
+ struct clk_init_data init;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_COMMON_CLK))
+ return 0;
+
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s-cko",
+ fwnode_get_name(dev_fwnode(dev)));
+ if (!init.name)
+ return -ENOMEM;
+
+ init.ops = &an8811hb_clk_ops;
+ init.flags = 0;
+ init.num_parents = 0;
+ hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
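Because the provider is registered through of_clk_hw_simple_get(), a consumer can reference the PHY's OF node directly (clocks = <&phy>; in DT) and treat the CKO pin as an ordinary clock. A minimal consumer sketch, not part of this patch (hypothetical driver function):

	#include <linux/clk.h>
	#include <linux/device.h>

	static int example_enable_cko(struct device *dev)
	{
		struct clk *cko;
		int ret;

		cko = devm_clk_get(dev, NULL);	/* clocks = <&phy>; */
		if (IS_ERR(cko))
			return PTR_ERR(cko);

		ret = clk_prepare_enable(cko);	/* sets AN8811HB_CLK_DRV_CKO_MASK */
		if (ret)
			return ret;

		/* 25 or 50 MHz depending on the HWTRAP2 CKO strap */
		dev_info(dev, "CKO running at %lu Hz\n", clk_get_rate(cko));
		return 0;
	}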
+
static unsigned long en8811h_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent)
{
@@ -918,6 +1147,68 @@ static int en8811h_clk_provider_setup(struct device *dev, struct clk_hw *hw)
return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
}
+static int en8811h_leds_setup(struct phy_device *phydev)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ int ret;
+
+ priv->led[0].rules = AIR_DEFAULT_TRIGGER_LED0;
+ priv->led[1].rules = AIR_DEFAULT_TRIGGER_LED1;
+ priv->led[2].rules = AIR_DEFAULT_TRIGGER_LED2;
+
+ ret = air_leds_init(phydev, EN8811H_LED_COUNT, AIR_PHY_LED_DUR,
+ AIR_LED_MODE_DISABLE);
+ if (ret < 0)
+ phydev_err(phydev, "Failed to disable leds: %d\n", ret);
+
+ return ret;
+}
+
+static int an8811hb_probe(struct phy_device *phydev)
+{
+ struct en8811h_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(struct en8811h_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ phydev->priv = priv;
+
+ ret = an8811hb_load_firmware(phydev);
+ if (ret < 0) {
+ phydev_err(phydev, "Load firmware failed: %d\n", ret);
+ return ret;
+ }
+
+ en8811h_print_fw_version(phydev);
+
+ /* mcu has just restarted after firmware load */
+ priv->mcu_needs_restart = false;
+
+ /* MDIO_DEVS1/2 empty, so set mmds_present bits here */
+ phydev->c45_ids.mmds_present |= MDIO_DEVS_PMAPMD | MDIO_DEVS_AN;
+
+ ret = en8811h_leds_setup(phydev);
+ if (ret < 0)
+ return ret;
+
+ priv->phydev = phydev;
+ /* Co-Clock Output */
+ ret = an8811hb_clk_provider_setup(&phydev->mdio.dev, &priv->hw);
+ if (ret)
+ return ret;
+
+ /* Configure led gpio pins as output */
+ ret = air_buckpbus_reg_modify(phydev, AN8811HB_GPIO_OUTPUT,
+ AN8811HB_GPIO_OUTPUT_345,
+ AN8811HB_GPIO_OUTPUT_345);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int en8811h_probe(struct phy_device *phydev)
{
struct en8811h_priv *priv;
@@ -936,19 +1227,12 @@ static int en8811h_probe(struct phy_device *phydev)
/* mcu has just restarted after firmware load */
priv->mcu_needs_restart = false;
- priv->led[0].rules = AIR_DEFAULT_TRIGGER_LED0;
- priv->led[1].rules = AIR_DEFAULT_TRIGGER_LED1;
- priv->led[2].rules = AIR_DEFAULT_TRIGGER_LED2;
-
/* MDIO_DEVS1/2 empty, so set mmds_present bits here */
phydev->c45_ids.mmds_present |= MDIO_DEVS_PMAPMD | MDIO_DEVS_AN;
- ret = air_leds_init(phydev, EN8811H_LED_COUNT, AIR_PHY_LED_DUR,
- AIR_LED_MODE_DISABLE);
- if (ret < 0) {
- phydev_err(phydev, "Failed to disable leds: %d\n", ret);
+ ret = en8811h_leds_setup(phydev);
+ if (ret < 0)
return ret;
- }
priv->phydev = phydev;
/* Co-Clock Output */
@@ -966,11 +1250,103 @@ static int en8811h_probe(struct phy_device *phydev)
return 0;
}
+static int an8811hb_config_serdes_polarity(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ u32 pbus_value = 0;
+ unsigned int pol;
+ int ret;
+
+ ret = phy_get_manual_rx_polarity(dev_fwnode(dev),
+ phy_modes(phydev->interface), &pol);
+ if (ret)
+ return ret;
+ if (pol == PHY_POL_NORMAL)
+ pbus_value |= AN8811HB_RX_POLARITY_NORMAL;
+ ret = air_buckpbus_reg_modify(phydev, AN8811HB_RX_POLARITY,
+ AN8811HB_RX_POLARITY_NORMAL,
+ pbus_value);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_get_manual_tx_polarity(dev_fwnode(dev),
+ phy_modes(phydev->interface), &pol);
+ if (ret)
+ return ret;
+ pbus_value = 0;
+ if (pol == PHY_POL_NORMAL)
+ pbus_value |= AN8811HB_TX_POLARITY_NORMAL;
+ return air_buckpbus_reg_modify(phydev, AN8811HB_TX_POLARITY,
+ AN8811HB_TX_POLARITY_NORMAL,
+ pbus_value);
+}
+
+static int en8811h_config_serdes_polarity(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ unsigned int pol, default_pol;
+ u32 pbus_value = 0;
+ int ret;
+
+ default_pol = PHY_POL_NORMAL;
+ if (device_property_read_bool(dev, "airoha,pnswap-rx"))
+ default_pol = PHY_POL_INVERT;
+
+ ret = phy_get_rx_polarity(dev_fwnode(dev), phy_modes(phydev->interface),
+ BIT(PHY_POL_NORMAL) | BIT(PHY_POL_INVERT),
+ default_pol, &pol);
+ if (ret)
+ return ret;
+ if (pol == PHY_POL_INVERT)
+ pbus_value |= EN8811H_POLARITY_RX_REVERSE;
+
+ default_pol = PHY_POL_NORMAL;
+ if (device_property_read_bool(dev, "airoha,pnswap-tx"))
+ default_pol = PHY_POL_INVERT;
+
+ ret = phy_get_tx_polarity(dev_fwnode(dev), phy_modes(phydev->interface),
+ BIT(PHY_POL_NORMAL) | BIT(PHY_POL_INVERT),
+ default_pol, &pol);
+ if (ret)
+ return ret;
+ if (pol == PHY_POL_NORMAL)
+ pbus_value |= EN8811H_POLARITY_TX_NORMAL;
+
+ return air_buckpbus_reg_modify(phydev, EN8811H_POLARITY,
+ EN8811H_POLARITY_RX_REVERSE |
+ EN8811H_POLARITY_TX_NORMAL, pbus_value);
+}
+
+static int an8811hb_config_init(struct phy_device *phydev)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ int ret;
+
+ /* If restart happened in .probe(), no need to restart now */
+ if (priv->mcu_needs_restart) {
+ ret = en8811h_restart_mcu(phydev);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* On subsequent .config_init() calls the MCU needs a restart */
+ priv->mcu_needs_restart = true;
+ }
+
+ ret = an8811hb_config_serdes_polarity(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = air_leds_init(phydev, EN8811H_LED_COUNT, AIR_PHY_LED_DUR,
+ AIR_LED_MODE_USER_DEFINE);
+ if (ret < 0)
+ phydev_err(phydev, "Failed to initialize leds: %d\n", ret);
+
+ return ret;
+}
+
static int en8811h_config_init(struct phy_device *phydev)
{
struct en8811h_priv *priv = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
- u32 pbus_value;
int ret;
/* If restart happened in .probe(), no need to restart now */
@@ -1003,19 +1379,7 @@ static int en8811h_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- /* Serdes polarity */
- pbus_value = 0;
- if (device_property_read_bool(dev, "airoha,pnswap-rx"))
- pbus_value |= EN8811H_POLARITY_RX_REVERSE;
- else
- pbus_value &= ~EN8811H_POLARITY_RX_REVERSE;
- if (device_property_read_bool(dev, "airoha,pnswap-tx"))
- pbus_value &= ~EN8811H_POLARITY_TX_NORMAL;
- else
- pbus_value |= EN8811H_POLARITY_TX_NORMAL;
- ret = air_buckpbus_reg_modify(phydev, EN8811H_POLARITY,
- EN8811H_POLARITY_RX_REVERSE |
- EN8811H_POLARITY_TX_NORMAL, pbus_value);
+ ret = en8811h_config_serdes_polarity(phydev);
if (ret < 0)
return ret;
@@ -1093,13 +1457,23 @@ static int en8811h_read_status(struct phy_device *phydev)
if (ret < 0)
return ret;
- /* Get link partner 2.5GBASE-T ability from vendor register */
- ret = air_buckpbus_reg_read(phydev, EN8811H_2P5G_LPA, &pbus_value);
- if (ret < 0)
- return ret;
- linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->lp_advertising,
- pbus_value & EN8811H_2P5G_LPA_2P5G);
+ if (phy_id_compare_model(phydev->phy_id, AN8811HB_PHY_ID)) {
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+ if (val < 0)
+ return val;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->lp_advertising,
+ val & MDIO_AN_10GBT_STAT_LP2_5G);
+ } else {
+ /* Get link partner 2.5GBASE-T ability from vendor register */
+ ret = air_buckpbus_reg_read(phydev, EN8811H_2P5G_LPA,
+ &pbus_value);
+ if (ret < 0)
+ return ret;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->lp_advertising,
+ pbus_value & EN8811H_2P5G_LPA_2P5G);
+ }
if (phydev->autoneg_complete)
phy_resolve_aneg_pause(phydev);
@@ -1121,6 +1495,9 @@ static int en8811h_read_status(struct phy_device *phydev)
case AIR_AUX_CTRL_STATUS_SPEED_100:
phydev->speed = SPEED_100;
break;
+ case AIR_AUX_CTRL_STATUS_SPEED_10:
+ phydev->speed = SPEED_10;
+ break;
}
/* Firmware before version 24011202 has no vendor register 2P5G_LPA.
@@ -1205,20 +1582,44 @@ static struct phy_driver en8811h_driver[] = {
.led_brightness_set = air_led_brightness_set,
.led_hw_control_set = air_led_hw_control_set,
.led_hw_control_get = air_led_hw_control_get,
+},
+{
+ PHY_ID_MATCH_MODEL(AN8811HB_PHY_ID),
+ .name = "Airoha AN8811HB",
+ .probe = an8811hb_probe,
+ .get_features = en8811h_get_features,
+ .config_init = an8811hb_config_init,
+ .get_rate_matching = en8811h_get_rate_matching,
+ .config_aneg = en8811h_config_aneg,
+ .read_status = en8811h_read_status,
+ .resume = en8811h_resume,
+ .suspend = en8811h_suspend,
+ .config_intr = en8811h_clear_intr,
+ .handle_interrupt = en8811h_handle_interrupt,
+ .led_hw_is_supported = en8811h_led_hw_is_supported,
+ .read_page = air_phy_read_page,
+ .write_page = air_phy_write_page,
+ .led_blink_set = air_led_blink_set,
+ .led_brightness_set = air_led_brightness_set,
+ .led_hw_control_set = air_led_hw_control_set,
+ .led_hw_control_get = air_led_hw_control_get,
} };
module_phy_driver(en8811h_driver);
static const struct mdio_device_id __maybe_unused en8811h_tbl[] = {
{ PHY_ID_MATCH_MODEL(EN8811H_PHY_ID) },
+ { PHY_ID_MATCH_MODEL(AN8811HB_PHY_ID) },
{ }
};
MODULE_DEVICE_TABLE(mdio, en8811h_tbl);
MODULE_FIRMWARE(EN8811H_MD32_DM);
MODULE_FIRMWARE(EN8811H_MD32_DSP);
+MODULE_FIRMWARE(AN8811HB_MD32_DM);
+MODULE_FIRMWARE(AN8811HB_MD32_DSP);
-MODULE_DESCRIPTION("Airoha EN8811H PHY drivers");
+MODULE_DESCRIPTION("Airoha EN8811H and AN8811HB PHY drivers");
MODULE_AUTHOR("Airoha");
MODULE_AUTHOR("Eric Woudstra <ericwouds@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/ax88796b_rust.rs b/drivers/net/phy/ax88796b_rust.rs
index bc73ebccc2aa..2d24628a4e58 100644
--- a/drivers/net/phy/ax88796b_rust.rs
+++ b/drivers/net/phy/ax88796b_rust.rs
@@ -5,7 +5,6 @@
//!
//! C version of this driver: [`drivers/net/phy/ax88796b.c`](./ax88796b.c)
use kernel::{
- c_str,
net::phy::{self, reg::C22, DeviceId, Driver},
prelude::*,
uapi,
@@ -41,7 +40,7 @@ struct PhyAX88772A;
#[vtable]
impl Driver for PhyAX88772A {
const FLAGS: u32 = phy::flags::IS_INTERNAL;
- const NAME: &'static CStr = c_str!("Asix Electronics AX88772A");
+ const NAME: &'static CStr = c"Asix Electronics AX88772A";
const PHY_DEVICE_ID: DeviceId = DeviceId::new_with_exact_mask(0x003b1861);
// AX88772A is not working properly with some old switches (NETGEAR EN 108TP):
@@ -105,7 +104,7 @@ struct PhyAX88772C;
#[vtable]
impl Driver for PhyAX88772C {
const FLAGS: u32 = phy::flags::IS_INTERNAL;
- const NAME: &'static CStr = c_str!("Asix Electronics AX88772C");
+ const NAME: &'static CStr = c"Asix Electronics AX88772C";
const PHY_DEVICE_ID: DeviceId = DeviceId::new_with_exact_mask(0x003b1881);
fn suspend(dev: &mut phy::Device) -> Result {
@@ -125,7 +124,7 @@ struct PhyAX88796B;
#[vtable]
impl Driver for PhyAX88796B {
- const NAME: &'static CStr = c_str!("Asix Electronics AX88796B");
+ const NAME: &'static CStr = c"Asix Electronics AX88796B";
const PHY_DEVICE_ID: DeviceId = DeviceId::new_with_model_mask(0x003b1841);
fn soft_reset(dev: &mut phy::Device) -> Result {
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 33db21251f2e..c012dfab3171 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/phy_port.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
@@ -811,17 +812,6 @@ static int dp83822_of_init(struct phy_device *phydev)
int i, ret;
u32 val;
- /* Signal detection for the PHY is only enabled if the FX_EN and the
- * SD_EN pins are strapped. Signal detection can only enabled if FX_EN
- * is strapped otherwise signal detection is disabled for the PHY.
- */
- if (dp83822->fx_enabled && dp83822->fx_sd_enable)
- dp83822->fx_signal_det_low = device_property_present(dev,
- "ti,link-loss-low");
- if (!dp83822->fx_enabled)
- dp83822->fx_enabled = device_property_present(dev,
- "ti,fiber-mode");
-
if (!device_property_read_string(dev, "ti,gpio2-clk-out", &of_val)) {
if (strcmp(of_val, "mac-if") == 0) {
dp83822->gpio2_clk_out = DP83822_CLK_SRC_MAC_IF;
@@ -950,6 +940,48 @@ static int dp83822_read_straps(struct phy_device *phydev)
return 0;
}
+static int dp83822_attach_mdi_port(struct phy_device *phydev,
+ struct phy_port *port)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ int ret;
+
+ if (port->mediums) {
+ if (phy_port_is_fiber(port))
+ dp83822->fx_enabled = true;
+ } else {
+ ret = dp83822_read_straps(phydev);
+ if (ret)
+ return ret;
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+ if (dp83822->fx_enabled && dp83822->fx_sd_enable)
+ dp83822->fx_signal_det_low =
+ device_property_present(&phydev->mdio.dev,
+ "ti,link-loss-low");
+
+ /* ti,fiber-mode is still used for backwards compatibility, but
+ * has been replaced with the mdi node definition, see
+ * ethernet-port.yaml
+ */
+ if (!dp83822->fx_enabled)
+ dp83822->fx_enabled =
+ device_property_present(&phydev->mdio.dev,
+ "ti,fiber-mode");
+#endif /* CONFIG_OF_MDIO */
+
+ if (dp83822->fx_enabled) {
+ port->mediums = BIT(ETHTOOL_LINK_MEDIUM_BASEF);
+ } else {
+ /* This PHY can do 100BaseTX at most, so it uses 2 pairs */
+ port->pairs = 2;
+ port->mediums = BIT(ETHTOOL_LINK_MEDIUM_BASET);
+ }
+ }
+
+ return 0;
+}
+
static int dp8382x_probe(struct phy_device *phydev)
{
struct dp83822_private *dp83822;
@@ -968,27 +1000,13 @@ static int dp8382x_probe(struct phy_device *phydev)
static int dp83822_probe(struct phy_device *phydev)
{
- struct dp83822_private *dp83822;
int ret;
ret = dp8382x_probe(phydev);
if (ret)
return ret;
- dp83822 = phydev->priv;
-
- ret = dp83822_read_straps(phydev);
- if (ret)
- return ret;
-
- ret = dp83822_of_init(phydev);
- if (ret)
- return ret;
-
- if (dp83822->fx_enabled)
- phydev->port = PORT_FIBRE;
-
- return 0;
+ return dp83822_of_init(phydev);
}
static int dp83826_probe(struct phy_device *phydev)
@@ -1172,6 +1190,7 @@ static int dp83822_led_hw_control_get(struct phy_device *phydev, u8 index,
.led_hw_is_supported = dp83822_led_hw_is_supported, \
.led_hw_control_set = dp83822_led_hw_control_set, \
.led_hw_control_get = dp83822_led_hw_control_get, \
+ .attach_mdi_port = dp83822_attach_mdi_port \
}
#define DP83825_PHY_DRIVER(_id, _name) \
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 5f5de01c41e1..3fb2293f568f 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -75,6 +75,7 @@
#define MII_DP83867_MICR_JABBER_INT_EN BIT(0)
/* RGMIICTL bits */
+#define DP83867_RGMII_EN BIT(7)
#define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1)
#define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0)
@@ -100,7 +101,7 @@
#define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03
#define DP83867_PHYCR_TX_FIFO_DEPTH_MASK GENMASK(15, 14)
#define DP83867_PHYCR_RX_FIFO_DEPTH_MASK GENMASK(13, 12)
-#define DP83867_PHYCR_RESERVED_MASK BIT(11)
+#define DP83867_PHYCR_SGMII_EN BIT(11)
#define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10)
/* RGMIIDCTL bits */
@@ -744,53 +745,31 @@ static int dp83867_config_init(struct phy_device *phydev)
*/
phy_disable_eee(phydev);
- if (phy_interface_is_rgmii(phydev) ||
- phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- val = phy_read(phydev, MII_DP83867_PHYCTRL);
- if (val < 0)
- return val;
+ val = phy_read(phydev, MII_DP83867_PHYCTRL);
+ if (val < 0)
+ return val;
- val &= ~DP83867_PHYCR_TX_FIFO_DEPTH_MASK;
- val |= (dp83867->tx_fifo_depth <<
- DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT);
+ val &= ~DP83867_PHYCR_TX_FIFO_DEPTH_MASK;
+ val |= (dp83867->tx_fifo_depth <<
+ DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT);
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- val &= ~DP83867_PHYCR_RX_FIFO_DEPTH_MASK;
- val |= (dp83867->rx_fifo_depth <<
- DP83867_PHYCR_RX_FIFO_DEPTH_SHIFT);
- }
-
- ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
- if (ret)
- return ret;
+ val &= ~DP83867_PHYCR_SGMII_EN;
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ val &= ~DP83867_PHYCR_RX_FIFO_DEPTH_MASK;
+ val |= (dp83867->rx_fifo_depth <<
+ DP83867_PHYCR_RX_FIFO_DEPTH_SHIFT) |
+ DP83867_PHYCR_SGMII_EN;
}
- if (phy_interface_is_rgmii(phydev)) {
- val = phy_read(phydev, MII_DP83867_PHYCTRL);
- if (val < 0)
- return val;
-
- /* The code below checks if "port mirroring" N/A MODE4 has been
- * enabled during power on bootstrap.
- *
- * Such N/A mode enabled by mistake can put PHY IC in some
- * internal testing mode and disable RGMII transmission.
- *
- * In this particular case one needs to check STRAP_STS1
- * register's bit 11 (marked as RESERVED).
- */
-
- bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS1);
- if (bs & DP83867_STRAP_STS1_RESERVED)
- val &= ~DP83867_PHYCR_RESERVED_MASK;
-
- ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
- if (ret)
- return ret;
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
+ if (ret)
+ return ret;
+ if (phy_interface_is_rgmii(phydev)) {
/* Set up RGMII delays */
val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
+ val |= DP83867_RGMII_EN;
val &= ~(DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
@@ -806,6 +785,10 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
dp83867->rx_id_delay |
(dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
+ } else {
+ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
+ val &= ~DP83867_RGMII_EN;
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val);
}
/* If specified, set io impedance */
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 50684271f81a..0b83fb30a548 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -10,39 +10,34 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/list.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/idr.h>
#include <linux/netdevice.h>
#include "swphy.h"
+/* The DSA loop driver may allocate 4 fixed PHYs, and 4 additional
+ * fixed PHYs should be sufficient for a system.
+ */
+#define NUM_FP 8
+
struct fixed_phy {
- int addr;
struct phy_device *phydev;
struct fixed_phy_status status;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
- struct list_head node;
};
+static DECLARE_BITMAP(fixed_phy_ids, NUM_FP);
+static struct fixed_phy fmb_fixed_phys[NUM_FP];
static struct mii_bus *fmb_mii_bus;
-static LIST_HEAD(fmb_phys);
static struct fixed_phy *fixed_phy_find(int addr)
{
- struct fixed_phy *fp;
-
- list_for_each_entry(fp, &fmb_phys, node) {
- if (fp->addr == addr)
- return fp;
- }
-
- return NULL;
+ return test_bit(addr, fixed_phy_ids) ? fmb_fixed_phys + addr : NULL;
}
int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
@@ -108,42 +103,29 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
}
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
-static int __fixed_phy_add(int phy_addr,
- const struct fixed_phy_status *status)
+static void fixed_phy_del(int phy_addr)
{
struct fixed_phy *fp;
- int ret;
- ret = swphy_validate_state(status);
- if (ret < 0)
- return ret;
-
- fp = kzalloc(sizeof(*fp), GFP_KERNEL);
+ fp = fixed_phy_find(phy_addr);
if (!fp)
- return -ENOMEM;
-
- fp->addr = phy_addr;
- fp->status = *status;
- fp->status.link = true;
-
- list_add_tail(&fp->node, &fmb_phys);
+ return;
- return 0;
+ memset(fp, 0, sizeof(*fp));
+ clear_bit(phy_addr, fixed_phy_ids);
}
-static DEFINE_IDA(phy_fixed_ida);
-
-static void fixed_phy_del(int phy_addr)
+static int fixed_phy_get_free_addr(void)
{
- struct fixed_phy *fp;
+ int addr;
- fp = fixed_phy_find(phy_addr);
- if (!fp)
- return;
+ do {
+ addr = find_first_zero_bit(fixed_phy_ids, NUM_FP);
+ if (addr == NUM_FP)
+ return -ENOSPC;
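+
+ /* test_and_set_bit() closes the race against a concurrent
+ * caller claiming the same address; retry on collision.
+ */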
+ } while (test_and_set_bit(addr, fixed_phy_ids));
- list_del(&fp->node);
- kfree(fp);
- ida_free(&phy_fixed_ida, phy_addr);
+ return addr;
}
struct phy_device *fixed_phy_register(const struct fixed_phy_status *status,
@@ -153,19 +135,20 @@ struct phy_device *fixed_phy_register(const struct fixed_phy_status *status,
int phy_addr;
int ret;
+ ret = swphy_validate_state(status);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
if (!fmb_mii_bus || fmb_mii_bus->state != MDIOBUS_REGISTERED)
return ERR_PTR(-EPROBE_DEFER);
- /* Get the next available PHY address, up to PHY_MAX_ADDR */
- phy_addr = ida_alloc_max(&phy_fixed_ida, PHY_MAX_ADDR - 1, GFP_KERNEL);
+ /* Get the next available PHY address, up to NUM_FP */
+ phy_addr = fixed_phy_get_free_addr();
if (phy_addr < 0)
return ERR_PTR(phy_addr);
- ret = __fixed_phy_add(phy_addr, status);
- if (ret < 0) {
- ida_free(&phy_fixed_ida, phy_addr);
- return ERR_PTR(ret);
- }
+ fmb_fixed_phys[phy_addr].status = *status;
+ fmb_fixed_phys[phy_addr].status.link = true;
phy = get_phy_device(fmb_mii_bus, phy_addr, false);
if (IS_ERR(phy)) {
@@ -237,16 +220,8 @@ module_init(fixed_mdio_bus_init);
static void __exit fixed_mdio_bus_exit(void)
{
- struct fixed_phy *fp, *tmp;
-
mdiobus_unregister(fmb_mii_bus);
mdiobus_free(fmb_mii_bus);
-
- list_for_each_entry_safe(fp, tmp, &fmb_phys, node) {
- list_del(&fp->node);
- kfree(fp);
- }
- ida_destroy(&phy_fixed_ida);
}
module_exit(fixed_mdio_bus_exit);
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index 894bcee61e65..ba1bbb6c63d6 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -13,7 +13,7 @@
#include <linux/mdio.h>
#include <linux/marvell_phy.h>
#include <linux/of.h>
-#include <linux/sfp.h>
+#include <linux/phy_port.h>
#include <linux/netdevice.h>
/* Port PCS Configuration */
@@ -473,89 +473,70 @@ static int mv2222_config_init(struct phy_device *phydev)
return 0;
}
-static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+static int mv2222_configure_serdes(struct phy_port *port, bool enable,
+ phy_interface_t interface)
{
- struct phy_device *phydev = upstream;
- const struct sfp_module_caps *caps;
- phy_interface_t sfp_interface;
+ struct phy_device *phydev = port_phydev(port);
struct mv2222_data *priv;
- struct device *dev;
- int ret;
+ int ret = 0;
priv = phydev->priv;
- dev = &phydev->mdio.dev;
-
- caps = sfp_get_module_caps(phydev->sfp_bus);
-
- phydev->port = caps->port;
- sfp_interface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
-
- dev_info(dev, "%s SFP module inserted\n", phy_modes(sfp_interface));
+ priv->line_interface = interface;
- if (sfp_interface != PHY_INTERFACE_MODE_10GBASER &&
- sfp_interface != PHY_INTERFACE_MODE_1000BASEX &&
- sfp_interface != PHY_INTERFACE_MODE_SGMII) {
- dev_err(dev, "Incompatible SFP module inserted\n");
+ if (enable) {
+ linkmode_and(priv->supported, phydev->supported, port->supported);
- return -EINVAL;
- }
-
- priv->line_interface = sfp_interface;
- linkmode_and(priv->supported, phydev->supported, caps->link_modes);
+ ret = mv2222_config_line(phydev);
+ if (ret < 0)
+ return ret;
- ret = mv2222_config_line(phydev);
- if (ret < 0)
- return ret;
+ if (mutex_trylock(&phydev->lock)) {
+ ret = mv2222_config_aneg(phydev);
+ mutex_unlock(&phydev->lock);
+ }
- if (mutex_trylock(&phydev->lock)) {
- ret = mv2222_config_aneg(phydev);
- mutex_unlock(&phydev->lock);
+ } else {
+ linkmode_zero(priv->supported);
}
return ret;
}
-static void mv2222_sfp_remove(void *upstream)
+static void mv2222_port_link_up(struct phy_port *port)
{
- struct phy_device *phydev = upstream;
- struct mv2222_data *priv;
-
- priv = phydev->priv;
-
- priv->line_interface = PHY_INTERFACE_MODE_NA;
- linkmode_zero(priv->supported);
- phydev->port = PORT_NONE;
-}
-
-static void mv2222_sfp_link_up(void *upstream)
-{
- struct phy_device *phydev = upstream;
+ struct phy_device *phydev = port_phydev(port);
struct mv2222_data *priv;
priv = phydev->priv;
priv->sfp_link = true;
}
-static void mv2222_sfp_link_down(void *upstream)
+static void mv2222_port_link_down(struct phy_port *port)
{
- struct phy_device *phydev = upstream;
+ struct phy_device *phydev = port_phydev(port);
struct mv2222_data *priv;
priv = phydev->priv;
priv->sfp_link = false;
}
-static const struct sfp_upstream_ops sfp_phy_ops = {
- .module_insert = mv2222_sfp_insert,
- .module_remove = mv2222_sfp_remove,
- .link_up = mv2222_sfp_link_up,
- .link_down = mv2222_sfp_link_down,
- .attach = phy_sfp_attach,
- .detach = phy_sfp_detach,
- .connect_phy = phy_sfp_connect_phy,
- .disconnect_phy = phy_sfp_disconnect_phy,
+static const struct phy_port_ops mv2222_port_ops = {
+ .link_up = mv2222_port_link_up,
+ .link_down = mv2222_port_link_down,
+ .configure_mii = mv2222_configure_serdes,
};
+static int mv2222_attach_mii_port(struct phy_device *phydev, struct phy_port *port)
+{
+ port->ops = &mv2222_port_ops;
+
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, port->interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, port->interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, port->interfaces);
+
+ return 0;
+}
+
static int mv2222_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -591,7 +572,7 @@ static int mv2222_probe(struct phy_device *phydev)
priv->line_interface = PHY_INTERFACE_MODE_NA;
phydev->priv = priv;
- return phy_sfp_probe(phydev, &sfp_phy_ops);
+ return 0;
}
static struct phy_driver mv2222_drivers[] = {
@@ -608,6 +589,7 @@ static struct phy_driver mv2222_drivers[] = {
.suspend = mv2222_suspend,
.resume = mv2222_resume,
.read_status = mv2222_read_status,
+ .attach_mii_port = mv2222_attach_mii_port,
},
};
module_phy_driver(mv2222_drivers);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index c248c90510ae..7a578b5aa2ed 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -29,10 +29,10 @@
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/phy.h>
+#include <linux/phy_port.h>
#include <linux/marvell_phy.h>
#include <linux/bitfield.h>
#include <linux/of.h>
-#include <linux/sfp.h>
#include <linux/io.h>
#include <asm/irq.h>
@@ -3598,11 +3598,10 @@ static int marvell_probe(struct phy_device *phydev)
return marvell_hwmon_probe(phydev);
}
-static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+static int m88e1510_port_configure_serdes(struct phy_port *port, bool enable,
+ phy_interface_t interface)
{
- struct phy_device *phydev = upstream;
- const struct sfp_module_caps *caps;
- phy_interface_t interface;
+ struct phy_device *phydev = port_phydev(port);
struct device *dev;
int oldpage;
int ret = 0;
@@ -3610,28 +3609,27 @@ static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
dev = &phydev->mdio.dev;
- caps = sfp_get_module_caps(phydev->sfp_bus);
- interface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
+ if (enable) {
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_1000X;
- dev_info(dev, "%s SFP module inserted\n", phy_modes(interface));
+ break;
+ case PHY_INTERFACE_MODE_100BASEX:
+ mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_100FX;
- switch (interface) {
- case PHY_INTERFACE_MODE_1000BASEX:
- mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_1000X;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_SGMII;
- break;
- case PHY_INTERFACE_MODE_100BASEX:
- mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_100FX;
-
- break;
- case PHY_INTERFACE_MODE_SGMII:
- mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_SGMII;
+ break;
+ default:
+ dev_err(dev, "Incompatible SFP module inserted\n");
- break;
- default:
- dev_err(dev, "Incompatible SFP module inserted\n");
-
- return -EINVAL;
+ return -EINVAL;
+ }
+ } else {
+ mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII;
}
oldpage = phy_select_page(phydev, MII_MARVELL_MODE_PAGE);
@@ -3650,47 +3648,20 @@ error:
return phy_restore_page(phydev, oldpage, ret);
}
-static void m88e1510_sfp_remove(void *upstream)
-{
- struct phy_device *phydev = upstream;
- int oldpage;
- int ret = 0;
-
- oldpage = phy_select_page(phydev, MII_MARVELL_MODE_PAGE);
- if (oldpage < 0)
- goto error;
-
- ret = __phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1,
- MII_88E1510_GEN_CTRL_REG_1_MODE_MASK,
- MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII);
- if (ret < 0)
- goto error;
-
- ret = __phy_set_bits(phydev, MII_88E1510_GEN_CTRL_REG_1,
- MII_88E1510_GEN_CTRL_REG_1_RESET);
-
-error:
- phy_restore_page(phydev, oldpage, ret);
-}
-
-static const struct sfp_upstream_ops m88e1510_sfp_ops = {
- .module_insert = m88e1510_sfp_insert,
- .module_remove = m88e1510_sfp_remove,
- .attach = phy_sfp_attach,
- .detach = phy_sfp_detach,
- .connect_phy = phy_sfp_connect_phy,
- .disconnect_phy = phy_sfp_disconnect_phy,
+static const struct phy_port_ops m88e1510_serdes_port_ops = {
+ .configure_mii = m88e1510_port_configure_serdes,
};
-static int m88e1510_probe(struct phy_device *phydev)
+static int m88e1510_attach_mii_port(struct phy_device *phy_device,
+ struct phy_port *port)
{
- int err;
+ port->ops = &m88e1510_serdes_port_ops;
- err = marvell_probe(phydev);
- if (err)
- return err;
+ __set_bit(PHY_INTERFACE_MODE_SGMII, port->interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, port->interfaces);
+ __set_bit(PHY_INTERFACE_MODE_100BASEX, port->interfaces);
- return phy_sfp_probe(phydev, &m88e1510_sfp_ops);
+ return 0;
}
static struct phy_driver marvell_drivers[] = {
@@ -3950,7 +3921,7 @@ static struct phy_driver marvell_drivers[] = {
.driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops),
.features = PHY_GBIT_FIBRE_FEATURES,
.flags = PHY_POLL_CABLE_TEST,
- .probe = m88e1510_probe,
+ .probe = marvell_probe,
.config_init = m88e1510_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
@@ -3976,6 +3947,7 @@ static struct phy_driver marvell_drivers[] = {
.led_hw_is_supported = m88e1318_led_hw_is_supported,
.led_hw_control_set = m88e1318_led_hw_control_set,
.led_hw_control_get = m88e1318_led_hw_control_get,
+ .attach_mii_port = m88e1510_attach_mii_port,
},
{
.phy_id = MARVELL_PHY_ID_88E1540,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 8fd42131cdbf..b40df82152cd 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -28,7 +28,7 @@
#include <linux/hwmon.h>
#include <linux/marvell_phy.h>
#include <linux/phy.h>
-#include <linux/sfp.h>
+#include <linux/phy_port.h>
#include <linux/netdevice.h>
#define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe
@@ -463,30 +463,29 @@ static int mv3310_set_edpd(struct phy_device *phydev, u16 edpd)
return err;
}
-static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+static int mv3310_attach_mii_port(struct phy_device *phydev,
+ struct phy_port *port)
{
- struct phy_device *phydev = upstream;
- const struct sfp_module_caps *caps;
- phy_interface_t iface;
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, port->interfaces);
+ return 0;
+}
- caps = sfp_get_module_caps(phydev->sfp_bus);
- iface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
+static int mv3310_attach_mdi_port(struct phy_device *phydev,
+ struct phy_port *port)
+{
+ /* This PHY can do combo-ports, i.e. 2 MDI outputs, usually one
+ * of them going to an SFP and the other one to an RJ45
+ * connector. If we don't have any representation for the port
+ * in DT, and we are dealing with a non-SFP port, then we
+ * mask the port's capabilities to report BaseT-only modes.
+ */
+ if (port->not_described)
+ return phy_port_restrict_mediums(port,
+ BIT(ETHTOOL_LINK_MEDIUM_BASET));
- if (iface != PHY_INTERFACE_MODE_10GBASER) {
- dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
- return -EINVAL;
- }
return 0;
}
-static const struct sfp_upstream_ops mv3310_sfp_ops = {
- .attach = phy_sfp_attach,
- .detach = phy_sfp_detach,
- .connect_phy = phy_sfp_connect_phy,
- .disconnect_phy = phy_sfp_disconnect_phy,
- .module_insert = mv3310_sfp_insert,
-};
-
static int mv3310_probe(struct phy_device *phydev)
{
const struct mv3310_chip *chip = to_mv3310_chip(phydev);
@@ -544,7 +543,9 @@ static int mv3310_probe(struct phy_device *phydev)
chip->init_supported_interfaces(priv->supported_interfaces);
- return phy_sfp_probe(phydev, &mv3310_sfp_ops);
+ phydev->max_n_ports = 2;
+
+ return 0;
}
static void mv3310_remove(struct phy_device *phydev)
@@ -1405,6 +1406,8 @@ static struct phy_driver mv3310_drivers[] = {
.set_loopback = genphy_c45_loopback,
.get_wol = mv3110_get_wol,
.set_wol = mv3110_set_wol,
+ .attach_mii_port = mv3310_attach_mii_port,
+ .attach_mdi_port = mv3310_attach_mdi_port,
},
{
.phy_id = MARVELL_PHY_ID_88X3310,
@@ -1424,6 +1427,8 @@ static struct phy_driver mv3310_drivers[] = {
.set_tunable = mv3310_set_tunable,
.remove = mv3310_remove,
.set_loopback = genphy_c45_loopback,
+ .attach_mii_port = mv3310_attach_mii_port,
+ .attach_mdi_port = mv3310_attach_mdi_port,
},
{
.phy_id = MARVELL_PHY_ID_88E2110,
@@ -1444,6 +1449,8 @@ static struct phy_driver mv3310_drivers[] = {
.set_loopback = genphy_c45_loopback,
.get_wol = mv3110_get_wol,
.set_wol = mv3110_set_wol,
+ .attach_mii_port = mv3310_attach_mii_port,
+ .attach_mdi_port = mv3310_attach_mdi_port,
},
{
.phy_id = MARVELL_PHY_ID_88E2110,
@@ -1462,6 +1469,8 @@ static struct phy_driver mv3310_drivers[] = {
.set_tunable = mv3310_set_tunable,
.remove = mv3310_remove,
.set_loopback = genphy_c45_loopback,
+ .attach_mii_port = mv3310_attach_mii_port,
+ .attach_mdi_port = mv3310_attach_mdi_port,
},
};
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index 6e90ed42cd98..65636070a222 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -36,18 +36,6 @@ static void mdio_device_release(struct device *dev)
kfree(to_mdio_device(dev));
}
-static int mdio_device_bus_match(struct device *dev,
- const struct device_driver *drv)
-{
- struct mdio_device *mdiodev = to_mdio_device(dev);
- const struct mdio_driver *mdiodrv = to_mdio_driver(drv);
-
- if (mdiodrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY)
- return 0;
-
- return strcmp(mdiodev->modalias, drv->name) == 0;
-}
-
struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr)
{
struct mdio_device *mdiodev;
@@ -60,7 +48,6 @@ struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr)
mdiodev->dev.release = mdio_device_release;
mdiodev->dev.parent = &bus->dev;
mdiodev->dev.bus = &mdio_bus_type;
- mdiodev->bus_match = mdio_device_bus_match;
mdiodev->device_free = mdio_device_free;
mdiodev->device_remove = mdio_device_remove;
mdiodev->bus = bus;
diff --git a/drivers/net/phy/mediatek/mtk-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c
index 2c4bbc236202..9a54949644d5 100644
--- a/drivers/net/phy/mediatek/mtk-ge-soc.c
+++ b/drivers/net/phy/mediatek/mtk-ge-soc.c
@@ -1508,6 +1508,8 @@ static struct phy_driver mtk_socphy_driver[] = {
{
PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7581),
.name = "Airoha AN7581 PHY",
+ .config_intr = genphy_no_config_intr,
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
.probe = an7581_phy_probe,
.led_blink_set = mt798x_phy_led_blink_set,
.led_brightness_set = mt798x_phy_led_brightness_set,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 8208ecbb575c..663dcdc92204 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -101,6 +101,14 @@
#define LAN8814_CABLE_DIAG_VCT_DATA_MASK GENMASK(7, 0)
#define LAN8814_PAIR_BIT_SHIFT 12
+/* KSZ9x31 remote loopback register */
+#define KSZ9x31_REMOTE_LOOPBACK 0x11
+/* This is an undocumented bit of the KSZ9131RNX.
+ * It was reported by NXP in cooperation with Micrel.
+ */
+#define KSZ9x31_REMOTE_LOOPBACK_KEEP_PREAMBLE BIT(2)
+#define KSZ9x31_REMOTE_LOOPBACK_EN BIT(8)
+
#define LAN8814_SKUS 0xB
#define LAN8814_WIRE_PAIR_MASK 0xF
@@ -1500,7 +1508,11 @@ static int ksz9131_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- return 0;
+ if (phydev->dev_flags & PHY_F_KEEP_PREAMBLE_BEFORE_SFD)
+ ret = phy_modify(phydev, KSZ9x31_REMOTE_LOOPBACK, 0,
+ KSZ9x31_REMOTE_LOOPBACK_KEEP_PREAMBLE);
+
+ return ret;
}
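The new keep-preamble handling is gated on a phydev->dev_flags bit, so the MAC driver has to opt in when attaching the PHY. A minimal opt-in sketch (hypothetical MAC probe fragment; PHY_F_KEEP_PREAMBLE_BEFORE_SFD is the flag this series assumes):

	#include <linux/of_mdio.h>
	#include <linux/phy.h>

	static int example_attach_phy(struct net_device *ndev,
				      struct device_node *phy_np,
				      void (*adjust_link)(struct net_device *))
	{
		struct phy_device *phydev;

		/* Ask the KSZ9131 to pass the full preamble through */
		phydev = of_phy_connect(ndev, phy_np, adjust_link,
					PHY_F_KEEP_PREAMBLE_BEFORE_SFD,
					PHY_INTERFACE_MODE_RGMII_ID);
		return phydev ? 0 : -ENODEV;
	}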
#define MII_KSZ9131_AUTO_MDIX 0x1C
@@ -3156,6 +3168,18 @@ static void lan8814_flush_fifo(struct phy_device *phydev, bool egress)
lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TSU_INT_STS);
}
+static int lan8814_hwtstamp_get(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config)
+{
+ struct kszphy_ptp_priv *ptp_priv =
+ container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+
+ config->tx_type = ptp_priv->hwts_tx_type;
+ config->rx_filter = ptp_priv->rx_filter;
+
+ return 0;
+}
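These callbacks sit behind the standard hardware-timestamping ioctls: SIOCSHWTSTAMP lands in hwtstamp_set(), and SIOCGHWTSTAMP lands in the new hwtstamp_get(), which now reports exactly the state committed after validation. A minimal userspace sketch (hypothetical caller):

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int enable_ptp_timestamps(int fd, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr = {};

		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		/* Reaches lan8814_hwtstamp_set(); an unsupported tx_type
		 * now fails with -ERANGE before any driver state changes.
		 */
		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
	}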
+
static int lan8814_hwtstamp_set(struct mii_timestamper *mii_ts,
struct kernel_hwtstamp_config *config,
struct netlink_ext_ack *extack)
@@ -3166,9 +3190,6 @@ static int lan8814_hwtstamp_set(struct mii_timestamper *mii_ts,
int txcfg = 0, rxcfg = 0;
int pkt_ts_enable;
- ptp_priv->hwts_tx_type = config->tx_type;
- ptp_priv->rx_filter = config->rx_filter;
-
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
ptp_priv->layer = 0;
@@ -3196,6 +3217,18 @@ static int lan8814_hwtstamp_set(struct mii_timestamper *mii_ts,
return -ERANGE;
}
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ ptp_priv->hwts_tx_type = config->tx_type;
+ ptp_priv->rx_filter = config->rx_filter;
+
if (ptp_priv->layer & PTP_CLASS_L2) {
rxcfg = PTP_RX_PARSE_CONFIG_LAYER2_EN_;
txcfg = PTP_TX_PARSE_CONFIG_LAYER2_EN_;
@@ -4399,6 +4432,7 @@ static void lan8814_ptp_init(struct phy_device *phydev)
ptp_priv->mii_ts.rxtstamp = lan8814_rxtstamp;
ptp_priv->mii_ts.txtstamp = lan8814_txtstamp;
ptp_priv->mii_ts.hwtstamp_set = lan8814_hwtstamp_set;
+ ptp_priv->mii_ts.hwtstamp_get = lan8814_hwtstamp_get;
ptp_priv->mii_ts.ts_info = lan8814_ts_info;
phydev->mii_ts = &ptp_priv->mii_ts;
@@ -5060,9 +5094,6 @@ static int lan8841_hwtstamp_set(struct mii_timestamper *mii_ts,
int txcfg = 0, rxcfg = 0;
int pkt_ts_enable;
- ptp_priv->hwts_tx_type = config->tx_type;
- ptp_priv->rx_filter = config->rx_filter;
-
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
ptp_priv->layer = 0;
@@ -5090,6 +5121,18 @@ static int lan8841_hwtstamp_set(struct mii_timestamper *mii_ts,
return -ERANGE;
}
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ ptp_priv->hwts_tx_type = config->tx_type;
+ ptp_priv->rx_filter = config->rx_filter;
+
/* Setup parsing of the frames and enable the timestamping for ptp
* frames
*/
@@ -5934,6 +5977,7 @@ static int lan8841_probe(struct phy_device *phydev)
ptp_priv->mii_ts.rxtstamp = lan8841_rxtstamp;
ptp_priv->mii_ts.txtstamp = lan8814_txtstamp;
ptp_priv->mii_ts.hwtstamp_set = lan8841_hwtstamp_set;
+ ptp_priv->mii_ts.hwtstamp_get = lan8814_hwtstamp_get;
ptp_priv->mii_ts.ts_info = lan8841_ts_info;
phydev->mii_ts = &ptp_priv->mii_ts;
diff --git a/drivers/net/phy/microchip_rds_ptp.c b/drivers/net/phy/microchip_rds_ptp.c
index 4c6326b0ceaf..f5f2928e705f 100644
--- a/drivers/net/phy/microchip_rds_ptp.c
+++ b/drivers/net/phy/microchip_rds_ptp.c
@@ -476,6 +476,18 @@ static bool mchp_rds_ptp_rxtstamp(struct mii_timestamper *mii_ts,
return true;
}
+static int mchp_rds_ptp_hwtstamp_get(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config)
+{
+ struct mchp_rds_ptp_clock *clock =
+ container_of(mii_ts, struct mchp_rds_ptp_clock,
+ mii_ts);
+ config->tx_type = clock->hwts_tx_type;
+ config->rx_filter = clock->rx_filter;
+
+ return 0;
+}
+
static int mchp_rds_ptp_hwtstamp_set(struct mii_timestamper *mii_ts,
struct kernel_hwtstamp_config *config,
struct netlink_ext_ack *extack)
@@ -488,9 +500,6 @@ static int mchp_rds_ptp_hwtstamp_set(struct mii_timestamper *mii_ts,
unsigned long flags;
int rc;
- clock->hwts_tx_type = config->tx_type;
- clock->rx_filter = config->rx_filter;
-
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
clock->layer = 0;
@@ -518,6 +527,15 @@ static int mchp_rds_ptp_hwtstamp_set(struct mii_timestamper *mii_ts,
return -ERANGE;
}
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ return -ERANGE;
+ }
+
/* Setup parsing of the frames and enable the timestamping for ptp
* frames
*/
@@ -553,7 +571,7 @@ static int mchp_rds_ptp_hwtstamp_set(struct mii_timestamper *mii_ts,
if (rc < 0)
return rc;
- if (clock->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ if (config->tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
/* Enable / disable of the TX timestamp in the SYNC frames */
rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
MCHP_RDS_PTP_PORT,
@@ -587,8 +605,13 @@ static int mchp_rds_ptp_hwtstamp_set(struct mii_timestamper *mii_ts,
/* Now enable the timestamping interrupts */
rc = mchp_rds_ptp_config_intr(clock,
config->rx_filter != HWTSTAMP_FILTER_NONE);
+ if (rc < 0)
+ return rc;
+
+ clock->hwts_tx_type = config->tx_type;
+ clock->rx_filter = config->rx_filter;
- return rc < 0 ? rc : 0;
+ return 0;
}
static int mchp_rds_ptp_ts_info(struct mii_timestamper *mii_ts,
@@ -1282,6 +1305,7 @@ struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
clock->mii_ts.rxtstamp = mchp_rds_ptp_rxtstamp;
clock->mii_ts.txtstamp = mchp_rds_ptp_txtstamp;
clock->mii_ts.hwtstamp_set = mchp_rds_ptp_hwtstamp_set;
+ clock->mii_ts.hwtstamp_get = mchp_rds_ptp_hwtstamp_get;
clock->mii_ts.ts_info = mchp_rds_ptp_ts_info;
phydev->mii_ts = &clock->mii_ts;
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 42d46b5758fc..4d62f7b36212 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -910,6 +910,10 @@ static int ytphy_rgmii_clk_delay_config(struct phy_device *phydev)
val |= FIELD_PREP(YT8521_RC1R_RX_DELAY_MASK, rx_reg) |
FIELD_PREP(YT8521_RC1R_GE_TX_DELAY_MASK, tx_reg);
break;
+ case PHY_INTERFACE_MODE_GMII:
+ if (phydev->drv->phy_id != PHY_ID_YT8531S)
+ return -EOPNOTSUPP;
+ return 0;
default: /* do not support other modes */
return -EOPNOTSUPP;
}
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 8e2fd6b942b6..5f99766fb64c 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -603,20 +603,6 @@ static int gpy_update_interface(struct phy_device *phydev)
case SPEED_100:
case SPEED_10:
phydev->interface = PHY_INTERFACE_MODE_SGMII;
- if (gpy_sgmii_aneg_en(phydev))
- break;
- /* Enable and restart SGMII ANEG for 10/100/1000Mbps link speed
- * if ANEG is disabled (in 2500-BaseX mode).
- */
- ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL,
- VSPEC1_SGMII_ANEN_ANRS,
- VSPEC1_SGMII_ANEN_ANRS);
- if (ret < 0) {
- phydev_err(phydev,
- "Error: Enable of SGMII ANEG failed: %d\n",
- ret);
- return ret;
- }
break;
}
@@ -1060,6 +1046,27 @@ static int gpy_led_polarity_set(struct phy_device *phydev, int index,
return -EINVAL;
}
+static unsigned int gpy_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return LINK_INBAND_DISABLE;
+ default:
+ return 0;
+ }
+}
+
+static int gpy_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL,
+ VSPEC1_SGMII_ANEN_ANRS,
+ (modes == LINK_INBAND_DISABLE) ? 0 :
+ VSPEC1_SGMII_ANEN_ANRS);
+}
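These two callbacks plug into phylib's in-band signalling hooks. A minimal MAC-facing sketch, assuming the existing phy_inband_caps()/phy_config_inband() wrappers dispatch to the driver ops above:

	/* Prefer in-band autoneg on SGMII when the PHY supports it */
	static int example_setup_inband(struct phy_device *phydev)
	{
		unsigned int caps;

		caps = phy_inband_caps(phydev, PHY_INTERFACE_MODE_SGMII);
		if (caps & LINK_INBAND_ENABLE)
			return phy_config_inband(phydev, LINK_INBAND_ENABLE);

		/* e.g. 2500base-X, where only disabled in-band is valid */
		return phy_config_inband(phydev, LINK_INBAND_DISABLE);
	}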
+
static struct phy_driver gpy_drivers[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx),
@@ -1067,6 +1074,8 @@ static struct phy_driver gpy_drivers[] = {
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy_config_init,
.probe = gpy_probe,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = gpy_config_aneg,
@@ -1090,6 +1099,8 @@ static struct phy_driver gpy_drivers[] = {
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy_config_init,
.probe = gpy_probe,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = gpy_config_aneg,
@@ -1112,6 +1123,8 @@ static struct phy_driver gpy_drivers[] = {
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy_config_init,
.probe = gpy_probe,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = gpy_config_aneg,
@@ -1135,6 +1148,8 @@ static struct phy_driver gpy_drivers[] = {
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy21x_config_init,
.probe = gpy_probe,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = gpy_config_aneg,
@@ -1157,6 +1172,8 @@ static struct phy_driver gpy_drivers[] = {
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy21x_config_init,
.probe = gpy_probe,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = gpy_config_aneg,
@@ -1179,6 +1196,8 @@ static struct phy_driver gpy_drivers[] = {
.name = "Maxlinear Ethernet GPY212B",
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy21x_config_init,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -1202,6 +1221,8 @@ static struct phy_driver gpy_drivers[] = {
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy21x_config_init,
.probe = gpy_probe,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = gpy_config_aneg,
@@ -1225,6 +1246,8 @@ static struct phy_driver gpy_drivers[] = {
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy21x_config_init,
.probe = gpy_probe,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = gpy_config_aneg,
@@ -1247,6 +1270,8 @@ static struct phy_driver gpy_drivers[] = {
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy21x_config_init,
.probe = gpy_probe,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = gpy_config_aneg,
@@ -1269,6 +1294,8 @@ static struct phy_driver gpy_drivers[] = {
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy_config_init,
.probe = gpy_probe,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = gpy_config_aneg,
@@ -1286,6 +1313,8 @@ static struct phy_driver gpy_drivers[] = {
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy_config_init,
.probe = gpy_probe,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = gpy_config_aneg,
@@ -1303,6 +1332,8 @@ static struct phy_driver gpy_drivers[] = {
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy_config_init,
.probe = gpy_probe,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = gpy_config_aneg,
@@ -1320,6 +1351,8 @@ static struct phy_driver gpy_drivers[] = {
.get_features = genphy_c45_pma_read_abilities,
.config_init = gpy_config_init,
.probe = gpy_probe,
+ .inband_caps = gpy_inband_caps,
+ .config_inband = gpy_config_inband,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = gpy_config_aneg,
diff --git a/drivers/net/phy/phy-caps.h b/drivers/net/phy/phy-caps.h
index 4951a39f3828..421088e6f6e8 100644
--- a/drivers/net/phy/phy-caps.h
+++ b/drivers/net/phy/phy-caps.h
@@ -25,6 +25,7 @@ enum {
LINK_CAPA_40000FD,
LINK_CAPA_50000FD,
LINK_CAPA_56000FD,
+ LINK_CAPA_80000FD,
LINK_CAPA_100000FD,
LINK_CAPA_200000FD,
LINK_CAPA_400000FD,
@@ -61,4 +62,9 @@ const struct link_capabilities *
phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
bool exact);
+void phy_caps_medium_get_supported(unsigned long *supported,
+ enum ethtool_link_medium medium,
+ int pairs);
+u32 phy_caps_mediums_from_linkmodes(unsigned long *linkmodes);
+
#endif /* __PHY_CAPS_H */
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 277c034bc32f..d7a4a977fc8a 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -4,6 +4,7 @@
*/
#include <linux/export.h>
#include <linux/phy.h>
+#include <linux/phy_port.h>
#include <linux/of.h>
#include "phylib.h"
@@ -47,6 +48,8 @@ const char *phy_speed_to_str(int speed)
return "50Gbps";
case SPEED_56000:
return "56Gbps";
+ case SPEED_80000:
+ return "80Gbps";
case SPEED_100000:
return "100Gbps";
case SPEED_200000:
@@ -208,7 +211,12 @@ EXPORT_SYMBOL_GPL(phy_interface_num_ports);
static void __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
+ struct phy_port *port;
+
phy_caps_linkmode_max_speed(max_speed, phydev->supported);
+
+ phy_for_each_port(phydev, port)
+ phy_caps_linkmode_max_speed(max_speed, port->supported);
}
/**
diff --git a/drivers/net/phy/phy_caps.c b/drivers/net/phy/phy_caps.c
index 3a05982b39bf..942d43191561 100644
--- a/drivers/net/phy/phy_caps.c
+++ b/drivers/net/phy/phy_caps.c
@@ -21,6 +21,7 @@ static struct link_capabilities link_caps[__LINK_CAPA_MAX] __ro_after_init = {
{ SPEED_40000, DUPLEX_FULL, {0} }, /* LINK_CAPA_40000FD */
{ SPEED_50000, DUPLEX_FULL, {0} }, /* LINK_CAPA_50000FD */
{ SPEED_56000, DUPLEX_FULL, {0} }, /* LINK_CAPA_56000FD */
+ { SPEED_80000, DUPLEX_FULL, {0} }, /* LINK_CAPA_80000FD */
{ SPEED_100000, DUPLEX_FULL, {0} }, /* LINK_CAPA_100000FD */
{ SPEED_200000, DUPLEX_FULL, {0} }, /* LINK_CAPA_200000FD */
{ SPEED_400000, DUPLEX_FULL, {0} }, /* LINK_CAPA_400000FD */
@@ -49,6 +50,7 @@ static int speed_duplex_to_capa(int speed, unsigned int duplex)
case SPEED_40000: return LINK_CAPA_40000FD;
case SPEED_50000: return LINK_CAPA_50000FD;
case SPEED_56000: return LINK_CAPA_56000FD;
+ case SPEED_80000: return LINK_CAPA_80000FD;
case SPEED_100000: return LINK_CAPA_100000FD;
case SPEED_200000: return LINK_CAPA_200000FD;
case SPEED_400000: return LINK_CAPA_400000FD;
@@ -80,6 +82,14 @@ int __init phy_caps_init(void)
/* Fill the caps array from net/ethtool/common.c */
for (i = 0; i < __ETHTOOL_LINK_MODE_MASK_NBITS; i++) {
linkmode = &link_mode_params[i];
+
+ /* Sanity check the linkmodes array for number of pairs */
+ if (linkmode->pairs < linkmode->min_pairs) {
+ pr_err("Pairs count must not be under min_pairs for linkmode %d\n",
+ i);
+ return -EINVAL;
+ }
+
capa = speed_duplex_to_capa(linkmode->speed, linkmode->duplex);
if (capa < 0) {
@@ -378,3 +388,60 @@ unsigned long phy_caps_from_interface(phy_interface_t interface)
return link_caps;
}
EXPORT_SYMBOL_GPL(phy_caps_from_interface);
+
+/**
+ * phy_caps_medium_get_supported() - Get the linkmodes supported on a given medium
+ * @supported: Filled with every linkmode possible on the given medium that
+ * needs no more than @pairs pairs.
+ * @medium: The medium to get the support for
+ * @pairs: The number of pairs available on the medium. Only relevant for modes
+ * that have a pair count, such as BaseT. Pass 0 if not applicable.
+ *
+ * If no match exists, the supported field is left untouched.
+ */
+void phy_caps_medium_get_supported(unsigned long *supported,
+ enum ethtool_link_medium medium,
+ int pairs)
+{
+ int i;
+
+ for (i = 0; i < __ETHTOOL_LINK_MODE_MASK_NBITS; i++) {
+ /* Special bits such as Autoneg, Pause, Asym_pause, etc. are
+ * set and will be masked away by the port parent.
+ */
+ if (link_mode_params[i].mediums == BIT(ETHTOOL_LINK_MEDIUM_NONE)) {
+ linkmode_set_bit(i, supported);
+ continue;
+ }
+
+ /* Set the bit if the medium matches and, for modes with a
+ * pair count, @pairs falls within [min_pairs, pairs].
+ */
+ if (link_mode_params[i].mediums & BIT(medium) &&
+ (!link_mode_params[i].min_pairs ||
+ (link_mode_params[i].min_pairs <= pairs &&
+ link_mode_params[i].pairs >= pairs)))
+ linkmode_set_bit(i, supported);
+ }
+}
+EXPORT_SYMBOL_GPL(phy_caps_medium_get_supported);
+
+/**
+ * phy_caps_mediums_from_linkmodes() - Get all mediums from a linkmodes list
+ * @linkmodes: A bitset of linkmodes to get the mediums from
+ *
+ * Returns: A bitset of ETHTOOL_LINK_MEDIUM_XXX values corresponding to all medium
+ * types in the linkmodes list
+ */
+u32 phy_caps_mediums_from_linkmodes(unsigned long *linkmodes)
+{
+ const struct link_mode_info *linkmode;
+ u32 mediums = 0;
+ int i;
+
+ for_each_set_bit(i, linkmodes, __ETHTOOL_LINK_MODE_MASK_NBITS) {
+ linkmode = &link_mode_params[i];
+ mediums |= linkmode->mediums;
+ }
+
+ return mediums;
+}
+EXPORT_SYMBOL_GPL(phy_caps_mediums_from_linkmodes);
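A sketch of how a caller might combine the two helpers above to trim a PHY's linkmodes to what its medium can carry (hypothetical function; assumes a 4-pair BaseT port):

	static void example_restrict_to_baset(struct phy_device *phydev)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(baset) = {};

		/* Every linkmode possible on BaseT with up to 4 pairs */
		phy_caps_medium_get_supported(baset, ETHTOOL_LINK_MEDIUM_BASET, 4);

		linkmode_and(phydev->supported, phydev->supported, baset);

		/* The remaining modes should report BaseT (or no) medium */
		WARN_ON(phy_caps_mediums_from_linkmodes(phydev->supported) &
			~(BIT(ETHTOOL_LINK_MEDIUM_BASET) |
			  BIT(ETHTOOL_LINK_MEDIUM_NONE)));
	}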
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 81984d4ebb7c..8a3eb1839a3d 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -30,6 +30,7 @@
#include <linux/phylib_stubs.h>
#include <linux/phy_led_triggers.h>
#include <linux/phy_link_topology.h>
+#include <linux/phy_port.h>
#include <linux/pse-pd/pse.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
@@ -48,9 +49,6 @@ MODULE_DESCRIPTION("PHY library");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
-#define PHY_ANY_ID "MATCH ANY PHY"
-#define PHY_ANY_UID 0xffffffff
-
struct phy_fixup {
struct list_head list;
char bus_id[MII_BUS_ID_SIZE + 3];
@@ -431,11 +429,10 @@ static SIMPLE_DEV_PM_OPS(mdio_bus_phy_pm_ops, mdio_bus_phy_suspend,
/**
* phy_register_fixup - creates a new phy_fixup and adds it to the list
- * @bus_id: A string which matches phydev->mdio.dev.bus_id (or PHY_ANY_ID)
+ * @bus_id: A string which matches phydev->mdio.dev.bus_id (or NULL)
* @phy_uid: Used to match against phydev->phy_id (the UID of the PHY)
- * It can also be PHY_ANY_UID
* @phy_uid_mask: Applied to phydev->phy_id and fixup->phy_uid before
- * comparison
+ * comparison (or 0 to disable id-based matching)
* @run: The actual code to be run when a matching PHY is found
*/
static int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
@@ -446,7 +443,8 @@ static int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
if (!fixup)
return -ENOMEM;
- strscpy(fixup->bus_id, bus_id, sizeof(fixup->bus_id));
+ if (bus_id)
+ strscpy(fixup->bus_id, bus_id, sizeof(fixup->bus_id));
fixup->phy_uid = phy_uid;
fixup->phy_uid_mask = phy_uid_mask;
fixup->run = run;
@@ -462,7 +460,7 @@ static int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
int (*run)(struct phy_device *))
{
- return phy_register_fixup(PHY_ANY_ID, phy_uid, phy_uid_mask, run);
+ return phy_register_fixup(NULL, phy_uid, phy_uid_mask, run);
}
EXPORT_SYMBOL(phy_register_fixup_for_uid);
@@ -470,71 +468,20 @@ EXPORT_SYMBOL(phy_register_fixup_for_uid);
int phy_register_fixup_for_id(const char *bus_id,
int (*run)(struct phy_device *))
{
- return phy_register_fixup(bus_id, PHY_ANY_UID, 0xffffffff, run);
+ return phy_register_fixup(bus_id, 0, 0, run);
}
EXPORT_SYMBOL(phy_register_fixup_for_id);
-/**
- * phy_unregister_fixup - remove a phy_fixup from the list
- * @bus_id: A string matches fixup->bus_id (or PHY_ANY_ID) in phy_fixup_list
- * @phy_uid: A phy id matches fixup->phy_id (or PHY_ANY_UID) in phy_fixup_list
- * @phy_uid_mask: Applied to phy_uid and fixup->phy_uid before comparison
- */
-int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask)
+static bool phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
{
- struct list_head *pos, *n;
- struct phy_fixup *fixup;
- int ret;
-
- ret = -ENODEV;
-
- mutex_lock(&phy_fixup_lock);
- list_for_each_safe(pos, n, &phy_fixup_list) {
- fixup = list_entry(pos, struct phy_fixup, list);
-
- if ((!strcmp(fixup->bus_id, bus_id)) &&
- phy_id_compare(fixup->phy_uid, phy_uid, phy_uid_mask)) {
- list_del(&fixup->list);
- kfree(fixup);
- ret = 0;
- break;
- }
- }
- mutex_unlock(&phy_fixup_lock);
+ if (!strcmp(fixup->bus_id, phydev_name(phydev)))
+ return true;
- return ret;
-}
-EXPORT_SYMBOL(phy_unregister_fixup);
-
-/* Unregisters a fixup of any PHY with the UID in phy_uid */
-int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask)
-{
- return phy_unregister_fixup(PHY_ANY_ID, phy_uid, phy_uid_mask);
-}
-EXPORT_SYMBOL(phy_unregister_fixup_for_uid);
+ if (fixup->phy_uid_mask &&
+ phy_id_compare(phydev->phy_id, fixup->phy_uid, fixup->phy_uid_mask))
+ return true;
-/* Unregisters a fixup of the PHY with id string bus_id */
-int phy_unregister_fixup_for_id(const char *bus_id)
-{
- return phy_unregister_fixup(bus_id, PHY_ANY_UID, 0xffffffff);
-}
-EXPORT_SYMBOL(phy_unregister_fixup_for_id);
-
-/* Returns 1 if fixup matches phydev in bus_id and phy_uid.
- * Fixups can be set to match any in one or more fields.
- */
-static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
-{
- if (strcmp(fixup->bus_id, phydev_name(phydev)) != 0)
- if (strcmp(fixup->bus_id, PHY_ANY_ID) != 0)
- return 0;
-
- if (!phy_id_compare(phydev->phy_id, fixup->phy_uid,
- fixup->phy_uid_mask))
- if (fixup->phy_uid != PHY_ANY_UID)
- return 0;
-
- return 1;
+ return false;
}
/* Runs any matching fixups for this phydev */
@@ -845,6 +792,13 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
dev->state = PHY_DOWN;
INIT_LIST_HEAD(&dev->leds);
+ INIT_LIST_HEAD(&dev->ports);
+
+ /* The driver's probe function must update this to the real number
+ * of ports the PHY provides. By default, we assume a single-port
+ * PHY.
+ */
+ dev->max_n_ports = 1;
mutex_init(&dev->lock);
INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
@@ -1524,7 +1478,7 @@ static DEVICE_ATTR_RO(phy_standalone);
*
* Return: 0 on success, otherwise a negative error code.
*/
-int phy_sfp_connect_phy(void *upstream, struct phy_device *phy)
+static int phy_sfp_connect_phy(void *upstream, struct phy_device *phy)
{
struct phy_device *phydev = upstream;
struct net_device *dev = phydev->attached_dev;
@@ -1534,7 +1488,6 @@ int phy_sfp_connect_phy(void *upstream, struct phy_device *phy)
return 0;
}
-EXPORT_SYMBOL(phy_sfp_connect_phy);
/**
* phy_sfp_disconnect_phy - Disconnect the SFP module's PHY from the upstream PHY
@@ -1546,7 +1499,7 @@ EXPORT_SYMBOL(phy_sfp_connect_phy);
* will be destroyed, re-inserting the same module will add a new phy with a
* new index.
*/
-void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy)
+static void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy)
{
struct phy_device *phydev = upstream;
struct net_device *dev = phydev->attached_dev;
@@ -1554,7 +1507,6 @@ void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy)
if (dev)
phy_link_topo_del_phy(dev, phy);
}
-EXPORT_SYMBOL(phy_sfp_disconnect_phy);
/**
* phy_sfp_attach - attach the SFP bus to the PHY upstream network device
@@ -1563,7 +1515,7 @@ EXPORT_SYMBOL(phy_sfp_disconnect_phy);
*
* This is used to fill in the sfp_upstream_ops .attach member.
*/
-void phy_sfp_attach(void *upstream, struct sfp_bus *bus)
+static void phy_sfp_attach(void *upstream, struct sfp_bus *bus)
{
struct phy_device *phydev = upstream;
@@ -1571,7 +1523,6 @@ void phy_sfp_attach(void *upstream, struct sfp_bus *bus)
phydev->attached_dev->sfp_bus = bus;
phydev->sfp_bus_attached = true;
}
-EXPORT_SYMBOL(phy_sfp_attach);
/**
* phy_sfp_detach - detach the SFP bus from the PHY upstream network device
@@ -1580,7 +1531,7 @@ EXPORT_SYMBOL(phy_sfp_attach);
*
* This is used to fill in the sfp_upstream_ops .detach member.
*/
-void phy_sfp_detach(void *upstream, struct sfp_bus *bus)
+static void phy_sfp_detach(void *upstream, struct sfp_bus *bus)
{
struct phy_device *phydev = upstream;
@@ -1588,15 +1539,164 @@ void phy_sfp_detach(void *upstream, struct sfp_bus *bus)
phydev->attached_dev->sfp_bus = NULL;
phydev->sfp_bus_attached = false;
}
-EXPORT_SYMBOL(phy_sfp_detach);
+
+static int phy_sfp_module_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ struct phy_device *phydev = upstream;
+ const struct sfp_module_caps *caps;
+ struct phy_port *port;
+ phy_interface_t iface;
+
+ linkmode_zero(sfp_support);
+
+ port = phy_get_sfp_port(phydev);
+ if (!port)
+ return -EINVAL;
+
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+
+ linkmode_and(sfp_support, port->supported, caps->link_modes);
+ if (linkmode_empty(sfp_support)) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted, no common linkmode\n");
+ return -EINVAL;
+ }
+
+ iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+ if (iface == PHY_INTERFACE_MODE_NA) {
+ dev_err(&phydev->mdio.dev, "PHY %s does not support the SFP module's requested MII interfaces\n",
+ phydev_name(phydev));
+ return -EINVAL;
+ }
+
+ if (phydev->n_ports == 1)
+ phydev->port = caps->port;
+
+ if (port->ops && port->ops->configure_mii)
+ return port->ops->configure_mii(port, true, iface);
+
+ return 0;
+}
+
+static void phy_sfp_module_remove(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+ struct phy_port *port = phy_get_sfp_port(phydev);
+
+ if (port && port->ops && port->ops->configure_mii)
+ port->ops->configure_mii(port, false, PHY_INTERFACE_MODE_NA);
+
+ if (phydev->n_ports == 1)
+ phydev->port = PORT_NONE;
+}
+
+static void phy_sfp_link_up(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+ struct phy_port *port = phy_get_sfp_port(phydev);
+
+ if (port && port->ops && port->ops->link_up)
+ port->ops->link_up(port);
+}
+
+static void phy_sfp_link_down(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+ struct phy_port *port = phy_get_sfp_port(phydev);
+
+ if (port && port->ops && port->ops->link_down)
+ port->ops->link_down(port);
+}
+
+static const struct sfp_upstream_ops sfp_phydev_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = phy_sfp_module_insert,
+ .module_remove = phy_sfp_module_remove,
+ .link_up = phy_sfp_link_up,
+ .link_down = phy_sfp_link_down,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
+};
+
+static int phy_add_port(struct phy_device *phydev, struct phy_port *port)
+{
+ int ret = 0;
+
+ if (phydev->n_ports == phydev->max_n_ports)
+ return -EBUSY;
+
+ /* All ports are active by default; PHY drivers may deactivate
+ * the unused ones.
+ */
+ port->active = true;
+
+ if (port->is_mii) {
+ if (phydev->drv && phydev->drv->attach_mii_port)
+ ret = phydev->drv->attach_mii_port(phydev, port);
+ } else {
+ if (phydev->drv && phydev->drv->attach_mdi_port)
+ ret = phydev->drv->attach_mdi_port(phydev, port);
+ }
+
+ if (ret)
+ return ret;
+
+ /* The PHY driver might have added, removed or set medium/pairs info,
+ * so update the port supported accordingly.
+ */
+ phy_port_update_supported(port);
+
+ list_add(&port->head, &phydev->ports);
+
+ phydev->n_ports++;
+
+ return 0;
+}
+
+static void phy_del_port(struct phy_device *phydev, struct phy_port *port)
+{
+ if (!phydev->n_ports)
+ return;
+
+ list_del(&port->head);
+
+ phydev->n_ports--;
+}
+
+static int phy_setup_sfp_port(struct phy_device *phydev)
+{
+ struct phy_port *port = phy_port_alloc();
+ int ret;
+
+ if (!port)
+ return -ENOMEM;
+
+ port->parent_type = PHY_PORT_PHY;
+ port->phy = phydev;
+
+ /* The PHY is a media converter; the port connected to the SFP cage
+ * is an MII port.
+ */
+ port->is_mii = true;
+ port->is_sfp = true;
+
+ /* The port->supported and port->interfaces lists will be populated
+ * when attaching the port to the phydev.
+ */
+ ret = phy_add_port(phydev, port);
+ if (ret)
+ phy_port_destroy(port);
+
+ return ret;
+}
/**
* phy_sfp_probe - probe for a SFP cage attached to this PHY device
* @phydev: Pointer to phy_device
- * @ops: SFP's upstream operations
*/
-int phy_sfp_probe(struct phy_device *phydev,
- const struct sfp_upstream_ops *ops)
+static int phy_sfp_probe(struct phy_device *phydev)
{
struct sfp_bus *bus;
int ret = 0;
@@ -1608,12 +1708,15 @@ int phy_sfp_probe(struct phy_device *phydev,
phydev->sfp_bus = bus;
- ret = sfp_bus_add_upstream(bus, phydev, ops);
+ ret = sfp_bus_add_upstream(bus, phydev, &sfp_phydev_ops);
sfp_bus_put(bus);
}
+
+ if (!ret && phydev->sfp_bus)
+ ret = phy_setup_sfp_port(phydev);
+
return ret;
}
-EXPORT_SYMBOL(phy_sfp_probe);
static bool phy_drv_supports_irq(const struct phy_driver *phydrv)
{
@@ -2369,7 +2472,7 @@ int genphy_update_link(struct phy_device *phydev)
/* The link state is latched low so that momentary link
* drops can be detected. Do not double-read the status
* in polling mode to detect such short link drops except
- * the link was already down.
+ * if the link was already down.
*/
if (!phy_polling_mode(phydev) || !phydev->link) {
status = phy_read(phydev, MII_BMSR);
@@ -3325,6 +3428,161 @@ exit:
return 0;
}
+static void phy_cleanup_ports(struct phy_device *phydev)
+{
+ struct phy_port *tmp, *port;
+
+ list_for_each_entry_safe(port, tmp, &phydev->ports, head) {
+ phy_del_port(phydev, port);
+ phy_port_destroy(port);
+ }
+}
+
+static int phy_default_setup_single_port(struct phy_device *phydev)
+{
+ struct phy_port *port = phy_port_alloc();
+ unsigned long mode;
+
+ if (!port)
+ return -ENOMEM;
+
+ port->parent_type = PHY_PORT_PHY;
+ port->phy = phydev;
+
+ /* Let the PHY driver know that this port was never described anywhere.
+ * This is the usual case, where we assume a single-port PHY device
+ * with no SFP. The port then supports exactly the same modes as the
+ * PHY itself.
+ *
+ * However, this can also happen with a combo-port PHY where only one
+ * port is described in DT, through SFP for example.
+ *
+ * In that case, the PHY driver is in charge of declaring what can be
+ * done on the non-represented port.
+ */
+ port->not_described = true;
+ linkmode_copy(port->supported, phydev->supported);
+ port->mediums = phy_caps_mediums_from_linkmodes(port->supported);
+
+ for_each_set_bit(mode, port->supported, __ETHTOOL_LINK_MODE_MASK_NBITS)
+ port->pairs = max_t(int, port->pairs,
+ ethtool_linkmode_n_pairs(mode));
+
+ phy_add_port(phydev, port);
+
+ return 0;
+}
+
+static int of_phy_ports(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct device_node *mdi;
+ struct phy_port *port;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ if (!node)
+ return 0;
+
+ mdi = of_get_child_by_name(node, "mdi");
+ if (!mdi)
+ return 0;
+
+ for_each_available_child_of_node_scoped(mdi, port_node) {
+ port = phy_of_parse_port(port_node);
+ if (IS_ERR(port)) {
+ err = PTR_ERR(port);
+ goto out_err;
+ }
+
+ port->parent_type = PHY_PORT_PHY;
+ port->phy = phydev;
+ err = phy_add_port(phydev, port);
+ if (err) {
+ phy_port_destroy(port);
+ goto out_err;
+ }
+ }
+ of_node_put(mdi);
+
+ return 0;
+
+out_err:
+ phy_cleanup_ports(phydev);
+ of_node_put(mdi);
+ return err;
+}
+
+static int phy_setup_ports(struct phy_device *phydev)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(ports_supported);
+ struct phy_port *port;
+ int ret;
+
+ ret = of_phy_ports(phydev);
+ if (ret)
+ return ret;
+
+ ret = phy_sfp_probe(phydev);
+ if (ret)
+ goto out;
+
+ if (phydev->n_ports < phydev->max_n_ports) {
+ ret = phy_default_setup_single_port(phydev);
+ if (ret)
+ goto out;
+ }
+
+ linkmode_zero(ports_supported);
+
+ /* Aggregate the supported modes, which are made up of:
+ * - what the PHY itself supports
+ * - what all active ports combined support
+ */
+ list_for_each_entry(port, &phydev->ports, head)
+ if (port->active)
+ linkmode_or(ports_supported, ports_supported,
+ port->supported);
+
+ if (!linkmode_empty(ports_supported))
+ linkmode_and(phydev->supported, phydev->supported,
+ ports_supported);
+
+ /* For now, phydev->port is set to the first active port's type */
+ list_for_each_entry(port, &phydev->ports, head)
+ if (port->active) {
+ phydev->port = phy_port_get_type(port);
+ break;
+ }
+
+ return 0;
+
+out:
+ phy_cleanup_ports(phydev);
+ return ret;
+}
+
+/**
+ * phy_get_sfp_port() - Returns the first valid SFP port of a PHY
+ * @phydev: pointer to the PHY device to get the SFP port from
+ *
+ * Returns: The first active SFP (SerDes) port of a PHY device, or NULL if
+ * none exists.
+ */
+struct phy_port *phy_get_sfp_port(struct phy_device *phydev)
+{
+ struct phy_port *port;
+
+ list_for_each_entry(port, &phydev->ports, head)
+ if (port->active && port->is_sfp)
+ return port;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(phy_get_sfp_port);
+
/**
* fwnode_mdio_find_device - Given a fwnode, find the mdio_device
* @fwnode: pointer to the mdio_device's fwnode
@@ -3462,6 +3720,11 @@ static int phy_probe(struct device *dev)
phydev->is_gigabit_capable = 1;
of_set_phy_supported(phydev);
+
+ err = phy_setup_ports(phydev);
+ if (err)
+ goto out;
+
phy_advertise_supported(phydev);
/* Get PHY default EEE advertising modes and handle them as potentially
@@ -3537,6 +3800,8 @@ static int phy_remove(struct device *dev)
phydev->state = PHY_DOWN;
+ phy_cleanup_ports(phydev);
+
sfp_bus_del_upstream(phydev->sfp_bus);
phydev->sfp_bus = NULL;
diff --git a/drivers/net/phy/phy_port.c b/drivers/net/phy/phy_port.c
new file mode 100644
index 000000000000..ec93c8ca051e
--- /dev/null
+++ b/drivers/net/phy/phy_port.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Framework to drive Ethernet ports
+ *
+ * Copyright (c) 2024 Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+
+#include <linux/linkmode.h>
+#include <linux/of.h>
+#include <linux/phy_port.h>
+
+#include "phy-caps.h"
+
+/**
+ * phy_port_alloc() - Allocate a new phy_port
+ *
+ * Returns: a newly allocated struct phy_port, or NULL.
+ */
+struct phy_port *phy_port_alloc(void)
+{
+ struct phy_port *port;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return NULL;
+
+ linkmode_zero(port->supported);
+ INIT_LIST_HEAD(&port->head);
+
+ return port;
+}
+EXPORT_SYMBOL_GPL(phy_port_alloc);
+
+/**
+ * phy_port_destroy() - Free a struct phy_port
+ * @port: The port to destroy
+ */
+void phy_port_destroy(struct phy_port *port)
+{
+ kfree(port);
+}
+EXPORT_SYMBOL_GPL(phy_port_destroy);
+
+/**
+ * phy_of_parse_port() - Create a phy_port from a firmware representation
+ * @dn: device_node representation of the port, following the
+ * ethernet-connector.yaml binding
+ *
+ * Returns: a newly allocated and initialized phy_port pointer, or an ERR_PTR.
+ */
+struct phy_port *phy_of_parse_port(struct device_node *dn)
+{
+ struct fwnode_handle *fwnode = of_fwnode_handle(dn);
+ enum ethtool_link_medium medium;
+ struct phy_port *port;
+ const char *med_str;
+ u32 pairs = 0, mediums = 0;
+ int ret;
+
+ ret = fwnode_property_read_string(fwnode, "media", &med_str);
+ if (ret)
+ return ERR_PTR(ret);
+
+ medium = ethtool_str_to_medium(med_str);
+ if (medium == ETHTOOL_LINK_MEDIUM_NONE)
+ return ERR_PTR(-EINVAL);
+
+ if (medium == ETHTOOL_LINK_MEDIUM_BASET) {
+ ret = fwnode_property_read_u32(fwnode, "pairs", &pairs);
+ if (ret)
+ return ERR_PTR(ret);
+
+ switch (pairs) {
+ case 1: /* BaseT1 */
+ case 2: /* 100BaseTX */
+ case 4:
+ break;
+ default:
+ pr_err("%u is not a valid number of pairs\n", pairs);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ if (pairs && medium != ETHTOOL_LINK_MEDIUM_BASET) {
+ pr_err("pairs property is only compatible with BaseT medium\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mediums |= BIT(medium);
+
+ if (!mediums)
+ return ERR_PTR(-EINVAL);
+
+ port = phy_port_alloc();
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ port->pairs = pairs;
+ port->mediums = mediums;
+
+ return port;
+}
+EXPORT_SYMBOL_GPL(phy_of_parse_port);
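[editor's note: for reference, a device-tree fragment this parser would accept might look as follows. This is a sketch against the ethernet-connector.yaml binding mentioned above; the "base-t" media string and the node names are assumptions, not taken from the patch:

	ethernet-phy@0 {
		reg = <0>;

		mdi {
			port-0 {
				media = "base-t";
				pairs = <4>;
			};
		};
	};

The "pairs" property is only parsed (and required) when the medium is BaseT, and only 1, 2 or 4 pairs are accepted.]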
+
+/**
+ * phy_port_update_supported() - Setup the port->supported field
+ * @port: the port to update
+ *
+ * Once the port's medium list and number of pairs have been configured based
+ * on firmware, straps and vendor-specific properties, this function may be
+ * called to update the port's supported linkmodes list.
+ *
+ * Any mode that was manually set in the port's supported list remains set.
+ */
+void phy_port_update_supported(struct phy_port *port)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0 };
+ unsigned long mode;
+ int i;
+
+ for_each_set_bit(i, &port->mediums, __ETHTOOL_LINK_MEDIUM_LAST) {
+ linkmode_zero(supported);
+ phy_caps_medium_get_supported(supported, i, port->pairs);
+ linkmode_or(port->supported, port->supported, supported);
+ }
+
+ /* If no pair count was specified, derive it as the maximum of the
+ * default pair counts of each supported linkmode.
+ */
+ if (!port->pairs)
+ for_each_set_bit(mode, port->supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS)
+ port->pairs = max_t(int, port->pairs,
+ ethtool_linkmode_n_pairs(mode));
+
+ /* SerDes ports supported through SFP may not have any medium set,
+ * as they output PHY_INTERFACE_MODE_XXX modes. In that case, derive
+ * the supported list from these interfaces.
+ */
+ */
+ if (port->is_mii && !port->mediums) {
+ unsigned long interface, link_caps = 0;
+
+ /* Get each interface's caps */
+ for_each_set_bit(interface, port->interfaces,
+ PHY_INTERFACE_MODE_MAX)
+ link_caps |= phy_caps_from_interface(interface);
+
+ phy_caps_linkmodes(link_caps, port->supported);
+ }
+}
+EXPORT_SYMBOL_GPL(phy_port_update_supported);
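[editor's note: a sketch of the intended call order from a PHY driver's perspective: the driver adjusts mediums/pairs in its attach callback, and phy_add_port() then calls phy_port_update_supported() to derive the linkmodes. The callback body is hypothetical:

static int example_attach_mdi_port(struct phy_device *phydev,
				   struct phy_port *port)
{
	/* Straps say this package is wired for 2-pair BaseT only */
	port->mediums = BIT(ETHTOOL_LINK_MEDIUM_BASET);
	port->pairs = 2;

	/* port->supported is recomputed by the caller after this */
	return 0;
}
]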
+
+/**
+ * phy_port_filter_supported() - Make sure that port->supported matches port->mediums
+ * @port: The port to filter
+ *
+ * After updating a port's mediums to a more restricted subset, this helper will
+ * make sure that port->supported only contains linkmodes that are compatible
+ * with port->mediums.
+ */
+static void phy_port_filter_supported(struct phy_port *port)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0 };
+ int i;
+
+ for_each_set_bit(i, &port->mediums, __ETHTOOL_LINK_MEDIUM_LAST)
+ phy_caps_medium_get_supported(supported, i, port->pairs);
+
+ linkmode_and(port->supported, port->supported, supported);
+}
+
+/**
+ * phy_port_restrict_mediums - Mask away some of the port's supported mediums
+ * @port: The port to act upon
+ * @mediums: A mask of mediums to support on the port
+ *
+ * This helper allows removing some mediums from a port's list of supported
+ * mediums, typically once enough information about the port is available to
+ * know its nature.
+ *
+ * Returns: 0 if the change was done correctly, a negative value otherwise.
+ */
+int phy_port_restrict_mediums(struct phy_port *port, unsigned long mediums)
+{
+ /* We forbid ending up with a port with no mediums at all */
+ if (!(port->mediums & mediums))
+ return -EINVAL;
+
+ port->mediums &= mediums;
+
+ phy_port_filter_supported(port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phy_port_restrict_mediums);
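[editor's note: a sketch of a driver narrowing a port once the hardware is known better; the constants are the ones used elsewhere in this patch, the function name is hypothetical:

static int example_copper_only(struct phy_port *port)
{
	/* Keep only the BaseT medium; fails with -EINVAL if the port
	 * did not support it in the first place.
	 */
	return phy_port_restrict_mediums(port,
					 BIT(ETHTOOL_LINK_MEDIUM_BASET));
}
]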
+
+/**
+ * phy_port_get_type() - get the PORT_* attribute for that port.
+ * @port: The port we want the information from
+ *
+ * Returns: A PORT_XXX value.
+ */
+int phy_port_get_type(struct phy_port *port)
+{
+ if (port->mediums & BIT(ETHTOOL_LINK_MEDIUM_BASET))
+ return PORT_TP;
+
+ if (phy_port_is_fiber(port))
+ return PORT_FIBRE;
+
+ return PORT_OTHER;
+}
+EXPORT_SYMBOL_GPL(phy_port_get_type);
diff --git a/drivers/net/phy/phylib-internal.h b/drivers/net/phy/phylib-internal.h
index ebda74eb60a5..dc9592c6bb8e 100644
--- a/drivers/net/phy/phylib-internal.h
+++ b/drivers/net/phy/phylib-internal.h
@@ -7,7 +7,6 @@
#define __PHYLIB_INTERNAL_H
struct phy_device;
-struct mii_bus;
/*
* phy_supported_speeds - return all speeds currently supported by a PHY device
@@ -21,11 +20,6 @@ void of_set_phy_timing_role(struct phy_device *phydev);
int phy_speed_down_core(struct phy_device *phydev);
void phy_check_downshift(struct phy_device *phydev);
-int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
- int devad, u32 regnum);
-int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
- int devad, u32 regnum, u16 val);
-
int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv);
#endif /* __PHYLIB_INTERNAL_H */
diff --git a/drivers/net/phy/phylib.h b/drivers/net/phy/phylib.h
index c15484a805b3..0fba245f9745 100644
--- a/drivers/net/phy/phylib.h
+++ b/drivers/net/phy/phylib.h
@@ -8,6 +8,7 @@
struct device_node;
struct phy_device;
+struct mii_bus;
struct device_node *phy_package_get_node(struct phy_device *phydev);
void *phy_package_get_priv(struct phy_device *phydev);
@@ -30,5 +31,9 @@ int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
int base_addr, size_t priv_size);
int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
size_t priv_size);
+int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum);
+int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum, u16 val);
#endif /* __PHYLIB_H */
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 43d8380aaefb..e1f01d7fc4da 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -28,6 +28,7 @@ enum {
PHYLINK_DISABLE_STOPPED,
PHYLINK_DISABLE_LINK,
PHYLINK_DISABLE_MAC_WOL,
+ PHYLINK_DISABLE_REPLAY,
PCS_STATE_DOWN = 0,
PCS_STATE_STARTING,
@@ -77,6 +78,7 @@ struct phylink {
bool link_failed;
bool suspend_link_up;
+ bool force_major_config;
bool major_config_failed;
bool mac_supports_eee_ops;
bool mac_supports_eee;
@@ -311,6 +313,7 @@ static struct {
{ MAC_400000FD, SPEED_400000, DUPLEX_FULL, BIT(LINK_CAPA_400000FD) },
{ MAC_200000FD, SPEED_200000, DUPLEX_FULL, BIT(LINK_CAPA_200000FD) },
{ MAC_100000FD, SPEED_100000, DUPLEX_FULL, BIT(LINK_CAPA_100000FD) },
+ { MAC_80000FD, SPEED_80000, DUPLEX_FULL, BIT(LINK_CAPA_80000FD) },
{ MAC_56000FD, SPEED_56000, DUPLEX_FULL, BIT(LINK_CAPA_56000FD) },
{ MAC_50000FD, SPEED_50000, DUPLEX_FULL, BIT(LINK_CAPA_50000FD) },
{ MAC_40000FD, SPEED_40000, DUPLEX_FULL, BIT(LINK_CAPA_40000FD) },
@@ -1290,7 +1293,8 @@ static void phylink_major_config(struct phylink *pl, bool restart,
if (pl->pcs)
pl->pcs->phylink = NULL;
- pcs->phylink = pl;
+ if (pcs)
+ pcs->phylink = pl;
pl->pcs = pcs;
}
@@ -1683,18 +1687,18 @@ static void phylink_resolve(struct work_struct *w)
if (pl->act_link_an_mode != MLO_AN_FIXED)
phylink_apply_manual_flow(pl, &link_state);
- if (mac_config) {
- if (link_state.interface != pl->link_config.interface) {
- /* The interface has changed, force the link down and
- * then reconfigure.
- */
- if (cur_link_state) {
- phylink_link_down(pl);
- cur_link_state = false;
- }
- phylink_major_config(pl, false, &link_state);
- pl->link_config.interface = link_state.interface;
+ if ((mac_config && link_state.interface != pl->link_config.interface) ||
+ pl->force_major_config) {
+ /* The interface has changed or a forced major configuration
+ * was requested, so force the link down and then reconfigure.
+ */
+ if (cur_link_state) {
+ phylink_link_down(pl);
+ cur_link_state = false;
}
+ phylink_major_config(pl, false, &link_state);
+ pl->link_config.interface = link_state.interface;
+ pl->force_major_config = false;
}
/* If configuration of the interface failed, force the link down
@@ -4358,6 +4362,57 @@ void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
}
EXPORT_SYMBOL_GPL(phylink_mii_c45_pcs_get_state);
+/**
+ * phylink_replay_link_begin() - begin replay of link callbacks for driver
+ * which loses state
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Helper for MAC drivers which may perform a destructive reset at runtime.
+ * Both the driver's own mac_link_down() method and the pcs_link_down()
+ * method of the split PCS (if any) are called.
+ *
+ * This is similar to phylink_stop(), except it does not alter the state of
+ * the phylib PHY (it is assumed that it is not affected by the MAC destructive
+ * reset).
+ */
+void phylink_replay_link_begin(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_REPLAY);
+}
+EXPORT_SYMBOL_GPL(phylink_replay_link_begin);
+
+/**
+ * phylink_replay_link_end() - end replay of link callbacks for driver
+ * which lost state
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Helper for MAC drivers which may perform a destructive reset at runtime.
+ * Both the driver's own mac_config() and mac_link_up() methods, as well as
+ * the pcs_config() and pcs_link_up() methods of the split PCS (if any), are
+ * called.
+ *
+ * This is similar to phylink_start(), except it does not alter the state of
+ * the phylib PHY.
+ *
+ * One must call this method only within the same rtnl_lock() critical section
+ * as the prior phylink_replay_link_begin().
+ */
+void phylink_replay_link_end(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (WARN(!test_bit(PHYLINK_DISABLE_REPLAY,
+ &pl->phylink_disable_state),
+ "phylink_replay_link_end() called without a prior phylink_replay_link_begin()\n"))
+ return;
+
+ pl->force_major_config = true;
+ phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_REPLAY);
+ flush_work(&pl->resolve);
+}
+EXPORT_SYMBOL_GPL(phylink_replay_link_end);
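[editor's note: a sketch of the intended pairing in a MAC driver whose reset wipes MAC and PCS state; the driver structure and reset helper are hypothetical:

static int example_destructive_reset(struct example_priv *priv)
{
	int err;

	rtnl_lock();
	/* Replays mac_link_down()/pcs_link_down() */
	phylink_replay_link_begin(priv->phylink);

	err = example_hw_reset(priv);

	/* Forces a major config, then the link-up callbacks if needed */
	phylink_replay_link_end(priv->phylink);
	rtnl_unlock();

	return err;
}
]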
+
static int __init phylink_init(void)
{
for (int i = 0; i < ARRAY_SIZE(phylink_sfp_interface_preference); ++i)
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index 338acd11a9b6..2995b08bac96 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -20,7 +20,7 @@
#include <linux/of.h>
#include <linux/phylink.h>
#include <linux/reset.h>
-#include <linux/sfp.h>
+#include <linux/phy_port.h>
#include <dt-bindings/net/qca-ar803x.h>
#include "qcom.h"
@@ -769,57 +769,44 @@ static int at8031_register_regulators(struct phy_device *phydev)
return 0;
}
-static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+static int at803x_configure_mii(struct phy_port *port, bool enable,
+ phy_interface_t interface)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
- struct phy_device *phydev = upstream;
- const struct sfp_module_caps *caps;
- phy_interface_t iface;
-
- linkmode_zero(phy_support);
- phylink_set(phy_support, 1000baseX_Full);
- phylink_set(phy_support, 1000baseT_Full);
- phylink_set(phy_support, Autoneg);
- phylink_set(phy_support, Pause);
- phylink_set(phy_support, Asym_Pause);
-
- caps = sfp_get_module_caps(phydev->sfp_bus);
- /* Some modules support 10G modes as well as others we support.
- * Mask out non-supported modes so the correct interface is picked.
- */
- linkmode_and(sfp_support, phy_support, caps->link_modes);
+ struct phy_device *phydev = port_phydev(port);
- if (linkmode_empty(sfp_support)) {
- dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
- return -EINVAL;
- }
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ dev_warn(&phydev->mdio.dev,
+ "module may not function if 1000Base-X not supported\n");
+
+ return 0;
+}
- iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+static const struct phy_port_ops at803x_port_ops = {
+ .configure_mii = at803x_configure_mii,
+};
- /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
- * interface for use with SFP modules.
- * However, some copper modules detected as having a preferred SGMII
- * interface do default to and function in 1000Base-X mode, so just
- * print a warning and allow such modules, as they may have some chance
- * of working.
+static int at8031_attach_mii_port(struct phy_device *phydev,
+ struct phy_port *port)
+{
+ linkmode_zero(port->supported);
+ phylink_set(port->supported, 1000baseX_Full);
+ phylink_set(port->supported, 1000baseT_Full);
+ phylink_set(port->supported, Autoneg);
+ phylink_set(port->supported, Pause);
+ phylink_set(port->supported, Asym_Pause);
+
+ /* This device doesn't really support SGMII. However, do our best
+ * to be compatible with copper modules (which usually require SGMII),
+ * in a degraded mode, as we only allow 1000BaseT Full.
+ */
- if (iface == PHY_INTERFACE_MODE_SGMII)
- dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
- else if (iface != PHY_INTERFACE_MODE_1000BASEX)
- return -EINVAL;
+ __set_bit(PHY_INTERFACE_MODE_SGMII, port->interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, port->interfaces);
+
+ port->ops = &at803x_port_ops;
return 0;
}
-static const struct sfp_upstream_ops at8031_sfp_ops = {
- .attach = phy_sfp_attach,
- .detach = phy_sfp_detach,
- .module_insert = at8031_sfp_insert,
- .connect_phy = phy_sfp_connect_phy,
- .disconnect_phy = phy_sfp_disconnect_phy,
-};
-
static int at8031_parse_dt(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -840,8 +827,7 @@ static int at8031_parse_dt(struct phy_device *phydev)
return ret;
}
- /* Only AR8031/8033 support 1000Base-X for SFP modules */
- return phy_sfp_probe(phydev, &at8031_sfp_ops);
+ return 0;
}
static int at8031_probe(struct phy_device *phydev)
@@ -1172,6 +1158,7 @@ static struct phy_driver at803x_driver[] = {
.set_tunable = at803x_set_tunable,
.cable_test_start = at8031_cable_test_start,
.cable_test_get_status = at8031_cable_test_get_status,
+ .attach_mii_port = at8031_attach_mii_port,
}, {
/* Qualcomm Atheros AR8032 */
PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 1be8295a95cb..d8f1ce5a7128 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -13,7 +13,7 @@
#include <linux/phy.h>
#include <linux/bitfield.h>
#include <linux/gpio/driver.h>
-#include <linux/sfp.h>
+#include <linux/phy_port.h>
#include "../phylib.h"
#include "qcom.h"
@@ -643,67 +643,54 @@ exit:
return ret;
}
-static int qca807x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+static int qca807x_configure_serdes(struct phy_port *port, bool enable,
+ phy_interface_t interface)
{
- struct phy_device *phydev = upstream;
- const struct sfp_module_caps *caps;
- phy_interface_t iface;
+ struct phy_device *phydev = port_phydev(port);
int ret;
- caps = sfp_get_module_caps(phydev->sfp_bus);
- iface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
+ if (!phydev)
+ return -ENODEV;
- dev_info(&phydev->mdio.dev, "%s SFP module inserted\n", phy_modes(iface));
-
- switch (iface) {
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_100BASEX:
+ if (enable) {
/* Set PHY mode to PSGMII combo (1/4 copper + combo ports) mode */
ret = phy_modify(phydev,
QCA807X_CHIP_CONFIGURATION,
QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK,
QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER);
+ if (ret)
+ return ret;
 /* Enable fiber mode auto-detection (1000Base-X or 100Base-FX) */
ret = phy_set_bits_mmd(phydev,
MDIO_MMD_AN,
QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION,
QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN);
- /* Select fiber page */
- ret = phy_clear_bits(phydev,
- QCA807X_CHIP_CONFIGURATION,
- QCA807X_BT_BX_REG_SEL);
-
- phydev->port = PORT_FIBRE;
- break;
- default:
- dev_err(&phydev->mdio.dev, "Incompatible SFP module inserted\n");
- return -EINVAL;
+ if (ret)
+ return ret;
}
- return ret;
+ phydev->port = enable ? PORT_FIBRE : PORT_TP;
+
+ return phy_modify(phydev, QCA807X_CHIP_CONFIGURATION,
+ QCA807X_BT_BX_REG_SEL,
+ enable ? 0 : QCA807X_BT_BX_REG_SEL);
}
-static void qca807x_sfp_remove(void *upstream)
+static const struct phy_port_ops qca807x_serdes_port_ops = {
+ .configure_mii = qca807x_configure_serdes,
+};
+
+static int qca807x_attach_mii_port(struct phy_device *phydev,
+ struct phy_port *port)
{
- struct phy_device *phydev = upstream;
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, port->interfaces);
+ __set_bit(PHY_INTERFACE_MODE_100BASEX, port->interfaces);
- /* Select copper page */
- phy_set_bits(phydev,
- QCA807X_CHIP_CONFIGURATION,
- QCA807X_BT_BX_REG_SEL);
+ port->ops = &qca807x_serdes_port_ops;
- phydev->port = PORT_TP;
+ return 0;
}
-static const struct sfp_upstream_ops qca807x_sfp_ops = {
- .attach = phy_sfp_attach,
- .detach = phy_sfp_detach,
- .module_insert = qca807x_sfp_insert,
- .module_remove = qca807x_sfp_remove,
- .connect_phy = phy_sfp_connect_phy,
- .disconnect_phy = phy_sfp_disconnect_phy,
-};
-
static int qca807x_probe(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -744,9 +731,8 @@ static int qca807x_probe(struct phy_device *phydev)
 /* Attach SFP bus on combo port */
if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION)) {
- ret = phy_sfp_probe(phydev, &qca807x_sfp_ops);
- if (ret)
- return ret;
+ phydev->max_n_ports = 2;
+
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->advertising);
}
@@ -824,6 +810,7 @@ static struct phy_driver qca807x_drivers[] = {
.get_phy_stats = qca807x_get_phy_stats,
.set_wol = at8031_set_wol,
.get_wol = at803x_get_wol,
+ .attach_mii_port = qca807x_attach_mii_port,
},
{
PHY_ID_MATCH_EXACT(PHY_ID_QCA8075),
@@ -851,6 +838,7 @@ static struct phy_driver qca807x_drivers[] = {
.get_phy_stats = qca807x_get_phy_stats,
.set_wol = at8031_set_wol,
.get_wol = at803x_get_wol,
+ .attach_mii_port = qca807x_attach_mii_port,
},
};
module_phy_driver(qca807x_drivers);
diff --git a/drivers/net/phy/qt2025.rs b/drivers/net/phy/qt2025.rs
index aaaead6512a0..470d89a0ac00 100644
--- a/drivers/net/phy/qt2025.rs
+++ b/drivers/net/phy/qt2025.rs
@@ -9,7 +9,6 @@
//!
//! The QT2025 PHY integrates an Intel 8051 micro-controller.
-use kernel::c_str;
use kernel::error::code;
use kernel::firmware::Firmware;
use kernel::io::poll::read_poll_timeout;
@@ -38,7 +37,7 @@ struct PhyQT2025;
#[vtable]
impl Driver for PhyQT2025 {
- const NAME: &'static CStr = c_str!("QT2025 10Gpbs SFP+");
+ const NAME: &'static CStr = c"QT2025 10Gbps SFP+";
const PHY_DEVICE_ID: phy::DeviceId = phy::DeviceId::new_with_exact_mask(0x0043a400);
fn probe(dev: &mut phy::Device) -> Result<()> {
@@ -71,7 +70,7 @@ impl Driver for PhyQT2025 {
// The micro-controller will start running from the boot ROM.
dev.write(C45::new(Mmd::PCS, 0xe854), 0x00c0)?;
- let fw = Firmware::request(c_str!("qt2025-2.0.3.3.fw"), dev.as_ref())?;
+ let fw = Firmware::request(c"qt2025-2.0.3.3.fw", dev.as_ref())?;
if fw.data().len() > SZ_16K + SZ_8K {
return Err(code::EFBIG);
}
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index 6ff0385201a5..75565fbdbf6d 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -17,7 +17,9 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/string_choices.h>
+#include <net/phy/realtek_phy.h>
+#include "../phylib.h"
#include "realtek.h"
#define RTL8201F_IER 0x13
@@ -66,7 +68,6 @@
#define RTL8211E_DELAY_MASK GENMASK(13, 11)
/* RTL8211F PHY configuration */
-#define RTL8211F_PHYCR_PAGE 0xa43
#define RTL8211F_PHYCR1 0x18
#define RTL8211F_ALDPS_PLL_OFF BIT(1)
#define RTL8211F_ALDPS_ENABLE BIT(2)
@@ -76,7 +77,6 @@
#define RTL8211F_CLKOUT_EN BIT(0)
#define RTL8211F_PHYCR2_PHY_EEE_ENABLE BIT(5)
-#define RTL8211F_INSR_PAGE 0xa43
#define RTL8211F_INSR 0x1d
/* RTL8211F LED configuration */
@@ -131,9 +131,18 @@
#define RTL822X_VND1_SERDES_CTRL3_MODE_SGMII 0x02
#define RTL822X_VND1_SERDES_CTRL3_MODE_2500BASEX 0x16
-/* RTL822X_VND2_XXXXX registers are only accessible when phydev->is_c45
- * is set, they cannot be accessed by C45-over-C22.
- */
+#define RTL822X_VND1_SERDES_CMD 0x7587
+#define RTL822X_VND1_SERDES_CMD_WRITE BIT(1)
+#define RTL822X_VND1_SERDES_CMD_BUSY BIT(0)
+#define RTL822X_VND1_SERDES_ADDR 0x7588
+#define RTL822X_VND1_SERDES_ADDR_AUTONEG 0x2
+#define RTL822X_VND1_SERDES_INBAND_DISABLE 0x71d0
+#define RTL822X_VND1_SERDES_INBAND_ENABLE 0x70d0
+#define RTL822X_VND1_SERDES_DATA 0x7589
+
+#define RTL822X_VND2_TO_PAGE(reg) ((reg) >> 4)
+#define RTL822X_VND2_TO_PAGE_REG(reg) (16 + (((reg) & GENMASK(3, 0)) >> 1))
+#define RTL822X_VND2_TO_C22_REG(reg) (((reg) - 0xa400) / 2)
#define RTL822X_VND2_C22_REG(reg) (0xa400 + 2 * (reg))
#define RTL8221B_VND2_INER 0xa4d2
@@ -168,12 +177,12 @@
#define RTL9000A_GINMR 0x14
#define RTL9000A_GINMR_LINK_STATUS BIT(4)
-#define RTL_VND2_PHYSR 0xa434
-#define RTL_VND2_PHYSR_DUPLEX BIT(3)
-#define RTL_VND2_PHYSR_SPEEDL GENMASK(5, 4)
-#define RTL_VND2_PHYSR_SPEEDH GENMASK(10, 9)
-#define RTL_VND2_PHYSR_MASTER BIT(11)
-#define RTL_VND2_PHYSR_SPEED_MASK (RTL_VND2_PHYSR_SPEEDL | RTL_VND2_PHYSR_SPEEDH)
+#define RTL_PHYSR MII_RESV2
+#define RTL_PHYSR_DUPLEX BIT(3)
+#define RTL_PHYSR_SPEEDL GENMASK(5, 4)
+#define RTL_PHYSR_SPEEDH GENMASK(10, 9)
+#define RTL_PHYSR_MASTER BIT(11)
+#define RTL_PHYSR_SPEED_MASK (RTL_PHYSR_SPEEDL | RTL_PHYSR_SPEEDH)
#define RTL_MDIO_PCS_EEE_ABLE 0xa5c4
#define RTL_MDIO_AN_EEE_ADV 0xa5d0
@@ -322,7 +331,7 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
{
int err;
- err = phy_read_paged(phydev, RTL8211F_INSR_PAGE, RTL8211F_INSR);
+ err = phy_read(phydev, RTL8211F_INSR);
return (err < 0) ? err : 0;
}
@@ -468,7 +477,7 @@ static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev)
{
int irq_status;
- irq_status = phy_read_paged(phydev, RTL8211F_INSR_PAGE, RTL8211F_INSR);
+ irq_status = phy_read(phydev, RTL8211F_INSR);
if (irq_status < 0) {
phy_error(phydev);
return IRQ_NONE;
@@ -659,8 +668,8 @@ static int rtl8211f_config_clk_out(struct phy_device *phydev)
RTL8211FVD_CLKOUT_REG,
RTL8211FVD_CLKOUT_EN, 0);
else
- ret = phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE,
- RTL8211F_PHYCR2, RTL8211F_CLKOUT_EN, 0);
+ ret = phy_modify(phydev, RTL8211F_PHYCR2, RTL8211F_CLKOUT_EN,
+ 0);
if (ret)
return ret;
@@ -685,15 +694,14 @@ static int rtl8211f_config_aldps(struct phy_device *phydev)
if (!priv->enable_aldps)
return 0;
- return phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR1,
- mask, mask);
+ return phy_modify(phydev, RTL8211F_PHYCR1, mask, mask);
}
static int rtl8211f_config_phy_eee(struct phy_device *phydev)
{
/* Disable PHY-mode EEE so LPI is passed to the MAC */
- return phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR2,
- RTL8211F_PHYCR2_PHY_EEE_ENABLE, 0);
+ return phy_modify(phydev, RTL8211F_PHYCR2,
+ RTL8211F_PHYCR2_PHY_EEE_ENABLE, 0);
}
static int rtl8211f_config_init(struct phy_device *phydev)
@@ -759,7 +767,7 @@ static int rtl8211f_suspend(struct phy_device *phydev)
goto err;
/* Read the INSR to clear any pending interrupt */
- phy_read_paged(phydev, RTL8211F_INSR_PAGE, RTL8211F_INSR);
+ phy_read(phydev, RTL8211F_INSR);
/* Reset the WoL to ensure that an event is picked up.
* Unless we do this, even if we receive another packet,
@@ -1092,12 +1100,12 @@ static void rtlgen_decode_physr(struct phy_device *phydev, int val)
* 0: Half Duplex
* 1: Full Duplex
*/
- if (val & RTL_VND2_PHYSR_DUPLEX)
+ if (val & RTL_PHYSR_DUPLEX)
phydev->duplex = DUPLEX_FULL;
else
phydev->duplex = DUPLEX_HALF;
- switch (val & RTL_VND2_PHYSR_SPEED_MASK) {
+ switch (val & RTL_PHYSR_SPEED_MASK) {
case 0x0000:
phydev->speed = SPEED_10;
break;
@@ -1125,7 +1133,7 @@ static void rtlgen_decode_physr(struct phy_device *phydev, int val)
* 1: Master Mode
*/
if (phydev->speed >= 1000) {
- if (val & RTL_VND2_PHYSR_MASTER)
+ if (val & RTL_PHYSR_MASTER)
phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
else
phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
@@ -1145,7 +1153,7 @@ static int rtlgen_read_status(struct phy_device *phydev)
if (!phydev->link)
return 0;
- val = phy_read_paged(phydev, 0xa43, 0x12);
+ val = phy_read(phydev, RTL_PHYSR);
if (val < 0)
return val;
@@ -1238,6 +1246,89 @@ static int rtl822x_probe(struct phy_device *phydev)
return 0;
}
+/* RTL822x cannot access MDIO_MMD_VEND2 via MII_MMD_CTRL/MII_MMD_DATA.
+ * Paged access via a page/register mapping is used instead.
+ * All other MMD devices can be accessed as usual.
+ */
+static int rtl822xb_read_mmd(struct phy_device *phydev, int devnum, u16 reg)
+{
+ int oldpage, ret, read_ret;
+ u16 page;
+
+ /* Use the default method for all MMDs except MDIO_MMD_VEND2, or
+ * whenever Clause-45 access is available.
+ */
+ if (devnum != MDIO_MMD_VEND2 || phydev->is_c45)
+ return mmd_phy_read(phydev->mdio.bus, phydev->mdio.addr,
+ phydev->is_c45, devnum, reg);
+
+ /* Simplify access to C22-registers addressed inside MDIO_MMD_VEND2 */
+ if (reg >= RTL822X_VND2_C22_REG(0) &&
+ reg <= RTL822X_VND2_C22_REG(30))
+ return __phy_read(phydev, RTL822X_VND2_TO_C22_REG(reg));
+
+ /* Use paged access for MDIO_MMD_VEND2 over Clause-22 */
+ page = RTL822X_VND2_TO_PAGE(reg);
+ oldpage = __phy_read(phydev, RTL821x_PAGE_SELECT);
+ if (oldpage < 0)
+ return oldpage;
+
+ if (oldpage != page) {
+ ret = __phy_write(phydev, RTL821x_PAGE_SELECT, page);
+ if (ret < 0)
+ return ret;
+ }
+
+ read_ret = __phy_read(phydev, RTL822X_VND2_TO_PAGE_REG(reg));
+ if (oldpage != page) {
+ ret = __phy_write(phydev, RTL821x_PAGE_SELECT, oldpage);
+ if (ret < 0)
+ return ret;
+ }
+
+ return read_ret;
+}
+
+static int rtl822xb_write_mmd(struct phy_device *phydev, int devnum, u16 reg,
+ u16 val)
+{
+ int oldpage, ret, write_ret;
+ u16 page;
+
+ /* Use the default method for all MMDs except MDIO_MMD_VEND2, or
+ * whenever Clause-45 access is available.
+ */
+ if (devnum != MDIO_MMD_VEND2 || phydev->is_c45)
+ return mmd_phy_write(phydev->mdio.bus, phydev->mdio.addr,
+ phydev->is_c45, devnum, reg, val);
+
+ /* Simplify access to C22-registers addressed inside MDIO_MMD_VEND2 */
+ if (reg >= RTL822X_VND2_C22_REG(0) &&
+ reg <= RTL822X_VND2_C22_REG(30))
+ return __phy_write(phydev, RTL822X_VND2_TO_C22_REG(reg), val);
+
+ /* Use paged access for MDIO_MMD_VEND2 over Clause-22 */
+ page = RTL822X_VND2_TO_PAGE(reg);
+ oldpage = __phy_read(phydev, RTL821x_PAGE_SELECT);
+ if (oldpage < 0)
+ return oldpage;
+
+ if (oldpage != page) {
+ ret = __phy_write(phydev, RTL821x_PAGE_SELECT, page);
+ if (ret < 0)
+ return ret;
+ }
+
+ write_ret = __phy_write(phydev, RTL822X_VND2_TO_PAGE_REG(reg), val);
+ if (oldpage != page) {
+ ret = __phy_write(phydev, RTL821x_PAGE_SELECT, oldpage);
+ if (ret < 0)
+ return ret;
+ }
+
+ return write_ret;
+}
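[editor's note: as a worked example of the mapping above, VND2 register 0xa4d2 (RTL8221B_VND2_INER) resolves to page 0xa4d, register 17: 0xa4d2 >> 4 = 0xa4d, and 16 + ((0xa4d2 & 0xf) >> 1) = 16 + 1 = 17. A direct paged read equivalent to what rtl822xb_read_mmd() does internally, shown only for illustration:

static int example_read_iner_paged(struct phy_device *phydev)
{
	return phy_read_paged(phydev,
			      RTL822X_VND2_TO_PAGE(RTL8221B_VND2_INER),
			      RTL822X_VND2_TO_PAGE_REG(RTL8221B_VND2_INER));
}
]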
+
static int rtl822x_set_serdes_option_mode(struct phy_device *phydev, bool gen1)
{
bool has_2500, has_sgmii;
@@ -1308,6 +1399,51 @@ static int rtl822xb_config_init(struct phy_device *phydev)
return rtl822x_set_serdes_option_mode(phydev, false);
}
+static int rtl822x_serdes_write(struct phy_device *phydev, u16 reg, u16 val)
+{
+ int ret, poll;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, RTL822X_VND1_SERDES_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, RTL822X_VND1_SERDES_DATA, val);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, RTL822X_VND1_SERDES_CMD,
+ RTL822X_VND1_SERDES_CMD_WRITE |
+ RTL822X_VND1_SERDES_CMD_BUSY);
+ if (ret < 0)
+ return ret;
+
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ RTL822X_VND1_SERDES_CMD, poll,
+ !(poll & RTL822X_VND1_SERDES_CMD_BUSY),
+ 500, 100000, false);
+}
+
+static int rtl822x_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ return rtl822x_serdes_write(phydev, RTL822X_VND1_SERDES_ADDR_AUTONEG,
+ (modes != LINK_INBAND_DISABLE) ?
+ RTL822X_VND1_SERDES_INBAND_ENABLE :
+ RTL822X_VND1_SERDES_INBAND_DISABLE);
+}
+
+static unsigned int rtl822x_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return LINK_INBAND_DISABLE;
+ case PHY_INTERFACE_MODE_SGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+ default:
+ return 0;
+ }
+}
+
static int rtl822xb_get_rate_matching(struct phy_device *phydev,
phy_interface_t iface)
{
@@ -1484,7 +1620,8 @@ static int rtl822x_c45_read_status(struct phy_device *phydev)
}
/* Read actual speed from vendor register. */
- val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_VND2_PHYSR);
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ RTL822X_VND2_C22_REG(RTL_PHYSR));
if (val < 0)
return val;
@@ -1741,28 +1878,18 @@ static int rtl8221b_match_phy_device(struct phy_device *phydev,
return phydev->phy_id == RTL_8221B && rtlgen_supports_mmd(phydev);
}
-static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev,
- const struct phy_driver *phydrv)
-{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false);
-}
-
-static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev,
- const struct phy_driver *phydrv)
-{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true);
-}
-
-static int rtl8221b_vm_cg_c22_match_phy_device(struct phy_device *phydev,
- const struct phy_driver *phydrv)
+static int rtl8221b_vb_cg_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, false);
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true) ||
+ rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false);
}
-static int rtl8221b_vm_cg_c45_match_phy_device(struct phy_device *phydev,
- const struct phy_driver *phydrv)
+static int rtl8221b_vm_cg_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, true);
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, true) ||
+ rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, false);
}
static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev,
@@ -1973,10 +2100,49 @@ static irqreturn_t rtl8221b_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
+static int rtlgen_sfp_get_features(struct phy_device *phydev)
+{
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->supported);
+
+ /* set default mode */
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+
+ phydev->port = PORT_FIBRE;
+
+ return 0;
+}
+
+static int rtlgen_sfp_read_status(struct phy_device *phydev)
+{
+ int val, err;
+
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ if (!phydev->link)
+ return 0;
+
+ val = phy_read(phydev, RTL_PHYSR);
+ if (val < 0)
+ return val;
+
+ rtlgen_decode_physr(phydev, val);
+
+ return 0;
+}
+
+static int rtlgen_sfp_config_aneg(struct phy_device *phydev)
+{
+ return 0;
+}
+
static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
- .name = "RTL8201CP Ethernet",
+ .name = "RTL8201CP Ethernet",
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
@@ -2097,126 +2263,140 @@ static struct phy_driver realtek_drvs[] = {
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ .read_mmd = rtl822xb_read_mmd,
+ .write_mmd = rtl822xb_write_mmd,
}, {
.match_phy_device = rtl8221b_match_phy_device,
.name = "RTL8226B_RTL8221B 2.5Gbps PHY",
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
- .config_init = rtl822xb_config_init,
+ .config_init = rtl822xb_config_init,
+ .inband_caps = rtl822x_inband_caps,
+ .config_inband = rtl822x_config_inband,
.get_rate_matching = rtl822xb_get_rate_matching,
.read_status = rtl822xb_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ .read_mmd = rtl822xb_read_mmd,
+ .write_mmd = rtl822xb_write_mmd,
}, {
PHY_ID_MATCH_EXACT(0x001cc838),
- .name = "RTL8226-CG 2.5Gbps PHY",
- .soft_reset = rtl822x_c45_soft_reset,
- .get_features = rtl822x_c45_get_features,
- .config_aneg = rtl822x_c45_config_aneg,
- .config_init = rtl822x_config_init,
- .read_status = rtl822xb_c45_read_status,
- .suspend = genphy_c45_pma_suspend,
- .resume = rtlgen_c45_resume,
+ .name = "RTL8226-CG 2.5Gbps PHY",
+ .soft_reset = rtl822x_c45_soft_reset,
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .config_init = rtl822x_config_init,
+ .inband_caps = rtl822x_inband_caps,
+ .config_inband = rtl822x_config_inband,
+ .read_status = rtl822xb_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
+ .read_mmd = rtl822xb_read_mmd,
+ .write_mmd = rtl822xb_write_mmd,
}, {
PHY_ID_MATCH_EXACT(0x001cc848),
- .name = "RTL8226B-CG_RTL8221B-CG 2.5Gbps PHY",
- .get_features = rtl822x_get_features,
- .config_aneg = rtl822x_config_aneg,
- .config_init = rtl822xb_config_init,
- .get_rate_matching = rtl822xb_get_rate_matching,
- .read_status = rtl822xb_read_status,
- .suspend = genphy_suspend,
- .resume = rtlgen_resume,
- .read_page = rtl821x_read_page,
- .write_page = rtl821x_write_page,
- }, {
- .match_phy_device = rtl8221b_vb_cg_c22_match_phy_device,
- .name = "RTL8221B-VB-CG 2.5Gbps PHY (C22)",
- .probe = rtl822x_probe,
- .get_features = rtl822x_get_features,
- .config_aneg = rtl822x_config_aneg,
- .config_init = rtl822xb_config_init,
+ .name = "RTL8226B-CG_RTL8221B-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .config_init = rtl822xb_config_init,
+ .inband_caps = rtl822x_inband_caps,
+ .config_inband = rtl822x_config_inband,
.get_rate_matching = rtl822xb_get_rate_matching,
- .read_status = rtl822xb_read_status,
- .suspend = genphy_suspend,
- .resume = rtlgen_resume,
- .read_page = rtl821x_read_page,
- .write_page = rtl821x_write_page,
+ .read_status = rtl822xb_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ .read_mmd = rtl822xb_read_mmd,
+ .write_mmd = rtl822xb_write_mmd,
}, {
- .match_phy_device = rtl8221b_vb_cg_c45_match_phy_device,
- .name = "RTL8221B-VB-CG 2.5Gbps PHY (C45)",
+ .match_phy_device = rtl8221b_vb_cg_match_phy_device,
+ .name = "RTL8221B-VB-CG 2.5Gbps PHY",
.config_intr = rtl8221b_config_intr,
.handle_interrupt = rtl8221b_handle_interrupt,
.probe = rtl822x_probe,
- .config_init = rtl822xb_config_init,
- .get_rate_matching = rtl822xb_get_rate_matching,
- .get_features = rtl822x_c45_get_features,
- .config_aneg = rtl822x_c45_config_aneg,
- .read_status = rtl822xb_c45_read_status,
- .suspend = genphy_c45_pma_suspend,
- .resume = rtlgen_c45_resume,
- }, {
- .match_phy_device = rtl8221b_vm_cg_c22_match_phy_device,
- .name = "RTL8221B-VM-CG 2.5Gbps PHY (C22)",
- .probe = rtl822x_probe,
- .get_features = rtl822x_get_features,
- .config_aneg = rtl822x_config_aneg,
- .config_init = rtl822xb_config_init,
+ .config_init = rtl822xb_config_init,
+ .inband_caps = rtl822x_inband_caps,
+ .config_inband = rtl822x_config_inband,
.get_rate_matching = rtl822xb_get_rate_matching,
- .read_status = rtl822xb_read_status,
- .suspend = genphy_suspend,
- .resume = rtlgen_resume,
- .read_page = rtl821x_read_page,
- .write_page = rtl821x_write_page,
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .read_status = rtl822xb_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ .read_mmd = rtl822xb_read_mmd,
+ .write_mmd = rtl822xb_write_mmd,
}, {
- .match_phy_device = rtl8221b_vm_cg_c45_match_phy_device,
- .name = "RTL8221B-VM-CG 2.5Gbps PHY (C45)",
+ .match_phy_device = rtl8221b_vm_cg_match_phy_device,
+ .name = "RTL8221B-VM-CG 2.5Gbps PHY",
.config_intr = rtl8221b_config_intr,
.handle_interrupt = rtl8221b_handle_interrupt,
.probe = rtl822x_probe,
- .config_init = rtl822xb_config_init,
+ .config_init = rtl822xb_config_init,
+ .inband_caps = rtl822x_inband_caps,
+ .config_inband = rtl822x_config_inband,
.get_rate_matching = rtl822xb_get_rate_matching,
- .get_features = rtl822x_c45_get_features,
- .config_aneg = rtl822x_c45_config_aneg,
- .read_status = rtl822xb_c45_read_status,
- .suspend = genphy_c45_pma_suspend,
- .resume = rtlgen_c45_resume,
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .read_status = rtl822xb_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ .read_mmd = rtl822xb_read_mmd,
+ .write_mmd = rtl822xb_write_mmd,
}, {
.match_phy_device = rtl8251b_c45_match_phy_device,
- .name = "RTL8251B 5Gbps PHY",
+ .name = "RTL8251B 5Gbps PHY",
.probe = rtl822x_probe,
- .get_features = rtl822x_get_features,
- .config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
- .suspend = genphy_suspend,
- .resume = rtlgen_resume,
- .read_page = rtl821x_read_page,
- .write_page = rtl821x_write_page,
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
.match_phy_device = rtl_internal_nbaset_match_phy_device,
- .name = "Realtek Internal NBASE-T PHY",
+ .name = "Realtek Internal NBASE-T PHY",
.flags = PHY_IS_INTERNAL,
.probe = rtl822x_probe,
- .get_features = rtl822x_get_features,
- .config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
- .suspend = genphy_suspend,
- .resume = rtlgen_resume,
- .read_page = rtl821x_read_page,
- .write_page = rtl821x_write_page,
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ .read_mmd = rtl822x_read_mmd,
+ .write_mmd = rtl822x_write_mmd,
+ }, {
+ PHY_ID_MATCH_EXACT(PHY_ID_RTL_DUMMY_SFP),
+ .name = "Realtek SFP PHY Mode",
+ .flags = PHY_IS_INTERNAL,
+ .probe = rtl822x_probe,
+ .get_features = rtlgen_sfp_get_features,
+ .config_aneg = rtlgen_sfp_config_aneg,
+ .read_status = rtlgen_sfp_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
.read_mmd = rtl822x_read_mmd,
.write_mmd = rtl822x_write_mmd,
}, {
PHY_ID_MATCH_EXACT(0x001ccad0),
.name = "RTL8224 2.5Gbps PHY",
.flags = PHY_POLL_CABLE_TEST,
- .get_features = rtl822x_c45_get_features,
- .config_aneg = rtl822x_c45_config_aneg,
- .read_status = rtl822x_c45_read_status,
- .suspend = genphy_c45_pma_suspend,
- .resume = rtlgen_c45_resume,
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .read_status = rtl822x_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
.cable_test_start = rtl8224_cable_test_start,
.cable_test_get_status = rtl8224_cable_test_get_status,
}, {
@@ -2235,7 +2415,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001ccb00),
.name = "RTL9000AA_RTL9000AN Ethernet",
- .features = PHY_BASIC_T1_FEATURES,
+ .features = PHY_BASIC_T1_FEATURES,
.config_init = rtl9000a_config_init,
.config_aneg = rtl9000a_config_aneg,
.read_status = rtl9000a_read_status,
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 3e023723887c..43aefdd8b70f 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -532,9 +532,13 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
sfp_fixup_ignore_tx_fault),
- // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
- // 2500MBd NRZ in their EEPROM
+ // Lantech 8330-262D-E and 8330-265D can operate at 2500base-X, but
+ // incorrectly report 2500MBd NRZ in their EEPROM.
+ // Some 8330-265D modules have inverted LOS, while all of them report
+ // normal LOS in EEPROM. Therefore we need to ignore LOS entirely.
SFP_QUIRK_S("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+ SFP_QUIRK("Lantech", "8330-265D", sfp_quirk_2500basex,
+ sfp_fixup_ignore_los),
SFP_QUIRK_S("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant),
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index f9f0f16c41d1..f8814d7be6f1 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1641,6 +1641,8 @@ static void ppp_setup(struct net_device *dev)
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->priv_destructor = ppp_dev_priv_destructor;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ dev->hw_features = dev->features;
netif_keep_dst(dev);
}
@@ -1710,6 +1712,10 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
int compressor_skb_size = ppp->dev->mtu +
ppp->xcomp->comp_extra + PPP_HDRLEN;
+
+ if (skb_linearize(skb))
+ return NULL;
+
new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
if (!new_skb) {
if (net_ratelimit())
@@ -1797,6 +1803,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
case PPP_IP:
if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
break;
+
+ if (skb_linearize(skb))
+ goto drop;
+
/* try to do VJ TCP header compression */
new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
GFP_ATOMIC);
@@ -1894,19 +1904,26 @@ ppp_push(struct ppp *ppp)
}
if ((ppp->flags & SC_MULTILINK) == 0) {
+ struct ppp_channel *chan;
/* not doing multilink: send it down the first channel */
list = list->next;
pch = list_entry(list, struct channel, clist);
spin_lock(&pch->downl);
- if (pch->chan) {
- if (pch->chan->ops->start_xmit(pch->chan, skb))
- ppp->xmit_pending = NULL;
- } else {
- /* channel got unregistered */
+ chan = pch->chan;
+ if (unlikely(!chan || (!chan->direct_xmit && skb_linearize(skb)))) {
+ /* channel got unregistered, or it requires a linear
+ * skb but linearization failed
+ */
kfree_skb(skb);
ppp->xmit_pending = NULL;
+ goto out;
}
+
+ if (chan->ops->start_xmit(chan, skb))
+ ppp->xmit_pending = NULL;
+
+out:
spin_unlock(&pch->downl);
return;
}
@@ -1991,6 +2008,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
return 0; /* can't take now, leave it in xmit_pending */
/* Do protocol field compression */
+ if (skb_linearize(skb))
+ goto err_linearize;
p = skb->data;
len = skb->len;
if (*p == 0 && mp_protocol_compress) {
@@ -2149,6 +2168,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
noskb:
spin_unlock(&pch->downl);
+ err_linearize:
if (ppp->debug & 1)
netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
++ppp->dev->stats.tx_errors;
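Together with the NETIF_F_SG | NETIF_F_FRAGLIST features added in ppp_setup() above, the new per-channel direct_xmit flag is what gates these skb_linearize() calls: only channels that set it receive non-linear skbs. A minimal sketch of a channel opting in (my_* names are hypothetical):

	static int my_chan_attach(struct my_chan_priv *priv)
	{
		priv->chan.ops = &my_chan_ops;
		priv->chan.mtu = 1500;
		priv->chan.direct_xmit = true;	/* can accept SG/fraglist skbs */

		return ppp_register_channel(&priv->chan);
	}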
diff --git a/drivers/net/thunderbolt/main.c b/drivers/net/thunderbolt/main.c
index dcaa62377808..7aae5d915a1e 100644
--- a/drivers/net/thunderbolt/main.c
+++ b/drivers/net/thunderbolt/main.c
@@ -10,6 +10,7 @@
*/
#include <linux/atomic.h>
+#include <linux/ethtool.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
@@ -1261,9 +1262,57 @@ static const struct net_device_ops tbnet_netdev_ops = {
.ndo_open = tbnet_open,
.ndo_stop = tbnet_stop,
.ndo_start_xmit = tbnet_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = tbnet_get_stats64,
};
+static int tbnet_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ const struct tbnet *net = netdev_priv(dev);
+ const struct tb_xdomain *xd = net->xd;
+ int speed;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ /* Figure out the current link speed and width */
+ switch (xd->link_speed) {
+ case 40:
+ speed = SPEED_80000;
+ break;
+
+ case 20:
+ if (xd->link_width == 2)
+ speed = SPEED_40000;
+ else
+ speed = SPEED_20000;
+ break;
+
+ case 10:
+ if (xd->link_width == 2) {
+ speed = SPEED_20000;
+ break;
+ }
+ fallthrough;
+
+ default:
+ speed = SPEED_10000;
+ break;
+ }
+
+ cmd->base.speed = speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = PORT_OTHER;
+
+ return 0;
+}
+
+static const struct ethtool_ops tbnet_ethtool_ops = {
+ .get_link_ksettings = tbnet_get_link_ksettings,
+};
+
static void tbnet_generate_mac(struct net_device *dev)
{
const struct tbnet *net = netdev_priv(dev);
@@ -1281,6 +1330,9 @@ static void tbnet_generate_mac(struct net_device *dev)
hash = jhash2((u32 *)xd->local_uuid, 4, hash);
addr[5] = hash & 0xff;
eth_hw_addr_set(dev, addr);
+
+ /* Allow changing it if needed */
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
}
static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
@@ -1311,6 +1363,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
strcpy(dev->name, "thunderbolt%d");
dev->netdev_ops = &tbnet_netdev_ops;
+ dev->ethtool_ops = &tbnet_ethtool_ops;
/* ThunderboltIP takes advantage of TSO packets but instead of
* segmenting them we just split the packet into Thunderbolt
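The speed reported by tbnet_get_link_ksettings() above is effectively the per-lane speed multiplied by the lane count. A compact equivalent of that switch statement, assuming 40 Gb/s (Gen 4) links always run two lanes, as the driver code does:

	static int tbnet_link_speed(unsigned int lane_speed, unsigned int width)
	{
		switch (lane_speed * width) {
		case 80:
			return SPEED_80000;
		case 40:
			return SPEED_40000;
		case 20:
			return SPEED_20000;
		default:
			return SPEED_10000;
		}
	}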
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 856e648d804e..52a5c0922c79 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -319,7 +319,6 @@ config USB_NET_DM9601
config USB_NET_SR9700
tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
depends on USB_USBNET
- select CRC32
help
This option adds support for CoreChip-sz SR9700 based USB 1.1
10/100 Ethernet adapters.
@@ -564,6 +563,7 @@ config USB_HSO
config USB_NET_INT51X1
tristate "Intellon PLC based usb adapter"
depends on USB_USBNET
+ select USB_NET_CDCETHER
help
Choose this option if you're using a 14Mb USB-based PLC
(Powerline Communications) solution with an Intellon
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 9201ee10a13f..0722050dbe32 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -20,8 +20,6 @@
#include "aqc111.h"
-#define DRIVER_NAME "aqc111"
-
static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
u16 index, u16 size, void *data)
{
@@ -207,13 +205,10 @@ static void aqc111_get_drvinfo(struct net_device *net,
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u",
aqc111_data->fw_ver.major,
aqc111_data->fw_ver.minor,
aqc111_data->fw_ver.rev);
- info->eedump_len = 0x00;
- info->regdump_len = 0x00;
}
static void aqc111_get_wol(struct net_device *net,
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 8531b804021a..cf97bc3d388b 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -29,7 +29,6 @@
#include <net/selftests.h>
#include <linux/phylink.h>
-#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
/* ASIX AX8817X based USB 2.0 Ethernet Devices */
@@ -248,8 +247,6 @@ int asix_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
u8 *data);
-void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info);
-
int asix_set_mac_address(struct net_device *net, void *p);
#endif /* _ASIX_H */
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 6ab3486072cb..4f03f4e57655 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -731,14 +731,6 @@ free:
return ret;
}
-void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
-{
- /* Inherit standard device info */
- usbnet_get_drvinfo(net, info);
- strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
-}
-
int asix_set_mac_address(struct net_device *net, void *p)
{
struct usbnet *dev = netdev_priv(net);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 232bbd79a4de..7eb6e86adb16 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -97,26 +97,12 @@ static u32 asix_get_phyid(struct usbnet *dev)
return phy_id;
}
-static u32 asix_get_link(struct net_device *net)
-{
- struct usbnet *dev = netdev_priv(net);
-
- return mii_link_ok(&dev->mii);
-}
-
-static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
-{
- struct usbnet *dev = netdev_priv(net);
-
- return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
-}
-
/* We need to override some ethtool_ops so we require our
own structure so we don't interfere with other usbnet
devices that may be connected at the same time. */
static const struct ethtool_ops ax88172_ethtool_ops = {
- .get_drvinfo = asix_get_drvinfo,
- .get_link = asix_get_link,
+ .get_drvinfo = usbnet_get_drvinfo,
+ .get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_wol = asix_get_wol,
@@ -197,7 +183,7 @@ static const struct net_device_ops ax88172_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = asix_ioctl,
+ .ndo_eth_ioctl = usbnet_mii_ioctl,
.ndo_set_rx_mode = ax88172_set_multicast,
};
@@ -324,7 +310,7 @@ static int ax88772_ethtool_set_pauseparam(struct net_device *ndev,
}
static const struct ethtool_ops ax88772_ethtool_ops = {
- .get_drvinfo = asix_get_drvinfo,
+ .get_drvinfo = usbnet_get_drvinfo,
.get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
@@ -985,8 +971,8 @@ static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf)
}
static const struct ethtool_ops ax88178_ethtool_ops = {
- .get_drvinfo = asix_get_drvinfo,
- .get_link = asix_get_link,
+ .get_drvinfo = usbnet_get_drvinfo,
+ .get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_wol = asix_get_wol,
@@ -1276,7 +1262,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = asix_set_multicast,
- .ndo_eth_ioctl = asix_ioctl,
+ .ndo_eth_ioctl = usbnet_mii_ioctl,
.ndo_change_mtu = ax88178_change_mtu,
};
@@ -1642,7 +1628,5 @@ static struct usb_driver asix_driver = {
module_usb_driver(asix_driver);
MODULE_AUTHOR("David Hollis");
-MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("ASIX AX8817X based USB 2.0 Ethernet Devices");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 758a423a459b..3100fbe153c0 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -114,7 +114,7 @@ static const struct net_device_ops ax88172a_netdev_ops = {
};
static const struct ethtool_ops ax88172a_ethtool_ops = {
- .get_drvinfo = asix_get_drvinfo,
+ .get_drvinfo = usbnet_get_drvinfo,
.get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index b034ef8a73ea..0e9ae89b840e 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -847,12 +847,6 @@ static int ax88179_set_eee(struct net_device *net, struct ethtool_keee *edata)
return ret;
}
-static int ax88179_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
-{
- struct usbnet *dev = netdev_priv(net);
- return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
-}
-
static const struct ethtool_ops ax88179_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_msglevel = usbnet_get_msglevel,
@@ -998,7 +992,7 @@ static const struct net_device_ops ax88179_netdev_ops = {
.ndo_change_mtu = ax88179_change_mtu,
.ndo_set_mac_address = ax88179_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = ax88179_ioctl,
+ .ndo_eth_ioctl = usbnet_mii_ioctl,
.ndo_set_rx_mode = ax88179_set_multicast,
.ndo_set_features = ax88179_set_features,
};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 6759388692f8..5c7f19cbacf6 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -37,14 +37,8 @@
#include <linux/usb.h>
-/*
- * Version information.
- */
-
-#define DRIVER_VERSION "v2.8"
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>"
#define DRIVER_DESC "CATC EL1210A NetMate USB Ethernet driver"
-#define SHORT_DRIVER_DESC "EL1210A NetMate USB Ethernet"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
@@ -673,7 +667,6 @@ static void catc_get_drvinfo(struct net_device *dev,
{
struct catc *catc = netdev_priv(dev);
strscpy(info->driver, driver_name, sizeof(info->driver));
- strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 2b4716ccf0c5..c8e0f8868210 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -259,30 +259,9 @@ static void dm9601_mdio_write(struct net_device *netdev, int phy_id, int loc,
dm_write_shared_word(dev, 1, loc, res);
}
-static void dm9601_get_drvinfo(struct net_device *net,
- struct ethtool_drvinfo *info)
-{
- /* Inherit standard device info */
- usbnet_get_drvinfo(net, info);
-}
-
-static u32 dm9601_get_link(struct net_device *net)
-{
- struct usbnet *dev = netdev_priv(net);
-
- return mii_link_ok(&dev->mii);
-}
-
-static int dm9601_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
-{
- struct usbnet *dev = netdev_priv(net);
-
- return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
-}
-
static const struct ethtool_ops dm9601_ethtool_ops = {
- .get_drvinfo = dm9601_get_drvinfo,
- .get_link = dm9601_get_link,
+ .get_drvinfo = usbnet_get_drvinfo,
+ .get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_eeprom_len = dm9601_get_eeprom_len,
@@ -351,7 +330,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
.ndo_change_mtu = usbnet_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = dm9601_ioctl,
+ .ndo_eth_ioctl = usbnet_mii_ioctl,
.ndo_set_rx_mode = dm9601_set_multicast,
.ndo_set_mac_address = dm9601_set_mac_address,
};
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 6fde41550de1..87bd6be1fcb6 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -4,14 +4,11 @@
*
* Intellon usb PLC (Powerline Communications) usb net driver
*
- * http://www.tandel.be/downloads/INT51X1_Datasheet.pdf
+ * https://web.archive.org/web/20101025091240id_/http://www.tandel.be/downloads/INT51X1_Datasheet.pdf
*
* Based on the work of Jan 'RedBully' Seiffert
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/netdevice.h>
@@ -27,14 +24,6 @@
#define INT51X1_HEADER_SIZE 2 /* 2 byte header */
-#define PACKET_TYPE_PROMISCUOUS (1 << 0)
-#define PACKET_TYPE_ALL_MULTICAST (1 << 1) /* no filter */
-#define PACKET_TYPE_DIRECTED (1 << 2)
-#define PACKET_TYPE_BROADCAST (1 << 3)
-#define PACKET_TYPE_MULTICAST (1 << 4) /* filtered */
-
-#define SET_ETHERNET_PACKET_FILTER 0x43
-
static int int51x1_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
int len;
@@ -104,29 +93,6 @@ static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
return skb;
}
-static void int51x1_set_multicast(struct net_device *netdev)
-{
- struct usbnet *dev = netdev_priv(netdev);
- u16 filter = PACKET_TYPE_DIRECTED | PACKET_TYPE_BROADCAST;
-
- if (netdev->flags & IFF_PROMISC) {
- /* do not expect to see traffic of other PLCs */
- filter |= PACKET_TYPE_PROMISCUOUS;
- netdev_info(dev->net, "promiscuous mode enabled\n");
- } else if (!netdev_mc_empty(netdev) ||
- (netdev->flags & IFF_ALLMULTI)) {
- filter |= PACKET_TYPE_ALL_MULTICAST;
- netdev_dbg(dev->net, "receive all multicast enabled\n");
- } else {
- /* ~PROMISCUOUS, ~MULTICAST */
- netdev_dbg(dev->net, "receive own packets only\n");
- }
-
- usbnet_write_cmd_async(dev, SET_ETHERNET_PACKET_FILTER,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- filter, 0, NULL, 0);
-}
-
static const struct net_device_ops int51x1_netdev_ops = {
.ndo_open = usbnet_open,
.ndo_stop = usbnet_stop,
@@ -136,7 +102,7 @@ static const struct net_device_ops int51x1_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = int51x1_set_multicast,
+ .ndo_set_rx_mode = usbnet_set_rx_mode,
};
static int int51x1_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -158,6 +124,7 @@ static const struct driver_info int51x1_info = {
.bind = int51x1_bind,
.rx_fixup = int51x1_rx_fixup,
.tx_fixup = int51x1_tx_fixup,
+ .set_rx_mode = usbnet_cdc_update_filter,
.in = 1,
.out = 2,
.flags = FLAG_ETHER,
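The generic rx-mode path is split in two: usbnet_set_rx_mode() is the ndo hook, while the driver_info .set_rx_mode callback does the device-specific work; here that is the standard CDC filter request, which is why the Kconfig hunk above selects USB_NET_CDCETHER. A hedged sketch of the same wiring for some other minidriver (foo_* names are hypothetical):

	static const struct driver_info foo_info = {
		.description	= "Foo USB Ethernet",
		.flags		= FLAG_ETHER,
		.set_rx_mode	= usbnet_cdc_update_filter,
	};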
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index fdda0616704e..d6698f30218d 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -325,12 +325,6 @@ static void mcs7830_mdio_write(struct net_device *netdev, int phy_id,
mcs7830_write_phy(dev, location, val);
}
-static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
-{
- struct usbnet *dev = netdev_priv(net);
- return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
-}
-
static inline struct mcs7830_data *mcs7830_get_data(struct usbnet *dev)
{
return (struct mcs7830_data *)&dev->data;
@@ -438,11 +432,6 @@ static int mcs7830_get_regs_len(struct net_device *net)
return 0;
}
-static void mcs7830_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *drvinfo)
-{
- usbnet_get_drvinfo(net, drvinfo);
-}
-
static void mcs7830_get_regs(struct net_device *net, struct ethtool_regs *regs, void *data)
{
struct usbnet *dev = netdev_priv(net);
@@ -452,11 +441,11 @@ static void mcs7830_get_regs(struct net_device *net, struct ethtool_regs *regs,
}
static const struct ethtool_ops mcs7830_ethtool_ops = {
- .get_drvinfo = mcs7830_get_drvinfo,
.get_regs_len = mcs7830_get_regs_len,
.get_regs = mcs7830_get_regs,
/* common usbnet calls */
+ .get_drvinfo = usbnet_get_drvinfo,
.get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
@@ -473,7 +462,7 @@ static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_change_mtu = usbnet_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = mcs7830_ioctl,
+ .ndo_eth_ioctl = usbnet_mii_ioctl,
.ndo_set_rx_mode = mcs7830_set_multicast,
.ndo_set_mac_address = mcs7830_set_mac_address,
};
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index c514483134f0..7b6d6eb60709 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -18,9 +18,6 @@
#include <linux/uaccess.h>
#include "pegasus.h"
-/*
- * Version Information
- */
#define DRIVER_AUTHOR "Petko Manolov <petkan@nucleusys.com>"
#define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver"
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2f3baa5f6e9c..adfc83b7ca6a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -30,13 +30,6 @@
#include <linux/usb/r8152.h>
#include <net/gso.h>
-/* Information for net-next */
-#define NETNEXT_VERSION "12"
-
-/* Information for net */
-#define NET_VERSION "13"
-
-#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
@@ -2449,6 +2442,8 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
ret = usb_submit_urb(agg->urb, GFP_ATOMIC);
if (ret < 0)
usb_autopm_put_interface_async(tp->intf);
+ else
+ netif_trans_update(tp->netdev);
out_tx_fill:
return ret;
@@ -8755,7 +8750,6 @@ static void rtl8152_get_drvinfo(struct net_device *netdev,
struct r8152 *tp = netdev_priv(netdev);
strscpy(info->driver, MODULENAME, sizeof(info->driver));
- strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info));
if (!IS_ERR_OR_NULL(tp->rtl_fw.fw))
strscpy(info->fw_version, tp->rtl_fw.version,
@@ -9949,7 +9943,6 @@ static int rtl8152_probe_once(struct usb_interface *intf,
goto out2;
set_bit(PROBED_WITH_NO_ERRORS, &tp->flags);
- netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
@@ -10144,4 +10137,3 @@ module_exit(rtl8152_driver_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index e40b0669d9f4..2f1f134b5b48 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -13,8 +13,6 @@
#include <linux/usb.h>
#include <linux/uaccess.h>
-/* Version Information */
-#define DRIVER_VERSION "v0.6.2 (2004/08/27)"
#define DRIVER_AUTHOR "Petko Manolov <petkan@users.sourceforge.net>"
#define DRIVER_DESC "rtl8150 based usb-ethernet driver"
@@ -785,7 +783,6 @@ static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinf
rtl8150_t *dev = netdev_priv(netdev);
strscpy(info->driver, driver_name, sizeof(info->driver));
- strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 36c73db44f77..3ca60ebdd468 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -12,10 +12,8 @@
* Sierra Wireless. Use at your own risk.
*/
-#define DRIVER_VERSION "v.2.0"
#define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer"
#define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems"
-static const char driver_name[] = "sierra_net";
/* if defined debug messages enabled */
/*#define DEBUG*/
@@ -607,15 +605,6 @@ static void sierra_net_status(struct usbnet *dev, struct urb *urb)
}
}
-static void sierra_net_get_drvinfo(struct net_device *net,
- struct ethtool_drvinfo *info)
-{
- /* Inherit standard device info */
- usbnet_get_drvinfo(net, info);
- strscpy(info->driver, driver_name, sizeof(info->driver));
- strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
-}
-
static u32 sierra_net_get_link(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
@@ -624,7 +613,7 @@ static u32 sierra_net_get_link(struct net_device *net)
}
static const struct ethtool_ops sierra_net_ethtool_ops = {
- .get_drvinfo = sierra_net_get_drvinfo,
+ .get_drvinfo = usbnet_get_drvinfo,
.get_link = sierra_net_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
@@ -973,5 +962,4 @@ module_usb_driver(sierra_net_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 78c821349f48..1a61a8bcf5d3 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -21,7 +21,6 @@
#include "smsc75xx.h"
#define SMSC_CHIPNAME "smsc75xx"
-#define SMSC_DRIVER_VERSION "1.0.0"
#define HS_USB_PKT_SIZE (512)
#define FS_USB_PKT_SIZE (64)
#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE)
@@ -744,12 +743,10 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
- struct usbnet *dev = netdev_priv(netdev);
-
if (!netif_running(netdev))
return -EINVAL;
- return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+ return usbnet_mii_ioctl(netdev, rq, cmd);
}
static void smsc75xx_init_mac_address(struct usbnet *dev)
@@ -1447,8 +1444,6 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
struct smsc75xx_priv *pdata = NULL;
int ret;
- printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
-
ret = usbnet_get_endpoints(dev, intf);
if (ret < 0) {
netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index de733e0488bf..7ecf98d97493 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -27,7 +27,6 @@
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
-#define SMSC_DRIVER_VERSION "2.0.0"
#define HS_USB_PKT_SIZE (512)
#define FS_USB_PKT_SIZE (64)
#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE)
@@ -854,14 +853,6 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.set_pauseparam = smsc95xx_set_pauseparam,
};
-static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(netdev))
- return -EINVAL;
-
- return phy_mii_ioctl(netdev->phydev, rq, cmd);
-}
-
static void smsc95xx_init_mac_address(struct usbnet *dev)
{
u8 addr[ETH_ALEN];
@@ -1139,7 +1130,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = smsc95xx_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = smsc95xx_set_multicast,
.ndo_set_features = smsc95xx_set_features,
};
@@ -1160,8 +1151,6 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
int ret, phy_irq;
u32 val;
- printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
-
ret = usbnet_get_endpoints(dev, intf);
if (ret < 0) {
netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
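phy_do_ioctl_running() is the stock phylib helper; it matches the removed wrapper except that it returns -ENODEV rather than -EINVAL when the interface is down. A rough sketch of its behaviour (not the exact kernel source):

	static int phy_do_ioctl_running_sketch(struct net_device *dev,
					       struct ifreq *ifr, int cmd)
	{
		if (!netif_running(dev))
			return -ENODEV;

		return phy_mii_ioctl(dev->phydev, ifr, cmd);
	}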
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 820c4c506979..937e6fef3ac6 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -16,9 +16,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/mii.h>
#include <linux/usb.h>
-#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include "sr9700.h"
@@ -38,7 +36,7 @@ static int sr_write(struct usbnet *dev, u8 reg, u16 length, void *data)
{
int err;
- err = usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG, 0, reg, data,
+ err = usbnet_write_cmd(dev, SR_WR_MULTIPLE_REGS, SR_REQ_WR_REG, 0, reg, data,
length);
if ((err >= 0) && (err < length))
err = -EINVAL;
@@ -52,28 +50,28 @@ static int sr_read_reg(struct usbnet *dev, u8 reg, u8 *value)
static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value)
{
- return usbnet_write_cmd(dev, SR_WR_REG, SR_REQ_WR_REG,
+ return usbnet_write_cmd(dev, SR_WR_SINGLE_REG, SR_REQ_WR_REG,
value, reg, NULL, 0);
}
static void sr_write_async(struct usbnet *dev, u8 reg, u16 length,
const void *data)
{
- usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
+ usbnet_write_cmd_async(dev, SR_WR_MULTIPLE_REGS, SR_REQ_WR_REG,
0, reg, data, length);
}
static void sr_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
{
- usbnet_write_cmd_async(dev, SR_WR_REG, SR_REQ_WR_REG,
+ usbnet_write_cmd_async(dev, SR_WR_SINGLE_REG, SR_REQ_WR_REG,
value, reg, NULL, 0);
}
-static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
+static int wait_eeprom_ready(struct usbnet *dev)
{
int i;
- for (i = 0; i < SR_SHARE_TIMEOUT; i++) {
+ for (i = 0; i < SR_EEPROM_TIMEOUT; i++) {
u8 tmp = 0;
int ret;
@@ -87,38 +85,37 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
return 0;
}
- netdev_err(dev->net, "%s write timed out!\n", phy ? "phy" : "eeprom");
+ netdev_err(dev->net, "eeprom write timed out!\n");
return -EIO;
}
-static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
- __le16 *value)
+static int sr_read_eeprom_word(struct usbnet *dev, u8 reg, __le16 *value)
{
int ret;
mutex_lock(&dev->phy_mutex);
- sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
- sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
+ sr_write_reg(dev, SR_EPAR, reg);
+ sr_write_reg(dev, SR_EPCR, EPCR_ERPRR);
- ret = wait_phy_eeprom_ready(dev, phy);
+ ret = wait_eeprom_ready(dev);
if (ret < 0)
goto out_unlock;
sr_write_reg(dev, SR_EPCR, 0x0);
ret = sr_read(dev, SR_EPDR, 2, value);
- netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
- phy, reg, *value, ret);
+ netdev_dbg(dev->net, "read eeprom 0x%02x returned 0x%04x, %d\n",
+ reg, *value, ret);
out_unlock:
mutex_unlock(&dev->phy_mutex);
return ret;
}
-static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
- __le16 value)
+static int __maybe_unused sr_write_eeprom_word(struct usbnet *dev, u8 reg,
+ __le16 value)
{
int ret;
@@ -128,11 +125,10 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
if (ret < 0)
goto out_unlock;
- sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
- sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
- (EPCR_WEP | EPCR_ERPRW));
+ sr_write_reg(dev, SR_EPAR, reg);
+ sr_write_reg(dev, SR_EPCR, EPCR_WEP | EPCR_ERPRW);
- ret = wait_phy_eeprom_ready(dev, phy);
+ ret = wait_eeprom_ready(dev);
if (ret < 0)
goto out_unlock;
@@ -143,11 +139,6 @@ out_unlock:
return ret;
}
-static int sr_read_eeprom_word(struct usbnet *dev, u8 offset, void *value)
-{
- return sr_share_read_word(dev, 0, offset, value);
-}
-
static int sr9700_get_eeprom_len(struct net_device *netdev)
{
return SR_EEPROM_LEN;
@@ -174,80 +165,56 @@ static int sr9700_get_eeprom(struct net_device *netdev,
return ret;
}
-static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
+static void sr9700_handle_link_change(struct net_device *netdev, bool link)
{
- struct usbnet *dev = netdev_priv(netdev);
- int err, res;
- __le16 word;
- int rc = 0;
-
- if (phy_id) {
- netdev_dbg(netdev, "Only internal phy supported\n");
- return 0;
- }
-
- /* Access NSR_LINKST bit for link status instead of MII_BMSR */
- if (loc == MII_BMSR) {
- u8 value;
-
- err = sr_read_reg(dev, SR_NSR, &value);
- if (err < 0)
- return err;
-
- if (value & NSR_LINKST)
- rc = 1;
+ if (netif_carrier_ok(netdev) != link) {
+ if (link) {
+ netif_carrier_on(netdev);
+ netdev_info(netdev, "link up, 10Mbps, half-duplex\n");
+ } else {
+ netif_carrier_off(netdev);
+ netdev_info(netdev, "link down\n");
+ }
}
- err = sr_share_read_word(dev, 1, loc, &word);
- if (err < 0)
- return err;
-
- if (rc == 1)
- res = le16_to_cpu(word) | BMSR_LSTATUS;
- else
- res = le16_to_cpu(word) & ~BMSR_LSTATUS;
-
- netdev_dbg(netdev, "sr_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
- phy_id, loc, res);
-
- return res;
-}
-
-static void sr_mdio_write(struct net_device *netdev, int phy_id, int loc,
- int val)
-{
- struct usbnet *dev = netdev_priv(netdev);
- __le16 res = cpu_to_le16(val);
-
- if (phy_id) {
- netdev_dbg(netdev, "Only internal phy supported\n");
- return;
- }
-
- netdev_dbg(netdev, "sr_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
- phy_id, loc, val);
-
- sr_share_write_word(dev, 1, loc, res);
}
static u32 sr9700_get_link(struct net_device *netdev)
{
struct usbnet *dev = netdev_priv(netdev);
u8 value = 0;
- int rc = 0;
+ u32 link = 0;
- /* Get the Link Status directly */
sr_read_reg(dev, SR_NSR, &value);
- if (value & NSR_LINKST)
- rc = 1;
+ link = !!(value & NSR_LINKST);
+
+ sr9700_handle_link_change(netdev, link);
- return rc;
+ return link;
}
-static int sr9700_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+/*
+ * The device supports only 10Mbps half-duplex operation. It implements the
+ * DM9601 speed/duplex status registers, but as the values are always the same,
+ * using them would add unnecessary complexity.
+ */
+static int sr9700_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- struct usbnet *dev = netdev_priv(netdev);
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
- return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+ cmd->base.speed = SPEED_10;
+ cmd->base.duplex = DUPLEX_HALF;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ return 0;
}
static const struct ethtool_ops sr9700_ethtool_ops = {
@@ -257,39 +224,21 @@ static const struct ethtool_ops sr9700_ethtool_ops = {
.set_msglevel = usbnet_set_msglevel,
.get_eeprom_len = sr9700_get_eeprom_len,
.get_eeprom = sr9700_get_eeprom,
- .nway_reset = usbnet_nway_reset,
- .get_link_ksettings = usbnet_get_link_ksettings_mii,
- .set_link_ksettings = usbnet_set_link_ksettings_mii,
+ .get_link_ksettings = sr9700_get_link_ksettings,
};
static void sr9700_set_multicast(struct net_device *netdev)
{
struct usbnet *dev = netdev_priv(netdev);
- /* We use the 20 byte dev->data for our 8 byte filter buffer
- * to avoid allocating memory that is tricky to free later
- */
- u8 *hashes = (u8 *)&dev->data;
/* rx_ctl setting : enable, disable_long, disable_crc */
u8 rx_ctl = RCR_RXEN | RCR_DIS_CRC | RCR_DIS_LONG;
- memset(hashes, 0x00, SR_MCAST_SIZE);
- /* broadcast address */
- hashes[SR_MCAST_SIZE - 1] |= SR_MCAST_ADDR_FLAG;
- if (netdev->flags & IFF_PROMISC) {
+ if (netdev->flags & IFF_PROMISC)
rx_ctl |= RCR_PRMSC;
- } else if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > SR_MCAST_MAX) {
- rx_ctl |= RCR_RUNT;
- } else if (!netdev_mc_empty(netdev)) {
- struct netdev_hw_addr *ha;
-
- netdev_for_each_mc_addr(ha, netdev) {
- u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
- hashes[crc >> 3] |= 1 << (crc & 0x7);
- }
- }
+ else if (netdev->flags & IFF_ALLMULTI || !netdev_mc_empty(netdev))
+ /* The chip has no multicast filter */
+ rx_ctl |= RCR_ALL;
- sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes);
sr_write_reg_async(dev, SR_RCR, rx_ctl);
}
@@ -305,7 +254,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
}
eth_hw_addr_set(netdev, addr->sa_data);
- sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
+ sr_write_async(dev, SR_PAR, ETH_ALEN, netdev->dev_addr);
return 0;
}
@@ -318,7 +267,6 @@ static const struct net_device_ops sr9700_netdev_ops = {
.ndo_change_mtu = usbnet_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = sr9700_ioctl,
.ndo_set_rx_mode = sr9700_set_multicast,
.ndo_set_mac_address = sr9700_set_mac_address,
};
@@ -326,7 +274,6 @@ static const struct net_device_ops sr9700_netdev_ops = {
static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct net_device *netdev;
- struct mii_if_info *mii;
u8 addr[ETH_ALEN];
int ret;
@@ -343,13 +290,6 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
/* bulkin buffer is preferably not less than 3K */
dev->rx_urb_size = 3072;
- mii = &dev->mii;
- mii->dev = netdev;
- mii->mdio_read = sr_mdio_read;
- mii->mdio_write = sr_mdio_write;
- mii->phy_id_mask = 0x1f;
- mii->reg_num_mask = 0x1f;
-
sr_write_reg(dev, SR_NCR, NCR_RST);
udelay(20);
@@ -376,11 +316,6 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
/* receive broadcast packets */
sr9700_set_multicast(netdev);
- sr_mdio_write(netdev, mii->phy_id, MII_BMCR, BMCR_RESET);
- sr_mdio_write(netdev, mii->phy_id, MII_ADVERTISE, ADVERTISE_ALL |
- ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
- mii_nway_restart(mii);
-
out:
return ret;
}
@@ -391,20 +326,20 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
int len;
/* skb content (packets) format :
- * p0 p1 p2 ...... pm
+ * p1 p2 p3 ...... pn
* / \
* / \
* / \
* / \
- * p0b0 p0b1 p0b2 p0b3 ...... p0b(n-4) p0b(n-3)...p0bn
+ * p1b1 p1b2 p1b3 p1b4 ...... p1b(n-4) p1b(n-3)...p1bn
*
- * p0 : packet 0
- * p0b0 : packet 0 byte 0
+ * p1 : packet 1
+ * p1b1 : packet 1 byte 1
*
- * b0: rx status
- * b1: packet length (incl crc) low
- * b2: packet length (incl crc) high
- * b3..n-4: packet data
+ * b1: rx status
+ * b2: packet length (incl crc) low
+ * b3: packet length (incl crc) high
+ * b4..n-4: packet data
* bn-3..bn: ethernet packet crc
*/
if (unlikely(skb->len < SR_RX_OVERHEAD)) {
@@ -414,7 +349,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* one skb may contains multiple packets */
while (skb->len > SR_RX_OVERHEAD) {
- if (skb->data[0] != 0x40)
+ if (skb->data[0] != RSR_MF)
return 0;
/* ignore the CRC length */
@@ -452,12 +387,12 @@ static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
/* SR9700 can only send out one ethernet packet at once.
*
- * b0 b1 b2 b3 ...... b(n-4) b(n-3)...bn
+ * b1 b2 b3 b4 ...... b(n-4) b(n-3)...bn
*
- * b0: rx status
- * b1: packet length (incl crc) low
- * b2: packet length (incl crc) high
- * b3..n-4: packet data
+ * b1: rx status
+ * b2: packet length (incl crc) low
+ * b3: packet length (incl crc) high
+ * b4..n-4: packet data
* bn-3..bn: ethernet packet crc
*/
@@ -484,18 +419,18 @@ static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
static void sr9700_status(struct usbnet *dev, struct urb *urb)
{
- int link;
+ bool link;
u8 *buf;
/* format:
- b0: net status
- b1: tx status 1
- b2: tx status 2
- b3: rx status
- b4: rx overflow
- b5: rx count
- b6: tx count
- b7: gpr
+ b1: net status
+ b2: tx status 1
+ b3: tx status 2
+ b4: rx status
+ b5: rx overflow
+ b6: rx count
+ b7: tx count
+ b8: gpr
*/
if (urb->actual_length < 8)
@@ -503,24 +438,8 @@ static void sr9700_status(struct usbnet *dev, struct urb *urb)
buf = urb->transfer_buffer;
- link = !!(buf[0] & 0x40);
- if (netif_carrier_ok(dev->net) != link) {
- usbnet_link_change(dev, link, 1);
- netdev_dbg(dev->net, "Link Status is: %d\n", link);
- }
-}
-
-static int sr9700_link_reset(struct usbnet *dev)
-{
- struct ethtool_cmd ecmd;
-
- mii_check_media(&dev->mii, 1, 1);
- mii_ethtool_gset(&dev->mii, &ecmd);
-
- netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n",
- ecmd.speed, ecmd.duplex);
-
- return 0;
+ link = !!(buf[0] & NSR_LINKST);
+ sr9700_handle_link_change(dev->net, link);
}
static const struct driver_info sr9700_driver_info = {
@@ -530,8 +449,6 @@ static const struct driver_info sr9700_driver_info = {
.rx_fixup = sr9700_rx_fixup,
.tx_fixup = sr9700_tx_fixup,
.status = sr9700_status,
- .link_reset = sr9700_link_reset,
- .reset = sr9700_link_reset,
};
static const struct usb_device_id products[] = {
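With the PHY plumbing gone, EEPROM access reduces to the sequence in sr_read_eeprom_word(): program SR_EPAR, kick EPCR_ERPRR, poll until ready, then read the 16-bit word from SR_EPDR. A hedged usage sketch, assuming (as on similar Davicom-style parts) that the MAC address occupies the first three EEPROM words:

	static int sr9700_read_mac(struct usbnet *dev, u8 addr[ETH_ALEN])
	{
		__le16 word;
		int i, ret;

		for (i = 0; i < ETH_ALEN / 2; i++) {
			ret = sr_read_eeprom_word(dev, i, &word);
			if (ret < 0)
				return ret;
			memcpy(&addr[i * 2], &word, sizeof(word));
		}

		return 0;
	}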
diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h
index ea2b4de621c8..d1663cb1e8cd 100644
--- a/drivers/net/usb/sr9700.h
+++ b/drivers/net/usb/sr9700.h
@@ -82,19 +82,16 @@
#define FCR_TXPEN (1 << 5)
#define FCR_TXPF (1 << 6)
#define FCR_TXP0 (1 << 7)
-/* Eeprom & Phy Control Reg */
+/* Eeprom Control Reg */
#define SR_EPCR 0x0B
#define EPCR_ERRE (1 << 0)
#define EPCR_ERPRW (1 << 1)
#define EPCR_ERPRR (1 << 2)
-#define EPCR_EPOS (1 << 3)
#define EPCR_WEP (1 << 4)
-/* Eeprom & Phy Address Reg */
+/* Eeprom Address Reg */
#define SR_EPAR 0x0C
#define EPAR_EROA (0x3F << 0)
-#define EPAR_PHY_ADR_MASK (0x03 << 6)
-#define EPAR_PHY_ADR (0x01 << 6)
-/* Eeprom & Phy Data Reg */
+/* Eeprom Data Reg */
#define SR_EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */
/* Wakeup Control Reg */
#define SR_WCR 0x0F
@@ -104,9 +101,7 @@
#define WCR_LINKEN (1 << 5)
/* Physical Address Reg */
#define SR_PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */
-/* Multicast Address Reg */
-#define SR_MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */
-/* 0x1e unused */
+/* 0x16 --> 0x1E unused */
/* Phy Reset Reg */
#define SR_PRR 0x1F
#define PRR_PHY_RST (1 << 0)
@@ -153,17 +148,14 @@
/* Register access commands and flags */
#define SR_RD_REGS 0x00
-#define SR_WR_REGS 0x01
-#define SR_WR_REG 0x03
+#define SR_WR_MULTIPLE_REGS 0x01
+#define SR_WR_SINGLE_REG 0x03
#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
/* parameters */
-#define SR_SHARE_TIMEOUT 1000
+#define SR_EEPROM_TIMEOUT 1000
#define SR_EEPROM_LEN 256
-#define SR_MCAST_SIZE 8
-#define SR_MCAST_ADDR_FLAG 0x80
-#define SR_MCAST_MAX 64
#define SR_TX_OVERHEAD 2 /* 2bytes header */
#define SR_RX_OVERHEAD 7 /* 3bytes header + 4crc tail */
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 57947a5590cc..6fd33a5b2279 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -469,29 +469,6 @@ static int sr_get_eeprom(struct net_device *net,
return 0;
}
-static void sr_get_drvinfo(struct net_device *net,
- struct ethtool_drvinfo *info)
-{
- /* Inherit standard device info */
- usbnet_get_drvinfo(net, info);
- strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
-}
-
-static u32 sr_get_link(struct net_device *net)
-{
- struct usbnet *dev = netdev_priv(net);
-
- return mii_link_ok(&dev->mii);
-}
-
-static int sr_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
-{
- struct usbnet *dev = netdev_priv(net);
-
- return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
-}
-
static int sr_set_mac_address(struct net_device *net, void *p)
{
struct usbnet *dev = netdev_priv(net);
@@ -518,8 +495,8 @@ static int sr_set_mac_address(struct net_device *net, void *p)
}
static const struct ethtool_ops sr9800_ethtool_ops = {
- .get_drvinfo = sr_get_drvinfo,
- .get_link = sr_get_link,
+ .get_drvinfo = usbnet_get_drvinfo,
+ .get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_wol = sr_get_wol,
@@ -684,7 +661,7 @@ static const struct net_device_ops sr9800_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = sr_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = sr_ioctl,
+ .ndo_eth_ioctl = usbnet_mii_ioctl,
.ndo_set_rx_mode = sr_set_multicast,
};
@@ -872,6 +849,5 @@ static struct usb_driver sr_driver = {
module_usb_driver(sr_driver);
MODULE_AUTHOR("Liu Junliang <liujunliang_ljl@163.com");
-MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("SR9800 USB 2.0 USB2NET Dev : http://www.corechip-sz.com");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
index 952e6f7c0321..98ac1c45740e 100644
--- a/drivers/net/usb/sr9800.h
+++ b/drivers/net/usb/sr9800.h
@@ -147,8 +147,7 @@
#define SR_EEPROM_MAGIC 0xdeadbeef
#define SR9800_EEPROM_LEN 0xff
-/* SR9800 Driver Version and Driver Name */
-#define DRIVER_VERSION "11-Nov-2013"
+/* SR9800 Driver Name and Flags */
#define DRIVER_NAME "CoreChips"
#define DRIVER_FLAG \
(FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 9280ef544bbb..41b95b04143d 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1085,6 +1085,14 @@ int usbnet_nway_reset(struct net_device *net)
}
EXPORT_SYMBOL_GPL(usbnet_nway_reset);
+int usbnet_mii_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+EXPORT_SYMBOL_GPL(usbnet_mii_ioctl);
+
void usbnet_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
struct usbnet *dev = netdev_priv(net);
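Exporting usbnet_mii_ioctl() is what lets all the per-driver wrappers above (asix_ioctl, ax88179_ioctl, dm9601_ioctl, mcs7830_ioctl, sr_ioctl) be deleted: each was the same two-line call into generic_mii_ioctl(). A minidriver now simply points its ops at the shared helpers, e.g. (foo_* is hypothetical):

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open		= usbnet_open,
		.ndo_stop		= usbnet_stop,
		.ndo_start_xmit		= usbnet_start_xmit,
		.ndo_eth_ioctl		= usbnet_mii_ioctl,
		.ndo_validate_addr	= eth_validate_addr,
	};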
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index e957aa12a8a4..7bd0ae0a6a33 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2183,11 +2183,12 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
struct vxlan_metadata *md, u32 vxflags,
bool udp_sum)
{
+ int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ __be16 inner_protocol = htons(ETH_P_TEB);
struct vxlanhdr *vxh;
+ bool double_encap;
int min_headroom;
int err;
- int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
- __be16 inner_protocol = htons(ETH_P_TEB);
if ((vxflags & VXLAN_F_REMCSUM_TX) &&
skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2208,6 +2209,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
if (unlikely(err))
return err;
+ double_encap = udp_tunnel_handle_partial(skb);
err = iptunnel_handle_offloads(skb, type);
if (err)
return err;
@@ -2238,7 +2240,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
inner_protocol = skb->protocol;
}
- skb_set_inner_protocol(skb, inner_protocol);
+ udp_tunnel_set_inner_protocol(skb, double_encap, inner_protocol);
return 0;
}
@@ -3348,10 +3350,18 @@ static void vxlan_setup(struct net_device *dev)
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
+ /* Partial features are disabled by default. */
dev->vlan_features = dev->features;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= UDP_TUNNEL_PARTIAL_FEATURES;
+ dev->hw_features |= NETIF_F_GSO_PARTIAL;
+
+ dev->hw_enc_features = dev->hw_features;
+ dev->gso_partial_features = UDP_TUNNEL_PARTIAL_FEATURES;
+ dev->mangleid_features = NETIF_F_GSO_PARTIAL;
+
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
dev->change_proto_down = true;
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index adc89e651e27..cde897d92f24 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -126,7 +126,7 @@ static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode,
pstats = per_cpu_ptr(vninode->stats, i);
do {
start = u64_stats_fetch_begin(&pstats->syncp);
- memcpy(&temp, &pstats->stats, sizeof(temp));
+ u64_stats_copy(&temp, &pstats->stats, sizeof(temp));
} while (u64_stats_fetch_retry(&pstats->syncp, start));
dest->rx_packets += temp.rx_packets;
diff --git a/drivers/net/wan/framer/framer-core.c b/drivers/net/wan/framer/framer-core.c
index 58f5143359df..bf7ac7dd2804 100644
--- a/drivers/net/wan/framer/framer-core.c
+++ b/drivers/net/wan/framer/framer-core.c
@@ -60,12 +60,12 @@ int framer_pm_runtime_get_sync(struct framer *framer)
}
EXPORT_SYMBOL_GPL(framer_pm_runtime_get_sync);
-int framer_pm_runtime_put(struct framer *framer)
+void framer_pm_runtime_put(struct framer *framer)
{
if (!pm_runtime_enabled(&framer->dev))
- return -EOPNOTSUPP;
+ return;
- return pm_runtime_put(&framer->dev);
+ pm_runtime_put(&framer->dev);
}
EXPORT_SYMBOL_GPL(framer_pm_runtime_put);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index f999798a5612..dff84731343c 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -790,18 +790,14 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv)
if (priv->rx_buffer) {
dma_free_coherent(priv->dev,
- RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
+ (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
priv->rx_buffer, priv->dma_rx_addr);
priv->rx_buffer = NULL;
priv->dma_rx_addr = 0;
- }
- if (priv->tx_buffer) {
- dma_free_coherent(priv->dev,
- TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
- priv->tx_buffer, priv->dma_tx_addr);
priv->tx_buffer = NULL;
priv->dma_tx_addr = 0;
}
}
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index c06d50db40b8..00d0556dafef 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -2487,7 +2487,11 @@ void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
if (fast_dump)
ath10k_bmi_start(ar);
+ mutex_lock(&ar->dump_mutex);
+
+ spin_lock_bh(&ar->data_lock);
ar->stats.fw_crash_counter++;
+ spin_unlock_bh(&ar->data_lock);
ath10k_sdio_disable_intrs(ar);
@@ -2505,6 +2509,8 @@ void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
ath10k_sdio_enable_intrs(ar);
+ mutex_unlock(&ar->dump_mutex);
+
ath10k_core_start_recovery(ar);
}
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index b3f6424c17d3..f72f236fb9eb 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/bits.h>
@@ -11,6 +12,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/pwrseq/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc/qcom_rproc.h>
#include <linux/of_reserved_mem.h>
@@ -1023,10 +1025,14 @@ static int ath10k_hw_power_on(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
- ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
+ ret = pwrseq_power_on(ar_snoc->pwrseq);
if (ret)
return ret;
+ ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
+ if (ret)
+ goto pwrseq_off;
+
ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
if (ret)
goto vreg_off;
@@ -1035,18 +1041,28 @@ static int ath10k_hw_power_on(struct ath10k *ar)
vreg_off:
regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
+pwrseq_off:
+ pwrseq_power_off(ar_snoc->pwrseq);
+
return ret;
}
static int ath10k_hw_power_off(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int ret_seq = 0;
+ int ret_vreg;
ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);
- return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
+ ret_vreg = regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
+
+ if (ar_snoc->pwrseq)
+ ret_seq = pwrseq_power_off(ar_snoc->pwrseq);
+
+ return ret_vreg ? : ret_seq;
}
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
@@ -1762,7 +1778,38 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_release_resource;
}
- ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
+ /*
+ * devm_pwrseq_get() can return -EPROBE_DEFER in two cases:
+ * - it is not supposed to be used
+ * - it is supposed to be used, but the pwrseq provider driver
+ * hasn't probed yet.
+ *
+ * There is no simple way to distinguish between these two cases, but:
+ * - if it is not supposed to be used, then regulator_bulk_get() will
+ * return all regulators as expected, continuing the probe
+ * - if it is supposed to be used, but wasn't probed yet, we will get
+ * -EPROBE_DEFER from regulator_bulk_get() too.
+ *
+ * For backwards compatibility with DTs specifying regulators directly
+ * rather than using the PMU device, ignore the defer error from
+ * pwrseq.
+ */
+ ar_snoc->pwrseq = devm_pwrseq_get(&pdev->dev, "wlan");
+ if (IS_ERR(ar_snoc->pwrseq)) {
+ ret = PTR_ERR(ar_snoc->pwrseq);
+ ar_snoc->pwrseq = NULL;
+ if (ret != -EPROBE_DEFER)
+ goto err_free_irq;
+
+ ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
+ } else {
+ /*
+ * The first regulator (vdd-0.8-cx-mx) is used to power on part
+ * of the SoC rather than the PMU on WCN399x, the rest are
+ * handled via pwrseq.
+ */
+ ar_snoc->num_vregs = 1;
+ }
+
ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs,
sizeof(*ar_snoc->vregs), GFP_KERNEL);
if (!ar_snoc->vregs) {
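One subtlety in ath10k_hw_power_off() above: "ret_vreg ? : ret_seq" uses the GNU "a ?: b" extension, shorthand for "a ? a : b", so the regulator error is reported first and the pwrseq error only when the regulators disabled cleanly. The equivalent long form:

	if (ret_vreg)
		return ret_vreg;

	return ret_seq;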
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index d4bce1707696..1ecae34687c2 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef _SNOC_H_
@@ -53,6 +54,7 @@ enum ath10k_snoc_flags {
};
struct clk_bulk_data;
+struct pwrseq_desc;
struct regulator_bulk_data;
struct ath10k_snoc {
@@ -73,6 +75,7 @@ struct ath10k_snoc {
struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX];
struct ath10k_ce ce;
struct timer_list rx_post_retry;
+ struct pwrseq_desc *pwrseq;
struct regulator_bulk_data *vregs;
size_t num_vregs;
struct clk_bulk_data *clks;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index b4aad6604d6d..ce22141e5efd 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -5289,8 +5289,6 @@ ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
struct ath10k_sta *arsta;
u8 peer_addr[ETH_ALEN];
- lockdep_assert_held(&ar->data_lock);
-
ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
ether_addr_copy(peer_addr, ev->peer_macaddr.addr);
@@ -5305,7 +5303,9 @@ ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
}
arsta = (struct ath10k_sta *)sta->drv_priv;
+ spin_lock_bh(&ar->data_lock);
arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state);
+ spin_unlock_bh(&ar->data_lock);
exit:
rcu_read_unlock();
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
index 659ef134ef16..47dfd39caa89 100644
--- a/drivers/net/wireless/ath/ath11k/Kconfig
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -58,3 +58,14 @@ config ATH11K_SPECTRAL
Enable ath11k spectral scan support
Say Y to enable access to the FFT/spectral data via debugfs.
+
+config ATH11K_CFR
+ bool "ath11k channel frequency response support"
+ depends on ATH11K_DEBUGFS
+ depends on RELAY
+ help
+ Enable ath11k channel frequency response dump support.
+ This option exposes debugfs nodes that allow the user to
+ enable or disable CFR capture and to dump the captured data.
+
+ Say Y to enable CFR data dump collection via debugfs.
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
index d9092414b362..b1435fcf3e1b 100644
--- a/drivers/net/wireless/ath/ath11k/Makefile
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -28,6 +28,7 @@ ath11k-$(CONFIG_THERMAL) += thermal.o
ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
ath11k-$(CONFIG_PM) += wow.o
ath11k-$(CONFIG_DEV_COREDUMP) += coredump.o
+ath11k-$(CONFIG_ATH11K_CFR) += cfr.o
obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
ath11k_ahb-y += ahb.o
diff --git a/drivers/net/wireless/ath/ath11k/cfr.c b/drivers/net/wireless/ath/ath11k/cfr.c
new file mode 100644
index 000000000000..61bf1c0884f7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/cfr.c
@@ -0,0 +1,1023 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/relay.h>
+#include "core.h"
+#include "debug.h"
+
+struct ath11k_dbring *ath11k_cfr_get_dbring(struct ath11k *ar)
+{
+ if (ar->cfr_enabled)
+ return &ar->cfr.rx_ring;
+
+ return NULL;
+}
+
+static int ath11k_cfr_calculate_tones_from_dma_hdr(struct ath11k_cfr_dma_hdr *hdr)
+{
+ u8 bw = FIELD_GET(CFIR_DMA_HDR_INFO1_UPLOAD_PKT_BW, hdr->info1);
+ u8 preamble = FIELD_GET(CFIR_DMA_HDR_INFO1_PREAMBLE_TYPE, hdr->info1);
+
+ switch (preamble) {
+ case ATH11K_CFR_PREAMBLE_TYPE_LEGACY:
+ fallthrough;
+ case ATH11K_CFR_PREAMBLE_TYPE_VHT:
+ switch (bw) {
+ case 0:
+ return TONES_IN_20MHZ;
+ case 1: /* DUP40/VHT40 */
+ return TONES_IN_40MHZ;
+ case 2: /* DUP80/VHT80 */
+ return TONES_IN_80MHZ;
+ case 3: /* DUP160/VHT160 */
+ return TONES_IN_160MHZ;
+ default:
+ return TONES_INVALID;
+ }
+ case ATH11K_CFR_PREAMBLE_TYPE_HT:
+ switch (bw) {
+ case 0:
+ return TONES_IN_20MHZ;
+ case 1:
+ return TONES_IN_40MHZ;
+ default:
+ return TONES_INVALID;
+ }
+ default:
+ return TONES_INVALID;
+ }
+}
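For example, a VHT capture at 80 MHz (bw field 2) resolves to TONES_IN_80MHZ, which ath11k_cfr_process_data() below uses to size the payload as tones * (num_chains + 1) bytes after the DMA header. A hedged construction of such a header, filling only the info1 fields this helper reads:

	struct ath11k_cfr_dma_hdr hdr = {};
	int tones;

	hdr.info1 = FIELD_PREP(CFIR_DMA_HDR_INFO1_PREAMBLE_TYPE,
			       ATH11K_CFR_PREAMBLE_TYPE_VHT) |
		    FIELD_PREP(CFIR_DMA_HDR_INFO1_UPLOAD_PKT_BW, 2);
	tones = ath11k_cfr_calculate_tones_from_dma_hdr(&hdr); /* TONES_IN_80MHZ */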
+
+void ath11k_cfr_release_lut_entry(struct ath11k_look_up_table *lut)
+{
+ memset(lut, 0, sizeof(*lut));
+}
+
+static void ath11k_cfr_rfs_write(struct ath11k *ar, const void *head,
+ u32 head_len, const void *data, u32 data_len,
+ const void *tail, int tail_data)
+{
+ struct ath11k_cfr *cfr = &ar->cfr;
+
+ if (!cfr->rfs_cfr_capture)
+ return;
+
+ relay_write(cfr->rfs_cfr_capture, head, head_len);
+ relay_write(cfr->rfs_cfr_capture, data, data_len);
+ relay_write(cfr->rfs_cfr_capture, tail, tail_data);
+ relay_flush(cfr->rfs_cfr_capture);
+}
+
+static void ath11k_cfr_free_pending_dbr_events(struct ath11k *ar)
+{
+ struct ath11k_cfr *cfr = &ar->cfr;
+ struct ath11k_look_up_table *lut;
+ int i;
+
+ if (!cfr->lut)
+ return;
+
+ for (i = 0; i < cfr->lut_num; i++) {
+ lut = &cfr->lut[i];
+ if (lut->dbr_recv && !lut->tx_recv &&
+ lut->dbr_tstamp < cfr->last_success_tstamp) {
+ ath11k_dbring_bufs_replenish(ar, &cfr->rx_ring, lut->buff,
+ WMI_DIRECT_BUF_CFR);
+ ath11k_cfr_release_lut_entry(lut);
+ cfr->flush_dbr_cnt++;
+ }
+ }
+}
+
+/**
+ * ath11k_cfr_correlate_and_relay() - Correlate and relay CFR events
+ * @ar: Pointer to ath11k structure
+ * @lut: Lookup table for correlation
+ * @event_type: Type of event received (TX or DBR)
+ *
+ * Correlates WMI_PDEV_DMA_RING_BUF_RELEASE_EVENT (DBR) and
+ * WMI_PEER_CFR_CAPTURE_EVENT (TX capture) by PPDU ID. If both events
+ * are present and the PPDU IDs match, returns
+ * ATH11K_CORRELATE_STATUS_RELEASE to relay the correlated data to
+ * userspace. Otherwise returns ATH11K_CORRELATE_STATUS_HOLD to wait
+ * for the other event.
+ *
+ * Also checks pending DBR events and clears them when no corresponding TX
+ * capture event is received for the PPDU.
+ *
+ * Return: ATH11K_CORRELATE_STATUS_RELEASE or ATH11K_CORRELATE_STATUS_HOLD
+ */
+static enum ath11k_cfr_correlate_status
+ath11k_cfr_correlate_and_relay(struct ath11k *ar,
+ struct ath11k_look_up_table *lut,
+ u8 event_type)
+{
+ enum ath11k_cfr_correlate_status status;
+ struct ath11k_cfr *cfr = &ar->cfr;
+ u64 diff;
+
+ if (event_type == ATH11K_CORRELATE_TX_EVENT) {
+ if (lut->tx_recv)
+ cfr->cfr_dma_aborts++;
+ cfr->tx_evt_cnt++;
+ lut->tx_recv = true;
+ } else if (event_type == ATH11K_CORRELATE_DBR_EVENT) {
+ cfr->dbr_evt_cnt++;
+ lut->dbr_recv = true;
+ }
+
+ if (lut->dbr_recv && lut->tx_recv) {
+ if (lut->dbr_ppdu_id == lut->tx_ppdu_id) {
+ /*
+ * 64-bit counters make wraparound highly improbable, so
+ * wraparound handling is omitted.
+ */
+ cfr->last_success_tstamp = lut->dbr_tstamp;
+ if (lut->dbr_tstamp > lut->txrx_tstamp) {
+ diff = lut->dbr_tstamp - lut->txrx_tstamp;
+ ath11k_dbg(ar->ab, ATH11K_DBG_CFR,
+ "txrx event -> dbr event delay = %u ms",
+ jiffies_to_msecs(diff));
+ } else if (lut->txrx_tstamp > lut->dbr_tstamp) {
+ diff = lut->txrx_tstamp - lut->dbr_tstamp;
+ ath11k_dbg(ar->ab, ATH11K_DBG_CFR,
+ "dbr event -> txrx event delay = %u ms",
+ jiffies_to_msecs(diff));
+ }
+
+ ath11k_cfr_free_pending_dbr_events(ar);
+
+ cfr->release_cnt++;
+ status = ATH11K_CORRELATE_STATUS_RELEASE;
+ } else {
+ /*
+ * Discard TXRX event on PPDU ID mismatch because multiple PPDUs
+ * may share the same DMA address due to ucode aborts.
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_CFR,
+ "dbr and tx ppdu ids do not match, dropping tx event");
+ lut->tx_recv = false;
+ lut->tx_ppdu_id = 0;
+ cfr->clear_txrx_event++;
+ cfr->cfr_dma_aborts++;
+ status = ATH11K_CORRELATE_STATUS_HOLD;
+ }
+ } else {
+ status = ATH11K_CORRELATE_STATUS_HOLD;
+ }
+
+ return status;
+}
+
+static int ath11k_cfr_process_data(struct ath11k *ar,
+ struct ath11k_dbring_data *param)
+{
+ u32 end_magic = ATH11K_CFR_END_MAGIC;
+ struct ath11k_csi_cfr_header *header;
+ struct ath11k_cfr_dma_hdr *dma_hdr;
+ struct ath11k_cfr *cfr = &ar->cfr;
+ struct ath11k_look_up_table *lut;
+ struct ath11k_base *ab = ar->ab;
+ u32 buf_id, tones, length;
+ u8 num_chains;
+ int status;
+ u8 *data;
+
+ data = param->data;
+ buf_id = param->buf_id;
+
+ if (param->data_sz < sizeof(*dma_hdr))
+ return -EINVAL;
+
+ dma_hdr = (struct ath11k_cfr_dma_hdr *)data;
+
+ tones = ath11k_cfr_calculate_tones_from_dma_hdr(dma_hdr);
+ if (tones == TONES_INVALID) {
+ ath11k_warn(ar->ab, "Number of tones received is invalid\n");
+ return -EINVAL;
+ }
+
+ num_chains = FIELD_GET(CFIR_DMA_HDR_INFO1_NUM_CHAINS,
+ dma_hdr->info1);
+
+ length = sizeof(*dma_hdr);
+ length += tones * (num_chains + 1);
+
+ /* Make sure the computed CFR record fits in the received buffer */
+ if (length > param->data_sz)
+ return -EINVAL;
+
+ spin_lock_bh(&cfr->lut_lock);
+
+ if (!cfr->lut) {
+ spin_unlock_bh(&cfr->lut_lock);
+ return -EINVAL;
+ }
+
+ /* buf_id doubles as the LUT index; the TX path finds this entry by DMA address */
+ if (buf_id >= cfr->lut_num) {
+ spin_unlock_bh(&cfr->lut_lock);
+ return -EINVAL;
+ }
+
+ lut = &cfr->lut[buf_id];
+
+ ath11k_dbg_dump(ab, ATH11K_DBG_CFR_DUMP, "data_from_buf_rel:", "",
+ data, length);
+
+ lut->buff = param->buff;
+ lut->data = data;
+ lut->data_len = length;
+ lut->dbr_ppdu_id = dma_hdr->phy_ppdu_id;
+ lut->dbr_tstamp = jiffies;
+
+ memcpy(&lut->hdr, dma_hdr, sizeof(*dma_hdr));
+
+ header = &lut->header;
+ header->meta_data.channel_bw = FIELD_GET(CFIR_DMA_HDR_INFO1_UPLOAD_PKT_BW,
+ dma_hdr->info1);
+ header->meta_data.length = length;
+
+ status = ath11k_cfr_correlate_and_relay(ar, lut,
+ ATH11K_CORRELATE_DBR_EVENT);
+ if (status == ATH11K_CORRELATE_STATUS_RELEASE) {
+ ath11k_dbg(ab, ATH11K_DBG_CFR,
+ "releasing CFR data to user space");
+ ath11k_cfr_rfs_write(ar, &lut->header,
+ sizeof(struct ath11k_csi_cfr_header),
+ lut->data, lut->data_len,
+ &end_magic, sizeof(u32));
+ ath11k_cfr_release_lut_entry(lut);
+ } else if (status == ATH11K_CORRELATE_STATUS_HOLD) {
+ ath11k_dbg(ab, ATH11K_DBG_CFR,
+ "tx event not yet received, holding the buf");
+ }
+
+ spin_unlock_bh(&cfr->lut_lock);
+
+ return status;
+}
+
+static void ath11k_cfr_fill_hdr_info(struct ath11k *ar,
+ struct ath11k_csi_cfr_header *header,
+ struct ath11k_cfr_peer_tx_param *params)
+{
+ struct ath11k_cfr *cfr;
+
+ cfr = &ar->cfr;
+ header->cfr_metadata_version = ATH11K_CFR_META_VERSION_4;
+ header->cfr_data_version = ATH11K_CFR_DATA_VERSION_1;
+ header->cfr_metadata_len = sizeof(struct cfr_metadata);
+ header->chip_type = ar->ab->hw_rev;
+ header->meta_data.status = FIELD_GET(WMI_CFR_PEER_CAPTURE_STATUS,
+ params->status);
+ header->meta_data.capture_bw = params->bandwidth;
+
+ /*
+ * FW always reports the phymode as HE. Replace it with the
+ * phy mode cached during peer assoc.
+ */
+ header->meta_data.phy_mode = cfr->phymode;
+
+ header->meta_data.prim20_chan = params->primary_20mhz_chan;
+ header->meta_data.center_freq1 = params->band_center_freq1;
+ header->meta_data.center_freq2 = params->band_center_freq2;
+
+ /*
+ * CFR capture is triggered by the ACK of a QoS Null frame:
+ * - 20 MHz: Legacy ACK
+ * - 40/80/160 MHz: DUP Legacy ACK
+ */
+ header->meta_data.capture_mode = params->bandwidth ?
+ ATH11K_CFR_CAPTURE_DUP_LEGACY_ACK : ATH11K_CFR_CAPTURE_LEGACY_ACK;
+ header->meta_data.capture_type = params->capture_method;
+ header->meta_data.num_rx_chain = ar->num_rx_chains;
+ header->meta_data.sts_count = params->spatial_streams;
+ header->meta_data.timestamp = params->timestamp_us;
+ ether_addr_copy(header->meta_data.peer_addr, params->peer_mac_addr);
+ memcpy(header->meta_data.chain_rssi, params->chain_rssi,
+ sizeof(params->chain_rssi));
+ memcpy(header->meta_data.chain_phase, params->chain_phase,
+ sizeof(params->chain_phase));
+ memcpy(header->meta_data.agc_gain, params->agc_gain,
+ sizeof(params->agc_gain));
+}
+
+int ath11k_process_cfr_capture_event(struct ath11k_base *ab,
+ struct ath11k_cfr_peer_tx_param *params)
+{
+ struct ath11k_look_up_table *lut = NULL;
+ u32 end_magic = ATH11K_CFR_END_MAGIC;
+ struct ath11k_csi_cfr_header *header;
+ struct ath11k_dbring_element *buff;
+ struct ath11k_cfr *cfr;
+ dma_addr_t buf_addr;
+ struct ath11k *ar;
+ u8 tx_status;
+ int status;
+ int i;
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, params->vdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath11k_warn(ab, "Failed to get ar for vdev id %d\n",
+ params->vdev_id);
+ return -ENOENT;
+ }
+
+ cfr = &ar->cfr;
+ rcu_read_unlock();
+
+ if (WMI_CFR_CAPTURE_STATUS_PEER_PS & params->status) {
+ ath11k_warn(ab, "CFR capture failed as peer %pM is in powersave",
+ params->peer_mac_addr);
+ return -EINVAL;
+ }
+
+ if (!(WMI_CFR_PEER_CAPTURE_STATUS & params->status)) {
+ ath11k_warn(ab, "CFR capture failed for the peer: %pM",
+ params->peer_mac_addr);
+ cfr->tx_peer_status_cfr_fail++;
+ return -EINVAL;
+ }
+
+ tx_status = FIELD_GET(WMI_CFR_FRAME_TX_STATUS, params->status);
+ if (tx_status != WMI_FRAME_TX_STATUS_OK) {
+ ath11k_warn(ab, "WMI tx status %d for the peer %pM",
+ tx_status, params->peer_mac_addr);
+ cfr->tx_evt_status_cfr_fail++;
+ return -EINVAL;
+ }
+
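+ /*
+ * The TX capture event carries the DMA address of the CFR buffer in
+ * two correlation words; rebuild it to find the LUT entry filled in
+ * by the DBR path.
+ */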
+ buf_addr = (((u64)FIELD_GET(WMI_CFR_CORRELATION_INFO2_BUF_ADDR_HIGH,
+ params->correlation_info_2)) << 32) |
+ params->correlation_info_1;
+
+ spin_lock_bh(&cfr->lut_lock);
+
+ if (!cfr->lut) {
+ spin_unlock_bh(&cfr->lut_lock);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cfr->lut_num; i++) {
+ struct ath11k_look_up_table *temp = &cfr->lut[i];
+
+ if (temp->dbr_address == buf_addr) {
+ lut = &cfr->lut[i];
+ break;
+ }
+ }
+
+ if (!lut) {
+ spin_unlock_bh(&cfr->lut_lock);
+ ath11k_warn(ab, "no lut entry found to process tx event\n");
+ cfr->tx_dbr_lookup_fail++;
+ return -EINVAL;
+ }
+
+ lut->tx_ppdu_id = FIELD_GET(WMI_CFR_CORRELATION_INFO2_PPDU_ID,
+ params->correlation_info_2);
+ lut->txrx_tstamp = jiffies;
+
+ header = &lut->header;
+ header->start_magic_num = ATH11K_CFR_START_MAGIC;
+ header->vendorid = VENDOR_QCA;
+ header->platform_type = PLATFORM_TYPE_ARM;
+
+ ath11k_cfr_fill_hdr_info(ar, header, params);
+
+ status = ath11k_cfr_correlate_and_relay(ar, lut,
+ ATH11K_CORRELATE_TX_EVENT);
+ if (status == ATH11K_CORRELATE_STATUS_RELEASE) {
+ ath11k_dbg(ab, ATH11K_DBG_CFR,
+ "Releasing CFR data to user space");
+ ath11k_cfr_rfs_write(ar, &lut->header,
+ sizeof(struct ath11k_csi_cfr_header),
+ lut->data, lut->data_len,
+ &end_magic, sizeof(u32));
+ buff = lut->buff;
+ ath11k_cfr_release_lut_entry(lut);
+
+ ath11k_dbring_bufs_replenish(ar, &cfr->rx_ring, buff,
+ WMI_DIRECT_BUF_CFR);
+ } else if (status == ATH11K_CORRELATE_STATUS_HOLD) {
+ ath11k_dbg(ab, ATH11K_DBG_CFR,
+ "dbr event not yet received, holding the buf\n");
+ }
+
+ spin_unlock_bh(&cfr->lut_lock);
+
+ return 0;
+}
+
+/* Check whether the given peer mac address is in the
+ * unassociated peer pool.
+ */
+bool ath11k_cfr_peer_is_in_cfr_unassoc_pool(struct ath11k *ar, const u8 *peer_mac)
+{
+ struct ath11k_cfr *cfr = &ar->cfr;
+ struct cfr_unassoc_pool_entry *entry;
+ int i;
+
+ if (!ar->cfr_enabled)
+ return false;
+
+ spin_lock_bh(&cfr->lock);
+ for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
+ entry = &cfr->unassoc_pool[i];
+ if (!entry->is_valid)
+ continue;
+
+ if (ether_addr_equal(peer_mac, entry->peer_mac)) {
+ spin_unlock_bh(&cfr->lock);
+ return true;
+ }
+ }
+
+ spin_unlock_bh(&cfr->lock);
+
+ return false;
+}
+
+void ath11k_cfr_update_unassoc_pool_entry(struct ath11k *ar,
+ const u8 *peer_mac)
+{
+ struct ath11k_cfr *cfr = &ar->cfr;
+ struct cfr_unassoc_pool_entry *entry;
+ int i;
+
+ spin_lock_bh(&cfr->lock);
+ for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
+ entry = &cfr->unassoc_pool[i];
+ if (!entry->is_valid)
+ continue;
+
+ if (ether_addr_equal(peer_mac, entry->peer_mac) &&
+ entry->period == 0) {
+ memset(entry->peer_mac, 0, ETH_ALEN);
+ entry->is_valid = false;
+ cfr->cfr_enabled_peer_cnt--;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&cfr->lock);
+}
+
+void ath11k_cfr_decrement_peer_count(struct ath11k *ar,
+ struct ath11k_sta *arsta)
+{
+ struct ath11k_cfr *cfr = &ar->cfr;
+
+ spin_lock_bh(&cfr->lock);
+
+ if (arsta->cfr_capture.cfr_enable)
+ cfr->cfr_enabled_peer_cnt--;
+
+ spin_unlock_bh(&cfr->lock);
+}
+
+static enum ath11k_wmi_cfr_capture_bw
+ath11k_cfr_bw_to_fw_cfr_bw(enum ath11k_cfr_capture_bw bw)
+{
+ switch (bw) {
+ case ATH11K_CFR_CAPTURE_BW_20:
+ return WMI_PEER_CFR_CAPTURE_BW_20;
+ case ATH11K_CFR_CAPTURE_BW_40:
+ return WMI_PEER_CFR_CAPTURE_BW_40;
+ case ATH11K_CFR_CAPTURE_BW_80:
+ return WMI_PEER_CFR_CAPTURE_BW_80;
+ default:
+ return WMI_PEER_CFR_CAPTURE_BW_MAX;
+ }
+}
+
+static enum ath11k_wmi_cfr_capture_method
+ath11k_cfr_method_to_fw_cfr_method(enum ath11k_cfr_capture_method method)
+{
+ switch (method) {
+ case ATH11K_CFR_CAPTURE_METHOD_NULL_FRAME:
+ return WMI_CFR_CAPTURE_METHOD_NULL_FRAME;
+ case ATH11K_CFR_CAPTURE_METHOD_NULL_FRAME_WITH_PHASE:
+ return WMI_CFR_CAPTURE_METHOD_NULL_FRAME_WITH_PHASE;
+ case ATH11K_CFR_CAPTURE_METHOD_PROBE_RESP:
+ return WMI_CFR_CAPTURE_METHOD_PROBE_RESP;
+ default:
+ return WMI_CFR_CAPTURE_METHOD_MAX;
+ }
+}
+
+int ath11k_cfr_send_peer_cfr_capture_cmd(struct ath11k *ar,
+ struct ath11k_sta *arsta,
+ struct ath11k_per_peer_cfr_capture *params,
+ const u8 *peer_mac)
+{
+ struct ath11k_cfr *cfr = &ar->cfr;
+ struct wmi_peer_cfr_capture_conf_arg arg;
+ enum ath11k_wmi_cfr_capture_bw bw;
+ enum ath11k_wmi_cfr_capture_method method;
+ int ret = 0;
+
+ if (cfr->cfr_enabled_peer_cnt >= ATH11K_MAX_CFR_ENABLED_CLIENTS &&
+ !arsta->cfr_capture.cfr_enable) {
+ ath11k_err(ar->ab, "CFR enable peer threshold reached %u\n",
+ cfr->cfr_enabled_peer_cnt);
+ return -ENOSPC;
+ }
+
+ if (params->cfr_enable == arsta->cfr_capture.cfr_enable &&
+ params->cfr_period == arsta->cfr_capture.cfr_period &&
+ params->cfr_method == arsta->cfr_capture.cfr_method &&
+ params->cfr_bw == arsta->cfr_capture.cfr_bw)
+ return ret;
+
+ if (!params->cfr_enable && !arsta->cfr_capture.cfr_enable)
+ return ret;
+
+ bw = ath11k_cfr_bw_to_fw_cfr_bw(params->cfr_bw);
+ if (bw >= WMI_PEER_CFR_CAPTURE_BW_MAX) {
+ ath11k_warn(ar->ab, "FW doesn't support configured bw %d\n",
+ params->cfr_bw);
+ return -EINVAL;
+ }
+
+ method = ath11k_cfr_method_to_fw_cfr_method(params->cfr_method);
+ if (method >= WMI_CFR_CAPTURE_METHOD_MAX) {
+ ath11k_warn(ar->ab, "FW doesn't support configured method %d\n",
+ params->cfr_method);
+ return -EINVAL;
+ }
+
+ arg.request = params->cfr_enable;
+ arg.periodicity = params->cfr_period;
+ arg.bw = bw;
+ arg.method = method;
+
+ ret = ath11k_wmi_peer_set_cfr_capture_conf(ar, arsta->arvif->vdev_id,
+ peer_mac, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send cfr capture info: vdev_id %u peer %pM: %d\n",
+ arsta->arvif->vdev_id, peer_mac, ret);
+ return ret;
+ }
+
+ spin_lock_bh(&cfr->lock);
+
+ if (params->cfr_enable &&
+ params->cfr_enable != arsta->cfr_capture.cfr_enable)
+ cfr->cfr_enabled_peer_cnt++;
+ else if (!params->cfr_enable)
+ cfr->cfr_enabled_peer_cnt--;
+
+ spin_unlock_bh(&cfr->lock);
+
+ arsta->cfr_capture.cfr_enable = params->cfr_enable;
+ arsta->cfr_capture.cfr_period = params->cfr_period;
+ arsta->cfr_capture.cfr_method = params->cfr_method;
+ arsta->cfr_capture.cfr_bw = params->cfr_bw;
+
+ return ret;
+}
+
+void ath11k_cfr_update_unassoc_pool(struct ath11k *ar,
+ struct ath11k_per_peer_cfr_capture *params,
+ u8 *peer_mac)
+{
+ struct ath11k_cfr *cfr = &ar->cfr;
+ struct cfr_unassoc_pool_entry *entry;
+ int available_idx = -1;
+ int i;
+
+ guard(spinlock_bh)(&cfr->lock);
+
+ if (!params->cfr_enable) {
+ for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
+ entry = &cfr->unassoc_pool[i];
+ if (ether_addr_equal(peer_mac, entry->peer_mac)) {
+ memset(entry->peer_mac, 0, ETH_ALEN);
+ entry->is_valid = false;
+ cfr->cfr_enabled_peer_cnt--;
+ break;
+ }
+ }
+ return;
+ }
+
+ if (cfr->cfr_enabled_peer_cnt >= ATH11K_MAX_CFR_ENABLED_CLIENTS) {
+ ath11k_info(ar->ab, "Max cfr peer threshold reached\n");
+ return;
+ }
+
+ for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
+ entry = &cfr->unassoc_pool[i];
+
+ if (ether_addr_equal(peer_mac, entry->peer_mac)) {
+ ath11k_info(ar->ab,
+ "peer entry already present, updating params\n");
+ entry->period = params->cfr_period;
+ available_idx = -1;
+ break;
+ }
+
+ if (available_idx < 0 && !entry->is_valid)
+ available_idx = i;
+ }
+
+ if (available_idx >= 0) {
+ entry = &cfr->unassoc_pool[available_idx];
+ ether_addr_copy(entry->peer_mac, peer_mac);
+ entry->period = params->cfr_period;
+ entry->is_valid = true;
+ cfr->cfr_enabled_peer_cnt++;
+ }
+}
+
+static ssize_t ath11k_read_file_enable_cfr(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32] = {};
+ size_t len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->cfr_enabled);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_file_enable_cfr(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u32 enable_cfr;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &enable_cfr))
+ return -EINVAL;
+
+ guard(mutex)(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ return -ENETDOWN;
+
+ if (enable_cfr > 1)
+ return -EINVAL;
+
+ if (ar->cfr_enabled == enable_cfr)
+ return count;
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PER_PEER_CFR_ENABLE,
+ enable_cfr, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "Failed to enable/disable per peer cfr %d\n", ret);
+ return ret;
+ }
+
+ ar->cfr_enabled = enable_cfr;
+
+ return count;
+}
+
+static const struct file_operations fops_enable_cfr = {
+ .read = ath11k_read_file_enable_cfr,
+ .write = ath11k_write_file_enable_cfr,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_write_file_cfr_unassoc(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_cfr *cfr = &ar->cfr;
+ struct cfr_unassoc_pool_entry *entry;
+ char buf[64] = {};
+ u8 peer_mac[ETH_ALEN];
+ u32 cfr_capture_enable = 0;
+ u32 cfr_capture_period = 0;
+ int available_idx = -1;
+ int ret, i;
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+
+ guard(mutex)(&ar->conf_mutex);
+ guard(spinlock_bh)(&cfr->lock);
+
+ if (ar->state != ATH11K_STATE_ON)
+ return -ENETDOWN;
+
+ if (!ar->cfr_enabled) {
+ ath11k_err(ar->ab, "CFR is not enabled on this pdev %d\n",
+ ar->pdev_idx);
+ return -EINVAL;
+ }
+
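+ /* Expected input: "<xx:xx:xx:xx:xx:xx> <enable> <period>" */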
+ ret = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %u %u",
+ &peer_mac[0], &peer_mac[1], &peer_mac[2], &peer_mac[3],
+ &peer_mac[4], &peer_mac[5], &cfr_capture_enable,
+ &cfr_capture_period);
+
+ /* At least the mac address and the enable flag must be parsed */
+ if (ret < 7)
+ return -EINVAL;
+
+ if (cfr_capture_enable && ret != 8)
+ return -EINVAL;
+
+ if (!cfr_capture_enable) {
+ for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
+ entry = &cfr->unassoc_pool[i];
+ if (ether_addr_equal(peer_mac, entry->peer_mac)) {
+ memset(entry->peer_mac, 0, ETH_ALEN);
+ entry->is_valid = false;
+ cfr->cfr_enabled_peer_cnt--;
+ }
+ }
+
+ return count;
+ }
+
+ if (cfr->cfr_enabled_peer_cnt >= ATH11K_MAX_CFR_ENABLED_CLIENTS) {
+ ath11k_info(ar->ab, "Max cfr peer threshold reached\n");
+ return count;
+ }
+
+ for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
+ entry = &cfr->unassoc_pool[i];
+
+ if (available_idx < 0 && !entry->is_valid)
+ available_idx = i;
+
+ if (ether_addr_equal(peer_mac, entry->peer_mac)) {
+ ath11k_info(ar->ab,
+ "peer entry already present, updating params\n");
+ entry->period = cfr_capture_period;
+ return count;
+ }
+ }
+
+ if (available_idx >= 0) {
+ entry = &cfr->unassoc_pool[available_idx];
+ ether_addr_copy(entry->peer_mac, peer_mac);
+ entry->period = cfr_capture_period;
+ entry->is_valid = true;
+ cfr->cfr_enabled_peer_cnt++;
+ }
+
+ return count;
+}
+
+static ssize_t ath11k_read_file_cfr_unassoc(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_cfr *cfr = &ar->cfr;
+ struct cfr_unassoc_pool_entry *entry;
+ char buf[512] = {};
+ int len = 0, i;
+
+ spin_lock_bh(&cfr->lock);
+
+ for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
+ entry = &cfr->unassoc_pool[i];
+ if (entry->is_valid)
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "peer: %pM period: %u\n",
+ entry->peer_mac, entry->period);
+ }
+
+ spin_unlock_bh(&cfr->lock);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_configure_cfr_unassoc = {
+ .write = ath11k_write_file_cfr_unassoc,
+ .read = ath11k_read_file_cfr_unassoc,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_cfr_debug_unregister(struct ath11k *ar)
+{
+ debugfs_remove(ar->cfr.enable_cfr);
+ ar->cfr.enable_cfr = NULL;
+ debugfs_remove(ar->cfr.cfr_unassoc);
+ ar->cfr.cfr_unassoc = NULL;
+
+ relay_close(ar->cfr.rfs_cfr_capture);
+ ar->cfr.rfs_cfr_capture = NULL;
+}
+
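+/*
+ * relay_open() callback: back the relay channel with one global debugfs
+ * file instead of a per-CPU file, hence *is_global is set.
+ */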
+static struct dentry *ath11k_cfr_create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ buf_file = debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+ *is_global = 1;
+ return buf_file;
+}
+
+static int ath11k_cfr_remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+
+ return 0;
+}
+
+static const struct rchan_callbacks rfs_cfr_capture_cb = {
+ .create_buf_file = ath11k_cfr_create_buf_file_handler,
+ .remove_buf_file = ath11k_cfr_remove_buf_file_handler,
+};
+
+static void ath11k_cfr_debug_register(struct ath11k *ar)
+{
+ ar->cfr.rfs_cfr_capture = relay_open("cfr_capture",
+ ar->debug.debugfs_pdev,
+ ar->ab->hw_params.cfr_stream_buf_size,
+ ar->ab->hw_params.cfr_num_stream_bufs,
+ &rfs_cfr_capture_cb, NULL);
+
+ ar->cfr.enable_cfr = debugfs_create_file("enable_cfr", 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_enable_cfr);
+
+ ar->cfr.cfr_unassoc = debugfs_create_file("cfr_unassoc", 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_configure_cfr_unassoc);
+}
+
+void ath11k_cfr_lut_update_paddr(struct ath11k *ar, dma_addr_t paddr,
+ u32 buf_id)
+{
+ struct ath11k_cfr *cfr = &ar->cfr;
+
+ if (cfr->lut)
+ cfr->lut[buf_id].dbr_address = paddr;
+}
+
+void ath11k_cfr_update_phymode(struct ath11k *ar, enum wmi_phy_mode phymode)
+{
+ struct ath11k_cfr *cfr = &ar->cfr;
+
+ cfr->phymode = phymode;
+}
+
+static void ath11k_cfr_ring_free(struct ath11k *ar)
+{
+ struct ath11k_cfr *cfr = &ar->cfr;
+
+ ath11k_dbring_buf_cleanup(ar, &cfr->rx_ring);
+ ath11k_dbring_srng_cleanup(ar, &cfr->rx_ring);
+}
+
+static int ath11k_cfr_ring_alloc(struct ath11k *ar,
+ struct ath11k_dbring_cap *db_cap)
+{
+ struct ath11k_cfr *cfr = &ar->cfr;
+ int ret;
+
+ ret = ath11k_dbring_srng_setup(ar, &cfr->rx_ring,
+ ATH11K_CFR_NUM_RING_ENTRIES,
+ db_cap->min_elem);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring: %d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbring_set_cfg(ar, &cfr->rx_ring,
+ ATH11K_CFR_NUM_RESP_PER_EVENT,
+ ATH11K_CFR_EVENT_TIMEOUT_MS,
+ ath11k_cfr_process_data);
+
+ ret = ath11k_dbring_buf_setup(ar, &cfr->rx_ring, db_cap);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring buffer: %d\n", ret);
+ goto srng_cleanup;
+ }
+
+ ret = ath11k_dbring_wmi_cfg_setup(ar, &cfr->rx_ring, WMI_DIRECT_BUF_CFR);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring cfg: %d\n", ret);
+ goto buffer_cleanup;
+ }
+
+ return 0;
+
+buffer_cleanup:
+ ath11k_dbring_buf_cleanup(ar, &cfr->rx_ring);
+srng_cleanup:
+ ath11k_dbring_srng_cleanup(ar, &cfr->rx_ring);
+ return ret;
+}
+
+void ath11k_cfr_deinit(struct ath11k_base *ab)
+{
+ struct ath11k_cfr *cfr;
+ struct ath11k *ar;
+ int i;
+
+ if (!test_bit(WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT, ab->wmi_ab.svc_map) ||
+ !ab->hw_params.cfr_support)
+ return;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ cfr = &ar->cfr;
+
+ if (!cfr->enabled)
+ continue;
+
+ ath11k_cfr_debug_unregister(ar);
+ ath11k_cfr_ring_free(ar);
+
+ spin_lock_bh(&cfr->lut_lock);
+ kfree(cfr->lut);
+ cfr->lut = NULL;
+ cfr->enabled = false;
+ spin_unlock_bh(&cfr->lut_lock);
+ }
+}
+
+int ath11k_cfr_init(struct ath11k_base *ab)
+{
+ struct ath11k_dbring_cap db_cap;
+ struct ath11k_cfr *cfr;
+ u32 num_lut_entries;
+ struct ath11k *ar;
+ int i, ret;
+
+ if (!test_bit(WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT, ab->wmi_ab.svc_map) ||
+ !ab->hw_params.cfr_support)
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ cfr = &ar->cfr;
+
+ ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx,
+ WMI_DIRECT_BUF_CFR, &db_cap);
+ if (ret)
+ continue;
+
+ idr_init(&cfr->rx_ring.bufs_idr);
+ spin_lock_init(&cfr->rx_ring.idr_lock);
+ spin_lock_init(&cfr->lock);
+ spin_lock_init(&cfr->lut_lock);
+
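+ /* One LUT entry per DMA buffer, capped at CFR_MAX_LUT_ENTRIES */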
+ num_lut_entries = min_t(u32, CFR_MAX_LUT_ENTRIES, db_cap.min_elem);
+ cfr->lut = kcalloc(num_lut_entries, sizeof(*cfr->lut),
+ GFP_KERNEL);
+ if (!cfr->lut) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ath11k_cfr_ring_alloc(ar, &db_cap);
+ if (ret) {
+ ath11k_warn(ab, "failed to init cfr ring for pdev %d: %d\n",
+ i, ret);
+ spin_lock_bh(&cfr->lut_lock);
+ kfree(cfr->lut);
+ cfr->lut = NULL;
+ cfr->enabled = false;
+ spin_unlock_bh(&cfr->lut_lock);
+ goto err;
+ }
+
+ cfr->lut_num = num_lut_entries;
+ cfr->enabled = true;
+
+ ath11k_cfr_debug_register(ar);
+ }
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ ar = ab->pdevs[i].ar;
+ cfr = &ar->cfr;
+
+ if (!cfr->enabled)
+ continue;
+
+ ath11k_cfr_debug_unregister(ar);
+ ath11k_cfr_ring_free(ar);
+
+ spin_lock_bh(&cfr->lut_lock);
+ kfree(cfr->lut);
+ cfr->lut = NULL;
+ cfr->enabled = false;
+ spin_unlock_bh(&cfr->lut_lock);
+ }
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath11k/cfr.h b/drivers/net/wireless/ath/ath11k/cfr.h
new file mode 100644
index 000000000000..94fcb706f2ef
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/cfr.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH11K_CFR_H
+#define ATH11K_CFR_H
+
+#include "dbring.h"
+#include "wmi.h"
+
+#define ATH11K_CFR_NUM_RESP_PER_EVENT 1
+#define ATH11K_CFR_EVENT_TIMEOUT_MS 1
+#define ATH11K_CFR_NUM_RING_ENTRIES 1
+
+#define ATH11K_MAX_CFR_ENABLED_CLIENTS 10
+#define CFR_MAX_LUT_ENTRIES 136
+
+#define HOST_MAX_CHAINS 8
+
+enum ath11k_cfr_correlate_event_type {
+ ATH11K_CORRELATE_DBR_EVENT,
+ ATH11K_CORRELATE_TX_EVENT,
+};
+
+struct ath11k_sta;
+struct ath11k_per_peer_cfr_capture;
+
+#define ATH11K_CFR_START_MAGIC 0xDEADBEAF
+#define ATH11K_CFR_END_MAGIC 0xBEAFDEAD
+
+#define VENDOR_QCA 0x8cfdf0
+#define PLATFORM_TYPE_ARM 2
+
+enum ath11k_cfr_meta_version {
+ ATH11K_CFR_META_VERSION_NONE,
+ ATH11K_CFR_META_VERSION_1,
+ ATH11K_CFR_META_VERSION_2,
+ ATH11K_CFR_META_VERSION_3,
+ ATH11K_CFR_META_VERSION_4,
+ ATH11K_CFR_META_VERSION_MAX = 0xFF,
+};
+
+enum ath11k_cfr_data_version {
+ ATH11K_CFR_DATA_VERSION_NONE,
+ ATH11K_CFR_DATA_VERSION_1,
+ ATH11K_CFR_DATA_VERSION_MAX = 0xFF,
+};
+
+enum ath11k_cfr_capture_ack_mode {
+ ATH11K_CFR_CAPTURE_LEGACY_ACK,
+ ATH11K_CFR_CAPTURE_DUP_LEGACY_ACK,
+ ATH11K_CFR_CAPTURE_HT_ACK,
+ ATH11K_CFR_CAPTURE_VHT_ACK,
+
+ /* Always keep this last */
+ ATH11K_CFR_CAPTURE_INVALID_ACK
+};
+
+enum ath11k_cfr_correlate_status {
+ ATH11K_CORRELATE_STATUS_RELEASE,
+ ATH11K_CORRELATE_STATUS_HOLD,
+ ATH11K_CORRELATE_STATUS_ERR,
+};
+
+enum ath11k_cfr_preamble_type {
+ ATH11K_CFR_PREAMBLE_TYPE_LEGACY,
+ ATH11K_CFR_PREAMBLE_TYPE_HT,
+ ATH11K_CFR_PREAMBLE_TYPE_VHT,
+};
+
+struct ath11k_cfr_peer_tx_param {
+ u32 capture_method;
+ u32 vdev_id;
+ u8 peer_mac_addr[ETH_ALEN];
+ u32 primary_20mhz_chan;
+ u32 bandwidth;
+ u32 phy_mode;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ u32 spatial_streams;
+ u32 correlation_info_1;
+ u32 correlation_info_2;
+ u32 status;
+ u32 timestamp_us;
+ u32 counter;
+ u32 chain_rssi[WMI_MAX_CHAINS];
+ u16 chain_phase[WMI_MAX_CHAINS];
+ u32 cfo_measurement;
+ u8 agc_gain[HOST_MAX_CHAINS];
+ u32 rx_start_ts;
+};
+
+struct cfr_metadata {
+ u8 peer_addr[ETH_ALEN];
+ u8 status;
+ u8 capture_bw;
+ u8 channel_bw;
+ u8 phy_mode;
+ u16 prim20_chan;
+ u16 center_freq1;
+ u16 center_freq2;
+ u8 capture_mode;
+ u8 capture_type;
+ u8 sts_count;
+ u8 num_rx_chain;
+ u32 timestamp;
+ u32 length;
+ u32 chain_rssi[HOST_MAX_CHAINS];
+ u16 chain_phase[HOST_MAX_CHAINS];
+ u32 cfo_measurement;
+ u8 agc_gain[HOST_MAX_CHAINS];
+ u32 rx_start_ts;
+} __packed;
+
+struct ath11k_csi_cfr_header {
+ u32 start_magic_num;
+ u32 vendorid;
+ u8 cfr_metadata_version;
+ u8 cfr_data_version;
+ u8 chip_type;
+ u8 platform_type;
+ u32 cfr_metadata_len;
+ struct cfr_metadata meta_data;
+} __packed;
+
+#define TONES_IN_20MHZ 256
+#define TONES_IN_40MHZ 512
+#define TONES_IN_80MHZ 1024
+#define TONES_IN_160MHZ 2048 /* 160 MHz isn't supported yet */
+#define TONES_INVALID 0
+
+#define CFIR_DMA_HDR_INFO0_TAG GENMASK(7, 0)
+#define CFIR_DMA_HDR_INFO0_LEN GENMASK(13, 8)
+
+#define CFIR_DMA_HDR_INFO1_UPLOAD_DONE GENMASK(0, 0)
+#define CFIR_DMA_HDR_INFO1_CAPTURE_TYPE GENMASK(3, 1)
+#define CFIR_DMA_HDR_INFO1_PREAMBLE_TYPE GENMASK(5, 4)
+#define CFIR_DMA_HDR_INFO1_NSS GENMASK(8, 6)
+#define CFIR_DMA_HDR_INFO1_NUM_CHAINS GENMASK(11, 9)
+#define CFIR_DMA_HDR_INFO1_UPLOAD_PKT_BW GENMASK(14, 12)
+#define CFIR_DMA_HDR_INFO1_SW_PEER_ID_VALID GENMASK(15, 15)
+
+struct ath11k_cfr_dma_hdr {
+ u16 info0;
+ u16 info1;
+ u16 sw_peer_id;
+ u16 phy_ppdu_id;
+};
+
+struct ath11k_look_up_table {
+ bool dbr_recv;
+ bool tx_recv;
+ u8 *data;
+ u32 data_len;
+ u16 dbr_ppdu_id;
+ u16 tx_ppdu_id;
+ dma_addr_t dbr_address;
+ struct ath11k_csi_cfr_header header;
+ struct ath11k_cfr_dma_hdr hdr;
+ u64 txrx_tstamp;
+ u64 dbr_tstamp;
+ u32 header_length;
+ u32 payload_length;
+ struct ath11k_dbring_element *buff;
+};
+
+struct cfr_unassoc_pool_entry {
+ u8 peer_mac[ETH_ALEN];
+ u32 period;
+ bool is_valid;
+};
+
+struct ath11k_cfr {
+ struct ath11k_dbring rx_ring;
+ /* Protects cfr data */
+ spinlock_t lock;
+ /* Protects the lut entries */
+ spinlock_t lut_lock;
+ struct ath11k_look_up_table *lut;
+ struct dentry *enable_cfr;
+ struct dentry *cfr_unassoc;
+ struct rchan *rfs_cfr_capture;
+ u8 cfr_enabled_peer_cnt;
+ u32 lut_num;
+ u64 tx_evt_cnt;
+ u64 dbr_evt_cnt;
+ u64 release_cnt;
+ u64 tx_peer_status_cfr_fail;
+ u64 tx_evt_status_cfr_fail;
+ u64 tx_dbr_lookup_fail;
+ u64 last_success_tstamp;
+ u64 flush_dbr_cnt;
+ u64 clear_txrx_event;
+ u64 cfr_dma_aborts;
+ bool enabled;
+ enum wmi_phy_mode phymode;
+ struct cfr_unassoc_pool_entry unassoc_pool[ATH11K_MAX_CFR_ENABLED_CLIENTS];
+};
+
+enum ath11k_cfr_capture_method {
+ ATH11K_CFR_CAPTURE_METHOD_NULL_FRAME,
+ ATH11K_CFR_CAPTURE_METHOD_NULL_FRAME_WITH_PHASE,
+ ATH11K_CFR_CAPTURE_METHOD_PROBE_RESP,
+ ATH11K_CFR_CAPTURE_METHOD_MAX,
+};
+
+enum ath11k_cfr_capture_bw {
+ ATH11K_CFR_CAPTURE_BW_20,
+ ATH11K_CFR_CAPTURE_BW_40,
+ ATH11K_CFR_CAPTURE_BW_80,
+ ATH11K_CFR_CAPTURE_BW_MAX,
+};
+
+#ifdef CONFIG_ATH11K_CFR
+int ath11k_cfr_init(struct ath11k_base *ab);
+void ath11k_cfr_deinit(struct ath11k_base *ab);
+void ath11k_cfr_lut_update_paddr(struct ath11k *ar, dma_addr_t paddr,
+ u32 buf_id);
+void ath11k_cfr_decrement_peer_count(struct ath11k *ar,
+ struct ath11k_sta *arsta);
+void ath11k_cfr_update_unassoc_pool_entry(struct ath11k *ar,
+ const u8 *peer_mac);
+bool ath11k_cfr_peer_is_in_cfr_unassoc_pool(struct ath11k *ar,
+ const u8 *peer_mac);
+void ath11k_cfr_update_unassoc_pool(struct ath11k *ar,
+ struct ath11k_per_peer_cfr_capture *params,
+ u8 *peer_mac);
+int ath11k_cfr_send_peer_cfr_capture_cmd(struct ath11k *ar,
+ struct ath11k_sta *arsta,
+ struct ath11k_per_peer_cfr_capture *params,
+ const u8 *peer_mac);
+struct ath11k_dbring *ath11k_cfr_get_dbring(struct ath11k *ar);
+void ath11k_cfr_release_lut_entry(struct ath11k_look_up_table *lut);
+int ath11k_process_cfr_capture_event(struct ath11k_base *ab,
+ struct ath11k_cfr_peer_tx_param *params);
+void ath11k_cfr_update_phymode(struct ath11k *ar, enum wmi_phy_mode phymode);
+#else
+static inline void ath11k_cfr_update_phymode(struct ath11k *ar,
+ enum wmi_phy_mode phymode)
+{
+}
+
+static inline int ath11k_cfr_init(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_cfr_deinit(struct ath11k_base *ab)
+{
+}
+
+static inline void ath11k_cfr_lut_update_paddr(struct ath11k *ar,
+ dma_addr_t paddr, u32 buf_id)
+{
+}
+
+static inline void ath11k_cfr_decrement_peer_count(struct ath11k *ar,
+ struct ath11k_sta *arsta)
+{
+}
+
+static inline void ath11k_cfr_update_unassoc_pool_entry(struct ath11k *ar,
+ const u8 *peer_mac)
+{
+}
+
+static inline bool
+ath11k_cfr_peer_is_in_cfr_unassoc_pool(struct ath11k *ar, const u8 *peer_mac)
+{
+ return false;
+}
+
+static inline void
+ath11k_cfr_update_unassoc_pool(struct ath11k *ar,
+ struct ath11k_per_peer_cfr_capture *params,
+ u8 *peer_mac)
+{
+}
+
+static inline int
+ath11k_cfr_send_peer_cfr_capture_cmd(struct ath11k *ar,
+ struct ath11k_sta *arsta,
+ struct ath11k_per_peer_cfr_capture *params,
+ const u8 *peer_mac)
+{
+ return 0;
+}
+
+static inline void ath11k_cfr_release_lut_entry(struct ath11k_look_up_table *lut)
+{
+}
+
+static inline
+struct ath11k_dbring *ath11k_cfr_get_dbring(struct ath11k *ar)
+{
+ return NULL;
+}
+
+static inline
+int ath11k_process_cfr_capture_event(struct ath11k_base *ab,
+ struct ath11k_cfr_peer_tx_param *params)
+{
+ return 0;
+}
+#endif /* CONFIG_ATH11K_CFR */
+#endif /* ATH11K_CFR_H */
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 812686173ac8..3f6f4db5b7ee 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
@@ -100,7 +99,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_regdb = false,
.fix_l1ss = true,
.credit_flow = false,
- .max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true,
@@ -126,6 +124,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_dual_stations = false,
.pdev_suspend = false,
+ .cfr_support = true,
+ .cfr_num_stream_bufs = 255,
+ .cfr_stream_buf_size = 8200,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -184,7 +185,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_regdb = false,
.fix_l1ss = true,
.credit_flow = false,
- .max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true,
@@ -211,6 +211,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = false,
.support_dual_stations = false,
.pdev_suspend = false,
+ .cfr_support = false,
+ .cfr_num_stream_bufs = 0,
+ .cfr_stream_buf_size = 0,
},
{
.name = "qca6390 hw2.0",
@@ -271,7 +274,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_regdb = false,
.fix_l1ss = true,
.credit_flow = true,
- .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
@@ -301,6 +303,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = true,
.support_dual_stations = true,
.pdev_suspend = false,
+ .cfr_support = false,
+ .cfr_num_stream_bufs = 0,
+ .cfr_stream_buf_size = 0,
},
{
.name = "qcn9074 hw1.0",
@@ -358,7 +363,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_regdb = false,
.fix_l1ss = true,
.credit_flow = false,
- .max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = true,
.alloc_cacheable_memory = true,
@@ -385,6 +389,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = false,
.support_dual_stations = false,
.pdev_suspend = false,
+ .cfr_support = false,
+ .cfr_num_stream_bufs = 0,
+ .cfr_stream_buf_size = 0,
},
{
.name = "wcn6855 hw2.0",
@@ -445,7 +452,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_regdb = true,
.fix_l1ss = false,
.credit_flow = true,
- .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
@@ -475,6 +481,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = true,
.support_dual_stations = true,
.pdev_suspend = false,
+ .cfr_support = false,
+ .cfr_num_stream_bufs = 0,
+ .cfr_stream_buf_size = 0,
},
{
.name = "wcn6855 hw2.1",
@@ -533,7 +542,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_regdb = true,
.fix_l1ss = false,
.credit_flow = true,
- .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
@@ -563,6 +571,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = true,
.support_dual_stations = true,
.pdev_suspend = false,
+ .cfr_support = true,
+ .cfr_num_stream_bufs = 255,
+ .cfr_stream_buf_size = 8200,
},
{
.name = "wcn6750 hw1.0",
@@ -619,7 +630,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_regdb = true,
.fix_l1ss = false,
.credit_flow = true,
- .max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_wcn6750,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
@@ -646,6 +656,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = true,
.support_dual_stations = false,
.pdev_suspend = true,
+ .cfr_support = false,
+ .cfr_num_stream_bufs = 0,
+ .cfr_stream_buf_size = 0,
},
{
.hw_rev = ATH11K_HW_IPQ5018_HW10,
@@ -662,7 +675,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
.ring_mask = &ath11k_hw_ring_mask_ipq8074,
.credit_flow = false,
- .max_tx_ring = 1,
.spectral = {
.fft_sz = 2,
.fft_pad_sz = 0,
@@ -698,7 +710,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_regdb = false,
.idle_ps = false,
.supports_suspend = false,
- .hal_params = &ath11k_hw_hal_params_ipq8074,
+ .hal_params = &ath11k_hw_hal_params_ipq5018,
.single_pdev_only = false,
.coldboot_cal_mm = true,
.coldboot_cal_ftm = true,
@@ -729,6 +741,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = false,
.support_dual_stations = false,
.pdev_suspend = false,
+ .cfr_support = false,
+ .cfr_num_stream_bufs = 0,
+ .cfr_stream_buf_size = 0,
},
{
.name = "qca2066 hw2.1",
@@ -789,7 +804,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_regdb = true,
.fix_l1ss = false,
.credit_flow = true,
- .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
@@ -818,6 +832,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .cfr_support = false,
+ .cfr_num_stream_bufs = 0,
+ .cfr_stream_buf_size = 0,
},
{
.name = "qca6698aq hw2.1",
@@ -876,7 +893,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_regdb = true,
.fix_l1ss = false,
.credit_flow = true,
- .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
@@ -906,6 +922,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = true,
.support_dual_stations = true,
.pdev_suspend = false,
+ .cfr_support = true,
+ .cfr_num_stream_bufs = 255,
+ .cfr_stream_buf_size = 8200,
},
};
@@ -994,9 +1013,64 @@ static const struct dmi_system_id ath11k_pm_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "21F9"),
},
},
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* Z13 G1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21D2"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* Z13 G1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21D3"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* Z16 G1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21D4"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* Z16 G1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21D5"),
+ },
+ },
{}
};
+static const struct __ath11k_core_usecase_firmware_table {
+ u32 hw_rev;
+ const char *compatible;
+ const char *firmware_name;
+} ath11k_core_usecase_firmware_table[] = {
+ { ATH11K_HW_WCN6855_HW21, "qcom,lemans-evk", "nfa765"},
+ { ATH11K_HW_WCN6855_HW21, "qcom,monaco-evk", "nfa765"},
+ { ATH11K_HW_WCN6855_HW21, "qcom,hamoa-iot-evk", "nfa765"},
+ { /* Sentinel */ }
+};
+
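+/*
+ * Pick a usecase-specific firmware directory based on the machine
+ * compatible string; returns NULL when the default firmware applies.
+ */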
+const char *ath11k_core_get_usecase_firmware(struct ath11k_base *ab)
+{
+ const struct __ath11k_core_usecase_firmware_table *entry;
+
+ entry = ath11k_core_usecase_firmware_table;
+ while (entry->compatible) {
+ if (ab->hw_rev == entry->hw_rev &&
+ of_machine_is_compatible(entry->compatible))
+ return entry->firmware_name;
+ entry++;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(ath11k_core_get_usecase_firmware);
+
void ath11k_fw_stats_pdevs_free(struct list_head *head)
{
struct ath11k_fw_stats_pdev *i, *tmp;
@@ -1987,8 +2061,16 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab)
goto err_thermal_unregister;
}
+ ret = ath11k_cfr_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init cfr %d\n", ret);
+ goto err_spectral_unregister;
+ }
+
return 0;
+err_spectral_unregister:
+ ath11k_spectral_deinit(ab);
err_thermal_unregister:
ath11k_thermal_unregister(ab);
err_mac_unregister:
@@ -2038,6 +2120,7 @@ static void ath11k_core_pdev_suspend_target(struct ath11k_base *ab)
static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
{
+ ath11k_cfr_deinit(ab);
ath11k_spectral_deinit(ab);
ath11k_thermal_unregister(ab);
ath11k_mac_unregister(ab);
@@ -2250,6 +2333,7 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
mutex_lock(&ab->core_lock);
ath11k_thermal_unregister(ab);
ath11k_dp_pdev_free(ab);
+ ath11k_cfr_deinit(ab);
ath11k_spectral_deinit(ab);
ath11k_ce_cleanup_pipes(ab);
ath11k_wmi_detach(ab);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index e8780b05ce11..a0d725923ef2 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH11K_CORE_H
@@ -35,6 +35,7 @@
#include "wow.h"
#include "fw.h"
#include "coredump.h"
+#include "cfr.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -531,6 +532,13 @@ struct ath11k_per_ppdu_tx_stats {
DECLARE_EWMA(avg_rssi, 10, 8)
+struct ath11k_per_peer_cfr_capture {
+ enum ath11k_cfr_capture_method cfr_method;
+ enum ath11k_cfr_capture_bw cfr_bw;
+ u32 cfr_enable;
+ u32 cfr_period;
+};
+
struct ath11k_sta {
struct ath11k_vif *arvif;
@@ -571,6 +579,10 @@ struct ath11k_sta {
bool peer_current_ps_valid;
u32 bw_prev;
+
+#ifdef CONFIG_ATH11K_CFR
+ struct ath11k_per_peer_cfr_capture cfr_capture;
+#endif
};
#define ATH11K_MIN_5G_FREQ 4150
@@ -795,6 +807,11 @@ struct ath11k {
bool ps_state_enable;
bool ps_timekeeper_enable;
s8 max_allowed_tx_power;
+
+#ifdef CONFIG_ATH11K_CFR
+ struct ath11k_cfr cfr;
+#endif
+ bool cfr_enabled;
};
struct ath11k_band_cap {
@@ -1275,6 +1292,7 @@ bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab);
const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
const char *filename);
+const char *ath11k_core_get_usecase_firmware(struct ath11k_base *ab);
static inline const char *ath11k_scan_state_str(enum ath11k_scan_state state)
{
@@ -1329,6 +1347,9 @@ static inline void ath11k_core_create_firmware_path(struct ath11k_base *ab,
of_property_read_string(ab->dev->of_node, "firmware-name", &fw_name);
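+ /* Fall back to a machine-specific usecase firmware when DT names none */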
+ if (!fw_name)
+ fw_name = ath11k_core_get_usecase_firmware(ab);
+
if (fw_name && strncmp(filename, "board", 5))
snprintf(buf, buf_len, "%s/%s/%s/%s", ATH11K_FW_DIR,
ab->hw_params.fw.dir, fw_name, filename);
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index 520d8b8662a2..d6994ce6ebff 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
@@ -37,10 +36,10 @@ static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
}
-static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
- struct ath11k_dbring *ring,
- struct ath11k_dbring_element *buff,
- enum wmi_direct_buffer_module id)
+int ath11k_dbring_bufs_replenish(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ struct ath11k_dbring_element *buff,
+ enum wmi_direct_buffer_module id)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
@@ -80,6 +79,9 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
goto err_idr_remove;
}
+ if (id == WMI_DIRECT_BUF_CFR)
+ ath11k_cfr_lut_update_paddr(ar, paddr, buf_id);
+
buff->paddr = paddr;
cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
@@ -155,12 +157,11 @@ int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
enum wmi_direct_buffer_module id)
{
struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {};
- int ret;
+ int ret, i;
if (id >= WMI_DIRECT_BUF_MAX)
return -EINVAL;
- param.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
param.module_id = id;
param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
@@ -173,10 +174,23 @@ int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
param.num_resp_per_event = ring->num_resp_per_event;
param.event_timeout_ms = ring->event_timeout_ms;
- ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
- if (ret) {
- ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
- return ret;
+ /* On single pdev devices the 2 GHz and 5 GHz bands share one DBR,
+ * so configure it for every target pdev.
+ */
+ if (ar->ab->hw_params.single_pdev_only) {
+ for (i = 0; i < ar->ab->target_pdev_count; i++) {
+ param.pdev_id = ar->ab->target_pdev_ids[i].pdev_id;
+ ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
+ return ret;
+ }
+ }
+ } else {
+ param.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
+ ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
+ return ret;
+ }
}
return 0;
@@ -281,10 +295,15 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
int size;
dma_addr_t paddr;
int ret = 0;
+ int status;
pdev_idx = ev->fixed.pdev_id;
module_id = ev->fixed.module_id;
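+
+ /* Single pdev targets report per-band pdev ids; map them to host pdev 0 */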
+ if (ab->hw_params.single_pdev_only &&
+ pdev_idx < ab->target_pdev_count)
+ pdev_idx = 0;
+
if (pdev_idx >= ab->num_radios) {
ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
return -EINVAL;
@@ -310,6 +329,9 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
case WMI_DIRECT_BUF_SPECTRAL:
ring = ath11k_spectral_get_dbring(ar);
break;
+ case WMI_DIRECT_BUF_CFR:
+ ring = ath11k_cfr_get_dbring(ar);
+ break;
default:
ring = NULL;
ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
@@ -360,8 +382,12 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
handler_data.data = PTR_ALIGN(vaddr_unalign,
ring->buf_align);
handler_data.data_sz = ring->buf_sz;
+ handler_data.buff = buff;
+ handler_data.buf_id = buf_id;
- ring->handler(ar, &handler_data);
+ status = ring->handler(ar, &handler_data);
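+ /*
+ * CFR may hold the buffer until its matching TX capture event
+ * arrives; skip the unmap and replenish below in that case.
+ */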
+ if (status == ATH11K_CORRELATE_STATUS_HOLD)
+ continue;
}
buff->paddr = 0;
diff --git a/drivers/net/wireless/ath/ath11k/dbring.h b/drivers/net/wireless/ath/ath11k/dbring.h
index 2f93b78a50df..e5f244dfa963 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.h
+++ b/drivers/net/wireless/ath/ath11k/dbring.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH11K_DBRING_H
@@ -21,6 +21,8 @@ struct ath11k_dbring_data {
void *data;
u32 data_sz;
struct wmi_dma_buf_release_meta_data meta;
+ struct ath11k_dbring_element *buff;
+ u32 buf_id;
};
struct ath11k_dbring_buf_release_event {
@@ -61,6 +63,10 @@ int ath11k_dbring_set_cfg(struct ath11k *ar,
u32 event_timeout_ms,
int (*handler)(struct ath11k *,
struct ath11k_dbring_data *));
+int ath11k_dbring_bufs_replenish(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ struct ath11k_dbring_element *buff,
+ enum wmi_direct_buffer_module id);
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
struct ath11k_dbring *ring,
enum wmi_direct_buffer_module id);
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
index cc8934d15697..aaa0034527a5 100644
--- a/drivers/net/wireless/ath/ath11k/debug.h
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef _ATH11K_DEBUG_H_
@@ -27,6 +27,8 @@ enum ath11k_debug_mask {
ATH11K_DBG_DP_TX = 0x00002000,
ATH11K_DBG_DP_RX = 0x00004000,
ATH11K_DBG_CE = 0x00008000,
+ ATH11K_DBG_CFR = 0x00010000,
+ ATH11K_DBG_CFR_DUMP = 0x00020000,
};
static inline const char *ath11k_dbg_str(enum ath11k_debug_mask mask)
@@ -64,6 +66,10 @@ static inline const char *ath11k_dbg_str(enum ath11k_debug_mask mask)
return "dp_rx";
case ATH11K_DBG_CE:
return "ce";
+ case ATH11K_DBG_CFR:
+ return "cfr";
+ case ATH11K_DBG_CFR_DUMP:
+ return "cfr_dump";
/* no default handler to allow compiler to check that the
* enum is fully handled
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 977f945b6e66..50f344803e8f 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -707,7 +707,7 @@ static ssize_t ath11k_debugfs_dump_soc_dp_stats(struct file *file,
len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
- for (i = 0; i < ab->hw_params.max_tx_ring; i++)
+ for (i = 0; i < ab->hw_params.hal_params->num_tx_rings; i++)
len += scnprintf(buf + len, size - len, "ring%d: %u\n",
i, soc_stats->tx_err.desc_na[i]);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index d89d0f28d890..621a8a8df4b8 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
@@ -240,6 +239,140 @@ static const struct file_operations fops_tx_stats = {
.llseek = default_llseek,
};
+#ifdef CONFIG_ATH11K_CFR
+static ssize_t ath11k_dbg_sta_write_cfr_capture(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ struct ath11k_cfr *cfr = &ar->cfr;
+ struct wmi_peer_cfr_capture_conf_arg arg;
+ u32 cfr_capture_enable = 0, cfr_capture_bw = 0;
+ u32 cfr_capture_method = 0, cfr_capture_period = 0;
+ char buf[64] = {};
+ int ret;
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+ guard(mutex)(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ return -ENETDOWN;
+
+ if (!ar->cfr_enabled)
+ return -EINVAL;
+
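+ /* Expected input: "<enable> <bw> <period> <method>" */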
+ ret = sscanf(buf, "%u %u %u %u", &cfr_capture_enable, &cfr_capture_bw,
+ &cfr_capture_period, &cfr_capture_method);
+
+ if (ret < 1 || (cfr_capture_enable && ret != 4))
+ return -EINVAL;
+
+ if (cfr_capture_enable == arsta->cfr_capture.cfr_enable &&
+ (cfr_capture_period &&
+ cfr_capture_period == arsta->cfr_capture.cfr_period) &&
+ cfr_capture_bw == arsta->cfr_capture.cfr_bw &&
+ cfr_capture_method == arsta->cfr_capture.cfr_method)
+ return count;
+
+ if (!cfr_capture_enable &&
+ cfr_capture_enable == arsta->cfr_capture.cfr_enable)
+ return count;
+
+ if (cfr_capture_enable > WMI_PEER_CFR_CAPTURE_ENABLE ||
+ cfr_capture_bw > WMI_PEER_CFR_CAPTURE_BW_80 ||
+ cfr_capture_method > ATH11K_CFR_CAPTURE_METHOD_NULL_FRAME_WITH_PHASE ||
+ cfr_capture_period > WMI_PEER_CFR_PERIODICITY_MAX)
+ return -EINVAL;
+
+ /* Target expects the cfr period in multiples of 10 */
+ if (cfr_capture_period % 10) {
+ ath11k_err(ar->ab, "periodicity should be a multiple of 10\n");
+ return -EINVAL;
+ }
+
+ if (ar->cfr.cfr_enabled_peer_cnt >= ATH11K_MAX_CFR_ENABLED_CLIENTS &&
+ !arsta->cfr_capture.cfr_enable) {
+ ath11k_err(ar->ab, "CFR enable peer threshold reached %u\n",
+ ar->cfr.cfr_enabled_peer_cnt);
+ return -EINVAL;
+ }
+
+ if (!cfr_capture_enable) {
+ cfr_capture_bw = arsta->cfr_capture.cfr_bw;
+ cfr_capture_period = arsta->cfr_capture.cfr_period;
+ cfr_capture_method = arsta->cfr_capture.cfr_method;
+ }
+
+ arg.request = cfr_capture_enable;
+ arg.periodicity = cfr_capture_period;
+ arg.bw = cfr_capture_bw;
+ arg.method = cfr_capture_method;
+
+ ret = ath11k_wmi_peer_set_cfr_capture_conf(ar, arsta->arvif->vdev_id,
+ sta->addr, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send cfr capture info: vdev_id %u peer %pM: %d\n",
+ arsta->arvif->vdev_id, sta->addr, ret);
+ return ret;
+ }
+
+ spin_lock_bh(&ar->cfr.lock);
+
+ if (cfr_capture_enable &&
+ cfr_capture_enable != arsta->cfr_capture.cfr_enable)
+ cfr->cfr_enabled_peer_cnt++;
+ else if (!cfr_capture_enable)
+ cfr->cfr_enabled_peer_cnt--;
+
+ spin_unlock_bh(&ar->cfr.lock);
+
+ arsta->cfr_capture.cfr_enable = cfr_capture_enable;
+ arsta->cfr_capture.cfr_period = cfr_capture_period;
+ arsta->cfr_capture.cfr_bw = cfr_capture_bw;
+ arsta->cfr_capture.cfr_method = cfr_capture_method;
+
+ return count;
+}
+
+static ssize_t ath11k_dbg_sta_read_cfr_capture(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[512] = {};
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "cfr_enabled = %d\n",
+ arsta->cfr_capture.cfr_enable);
+ len += scnprintf(buf + len, sizeof(buf) - len, "bandwidth = %d\n",
+ arsta->cfr_capture.cfr_bw);
+ len += scnprintf(buf + len, sizeof(buf) - len, "period = %d\n",
+ arsta->cfr_capture.cfr_period);
+ len += scnprintf(buf + len, sizeof(buf) - len, "cfr_method = %d\n",
+ arsta->cfr_capture.cfr_method);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_cfr_capture = {
+ .write = ath11k_dbg_sta_write_cfr_capture,
+ .read = ath11k_dbg_sta_read_cfr_capture,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+#endif /* CONFIG_ATH11K_CFR */
+
static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -877,6 +1010,13 @@ void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vi
debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta,
&fops_htt_peer_stats_reset);
+#ifdef CONFIG_ATH11K_CFR
+ if (test_bit(WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT,
+ ar->ab->wmi_ab.svc_map))
+ debugfs_create_file("cfr_capture", 0600, dir, sta,
+ &fops_peer_cfr_capture);
+#endif /* CONFIG_ATH11K_CFR */
+
debugfs_create_file("peer_ps_state", 0400, dir, sta,
&fops_peer_ps_state);
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 56b1a657e0b0..c940de285276 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -344,7 +344,7 @@ void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
if (!ab->hw_params.supports_shadow_regs)
return;
- for (i = 0; i < ab->hw_params.max_tx_ring; i++)
+ for (i = 0; i < ab->hw_params.hal_params->num_tx_rings; i++)
ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
@@ -359,7 +359,7 @@ static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
- for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ for (i = 0; i < ab->hw_params.hal_params->num_tx_rings; i++) {
ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
}
@@ -400,7 +400,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
goto err;
}
- for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ for (i = 0; i < ab->hw_params.hal_params->num_tx_rings; i++) {
tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
@@ -782,7 +782,7 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
int i, j;
int tot_work_done = 0;
- for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ for (i = 0; i < ab->hw_params.hal_params->num_tx_rings; i++) {
if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
ab->hw_params.ring_mask->tx[grp_id])
ath11k_dp_tx_completion_handler(ab, i);
@@ -1035,7 +1035,7 @@ void ath11k_dp_free(struct ath11k_base *ab)
ath11k_dp_reo_cmd_list_cleanup(ab);
- for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ for (i = 0; i < ab->hw_params.hal_params->num_tx_rings; i++) {
spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
idr_for_each(&dp->tx_ring[i].txbuf_idr,
ath11k_dp_tx_pending_cleanup, ab);
@@ -1086,7 +1086,7 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
- for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ for (i = 0; i < ab->hw_params.hal_params->num_tx_rings; i++) {
idr_init(&dp->tx_ring[i].txbuf_idr);
spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
dp->tx_ring[i].tcl_data_ring_id = i;
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 7a55afd33be8..1bd513f68a3c 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -199,7 +199,6 @@ struct ath11k_pdev_dp {
#define DP_BA_WIN_SZ_MAX 256
#define DP_TCL_NUM_RING_MAX 3
-#define DP_TCL_NUM_RING_MAX_QCA6390 1
#define DP_IDLE_SCATTER_BUFS_MAX 16
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 562aba66582f..86e1e6c27b36 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -91,6 +91,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
struct dp_tx_ring *tx_ring;
+ size_t num_tx_rings = ab->hw_params.hal_params->num_tx_rings;
void *hal_tcl_desc;
u8 pool_id;
u8 hal_ring_id;
@@ -113,7 +114,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
tcl_ring_sel:
tcl_ring_retry = false;
- ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
+ ti.ring_id = ring_selector % num_tx_rings;
ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
ring_map |= BIT(ti.ring_id);
@@ -126,7 +127,7 @@ tcl_ring_sel:
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (unlikely(ret < 0)) {
- if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
+ if (ring_map == (BIT(num_tx_rings) - 1) ||
!ab->hw_params.tcl_ring_retry) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
return -ENOSPC;
@@ -244,8 +245,8 @@ tcl_ring_sel:
* checking this ring earlier for each pkt tx.
* Restart ring selection if some rings are not checked yet.
*/
- if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
- ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
+ if (unlikely(ring_map != (BIT(num_tx_rings)) - 1) &&
+ ab->hw_params.tcl_ring_retry && num_tx_rings > 1) {
tcl_ring_retry = true;
ring_selector++;
}
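The retry exit works because ring_map collects one bit per TCL ring already attempted; once it equals BIT(num_tx_rings) - 1 every ring has been tried and the frame is dropped with -ENOSPC. A minimal standalone sketch of the same loop, where try_enqueue() is a hypothetical stand-in for the TCL descriptor fetch and not a driver function:

static int example_select_and_enqueue(u32 num_tx_rings, u32 *ring_selector)
{
	u32 ring_map = 0;
	u32 ring_id;

	do {
		ring_id = (*ring_selector)++ % num_tx_rings;
		ring_map |= BIT(ring_id);	/* mark this ring as tried */

		if (!try_enqueue(ring_id))	/* assumed helper */
			return 0;
	} while (ring_map != BIT(num_tx_rings) - 1);

	return -ENOSPC;
}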
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 0c797b8d0a27..e821e5a62c1c 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/dma-mapping.h>
@@ -184,7 +183,7 @@ static const struct hal_srng_config hw_srng_config_template[] = {
},
{ /* RXDMA DIR BUF */
.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
- .max_rings = 1,
+ .max_rings = 2,
.entry_size = 8 >> 2, /* TODO: Define the struct */
.lmac_ring = true,
.ring_dir = HAL_SRNG_DIR_SRC,
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index caa6dc12a790..d19c4b372a2a 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -2707,6 +2707,14 @@ const struct ath11k_hw_regs wcn6750_regs = {
.hal_reo1_misc_ctl = 0x000005d8,
};
+static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_ipq5018[] = {
+ {
+ .tcl_ring_num = 0,
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+};
+
static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_ipq8074[] = {
{
.tcl_ring_num = 0,
@@ -2822,19 +2830,28 @@ const struct ath11k_hw_regs ipq5018_regs = {
.hal_wbm1_release_ring_base_lsb = 0x0000097c,
};
+const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq5018 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq5018,
+ .num_tx_rings = ARRAY_SIZE(ath11k_hw_tcl2wbm_rbm_map_ipq5018),
+};
+
const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
+ .num_tx_rings = ARRAY_SIZE(ath11k_hw_tcl2wbm_rbm_map_ipq8074),
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
- .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq5018,
+ .num_tx_rings = ARRAY_SIZE(ath11k_hw_tcl2wbm_rbm_map_ipq5018),
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_wcn6750,
+ .num_tx_rings = ARRAY_SIZE(ath11k_hw_tcl2wbm_rbm_map_wcn6750),
};
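Deriving num_tx_rings from ARRAY_SIZE() of the rbm map removes the standalone max_tx_ring field, so the ring count can no longer drift from the table it describes. The pattern in isolation, with a hypothetical table rather than driver data:

struct example_map_entry {
	int tcl_ring_num;
	int wbm_ring_num;
};

static const struct example_map_entry example_map[] = {
	{ .tcl_ring_num = 0, .wbm_ring_num = 0 },
	{ .tcl_ring_num = 1, .wbm_ring_num = 1 },
};

/* Adding or removing a map entry updates the count automatically, so
 * no hand-maintained "max" constant can go stale.
 */
static const size_t example_num_rings = ARRAY_SIZE(example_map);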
static const struct cfg80211_sar_freq_ranges ath11k_hw_sar_freq_ranges_wcn6855[] = {
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 52d9f4c13b13..4996536fbd14 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH11K_HW_H
@@ -134,6 +134,7 @@ struct ath11k_hw_tcl2wbm_rbm_map {
struct ath11k_hw_hal_params {
enum hal_rx_buf_return_buf_manager rx_buf_rbm;
const struct ath11k_hw_tcl2wbm_rbm_map *tcl2wbm_rbm_map;
+ size_t num_tx_rings;
};
struct ath11k_hw_params {
@@ -198,7 +199,6 @@ struct ath11k_hw_params {
bool supports_regdb;
bool fix_l1ss;
bool credit_flow;
- u8 max_tx_ring;
const struct ath11k_hw_hal_params *hal_params;
bool supports_dynamic_smps_6ghz;
bool alloc_cacheable_memory;
@@ -228,6 +228,9 @@ struct ath11k_hw_params {
bool support_fw_mac_sequence;
bool support_dual_stations;
bool pdev_suspend;
+ bool cfr_support;
+ u32 cfr_num_stream_bufs;
+ u32 cfr_stream_buf_size;
};
struct ath11k_hw_ops {
@@ -291,6 +294,7 @@ extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018;
extern const struct ce_remap ath11k_ce_remap_ipq5018;
+extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq5018;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 3276fe443502..4dfd08b58416 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -2911,6 +2911,8 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
+
+ ath11k_cfr_update_phymode(ar, phymode);
}
static void ath11k_peer_assoc_prepare(struct ath11k *ar,
@@ -6186,6 +6188,8 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
dma_addr_t paddr;
int buf_id;
int ret;
+ bool tx_params_valid = false;
+ bool peer_in_unassoc_pool;
ATH11K_SKB_CB(skb)->ar = ar;
@@ -6224,7 +6228,18 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
ATH11K_SKB_CB(skb)->paddr = paddr;
- ret = ath11k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
+ peer_in_unassoc_pool = ath11k_cfr_peer_is_in_cfr_unassoc_pool(ar, hdr->addr1);
+
+ if (ar->cfr_enabled &&
+ ieee80211_is_probe_resp(hdr->frame_control) &&
+ peer_in_unassoc_pool)
+ tx_params_valid = true;
+
+ if (peer_in_unassoc_pool)
+ ath11k_cfr_update_unassoc_pool_entry(ar, hdr->addr1);
+
+ ret = ath11k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb,
+ tx_params_valid);
if (ret) {
ath11k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
goto err_unmap_buf;
@@ -7392,7 +7407,7 @@ err_vdev_del:
idr_for_each(&ar->txmgmt_idr,
ath11k_mac_vif_txmgmt_idr_remove, vif);
- for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ for (i = 0; i < ab->hw_params.hal_params->num_tx_rings; i++) {
spin_lock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
idr_for_each(&ab->dp.tx_ring[i].txbuf_idr,
ath11k_mac_vif_unref, vif);
@@ -9979,6 +9994,8 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
}
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
+
+ ath11k_cfr_decrement_peer_count(ar, arsta);
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -10640,7 +10657,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
if (!ab->hw_params.supports_monitor)
/* There's a race between calling ieee80211_register_hw()
* and here where the monitor mode is enabled for a little
- * while. But that time is so short and in practise it make
+ * while. But that time is so short and in practice it doesn't make
* a difference in real life.
*/
ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index d62a2014315a..49b79648752c 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/rtnetlink.h>
@@ -926,8 +926,11 @@ int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
*/
if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
!memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
- (char *)reg_info->alpha2, 2))
- goto retfail;
+ (char *)reg_info->alpha2, 2) &&
+ power_type == IEEE80211_REG_UNSET_AP) {
+ ath11k_reg_reset_info(reg_info);
+ return 0;
+ }
/* Intersect new rules with default regd if a new country setting was
* requested, i.e a default regd was already set during initialization
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 110035dae8a6..451cc4c719ae 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -651,11 +651,12 @@ static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar,
}
int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
- struct sk_buff *frame)
+ struct sk_buff *frame, bool tx_params_valid)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
struct wmi_mgmt_send_cmd *cmd;
+ struct wmi_mgmt_send_params *params;
struct wmi_tlv *frame_tlv;
struct sk_buff *skb;
u32 buf_len;
@@ -665,6 +666,8 @@ int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
frame->len : WMI_MGMT_SEND_DOWNLD_LEN;
len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
+ if (tx_params_valid)
+ len += sizeof(*params);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -680,7 +683,7 @@ int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
cmd->frame_len = frame->len;
cmd->buf_len = buf_len;
- cmd->tx_params_valid = 0;
+ cmd->tx_params_valid = !!tx_params_valid;
frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
@@ -690,6 +693,15 @@ int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
ath11k_ce_byte_swap(frame_tlv->value, buf_len);
+ if (tx_params_valid) {
+ params = (struct wmi_mgmt_send_params *)(skb->data + len - sizeof(*params));
+ params->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TX_SEND_PARAMS) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*params) - TLV_HDR_SIZE);
+ params->tx_params_dword1 |= WMI_TX_PARAMS_DWORD1_CFR_CAPTURE;
+ }
+
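With tx_params_valid set the command buffer carries three back-to-back regions, which is why params is located sizeof(*params) bytes before the end. An illustrative layout, assuming buf_len has already been rounded up to 4 bytes:

/* skb->data + 0                       struct wmi_mgmt_send_cmd
 * skb->data + sizeof(*cmd)            struct wmi_tlv header + frame bytes
 * skb->data + len - sizeof(*params)   struct wmi_mgmt_send_params (optional)
 */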
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
if (ret) {
ath11k_warn(ar->ab,
@@ -3941,6 +3953,47 @@ int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id,
return 0;
}
+int ath11k_wmi_peer_set_cfr_capture_conf(struct ath11k *ar,
+ u32 vdev_id, const u8 *mac_addr,
+ struct wmi_peer_cfr_capture_conf_arg *arg)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_cfr_capture_cmd_fixed_param *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_cfr_capture_cmd_fixed_param *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PEER_CFR_CAPTURE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ memcpy(&cmd->mac_addr, mac_addr, ETH_ALEN);
+ cmd->request = arg->request;
+ cmd->vdev_id = vdev_id;
+ cmd->periodicity = arg->periodicity;
+ cmd->bandwidth = arg->bw;
+ cmd->capture_method = arg->method;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CFR_CAPTURE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "WMI vdev %d failed to send peer cfr capture cmd: %d\n",
+ vdev_id, ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI peer CFR capture cmd req %u id %u period %u bw %u mode %u\n",
+ arg->request, vdev_id, arg->periodicity,
+ arg->bw, arg->method);
+
+ return 0;
+}
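A hedged example of how a CFR caller might drive the new command; the period and bandwidth values are illustrative only, and the calling CFR module is outside this hunk:

static int example_enable_cfr(struct ath11k *ar, u32 vdev_id,
			      const u8 *peer_mac)
{
	struct wmi_peer_cfr_capture_conf_arg arg = {
		.request = WMI_PEER_CFR_CAPTURE_ENABLE,
		.periodicity = 100, /* ms, <= WMI_PEER_CFR_PERIODICITY_MAX */
		.bw = WMI_PEER_CFR_CAPTURE_BW_20,
		.method = WMI_CFR_CAPTURE_METHOD_NULL_FRAME,
	};

	return ath11k_wmi_peer_set_cfr_capture_conf(ar, vdev_id,
						    peer_mac, &arg);
}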
+
int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id,
struct sk_buff *tmpl)
{
@@ -8752,6 +8805,93 @@ out:
kfree(tb);
}
+static void ath11k_wmi_tlv_cfr_capture_event_fixed_param(const void *ptr,
+ void *data)
+{
+ struct ath11k_cfr_peer_tx_param *tx_params = data;
+ const struct ath11k_wmi_cfr_peer_tx_event_param *params = ptr;
+
+ tx_params->capture_method = params->capture_method;
+ tx_params->vdev_id = params->vdev_id;
+ ether_addr_copy(tx_params->peer_mac_addr, params->mac_addr.addr);
+ tx_params->primary_20mhz_chan = params->chan_mhz;
+ tx_params->bandwidth = params->bandwidth;
+ tx_params->phy_mode = params->phy_mode;
+ tx_params->band_center_freq1 = params->band_center_freq1;
+ tx_params->band_center_freq2 = params->band_center_freq2;
+ tx_params->spatial_streams = params->sts_count;
+ tx_params->correlation_info_1 = params->correlation_info_1;
+ tx_params->correlation_info_2 = params->correlation_info_2;
+ tx_params->status = params->status;
+ tx_params->timestamp_us = params->timestamp_us;
+ tx_params->counter = params->counter;
+ tx_params->rx_start_ts = params->rx_start_ts;
+
+ memcpy(tx_params->chain_rssi, params->chain_rssi,
+ sizeof(tx_params->chain_rssi));
+
+ if (WMI_CFR_CFO_MEASUREMENT_VALID & params->cfo_measurement)
+ tx_params->cfo_measurement = FIELD_GET(WMI_CFR_CFO_MEASUREMENT_RAW_DATA,
+ params->cfo_measurement);
+}
+
+static void ath11k_wmi_tlv_cfr_capture_phase_fixed_param(const void *ptr,
+ void *data)
+{
+ struct ath11k_cfr_peer_tx_param *tx_params = data;
+ const struct ath11k_wmi_cfr_peer_tx_event_phase_param *params = ptr;
+ int i;
+
+ for (i = 0; i < WMI_MAX_CHAINS; i++) {
+ tx_params->chain_phase[i] = params->chain_phase[i];
+ tx_params->agc_gain[i] = params->agc_gain[i];
+ }
+}
+
+static int ath11k_wmi_tlv_cfr_capture_evt_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ switch (tag) {
+ case WMI_TAG_PEER_CFR_CAPTURE_EVENT:
+ ath11k_wmi_tlv_cfr_capture_event_fixed_param(ptr, data);
+ break;
+ case WMI_TAG_CFR_CAPTURE_PHASE_PARAM:
+ ath11k_wmi_tlv_cfr_capture_phase_fixed_param(ptr, data);
+ break;
+ default:
+ ath11k_warn(ab, "Invalid tag received tag %d len %d\n",
+ tag, len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ath11k_wmi_parse_cfr_capture_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_cfr_peer_tx_param params = {};
+ int ret;
+
+ ath11k_dbg_dump(ab, ATH11K_DBG_CFR_DUMP, "cfr_dump:", "",
+ skb->data, skb->len);
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_cfr_capture_evt_parse,
+ &params);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse cfr capture event tlv %d\n",
+ ret);
+ return;
+ }
+
+ ret = ath11k_process_cfr_capture_event(ab, &params);
+ if (ret)
+ ath11k_dbg(ab, ATH11K_DBG_CFR,
+ "failed to process cfr capture ret = %d\n", ret);
+}
+
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -8882,6 +9022,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_P2P_NOA_EVENTID:
ath11k_wmi_p2p_noa_event(ab, skb);
break;
+ case WMI_PEER_CFR_CAPTURE_EVENTID:
+ ath11k_wmi_parse_cfr_capture_event(ab, skb);
+ break;
default:
ath11k_dbg(ab, ATH11K_DBG_WMI, "unsupported event id 0x%x\n", id);
break;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 0f0de24a3840..baed501b640b 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -362,6 +362,10 @@ enum wmi_tlv_cmd_id {
WMI_PEER_REORDER_QUEUE_REMOVE_CMDID,
WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
WMI_PEER_ANTDIV_INFO_REQ_CMDID,
+ WMI_PEER_RESERVED0_CMDID,
+ WMI_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMDID,
+ WMI_PEER_TID_CONFIGURATIONS_CMDID,
+ WMI_PEER_CFR_CAPTURE_CMDID,
WMI_BCN_TX_CMDID = WMI_TLV_CMD(WMI_GRP_MGMT),
WMI_PDEV_SEND_BCN_CMDID,
WMI_BCN_TMPL_CMDID,
@@ -981,6 +985,7 @@ enum wmi_tlv_pdev_param {
WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+ WMI_PDEV_PARAM_PER_PEER_CFR_ENABLE = 0xa8,
WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD = 0xbc,
WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC = 0xbe,
WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT = 0xc6,
@@ -1884,6 +1889,8 @@ enum wmi_tlv_tag {
WMI_TAG_NDP_EVENT,
WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
+ WMI_TAG_PEER_CFR_CAPTURE_EVENT = 0x317,
+ WMI_TAG_CFR_CAPTURE_PHASE_PARAM = 0x33b,
WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344,
WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD = 0x37b,
WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD,
@@ -3832,7 +3839,8 @@ struct wmi_scan_prob_req_oui_cmd {
#define WMI_TX_PARAMS_DWORD1_BW_MASK GENMASK(14, 8)
#define WMI_TX_PARAMS_DWORD1_PREAMBLE_TYPE GENMASK(19, 15)
#define WMI_TX_PARAMS_DWORD1_FRAME_TYPE BIT(20)
-#define WMI_TX_PARAMS_DWORD1_RSVD GENMASK(31, 21)
+#define WMI_TX_PARAMS_DWORD1_CFR_CAPTURE BIT(21)
+#define WMI_TX_PARAMS_DWORD1_RSVD GENMASK(31, 22)
struct wmi_mgmt_send_params {
u32 tlv_header;
@@ -4217,6 +4225,87 @@ enum cc_setting_code {
*/
};
+enum ath11k_wmi_cfr_capture_bw {
+ WMI_PEER_CFR_CAPTURE_BW_20,
+ WMI_PEER_CFR_CAPTURE_BW_40,
+ WMI_PEER_CFR_CAPTURE_BW_80,
+ WMI_PEER_CFR_CAPTURE_BW_MAX,
+};
+
+enum ath11k_wmi_cfr_capture_method {
+ WMI_CFR_CAPTURE_METHOD_NULL_FRAME,
+ WMI_CFR_CAPTURE_METHOD_NULL_FRAME_WITH_PHASE,
+ WMI_CFR_CAPTURE_METHOD_PROBE_RESP,
+ WMI_CFR_CAPTURE_METHOD_MAX,
+};
+
+#define WMI_CFR_FRAME_TX_STATUS GENMASK(1, 0)
+#define WMI_CFR_CAPTURE_STATUS_PEER_PS BIT(30)
+#define WMI_CFR_PEER_CAPTURE_STATUS BIT(31)
+
+#define WMI_CFR_CORRELATION_INFO2_BUF_ADDR_HIGH GENMASK(3, 0)
+#define WMI_CFR_CORRELATION_INFO2_PPDU_ID GENMASK(31, 16)
+
+#define WMI_CFR_CFO_MEASUREMENT_VALID BIT(0)
+#define WMI_CFR_CFO_MEASUREMENT_RAW_DATA GENMASK(14, 1)
+
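The CFO word packs a validity flag in bit 0 and a 14-bit raw measurement in bits 14:1; a minimal decode sketch using the masks above (FIELD_GET comes from linux/bitfield.h):

static inline bool example_cfo_decode(u32 cfo_word, u16 *raw)
{
	if (!(cfo_word & WMI_CFR_CFO_MEASUREMENT_VALID))
		return false;

	*raw = FIELD_GET(WMI_CFR_CFO_MEASUREMENT_RAW_DATA, cfo_word);
	return true;
}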
+struct ath11k_wmi_cfr_peer_tx_event_param {
+ u32 capture_method;
+ u32 vdev_id;
+ struct wmi_mac_addr mac_addr;
+ u32 chan_mhz;
+ u32 bandwidth;
+ u32 phy_mode;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ u32 sts_count;
+ u32 correlation_info_1;
+ u32 correlation_info_2;
+ u32 status;
+ u32 timestamp_us;
+ u32 counter;
+ u32 chain_rssi[WMI_MAX_CHAINS];
+ u32 cfo_measurement;
+ u32 rx_start_ts;
+} __packed;
+
+struct ath11k_wmi_cfr_peer_tx_event_phase_param {
+ u32 chain_phase[WMI_MAX_CHAINS];
+ u8 agc_gain[WMI_MAX_CHAINS];
+} __packed;
+
+enum ath11k_wmi_frame_tx_status {
+ WMI_FRAME_TX_STATUS_OK,
+ WMI_FRAME_TX_STATUS_XRETRY,
+ WMI_FRAME_TX_STATUS_DROP,
+ WMI_FRAME_TX_STATUS_FILTERED,
+};
+
+struct wmi_peer_cfr_capture_conf_arg {
+ enum ath11k_wmi_cfr_capture_bw bw;
+ enum ath11k_wmi_cfr_capture_method method;
+ u32 request;
+ u32 periodicity;
+};
+
+struct wmi_peer_cfr_capture_cmd_fixed_param {
+ u32 tlv_header;
+ u32 request;
+ struct wmi_mac_addr mac_addr;
+ u32 vdev_id;
+ u32 periodicity;
+ /* BW of measurement - of type enum ath11k_wmi_cfr_capture_bw */
+ u32 bandwidth;
+ /* Method used to capture CFR - of type enum ath11k_wmi_cfr_capture_method */
+ u32 capture_method;
+} __packed;
+
+#define WMI_PEER_CFR_CAPTURE_ENABLE 1
+#define WMI_PEER_CFR_CAPTURE_DISABLE 0
+
+/* periodicity in ms */
+#define WMI_PEER_CFR_PERIODICITY_MAX 600000
+
static inline enum cc_setting_code
ath11k_wmi_cc_setting_code_to_reg(enum wmi_reg_cc_setting_code status_code)
{
@@ -6346,7 +6435,7 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
u32 cmd_id);
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
- struct sk_buff *frame);
+ struct sk_buff *frame, bool tx_params_valid);
int ath11k_wmi_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id,
const u8 *p2p_ie);
int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
@@ -6531,5 +6620,7 @@ bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar);
int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar,
u32 vdev_id,
struct ath11k_reg_tpc_power_info *param);
-
+int ath11k_wmi_peer_set_cfr_capture_conf(struct ath11k *ar,
+ u32 vdev_id, const u8 *mac,
+ struct wmi_peer_cfr_capture_conf_arg *arg);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index d95ee525a6cd..3ba1236956cc 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -2,8 +2,6 @@
obj-$(CONFIG_ATH12K) += ath12k.o
ath12k-y += core.o \
hal.o \
- hal_tx.o \
- hal_rx.o \
wmi.o \
mac.o \
reg.o \
@@ -12,11 +10,12 @@ ath12k-y += core.o \
dp.o \
dp_tx.o \
dp_rx.o \
+ dp_htt.o \
+ dp_peer.o \
debug.o \
ce.o \
peer.o \
dbring.o \
- hw.o \
mhi.o \
pci.o \
dp_mon.o \
@@ -24,6 +23,9 @@ ath12k-y += core.o \
p2p.o
ath12k-$(CONFIG_ATH12K_AHB) += ahb.o
+
+obj-$(CONFIG_ATH12K) += wifi7/
+
ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
diff --git a/drivers/net/wireless/ath/ath12k/ahb.c b/drivers/net/wireless/ath/ath12k/ahb.c
index b30527c402f6..9a4d34e49104 100644
--- a/drivers/net/wireless/ath/ath12k/ahb.c
+++ b/drivers/net/wireless/ath/ath12k/ahb.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/dma-mapping.h>
@@ -16,18 +16,11 @@
#include "debug.h"
#include "hif.h"
-static const struct of_device_id ath12k_ahb_of_match[] = {
- { .compatible = "qcom,ipq5332-wifi",
- .data = (void *)ATH12K_HW_IPQ5332_HW10,
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(of, ath12k_ahb_of_match);
-
#define ATH12K_IRQ_CE0_OFFSET 4
#define ATH12K_MAX_UPDS 1
#define ATH12K_UPD_IRQ_WRD_LEN 18
+
+static struct ath12k_ahb_driver *ath12k_ahb_family_drivers[ATH12K_DEVICE_FAMILY_MAX];
static const char ath12k_userpd_irq[][9] = {"spawn",
"ready",
"stop-ack"};
@@ -130,7 +123,7 @@ enum ext_irq_num {
static u32 ath12k_ahb_read32(struct ath12k_base *ab, u32 offset)
{
- if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
+ if (ab->ce_remap && offset < ab->cmem_offset)
return ioread32(ab->mem_ce + offset);
return ioread32(ab->mem + offset);
}
@@ -138,7 +131,7 @@ static u32 ath12k_ahb_read32(struct ath12k_base *ab, u32 offset)
static void ath12k_ahb_write32(struct ath12k_base *ab, u32 offset,
u32 value)
{
- if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
+ if (ab->ce_remap && offset < ab->cmem_offset)
iowrite32(value, ab->mem_ce + offset);
else
iowrite32(value, ab->mem + offset);
@@ -531,9 +524,10 @@ static int ath12k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
struct ath12k_ext_irq_grp,
napi);
struct ath12k_base *ab = irq_grp->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
int work_done;
- work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
+ work_done = ath12k_dp_service_srng(dp, irq_grp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
ath12k_ahb_ext_grp_enable(irq_grp);
@@ -563,12 +557,10 @@ static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab)
{
const struct ath12k_hw_ring_mask *ring_mask;
struct ath12k_ext_irq_grp *irq_grp;
- const struct hal_ops *hal_ops;
int i, j, irq, irq_idx, ret;
u32 num_irq;
ring_mask = ab->hw_params->ring_mask;
- hal_ops = ab->hw_params->hal_ops;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
irq_grp = &ab->ext_irq_grp[i];
num_irq = 0;
@@ -588,7 +580,7 @@ static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab)
* tcl_to_wbm_rbm_map point to the same ring number.
*/
if (ring_mask->tx[i] &
- BIT(hal_ops->tcl_to_wbm_rbm_map[j].wbm_ring_num)) {
+ BIT(ab->hal.tcl_to_wbm_rbm_map[j].wbm_ring_num)) {
irq_grp->irqs[num_irq++] =
wbm2host_tx_completions_ring1 - j;
}
@@ -698,7 +690,7 @@ static int ath12k_ahb_map_service_to_pipe(struct ath12k_base *ab, u16 service_id
return 0;
}
-static const struct ath12k_hif_ops ath12k_ahb_hif_ops_ipq5332 = {
+static const struct ath12k_hif_ops ath12k_ahb_hif_ops = {
.start = ath12k_ahb_start,
.stop = ath12k_ahb_stop,
.read32 = ath12k_ahb_read32,
@@ -935,7 +927,8 @@ static int ath12k_ahb_resource_init(struct ath12k_base *ab)
goto err_mem_unmap;
}
ab->ce_remap = true;
- ab->ce_remap_base_addr = HAL_IPQ5332_CE_WFSS_REG_BASE;
+ ab->cmem_offset = ce_remap->cmem_offset;
+ ab->ce_remap_base_addr = ce_remap->base;
}
ab_ahb->xo_clk = devm_clk_get(ab->dev, "xo");
@@ -988,13 +981,34 @@ static void ath12k_ahb_resource_deinit(struct ath12k_base *ab)
ab_ahb->xo_clk = NULL;
}
+static enum ath12k_device_family
+ath12k_ahb_get_device_family(const struct platform_device *pdev)
+{
+ enum ath12k_device_family device_family_id;
+ struct ath12k_ahb_driver *driver;
+ const struct of_device_id *of_id;
+
+ for (device_family_id = ATH12K_DEVICE_FAMILY_START;
+ device_family_id < ATH12K_DEVICE_FAMILY_MAX; device_family_id++) {
+ driver = ath12k_ahb_family_drivers[device_family_id];
+ if (driver) {
+ of_id = of_match_device(driver->id_table, &pdev->dev);
+ if (of_id) {
+ /* Found the driver */
+ return device_family_id;
+ }
+ }
+ }
+
+ return ATH12K_DEVICE_FAMILY_MAX;
+}
+
static int ath12k_ahb_probe(struct platform_device *pdev)
{
- struct ath12k_base *ab;
- const struct ath12k_hif_ops *hif_ops;
+ enum ath12k_device_family device_id;
struct ath12k_ahb *ab_ahb;
- enum ath12k_hw_rev hw_rev;
- u32 addr, userpd_id;
+ struct ath12k_base *ab;
+ u32 addr;
int ret;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -1008,25 +1022,32 @@ static int ath12k_ahb_probe(struct platform_device *pdev)
if (!ab)
return -ENOMEM;
- hw_rev = (enum ath12k_hw_rev)(kernel_ulong_t)of_device_get_match_data(&pdev->dev);
- switch (hw_rev) {
- case ATH12K_HW_IPQ5332_HW10:
- hif_ops = &ath12k_ahb_hif_ops_ipq5332;
- userpd_id = ATH12K_IPQ5332_USERPD_ID;
- break;
- default:
- ret = -EOPNOTSUPP;
+ ab_ahb = ath12k_ab_to_ahb(ab);
+ ab_ahb->ab = ab;
+ ab->hif.ops = &ath12k_ahb_hif_ops;
+ ab->pdev = pdev;
+ platform_set_drvdata(pdev, ab);
+
+ device_id = ath12k_ahb_get_device_family(pdev);
+ if (device_id >= ATH12K_DEVICE_FAMILY_MAX) {
+ ath12k_err(ab, "failed to get device family: %d\n", device_id);
+ ret = -EINVAL;
goto err_core_free;
}
- ab->hif.ops = hif_ops;
- ab->pdev = pdev;
- ab->hw_rev = hw_rev;
- ab->target_mem_mode = ATH12K_QMI_MEMORY_MODE_DEFAULT;
- platform_set_drvdata(pdev, ab);
- ab_ahb = ath12k_ab_to_ahb(ab);
- ab_ahb->ab = ab;
- ab_ahb->userpd_id = userpd_id;
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "AHB device family id: %d\n", device_id);
+
+ ab_ahb->device_family_ops = &ath12k_ahb_family_drivers[device_id]->ops;
+
+ /* Call the device-specific probe. This callback can be used to
+ * override any ops in the future; probe is validated for NULL
+ * during registration.
+ */
+ ret = ab_ahb->device_family_ops->probe(pdev);
+ if (ret) {
+ ath12k_err(ab, "failed to probe device: %d\n", ret);
+ goto err_core_free;
+ }
/* Set fixed_mem_region to true for platforms that support fixed memory
* reservation from DT. If memory is reserved from DT for FW, ath12k driver
@@ -1065,14 +1086,26 @@ static int ath12k_ahb_probe(struct platform_device *pdev)
goto err_rproc_deconfigure;
}
+ /* Invoke arch_init here so that arch-specific init operations
+ * can utilize already initialized ab fields, such as HAL SRNGs.
+ */
+ ret = ab_ahb->device_family_ops->arch_init(ab);
+ if (ret) {
+ ath12k_err(ab, "AHB arch_init failed %d\n", ret);
+ goto err_rproc_deconfigure;
+ }
+
ret = ath12k_core_init(ab);
if (ret) {
ath12k_err(ab, "failed to init core: %d\n", ret);
- goto err_rproc_deconfigure;
+ goto err_deinit_arch;
}
return 0;
+err_deinit_arch:
+ ab_ahb->device_family_ops->arch_deinit(ab);
+
err_rproc_deconfigure:
ath12k_ahb_deconfigure_rproc(ab);
@@ -1111,11 +1144,13 @@ static void ath12k_ahb_remove_prepare(struct ath12k_base *ab)
static void ath12k_ahb_free_resources(struct ath12k_base *ab)
{
struct platform_device *pdev = ab->pdev;
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
ath12k_hal_srng_deinit(ab);
ath12k_ce_free_pipes(ab);
ath12k_ahb_resource_deinit(ab);
ath12k_ahb_deconfigure_rproc(ab);
+ ab_ahb->device_family_ops->arch_deinit(ab);
ath12k_core_free(ab);
platform_set_drvdata(pdev, NULL);
}
@@ -1136,21 +1171,47 @@ qmi_fail:
ath12k_ahb_free_resources(ab);
}
-static struct platform_driver ath12k_ahb_driver = {
- .driver = {
- .name = "ath12k_ahb",
- .of_match_table = ath12k_ahb_of_match,
- },
- .probe = ath12k_ahb_probe,
- .remove = ath12k_ahb_remove,
-};
-
-int ath12k_ahb_init(void)
+int ath12k_ahb_register_driver(const enum ath12k_device_family device_id,
+ struct ath12k_ahb_driver *driver)
{
- return platform_driver_register(&ath12k_ahb_driver);
+ struct platform_driver *ahb_driver;
+
+ if (device_id >= ATH12K_DEVICE_FAMILY_MAX)
+ return -EINVAL;
+
+ if (!driver || !driver->ops.probe ||
+ !driver->ops.arch_init || !driver->ops.arch_deinit)
+ return -EINVAL;
+
+ if (ath12k_ahb_family_drivers[device_id]) {
+ pr_err("Driver already registered for id %d\n", device_id);
+ return -EALREADY;
+ }
+
+ ath12k_ahb_family_drivers[device_id] = driver;
+
+ ahb_driver = &ath12k_ahb_family_drivers[device_id]->driver;
+ ahb_driver->driver.name = driver->name;
+ ahb_driver->driver.of_match_table = driver->id_table;
+ ahb_driver->probe = ath12k_ahb_probe;
+ ahb_driver->remove = ath12k_ahb_remove;
+
+ return platform_driver_register(ahb_driver);
}
+EXPORT_SYMBOL(ath12k_ahb_register_driver);
-void ath12k_ahb_exit(void)
+void ath12k_ahb_unregister_driver(const enum ath12k_device_family device_id)
{
- platform_driver_unregister(&ath12k_ahb_driver);
+ struct platform_driver *ahb_driver;
+
+ if (device_id >= ATH12K_DEVICE_FAMILY_MAX)
+ return;
+
+ if (!ath12k_ahb_family_drivers[device_id])
+ return;
+
+ ahb_driver = &ath12k_ahb_family_drivers[device_id]->driver;
+ platform_driver_unregister(ahb_driver);
+ ath12k_ahb_family_drivers[device_id] = NULL;
}
+EXPORT_SYMBOL(ath12k_ahb_unregister_driver);
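Under the new registration model each device family module supplies its own of_device_id table and callbacks; a sketch of what a family init could look like, where every example_* name is hypothetical:

static const struct of_device_id example_of_match[] = {
	{ .compatible = "qcom,ipq5332-wifi" },
	{ }
};

static struct ath12k_ahb_driver example_ahb_driver = {
	.name = "ath12k_ahb_wifi7",
	.id_table = example_of_match,
	.ops = {
		.probe = example_probe,			/* assumed callbacks */
		.arch_init = example_arch_init,
		.arch_deinit = example_arch_deinit,
	},
};

static int __init example_init(void)
{
	return ath12k_ahb_register_driver(ATH12K_DEVICE_FAMILY_WIFI7,
					  &example_ahb_driver);
}

static void __exit example_exit(void)
{
	ath12k_ahb_unregister_driver(ATH12K_DEVICE_FAMILY_WIFI7);
}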
diff --git a/drivers/net/wireless/ath/ath12k/ahb.h b/drivers/net/wireless/ath/ath12k/ahb.h
index d56244b20a6a..8a040d03d27a 100644
--- a/drivers/net/wireless/ath/ath12k/ahb.h
+++ b/drivers/net/wireless/ath/ath12k/ahb.h
@@ -1,13 +1,14 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_AHB_H
#define ATH12K_AHB_H
#include <linux/clk.h>
#include <linux/remoteproc/qcom_rproc.h>
+#include <linux/platform_device.h>
#include "core.h"
#define ATH12K_AHB_RECOVERY_TIMEOUT (3 * HZ)
@@ -43,6 +44,12 @@ enum ath12k_ahb_userpd_irq {
struct ath12k_base;
+struct ath12k_ahb_device_family_ops {
+ int (*probe)(struct platform_device *pdev);
+ int (*arch_init)(struct ath12k_base *ab);
+ void (*arch_deinit)(struct ath12k_base *ab);
+};
+
struct ath12k_ahb {
struct ath12k_base *ab;
struct rproc *tgt_rproc;
@@ -59,6 +66,15 @@ struct ath12k_ahb {
u32 spawn_bit;
u32 stop_bit;
int userpd_irq_num[ATH12K_USERPD_MAX_IRQ];
+ const struct ath12k_ahb_ops *ahb_ops;
+ const struct ath12k_ahb_device_family_ops *device_family_ops;
+};
+
+struct ath12k_ahb_driver {
+ const char *name;
+ const struct of_device_id *id_table;
+ struct ath12k_ahb_device_family_ops ops;
+ struct platform_driver driver;
};
static inline struct ath12k_ahb *ath12k_ab_to_ahb(struct ath12k_base *ab)
@@ -66,15 +82,8 @@ static inline struct ath12k_ahb *ath12k_ab_to_ahb(struct ath12k_base *ab)
return (struct ath12k_ahb *)ab->drv_priv;
}
-#ifdef CONFIG_ATH12K_AHB
-int ath12k_ahb_init(void);
-void ath12k_ahb_exit(void);
-#else
-static inline int ath12k_ahb_init(void)
-{
- return 0;
-}
+int ath12k_ahb_register_driver(const enum ath12k_device_family device_id,
+ struct ath12k_ahb_driver *driver);
+void ath12k_ahb_unregister_driver(const enum ath12k_device_family device_id);
-static inline void ath12k_ahb_exit(void) {};
-#endif
#endif
diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
index 4aea58446838..f13b260c5c96 100644
--- a/drivers/net/wireless/ath/ath12k/ce.c
+++ b/drivers/net/wireless/ath/ath12k/ce.c
@@ -1,314 +1,13 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
-const struct ce_attr ath12k_host_ce_config_qcn9274[] = {
- /* CE0: host->target HTC control and raw streams */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 16,
- .src_sz_max = 2048,
- .dest_nentries = 0,
- },
-
- /* CE1: target->host HTT + HTC control */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 0,
- .src_sz_max = 2048,
- .dest_nentries = 512,
- .recv_cb = ath12k_htc_rx_completion_handler,
- },
-
- /* CE2: target->host WMI */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 0,
- .src_sz_max = 2048,
- .dest_nentries = 128,
- .recv_cb = ath12k_htc_rx_completion_handler,
- },
-
- /* CE3: host->target WMI (mac0) */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 32,
- .src_sz_max = 2048,
- .dest_nentries = 0,
- },
-
- /* CE4: host->target HTT */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 2048,
- .src_sz_max = 256,
- .dest_nentries = 0,
- },
-
- /* CE5: target->host pktlog */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 0,
- .src_sz_max = 2048,
- .dest_nentries = 512,
- .recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
- },
-
- /* CE6: target autonomous hif_memcpy */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
-
- /* CE7: host->target WMI (mac1) */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 32,
- .src_sz_max = 2048,
- .dest_nentries = 0,
- },
-
- /* CE8: target autonomous hif_memcpy */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
-
- /* CE9: MHI */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
-
- /* CE10: MHI */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
-
- /* CE11: MHI */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
-
- /* CE12: CV Prefetch */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
-
- /* CE13: CV Prefetch */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
-
- /* CE14: target->host dbg log */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 0,
- .src_sz_max = 2048,
- .dest_nentries = 512,
- .recv_cb = ath12k_htc_rx_completion_handler,
- },
-
- /* CE15: reserved for future use */
- {
- .flags = (CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
-};
-
-const struct ce_attr ath12k_host_ce_config_wcn7850[] = {
- /* CE0: host->target HTC control and raw streams */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 16,
- .src_sz_max = 2048,
- .dest_nentries = 0,
- },
-
- /* CE1: target->host HTT + HTC control */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 0,
- .src_sz_max = 2048,
- .dest_nentries = 512,
- .recv_cb = ath12k_htc_rx_completion_handler,
- },
-
- /* CE2: target->host WMI */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 0,
- .src_sz_max = 2048,
- .dest_nentries = 64,
- .recv_cb = ath12k_htc_rx_completion_handler,
- },
-
- /* CE3: host->target WMI (mac0) */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 32,
- .src_sz_max = 2048,
- .dest_nentries = 0,
- },
-
- /* CE4: host->target HTT */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 2048,
- .src_sz_max = 256,
- .dest_nentries = 0,
- },
-
- /* CE5: target->host pktlog */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
-
- /* CE6: target autonomous hif_memcpy */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
-
- /* CE7: host->target WMI (mac1) */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 2048,
- .dest_nentries = 0,
- },
-
- /* CE8: target autonomous hif_memcpy */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
-
-};
-
-const struct ce_attr ath12k_host_ce_config_ipq5332[] = {
- /* CE0: host->target HTC control and raw streams */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 16,
- .src_sz_max = 2048,
- .dest_nentries = 0,
- },
- /* CE1: target->host HTT + HTC control */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 0,
- .src_sz_max = 2048,
- .dest_nentries = 512,
- .recv_cb = ath12k_htc_rx_completion_handler,
- },
- /* CE2: target->host WMI */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 0,
- .src_sz_max = 2048,
- .dest_nentries = 128,
- .recv_cb = ath12k_htc_rx_completion_handler,
- },
- /* CE3: host->target WMI */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 32,
- .src_sz_max = 2048,
- .dest_nentries = 0,
- },
- /* CE4: host->target HTT */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 2048,
- .src_sz_max = 256,
- .dest_nentries = 0,
- },
- /* CE5: target -> host PKTLOG */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 0,
- .src_sz_max = 2048,
- .dest_nentries = 512,
- .recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
- },
- /* CE6: Target autonomous HIF_memcpy */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
- /* CE7: CV Prefetch */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
- /* CE8: Target HIF memcpy (Generic HIF memcypy) */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
- /* CE9: WMI logging/CFR/Spectral/Radar */
- {
- .flags = CE_ATTR_FLAGS,
- .src_nentries = 0,
- .src_sz_max = 2048,
- .dest_nentries = 128,
- },
- /* CE10: Unused */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
- /* CE11: Unused */
- {
- .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 0,
- .src_sz_max = 0,
- .dest_nentries = 0,
- },
-};
-
static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
struct sk_buff *skb, dma_addr_t paddr)
{
@@ -341,7 +40,7 @@ static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
goto exit;
}
- ath12k_hal_ce_dst_set_desc(desc, paddr);
+ ath12k_hal_ce_dst_set_desc(&ab->hal, desc, paddr);
ring->skb[write_index] = skb;
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
@@ -434,7 +133,7 @@ static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
goto err;
}
- *nbytes = ath12k_hal_ce_dst_status_get_length(desc);
+ *nbytes = ath12k_hal_ce_dst_status_get_length(&ab->hal, desc);
*skb = pipe->dest_ring->skb[sw_index];
pipe->dest_ring->skb[sw_index] = NULL;
@@ -666,6 +365,7 @@ ath12k_ce_alloc_ring(struct ath12k_base *ab, int nentries, int desc_sz)
static int ath12k_ce_alloc_pipe(struct ath12k_base *ab, int ce_id)
{
+ struct ath12k_hal *hal = &ab->hal;
struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
const struct ce_attr *attr = &ab->hw_params->host_ce_config[ce_id];
struct ath12k_ce_ring *ring;
@@ -677,7 +377,7 @@ static int ath12k_ce_alloc_pipe(struct ath12k_base *ab, int ce_id)
if (attr->src_nentries) {
pipe->send_cb = ath12k_ce_send_done_cb;
nentries = roundup_pow_of_two(attr->src_nentries);
- desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ desc_sz = ath12k_hal_ce_get_desc_size(hal, HAL_CE_DESC_SRC);
ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
if (IS_ERR(ring))
return PTR_ERR(ring);
@@ -687,13 +387,13 @@ static int ath12k_ce_alloc_pipe(struct ath12k_base *ab, int ce_id)
if (attr->dest_nentries) {
pipe->recv_cb = attr->recv_cb;
nentries = roundup_pow_of_two(attr->dest_nentries);
- desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ desc_sz = ath12k_hal_ce_get_desc_size(hal, HAL_CE_DESC_DST);
ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
if (IS_ERR(ring))
return PTR_ERR(ring);
pipe->dest_ring = ring;
- desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ desc_sz = ath12k_hal_ce_get_desc_size(hal, HAL_CE_DESC_DST_STATUS);
ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
if (IS_ERR(ring))
return PTR_ERR(ring);
@@ -786,7 +486,7 @@ int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id,
if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
byte_swap_data = 1;
- ath12k_hal_ce_src_set_desc(desc, ATH12K_SKB_CB(skb)->paddr,
+ ath12k_hal_ce_src_set_desc(&ab->hal, desc, ATH12K_SKB_CB(skb)->paddr,
skb->len, transfer_id, byte_swap_data);
pipe->src_ring->skb[write_index] = skb;
@@ -972,6 +672,7 @@ int ath12k_ce_init_pipes(struct ath12k_base *ab)
void ath12k_ce_free_pipes(struct ath12k_base *ab)
{
+ struct ath12k_hal *hal = &ab->hal;
struct ath12k_ce_pipe *pipe;
int desc_sz;
int i;
@@ -980,7 +681,8 @@ void ath12k_ce_free_pipes(struct ath12k_base *ab)
pipe = &ab->ce.ce_pipe[i];
if (pipe->src_ring) {
- desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ desc_sz = ath12k_hal_ce_get_desc_size(hal,
+ HAL_CE_DESC_SRC);
dma_free_coherent(ab->dev,
pipe->src_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
@@ -991,7 +693,8 @@ void ath12k_ce_free_pipes(struct ath12k_base *ab)
}
if (pipe->dest_ring) {
- desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ desc_sz = ath12k_hal_ce_get_desc_size(hal,
+ HAL_CE_DESC_DST);
dma_free_coherent(ab->dev,
pipe->dest_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
@@ -1003,7 +706,8 @@ void ath12k_ce_free_pipes(struct ath12k_base *ab)
if (pipe->status_ring) {
desc_sz =
- ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ ath12k_hal_ce_get_desc_size(hal,
+ HAL_CE_DESC_DST_STATUS);
dma_free_coherent(ab->dev,
pipe->status_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
index 57f75899ee03..df4f2a4f8480 100644
--- a/drivers/net/wireless/ath/ath12k/ce.h
+++ b/drivers/net/wireless/ath/ath12k/ce.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_CE_H
@@ -85,6 +85,7 @@ struct ce_ie_addr {
struct ce_remap {
u32 base;
u32 size;
+ u32 cmem_offset;
};
struct ce_attr {
@@ -173,10 +174,6 @@ struct ath12k_ce {
struct ath12k_hp_update_timer hp_timer[CE_COUNT_MAX];
};
-extern const struct ce_attr ath12k_host_ce_config_qcn9274[];
-extern const struct ce_attr ath12k_host_ce_config_wcn7850[];
-extern const struct ce_attr ath12k_host_ce_config_ipq5332[];
-
void ath12k_ce_cleanup_pipes(struct ath12k_base *ab);
void ath12k_ce_rx_replenish_retry(struct timer_list *t);
void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id);
diff --git a/drivers/net/wireless/ath/ath12k/cmn_defs.h b/drivers/net/wireless/ath/ath12k/cmn_defs.h
new file mode 100644
index 000000000000..20208ffea1c9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/cmn_defs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_CMN_DEFS_H
+#define ATH12K_CMN_DEFS_H
+
+#include <net/mac80211.h>
+
+#define MAX_RADIOS 2
+#define ATH12K_MAX_DEVICES 3
+#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_DEVICES * MAX_RADIOS)
+
+#define ATH12K_SCAN_MAX_LINKS ATH12K_GROUP_MAX_RADIO
+/* Define 1 scan link for each radio for parallel scan purposes */
+#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + ATH12K_SCAN_MAX_LINKS)
+
+#define MAX_MU_GROUP_ID 64
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index cc352eef1939..9d6c50a94e64 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -21,15 +21,18 @@
#include "hif.h"
#include "pci.h"
#include "wow.h"
+#include "dp_cmn.h"
+#include "peer.h"
-static int ahb_err, pci_err;
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
+EXPORT_SYMBOL(ath12k_debug_mask);
bool ath12k_ftm_mode;
module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
+EXPORT_SYMBOL(ath12k_ftm_mode);
/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
@@ -632,6 +635,7 @@ u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
{
return ath12k_core_get_max_station_per_radio(ab) + TARGET_NUM_VDEVS(ab);
}
+EXPORT_SYMBOL(ath12k_core_get_max_peers_per_radio);
struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
int index)
@@ -700,6 +704,8 @@ void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
static void ath12k_core_stop(struct ath12k_base *ab)
{
+ ath12k_link_sta_rhash_tbl_destroy(ab);
+
ath12k_core_to_group_ref_put(ab);
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
@@ -710,7 +716,7 @@ static void ath12k_core_stop(struct ath12k_base *ab)
ath12k_dp_rx_pdev_reo_cleanup(ab);
ath12k_hif_stop(ab);
ath12k_wmi_detach(ab);
- ath12k_dp_free(ab);
+ ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));
/* De-Init of components as needed */
}
@@ -895,7 +901,7 @@ static int ath12k_core_start(struct ath12k_base *ab)
goto err_hif_stop;
}
- ret = ath12k_dp_htt_connect(&ab->dp);
+ ret = ath12k_dp_htt_connect(ath12k_ab_to_dp(ab));
if (ret) {
ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
goto err_hif_stop;
@@ -920,7 +926,7 @@ static int ath12k_core_start(struct ath12k_base *ab)
goto err_hif_stop;
}
- ath12k_dp_cc_config(ab);
+ ath12k_hal_cc_config(ab);
ret = ath12k_dp_rx_pdev_reo_setup(ab);
if (ret) {
@@ -928,8 +934,6 @@ static int ath12k_core_start(struct ath12k_base *ab)
goto err_hif_stop;
}
- ath12k_dp_hal_rx_desc_init(ab);
-
ret = ath12k_wmi_cmd_init(ab);
if (ret) {
ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
@@ -964,6 +968,12 @@ static int ath12k_core_start(struct ath12k_base *ab)
/* Indicate the core start in the appropriate group */
ath12k_core_to_group_ref_get(ab);
+ ret = ath12k_link_sta_rhash_tbl_init(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
+ goto err_reo_cleanup;
+ }
+
return 0;
err_reo_cleanup:
@@ -1288,7 +1298,7 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
goto err_firmware_stop;
}
- ret = ath12k_dp_alloc(ab);
+ ret = ath12k_dp_cmn_device_init(ath12k_ab_to_dp(ab));
if (ret) {
ath12k_err(ab, "failed to init DP: %d\n", ret);
goto err_firmware_stop;
@@ -1300,7 +1310,7 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
ret = ath12k_core_start(ab);
if (ret) {
ath12k_err(ab, "failed to start core: %d\n", ret);
- goto err_dp_free;
+ goto err_deinit;
}
mutex_unlock(&ab->core_lock);
@@ -1333,8 +1343,8 @@ err_core_stop:
mutex_unlock(&ag->mutex);
goto exit;
-err_dp_free:
- ath12k_dp_free(ab);
+err_deinit:
+ ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));
mutex_unlock(&ab->core_lock);
mutex_unlock(&ag->mutex);
@@ -1350,13 +1360,14 @@ static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
int ret, total_vdev;
mutex_lock(&ab->core_lock);
+ ath12k_link_sta_rhash_tbl_destroy(ab);
ath12k_dp_pdev_free(ab);
ath12k_ce_cleanup_pipes(ab);
ath12k_wmi_detach(ab);
ath12k_dp_rx_pdev_reo_cleanup(ab);
mutex_unlock(&ab->core_lock);
- ath12k_dp_free(ab);
+ ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));
ath12k_hal_srng_deinit(ab);
total_vdev = ab->num_radios * TARGET_NUM_VDEVS(ab);
ab->free_vdev_map = (1LL << total_vdev) - 1;
@@ -1565,6 +1576,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
ath12k_core_halt(ar);
}
+ ath12k_mac_dp_peer_cleanup(ah);
break;
case ATH12K_HW_STATE_OFF:
ath12k_warn(ab,
@@ -1739,17 +1751,11 @@ enum ath12k_qmi_mem_mode ath12k_core_get_memory_mode(struct ath12k_base *ab)
return ATH12K_QMI_MEMORY_MODE_DEFAULT;
}
+EXPORT_SYMBOL(ath12k_core_get_memory_mode);
int ath12k_core_pre_init(struct ath12k_base *ab)
{
const struct ath12k_mem_profile_based_param *param;
- int ret;
-
- ret = ath12k_hw_init(ab);
- if (ret) {
- ath12k_err(ab, "failed to init hw params: %d\n", ret);
- return ret;
- }
param = &ath12k_mem_profile_based_param[ab->target_mem_mode];
ab->profile_param = param;
@@ -1996,6 +2002,8 @@ exit:
ag->ab[ab->device_id] = ab;
ab->ag = ag;
+ ath12k_dp_cmn_hw_group_assign(ath12k_ab_to_dp(ab), ag);
+
ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
ag->id, ag->num_devices, wsi->index);
@@ -2023,6 +2031,8 @@ void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
return;
}
+ ath12k_dp_cmn_hw_group_unassign(ath12k_ab_to_dp(ab), ag);
+
ag->ab[device_id] = NULL;
ab->ag = NULL;
ab->device_id = ATH12K_INVALID_DEVICE_ID;
@@ -2253,7 +2263,6 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
spin_lock_init(&ab->base_lock);
init_completion(&ab->reset_complete);
- INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
INIT_WORK(&ab->restart_work, ath12k_core_restart);
@@ -2291,31 +2300,5 @@ err_sc_free:
return NULL;
}
-static int ath12k_init(void)
-{
- ahb_err = ath12k_ahb_init();
- if (ahb_err)
- pr_warn("Failed to initialize ath12k AHB device: %d\n", ahb_err);
-
- pci_err = ath12k_pci_init();
- if (pci_err)
- pr_warn("Failed to initialize ath12k PCI device: %d\n", pci_err);
-
- /* If both failed, return one of the failures (arbitrary) */
- return ahb_err && pci_err ? ahb_err : 0;
-}
-
-static void ath12k_exit(void)
-{
- if (!pci_err)
- ath12k_pci_exit();
-
- if (!ahb_err)
- ath12k_ahb_exit();
-}
-
-module_init(ath12k_init);
-module_exit(ath12k_exit);
-
-MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN devices");
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 3c1e0069be1e..990934ec92fc 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -18,6 +18,7 @@
#include <linux/panic_notifier.h>
#include <linux/average.h>
#include <linux/of.h>
+#include <linux/rhashtable.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -26,7 +27,6 @@
#include "ce.h"
#include "mac.h"
#include "hw.h"
-#include "hal_rx.h"
#include "reg.h"
#include "dbring.h"
#include "fw.h"
@@ -34,6 +34,8 @@
#include "wow.h"
#include "debugfs_htt_stats.h"
#include "coredump.h"
+#include "cmn_defs.h"
+#include "dp_cmn.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -64,8 +66,6 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
-#define ATH12K_MAX_DEVICES 3
-#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_DEVICES * MAX_RADIOS)
#define ATH12K_INVALID_GROUP_ID 0xFF
#define ATH12K_INVALID_DEVICE_ID 0xFF
@@ -155,6 +155,7 @@ enum ath12k_hw_rev {
ATH12K_HW_QCN9274_HW20,
ATH12K_HW_WCN7850_HW20,
ATH12K_HW_IPQ5332_HW10,
+ ATH12K_HW_QCC2072_HW10,
};
enum ath12k_firmware_mode {
@@ -310,16 +311,9 @@ struct ath12k_link_vif {
u32 vdev_id;
u32 beacon_interval;
u32 dtim_period;
- u16 ast_hash;
- u16 ast_idx;
- u16 tcl_metadata;
- u8 hal_addr_search_flags;
- u8 search_type;
struct ath12k *ar;
- int bank_id;
- u8 vdev_id_check_en;
bool beacon_prot;
struct wmi_wmm_params_all_arg wmm_params;
@@ -360,6 +354,8 @@ struct ath12k_link_vif {
};
struct ath12k_vif {
+ struct ath12k_dp_vif dp_vif;
+
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
struct ieee80211_vif *vif;
@@ -383,10 +379,7 @@ struct ath12k_vif {
} u;
u32 aid;
- u32 key_cipher;
- u8 tx_encap_type;
bool ps;
- atomic_t mcbc_gsn;
struct ath12k_link_vif deflink;
struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS];
@@ -407,51 +400,8 @@ struct ath12k_vif_iter {
struct ath12k_link_vif *arvif;
};
-#define HAL_AST_IDX_INVALID 0xFFFF
-#define HAL_RX_MAX_MCS 12
-#define HAL_RX_MAX_MCS_HT 31
-#define HAL_RX_MAX_MCS_VHT 9
-#define HAL_RX_MAX_MCS_HE 11
-#define HAL_RX_MAX_MCS_BE 15
-#define HAL_RX_MAX_NSS 8
-#define HAL_RX_MAX_NUM_LEGACY_RATES 12
-
#define ATH12K_SCAN_TIMEOUT_HZ (20 * HZ)
-struct ath12k_rx_peer_rate_stats {
- u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1];
- u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1];
- u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1];
- u64 be_mcs_count[HAL_RX_MAX_MCS_BE + 1];
- u64 nss_count[HAL_RX_MAX_NSS];
- u64 bw_count[HAL_RX_BW_MAX];
- u64 gi_count[HAL_RX_GI_MAX];
- u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES];
- u64 rx_rate[HAL_RX_BW_MAX][HAL_RX_GI_MAX][HAL_RX_MAX_NSS][HAL_RX_MAX_MCS_HT + 1];
-};
-
-struct ath12k_rx_peer_stats {
- u64 num_msdu;
- u64 num_mpdu_fcs_ok;
- u64 num_mpdu_fcs_err;
- u64 tcp_msdu_count;
- u64 udp_msdu_count;
- u64 other_msdu_count;
- u64 ampdu_msdu_count;
- u64 non_ampdu_msdu_count;
- u64 stbc_count;
- u64 beamformed_count;
- u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
- u64 tid_count[IEEE80211_NUM_TIDS + 1];
- u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
- u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX];
- u64 rx_duration;
- u64 dcm_count;
- u64 ru_alloc_cnt[HAL_RX_RU_ALLOC_TYPE_MAX];
- struct ath12k_rx_peer_rate_stats pkt_stats;
- struct ath12k_rx_peer_rate_stats byte_stats;
-};
-
#define ATH12K_HE_MCS_NUM 12
#define ATH12K_VHT_MCS_NUM 10
#define ATH12K_BW_NUM 5
@@ -533,12 +483,6 @@ struct ath12k_per_ppdu_tx_stats {
u32 retry_bytes;
};
-struct ath12k_wbm_tx_stats {
- u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX];
-};
-
-DECLARE_EWMA(avg_rssi, 10, 8)
-
struct ath12k_link_sta {
struct ath12k_link_vif *arvif;
struct ath12k_sta *ahsta;
@@ -553,15 +497,7 @@ struct ath12k_link_sta {
u32 smps;
struct wiphy_work update_wk;
- struct rate_info txrate;
- struct rate_info last_txrate;
- u64 rx_duration;
- u64 tx_duration;
- u8 rssi_comb;
- struct ewma_avg_rssi avg_rssi;
u8 link_id;
- struct ath12k_rx_peer_stats *rx_stats;
- struct ath12k_wbm_tx_stats *wbm_tx_stats;
u32 bw_prev;
u32 peer_nss;
s8 rssi_beacon;
@@ -572,14 +508,9 @@ struct ath12k_link_sta {
/* for firmware use only */
u8 link_idx;
- u32 tx_retry_failed;
- u32 tx_retry_count;
-};
-struct ath12k_reoq_buf {
- void *vaddr;
- dma_addr_t paddr_aligned;
- u32 size;
+ /* peer addr based rhashtable list pointer */
+ struct rhash_head rhash_addr;
};
struct ath12k_sta {
@@ -594,8 +525,6 @@ struct ath12k_sta {
u8 num_peer;
enum ieee80211_sta_state state;
-
- struct ath12k_reoq_buf reoq_bufs[IEEE80211_NUM_TIDS + 1];
};
#define ATH12K_HALF_20MHZ_BW 10
@@ -667,23 +596,6 @@ struct ath12k_debug {
bool extd_rx_stats;
};
-struct ath12k_per_peer_tx_stats {
- u32 succ_bytes;
- u32 retry_bytes;
- u32 failed_bytes;
- u32 duration;
- u16 succ_pkts;
- u16 retry_pkts;
- u16 failed_pkts;
- u16 ru_start;
- u16 ru_tones;
- u8 ba_fails;
- u8 ppdu_type;
- u32 mu_grpid;
- u32 mu_pos;
- bool is_ampdu;
-};
-
struct ath12k_pdev_rssi_offsets {
s32 temp_offset;
s8 min_nf_dbm;
@@ -809,9 +721,6 @@ struct ath12k {
struct ath12k_wow wow;
struct completion target_suspend;
bool target_suspend_ack;
- struct ath12k_per_peer_tx_stats peer_tx_stats;
- struct list_head ppdu_stats_info;
- u32 ppdu_stat_list_depth;
struct ath12k_per_peer_tx_stats cached_stats;
u32 last_ppdu_id;
@@ -866,8 +775,7 @@ struct ath12k_hw {
DECLARE_BITMAP(free_ml_peer_id_map, ATH12K_MAX_MLO_PEERS);
- /* protected by wiphy_lock() */
- struct list_head ml_peers;
+ struct ath12k_dp_hw dp_hw;
/* Keep last */
struct ath12k radio[] __aligned(sizeof(void *));
@@ -941,32 +849,6 @@ struct ath12k_board_data {
size_t len;
};
-struct ath12k_device_dp_tx_err_stats {
- /* TCL Ring Descriptor unavailable */
- u32 desc_na[DP_TCL_NUM_RING_MAX];
- /* Other failures during dp_tx due to mem allocation failure
- * idr unavailable etc.
- */
- atomic_t misc_fail;
-};
-
-struct ath12k_device_dp_stats {
- u32 err_ring_pkts;
- u32 invalid_rbm;
- u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
- u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
- u32 hal_reo_error[DP_REO_DST_RING_MAX];
- struct ath12k_device_dp_tx_err_stats tx_err;
- u32 reo_rx[DP_REO_DST_RING_MAX][ATH12K_MAX_DEVICES];
- u32 rx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX][ATH12K_MAX_DEVICES];
- u32 tqm_rel_reason[MAX_TQM_RELEASE_REASON];
- u32 fw_tx_status[MAX_FW_TX_STATUS];
- u32 tx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX];
- u32 tx_enqueued[DP_TCL_NUM_RING_MAX];
- u32 tx_completed[DP_TCL_NUM_RING_MAX];
- u32 reo_excep_msdu_buf_type;
-};
-
struct ath12k_reg_freq {
u32 start_freq;
u32 end_freq;
@@ -987,6 +869,11 @@ struct ath12k_hw_link {
* wiphy, protected with struct ath12k_hw_group::mutex.
*/
struct ath12k_hw_group {
+ /* Keep dp_hw_grp as the first member to allow efficient
+ * usage of cache lines for DP fields
+ */
+ struct ath12k_dp_hw_group dp_hw_grp;
+ struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO];
struct list_head list;
u8 id;
u8 num_devices;
@@ -1009,7 +896,6 @@ struct ath12k_hw_group {
bool mlo_capable;
struct device_node *wsi_node[ATH12K_MAX_DEVICES];
struct ath12k_mlo_memory mlo_mem;
- struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO];
bool hw_link_id_init_done;
};
@@ -1035,6 +921,12 @@ struct ath12k_mem_profile_based_param {
struct ath12k_dp_profile_params dp_params;
};
+enum ath12k_device_family {
+ ATH12K_DEVICE_FAMILY_START,
+ ATH12K_DEVICE_FAMILY_WIFI7 = ATH12K_DEVICE_FAMILY_START,
+ ATH12K_DEVICE_FAMILY_MAX,
+};
+
/* Master structure to hold the hw data which may be used in core module */
struct ath12k_base {
enum ath12k_hw_rev hw_rev;
@@ -1054,13 +946,14 @@ struct ath12k_base {
struct ath12k_htc htc;
- struct ath12k_dp dp;
+ struct ath12k_dp *dp;
void __iomem *mem;
unsigned long mem_len;
void __iomem *mem_ce;
u32 ce_remap_base_addr;
+ u32 cmem_offset;
bool ce_remap;
struct {
@@ -1105,7 +998,6 @@ struct ath12k_base {
struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
unsigned long long free_vdev_stats_id_map;
- struct list_head peers;
wait_queue_head_t peer_mapping_wq;
u8 mac_addr[ETH_ALEN];
bool wmi_ready;
@@ -1135,7 +1027,6 @@ struct ath12k_base {
/* Current DFS Regulatory */
enum ath12k_dfs_region dfs_region;
- struct ath12k_device_dp_stats device_stats;
#ifdef CONFIG_ATH12K_DEBUGFS
struct dentry *debugfs_soc;
#endif
@@ -1191,13 +1082,13 @@ struct ath12k_base {
size_t amss_dualmac_len;
const u8 *m3_data;
size_t m3_len;
+ const u8 *aux_uc_data;
+ size_t aux_uc_len;
DECLARE_BITMAP(fw_features, ATH12K_FW_FEATURE_COUNT);
bool fw_features_valid;
} fw;
- const struct hal_rx_ops *hal_rx_ops;
-
struct completion restart_completed;
#ifdef CONFIG_ACPI
@@ -1241,6 +1132,14 @@ struct ath12k_base {
const struct ath12k_mem_profile_based_param *profile_param;
enum ath12k_qmi_mem_mode target_mem_mode;
+	/* FIXME: Later, define this field in an ag-equivalent object that is
+	 * available during the initial phase of probe.
+	 */
+ const struct ieee80211_ops *ath12k_ops;
+
+ struct rhashtable *rhead_sta_addr;
+ struct rhashtable_params rhash_sta_addr_param;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -1516,4 +1415,18 @@ static inline s32 ath12k_pdev_get_noise_floor(struct ath12k *ar)
return ar->rssi_info.noise_floor;
}
+/* The @ab->dp NULL check or assertion is intentionally omitted because
+ * @ab->dp is guaranteed to be non-NULL after a successful probe and
+ * remains valid until teardown. Invoking this before allocation or
+ * after teardown is considered invalid usage.
+ */
+static inline struct ath12k_dp *ath12k_ab_to_dp(struct ath12k_base *ab)
+{
+ return ab->dp;
+}
+
+static inline struct ath12k *ath12k_pdev_dp_to_ar(struct ath12k_pdev_dp *dp)
+{
+ return container_of(dp, struct ath12k, dp);
+}
#endif /* _CORE_H_ */
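The two accessors above pair up; a minimal sketch of a hypothetical caller (not part of this patch, and the pdev index and RCU context are assumptions) showing the base-to-DP hop and the container_of() round trip back to the radio:

static void example_dp_walk(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab); /* non-NULL after probe */
	struct ath12k_pdev_dp *pdev_dp;
	struct ath12k *ar;

	rcu_read_lock();
	pdev_dp = rcu_dereference(dp->dp_pdevs[0]); /* index 0 assumed valid */
	if (pdev_dp) {
		/* ar->dp is embedded, so the reverse map is pointer math */
		ar = ath12k_pdev_dp_to_ar(pdev_dp);
		WARN_ON(ar->ab != ab);
	}
	rcu_read_unlock();
}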
diff --git a/drivers/net/wireless/ath/ath12k/dbring.c b/drivers/net/wireless/ath/ath12k/dbring.c
index 6604dacea2ae..f71ec2a58469 100644
--- a/drivers/net/wireless/ath/ath12k/dbring.c
+++ b/drivers/net/wireless/ath/ath12k/dbring.c
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
#include "debug.h"
+#include "hal.h"
static int ath12k_dbring_bufs_replenish(struct ath12k *ar,
struct ath12k_dbring *ring,
@@ -55,7 +55,7 @@ static int ath12k_dbring_bufs_replenish(struct ath12k *ar,
cookie = u32_encode_bits(ar->pdev_idx, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
- ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
+ ath12k_hal_rx_buf_addr_info_set(&ab->hal, desc, paddr, cookie, 0);
ath12k_hal_srng_access_end(ab, srng);
@@ -298,7 +298,7 @@ int ath12k_dbring_buffer_release_event(struct ath12k_base *ab,
num_buff_reaped++;
- ath12k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);
+ ath12k_hal_rx_buf_addr_info_get(&ab->hal, &desc, &paddr, &cookie, &rbm);
buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
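For reference, the cookie built on the replenish side and unpacked on the release side round-trips through the same GENMASK-backed helpers; a minimal sketch with placeholder values:

u16 pdev_idx = 0, buf_id = 42;	/* placeholder values */
u32 cookie;

cookie = u32_encode_bits(pdev_idx, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
	 u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);

/* the decode in the release handler is the exact inverse */
WARN_ON(u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID) != buf_id);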
diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c
index 5ce100cd9a9d..34b3b2c920dc 100644
--- a/drivers/net/wireless/ath/ath12k/debug.c
+++ b/drivers/net/wireless/ath/ath12k/debug.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ *
*/
#include <linux/vmalloc.h>
@@ -21,6 +22,7 @@ void ath12k_info(struct ath12k_base *ab, const char *fmt, ...)
/* TODO: Trace the log */
va_end(args);
}
+EXPORT_SYMBOL(ath12k_info);
void ath12k_err(struct ath12k_base *ab, const char *fmt, ...)
{
@@ -35,6 +37,7 @@ void ath12k_err(struct ath12k_base *ab, const char *fmt, ...)
/* TODO: Trace the log */
va_end(args);
}
+EXPORT_SYMBOL(ath12k_err);
void __ath12k_warn(struct device *dev, const char *fmt, ...)
{
@@ -49,6 +52,7 @@ void __ath12k_warn(struct device *dev, const char *fmt, ...)
/* TODO: Trace the log */
va_end(args);
}
+EXPORT_SYMBOL(__ath12k_warn);
#ifdef CONFIG_ATH12K_DEBUG
@@ -72,6 +76,7 @@ void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask,
va_end(args);
}
+EXPORT_SYMBOL(__ath12k_dbg);
void ath12k_dbg_dump(struct ath12k_base *ab,
enum ath12k_debug_mask mask,
@@ -100,5 +105,6 @@ void ath12k_dbg_dump(struct ath12k_base *ab,
}
}
}
+EXPORT_SYMBOL(ath12k_dbg_dump);
#endif /* CONFIG_ATH12K_DEBUG */
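The EXPORT_SYMBOL() additions make these helpers callable from code built as a separate module, presumably the split-out DP module this series introduces; a hypothetical external call site would simply be:

/* from another module linking against the ath12k core */
ath12k_info(ab, "attached %u radios\n", ab->num_radios);
ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp ready\n");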
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
index d6a86f075d73..358031fa14eb 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -967,7 +967,7 @@ static int ath12k_open_link_stats(struct inode *inode, struct file *file)
"\nlink[%d] Tx Frame descriptor Encrypt Type = ",
link_id);
- for (i = 0; i < HAL_ENCRYPT_TYPE_MAX; i++) {
+ for (i = 0; i < DP_ENCRYPT_TYPE_MAX; i++) {
len += scnprintf(buf + len, buf_len - len,
" %d:%d", i,
linkstat.tx_encrypt_type[i]);
@@ -1020,13 +1020,15 @@ void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
debugfs_create_file("link_stats", 0400, vif->debugfs_dir, ahvif,
&ath12k_fops_link_stats);
}
+EXPORT_SYMBOL(ath12k_debugfs_op_vif_add);
static ssize_t ath12k_debugfs_dump_device_dp_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath12k_base *ab = file->private_data;
- struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_device_dp_stats *device_stats = &dp->device_stats;
int len = 0, i, j, ret;
struct ath12k *ar;
const int size = 4096;
@@ -1155,6 +1157,7 @@ static ssize_t ath12k_debugfs_dump_device_dp_stats(struct file *file,
len += scnprintf(buf + len, size - len, "\n");
+ rcu_read_lock();
for (i = 0; i < ab->num_radios; i++) {
ar = ath12k_mac_get_ar_by_pdev_id(ab, DP_SW2HW_MACID(i));
if (ar) {
@@ -1163,6 +1166,7 @@ static ssize_t ath12k_debugfs_dump_device_dp_stats(struct file *file,
atomic_read(&ar->dp.num_tx_pending));
}
}
+ rcu_read_unlock();
len += scnprintf(buf + len, size - len, "\nREO Rx Received:\n");
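The added rcu_read_lock()/rcu_read_unlock() pair reflects that the ar lookup is RCU-protected; the general shape of the pattern, with a hypothetical non-sleeping consumer, is:

static void example_pdev_walk(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int i;

	rcu_read_lock();	/* the lookup below is RCU-protected */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ath12k_mac_get_ar_by_pdev_id(ab, DP_SW2HW_MACID(i));
		if (ar)
			consume_pdev_stats(ar); /* hypothetical, must not sleep */
	}
	rcu_read_unlock();
}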
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index 48b010a1b756..7f6ca07fb335 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/vmalloc.h>
@@ -29,8 +29,10 @@ print_array_to_buf_index(u8 *buf, u32 offset, const char *header, u32 stats_inde
" %u:%u,", stats_index++, le32_to_cpu(array[i]));
}
/* To overwrite the last trailing comma */
- index--;
- *(buf + offset + index) = '\0';
+ if (array_len > 0) {
+ index--;
+ *(buf + offset + index) = '\0';
+ }
if (footer) {
index += scnprintf(buf + offset + index,
@@ -5537,6 +5539,189 @@ ath12k_htt_print_pdev_rtt_tbr_cmd_res_stats_tlv(const void *tag_buf, u16 tag_len
stats_req->buf_len = len;
}
+static void
+ath12k_htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ le32_to_cpu(htt_stats_buf->mac_id__word) & 0xFF);
+ len += scnprintf(buf + len, buf_len - len, "ppdu_recvd = %u\n",
+ le32_to_cpu(htt_stats_buf->ppdu_recvd));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_cnt_fcs_ok));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_cnt_fcs_err));
+ len += scnprintf(buf + len, buf_len - len, "tcp_msdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tcp_msdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tcp_ack_msdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "udp_msdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->udp_msdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "other_msdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->other_msdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u\n",
+ le32_to_cpu(htt_stats_buf->fw_ring_mpdu_ind));
+ len += print_array_to_buf(buf, len, "fw_ring_mgmt_subtype",
+ htt_stats_buf->fw_ring_mgmt_subtype,
+ ATH12K_HTT_STATS_SUBTYPE_MAX, "\n");
+ len += print_array_to_buf(buf, len, "fw_ring_ctrl_subtype",
+ htt_stats_buf->fw_ring_ctrl_subtype,
+ ATH12K_HTT_STATS_SUBTYPE_MAX, "\n");
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u\n",
+ le32_to_cpu(htt_stats_buf->fw_ring_mcast_data_msdu));
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u\n",
+ le32_to_cpu(htt_stats_buf->fw_ring_bcast_data_msdu));
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u\n",
+ le32_to_cpu(htt_stats_buf->fw_ring_ucast_data_msdu));
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u\n",
+ le32_to_cpu(htt_stats_buf->fw_ring_null_data_msdu));
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u\n",
+ le32_to_cpu(htt_stats_buf->fw_ring_mpdu_drop));
+ len += scnprintf(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->ofld_local_data_ind_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_local_data_buf_recycle_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->ofld_local_data_buf_recycle_cnt));
+ len += scnprintf(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->drx_local_data_ind_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "drx_local_data_buf_recycle_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->drx_local_data_buf_recycle_cnt));
+ len += scnprintf(buf + len, buf_len - len, "local_nondata_ind_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->local_nondata_ind_cnt));
+ len += scnprintf(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->local_nondata_buf_recycle_cnt));
+ len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->fw_status_buf_ring_refill_cnt));
+ len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->fw_status_buf_ring_empty_cnt));
+ len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->fw_pkt_buf_ring_refill_cnt));
+ len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->fw_pkt_buf_ring_empty_cnt));
+ len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->fw_link_buf_ring_refill_cnt));
+ len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->fw_link_buf_ring_empty_cnt));
+ len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->host_pkt_buf_ring_refill_cnt));
+ len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->host_pkt_buf_ring_empty_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mon_pkt_buf_ring_refill_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mon_pkt_buf_ring_empty_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "mon_status_buf_ring_refill_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mon_status_buf_ring_refill_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mon_status_buf_ring_empty_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mon_desc_buf_ring_refill_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mon_desc_buf_ring_empty_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mon_dest_ring_update_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mon_dest_ring_full_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_suspend_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_suspend_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_suspend_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_resume_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_resume_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_resume_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_resume_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_ring_switch_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_ring_switch_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_ring_restore_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_ring_restore_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_flush_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_recovery_reset_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_lwm_prom_filter_dis = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_lwm_prom_filter_dis));
+ len += scnprintf(buf + len, buf_len - len, "rx_hwm_prom_filter_en = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_hwm_prom_filter_en));
+ len += scnprintf(buf + len, buf_len - len, "bytes_received_low_32 = %u\n",
+ le32_to_cpu(htt_stats_buf->bytes_received_low_32));
+ len += scnprintf(buf + len, buf_len - len, "bytes_received_high_32 = %u\n",
+ le32_to_cpu(htt_stats_buf->bytes_received_high_32));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ le32_to_cpu(htt_stats_buf->mac_id__hwq_id__word) & 0xFF);
+ len += scnprintf(buf + len, buf_len - len, "hwq_id = %u\n",
+ (le32_to_cpu(htt_stats_buf->mac_id__hwq_id__word) & 0xFF00) >> 8);
+ len += scnprintf(buf + len, buf_len - len, "xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->xretry));
+ len += scnprintf(buf + len, buf_len - len, "underrun_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->underrun_cnt));
+ len += scnprintf(buf + len, buf_len - len, "flush_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->flush_cnt));
+ len += scnprintf(buf + len, buf_len - len, "filt_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->filt_cnt));
+ len += scnprintf(buf + len, buf_len - len, "null_mpdu_bmap = %u\n",
+ le32_to_cpu(htt_stats_buf->null_mpdu_bmap));
+ len += scnprintf(buf + len, buf_len - len, "user_ack_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->user_ack_failure));
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->ack_tlv_proc));
+ len += scnprintf(buf + len, buf_len - len, "sched_id_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_id_proc));
+ len += scnprintf(buf + len, buf_len - len, "null_mpdu_tx_count = %u\n",
+ le32_to_cpu(htt_stats_buf->null_mpdu_tx_count));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_bmap_not_recvd));
+ len += scnprintf(buf + len, buf_len - len, "num_bar = %u\n",
+ le32_to_cpu(htt_stats_buf->num_bar));
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ le32_to_cpu(htt_stats_buf->rts));
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ le32_to_cpu(htt_stats_buf->cts2self));
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ le32_to_cpu(htt_stats_buf->qos_null));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_tried_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queued_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_queued_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_ack_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_filt_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_filt_cnt));
+ len += scnprintf(buf + len, buf_len - len, "false_mpdu_ack_count = %u\n",
+ le32_to_cpu(htt_stats_buf->false_mpdu_ack_count));
+ len += scnprintf(buf + len, buf_len - len, "txq_timeout = %u\n",
+ le32_to_cpu(htt_stats_buf->txq_timeout));
+
+ stats_req->buf_len = len;
+}
+
static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -5690,6 +5875,9 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_SFM_CLIENT_USER_TAG:
ath12k_htt_print_sfm_client_user_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_RX_PDEV_FW_STATS_TAG:
+ ath12k_htt_print_rx_pdev_fw_stats_tlv(tag_buf, len, stats_req);
+ break;
case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG:
ath12k_htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, len, stats_req);
break;
@@ -5833,6 +6021,9 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_PDEV_RTT_TBR_CMD_RESULT_STATS_TAG:
ath12k_htt_print_pdev_rtt_tbr_cmd_res_stats_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_TX_HWQ_CMN_TAG:
+ ath12k_htt_print_tx_hwq_stats_cmn_tlv(tag_buf, len, stats_req);
+ break;
default:
break;
}
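Both new printers follow the same defensive TLV pattern: each tag-specific handler validates the firmware-supplied tag_len before dereferencing any member. A condensed sketch of that contract (function name hypothetical):

static void example_print_tlv(const void *tag_buf, u16 tag_len,
			      struct debug_htt_stats_req *stats_req)
{
	const struct htt_tx_hwq_stats_cmn_tlv *tlv = tag_buf;

	if (tag_len < sizeof(*tlv))
		return;	/* truncated TLV from firmware: skip, don't parse */

	/* every __le32 member of *tlv is now known to be in bounds */
}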
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
index 9bd3a632b002..bfabe6500d44 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef DEBUG_HTT_STATS_H
#define DEBUG_HTT_STATS_H
+#include "dp_htt.h"
+
#define ATH12K_HTT_STATS_BUF_SIZE (1024 * 512)
#define ATH12K_HTT_STATS_COOKIE_LSB GENMASK_ULL(31, 0)
#define ATH12K_HTT_STATS_COOKIE_MSB GENMASK_ULL(63, 32)
@@ -125,6 +127,8 @@ struct ath12k_htt_extd_stats_msg {
enum ath12k_dbg_htt_ext_stats_type {
ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_RX = 2,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ = 3,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
@@ -171,6 +175,7 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_TX_PDEV_SIFS_TAG = 2,
HTT_STATS_TX_PDEV_FLUSH_TAG = 3,
HTT_STATS_STRING_TAG = 5,
+ HTT_STATS_TX_HWQ_CMN_TAG = 6,
HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11,
HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12,
HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
@@ -186,6 +191,7 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25,
HTT_STATS_SFM_CMN_TAG = 26,
HTT_STATS_SRING_STATS_TAG = 27,
+ HTT_STATS_RX_PDEV_FW_STATS_TAG = 28,
HTT_STATS_TX_PDEV_RATE_STATS_TAG = 34,
HTT_STATS_RX_PDEV_RATE_STATS_TAG = 35,
HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
@@ -2073,4 +2079,81 @@ struct ath12k_htt_stats_pdev_rtt_tbr_cmd_result_stats_tlv {
__le32 mu_res[ATH12K_HTT_FTYPE_MAX][ATH12K_HTT_MAX_SCH_CMD_RESULT];
} __packed;
+struct htt_rx_pdev_fw_stats_tlv {
+ __le32 mac_id__word;
+ __le32 ppdu_recvd;
+ __le32 mpdu_cnt_fcs_ok;
+ __le32 mpdu_cnt_fcs_err;
+ __le32 tcp_msdu_cnt;
+ __le32 tcp_ack_msdu_cnt;
+ __le32 udp_msdu_cnt;
+ __le32 other_msdu_cnt;
+ __le32 fw_ring_mpdu_ind;
+ __le32 fw_ring_mgmt_subtype[ATH12K_HTT_STATS_SUBTYPE_MAX];
+ __le32 fw_ring_ctrl_subtype[ATH12K_HTT_STATS_SUBTYPE_MAX];
+ __le32 fw_ring_mcast_data_msdu;
+ __le32 fw_ring_bcast_data_msdu;
+ __le32 fw_ring_ucast_data_msdu;
+ __le32 fw_ring_null_data_msdu;
+ __le32 fw_ring_mpdu_drop;
+ __le32 ofld_local_data_ind_cnt;
+ __le32 ofld_local_data_buf_recycle_cnt;
+ __le32 drx_local_data_ind_cnt;
+ __le32 drx_local_data_buf_recycle_cnt;
+ __le32 local_nondata_ind_cnt;
+ __le32 local_nondata_buf_recycle_cnt;
+ __le32 fw_status_buf_ring_refill_cnt;
+ __le32 fw_status_buf_ring_empty_cnt;
+ __le32 fw_pkt_buf_ring_refill_cnt;
+ __le32 fw_pkt_buf_ring_empty_cnt;
+ __le32 fw_link_buf_ring_refill_cnt;
+ __le32 fw_link_buf_ring_empty_cnt;
+ __le32 host_pkt_buf_ring_refill_cnt;
+ __le32 host_pkt_buf_ring_empty_cnt;
+ __le32 mon_pkt_buf_ring_refill_cnt;
+ __le32 mon_pkt_buf_ring_empty_cnt;
+ __le32 mon_status_buf_ring_refill_cnt;
+ __le32 mon_status_buf_ring_empty_cnt;
+ __le32 mon_desc_buf_ring_refill_cnt;
+ __le32 mon_desc_buf_ring_empty_cnt;
+ __le32 mon_dest_ring_update_cnt;
+ __le32 mon_dest_ring_full_cnt;
+ __le32 rx_suspend_cnt;
+ __le32 rx_suspend_fail_cnt;
+ __le32 rx_resume_cnt;
+ __le32 rx_resume_fail_cnt;
+ __le32 rx_ring_switch_cnt;
+ __le32 rx_ring_restore_cnt;
+ __le32 rx_flush_cnt;
+ __le32 rx_recovery_reset_cnt;
+ __le32 rx_lwm_prom_filter_dis;
+ __le32 rx_hwm_prom_filter_en;
+ __le32 bytes_received_low_32;
+ __le32 bytes_received_high_32;
+} __packed;
+
+struct htt_tx_hwq_stats_cmn_tlv {
+ __le32 mac_id__hwq_id__word;
+ __le32 xretry;
+ __le32 underrun_cnt;
+ __le32 flush_cnt;
+ __le32 filt_cnt;
+ __le32 null_mpdu_bmap;
+ __le32 user_ack_failure;
+ __le32 ack_tlv_proc;
+ __le32 sched_id_proc;
+ __le32 null_mpdu_tx_count;
+ __le32 mpdu_bmap_not_recvd;
+ __le32 num_bar;
+ __le32 rts;
+ __le32 cts2self;
+ __le32 qos_null;
+ __le32 mpdu_tried_cnt;
+ __le32 mpdu_queued_cnt;
+ __le32 mpdu_ack_fail_cnt;
+ __le32 mpdu_filt_cnt;
+ __le32 false_mpdu_ack_count;
+ __le32 txq_timeout;
+} __packed;
+
#endif
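Two of the fields above need host-side post-processing; a sketch (tlv and rx are placeholder pointers to the two structs just defined) of unpacking the shared mac_id/hwq_id word and reassembling the split 64-bit byte counter:

u32 word = le32_to_cpu(tlv->mac_id__hwq_id__word);
u8 mac_id = word & 0xFF;	/* bits 7:0, as printed above */
u8 hwq_id = (word >> 8) & 0xFF;	/* bits 15:8 */
u64 bytes = ((u64)le32_to_cpu(rx->bytes_received_high_32) << 32) |
	    le32_to_cpu(rx->bytes_received_low_32);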
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_sta.c b/drivers/net/wireless/ath/ath12k/debugfs_sta.c
index 5bd2bf4c9dac..585c40bd2951 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
- * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/vmalloc.h>
@@ -11,6 +11,7 @@
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs.h"
+#include "dp_cmn.h"
static
u32 ath12k_dbg_sta_dump_rate_stats(u8 *buf, u32 offset, const int size,
@@ -144,40 +145,40 @@ static ssize_t ath12k_dbg_sta_dump_rx_stats(struct file *file,
const int size = ATH12K_STA_RX_STATS_BUF_SIZE;
struct ath12k_hw *ah = ahsta->ahvif->ah;
struct ath12k_rx_peer_stats *rx_stats;
+ struct ath12k_dp_link_peer *link_peer;
struct ath12k_link_sta *arsta;
u8 link_id = link_sta->link_id;
int len = 0, i, ret = 0;
+ struct ath12k_dp *dp;
bool he_rates_avail;
struct ath12k *ar;
- wiphy_lock(ah->hw->wiphy);
+ guard(wiphy)(ah->hw->wiphy);
- if (!(BIT(link_id) & ahsta->links_map)) {
- wiphy_unlock(ah->hw->wiphy);
+ if (!(BIT(link_id) & ahsta->links_map))
return -ENOENT;
- }
arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
- if (!arsta || !arsta->arvif->ar) {
- wiphy_unlock(ah->hw->wiphy);
+ if (!arsta || !arsta->arvif->ar)
return -ENOENT;
- }
ar = arsta->arvif->ar;
u8 *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
- if (!buf) {
- ret = -ENOENT;
- goto out;
- }
+ if (!buf)
+ return -ENOMEM;
- spin_lock_bh(&ar->ab->base_lock);
+ dp = ath12k_ab_to_dp(ar->ab);
- rx_stats = arsta->rx_stats;
- if (!rx_stats) {
- ret = -ENOENT;
- goto unlock;
- }
+ guard(spinlock_bh)(&dp->dp_lock);
+
+ link_peer = ath12k_dp_link_peer_find_by_addr(dp, arsta->addr);
+ if (!link_peer)
+ return -ENOENT;
+
+ rx_stats = link_peer->peer_stats.rx_stats;
+ if (!rx_stats)
+ return -ENOENT;
len += scnprintf(buf + len, size - len, "RX peer stats:\n\n");
len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
@@ -237,13 +238,8 @@ static ssize_t ath12k_dbg_sta_dump_rx_stats(struct file *file,
len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail,
&rx_stats->byte_stats);
-unlock:
- spin_unlock_bh(&ar->ab->base_lock);
-
if (len)
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
-out:
- wiphy_unlock(ah->hw->wiphy);
return ret;
}
@@ -261,10 +257,9 @@ static ssize_t ath12k_dbg_sta_reset_rx_stats(struct file *file,
struct ieee80211_link_sta *link_sta = file->private_data;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta);
struct ath12k_hw *ah = ahsta->ahvif->ah;
- struct ath12k_rx_peer_stats *rx_stats;
- struct ath12k_link_sta *arsta;
u8 link_id = link_sta->link_id;
- struct ath12k *ar;
+ struct ath12k_link_sta *arsta;
+ struct ath12k_dp *dp;
bool reset;
int ret;
@@ -288,19 +283,9 @@ static ssize_t ath12k_dbg_sta_reset_rx_stats(struct file *file,
goto out;
}
- ar = arsta->arvif->ar;
-
- spin_lock_bh(&ar->ab->base_lock);
-
- rx_stats = arsta->rx_stats;
- if (!rx_stats) {
- spin_unlock_bh(&ar->ab->base_lock);
- ret = -ENOENT;
- goto out;
- }
+ dp = ath12k_ab_to_dp(arsta->arvif->ar->ab);
- memset(rx_stats, 0, sizeof(*rx_stats));
- spin_unlock_bh(&ar->ab->base_lock);
+ ath12k_dp_link_peer_reset_rx_stats(dp, arsta->addr);
ret = count;
out:
@@ -335,3 +320,4 @@ void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw,
&fops_reset_rx_stats);
}
}
+EXPORT_SYMBOL(ath12k_debugfs_link_sta_op_add);
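The rewrite above leans on scope-based cleanup from <linux/cleanup.h>: guard() releases its lock on every exit path, which is what allows the explicit unlock-and-goto error handling to be deleted. A minimal sketch of the shape (the precondition check is hypothetical):

static int example_locked_read(struct ath12k_hw *ah, struct ath12k_dp *dp)
{
	guard(wiphy)(ah->hw->wiphy);	  /* dropped automatically at return */

	if (!example_precondition())	  /* hypothetical check */
		return -ENOENT;		  /* no explicit unlock needed */

	guard(spinlock_bh)(&dp->dp_lock); /* nested guard, same rule */
	return 0;
}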
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index 4a54b8c35311..ab54c8a84d3e 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -1,63 +1,58 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
-#include "hal_tx.h"
#include "hif.h"
+#include "hal.h"
#include "debug.h"
-#include "dp_rx.h"
#include "peer.h"
-#include "dp_mon.h"
+#include "dp_cmn.h"
enum ath12k_dp_desc_type {
ATH12K_DP_TX_DESC,
ATH12K_DP_RX_DESC,
};
-static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
- struct sk_buff *skb)
-{
- dev_kfree_skb_any(skb);
-}
-
void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
struct ath12k_base *ab = ar->ab;
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
/* TODO: Any other peer specific DP cleanup */
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find(ab, vdev_id, addr);
- if (!peer) {
+ spin_lock_bh(&dp->dp_lock);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, addr);
+ if (!peer || !peer->dp_peer) {
ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
addr, vdev_id);
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
return;
}
if (!peer->primary_link) {
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
return;
}
ath12k_dp_rx_peer_tid_cleanup(ar, peer);
- crypto_free_shash(peer->tfm_mmic);
- peer->dp_setup_done = false;
- spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(peer->dp_peer->tfm_mmic);
+ peer->dp_peer->dp_setup_done = false;
+ spin_unlock_bh(&dp->dp_lock);
}
int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
struct ath12k_base *ab = ar->ab;
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
u32 reo_dest;
int ret = 0, tid;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
reo_dest = ar->dp.mac_id + 1;
@@ -92,19 +87,19 @@ int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
return 0;
peer_clean:
- spin_lock_bh(&ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
- peer = ath12k_peer_find(ab, vdev_id, addr);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, addr);
if (!peer) {
ath12k_warn(ab, "failed to find the peer to del rx tid\n");
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
return -ENOENT;
}
for (tid--; tid >= 0; tid--)
- ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
+ ath12k_dp_arch_rx_peer_tid_delete(dp, peer, tid);
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
return ret;
}
@@ -147,7 +142,7 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
- map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
+ map = ab->hal.tcl_to_wbm_rbm_map;
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
if (ring_num == map[i].wbm_ring_num) {
ring_num = i;
@@ -338,50 +333,6 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
return 0;
}
-static
-u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
- struct ath12k_link_vif *arvif)
-{
- u32 bank_config = 0;
- struct ath12k_vif *ahvif = arvif->ahvif;
-
- /* Only valid for raw frames with HW crypto enabled.
- * With SW crypto, mac80211 sets key per packet
- */
- if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
- test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
- bank_config |=
- u32_encode_bits(ath12k_dp_tx_get_encrypt_type(ahvif->key_cipher),
- HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
-
- bank_config |= u32_encode_bits(ahvif->tx_encap_type,
- HAL_TX_BANK_CONFIG_ENCAP_TYPE);
- bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
- u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
- u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
-
- /* only valid if idx_lookup_override is not set in tcl_data_cmd */
- if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
- bank_config |= u32_encode_bits(1, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
- else
- bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
-
- bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
- HAL_TX_BANK_CONFIG_ADDRX_EN) |
- u32_encode_bits(!!(arvif->hal_addr_search_flags &
- HAL_TX_ADDRY_EN),
- HAL_TX_BANK_CONFIG_ADDRY_EN);
-
- bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(ahvif->vif) ? 3 : 0,
- HAL_TX_BANK_CONFIG_MESH_EN) |
- u32_encode_bits(arvif->vdev_id_check_en,
- HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);
-
- bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID);
-
- return bank_config;
-}
-
static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab,
struct ath12k_link_vif *arvif,
struct ath12k_dp *dp)
@@ -392,7 +343,7 @@ static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab,
bool configure_register = false;
/* convert vdev params into hal_tx_bank_config */
- bank_config = ath12k_dp_tx_get_vdev_bank_config(ab, arvif);
+ bank_config = ath12k_dp_arch_tx_get_vdev_bank_config(dp, arvif);
spin_lock_bh(&dp->tx_bank_lock);
/* TODO: implement using idr kernel framework*/
@@ -424,7 +375,8 @@ inc_ref_and_return:
spin_unlock_bh(&dp->tx_bank_lock);
if (configure_register)
- ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id);
+ ath12k_hal_tx_configure_bank_register(ab,
+ bank_config, bank_id);
ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u",
bank_id, bank_config, dp->bank_profiles[bank_id].bank_config,
@@ -442,7 +394,7 @@ void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id)
static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
kfree(dp->bank_profiles);
dp->bank_profiles = NULL;
@@ -450,7 +402,7 @@ static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
u32 num_tcl_banks = ab->hw_params->num_tcl_banks;
int i;
@@ -473,7 +425,7 @@ static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
int i;
ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring);
@@ -490,7 +442,7 @@ static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
struct hal_srng *srng;
int i, ret, tx_comp_ring_num;
@@ -506,7 +458,7 @@ static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
}
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
- map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
+ map = ab->hal.tcl_to_wbm_rbm_map;
tx_comp_ring_num = map[i].wbm_ring_num;
ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
@@ -596,7 +548,7 @@ err:
static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
int i;
@@ -616,7 +568,7 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
u32 n_link_desc,
u32 last_bank_sz)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
u32 n_entries_per_buf;
@@ -659,7 +611,7 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
paddr = link_desc_banks[i].paddr;
while (n_entries) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
- ath12k_hal_set_link_desc_addr(scatter_buf, cookie,
+ ath12k_hal_set_link_desc_addr(dp->hal, scatter_buf, cookie,
paddr, rbm);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
@@ -710,7 +662,7 @@ static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
int n_link_desc_bank,
int last_bank_sz)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
int i;
int ret = 0;
int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
@@ -758,7 +710,7 @@ void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
u32 n_mpdu_link_desc, n_mpdu_queue_desc;
u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
int ret = 0;
@@ -797,6 +749,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
u32 ring_type, struct hal_srng *srng,
u32 n_link_desc)
{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
u32 tot_mem_sz;
u32 n_link_desc_bank, last_bank_sz;
u32 entry_sz, align_bytes, n_entries;
@@ -804,7 +757,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
u32 paddr;
int i, ret;
u32 cookie;
- enum hal_rx_buf_return_buf_manager rbm = ab->dp.idle_link_rbm;
+ enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;
tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
tot_mem_sz += HAL_LINK_DESC_ALIGN;
@@ -865,7 +818,8 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
while (n_entries &&
(desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
- ath12k_hal_set_link_desc_addr(desc, cookie, paddr, rbm);
+ ath12k_hal_set_link_desc_addr(dp->hal, desc, cookie, paddr,
+ rbm);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
}
@@ -883,133 +837,18 @@ fail_desc_bank_free:
return ret;
}
-int ath12k_dp_service_srng(struct ath12k_base *ab,
- struct ath12k_ext_irq_grp *irq_grp,
- int budget)
+void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
- struct napi_struct *napi = &irq_grp->napi;
- int grp_id = irq_grp->grp_id;
- int work_done = 0;
- int i = 0, j;
- int tot_work_done = 0;
- enum dp_monitor_mode monitor_mode;
- u8 ring_mask;
-
- if (ab->hw_params->ring_mask->tx[grp_id]) {
- i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
- ath12k_dp_tx_completion_handler(ab, i);
- }
-
- if (ab->hw_params->ring_mask->rx_err[grp_id]) {
- work_done = ath12k_dp_rx_process_err(ab, napi, budget);
- budget -= work_done;
- tot_work_done += work_done;
- if (budget <= 0)
- goto done;
- }
-
- if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
- work_done = ath12k_dp_rx_process_wbm_err(ab,
- napi,
- budget);
- budget -= work_done;
- tot_work_done += work_done;
-
- if (budget <= 0)
- goto done;
- }
-
- if (ab->hw_params->ring_mask->rx[grp_id]) {
- i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
- work_done = ath12k_dp_rx_process(ab, i, napi,
- budget);
- budget -= work_done;
- tot_work_done += work_done;
- if (budget <= 0)
- goto done;
- }
-
- if (ab->hw_params->ring_mask->rx_mon_status[grp_id]) {
- ring_mask = ab->hw_params->ring_mask->rx_mon_status[grp_id];
- for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxdma_per_pdev + j;
-
- if (ring_mask & BIT(id)) {
- work_done =
- ath12k_dp_mon_process_ring(ab, id, napi, budget,
- 0);
- budget -= work_done;
- tot_work_done += work_done;
- if (budget <= 0)
- goto done;
- }
- }
- }
- }
-
- if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
- monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
- ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
- for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxdma_per_pdev + j;
-
- if (ring_mask & BIT(id)) {
- work_done =
- ath12k_dp_mon_process_ring(ab, id, napi, budget,
- monitor_mode);
- budget -= work_done;
- tot_work_done += work_done;
-
- if (budget <= 0)
- goto done;
- }
- }
- }
- }
-
- if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
- monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
- ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
- for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxdma_per_pdev + j;
-
- if (ring_mask & BIT(id)) {
- work_done =
- ath12k_dp_mon_process_ring(ab, id, napi, budget,
- monitor_mode);
- budget -= work_done;
- tot_work_done += work_done;
-
- if (budget <= 0)
- goto done;
- }
- }
- }
- }
-
- if (ab->hw_params->ring_mask->reo_status[grp_id])
- ath12k_dp_rx_process_reo_status(ab);
-
- if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
- struct ath12k_dp *dp = &ab->dp;
- struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
- LIST_HEAD(list);
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k *ar;
+ int i;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ rcu_assign_pointer(dp->dp_pdevs[ar->pdev_idx], NULL);
}
- /* TODO: Implement handler for other interrupts */
-
-done:
- return tot_work_done;
-}
-
-void ath12k_dp_pdev_free(struct ath12k_base *ab)
-{
- int i;
+ synchronize_rcu();
for (i = 0; i < ab->num_radios; i++)
ath12k_dp_rx_pdev_free(ab, i);
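The teardown above follows the standard RCU unpublish ordering; the invariant, sketched with a placeholder index:

rcu_assign_pointer(dp->dp_pdevs[idx], NULL);	/* 1: stop new lookups */
synchronize_rcu();				/* 2: drain in-flight readers */
/* 3: only now may the per-pdev state be freed */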
@@ -1025,31 +864,10 @@ void ath12k_dp_pdev_pre_alloc(struct ath12k *ar)
/* TODO: Add any RXDMA setup required per pdev */
}
-bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
-{
- if (test_bit(WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS, ab->wmi_ab.svc_map) &&
- ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start &&
- ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end &&
- ab->hw_params->hal_ops->get_hal_rx_compact_ops) {
- return true;
- }
- return false;
-}
-
-void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
-{
- if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
- /* RX TLVS compaction is supported, hence change the hal_rx_ops
- * to compact hal_rx_ops.
- */
- ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
- }
- ab->hal.hal_desc_sz =
- ab->hal_rx_ops->rx_desc_get_desc_size();
-}
-
int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_pdev_dp *dp_pdev;
struct ath12k *ar;
int ret;
int i;
@@ -1061,6 +879,14 @@ int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
+
+ dp_pdev = &ar->dp;
+
+ dp_pdev->hw = ar->ah->hw;
+ dp_pdev->dp = dp;
+ dp_pdev->hw_link_id = ar->hw_link_id;
+ dp_pdev->dp_hw = &ar->ah->dp_hw;
+
ret = ath12k_dp_rx_pdev_alloc(ab, i);
if (ret) {
ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
@@ -1074,6 +900,11 @@ int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
}
}
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ rcu_assign_pointer(dp->dp_pdevs[ar->pdev_idx], &ar->dp);
+ }
+
return 0;
err:
ath12k_dp_pdev_free(ab);
@@ -1081,40 +912,23 @@ out:
return ret;
}
-int ath12k_dp_htt_connect(struct ath12k_dp *dp)
+static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
{
- struct ath12k_htc_svc_conn_req conn_req = {};
- struct ath12k_htc_svc_conn_resp conn_resp = {};
- int status;
-
- conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
- conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;
-
- /* connect to control service */
- conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;
-
- status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
- &conn_resp);
-
- if (status)
- return status;
-
- dp->eid = conn_resp.eid;
+ u8 link_id = arvif->link_id;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_dp_link_vif *dp_link_vif;
- return 0;
-}
+ dp_link_vif = ath12k_dp_vif_to_dp_link_vif(&ahvif->dp_vif, link_id);
-static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
-{
switch (arvif->ahvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
- arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
- arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
+ dp_link_vif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
+ dp_link_vif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
break;
case WMI_VDEV_TYPE_AP:
case WMI_VDEV_TYPE_IBSS:
- arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
- arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ dp_link_vif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ dp_link_vif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
break;
case WMI_VDEV_TYPE_MONITOR:
default:
@@ -1125,22 +939,29 @@ static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif)
{
struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ u8 link_id = arvif->link_id;
+ int bank_id;
+ struct ath12k_dp_link_vif *dp_link_vif;
+
+ dp_link_vif = ath12k_dp_vif_to_dp_link_vif(&ahvif->dp_vif, link_id);
- arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
- u32_encode_bits(arvif->vdev_id,
- HTT_TCL_META_DATA_VDEV_ID) |
- u32_encode_bits(ar->pdev->pdev_id,
- HTT_TCL_META_DATA_PDEV_ID);
+ dp_link_vif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
+ u32_encode_bits(arvif->vdev_id,
+ HTT_TCL_META_DATA_VDEV_ID) |
+ u32_encode_bits(ar->pdev->pdev_id,
+ HTT_TCL_META_DATA_PDEV_ID);
/* set HTT extension valid bit to 0 by default */
- arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+ dp_link_vif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
ath12k_dp_update_vdev_search(arvif);
- arvif->vdev_id_check_en = true;
- arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp);
+ dp_link_vif->vdev_id_check_en = true;
+ bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, ath12k_ab_to_dp(ab));
+ dp_link_vif->bank_id = bank_id;
/* TODO: error path for bank id failure */
- if (arvif->bank_id == DP_INVALID_BANK_ID) {
+ if (bank_id == DP_INVALID_BANK_ID) {
ath12k_err(ar->ab, "Failed to initialize DP TX Banks");
return;
}
@@ -1150,7 +971,7 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
{
struct ath12k_rx_desc_info *desc_info;
struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct ath12k_skb_cb *skb_cb;
struct sk_buff *skb;
struct ath12k *ar;
@@ -1273,15 +1094,13 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
if (!ab->hw_params->reoq_lut_support)
return;
if (dp->reoq_lut.vaddr_unaligned) {
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_REO_REG +
- HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+ ath12k_hal_write_reoq_lut_addr(ab, 0);
dma_free_coherent(ab->dev, dp->reoq_lut.size,
dp->reoq_lut.vaddr_unaligned,
dp->reoq_lut.paddr_unaligned);
@@ -1289,9 +1108,7 @@ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
}
if (dp->ml_reoq_lut.vaddr_unaligned) {
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_REO_REG +
- HAL_REO1_QDESC_LUT_BASE1(ab), 0);
+ ath12k_hal_write_ml_reoq_lut_addr(ab, 0);
dma_free_coherent(ab->dev, dp->ml_reoq_lut.size,
dp->ml_reoq_lut.vaddr_unaligned,
dp->ml_reoq_lut.paddr_unaligned);
@@ -1299,11 +1116,13 @@ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
}
}
-void ath12k_dp_free(struct ath12k_base *ab)
+static void ath12k_dp_cleanup(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
int i;
+ ath12k_dp_link_peer_rhash_tbl_destroy(dp);
+
if (!dp->ab)
return;
@@ -1324,60 +1143,6 @@ void ath12k_dp_free(struct ath12k_base *ab)
ath12k_dp_rx_free(ab);
/* Deinit any SOC level resource */
- dp->ab = NULL;
-}
-
-void ath12k_dp_cc_config(struct ath12k_base *ab)
-{
- u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
- u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
- u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
- u32 val = 0;
-
- if (ath12k_ftm_mode)
- return;
-
- ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);
-
- val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
- HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
- u32_encode_bits(ATH12K_CC_PPT_MSB,
- HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
- u32_encode_bits(ATH12K_CC_SPT_MSB,
- HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
- u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) |
- u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) |
- u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE);
-
- ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(ab), val);
-
- /* Enable HW CC for WBM */
- ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base);
-
- val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
- HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
- u32_encode_bits(ATH12K_CC_PPT_MSB,
- HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
- u32_encode_bits(ATH12K_CC_SPT_MSB,
- HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
- u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN);
-
- ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG1, val);
-
- /* Enable conversion complete indication */
- val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2);
- val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) |
- u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) |
- u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN);
-
- ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val);
-
- /* Enable Cookie conversion for WBM2SW Rings */
- val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG);
- val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) |
- ab->hw_params->hal_params->wbm2sw_cc_enable;
-
- ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val);
}
static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
@@ -1385,26 +1150,23 @@ static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
}
-static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
- u16 ppt_idx, u16 spt_idx)
+static void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_dp *dp,
+ u16 ppt_idx, u16 spt_idx)
{
- struct ath12k_dp *dp = &ab->dp;
-
return dp->spt_info[ppt_idx].vaddr + spt_idx;
}
-struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
+struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_dp *dp,
u32 cookie)
{
- struct ath12k_dp *dp = &ab->dp;
struct ath12k_rx_desc_info **desc_addr_ptr;
u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
- start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET(ab);
- end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES(ab);
+ start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET(dp->ab);
+ end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES(dp->ab);
if (ppt_idx < start_ppt_idx ||
ppt_idx >= end_ppt_idx ||
@@ -1412,12 +1174,13 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
return NULL;
ppt_idx = ppt_idx - dp->rx_ppt_base;
- desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
+ desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(dp, ppt_idx, spt_idx);
return *desc_addr_ptr;
}
+EXPORT_SYMBOL(ath12k_dp_get_rx_desc);
-struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
+struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_dp *dp,
u32 cookie)
{
struct ath12k_tx_desc_info **desc_addr_ptr;
@@ -1428,21 +1191,22 @@ struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
end_ppt_idx = start_ppt_idx +
- (ATH12K_TX_SPT_PAGES_PER_POOL(ab) * ATH12K_HW_MAX_QUEUES);
+ (ATH12K_TX_SPT_PAGES_PER_POOL(dp->ab) * ATH12K_HW_MAX_QUEUES);
if (ppt_idx < start_ppt_idx ||
ppt_idx >= end_ppt_idx ||
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
- desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
+ desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(dp, ppt_idx, spt_idx);
return *desc_addr_ptr;
}
+EXPORT_SYMBOL(ath12k_dp_get_tx_desc);
static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
u32 num_rx_spt_pages = ATH12K_NUM_RX_SPT_PAGES(ab);
@@ -1482,7 +1246,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
/* Update descriptor VA in SPT */
- rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
+ rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(dp, ppt_idx, j);
*rx_desc_addr = &rx_descs[j];
}
}
@@ -1521,7 +1285,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
/* Update descriptor VA in SPT */
tx_desc_addr =
- ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
+ ath12k_dp_cc_get_desc_addr_ptr(dp, ppt_idx, j);
*tx_desc_addr = &tx_descs[j];
}
}
@@ -1571,7 +1335,7 @@ void ath12k_dp_partner_cc_init(struct ath12k_base *ab)
if (ag->ab[i] == ab)
continue;
- ath12k_dp_cmem_init(ab, &ag->ab[i]->dp, ATH12K_DP_RX_DESC);
+ ath12k_dp_cmem_init(ab, ath12k_ab_to_dp(ag->ab[i]), ATH12K_DP_RX_DESC);
}
}
@@ -1582,7 +1346,7 @@ static u32 ath12k_dp_get_num_spt_pages(struct ath12k_base *ab)
static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
int i, ret = 0;
INIT_LIST_HEAD(&dp->rx_desc_free_list);
@@ -1668,8 +1432,7 @@ static int ath12k_dp_alloc_reoq_lut(struct ath12k_base *ab,
static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
- u32 val;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
int ret;
if (!ab->hw_params->reoq_lut_support)
@@ -1697,50 +1460,24 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
* register only
*/
- ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
- dp->reoq_lut.paddr >> 8);
-
- ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
- dp->ml_reoq_lut.paddr >> 8);
-
- val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR(ab));
-
- ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR(ab),
- val | HAL_REO_QDESC_ADDR_READ_LUT_ENABLE);
-
- ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_MAX_PEERID(ab),
- HAL_REO_QDESC_MAX_PEERID);
+ ath12k_hal_write_reoq_lut_addr(ab, dp->reoq_lut.paddr >> 8);
+ ath12k_hal_write_ml_reoq_lut_addr(ab, dp->ml_reoq_lut.paddr >> 8);
+ ath12k_hal_reoq_lut_addr_read_enable(ab);
+ ath12k_hal_reoq_lut_set_max_peerid(ab);
return 0;
}
-static enum hal_rx_buf_return_buf_manager
-ath12k_dp_get_idle_link_rbm(struct ath12k_base *ab)
+static int ath12k_dp_setup(struct ath12k_base *ab)
{
- switch (ab->device_id) {
- case 0:
- return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
- case 1:
- return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST;
- case 2:
- return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST;
- default:
- ath12k_warn(ab, "invalid %d device id, so choose default rbm\n",
- ab->device_id);
- WARN_ON(1);
- return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
- }
-}
-
-int ath12k_dp_alloc(struct ath12k_base *ab)
-{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp;
struct hal_srng *srng = NULL;
size_t size = 0;
u32 n_link_desc = 0;
int ret;
int i;
+ dp = ath12k_ab_to_dp(ab);
dp->ab = ab;
INIT_LIST_HEAD(&dp->reo_cmd_list);
@@ -1749,13 +1486,25 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
spin_lock_init(&dp->reo_cmd_lock);
spin_lock_init(&dp->reo_rxq_flush_lock);
+ spin_lock_init(&dp->dp_lock);
+ INIT_LIST_HEAD(&dp->peers);
+
+ mutex_init(&dp->link_peer_rhash_tbl_lock);
+
dp->reo_cmd_cache_flush_count = 0;
- dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);
+ dp->idle_link_rbm =
+ ath12k_hal_get_idle_link_rbm(&ab->hal, ab->device_id);
+
+ ret = ath12k_dp_link_peer_rhash_tbl_init(dp);
+ if (ret) {
+ ath12k_warn(ab, "failed to init link_peer rhash table: %d\n", ret);
+ return ret;
+ }
ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {
ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
- return ret;
+ goto rhash_destroy;
}
srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
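ath12k_dp_link_peer_rhash_tbl_init() itself is not shown in this hunk; under the assumption that the table is keyed by the 6-byte peer MAC address (matching the new rhash_addr member added to struct ath12k_link_sta), its parameters would look roughly like:

static const struct rhashtable_params example_sta_addr_params = {
	.key_len	     = ETH_ALEN,
	.key_offset	     = offsetof(struct ath12k_link_sta, addr),
	.head_offset	     = offsetof(struct ath12k_link_sta, rhash_addr),
	.automatic_shrinking = true,
};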
@@ -1764,7 +1513,7 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
HAL_WBM_IDLE_LINK, srng, n_link_desc);
if (ret) {
ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
- return ret;
+ goto rhash_destroy;
}
ret = ath12k_dp_cc_init(ab);
@@ -1783,7 +1532,7 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
if (ret)
goto fail_dp_bank_profiles_cleanup;
- size = sizeof(struct hal_wbm_release_ring_tx) *
+ size = ab->hal.hal_wbm_release_ring_tx_size *
DP_TX_COMP_RING_SIZE(ab);
ret = ath12k_dp_reoq_lut_setup(ab);
@@ -1836,6 +1585,48 @@ fail_hw_cc_cleanup:
fail_link_desc_cleanup:
ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+rhash_destroy:
+ ath12k_dp_link_peer_rhash_tbl_destroy(dp);
return ret;
}
+
+void ath12k_dp_cmn_device_deinit(struct ath12k_dp *dp)
+{
+ ath12k_dp_cleanup(dp->ab);
+}
+
+int ath12k_dp_cmn_device_init(struct ath12k_dp *dp)
+{
+ int ret;
+
+ ret = ath12k_dp_setup(dp->ab);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void ath12k_dp_cmn_hw_group_unassign(struct ath12k_dp *dp,
+ struct ath12k_hw_group *ag)
+{
+ struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
+
+ lockdep_assert_held(&ag->mutex);
+
+ dp_hw_grp->dp[dp->device_id] = NULL;
+
+ dp->ag = NULL;
+ dp->device_id = ATH12K_INVALID_DEVICE_ID;
+}
+
+void ath12k_dp_cmn_hw_group_assign(struct ath12k_dp *dp,
+ struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
+
+ dp->ag = ag;
+ dp->device_id = ab->device_id;
+ dp_hw_grp->dp[dp->device_id] = dp;
+}
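ath12k_dp_cmn_hw_group_assign() and its unassign counterpart form a symmetric pair that parks each device's DP in the group's dp[] slot by device_id. A hedged usage sketch follows; the caller, ag->ab[] and ag->num_devices are assumptions, and serialization on ag->mutex mirrors the lockdep assertion in the unassign path:

/* Hedged usage sketch (hypothetical caller): attach every device's DP
 * to the hw group under ag->mutex, mirroring the lockdep_assert_held()
 * in ath12k_dp_cmn_hw_group_unassign().
 */
static void ath12k_dp_hw_group_attach(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	mutex_lock(&ag->mutex);
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];	/* assumed group-to-device mapping */
		if (!ab)
			continue;
		ath12k_dp_cmn_hw_group_assign(ath12k_ab_to_dp(ab), ag);
	}
	mutex_unlock(&ag->mutex);
}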
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 4ffec6ad7d8d..f8cfc7bb29dd 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -1,29 +1,35 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_DP_H
#define ATH12K_DP_H
-#include "hal_desc.h"
-#include "hal_rx.h"
#include "hw.h"
+#include "dp_htt.h"
+#include "dp_cmn.h"
+#include <linux/rhashtable.h>
#define MAX_RXDMA_PER_PDEV 2
struct ath12k_base;
-struct ath12k_peer;
+struct ath12k_dp_link_peer;
struct ath12k_dp;
struct ath12k_vif;
struct ath12k_link_vif;
-struct hal_tcl_status_ring;
struct ath12k_ext_irq_grp;
+struct ath12k_dp_rx_tid;
+struct ath12k_dp_rx_tid_rxq;
#define DP_MON_PURGE_TIMEOUT_MS 100
#define DP_MON_SERVICE_BUDGET 128
+#define DP_ENCAP_TYPE_MAX 4
+#define DP_ENCRYPT_TYPE_MAX 12
+#define DP_DESC_TYPE_MAX 2
+
struct dp_srng {
u32 *vaddr_unaligned;
u32 *vaddr;
@@ -149,6 +155,18 @@ struct ath12k_pdev_dp {
u32 mac_id;
atomic_t num_tx_pending;
wait_queue_head_t tx_empty_waitq;
+
+ struct ath12k_dp *dp;
+ struct ieee80211_hw *hw;
+ u8 hw_link_id;
+ struct ath12k_dp_hw *dp_hw;
+
+ /* Protects ppdu stats */
+ spinlock_t ppdu_list_lock;
+ struct ath12k_per_peer_tx_stats peer_tx_stats;
+ struct list_head ppdu_stats_info;
+ u32 ppdu_stat_list_depth;
+
struct dp_srng rxdma_mon_dst_ring[MAX_RXDMA_PER_PDEV];
struct dp_srng tx_mon_dst_ring[MAX_RXDMA_PER_PDEV];
@@ -360,9 +378,77 @@ struct ath12k_link_stats {
u32 tx_completed;
u32 tx_bcast_mcast;
u32 tx_dropped;
- u32 tx_encap_type[HAL_TCL_ENCAP_TYPE_MAX];
- u32 tx_encrypt_type[HAL_ENCRYPT_TYPE_MAX];
- u32 tx_desc_type[HAL_TCL_DESC_TYPE_MAX];
+ u32 tx_encap_type[DP_ENCAP_TYPE_MAX];
+ u32 tx_encrypt_type[DP_ENCRYPT_TYPE_MAX];
+ u32 tx_desc_type[DP_DESC_TYPE_MAX];
+};
+
+/* DP arch ops used by the common module to dispatch into the
+ * arch-specific module
+ */
+struct ath12k_dp_arch_ops {
+ int (*service_srng)(struct ath12k_dp *dp,
+ struct ath12k_ext_irq_grp *irq_grp,
+ int budget);
+ u32 (*tx_get_vdev_bank_config)(struct ath12k_base *ab,
+ struct ath12k_link_vif *arvif);
+ int (*reo_cmd_send)(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd,
+ void (*cb)(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status));
+ void (*setup_pn_check_reo_cmd)(struct ath12k_hal_reo_cmd *cmd,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u32 cipher, enum set_key_cmd key_cmd);
+ void (*rx_peer_tid_delete)(struct ath12k_base *ab,
+ struct ath12k_dp_link_peer *peer, u8 tid);
+ int (*reo_cache_flush)(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid);
+ int (*rx_link_desc_return)(struct ath12k_dp *dp,
+ struct ath12k_buffer_addr *buf_addr_info,
+ enum hal_wbm_rel_bm_act action);
+ void (*rx_frags_cleanup)(struct ath12k_dp_rx_tid *rx_tid,
+ bool rel_link_desc);
+ int (*peer_rx_tid_reo_update)(struct ath12k_dp *dp,
+ struct ath12k_dp_link_peer *peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u32 ba_win_sz, u16 ssn,
+ bool update_ssn);
+ int (*rx_assign_reoq)(struct ath12k_base *ab, struct ath12k_dp_peer *dp_peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u16 ssn, enum hal_pn_type pn_type);
+ void (*peer_rx_tid_qref_setup)(struct ath12k_base *ab, u16 peer_id, u16 tid,
+ dma_addr_t paddr);
+ void (*peer_rx_tid_qref_reset)(struct ath12k_base *ab, u16 peer_id, u16 tid);
+ int (*rx_tid_delete_handler)(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid);
+};
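A hedged sketch of how an arch-specific module might populate this ops table and hand it to the common layer; every ath12k_dp_wifi7_* symbol below is a hypothetical stand-in, not a name from this patch:

/* Hedged sketch: an arch module fills in the ops table once and the
 * common module dispatches through dp->ops (see the inline wrappers
 * added to dp.h below). All ath12k_dp_wifi7_* names are illustrative.
 */
static struct ath12k_dp_arch_ops ath12k_dp_wifi7_arch_ops = {
	.service_srng		 = ath12k_dp_wifi7_service_srng,
	.tx_get_vdev_bank_config = ath12k_dp_wifi7_tx_get_vdev_bank_config,
	.reo_cmd_send		 = ath12k_dp_wifi7_reo_cmd_send,
	/* remaining ops elided for brevity */
};

void ath12k_dp_wifi7_arch_init(struct ath12k_dp *dp)
{
	dp->ops = &ath12k_dp_wifi7_arch_ops;
}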
+
+struct ath12k_device_dp_tx_err_stats {
+ /* TCL Ring Descriptor unavailable */
+ u32 desc_na[DP_TCL_NUM_RING_MAX];
+	/* Other failures during dp_tx, e.g. memory allocation failure,
+	 * idr unavailable, etc.
+	 */
+ atomic_t misc_fail;
+};
+
+struct ath12k_device_dp_stats {
+ u32 err_ring_pkts;
+ u32 invalid_rbm;
+ u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
+ u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
+ u32 hal_reo_error[DP_REO_DST_RING_MAX];
+ struct ath12k_device_dp_tx_err_stats tx_err;
+ u32 reo_rx[DP_REO_DST_RING_MAX][ATH12K_MAX_DEVICES];
+ u32 rx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX][ATH12K_MAX_DEVICES];
+ u32 tqm_rel_reason[MAX_TQM_RELEASE_REASON];
+ u32 fw_tx_status[MAX_FW_TX_STATUS];
+ u32 tx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX];
+ u32 tx_enqueued[DP_TCL_NUM_RING_MAX];
+ u32 tx_completed[DP_TCL_NUM_RING_MAX];
+ u32 reo_excep_msdu_buf_type;
};
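The split between the plain desc_na counters and the atomic misc_fail reflects who can touch them: desc_na[i] is only written from ring i's TX path, while misc_fail may be incremented from several contexts. A hedged accounting sketch (the helper name is hypothetical):

/* Hedged sketch: account a TX error. desc_na is per TCL ring and only
 * written from that ring's TX path, so a plain u32 suffices; misc_fail
 * can be hit from multiple contexts, hence atomic_t.
 */
static void ath12k_dp_tx_account_err(struct ath12k_device_dp_stats *stats,
				     int ring_id, bool desc_unavailable)
{
	if (desc_unavailable)
		stats->tx_err.desc_na[ring_id]++;
	else
		atomic_inc(&stats->tx_err.misc_fail);
}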
struct ath12k_dp {
@@ -427,1512 +513,137 @@ struct ath12k_dp {
struct dp_rxdma_mon_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
struct ath12k_reo_q_addr_lut reoq_lut;
struct ath12k_reo_q_addr_lut ml_reoq_lut;
-};
-
-/* HTT definitions */
-#define HTT_TAG_TCL_METADATA_VERSION 5
-
-#define HTT_TCL_META_DATA_TYPE GENMASK(1, 0)
-#define HTT_TCL_META_DATA_VALID_HTT BIT(2)
-
-/* vdev meta data */
-#define HTT_TCL_META_DATA_VDEV_ID GENMASK(10, 3)
-#define HTT_TCL_META_DATA_PDEV_ID GENMASK(12, 11)
-#define HTT_TCL_META_DATA_HOST_INSPECTED_MISSION BIT(13)
-
-/* peer meta data */
-#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 3)
-
-/* Global sequence number */
-#define HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM 3
-#define HTT_TCL_META_DATA_GLOBAL_SEQ_HOST_INSPECTED BIT(2)
-#define HTT_TCL_META_DATA_GLOBAL_SEQ_NUM GENMASK(14, 3)
-#define HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID 128
-
-/* HTT tx completion is overlaid in wbm_release_ring */
-#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13)
-#define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON GENMASK(3, 0)
-#define HTT_TX_WBM_COMP_INFO1_EXCEPTION_FRAME BIT(4)
-
-#define HTT_TX_WBM_COMP_INFO2_ACK_RSSI GENMASK(31, 24)
-
-struct htt_tx_wbm_completion {
- __le32 rsvd0[2];
- __le32 info0;
- __le32 info1;
- __le32 info2;
- __le32 info3;
- __le32 info4;
- __le32 rsvd1;
-
-} __packed;
-
-enum htt_h2t_msg_type {
- HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
- HTT_H2T_MSG_TYPE_SRING_SETUP = 0xb,
- HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
- HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10,
- HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11,
- HTT_H2T_MSG_TYPE_VDEV_TXRX_STATS_CFG = 0x1a,
- HTT_H2T_MSG_TYPE_TX_MONITOR_CFG = 0x1b,
-};
-
-#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
-#define HTT_OPTION_TCL_METADATA_VER_V1 1
-#define HTT_OPTION_TCL_METADATA_VER_V2 2
-#define HTT_OPTION_TAG GENMASK(7, 0)
-#define HTT_OPTION_LEN GENMASK(15, 8)
-#define HTT_OPTION_VALUE GENMASK(31, 16)
-#define HTT_TCL_METADATA_VER_SZ 4
-
-struct htt_ver_req_cmd {
- __le32 ver_reg_info;
- __le32 tcl_metadata_version;
-} __packed;
-
-enum htt_srng_ring_type {
- HTT_HW_TO_SW_RING,
- HTT_SW_TO_HW_RING,
- HTT_SW_TO_SW_RING,
-};
-
-enum htt_srng_ring_id {
- HTT_RXDMA_HOST_BUF_RING,
- HTT_RXDMA_MONITOR_STATUS_RING,
- HTT_RXDMA_MONITOR_BUF_RING,
- HTT_RXDMA_MONITOR_DESC_RING,
- HTT_RXDMA_MONITOR_DEST_RING,
- HTT_HOST1_TO_FW_RXBUF_RING,
- HTT_HOST2_TO_FW_RXBUF_RING,
- HTT_RXDMA_NON_MONITOR_DEST_RING,
- HTT_RXDMA_HOST_BUF_RING2,
- HTT_TX_MON_HOST2MON_BUF_RING,
- HTT_TX_MON_MON2HOST_DEST_RING,
- HTT_RX_MON_HOST2MON_BUF_RING,
- HTT_RX_MON_MON2HOST_DEST_RING,
-};
-
-/* host -> target HTT_SRING_SETUP message
- *
- * After target is booted up, Host can send SRING setup message for
- * each host facing LMAC SRING. Target setups up HW registers based
- * on setup message and confirms back to Host if response_required is set.
- * Host should wait for confirmation message before sending new SRING
- * setup message
- *
- * The message would appear as follows:
- *
- * |31 24|23 20|19|18 16|15|14 8|7 0|
- * |--------------- +-----------------+----------------+------------------|
- * | ring_type | ring_id | pdev_id | msg_type |
- * |----------------------------------------------------------------------|
- * | ring_base_addr_lo |
- * |----------------------------------------------------------------------|
- * | ring_base_addr_hi |
- * |----------------------------------------------------------------------|
- * |ring_misc_cfg_flag|ring_entry_size| ring_size |
- * |----------------------------------------------------------------------|
- * | ring_head_offset32_remote_addr_lo |
- * |----------------------------------------------------------------------|
- * | ring_head_offset32_remote_addr_hi |
- * |----------------------------------------------------------------------|
- * | ring_tail_offset32_remote_addr_lo |
- * |----------------------------------------------------------------------|
- * | ring_tail_offset32_remote_addr_hi |
- * |----------------------------------------------------------------------|
- * | ring_msi_addr_lo |
- * |----------------------------------------------------------------------|
- * | ring_msi_addr_hi |
- * |----------------------------------------------------------------------|
- * | ring_msi_data |
- * |----------------------------------------------------------------------|
- * | intr_timer_th |IM| intr_batch_counter_th |
- * |----------------------------------------------------------------------|
- * | reserved |RR|PTCF| intr_low_threshold |
- * |----------------------------------------------------------------------|
- * Where
- * IM = sw_intr_mode
- * RR = response_required
- * PTCF = prefetch_timer_cfg
- *
- * The message is interpreted as follows:
- * dword0 - b'0:7 - msg_type: This will be set to
- * HTT_H2T_MSG_TYPE_SRING_SETUP
- * b'8:15 - pdev_id:
- * 0 (for rings at SOC/UMAC level),
- * 1/2/3 mac id (for rings at LMAC level)
- * b'16:23 - ring_id: identify which ring is to setup,
- * more details can be got from enum htt_srng_ring_id
- * b'24:31 - ring_type: identify type of host rings,
- * more details can be got from enum htt_srng_ring_type
- * dword1 - b'0:31 - ring_base_addr_lo: Lower 32bits of ring base address
- * dword2 - b'0:31 - ring_base_addr_hi: Upper 32bits of ring base address
- * dword3 - b'0:15 - ring_size: size of the ring in unit of 4-bytes words
- * b'16:23 - ring_entry_size: Size of each entry in 4-byte word units
- * b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and
- * SW_TO_HW_RING.
- * Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs.
- * dword4 - b'0:31 - ring_head_off32_remote_addr_lo:
- * Lower 32 bits of memory address of the remote variable
- * storing the 4-byte word offset that identifies the head
- * element within the ring.
- * (The head offset variable has type u32.)
- * Valid for HW_TO_SW and SW_TO_SW rings.
- * dword5 - b'0:31 - ring_head_off32_remote_addr_hi:
- * Upper 32 bits of memory address of the remote variable
- * storing the 4-byte word offset that identifies the head
- * element within the ring.
- * (The head offset variable has type u32.)
- * Valid for HW_TO_SW and SW_TO_SW rings.
- * dword6 - b'0:31 - ring_tail_off32_remote_addr_lo:
- * Lower 32 bits of memory address of the remote variable
- * storing the 4-byte word offset that identifies the tail
- * element within the ring.
- * (The tail offset variable has type u32.)
- * Valid for HW_TO_SW and SW_TO_SW rings.
- * dword7 - b'0:31 - ring_tail_off32_remote_addr_hi:
- * Upper 32 bits of memory address of the remote variable
- * storing the 4-byte word offset that identifies the tail
- * element within the ring.
- * (The tail offset variable has type u32.)
- * Valid for HW_TO_SW and SW_TO_SW rings.
- * dword8 - b'0:31 - ring_msi_addr_lo: Lower 32bits of MSI cfg address
- * valid only for HW_TO_SW_RING and SW_TO_HW_RING
- * dword9 - b'0:31 - ring_msi_addr_hi: Upper 32bits of MSI cfg address
- * valid only for HW_TO_SW_RING and SW_TO_HW_RING
- * dword10 - b'0:31 - ring_msi_data: MSI data
- * Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs
- * valid only for HW_TO_SW_RING and SW_TO_HW_RING
- * dword11 - b'0:14 - intr_batch_counter_th:
- * batch counter threshold is in units of 4-byte words.
- * HW internally maintains and increments batch count.
- * (see SRING spec for detail description).
- * When batch count reaches threshold value, an interrupt
- * is generated by HW.
- * b'15 - sw_intr_mode:
- * This configuration shall be static.
- * Only programmed at power up.
- * 0: generate pulse style sw interrupts
- * 1: generate level style sw interrupts
- * b'16:31 - intr_timer_th:
- * The timer init value when timer is idle or is
- * initialized to start downcounting.
- * In 8us units (to cover a range of 0 to 524 ms)
- * dword12 - b'0:15 - intr_low_threshold:
- * Used only by Consumer ring to generate ring_sw_int_p.
- * Ring entries low threshold water mark, that is used
- * in combination with the interrupt timer as well as
- * the clearing of the level interrupt.
- * b'16:18 - prefetch_timer_cfg:
- * Used only by Consumer ring to set timer mode to
- * support Application prefetch handling.
- * The external tail offset/pointer will be updated
- * at following intervals:
- * 3'b000: (Prefetch feature disabled; used only for debug)
- * 3'b001: 1 usec
- * 3'b010: 4 usec
- * 3'b011: 8 usec (default)
- * 3'b100: 16 usec
- * Others: Reserved
- * b'19 - response_required:
- * Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
- * b'20:31 - reserved: reserved for future use
- */
-
-#define HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
-#define HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID GENMASK(15, 8)
-#define HTT_SRNG_SETUP_CMD_INFO0_RING_ID GENMASK(23, 16)
-#define HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE GENMASK(31, 24)
-
-#define HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE GENMASK(15, 0)
-#define HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE GENMASK(23, 16)
-#define HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS BIT(25)
-#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP BIT(27)
-#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP BIT(28)
-#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP BIT(29)
-
-#define HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH GENMASK(14, 0)
-#define HTT_SRNG_SETUP_CMD_INTR_INFO_SW_INTR_MODE BIT(15)
-#define HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH GENMASK(31, 16)
-
-#define HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH GENMASK(15, 0)
-#define HTT_SRNG_SETUP_CMD_INFO2_PRE_FETCH_TIMER_CFG GENMASK(18, 16)
-#define HTT_SRNG_SETUP_CMD_INFO2_RESPONSE_REQUIRED BIT(19)
-
-struct htt_srng_setup_cmd {
- __le32 info0;
- __le32 ring_base_addr_lo;
- __le32 ring_base_addr_hi;
- __le32 info1;
- __le32 ring_head_off32_remote_addr_lo;
- __le32 ring_head_off32_remote_addr_hi;
- __le32 ring_tail_off32_remote_addr_lo;
- __le32 ring_tail_off32_remote_addr_hi;
- __le32 ring_msi_addr_lo;
- __le32 ring_msi_addr_hi;
- __le32 msi_data;
- __le32 intr_info;
- __le32 info2;
-} __packed;
-
-/* host -> target FW PPDU_STATS config message
- *
- * @details
- * The following field definitions describe the format of the HTT host
- * to target FW for PPDU_STATS_CFG msg.
- * The message allows the host to configure the PPDU_STATS_IND messages
- * produced by the target.
- *
- * |31 24|23 16|15 8|7 0|
- * |-----------------------------------------------------------|
- * | REQ bit mask | pdev_mask | msg type |
- * |-----------------------------------------------------------|
- * Header fields:
- * - MSG_TYPE
- * Bits 7:0
- * Purpose: identifies this is a req to configure ppdu_stats_ind from target
- * Value: 0x11
- * - PDEV_MASK
- * Bits 8:15
- * Purpose: identifies which pdevs this PPDU stats configuration applies to
- * Value: This is a overloaded field, refer to usage and interpretation of
- * PDEV in interface document.
- * Bit 8 : Reserved for SOC stats
- * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
- * Indicates MACID_MASK in DBS
- * - REQ_TLV_BIT_MASK
- * Bits 16:31
- * Purpose: each set bit indicates the corresponding PPDU stats TLV type
- * needs to be included in the target's PPDU_STATS_IND messages.
- * Value: refer htt_ppdu_stats_tlv_tag_t <<<???
- *
- */
-
-struct htt_ppdu_stats_cfg_cmd {
- __le32 msg;
-} __packed;
-
-#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
-#define HTT_PPDU_STATS_CFG_SOC_STATS BIT(8)
-#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 9)
-#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
-
-enum htt_ppdu_stats_tag_type {
- HTT_PPDU_STATS_TAG_COMMON,
- HTT_PPDU_STATS_TAG_USR_COMMON,
- HTT_PPDU_STATS_TAG_USR_RATE,
- HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64,
- HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256,
- HTT_PPDU_STATS_TAG_SCH_CMD_STATUS,
- HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON,
- HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64,
- HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256,
- HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS,
- HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH,
- HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY,
- HTT_PPDU_STATS_TAG_INFO,
- HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD,
-
- /* New TLV's are added above to this line */
- HTT_PPDU_STATS_TAG_MAX,
-};
-
-#define HTT_PPDU_STATS_TAG_DEFAULT (BIT(HTT_PPDU_STATS_TAG_COMMON) \
- | BIT(HTT_PPDU_STATS_TAG_USR_COMMON) \
- | BIT(HTT_PPDU_STATS_TAG_USR_RATE) \
- | BIT(HTT_PPDU_STATS_TAG_SCH_CMD_STATUS) \
- | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON) \
- | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS) \
- | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH) \
- | BIT(HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY))
-
-#define HTT_PPDU_STATS_TAG_PKTLOG (BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64) | \
- BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256) | \
- BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64) | \
- BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256) | \
- BIT(HTT_PPDU_STATS_TAG_INFO) | \
- BIT(HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD) | \
- HTT_PPDU_STATS_TAG_DEFAULT)
-
-enum htt_stats_internal_ppdu_frametype {
- HTT_STATS_PPDU_FTYPE_CTRL,
- HTT_STATS_PPDU_FTYPE_DATA,
- HTT_STATS_PPDU_FTYPE_BAR,
- HTT_STATS_PPDU_FTYPE_MAX
-};
-
-/* HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message
- *
- * details:
- * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by host to
- * configure RXDMA rings.
- * The configuration is per ring based and includes both packet subtypes
- * and PPDU/MPDU TLVs.
- *
- * The message would appear as follows:
- *
- * |31 29|28|27|26|25|24|23 16|15 8|7 0|
- * |-------+--+--+--+--+--+-----------+----------------+---------------|
- * | rsvd1 |ED|DT|OV|PS|SS| ring_id | pdev_id | msg_type |
- * |-------------------------------------------------------------------|
- * | rsvd2 | ring_buffer_size |
- * |-------------------------------------------------------------------|
- * | packet_type_enable_flags_0 |
- * |-------------------------------------------------------------------|
- * | packet_type_enable_flags_1 |
- * |-------------------------------------------------------------------|
- * | packet_type_enable_flags_2 |
- * |-------------------------------------------------------------------|
- * | packet_type_enable_flags_3 |
- * |-------------------------------------------------------------------|
- * | tlv_filter_in_flags |
- * |-------------------------------------------------------------------|
- * Where:
- * PS = pkt_swap
- * SS = status_swap
- * The message is interpreted as follows:
- * dword0 - b'0:7 - msg_type: This will be set to
- * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG
- * b'8:15 - pdev_id:
- * 0 (for rings at SOC/UMAC level),
- * 1/2/3 mac id (for rings at LMAC level)
- * b'16:23 - ring_id : Identify the ring to configure.
- * More details can be got from enum htt_srng_ring_id
- * b'24 - status_swap: 1 is to swap status TLV
- * b'25 - pkt_swap: 1 is to swap packet TLV
- * b'26 - rx_offset_valid (OV): flag to indicate rx offsets
- * configuration fields are valid
- * b'27 - drop_thresh_valid (DT): flag to indicate if the
- * rx_drop_threshold field is valid
- * b'28 - rx_mon_global_en: Enable/Disable global register
- * configuration in Rx monitor module.
- * b'29:31 - rsvd1: reserved for future use
- * dword1 - b'0:16 - ring_buffer_size: size of buffers referenced by rx ring,
- * in byte units.
- * Valid only for HW_TO_SW_RING and SW_TO_HW_RING
- * - b'16:31 - rsvd2: Reserved for future use
- * dword2 - b'0:31 - packet_type_enable_flags_0:
- * Enable MGMT packet from 0b0000 to 0b1001
- * bits from low to high: FP, MD, MO - 3 bits
- * FP: Filter_Pass
- * MD: Monitor_Direct
- * MO: Monitor_Other
- * 10 mgmt subtypes * 3 bits -> 30 bits
- * Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs
- * dword3 - b'0:31 - packet_type_enable_flags_1:
- * Enable MGMT packet from 0b1010 to 0b1111
- * bits from low to high: FP, MD, MO - 3 bits
- * Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs
- * dword4 - b'0:31 - packet_type_enable_flags_2:
- * Enable CTRL packet from 0b0000 to 0b1001
- * bits from low to high: FP, MD, MO - 3 bits
- * Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs
- * dword5 - b'0:31 - packet_type_enable_flags_3:
- * Enable CTRL packet from 0b1010 to 0b1111,
- * MCAST_DATA, UCAST_DATA, NULL_DATA
- * bits from low to high: FP, MD, MO - 3 bits
- * Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs
- * dword6 - b'0:31 - tlv_filter_in_flags:
- * Filter in Attention/MPDU/PPDU/Header/User tlvs
- * Refer to CFG_TLV_FILTER_IN_FLAG defs
- */
-
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID BIT(26)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL BIT(27)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON BIT(28)
-
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(18, 16)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(21, 19)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(24, 22)
-
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD GENMASK(9, 0)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE BIT(17)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE BIT(18)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE BIT(19)
-
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET BIT(0)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET GENMASK(14, 1)
-
-#define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET GENMASK(15, 0)
-#define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET GENMASK(31, 16)
-#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET GENMASK(15, 0)
-#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET GENMASK(31, 16)
-#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET GENMASK(15, 0)
-#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET GENMASK(31, 16)
-#define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET GENMASK(15, 0)
-
-#define HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET BIT(23)
-#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK GENMASK(15, 0)
-#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK GENMASK(18, 16)
-#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK GENMASK(16, 0)
-
-enum htt_rx_filter_tlv_flags {
- HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
- HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
- HTT_RX_FILTER_TLV_FLAGS_RX_PACKET = BIT(2),
- HTT_RX_FILTER_TLV_FLAGS_MSDU_END = BIT(3),
- HTT_RX_FILTER_TLV_FLAGS_MPDU_END = BIT(4),
- HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER = BIT(5),
- HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER = BIT(6),
- HTT_RX_FILTER_TLV_FLAGS_ATTENTION = BIT(7),
- HTT_RX_FILTER_TLV_FLAGS_PPDU_START = BIT(8),
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END = BIT(9),
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
- HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO = BIT(13),
-};
-
-enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(0),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(1),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(2),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(3),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(4),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(5),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(6),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(7),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(8),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(9),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(10),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(11),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(12),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(13),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(14),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(15),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(16),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(17),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(18),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(19),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(20),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(21),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(22),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(23),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(24),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(25),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(26),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(27),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(28),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(29),
-};
-
-enum htt_rx_mgmt_pkt_filter_tlv_flags1 {
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(0),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(1),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(2),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(3),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(4),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(5),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(6),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(7),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(8),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(9),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(10),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(11),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(12),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(13),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(14),
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(15),
- HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(16),
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(17),
-};
-
-enum htt_rx_ctrl_pkt_filter_tlv_flags2 {
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(0),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(1),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(2),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(3),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(4),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(5),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(6),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(7),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(8),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(9),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(10),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(11),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(12),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(13),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(14),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(15),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(16),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(17),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(18),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(19),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(20),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(21),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(22),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(23),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(24),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(25),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(26),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(27),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(28),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(29),
-};
-
-enum htt_rx_ctrl_pkt_filter_tlv_flags3 {
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(0),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(1),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(2),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(3),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(4),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(5),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(6),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(7),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(8),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(9),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(10),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(11),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(12),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(13),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(14),
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(15),
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(16),
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(17),
-};
-
-enum htt_rx_data_pkt_filter_tlv_flasg3 {
- HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(18),
- HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(19),
- HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(20),
- HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(21),
- HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(22),
- HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(23),
- HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(24),
- HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(25),
- HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(26),
-};
-
-#define HTT_RX_FP_MGMT_FILTER_FLAGS0 \
- (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
- | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
- | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
- | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
- | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
- | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
- | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
- | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
- | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
-
-#define HTT_RX_MD_MGMT_FILTER_FLAGS0 \
- (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
- | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
- | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
- | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
- | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
- | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
- | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
- | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
- | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
-
-#define HTT_RX_MO_MGMT_FILTER_FLAGS0 \
- (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
- | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
- | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
- | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
- | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
- | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
- | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
- | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
- | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
-
-#define HTT_RX_FP_MGMT_FILTER_FLAGS1 (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
- | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
- | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
- | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
- | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
-
-#define HTT_RX_MD_MGMT_FILTER_FLAGS1 (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
- | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
- | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
- | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
- | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
-
-#define HTT_RX_MO_MGMT_FILTER_FLAGS1 (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
- | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
- | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
- | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
- | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
-
-#define HTT_RX_FP_CTRL_FILTER_FLASG2 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
- | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
- | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
-
-#define HTT_RX_MD_CTRL_FILTER_FLASG2 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
- | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
- | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
-
-#define HTT_RX_MO_CTRL_FILTER_FLASG2 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
- | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
- | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
-
-#define HTT_RX_FP_CTRL_FILTER_FLASG3 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
- | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
- | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
- | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
- | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
- | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
-
-#define HTT_RX_MD_CTRL_FILTER_FLASG3 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
- | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
- | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
- | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
- | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
- | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
-
-#define HTT_RX_MO_CTRL_FILTER_FLASG3 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
- | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
- | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
- | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
- | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
- | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
-
-#define HTT_RX_FP_DATA_FILTER_FLASG3 (HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
- | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
- | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
-
-#define HTT_RX_MD_DATA_FILTER_FLASG3 (HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
- | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
- | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
-
-#define HTT_RX_MO_DATA_FILTER_FLASG3 (HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
- | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
- | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
-
-#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 \
- (HTT_RX_FP_MGMT_FILTER_FLAGS0 | \
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
-
-#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS0 \
- (HTT_RX_MO_MGMT_FILTER_FLAGS0 | \
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
-
-#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 \
- (HTT_RX_FP_MGMT_FILTER_FLAGS1 | \
- HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
-
-#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS1 \
- (HTT_RX_MO_MGMT_FILTER_FLAGS1 | \
- HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
-
-#define HTT_RX_MON_FP_CTRL_FILTER_FLASG2 \
- (HTT_RX_FP_CTRL_FILTER_FLASG2 | \
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
- HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
-
-#define HTT_RX_MON_MO_CTRL_FILTER_FLASG2 \
- (HTT_RX_MO_CTRL_FILTER_FLASG2 | \
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
- HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
-
-#define HTT_RX_MON_FP_CTRL_FILTER_FLASG3 HTT_RX_FP_CTRL_FILTER_FLASG3
-
-#define HTT_RX_MON_MO_CTRL_FILTER_FLASG3 HTT_RX_MO_CTRL_FILTER_FLASG3
-
-#define HTT_RX_MON_FP_DATA_FILTER_FLASG3 HTT_RX_FP_DATA_FILTER_FLASG3
-
-#define HTT_RX_MON_MO_DATA_FILTER_FLASG3 HTT_RX_MO_DATA_FILTER_FLASG3
-
-#define HTT_RX_MON_FILTER_TLV_FLAGS \
- (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
-
-#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING \
- (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
-
-#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING \
- (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
- HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
- HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
- HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
- HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
- HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
- HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
- HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
-
-#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING \
- (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
- HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
- HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
- HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
- HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
- HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
- HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO)
-
-/* msdu start. mpdu end, attention, rx hdr tlv's are not subscribed */
-#define HTT_RX_TLV_FLAGS_RXDMA_RING \
- (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
- HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
- HTT_RX_FILTER_TLV_FLAGS_MSDU_END)
-
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
-
-struct htt_rx_ring_selection_cfg_cmd {
- __le32 info0;
- __le32 info1;
- __le32 pkt_type_en_flags0;
- __le32 pkt_type_en_flags1;
- __le32 pkt_type_en_flags2;
- __le32 pkt_type_en_flags3;
- __le32 rx_filter_tlv;
- __le32 rx_packet_offset;
- __le32 rx_mpdu_offset;
- __le32 rx_msdu_offset;
- __le32 rx_attn_offset;
- __le32 info2;
- __le32 reserved[2];
- __le32 rx_mpdu_start_end_mask;
- __le32 rx_msdu_end_word_mask;
- __le32 info3;
-} __packed;
-
-#define HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE 32
-#define HTT_RX_RING_DEFAULT_DMA_LENGTH 0x7
-#define HTT_RX_RING_PKT_TLV_OFFSET 0x1
-
-struct htt_rx_ring_tlv_filter {
- u32 rx_filter; /* see htt_rx_filter_tlv_flags */
- u32 pkt_filter_flags0; /* MGMT */
- u32 pkt_filter_flags1; /* MGMT */
- u32 pkt_filter_flags2; /* CTRL */
- u32 pkt_filter_flags3; /* DATA */
- bool offset_valid;
- u16 rx_packet_offset;
- u16 rx_header_offset;
- u16 rx_mpdu_end_offset;
- u16 rx_mpdu_start_offset;
- u16 rx_msdu_end_offset;
- u16 rx_msdu_start_offset;
- u16 rx_attn_offset;
- u16 rx_mpdu_start_wmask;
- u16 rx_mpdu_end_wmask;
- u32 rx_msdu_end_wmask;
- u32 conf_len_ctrl;
- u32 conf_len_mgmt;
- u32 conf_len_data;
- u16 rx_drop_threshold;
- bool enable_log_mgmt_type;
- bool enable_log_ctrl_type;
- bool enable_log_data_type;
- bool enable_rx_tlv_offset;
- u16 rx_tlv_offset;
- bool drop_threshold_valid;
- bool rxmon_disable;
-};
-
-#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
-#define HTT_STATS_FRAME_CTRL_TYPE_CTRL 0x1
-#define HTT_STATS_FRAME_CTRL_TYPE_DATA 0x2
-#define HTT_STATS_FRAME_CTRL_TYPE_RESV 0x3
-
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
-
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE GENMASK(15, 0)
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE GENMASK(18, 16)
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(21, 19)
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(24, 22)
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(27, 25)
-
-#define HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG GENMASK(2, 0)
-
-struct htt_tx_ring_selection_cfg_cmd {
- __le32 info0;
- __le32 info1;
- __le32 info2;
- __le32 tlv_filter_mask_in0;
- __le32 tlv_filter_mask_in1;
- __le32 tlv_filter_mask_in2;
- __le32 tlv_filter_mask_in3;
- __le32 reserved[3];
-} __packed;
-
-#define HTT_TX_RING_TLV_FILTER_MGMT_DMA_LEN GENMASK(3, 0)
-#define HTT_TX_RING_TLV_FILTER_CTRL_DMA_LEN GENMASK(7, 4)
-#define HTT_TX_RING_TLV_FILTER_DATA_DMA_LEN GENMASK(11, 8)
-
-#define HTT_TX_MON_FILTER_HYBRID_MODE \
- (HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_START_STATUS | \
- HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_END_STATUS | \
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START | \
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_END | \
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PPDU | \
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_PPDU | \
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_ACK_OR_BA | \
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_1K_BA | \
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PROT | \
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_PROT | \
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_RESPONSE | \
- HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO | \
- HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO_PART2)
-
-struct htt_tx_ring_tlv_filter {
- u32 tx_mon_downstream_tlv_flags;
- u32 tx_mon_upstream_tlv_flags0;
- u32 tx_mon_upstream_tlv_flags1;
- u32 tx_mon_upstream_tlv_flags2;
- bool tx_mon_mgmt_filter;
- bool tx_mon_data_filter;
- bool tx_mon_ctrl_filter;
- u16 tx_mon_pkt_dma_len;
-} __packed;
-
-enum htt_tx_mon_upstream_tlv_flags0 {
- HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_START_STATUS = BIT(1),
- HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_END_STATUS = BIT(2),
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START = BIT(3),
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_END = BIT(4),
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PPDU = BIT(5),
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_PPDU = BIT(6),
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_ACK_OR_BA = BIT(7),
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_1K_BA = BIT(8),
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PROT = BIT(9),
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_PROT = BIT(10),
- HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_RESPONSE = BIT(11),
- HTT_TX_FILTER_TLV_FLAGS0_RX_FRAME_BITMAP_ACK = BIT(12),
- HTT_TX_FILTER_TLV_FLAGS0_RX_FRAME_1K_BITMAP_ACK = BIT(13),
- HTT_TX_FILTER_TLV_FLAGS0_COEX_TX_STATUS = BIT(14),
- HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO = BIT(15),
- HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO_PART2 = BIT(16),
-};
-
-#define HTT_TX_FILTER_TLV_FLAGS2_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 BIT(11)
-
-/* HTT message target->host */
-
-enum htt_t2h_msg_type {
- HTT_T2H_MSG_TYPE_VERSION_CONF,
- HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
- HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
- HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
- HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
- HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
- HTT_T2H_MSG_TYPE_PEER_MAP2 = 0x1e,
- HTT_T2H_MSG_TYPE_PEER_UNMAP2 = 0x1f,
- HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
- HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
- HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24,
- HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND = 0x28,
- HTT_T2H_MSG_TYPE_PEER_MAP3 = 0x2b,
- HTT_T2H_MSG_TYPE_VDEV_TXRX_STATS_PERIODIC_IND = 0x2c,
-};
-
-#define HTT_TARGET_VERSION_MAJOR 3
-
-#define HTT_T2H_MSG_TYPE GENMASK(7, 0)
-#define HTT_T2H_VERSION_CONF_MINOR GENMASK(15, 8)
-#define HTT_T2H_VERSION_CONF_MAJOR GENMASK(23, 16)
-
-struct htt_t2h_version_conf_msg {
- __le32 version;
-} __packed;
-
-#define HTT_T2H_PEER_MAP_INFO_VDEV_ID GENMASK(15, 8)
-#define HTT_T2H_PEER_MAP_INFO_PEER_ID GENMASK(31, 16)
-#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0)
-#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16)
-#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0)
-#define HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID GENMASK(15, 0)
-#define HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL GENMASK(31, 16)
-#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16)
-#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16
-
-struct htt_t2h_peer_map_event {
- __le32 info;
- __le32 mac_addr_l32;
- __le32 info1;
- __le32 info2;
-} __packed;
-
-#define HTT_T2H_PEER_UNMAP_INFO_VDEV_ID HTT_T2H_PEER_MAP_INFO_VDEV_ID
-#define HTT_T2H_PEER_UNMAP_INFO_PEER_ID HTT_T2H_PEER_MAP_INFO_PEER_ID
-#define HTT_T2H_PEER_UNMAP_INFO1_MAC_ADDR_H16 \
- HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16
-#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_M HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M
-#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_S HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S
-
-struct htt_t2h_peer_unmap_event {
- __le32 info;
- __le32 mac_addr_l32;
- __le32 info1;
-} __packed;
-
-struct htt_resp_msg {
- union {
- struct htt_t2h_version_conf_msg version_msg;
- struct htt_t2h_peer_map_event peer_map_ev;
- struct htt_t2h_peer_unmap_event peer_unmap_ev;
- };
-} __packed;
-
-#define HTT_VDEV_GET_STATS_U64(msg_l32, msg_u32)\
- (((u64)__le32_to_cpu(msg_u32) << 32) | (__le32_to_cpu(msg_l32)))
-#define HTT_T2H_VDEV_STATS_PERIODIC_MSG_TYPE GENMASK(7, 0)
-#define HTT_T2H_VDEV_STATS_PERIODIC_PDEV_ID GENMASK(15, 8)
-#define HTT_T2H_VDEV_STATS_PERIODIC_NUM_VDEV GENMASK(23, 16)
-#define HTT_T2H_VDEV_STATS_PERIODIC_PAYLOAD_BYTES GENMASK(15, 0)
-#define HTT_VDEV_TXRX_STATS_COMMON_TLV 0
-#define HTT_VDEV_TXRX_STATS_HW_STATS_TLV 1
-
-struct htt_t2h_vdev_txrx_stats_ind {
- __le32 vdev_id;
- __le32 rx_msdu_byte_cnt_lo;
- __le32 rx_msdu_byte_cnt_hi;
- __le32 rx_msdu_cnt_lo;
- __le32 rx_msdu_cnt_hi;
- __le32 tx_msdu_byte_cnt_lo;
- __le32 tx_msdu_byte_cnt_hi;
- __le32 tx_msdu_cnt_lo;
- __le32 tx_msdu_cnt_hi;
- __le32 tx_retry_cnt_lo;
- __le32 tx_retry_cnt_hi;
- __le32 tx_retry_byte_cnt_lo;
- __le32 tx_retry_byte_cnt_hi;
- __le32 tx_drop_cnt_lo;
- __le32 tx_drop_cnt_hi;
- __le32 tx_drop_byte_cnt_lo;
- __le32 tx_drop_byte_cnt_hi;
- __le32 msdu_ttl_cnt_lo;
- __le32 msdu_ttl_cnt_hi;
- __le32 msdu_ttl_byte_cnt_lo;
- __le32 msdu_ttl_byte_cnt_hi;
-} __packed;
-
-struct htt_t2h_vdev_common_stats_tlv {
- __le32 soc_drop_count_lo;
- __le32 soc_drop_count_hi;
-} __packed;
-
-/* ppdu stats
- *
- * @details
- * The following field definitions describe the format of the HTT target
- * to host ppdu stats indication message.
- *
- *
- * |31 16|15 12|11 10|9 8|7 0 |
- * |----------------------------------------------------------------------|
- * | payload_size | rsvd |pdev_id|mac_id | msg type |
- * |----------------------------------------------------------------------|
- * | ppdu_id |
- * |----------------------------------------------------------------------|
- * | Timestamp in us |
- * |----------------------------------------------------------------------|
- * | reserved |
- * |----------------------------------------------------------------------|
- * | type-specific stats info |
- * | (see htt_ppdu_stats.h) |
- * |----------------------------------------------------------------------|
- * Header fields:
- * - MSG_TYPE
- * Bits 7:0
- * Purpose: Identifies this is a PPDU STATS indication
- * message.
- * Value: 0x1d
- * - mac_id
- * Bits 9:8
- * Purpose: mac_id of this ppdu_id
- * Value: 0-3
- * - pdev_id
- * Bits 11:10
- * Purpose: pdev_id of this ppdu_id
- * Value: 0-3
- * 0 (for rings at SOC level),
- * 1/2/3 PDEV -> 0/1/2
- * - payload_size
- * Bits 31:16
- * Purpose: total tlv size
- * Value: payload_size in bytes
- */
-
-#define HTT_T2H_PPDU_STATS_INFO_PDEV_ID GENMASK(11, 10)
-#define HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE GENMASK(31, 16)
-
-struct ath12k_htt_ppdu_stats_msg {
- __le32 info;
- __le32 ppdu_id;
- __le32 timestamp;
- __le32 rsvd;
- u8 data[];
-} __packed;
-
-struct htt_tlv {
- __le32 header;
- u8 value[];
-} __packed;
-
-#define HTT_TLV_TAG GENMASK(11, 0)
-#define HTT_TLV_LEN GENMASK(23, 12)
-
-enum HTT_PPDU_STATS_BW {
- HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0,
- HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1,
- HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2,
- HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3,
- HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4,
- HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */
- HTT_PPDU_STATS_BANDWIDTH_DYN = 6,
-};
-
-#define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M GENMASK(7, 0)
-#define HTT_PPDU_STATS_CMN_FLAGS_QUEUE_TYPE_M GENMASK(15, 8)
-/* bw - HTT_PPDU_STATS_BW */
-#define HTT_PPDU_STATS_CMN_FLAGS_BW_M GENMASK(19, 16)
-
-struct htt_ppdu_stats_common {
- __le32 ppdu_id;
- __le16 sched_cmdid;
- u8 ring_id;
- u8 num_users;
- __le32 flags; /* %HTT_PPDU_STATS_COMMON_FLAGS_*/
- __le32 chain_mask;
- __le32 fes_duration_us; /* frame exchange sequence */
- __le32 ppdu_sch_eval_start_tstmp_us;
- __le32 ppdu_sch_end_tstmp_us;
- __le32 ppdu_start_tstmp_us;
- /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted
- * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted
+ const struct ath12k_hw_params *hw_params;
+ struct device *dev;
+ struct ath12k_hal *hal;
+
+ /* RCU on dp_pdevs[] provides a teardown synchronization mechanism,
+ * ensuring in-flight data path readers complete before reclaim. Writers
+ * update internal fields under their own synchronization, while readers of
+ * internal fields may perform lockless read if occasional inconsistency
+ * is acceptable or use additional synchronization for a coherent view.
+ *
+ * RCU is used for dp_pdevs[] at this stage to align with
+ * ab->pdevs_active[]. However, if the teardown paths ensure quiescence,
+ * both dp_pdevs[] and pdevs_active[] can be converted to plain pointers,
+ * removing RCU synchronize overhead.
+ *
+ * TODO: evaluate removal of RCU from dp_pdevs in the future
*/
- __le16 phy_mode;
- __le16 bw_mhz;
-} __packed;
+ struct ath12k_pdev_dp __rcu *dp_pdevs[MAX_RADIOS];
-enum htt_ppdu_stats_gi {
- HTT_PPDU_STATS_SGI_0_8_US,
- HTT_PPDU_STATS_SGI_0_4_US,
- HTT_PPDU_STATS_SGI_1_6_US,
- HTT_PPDU_STATS_SGI_3_2_US,
-};
+ struct ath12k_hw_group *ag;
+ u8 device_id;
-#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0)
-#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4)
-
-enum HTT_PPDU_STATS_PPDU_TYPE {
- HTT_PPDU_STATS_PPDU_TYPE_SU,
- HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO,
- HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA,
- HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA,
- HTT_PPDU_STATS_PPDU_TYPE_UL_TRIG,
- HTT_PPDU_STATS_PPDU_TYPE_BURST_BCN,
- HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_RESP,
- HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_TRIG,
- HTT_PPDU_STATS_PPDU_TYPE_UL_RESP,
- HTT_PPDU_STATS_PPDU_TYPE_MAX
-};
+ /* Lock for protection of peers and rhead_peer_addr */
+ spinlock_t dp_lock;
-#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M BIT(0)
-#define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M GENMASK(5, 1)
-
-#define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M GENMASK(1, 0)
-#define HTT_PPDU_STATS_USER_RATE_FLAGS_STBC_M BIT(2)
-#define HTT_PPDU_STATS_USER_RATE_FLAGS_HE_RE_M BIT(3)
-#define HTT_PPDU_STATS_USER_RATE_FLAGS_TXBF_M GENMASK(7, 4)
-#define HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M GENMASK(11, 8)
-#define HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M GENMASK(15, 12)
-#define HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M GENMASK(19, 16)
-#define HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M GENMASK(23, 20)
-#define HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M GENMASK(27, 24)
-#define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M BIT(28)
-#define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M BIT(29)
-
-#define HTT_USR_RATE_PPDU_TYPE(_val) \
- le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M)
-#define HTT_USR_RATE_PREAMBLE(_val) \
- le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M)
-#define HTT_USR_RATE_BW(_val) \
- le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M)
-#define HTT_USR_RATE_NSS(_val) \
- le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M)
-#define HTT_USR_RATE_MCS(_val) \
- le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M)
-#define HTT_USR_RATE_GI(_val) \
- le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M)
-#define HTT_USR_RATE_DCM(_val) \
- le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M)
-
-#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0)
-#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2)
-#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_HE_RE_M BIT(3)
-#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_TXBF_M GENMASK(7, 4)
-#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_BW_M GENMASK(11, 8)
-#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_NSS_M GENMASK(15, 12)
-#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_MCS_M GENMASK(19, 16)
-#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PREAMBLE_M GENMASK(23, 20)
-#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M GENMASK(27, 24)
-#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M BIT(28)
-#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M BIT(29)
-
-struct htt_ppdu_stats_user_rate {
- u8 tid_num;
- u8 reserved0;
- __le16 sw_peer_id;
- __le32 info0; /* %HTT_PPDU_STATS_USER_RATE_INFO0_*/
- __le16 ru_end;
- __le16 ru_start;
- __le16 resp_ru_end;
- __le16 resp_ru_start;
- __le32 info1; /* %HTT_PPDU_STATS_USER_RATE_INFO1_ */
- __le32 rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */
- /* Note: resp_rate_info is only valid for if resp_type is UL */
- __le32 resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */
-} __packed;
+ struct ath12k_dp_arch_ops *ops;
-#define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M GENMASK(7, 0)
-#define HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M BIT(8)
-#define HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M GENMASK(10, 9)
-#define HTT_PPDU_STATS_TX_INFO_FLAGS_BW_M GENMASK(13, 11)
-#define HTT_PPDU_STATS_TX_INFO_FLAGS_SGI_M BIT(14)
-#define HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M GENMASK(31, 16)
-
-#define HTT_TX_INFO_IS_AMSDU(_flags) \
- u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M)
-#define HTT_TX_INFO_BA_ACK_FAILED(_flags) \
- u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M)
-#define HTT_TX_INFO_RATECODE(_flags) \
- u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M)
-#define HTT_TX_INFO_PEERID(_flags) \
- u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M)
-
-enum htt_ppdu_stats_usr_compln_status {
- HTT_PPDU_STATS_USER_STATUS_OK,
- HTT_PPDU_STATS_USER_STATUS_FILTERED,
- HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT,
- HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH,
- HTT_PPDU_STATS_USER_STATUS_ABORT,
-};
-
-#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M GENMASK(3, 0)
-#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M GENMASK(7, 4)
-#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M BIT(8)
-#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_RESP_TYPE_M GENMASK(12, 9)
-
-#define HTT_USR_CMPLTN_IS_AMPDU(_val) \
- le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M)
-#define HTT_USR_CMPLTN_LONG_RETRY(_val) \
- le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M)
-#define HTT_USR_CMPLTN_SHORT_RETRY(_val) \
- le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M)
-
-struct htt_ppdu_stats_usr_cmpltn_cmn {
- u8 status;
- u8 tid_num;
- __le16 sw_peer_id;
- /* RSSI value of last ack packet (units = dB above noise floor) */
- __le32 ack_rssi;
- __le16 mpdu_tried;
- __le16 mpdu_success;
- __le32 flags; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRIES*/
-} __packed;
+ /* Linked list of struct ath12k_dp_link_peer */
+ struct list_head peers;
-#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MPDU_M GENMASK(8, 0)
-#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M GENMASK(24, 9)
-#define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM GENMASK(31, 25)
+ /* For rhash table init and deinit protection */
+ struct mutex link_peer_rhash_tbl_lock;
-#define HTT_PPDU_STATS_NON_QOS_TID 16
-
-struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
- __le32 ppdu_id;
- __le16 sw_peer_id;
- __le16 reserved0;
- __le32 info; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_INFO_ */
- __le16 current_seq;
- __le16 start_seq;
- __le32 success_bytes;
-} __packed;
-
-struct htt_ppdu_user_stats {
- u16 peer_id;
- u16 delay_ba;
- u32 tlv_flags;
- bool is_valid_peer_id;
- struct htt_ppdu_stats_user_rate rate;
- struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn;
- struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba;
+ /* The rhashtable containing struct ath12k_link_peer keyed by mac addr */
+ struct rhashtable *rhead_peer_addr;
+ struct rhashtable_params rhash_peer_addr_param;
+ struct ath12k_device_dp_stats device_stats;
};
-#define HTT_PPDU_STATS_MAX_USERS 8
-#define HTT_PPDU_DESC_MAX_DEPTH 16
+static inline u32 ath12k_dp_arch_tx_get_vdev_bank_config(struct ath12k_dp *dp,
+ struct ath12k_link_vif *arvif)
+{
+ return dp->ops->tx_get_vdev_bank_config(dp->ab, arvif);
+}
-struct htt_ppdu_stats {
- struct htt_ppdu_stats_common common;
- struct htt_ppdu_user_stats user_stats[HTT_PPDU_STATS_MAX_USERS];
-};
+static inline int ath12k_dp_arch_reo_cmd_send(struct ath12k_dp *dp,
+ struct ath12k_dp_rx_tid_rxq *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd,
+ void (*cb)(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status))
+{
+ return dp->ops->reo_cmd_send(dp->ab, rx_tid, type, cmd, cb);
+}
-struct htt_ppdu_stats_info {
- u32 tlv_bitmap;
- u32 ppdu_id;
- u32 frame_type;
- u32 frame_ctrl;
- u32 delay_ba;
- u32 bar_num_users;
- struct htt_ppdu_stats ppdu_stats;
- struct list_head list;
-};
+static inline
+void ath12k_dp_arch_setup_pn_check_reo_cmd(struct ath12k_dp *dp,
+ struct ath12k_hal_reo_cmd *cmd,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u32 cipher,
+ enum set_key_cmd key_cmd)
+{
+ dp->ops->setup_pn_check_reo_cmd(cmd, rx_tid, cipher, key_cmd);
+}
-/* @brief target -> host MLO offset indication message
- *
- * @details
- * The following field definitions describe the format of the HTT target
- * to host mlo offset indication message.
- *
- *
- * |31   29|28             13|12    10|9     8|7        0|
- * |---------------------------------------------------------------------|
- * | rsvd1 |    mac_freq     | chip_id| pdev_id| msgtype |
- * |---------------------------------------------------------------------|
- * | sync_timestamp_lo_us |
- * |---------------------------------------------------------------------|
- * | sync_timestamp_hi_us |
- * |---------------------------------------------------------------------|
- * | mlo_offset_lo |
- * |---------------------------------------------------------------------|
- * | mlo_offset_hi |
- * |---------------------------------------------------------------------|
- * | mlo_offset_clcks |
- * |---------------------------------------------------------------------|
- * | rsvd2 | mlo_comp_clks |mlo_comp_us |
- * |---------------------------------------------------------------------|
- * | rsvd3 |mlo_comp_timer |
- * |---------------------------------------------------------------------|
- * Header fields
- * - MSG_TYPE
- * Bits 7:0
- * Purpose: Identifies this is an MLO offset indication msg
- * - PDEV_ID
- * Bits 9:8
- * Purpose: Pdev of this MLO offset
- * - CHIP_ID
- * Bits 12:10
- * Purpose: chip_id of this MLO offset
- * - MAC_FREQ
- *   Bits 28:13
- *   Purpose: clock frequency of the mac HW block in MHz
- * - SYNC_TIMESTAMP_LO_US
- *   Bits: 31:0
- *   Purpose: lower 32 bits of the WLAN global time stamp at which
- *   last sync interrupt was received
- * - SYNC_TIMESTAMP_HI_US
- * Bits: 31:0
- * Purpose: upper 32 bits of WLAN global time stamp at which
- * last sync interrupt was received
- * - MLO_OFFSET_LO
- * Bits: 31:0
- * Purpose: lower 32 bits of the MLO offset in us
- * - MLO_OFFSET_HI
- * Bits: 31:0
- * Purpose: upper 32 bits of the MLO offset in us
- * - MLO_COMP_US
- * Bits: 15:0
- * Purpose: MLO time stamp compensation applied in us
- * - MLO_COMP_CLCKS
- * Bits: 25:16
- * Purpose: MLO time stamp compensation applied in clock ticks
- * - MLO_COMP_TIMER
- * Bits: 21:0
- * Purpose: Periodic timer at which compensation is applied
- */
+static inline void ath12k_dp_arch_rx_peer_tid_delete(struct ath12k_dp *dp,
+ struct ath12k_dp_link_peer *peer,
+ u8 tid)
+{
+ dp->ops->rx_peer_tid_delete(dp->ab, peer, tid);
+}
-#define HTT_T2H_MLO_OFFSET_INFO_MSG_TYPE GENMASK(7, 0)
-#define HTT_T2H_MLO_OFFSET_INFO_PDEV_ID GENMASK(9, 8)
-
-struct ath12k_htt_mlo_offset_msg {
- __le32 info;
- __le32 sync_timestamp_lo_us;
- __le32 sync_timestamp_hi_us;
- __le32 mlo_offset_hi;
- __le32 mlo_offset_lo;
- __le32 mlo_offset_clks;
- __le32 mlo_comp_clks;
- __le32 mlo_comp_timer;
-} __packed;
+static inline int ath12k_dp_arch_reo_cache_flush(struct ath12k_dp *dp,
+ struct ath12k_dp_rx_tid_rxq *rx_tid)
+{
+ return dp->ops->reo_cache_flush(dp->ab, rx_tid);
+}
-/* @brief host -> target FW extended statistics retrieve
- *
- * @details
- * The following field definitions describe the format of the HTT host
- * to target FW extended stats retrieve message.
- * The message specifies the type of stats the host wants to retrieve.
- *
- * |31 24|23 16|15 8|7 0|
- * |-----------------------------------------------------------|
- * | reserved | stats type | pdev_mask | msg type |
- * |-----------------------------------------------------------|
- * | config param [0] |
- * |-----------------------------------------------------------|
- * | config param [1] |
- * |-----------------------------------------------------------|
- * | config param [2] |
- * |-----------------------------------------------------------|
- * | config param [3] |
- * |-----------------------------------------------------------|
- * | reserved |
- * |-----------------------------------------------------------|
- * | cookie LSBs |
- * |-----------------------------------------------------------|
- * | cookie MSBs |
- * |-----------------------------------------------------------|
- * Header fields:
- * - MSG_TYPE
- * Bits 7:0
- * Purpose: identifies this is an extended stats upload request message
- * Value: 0x10
- * - PDEV_MASK
- * Bits 15:8
- * Purpose: identifies the mask of PDEVs to retrieve stats from
- * Value: This is an overloaded field, refer to usage and interpretation of
- * PDEV in interface document.
- * Bit 8 : Reserved for SOC stats
- * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
- * Indicates MACID_MASK in DBS
- * - STATS_TYPE
- * Bits 23:16
- * Purpose: identifies which FW statistics to upload
- * Value: Defined by htt_dbg_ext_stats_type (see htt_stats.h)
- * - Reserved
- * Bits 31:24
- * - CONFIG_PARAM [0]
- * Bits 31:0
- * Purpose: give an opaque configuration value to the specified stats type
- * Value: stats-type specific configuration value
- * Refer to htt_stats.h for interpretation for each stats sub_type
- * - CONFIG_PARAM [1]
- * Bits 31:0
- * Purpose: give an opaque configuration value to the specified stats type
- * Value: stats-type specific configuration value
- * Refer to htt_stats.h for interpretation for each stats sub_type
- * - CONFIG_PARAM [2]
- * Bits 31:0
- * Purpose: give an opaque configuration value to the specified stats type
- * Value: stats-type specific configuration value
- * Refer to htt_stats.h for interpretation for each stats sub_type
- * - CONFIG_PARAM [3]
- * Bits 31:0
- * Purpose: give an opaque configuration value to the specified stats type
- * Value: stats-type specific configuration value
- * Refer to htt_stats.h for interpretation for each stats sub_type
- * - Reserved [31:0] for future use.
- * - COOKIE_LSBS
- * Bits 31:0
- * Purpose: Provide a mechanism to match a target->host stats confirmation
- * message with its preceding host->target stats request message.
- * Value: LSBs of the opaque cookie specified by the host-side requestor
- * - COOKIE_MSBS
- * Bits 31:0
- * Purpose: Provide a mechanism to match a target->host stats confirmation
- * message with its preceding host->target stats request message.
- * Value: MSBs of the opaque cookie specified by the host-side requestor
- */
+static inline
+int ath12k_dp_arch_rx_link_desc_return(struct ath12k_dp *dp,
+ struct ath12k_buffer_addr *buf_addr_info,
+ enum hal_wbm_rel_bm_act action)
+{
+ return dp->ops->rx_link_desc_return(dp, buf_addr_info, action);
+}
-struct htt_ext_stats_cfg_hdr {
- u8 msg_type;
- u8 pdev_mask;
- u8 stats_type;
- u8 reserved;
-} __packed;
+static inline
+void ath12k_dp_arch_rx_frags_cleanup(struct ath12k_dp *dp,
+ struct ath12k_dp_rx_tid *rx_tid,
+ bool rel_link_desc)
+{
+ dp->ops->rx_frags_cleanup(rx_tid, rel_link_desc);
+}
-struct htt_ext_stats_cfg_cmd {
- struct htt_ext_stats_cfg_hdr hdr;
- __le32 cfg_param0;
- __le32 cfg_param1;
- __le32 cfg_param2;
- __le32 cfg_param3;
- __le32 reserved;
- __le32 cookie_lsb;
- __le32 cookie_msb;
-} __packed;
+static inline int ath12k_dp_arch_peer_rx_tid_reo_update(struct ath12k_dp *dp,
+ struct ath12k_dp_link_peer *peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u32 ba_win_sz, u16 ssn,
+ bool update_ssn)
+{
+ return dp->ops->peer_rx_tid_reo_update(dp, peer, rx_tid,
+ ba_win_sz, ssn, update_ssn);
+}
-/* htt stats config default params */
-#define HTT_STAT_DEFAULT_RESET_START_OFFSET 0
-#define HTT_STAT_DEFAULT_CFG0_ALL_HWQS 0xffffffff
-#define HTT_STAT_DEFAULT_CFG0_ALL_TXQS 0xffffffff
-#define HTT_STAT_DEFAULT_CFG0_ALL_CMDQS 0xffff
-#define HTT_STAT_DEFAULT_CFG0_ALL_RINGS 0xffff
-#define HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS 0xff
-#define HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE 0x00
-#define HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS 0x00
-
-/* HTT_DBG_EXT_STATS_PEER_INFO
- * PARAMS:
- * @config_param0:
- * [Bit0] - [0] for sw_peer_id, [1] for mac_addr based request
- * [Bit15 : Bit 1] htt_peer_stats_req_mode_t
- * [Bit31 : Bit16] sw_peer_id
- * @config_param1:
- * peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum)
- * 0 bit htt_peer_stats_cmn_tlv
- * 1 bit htt_peer_details_tlv
- * 2 bit htt_tx_peer_rate_stats_tlv
- * 3 bit htt_rx_peer_rate_stats_tlv
- * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv
- * 5 bit htt_rx_tid_stats_tlv
- * 6 bit htt_msdu_flow_stats_tlv
- * @config_param2: [Bit31 : Bit0] mac_addr31to0
- * @config_param3: [Bit15 : Bit0] mac_addr47to32
- * [Bit31 : Bit16] reserved
- */
-#define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0)
-#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f
-
-/* Used to set different configs to the specified stats type.*/
-struct htt_ext_stats_cfg_params {
- u32 cfg0;
- u32 cfg1;
- u32 cfg2;
- u32 cfg3;
-};
+static inline int ath12k_dp_arch_rx_assign_reoq(struct ath12k_dp *dp,
+ struct ath12k_dp_peer *dp_peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u16 ssn, enum hal_pn_type pn_type)
+{
+ return dp->ops->rx_assign_reoq(dp->ab, dp_peer, rx_tid, ssn, pn_type);
+}
-enum vdev_stats_offload_timer_duration {
- ATH12K_STATS_TIMER_DUR_500MS = 1,
- ATH12K_STATS_TIMER_DUR_1SEC = 2,
- ATH12K_STATS_TIMER_DUR_2SEC = 3,
-};
+static inline void ath12k_dp_arch_peer_rx_tid_qref_setup(struct ath12k_dp *dp,
+ u16 peer_id, u16 tid,
+ dma_addr_t paddr)
+{
+ dp->ops->peer_rx_tid_qref_setup(dp->ab, peer_id, tid, paddr);
+}
-#define ATH12K_HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
-#define ATH12K_HTT_MAC_ADDR_L32_1 GENMASK(15, 8)
-#define ATH12K_HTT_MAC_ADDR_L32_2 GENMASK(23, 16)
-#define ATH12K_HTT_MAC_ADDR_L32_3 GENMASK(31, 24)
-#define ATH12K_HTT_MAC_ADDR_H16_0 GENMASK(7, 0)
-#define ATH12K_HTT_MAC_ADDR_H16_1 GENMASK(15, 8)
+static inline void ath12k_dp_arch_peer_rx_tid_qref_reset(struct ath12k_dp *dp,
+ u16 peer_id, u16 tid)
+{
+ dp->ops->peer_rx_tid_qref_reset(dp->ab, peer_id, tid);
+}
-struct htt_mac_addr {
- __le32 mac_addr_l32;
- __le32 mac_addr_h16;
-} __packed;
+static inline
+int ath12k_dp_arch_rx_tid_delete_handler(struct ath12k_dp *dp,
+ struct ath12k_dp_rx_tid_rxq *rx_tid)
+{
+ return dp->ops->rx_tid_delete_handler(dp->ab, rx_tid);
+}
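
These wrappers funnel every datapath call through the per-device ops table (struct ath12k_dp_arch_ops) so that common code never branches on the hardware generation. A minimal sketch, assuming hypothetical per-chip handler names (only the ops field names come from the wrappers above), of how a chip variant might install its table:

static struct ath12k_dp_arch_ops qcn9274_dp_arch_ops = {
	/* handler names below are hypothetical */
	.reo_cmd_send    = ath12k_dp_qcn9274_reo_cmd_send,
	.reo_cache_flush = ath12k_dp_qcn9274_reo_cache_flush,
	.service_srng    = ath12k_dp_qcn9274_service_srng,
	/* ... remaining per-chip handlers ... */
};

static void ath12k_dp_arch_ops_init(struct ath12k_dp *dp)
{
	/* in practice the table would be chosen from hw params */
	dp->ops = &qcn9274_dp_arch_ops;
}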
static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
{
@@ -1940,20 +651,39 @@ static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
memcpy(addr + 4, &addr_h16, ETH_ALEN - 4);
}
-int ath12k_dp_service_srng(struct ath12k_base *ab,
- struct ath12k_ext_irq_grp *irq_grp,
- int budget);
-int ath12k_dp_htt_connect(struct ath12k_dp *dp);
+static inline struct ath12k_dp *
+ath12k_dp_hw_grp_to_dp(struct ath12k_dp_hw_group *dp_hw_grp, u8 device_id)
+{
+ return dp_hw_grp->dp[device_id];
+}
+
+static inline int
+ath12k_dp_service_srng(struct ath12k_dp *dp, struct ath12k_ext_irq_grp *irq_grp,
+ int budget)
+{
+ return dp->ops->service_srng(dp, irq_grp, budget);
+}
+
+static inline struct ieee80211_hw *
+ath12k_pdev_dp_to_hw(struct ath12k_pdev_dp *pdev)
+{
+ return pdev->hw;
+}
+
+static inline struct ath12k_pdev_dp *
+ath12k_dp_to_pdev_dp(struct ath12k_dp *dp, u8 pdev_idx)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "ath12k dp to dp pdev called without rcu lock");
+
+ return rcu_dereference(dp->dp_pdevs[pdev_idx]);
+}
+
void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif);
-void ath12k_dp_free(struct ath12k_base *ab);
-int ath12k_dp_alloc(struct ath12k_base *ab);
-void ath12k_dp_cc_config(struct ath12k_base *ab);
void ath12k_dp_partner_cc_init(struct ath12k_base *ab);
int ath12k_dp_pdev_alloc(struct ath12k_base *ab);
void ath12k_dp_pdev_pre_alloc(struct ath12k *ar);
void ath12k_dp_pdev_free(struct ath12k_base *ab);
-int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
- int mac_id, enum hal_ring_type ring_type);
int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr);
void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr);
void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring);
@@ -1967,10 +697,8 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
struct dp_link_desc_bank *link_desc_banks,
u32 ring_type, struct hal_srng *srng,
u32 n_link_desc);
-struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
+struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_dp *dp,
u32 cookie);
-struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
+struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_dp *dp,
u32 desc_id);
-bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab);
-void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_cmn.h b/drivers/net/wireless/ath/ath12k/dp_cmn.h
new file mode 100644
index 000000000000..e17f044ff812
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_cmn.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_DP_CMN_H
+#define ATH12K_DP_CMN_H
+
+#include "cmn_defs.h"
+
+struct ath12k_hw_group;
+
+/*
+ * ML peer IDs start at 8192. Assuming a maximum SLO client count of 1536,
+ * the maximum peer ID is 9728, so the peer table size is rounded up to the
+ * next power of 2, i.e. 16384.
+ */
+#define MAX_DP_PEER_LIST_SIZE 16384
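
The 16384 above is simply roundup_pow_of_two(8192 + 1536). A minimal sketch of the arithmetic using the kernel's log2 helper; the two constants are the assumptions stated in the comment, not values taken from elsewhere in the patch:

#include <linux/log2.h>

#define ML_PEER_ID_BASE	8192	/* first ML peer ID */
#define MAX_SLO_CLIENTS	1536	/* assumed max SLO client count */

static inline unsigned long dp_peer_table_size(void)
{
	/* 8192 + 1536 = 9728, rounded up to the next power of 2 -> 16384 */
	return roundup_pow_of_two(ML_PEER_ID_BASE + MAX_SLO_CLIENTS);
}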
+
+struct ath12k_dp_hw {
+ struct ath12k_dp_peer __rcu *dp_peers[MAX_DP_PEER_LIST_SIZE];
+
+ /* Protects dp_peers and dp_peers_list */
+ spinlock_t peer_lock;
+ struct list_head dp_peers_list;
+};
+
+struct ath12k_dp_hw_group {
+ struct ath12k_dp *dp[ATH12K_MAX_DEVICES];
+};
+
+struct ath12k_dp_link_vif {
+ u32 vdev_id;
+ u8 search_type;
+ u8 hal_addr_search_flags;
+ u8 pdev_idx;
+ u8 lmac_id;
+ u16 ast_idx;
+ u16 ast_hash;
+ u16 tcl_metadata;
+ u8 vdev_id_check_en;
+ int bank_id;
+};
+
+struct ath12k_dp_vif {
+ u8 tx_encap_type;
+ u32 key_cipher;
+ atomic_t mcbc_gsn;
+ struct ath12k_dp_link_vif dp_link_vif[ATH12K_NUM_MAX_LINKS];
+};
+
+/* TODO: Move this to a separate dp_stats file */
+struct ath12k_per_peer_tx_stats {
+ u32 succ_bytes;
+ u32 retry_bytes;
+ u32 failed_bytes;
+ u32 duration;
+ u16 succ_pkts;
+ u16 retry_pkts;
+ u16 failed_pkts;
+ u16 ru_start;
+ u16 ru_tones;
+ u8 ba_fails;
+ u8 ppdu_type;
+ u32 mu_grpid;
+ u32 mu_pos;
+ bool is_ampdu;
+};
+
+struct ath12k_dp_peer_create_params {
+ struct ieee80211_sta *sta;
+ bool is_mlo;
+ u16 peer_id;
+ bool ucast_ra_only;
+};
+
+struct ath12k_dp_link_peer_rate_info {
+ struct rate_info txrate;
+ u64 rx_duration;
+ u64 tx_duration;
+ u8 rssi_comb;
+ s8 signal_avg;
+};
+
+static inline struct ath12k_dp_link_vif *
+ath12k_dp_vif_to_dp_link_vif(struct ath12k_dp_vif *dp_vif, u8 link_id)
+{
+ return &dp_vif->dp_link_vif[link_id];
+}
+
+void ath12k_dp_cmn_device_deinit(struct ath12k_dp *dp);
+int ath12k_dp_cmn_device_init(struct ath12k_dp *dp);
+void ath12k_dp_cmn_hw_group_unassign(struct ath12k_dp *dp,
+ struct ath12k_hw_group *ag);
+void ath12k_dp_cmn_hw_group_assign(struct ath12k_dp *dp,
+ struct ath12k_hw_group *ag);
+int ath12k_dp_link_peer_assign(struct ath12k_dp *dp, struct ath12k_dp_hw *dp_hw,
+ u8 vdev_id, struct ieee80211_sta *sta, u8 *addr,
+ u8 link_id, u32 hw_link_id);
+void ath12k_dp_link_peer_unassign(struct ath12k_dp *dp, struct ath12k_dp_hw *dp_hw,
+ u8 vdev_id, u8 *addr, u32 hw_link_id);
+void
+ath12k_dp_link_peer_get_sta_rate_info_stats(struct ath12k_dp *dp, const u8 *addr,
+ struct ath12k_dp_link_peer_rate_info *info);
+void ath12k_dp_link_peer_reset_rx_stats(struct ath12k_dp *dp, const u8 *addr);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_htt.c b/drivers/net/wireless/ath/ath12k/dp_htt.c
new file mode 100644
index 000000000000..cc71c5c5de5a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_htt.c
@@ -0,0 +1,1353 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "core.h"
+#include "peer.h"
+#include "htc.h"
+#include "dp_htt.h"
+#include "debugfs_htt_stats.h"
+#include "debugfs.h"
+
+static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+int ath12k_dp_htt_connect(struct ath12k_dp *dp)
+{
+ struct ath12k_htc_svc_conn_req conn_req = {};
+ struct ath12k_htc_svc_conn_resp conn_resp = {};
+ int status;
+
+ conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;
+
+ /* connect to control service */
+ conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;
+
+ status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
+ &conn_resp);
+
+ if (status)
+ return status;
+
+ dp->eid = conn_resp.eid;
+
+ return 0;
+}
+
+static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
+ u16 peer_id)
+{
+ int i;
+
+ for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
+ if (ppdu_stats->user_stats[i].is_valid_peer_id) {
+ if (peer_id == ppdu_stats->user_stats[i].peer_id)
+ return i;
+ } else {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
+ u16 tag, u16 len, const void *ptr,
+ void *data)
+{
+ const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
+ const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
+ const struct htt_ppdu_stats_user_rate *user_rate;
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct htt_ppdu_user_stats *user_stats;
+ int cur_user;
+ u16 peer_id;
+
+ ppdu_info = data;
+
+ switch (tag) {
+ case HTT_PPDU_STATS_TAG_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_common)) {
+ ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+ memcpy(&ppdu_info->ppdu_stats.common, ptr,
+ sizeof(struct htt_ppdu_stats_common));
+ break;
+ case HTT_PPDU_STATS_TAG_USR_RATE:
+ if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
+ ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+ user_rate = ptr;
+ peer_id = le16_to_cpu(user_rate->sw_peer_id);
+ cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy(&user_stats->rate, ptr,
+ sizeof(struct htt_ppdu_stats_user_rate));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
+ ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ cmplt_cmn = ptr;
+ peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
+ cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy(&user_stats->cmpltn_cmn, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
+ if (len <
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
+ ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ ba_status = ptr;
+ peer_id = le16_to_cpu(ba_status->sw_peer_id);
+ cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy(&user_stats->ack_ba, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ }
+ return 0;
+}
+
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const struct htt_tlv *tlv;
+ const void *begin = ptr;
+ u16 tlv_tag, tlv_len;
+ int ret = -EINVAL;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+ tlv = (struct htt_tlv *)ptr;
+ tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
+ tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret == -ENOMEM)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+ return 0;
+}
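
ath12k_dp_htt_tlv_iter() walks a buffer of (header, payload) records, checking each TLV length against the bytes remaining before handing the payload to the callback. A usage sketch with a hypothetical callback that counts user-rate TLVs in a PPDU stats payload:

static int count_usr_rate_tlvs(struct ath12k_base *ab, u16 tag, u16 len,
			       const void *ptr, void *data)
{
	if (tag == HTT_PPDU_STATS_TAG_USR_RATE)
		(*(int *)data)++;

	return 0;
}

/* int n = 0;
 * ath12k_dp_htt_tlv_iter(ab, msg->data, payload_len, count_usr_rate_tlvs, &n);
 */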
+
+static void
+ath12k_update_per_peer_tx_stats(struct ath12k_pdev_dp *dp_pdev,
+ struct htt_ppdu_stats *ppdu_stats, u8 user)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_dp_link_peer *peer;
+ struct htt_ppdu_stats_user_rate *user_rate;
+ struct ath12k_per_peer_tx_stats *peer_stats = &dp_pdev->peer_tx_stats;
+ struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
+ struct htt_ppdu_stats_common *common = &ppdu_stats->common;
+ int ret;
+ u8 flags, mcs, nss, bw, sgi, dcm, ppdu_type, rate_idx = 0;
+ u32 v, succ_bytes = 0;
+ u16 tones, rate = 0, succ_pkts = 0;
+ u32 tx_duration = 0;
+ u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
+ u16 tx_retry_failed = 0, tx_retry_count = 0;
+ bool is_ampdu = false, is_ofdma;
+
+ if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
+ return;
+
+ if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) {
+ is_ampdu =
+ HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
+ tx_retry_failed =
+ __le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_tried) -
+ __le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_success);
+ tx_retry_count =
+ HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
+ HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
+ }
+
+ if (usr_stats->tlv_flags &
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
+ succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
+ succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
+ HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
+ tid = le32_get_bits(usr_stats->ack_ba.info,
+ HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
+ }
+
+ if (common->fes_duration_us)
+ tx_duration = le32_to_cpu(common->fes_duration_us);
+
+ user_rate = &usr_stats->rate;
+ flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
+ bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
+ nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
+ mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
+ sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
+ dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
+
+ ppdu_type = HTT_USR_RATE_PPDU_TYPE(user_rate->info1);
+ is_ofdma = (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA) ||
+ (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA);
+
+ /* Note: if the host has configured fixed rates, and in some other
+ * special cases, broadcast/management frames are sent at different
+ * rates. Should firmware rate control be skipped for these?
+ */
+
+ if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
+ ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
+ mcs, nss);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
+ ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
+ flags,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ return;
+ }
+
+ rcu_read_lock();
+ peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, usr_stats->peer_id);
+
+ if (!peer || !peer->sta) {
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&dp->dp_lock);
+
+ memset(&peer->txrate, 0, sizeof(peer->txrate));
+
+ peer->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
+
+ switch (flags) {
+ case WMI_RATE_PREAMBLE_OFDM:
+ peer->txrate.legacy = rate;
+ break;
+ case WMI_RATE_PREAMBLE_CCK:
+ peer->txrate.legacy = rate;
+ break;
+ case WMI_RATE_PREAMBLE_HT:
+ peer->txrate.mcs = mcs + 8 * (nss - 1);
+ peer->txrate.flags = RATE_INFO_FLAGS_MCS;
+ if (sgi)
+ peer->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ peer->txrate.mcs = mcs;
+ peer->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (sgi)
+ peer->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case WMI_RATE_PREAMBLE_HE:
+ peer->txrate.mcs = mcs;
+ peer->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ peer->txrate.he_dcm = dcm;
+ peer->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+ tones = le16_to_cpu(user_rate->ru_end) -
+ le16_to_cpu(user_rate->ru_start) + 1;
+ v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
+ peer->txrate.he_ru_alloc = v;
+ if (is_ofdma)
+ peer->txrate.bw = RATE_INFO_BW_HE_RU;
+ break;
+ case WMI_RATE_PREAMBLE_EHT:
+ peer->txrate.mcs = mcs;
+ peer->txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
+ peer->txrate.he_dcm = dcm;
+ peer->txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
+ tones = le16_to_cpu(user_rate->ru_end) -
+ le16_to_cpu(user_rate->ru_start) + 1;
+ v = ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(tones);
+ peer->txrate.eht_ru_alloc = v;
+ if (is_ofdma)
+ peer->txrate.bw = RATE_INFO_BW_EHT_RU;
+ break;
+ }
+
+ peer->tx_retry_failed += tx_retry_failed;
+ peer->tx_retry_count += tx_retry_count;
+ peer->txrate.nss = nss;
+ peer->tx_duration += tx_duration;
+ memcpy(&peer->last_txrate, &peer->txrate, sizeof(struct rate_info));
+
+ spin_unlock_bh(&dp->dp_lock);
+
+ /* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
+ * So skip peer stats update for mgmt packets.
+ */
+ if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
+ memset(peer_stats, 0, sizeof(*peer_stats));
+ peer_stats->succ_pkts = succ_pkts;
+ peer_stats->succ_bytes = succ_bytes;
+ peer_stats->is_ampdu = is_ampdu;
+ peer_stats->duration = tx_duration;
+ peer_stats->ba_fails =
+ HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
+ HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
+ }
+
+ rcu_read_unlock();
+}
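
The HT branch above folds the spatial-stream count into the rate index because mac80211 reports HT rates as a flat MCS number, eight values per stream. A worked sketch of the mapping:

static inline u8 ht_flat_mcs(u8 per_stream_mcs, u8 nss)
{
	/* e.g. nss = 2, per-stream MCS 7 -> flat HT MCS 15 */
	return per_stream_mcs + 8 * (nss - 1);
}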
+
+static void ath12k_htt_update_ppdu_stats(struct ath12k_pdev_dp *dp_pdev,
+ struct htt_ppdu_stats *ppdu_stats)
+{
+ u8 user;
+
+ for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
+ ath12k_update_per_peer_tx_stats(dp_pdev, ppdu_stats, user);
+}
+
+static
+struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k_pdev_dp *dp_pdev,
+ u32 ppdu_id)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+
+ lockdep_assert_held(&dp_pdev->ppdu_list_lock);
+ if (!list_empty(&dp_pdev->ppdu_stats_info)) {
+ list_for_each_entry(ppdu_info, &dp_pdev->ppdu_stats_info, list) {
+ if (ppdu_info->ppdu_id == ppdu_id)
+ return ppdu_info;
+ }
+
+ if (dp_pdev->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
+ ppdu_info = list_first_entry(&dp_pdev->ppdu_stats_info,
+ typeof(*ppdu_info), list);
+ list_del(&ppdu_info->list);
+ dp_pdev->ppdu_stat_list_depth--;
+ ath12k_htt_update_ppdu_stats(dp_pdev, &ppdu_info->ppdu_stats);
+ kfree(ppdu_info);
+ }
+ }
+
+ ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
+ if (!ppdu_info)
+ return NULL;
+
+ list_add_tail(&ppdu_info->list, &dp_pdev->ppdu_stats_info);
+ dp_pdev->ppdu_stat_list_depth++;
+
+ return ppdu_info;
+}
+
+static void ath12k_copy_to_delay_stats(struct ath12k_dp_link_peer *peer,
+ struct htt_ppdu_user_stats *usr_stats)
+{
+ peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
+ peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
+ peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
+ peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
+ peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
+ peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
+ peer->ppdu_stats_delayba.resp_rate_flags =
+ le32_to_cpu(usr_stats->rate.resp_rate_flags);
+
+ peer->delayba_flag = true;
+}
+
+static void ath12k_copy_to_bar(struct ath12k_dp_link_peer *peer,
+ struct htt_ppdu_user_stats *usr_stats)
+{
+ usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
+ usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
+ usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
+ usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
+ usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
+ usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
+ usr_stats->rate.resp_rate_flags =
+ cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);
+
+ peer->delayba_flag = false;
+}
+
+static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_htt_ppdu_stats_msg *msg;
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct ath12k_dp_link_peer *peer = NULL;
+ struct htt_ppdu_user_stats *usr_stats = NULL;
+ u32 peer_id = 0;
+ struct ath12k_pdev_dp *dp_pdev;
+ int ret, i;
+ u8 pdev_id, pdev_idx;
+ u32 ppdu_id, len;
+
+ msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
+ len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
+ if (len > (skb->len - struct_size(msg, data, 0))) {
+ ath12k_warn(ab,
+ "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
+ len, skb->len);
+ return -EINVAL;
+ }
+
+ pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
+ ppdu_id = le32_to_cpu(msg->ppdu_id);
+
+ pdev_idx = DP_HW2SW_MACID(pdev_id);
+ if (pdev_idx >= MAX_RADIOS) {
+ ath12k_warn(ab, "HTT PPDU STATS invalid pdev id %u", pdev_id);
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+
+ dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx);
+ if (!dp_pdev) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ spin_lock_bh(&dp_pdev->ppdu_list_lock);
+ ppdu_info = ath12k_dp_htt_get_ppdu_desc(dp_pdev, ppdu_id);
+ if (!ppdu_info) {
+ spin_unlock_bh(&dp_pdev->ppdu_list_lock);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ppdu_info->ppdu_id = ppdu_id;
+ ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath12k_htt_tlv_ppdu_stats_parse,
+ (void *)ppdu_info);
+ if (ret) {
+ spin_unlock_bh(&dp_pdev->ppdu_list_lock);
+ ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
+ goto exit;
+ }
+
+ if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
+ spin_unlock_bh(&dp_pdev->ppdu_list_lock);
+ ath12k_warn(ab,
+ "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
+ ppdu_info->ppdu_stats.common.num_users,
+ HTT_PPDU_STATS_MAX_USERS);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* back up data rate tlv for all peers */
+ if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
+ (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
+ ppdu_info->delay_ba) {
+ for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
+ peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
+ peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, peer_id);
+ if (!peer)
+ continue;
+
+ usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
+ if (usr_stats->delay_ba)
+ ath12k_copy_to_delay_stats(peer, usr_stats);
+ }
+ }
+
+ /* restore all peers' data rate tlv to mu-bar tlv */
+ if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
+ (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
+ for (i = 0; i < ppdu_info->bar_num_users; i++) {
+ peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
+ peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, peer_id);
+ if (!peer)
+ continue;
+
+ usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
+ if (peer->delayba_flag)
+ ath12k_copy_to_bar(peer, usr_stats);
+ }
+ }
+
+ spin_unlock_bh(&dp_pdev->ppdu_list_lock);
+
+exit:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_htt_mlo_offset_msg *msg;
+ struct ath12k_pdev *pdev;
+ struct ath12k *ar;
+ u8 pdev_id;
+
+ msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
+ pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
+ HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ /* It is possible that the ar is not yet active (started).
+ * The above function will only look for the active pdev
+ * and hence %NULL return is possible. Just silently
+ * discard this message
+ */
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ pdev = ar->pdev;
+
+ pdev->timestamp.info = __le32_to_cpu(msg->info);
+ pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
+ pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
+ pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
+ pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
+ pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
+ pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
+ pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
+
+ spin_unlock_bh(&ar->data_lock);
+exit:
+ rcu_read_unlock();
+}
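
The handler stores the two offset words verbatim; a consumer wanting the offset as a single microsecond value could recombine them as below (helper name is an assumption; the hi/lo split follows the message format):

static inline u64 ath12k_mlo_offset_us(const struct ath12k_pdev *pdev)
{
	return ((u64)pdev->timestamp.mlo_offset_hi << 32) |
	       pdev->timestamp.mlo_offset_lo;
}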
+
+void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
+ enum htt_t2h_msg_type type;
+ u16 peer_id;
+ u8 vdev_id;
+ u8 mac_addr[ETH_ALEN];
+ u16 peer_mac_h16;
+ u16 ast_hash = 0;
+ u16 hw_peer_id;
+
+ type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);
+
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
+
+ switch (type) {
+ case HTT_T2H_MSG_TYPE_VERSION_CONF:
+ dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
+ HTT_T2H_VERSION_CONF_MAJOR);
+ dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
+ HTT_T2H_VERSION_CONF_MINOR);
+ complete(&dp->htt_tgt_version_received);
+ break;
+ /* TODO: remove unused peer map versions after testing */
+ case HTT_T2H_MSG_TYPE_PEER_MAP:
+ vdev_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_VDEV_ID);
+ peer_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_PEER_ID);
+ peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
+ ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
+ peer_mac_h16, mac_addr);
+ ath12k_dp_link_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP2:
+ vdev_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_VDEV_ID);
+ peer_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_PEER_ID);
+ peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
+ ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
+ peer_mac_h16, mac_addr);
+ ast_hash = le32_get_bits(resp->peer_map_ev.info2,
+ HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
+ hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
+ HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
+ ath12k_dp_link_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
+ hw_peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP3:
+ vdev_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_VDEV_ID);
+ peer_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_PEER_ID);
+ peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
+ ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
+ peer_mac_h16, mac_addr);
+ ast_hash = le32_get_bits(resp->peer_map_ev.info2,
+ HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
+ hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
+ HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
+ ath12k_dp_link_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
+ hw_peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
+ peer_id = le32_get_bits(resp->peer_unmap_ev.info,
+ HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
+ ath12k_dp_link_peer_unmap_event(ab, peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
+ ath12k_htt_pull_ppdu_stats(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ ath12k_debugfs_htt_ext_stats_handler(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
+ ath12k_htt_mlo_offset_event_handler(ab, skb);
+ break;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
+ type);
+ break;
+ }
+
+ dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ath12k_dp_htt_htc_t2h_msg_handler);
+
+static int
+ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
+ int mac_id, u32 ring_id,
+ enum hal_ring_type ring_type,
+ enum htt_srng_ring_type *htt_ring_type,
+ enum htt_srng_ring_id *htt_ring_id)
+{
+ int ret = 0;
+
+ switch (ring_type) {
+ case HAL_RXDMA_BUF:
+ /* for some targets the host supplies rx buffers to the fw, and the
+ * fw fills the rxbuf ring for each rxdma
+ */
+ if (!ab->hw_params->rx_mac_buf_ring) {
+ if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
+ ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
+ ret = -EINVAL;
+ }
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ } else {
+ if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
+ *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
+ *htt_ring_type = HTT_SW_TO_SW_RING;
+ } else {
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ }
+ }
+ break;
+ case HAL_RXDMA_DST:
+ *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ *htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DST:
+ *htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DESC:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ default:
+ ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type)
+{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct htt_srng_setup_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ u32 ring_entry_sz;
+ int len = sizeof(*cmd);
+ dma_addr_t hp_addr, tp_addr;
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath12k_hal_srng_get_params(ab, srng, &params);
+
+ hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
+ tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
+
+ ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_srng_setup_cmd *)skb->data;
+ cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
+ HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
+ HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
+ else
+ cmd->info0 |= le32_encode_bits(mac_id,
+ HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
+ cmd->info0 |= le32_encode_bits(htt_ring_type,
+ HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
+ cmd->info0 |= le32_encode_bits(htt_ring_id,
+ HTT_SRNG_SETUP_CMD_INFO0_RING_ID);
+
+ cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
+ HAL_ADDR_LSB_REG_MASK);
+
+ cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT);
+
+ ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
+ if (ret < 0)
+ goto err_free;
+
+ ring_entry_sz = ret;
+
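+ /* convert the ring entry size from bytes to 4-byte words */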
+ ring_entry_sz >>= 2;
+ cmd->info1 = le32_encode_bits(ring_entry_sz,
+ HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
+ cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
+ HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
+ cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
+ cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
+ cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
+ if (htt_ring_type == HTT_SW_TO_HW_RING)
+ cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);
+
+ cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
+ cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));
+
+ cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
+ cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));
+
+ cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
+ cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
+ cmd->msi_data = cpu_to_le32(params.msi_data);
+
+ cmd->intr_info =
+ le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
+ HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
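+ /* the timer threshold appears to be programmed in units of 8 us */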
+ cmd->intr_info |=
+ le32_encode_bits(params.intr_timer_thres_us >> 3,
+ HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);
+
+ cmd->info2 = 0;
+ if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ cmd->info2 = le32_encode_bits(params.low_threshold,
+ HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
+ __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
+ cmd->msi_data);
+
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
+ ring_id, ring_type, cmd->intr_info, cmd->info2);
+
+ ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct sk_buff *skb;
+ struct htt_ver_req_cmd *cmd;
+ int len = sizeof(*cmd);
+ u32 metadata_version;
+ int ret;
+
+ init_completion(&dp->htt_tgt_version_received);
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ver_req_cmd *)skb->data;
+ cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
+ HTT_OPTION_TAG);
+ metadata_version = ath12k_ftm_mode ? HTT_OPTION_TCL_METADATA_VER_V1 :
+ HTT_OPTION_TCL_METADATA_VER_V2;
+
+ cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
+ HTT_OPTION_TAG) |
+ le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
+ HTT_OPTION_LEN) |
+ le32_encode_bits(metadata_version,
+ HTT_OPTION_VALUE);
+
+ ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
+ HTT_TARGET_VERSION_TIMEOUT_HZ);
+ if (ret == 0) {
+ ath12k_warn(ab, "htt target version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
+ ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
+ dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct sk_buff *skb;
+ struct htt_ppdu_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ u8 pdev_mask;
+ int ret;
+ int i;
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
+ cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
+ HTT_PPDU_STATS_CFG_MSG_TYPE);
+
+ pdev_mask = 1 << (i + ar->pdev_idx);
+ cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
+ cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);
+
+ ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter)
+{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct htt_rx_ring_selection_cfg_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ int len = sizeof(*cmd);
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath12k_hal_srng_get_params(ab, srng, &params);
+
+ ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
+ cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |=
+ le32_encode_bits(DP_SW2HW_MACID(mac_id),
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+ else
+ cmd->info0 |=
+ le32_encode_bits(mac_id,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+ cmd->info0 |= le32_encode_bits(htt_ring_id,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
+ cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
+ cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
+ cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID);
+ cmd->info0 |=
+ le32_encode_bits(tlv_filter->drop_threshold_valid,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL);
+ cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON);
+
+ cmd->info1 = le32_encode_bits(rx_buf_size,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
+ cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
+ cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
+ cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
+ cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
+ cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
+ cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
+ cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
+ cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
+
+ cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD);
+ cmd->info2 |=
+ le32_encode_bits(tlv_filter->enable_log_mgmt_type,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE);
+ cmd->info2 |=
+ le32_encode_bits(tlv_filter->enable_log_ctrl_type,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE);
+ cmd->info2 |=
+ le32_encode_bits(tlv_filter->enable_log_data_type,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE);
+
+ cmd->info3 =
+ le32_encode_bits(tlv_filter->enable_rx_tlv_offset,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET);
+ cmd->info3 |=
+ le32_encode_bits(tlv_filter->rx_tlv_offset,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET);
+
+ if (tlv_filter->offset_valid) {
+ cmd->rx_packet_offset =
+ le32_encode_bits(tlv_filter->rx_packet_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);
+
+ cmd->rx_packet_offset |=
+ le32_encode_bits(tlv_filter->rx_header_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);
+
+ cmd->rx_mpdu_offset =
+ le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);
+
+ cmd->rx_mpdu_offset |=
+ le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);
+
+ cmd->rx_msdu_offset =
+ le32_encode_bits(tlv_filter->rx_msdu_end_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);
+
+ cmd->rx_msdu_offset |=
+ le32_encode_bits(tlv_filter->rx_msdu_start_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);
+
+ cmd->rx_attn_offset =
+ le32_encode_bits(tlv_filter->rx_attn_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
+ }
+
+ if (tlv_filter->rx_mpdu_start_wmask > 0 &&
+ tlv_filter->rx_msdu_end_wmask > 0) {
+ cmd->info2 |=
+ le32_encode_bits(true,
+ HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
+ cmd->rx_mpdu_start_end_mask =
+ le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
+ /* mpdu_end is not used by any hardware so far; if a future chip
+ * needs it, assign it through the hal ops
+ */
+ cmd->rx_mpdu_start_end_mask |=
+ le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
+ cmd->rx_msdu_end_word_mask =
+ le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
+ }
+
+ ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath12k_dp_tx_htt_rx_filter_setup);
+
+int
+ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct sk_buff *skb;
+ struct htt_ext_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+ u32 pdev_id;
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
+
+ pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ cmd->hdr.pdev_mask = 1 << pdev_id;
+
+ cmd->hdr.stats_type = type;
+ cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
+ cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
+ cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
+ cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
+ cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
+ cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));
+
+ ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ ath12k_warn(ab, "failed to send htt type stats request: %d",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
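
The 64-bit cookie is split across two __le32 words so the later confirmation event can be matched back to this request. A sketch of the inverse on the receive side; the field names mirror the command above, but the confirmation layout itself is an assumption:

static u64 htt_stats_conf_cookie(__le32 cookie_lsb, __le32 cookie_msb)
{
	return ((u64)le32_to_cpu(cookie_msb) << 32) | le32_to_cpu(cookie_lsb);
}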
+
+int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
+{
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
+ if (ret) {
+ ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
+ int ret, ring_id, i;
+
+ tlv_filter.offset_valid = false;
+
+ if (!reset) {
+ tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;
+
+ tlv_filter.drop_threshold_valid = true;
+ tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE;
+
+ tlv_filter.enable_log_mgmt_type = true;
+ tlv_filter.enable_log_ctrl_type = true;
+ tlv_filter.enable_log_data_type = true;
+
+ tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+ tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+ tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+
+ tlv_filter.enable_rx_tlv_offset = true;
+ tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET;
+
+ tlv_filter.pkt_filter_flags0 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath12k_mac_mon_status_filter_default;
+
+ if (ath12k_debugfs_is_extd_rx_stats_enabled(ar))
+ tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
+ }
+
+ if (ab->hw_params->rxdma1_enable) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_DST,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for monitor buf %d\n",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+ }
+
+ if (!reset) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ i,
+ HAL_RXDMA_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for mon rx buf %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ if (!reset) {
+ tlv_filter.rx_filter =
+ HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ }
+
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ i,
+ HAL_RXDMA_MONITOR_STATUS,
+ RX_MON_STATUS_BUF_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for mon status buf %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int tx_buf_size,
+ struct htt_tx_ring_tlv_filter *htt_tlv_filter)
+{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct htt_tx_ring_selection_cfg_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ int len = sizeof(*cmd);
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath12k_hal_srng_get_params(ab, srng, &params);
+
+ ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;
+ cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |=
+ le32_encode_bits(DP_SW2HW_MACID(mac_id),
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+ else
+ cmd->info0 |=
+ le32_encode_bits(mac_id,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+ cmd->info0 |= le32_encode_bits(htt_ring_id,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
+ cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
+ cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);
+
+ cmd->info1 |=
+ le32_encode_bits(tx_buf_size,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);
+
+ if (htt_tlv_filter->tx_mon_mgmt_filter) {
+ cmd->info1 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
+ cmd->info1 |=
+ le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
+ cmd->info2 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
+ }
+
+ if (htt_tlv_filter->tx_mon_data_filter) {
+ cmd->info1 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
+ cmd->info1 |=
+ le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
+ cmd->info2 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
+ }
+
+ if (htt_tlv_filter->tx_mon_ctrl_filter) {
+ cmd->info1 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
+ cmd->info1 |=
+ le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
+ cmd->info2 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
+ }
+
+ cmd->tlv_filter_mask_in0 =
+ cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
+ cmd->tlv_filter_mask_in1 =
+ cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
+ cmd->tlv_filter_mask_in2 =
+ cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
+ cmd->tlv_filter_mask_in3 =
+ cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);
+
+ ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath12k/dp_htt.h b/drivers/net/wireless/ath/ath12k/dp_htt.h
new file mode 100644
index 000000000000..6020e632f74e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_htt.h
@@ -0,0 +1,1546 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_DP_HTT_H
+#define ATH12K_DP_HTT_H
+
+struct ath12k_dp;
+
+/* HTT definitions */
+#define HTT_TAG_TCL_METADATA_VERSION 5
+
+#define HTT_TCL_META_DATA_TYPE GENMASK(1, 0)
+#define HTT_TCL_META_DATA_VALID_HTT BIT(2)
+
+/* vdev meta data */
+#define HTT_TCL_META_DATA_VDEV_ID GENMASK(10, 3)
+#define HTT_TCL_META_DATA_PDEV_ID GENMASK(12, 11)
+#define HTT_TCL_META_DATA_HOST_INSPECTED_MISSION BIT(13)
+
+/* peer meta data */
+#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 3)
+
+/* Global sequence number */
+#define HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM 3
+#define HTT_TCL_META_DATA_GLOBAL_SEQ_HOST_INSPECTED BIT(2)
+#define HTT_TCL_META_DATA_GLOBAL_SEQ_NUM GENMASK(14, 3)
+#define HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID 128
+
+/* HTT tx completion is overlaid in wbm_release_ring */
+#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13)
+#define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON GENMASK(3, 0)
+#define HTT_TX_WBM_COMP_INFO1_EXCEPTION_FRAME BIT(4)
+
+#define HTT_TX_WBM_COMP_INFO2_ACK_RSSI GENMASK(31, 24)
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
+
+struct htt_tx_wbm_completion {
+ __le32 rsvd0[2];
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 rsvd1;
+
+} __packed;
+
+enum htt_h2t_msg_type {
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+ HTT_H2T_MSG_TYPE_SRING_SETUP = 0xb,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
+ HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11,
+ HTT_H2T_MSG_TYPE_VDEV_TXRX_STATS_CFG = 0x1a,
+ HTT_H2T_MSG_TYPE_TX_MONITOR_CFG = 0x1b,
+};
+
+#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
+#define HTT_OPTION_TCL_METADATA_VER_V1 1
+#define HTT_OPTION_TCL_METADATA_VER_V2 2
+#define HTT_OPTION_TAG GENMASK(7, 0)
+#define HTT_OPTION_LEN GENMASK(15, 8)
+#define HTT_OPTION_VALUE GENMASK(31, 16)
+#define HTT_TCL_METADATA_VER_SZ 4
+
+struct htt_ver_req_cmd {
+ __le32 ver_reg_info;
+ __le32 tcl_metadata_version;
+} __packed;
+
+enum htt_srng_ring_type {
+ HTT_HW_TO_SW_RING,
+ HTT_SW_TO_HW_RING,
+ HTT_SW_TO_SW_RING,
+};
+
+enum htt_srng_ring_id {
+ HTT_RXDMA_HOST_BUF_RING,
+ HTT_RXDMA_MONITOR_STATUS_RING,
+ HTT_RXDMA_MONITOR_BUF_RING,
+ HTT_RXDMA_MONITOR_DESC_RING,
+ HTT_RXDMA_MONITOR_DEST_RING,
+ HTT_HOST1_TO_FW_RXBUF_RING,
+ HTT_HOST2_TO_FW_RXBUF_RING,
+ HTT_RXDMA_NON_MONITOR_DEST_RING,
+ HTT_RXDMA_HOST_BUF_RING2,
+ HTT_TX_MON_HOST2MON_BUF_RING,
+ HTT_TX_MON_MON2HOST_DEST_RING,
+ HTT_RX_MON_HOST2MON_BUF_RING,
+ HTT_RX_MON_MON2HOST_DEST_RING,
+};
+
+/* host -> target HTT_SRING_SETUP message
+ *
+ * After the target has booted, the host can send an SRING setup message for
+ * each host-facing LMAC SRING. The target sets up HW registers based on the
+ * setup message and confirms back to the host if response_required is set.
+ * The host should wait for the confirmation message before sending a new
+ * SRING setup message.
+ *
+ * The message would appear as follows:
+ *
+ * |31 24|23 20|19|18 16|15|14 8|7 0|
+ * |--------------- +-----------------+----------------+------------------|
+ * | ring_type | ring_id | pdev_id | msg_type |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_hi |
+ * |----------------------------------------------------------------------|
+ * |ring_misc_cfg_flag|ring_entry_size| ring_size |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_data |
+ * |----------------------------------------------------------------------|
+ * | intr_timer_th |IM| intr_batch_counter_th |
+ * |----------------------------------------------------------------------|
+ * | reserved |RR|PTCF| intr_low_threshold |
+ * |----------------------------------------------------------------------|
+ * Where
+ * IM = sw_intr_mode
+ * RR = response_required
+ * PTCF = prefetch_timer_cfg
+ *
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_SRING_SETUP
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id: identify which ring is to be set up;
+ * more details can be found in enum htt_srng_ring_id
+ * b'24:31 - ring_type: identify the type of host ring;
+ * more details can be found in enum htt_srng_ring_type
+ * dword1 - b'0:31 - ring_base_addr_lo: Lower 32bits of ring base address
+ * dword2 - b'0:31 - ring_base_addr_hi: Upper 32bits of ring base address
+ * dword3 - b'0:15 - ring_size: size of the ring in units of 4-byte words
+ * b'16:23 - ring_entry_size: Size of each entry in 4-byte word units
+ * b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and
+ * SW_TO_HW_RING.
+ * Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs.
+ * dword4 - b'0:31 - ring_head_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword5 - b'0:31 - ring_head_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword6 - b'0:31 - ring_tail_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword7 - b'0:31 - ring_tail_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword8 - b'0:31 - ring_msi_addr_lo: Lower 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword9 - b'0:31 - ring_msi_addr_hi: Upper 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword10 - b'0:31 - ring_msi_data: MSI data
+ * Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword11 - b'0:14 - intr_batch_counter_th:
+ * batch counter threshold is in units of 4-byte words.
+ * HW internally maintains and increments batch count.
+ * (see SRING spec for a detailed description).
+ * When batch count reaches threshold value, an interrupt
+ * is generated by HW.
+ * b'15 - sw_intr_mode:
+ * This configuration shall be static.
+ * Only programmed at power up.
+ * 0: generate pulse style sw interrupts
+ * 1: generate level style sw interrupts
+ * b'16:31 - intr_timer_th:
+ * The timer init value when timer is idle or is
+ * initialized to start downcounting.
+ * In 8us units (to cover a range of 0 to 524 ms)
+ * dword12 - b'0:15 - intr_low_threshold:
+ * Used only by Consumer ring to generate ring_sw_int_p.
+ * Ring entries low threshold water mark that is used
+ * in combination with the interrupt timer as well as
+ * the clearing of the level interrupt.
+ * b'16:18 - prefetch_timer_cfg:
+ * Used only by Consumer ring to set timer mode to
+ * support Application prefetch handling.
+ * The external tail offset/pointer will be updated
+ * at following intervals:
+ * 3'b000: (Prefetch feature disabled; used only for debug)
+ * 3'b001: 1 usec
+ * 3'b010: 4 usec
+ * 3'b011: 8 usec (default)
+ * 3'b100: 16 usec
+ * Others: Reserved
+ * b'19 - response_required:
+ * Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
+ * b'20:31 - reserved: reserved for future use
+ */
+
+#define HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE GENMASK(31, 24)
+
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS BIT(25)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP BIT(27)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP BIT(28)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP BIT(29)
+
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH GENMASK(14, 0)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_SW_INTR_MODE BIT(15)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH GENMASK(31, 16)
+
+#define HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO2_PRE_FETCH_TIMER_CFG GENMASK(18, 16)
+#define HTT_SRNG_SETUP_CMD_INFO2_RESPONSE_REQUIRED BIT(19)
+
+struct htt_srng_setup_cmd {
+ __le32 info0;
+ __le32 ring_base_addr_lo;
+ __le32 ring_base_addr_hi;
+ __le32 info1;
+ __le32 ring_head_off32_remote_addr_lo;
+ __le32 ring_head_off32_remote_addr_hi;
+ __le32 ring_tail_off32_remote_addr_lo;
+ __le32 ring_tail_off32_remote_addr_hi;
+ __le32 ring_msi_addr_lo;
+ __le32 ring_msi_addr_hi;
+ __le32 msi_data;
+ __le32 intr_info;
+ __le32 info2;
+} __packed;
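+
+/* Example (illustrative sketch, not part of this patch): dword0 of the
+ * SRING setup command is packed with the INFO0 masks above, e.g. for an
+ * LMAC-level HW_TO_SW host buffer ring on pdev 1:
+ *
+ * cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
+ *                               HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE) |
+ *              le32_encode_bits(1, HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID) |
+ *              le32_encode_bits(HTT_RXDMA_HOST_BUF_RING,
+ *                               HTT_SRNG_SETUP_CMD_INFO0_RING_ID) |
+ *              le32_encode_bits(HTT_HW_TO_SW_RING,
+ *                               HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
+ */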
+
+/* host -> target FW PPDU_STATS config message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW PPDU_STATS_CFG message.
+ * The message allows the host to configure the PPDU_STATS_IND messages
+ * produced by the target.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | REQ bit mask | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a request to configure ppdu_stats_ind from target
+ * Value: 0x11
+ * - PDEV_MASK
+ * Bits 8:15
+ * Purpose: identifies which pdevs this PPDU stats configuration applies to
+ * Value: This is an overloaded field; refer to usage and interpretation of
+ * PDEV in interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - REQ_TLV_BIT_MASK
+ * Bits 16:31
+ * Purpose: each set bit indicates the corresponding PPDU stats TLV type
+ * needs to be included in the target's PPDU_STATS_IND messages.
+ * Value: refer to htt_ppdu_stats_tlv_tag_t
+ *
+ */
+
+struct htt_ppdu_stats_cfg_cmd {
+ __le32 msg;
+} __packed;
+
+#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
+#define HTT_PPDU_STATS_CFG_SOC_STATS BIT(8)
+#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 9)
+#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
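+
+/* Example (illustrative sketch, not part of this patch): the whole config
+ * command is a single dword packed with the masks above, e.g. requesting a
+ * TLV bitmask such as HTT_PPDU_STATS_TAG_DEFAULT (defined below) on pdev 1:
+ *
+ * cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
+ *                             HTT_PPDU_STATS_CFG_MSG_TYPE) |
+ *            le32_encode_bits(1, HTT_PPDU_STATS_CFG_PDEV_ID) |
+ *            le32_encode_bits(HTT_PPDU_STATS_TAG_DEFAULT,
+ *                             HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);
+ */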
+
+enum htt_ppdu_stats_tag_type {
+ HTT_PPDU_STATS_TAG_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMMON,
+ HTT_PPDU_STATS_TAG_USR_RATE,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256,
+ HTT_PPDU_STATS_TAG_SCH_CMD_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH,
+ HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY,
+ HTT_PPDU_STATS_TAG_INFO,
+ HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD,
+
+ /* New TLVs are added above this line */
+ HTT_PPDU_STATS_TAG_MAX,
+};
+
+#define HTT_PPDU_STATS_TAG_DEFAULT (BIT(HTT_PPDU_STATS_TAG_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_RATE) \
+ | BIT(HTT_PPDU_STATS_TAG_SCH_CMD_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY))
+
+#define HTT_PPDU_STATS_TAG_PKTLOG (BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_INFO) | \
+ BIT(HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD) | \
+ HTT_PPDU_STATS_TAG_DEFAULT)
+
+enum htt_stats_internal_ppdu_frametype {
+ HTT_STATS_PPDU_FTYPE_CTRL,
+ HTT_STATS_PPDU_FTYPE_DATA,
+ HTT_STATS_PPDU_FTYPE_BAR,
+ HTT_STATS_PPDU_FTYPE_MAX
+};
+
+/* HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message
+ *
+ * details:
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by host to
+ * configure RXDMA rings.
+ * The configuration is per ring and includes both packet subtypes
+ * and PPDU/MPDU TLVs.
+ *
+ * The message would appear as follows:
+ *
+ * |31 29|28|27|26|25|24|23 16|15 8|7 0|
+ * |-------+--+--+--+--+--+-----------+----------------+---------------|
+ * | rsvd1 |ED|DT|OV|PS|SS| ring_id | pdev_id | msg_type |
+ * |-------------------------------------------------------------------|
+ * | rsvd2 | ring_buffer_size |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_0 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_1 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_2 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_3 |
+ * |-------------------------------------------------------------------|
+ * | tlv_filter_in_flags |
+ * |-------------------------------------------------------------------|
+ * Where:
+ * PS = pkt_swap
+ * SS = status_swap
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id : Identify the ring to configure.
+ * More details can be found in enum htt_srng_ring_id
+ * b'24 - status_swap: 1 is to swap status TLV
+ * b'25 - pkt_swap: 1 is to swap packet TLV
+ * b'26 - rx_offset_valid (OV): flag to indicate rx offsets
+ * configuration fields are valid
+ * b'27 - drop_thresh_valid (DT): flag to indicate if the
+ * rx_drop_threshold field is valid
+ * b'28 - rx_mon_global_en: Enable/Disable global register
+ * configuration in Rx monitor module.
+ * b'29:31 - rsvd1: reserved for future use
+ * dword1 - b'0:15 - ring_buffer_size: size of buffers referenced by rx ring,
+ * in byte units.
+ * Valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * - b'16:31 - rsvd2: Reserved for future use
+ * dword2 - b'0:31 - packet_type_enable_flags_0:
+ * Enable MGMT packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * FP: Filter_Pass
+ * MD: Monitor_Direct
+ * MO: Monitor_Other
+ * 10 mgmt subtypes * 3 bits -> 30 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs
+ * dword3 - b'0:31 - packet_type_enable_flags_1:
+ * Enable MGMT packet from 0b1010 to 0b1111
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs
+ * dword4 - b'0:31 - packet_type_enable_flags_2:
+ * Enable CTRL packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs
+ * dword5 - b'0:31 - packet_type_enable_flags_3:
+ * Enable CTRL packet from 0b1010 to 0b1111,
+ * MCAST_DATA, UCAST_DATA, NULL_DATA
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs
+ * dword6 - b'0:31 - tlv_filter_in_flags:
+ * Filter in Attention/MPDU/PPDU/Header/User tlvs
+ * Refer to CFG_TLV_FILTER_IN_FLAG defs
+ */
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID BIT(26)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL BIT(27)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON BIT(28)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(18, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(21, 19)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(24, 22)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD GENMASK(9, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE BIT(17)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE BIT(18)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE BIT(19)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET BIT(0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET GENMASK(14, 1)
+
+#define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET GENMASK(31, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET GENMASK(31, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET GENMASK(31, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET GENMASK(15, 0)
+
+#define HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET BIT(23)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK GENMASK(18, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK GENMASK(16, 0)
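+
+/* Example (illustrative sketch, not part of this patch): info0 of
+ * htt_rx_ring_selection_cfg_cmd (defined below) is packed with the masks
+ * above, mirroring the tx variant in dp_tx.c:
+ *
+ * cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
+ *                               HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
+ * cmd->info0 |= le32_encode_bits(htt_ring_id,
+ *                                HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
+ * cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
+ *                                HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
+ */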
+
+enum htt_rx_filter_tlv_flags {
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET = BIT(2),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END = BIT(3),
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END = BIT(4),
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER = BIT(5),
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER = BIT(6),
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION = BIT(7),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START = BIT(8),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END = BIT(9),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO = BIT(13),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(17),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(18),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(19),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(20),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(21),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(22),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(23),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(24),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(25),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(26),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(27),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(28),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(29),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags1 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(17),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags2 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(17),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(18),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(19),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(20),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(21),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(22),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(23),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(24),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(25),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(26),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(27),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(28),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(29),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags3 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(17),
+};
+
+enum htt_rx_data_pkt_filter_tlv_flasg3 {
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(18),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(19),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(20),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(21),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(22),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(23),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(24),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(25),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(26),
+};
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS1 (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS1 (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS1 (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG2 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG2 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG2 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG3 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG3 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG3 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_FP_DATA_FILTER_FLASG3 (HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MD_DATA_FILTER_FLASG3 (HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MO_DATA_FILTER_FLASG3 (HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG2 \
+ (HTT_RX_FP_CTRL_FILTER_FLASG2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG2 \
+ (HTT_RX_MO_CTRL_FILTER_FLASG2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG3 HTT_RX_FP_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG3 HTT_RX_MO_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_FP_DATA_FILTER_FLASG3 HTT_RX_FP_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_DATA_FILTER_FLASG3 HTT_RX_MO_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO)
+
+/* msdu start, mpdu end, attention, rx hdr TLVs are not subscribed */
+#define HTT_RX_TLV_FLAGS_RXDMA_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END)
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+
+struct htt_rx_ring_selection_cfg_cmd {
+ __le32 info0;
+ __le32 info1;
+ __le32 pkt_type_en_flags0;
+ __le32 pkt_type_en_flags1;
+ __le32 pkt_type_en_flags2;
+ __le32 pkt_type_en_flags3;
+ __le32 rx_filter_tlv;
+ __le32 rx_packet_offset;
+ __le32 rx_mpdu_offset;
+ __le32 rx_msdu_offset;
+ __le32 rx_attn_offset;
+ __le32 info2;
+ __le32 reserved[2];
+ __le32 rx_mpdu_start_end_mask;
+ __le32 rx_msdu_end_word_mask;
+ __le32 info3;
+} __packed;
+
+#define HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE 32
+#define HTT_RX_RING_DEFAULT_DMA_LENGTH 0x7
+#define HTT_RX_RING_PKT_TLV_OFFSET 0x1
+
+struct htt_rx_ring_tlv_filter {
+ u32 rx_filter; /* see htt_rx_filter_tlv_flags */
+ u32 pkt_filter_flags0; /* MGMT */
+ u32 pkt_filter_flags1; /* MGMT */
+ u32 pkt_filter_flags2; /* CTRL */
+ u32 pkt_filter_flags3; /* DATA */
+ bool offset_valid;
+ u16 rx_packet_offset;
+ u16 rx_header_offset;
+ u16 rx_mpdu_end_offset;
+ u16 rx_mpdu_start_offset;
+ u16 rx_msdu_end_offset;
+ u16 rx_msdu_start_offset;
+ u16 rx_attn_offset;
+ u16 rx_mpdu_start_wmask;
+ u16 rx_mpdu_end_wmask;
+ u32 rx_msdu_end_wmask;
+ u32 conf_len_ctrl;
+ u32 conf_len_mgmt;
+ u32 conf_len_data;
+ u16 rx_drop_threshold;
+ bool enable_log_mgmt_type;
+ bool enable_log_ctrl_type;
+ bool enable_log_data_type;
+ bool enable_rx_tlv_offset;
+ u16 rx_tlv_offset;
+ bool drop_threshold_valid;
+ bool rxmon_disable;
+};
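+
+/* Example (illustrative sketch, not part of this patch): a minimal filter
+ * for the monitor status ring only needs rx_filter, mirroring the setup
+ * code in dp_tx.c above:
+ *
+ * struct htt_rx_ring_tlv_filter tlv_filter = { };
+ *
+ * tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ * ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, mac_id,
+ *                                        HAL_RXDMA_MONITOR_STATUS,
+ *                                        RX_MON_STATUS_BUF_SIZE,
+ *                                        &tlv_filter);
+ */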
+
+#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
+#define HTT_STATS_FRAME_CTRL_TYPE_CTRL 0x1
+#define HTT_STATS_FRAME_CTRL_TYPE_DATA 0x2
+#define HTT_STATS_FRAME_CTRL_TYPE_RESV 0x3
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE GENMASK(15, 0)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE GENMASK(18, 16)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(21, 19)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(24, 22)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(27, 25)
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG GENMASK(2, 0)
+
+struct htt_tx_ring_selection_cfg_cmd {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 tlv_filter_mask_in0;
+ __le32 tlv_filter_mask_in1;
+ __le32 tlv_filter_mask_in2;
+ __le32 tlv_filter_mask_in3;
+ __le32 reserved[3];
+} __packed;
+
+#define HTT_TX_RING_TLV_FILTER_MGMT_DMA_LEN GENMASK(3, 0)
+#define HTT_TX_RING_TLV_FILTER_CTRL_DMA_LEN GENMASK(7, 4)
+#define HTT_TX_RING_TLV_FILTER_DATA_DMA_LEN GENMASK(11, 8)
+
+#define HTT_TX_MON_FILTER_HYBRID_MODE \
+ (HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_START_STATUS | \
+ HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_END_STATUS | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_END | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PPDU | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_PPDU | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_ACK_OR_BA | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_1K_BA | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PROT | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_PROT | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_RESPONSE | \
+ HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO | \
+ HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO_PART2)
+
+struct htt_tx_ring_tlv_filter {
+ u32 tx_mon_downstream_tlv_flags;
+ u32 tx_mon_upstream_tlv_flags0;
+ u32 tx_mon_upstream_tlv_flags1;
+ u32 tx_mon_upstream_tlv_flags2;
+ bool tx_mon_mgmt_filter;
+ bool tx_mon_data_filter;
+ bool tx_mon_ctrl_filter;
+ u16 tx_mon_pkt_dma_len;
+} __packed;
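+
+/* Example (illustrative sketch, not part of this patch; ring_id, mac_id,
+ * ring_type and tx_buf_size describe the caller's tx monitor ring):
+ * enabling hybrid-mode tx monitor filtering before handing the filter to
+ * ath12k_dp_tx_htt_tx_filter_setup() in dp_tx.c:
+ *
+ * struct htt_tx_ring_tlv_filter tlv_filter = { };
+ *
+ * tlv_filter.tx_mon_upstream_tlv_flags0 = HTT_TX_MON_FILTER_HYBRID_MODE;
+ * tlv_filter.tx_mon_mgmt_filter = true;
+ * ret = ath12k_dp_tx_htt_tx_filter_setup(ab, ring_id, mac_id, ring_type,
+ *                                        tx_buf_size, &tlv_filter);
+ */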
+
+enum htt_tx_mon_upstream_tlv_flags0 {
+ HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_START_STATUS = BIT(1),
+ HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_END_STATUS = BIT(2),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START = BIT(3),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_END = BIT(4),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PPDU = BIT(5),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_PPDU = BIT(6),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_ACK_OR_BA = BIT(7),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_1K_BA = BIT(8),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PROT = BIT(9),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_PROT = BIT(10),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_RESPONSE = BIT(11),
+ HTT_TX_FILTER_TLV_FLAGS0_RX_FRAME_BITMAP_ACK = BIT(12),
+ HTT_TX_FILTER_TLV_FLAGS0_RX_FRAME_1K_BITMAP_ACK = BIT(13),
+ HTT_TX_FILTER_TLV_FLAGS0_COEX_TX_STATUS = BIT(14),
+ HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO = BIT(15),
+ HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO_PART2 = BIT(16),
+};
+
+#define HTT_TX_FILTER_TLV_FLAGS2_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 BIT(11)
+
+/* HTT message target->host */
+
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_T2H_MSG_TYPE_PEER_MAP2 = 0x1e,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP2 = 0x1f,
+ HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
+ HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
+ HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24,
+ HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND = 0x28,
+ HTT_T2H_MSG_TYPE_PEER_MAP3 = 0x2b,
+ HTT_T2H_MSG_TYPE_VDEV_TXRX_STATS_PERIODIC_IND = 0x2c,
+};
+
+#define HTT_TARGET_VERSION_MAJOR 3
+
+#define HTT_T2H_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_VERSION_CONF_MINOR GENMASK(15, 8)
+#define HTT_T2H_VERSION_CONF_MAJOR GENMASK(23, 16)
+
+struct htt_t2h_version_conf_msg {
+ __le32 version;
+} __packed;
+
+#define HTT_T2H_PEER_MAP_INFO_VDEV_ID GENMASK(15, 8)
+#define HTT_T2H_PEER_MAP_INFO_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16
+
+struct htt_t2h_peer_map_event {
+ __le32 info;
+ __le32 mac_addr_l32;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+#define HTT_T2H_PEER_UNMAP_INFO_VDEV_ID HTT_T2H_PEER_MAP_INFO_VDEV_ID
+#define HTT_T2H_PEER_UNMAP_INFO_PEER_ID HTT_T2H_PEER_MAP_INFO_PEER_ID
+#define HTT_T2H_PEER_UNMAP_INFO1_MAC_ADDR_H16 \
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_M HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_S HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S
+
+struct htt_t2h_peer_unmap_event {
+ __le32 info;
+ __le32 mac_addr_l32;
+ __le32 info1;
+} __packed;
+
+struct htt_resp_msg {
+ union {
+ struct htt_t2h_version_conf_msg version_msg;
+ struct htt_t2h_peer_map_event peer_map_ev;
+ struct htt_t2h_peer_unmap_event peer_unmap_ev;
+ };
+} __packed;
+
+#define HTT_VDEV_GET_STATS_U64(msg_l32, msg_u32)\
+ (((u64)__le32_to_cpu(msg_u32) << 32) | (__le32_to_cpu(msg_l32)))
+#define HTT_T2H_VDEV_STATS_PERIODIC_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_VDEV_STATS_PERIODIC_PDEV_ID GENMASK(15, 8)
+#define HTT_T2H_VDEV_STATS_PERIODIC_NUM_VDEV GENMASK(23, 16)
+#define HTT_T2H_VDEV_STATS_PERIODIC_PAYLOAD_BYTES GENMASK(15, 0)
+#define HTT_VDEV_TXRX_STATS_COMMON_TLV 0
+#define HTT_VDEV_TXRX_STATS_HW_STATS_TLV 1
+
+struct htt_t2h_vdev_txrx_stats_ind {
+ __le32 vdev_id;
+ __le32 rx_msdu_byte_cnt_lo;
+ __le32 rx_msdu_byte_cnt_hi;
+ __le32 rx_msdu_cnt_lo;
+ __le32 rx_msdu_cnt_hi;
+ __le32 tx_msdu_byte_cnt_lo;
+ __le32 tx_msdu_byte_cnt_hi;
+ __le32 tx_msdu_cnt_lo;
+ __le32 tx_msdu_cnt_hi;
+ __le32 tx_retry_cnt_lo;
+ __le32 tx_retry_cnt_hi;
+ __le32 tx_retry_byte_cnt_lo;
+ __le32 tx_retry_byte_cnt_hi;
+ __le32 tx_drop_cnt_lo;
+ __le32 tx_drop_cnt_hi;
+ __le32 tx_drop_byte_cnt_lo;
+ __le32 tx_drop_byte_cnt_hi;
+ __le32 msdu_ttl_cnt_lo;
+ __le32 msdu_ttl_cnt_hi;
+ __le32 msdu_ttl_byte_cnt_lo;
+ __le32 msdu_ttl_byte_cnt_hi;
+} __packed;
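+
+/* Example (illustrative sketch, not part of this patch): each _lo/_hi pair
+ * above combines into one 64-bit counter via HTT_VDEV_GET_STATS_U64():
+ *
+ * u64 rx_msdu_cnt = HTT_VDEV_GET_STATS_U64(stats->rx_msdu_cnt_lo,
+ *                                          stats->rx_msdu_cnt_hi);
+ */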
+
+struct htt_t2h_vdev_common_stats_tlv {
+ __le32 soc_drop_count_lo;
+ __le32 soc_drop_count_hi;
+} __packed;
+
+/* ppdu stats
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host ppdu stats indication message.
+ *
+ *
+ * |31 16|15 12|11 10|9 8|7 0 |
+ * |----------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id | msg type |
+ * |----------------------------------------------------------------------|
+ * | ppdu_id |
+ * |----------------------------------------------------------------------|
+ * | Timestamp in us |
+ * |----------------------------------------------------------------------|
+ * | reserved |
+ * |----------------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_ppdu_stats.h) |
+ * |----------------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as a PPDU STATS indication
+ * message.
+ * Value: 0x1d
+ * - mac_id
+ * Bits 9:8
+ * Purpose: mac_id of this ppdu_id
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id of this ppdu_id
+ * Value: 0-3
+ * 0 (for rings at SOC level),
+ * 1/2/3 PDEV -> 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: total tlv size
+ * Value: payload_size in bytes
+ */
+
+#define HTT_T2H_PPDU_STATS_INFO_PDEV_ID GENMASK(11, 10)
+#define HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE GENMASK(31, 16)
+
+struct ath12k_htt_ppdu_stats_msg {
+ __le32 info;
+ __le32 ppdu_id;
+ __le32 timestamp;
+ __le32 rsvd;
+ u8 data[];
+} __packed;
+
+struct htt_tlv {
+ __le32 header;
+ u8 value[];
+} __packed;
+
+#define HTT_TLV_TAG GENMASK(11, 0)
+#define HTT_TLV_LEN GENMASK(23, 12)
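+
+/* Example (illustrative sketch, not part of this patch): a TLV stream is
+ * typically walked by decoding tag and length from the 32-bit header and
+ * then advancing by sizeof(*tlv) + len:
+ *
+ * u32 tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
+ * u32 len = le32_get_bits(tlv->header, HTT_TLV_LEN);
+ */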
+
+enum HTT_PPDU_STATS_BW {
+ HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0,
+ HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1,
+ HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2,
+ HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3,
+ HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4,
+ HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */
+ HTT_PPDU_STATS_BANDWIDTH_DYN = 6,
+};
+
+#define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_CMN_FLAGS_QUEUE_TYPE_M GENMASK(15, 8)
+/* bw - HTT_PPDU_STATS_BW */
+#define HTT_PPDU_STATS_CMN_FLAGS_BW_M GENMASK(19, 16)
+
+struct htt_ppdu_stats_common {
+ __le32 ppdu_id;
+ __le16 sched_cmdid;
+ u8 ring_id;
+ u8 num_users;
+ __le32 flags; /* %HTT_PPDU_STATS_CMN_FLAGS_ */
+ __le32 chain_mask;
+ __le32 fes_duration_us; /* frame exchange sequence */
+ __le32 ppdu_sch_eval_start_tstmp_us;
+ __le32 ppdu_sch_end_tstmp_us;
+ __le32 ppdu_start_tstmp_us;
+ /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted
+ * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted
+ */
+ __le16 phy_mode;
+ __le16 bw_mhz;
+} __packed;
+
+enum htt_ppdu_stats_gi {
+ HTT_PPDU_STATS_SGI_0_8_US,
+ HTT_PPDU_STATS_SGI_0_4_US,
+ HTT_PPDU_STATS_SGI_1_6_US,
+ HTT_PPDU_STATS_SGI_3_2_US,
+};
+
+#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4)
+
+enum HTT_PPDU_STATS_PPDU_TYPE {
+ HTT_PPDU_STATS_PPDU_TYPE_SU,
+ HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO,
+ HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA,
+ HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_TRIG,
+ HTT_PPDU_STATS_PPDU_TYPE_BURST_BCN,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_RESP,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_TRIG,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_RESP,
+ HTT_PPDU_STATS_PPDU_TYPE_MAX
+};
+
+#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M BIT(0)
+#define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M GENMASK(5, 1)
+
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M BIT(29)
+
+#define HTT_USR_RATE_PPDU_TYPE(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M)
+#define HTT_USR_RATE_PREAMBLE(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M)
+#define HTT_USR_RATE_BW(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M)
+#define HTT_USR_RATE_NSS(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M)
+#define HTT_USR_RATE_MCS(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M)
+#define HTT_USR_RATE_GI(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M)
+#define HTT_USR_RATE_DCM(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M)
+
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M BIT(29)
+
+struct htt_ppdu_stats_user_rate {
+ u8 tid_num;
+ u8 reserved0;
+ __le16 sw_peer_id;
+ __le32 info0; /* %HTT_PPDU_STATS_USER_RATE_INFO0_*/
+ __le16 ru_end;
+ __le16 ru_start;
+ __le16 resp_ru_end;
+ __le16 resp_ru_start;
+ __le32 info1; /* %HTT_PPDU_STATS_USER_RATE_INFO1_ */
+ __le32 rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */
+ /* Note: resp_rate_flags is only valid if resp_type is UL */
+ __le32 resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */
+} __packed;
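+
+/* Example (illustrative sketch, not part of this patch): rate fields are
+ * extracted from rate_flags with the accessor macros above:
+ *
+ * u8 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
+ * u8 nss = HTT_USR_RATE_NSS(user_rate->rate_flags);
+ * u8 gi = HTT_USR_RATE_GI(user_rate->rate_flags);
+ */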
+
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M GENMASK(10, 9)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BW_M GENMASK(13, 11)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_SGI_M BIT(14)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M GENMASK(31, 16)
+
+#define HTT_TX_INFO_IS_AMSDU(_flags) \
+ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M)
+#define HTT_TX_INFO_BA_ACK_FAILED(_flags) \
+ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M)
+#define HTT_TX_INFO_RATECODE(_flags) \
+ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M)
+#define HTT_TX_INFO_PEERID(_flags) \
+ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M)
+
+enum htt_ppdu_stats_usr_compln_status {
+ HTT_PPDU_STATS_USER_STATUS_OK,
+ HTT_PPDU_STATS_USER_STATUS_FILTERED,
+ HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT,
+ HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH,
+ HTT_PPDU_STATS_USER_STATUS_ABORT,
+};
+
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_RESP_TYPE_M GENMASK(12, 9)
+
+#define HTT_USR_CMPLTN_IS_AMPDU(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M)
+#define HTT_USR_CMPLTN_LONG_RETRY(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M)
+#define HTT_USR_CMPLTN_SHORT_RETRY(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M)
+
+struct htt_ppdu_stats_usr_cmpltn_cmn {
+ u8 status;
+ u8 tid_num;
+ __le16 sw_peer_id;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ __le32 ack_rssi;
+ __le16 mpdu_tried;
+ __le16 mpdu_success;
+ __le32 flags; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MPDU_M GENMASK(8, 0)
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M GENMASK(24, 9)
+#define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM GENMASK(31, 25)
+
+#define HTT_PPDU_STATS_NON_QOS_TID 16
+
+struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
+ __le32 ppdu_id;
+ __le16 sw_peer_id;
+ __le16 reserved0;
+ __le32 info; /* %HTT_PPDU_STATS_ACK_BA_INFO_ */
+ __le16 current_seq;
+ __le16 start_seq;
+ __le32 success_bytes;
+} __packed;
+
+struct htt_ppdu_user_stats {
+ u16 peer_id;
+ u16 delay_ba;
+ u32 tlv_flags;
+ bool is_valid_peer_id;
+ struct htt_ppdu_stats_user_rate rate;
+ struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn;
+ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba;
+};
+
+#define HTT_PPDU_STATS_MAX_USERS 8
+#define HTT_PPDU_DESC_MAX_DEPTH 16
+
+struct htt_ppdu_stats {
+ struct htt_ppdu_stats_common common;
+ struct htt_ppdu_user_stats user_stats[HTT_PPDU_STATS_MAX_USERS];
+};
+
+struct htt_ppdu_stats_info {
+ u32 tlv_bitmap;
+ u32 ppdu_id;
+ u32 frame_type;
+ u32 frame_ctrl;
+ u32 delay_ba;
+ u32 bar_num_users;
+ struct htt_ppdu_stats ppdu_stats;
+ struct list_head list;
+};
+
+/* @brief target -> host MLO offset indication message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host mlo offset indication message.
+ *
+ *
+ * |31 29|28                       13|12   10|9    8|7       0|
+ * |---------------------------------------------------------------------|
+ * |rsvd1|          mac_freq         |chip_id|pdev_id| msgtype |
+ * |---------------------------------------------------------------------|
+ * | sync_timestamp_lo_us |
+ * |---------------------------------------------------------------------|
+ * | sync_timestamp_hi_us |
+ * |---------------------------------------------------------------------|
+ * | mlo_offset_lo |
+ * |---------------------------------------------------------------------|
+ * | mlo_offset_hi |
+ * |---------------------------------------------------------------------|
+ * | mlo_offset_clcks |
+ * |---------------------------------------------------------------------|
+ * | rsvd2 | mlo_comp_clks |mlo_comp_us |
+ * |---------------------------------------------------------------------|
+ * | rsvd3 |mlo_comp_timer |
+ * |---------------------------------------------------------------------|
+ * Header fields
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as an MLO offset indication msg
+ * - PDEV_ID
+ * Bits 9:8
+ * Purpose: Pdev of this MLO offset
+ * - CHIP_ID
+ * Bits 12:10
+ * Purpose: chip_id of this MLO offset
+ * - MAC_FREQ
+ *   Bits 28:13
+ *   Purpose: clock frequency of the mac HW block in MHz
+ * - SYNC_TIMESTAMP_LO_US
+ *   Bits: 31:0
+ *   Purpose: lower 32 bits of the WLAN global time stamp at which
+ *            the last sync interrupt was received
+ * - SYNC_TIMESTAMP_HI_US
+ *   Bits: 31:0
+ *   Purpose: upper 32 bits of the WLAN global time stamp at which
+ *            the last sync interrupt was received
+ * - MLO_OFFSET_LO
+ * Bits: 31:0
+ * Purpose: lower 32 bits of the MLO offset in us
+ * - MLO_OFFSET_HI
+ * Bits: 31:0
+ * Purpose: upper 32 bits of the MLO offset in us
+ * - MLO_COMP_US
+ * Bits: 15:0
+ * Purpose: MLO time stamp compensation applied in us
+ * - MLO_COMP_CLCKS
+ * Bits: 25:16
+ * Purpose: MLO time stamp compensation applied in clock ticks
+ * - MLO_COMP_TIMER
+ * Bits: 21:0
+ * Purpose: Periodic timer at which compensation is applied
+ */
+
+#define HTT_T2H_MLO_OFFSET_INFO_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_MLO_OFFSET_INFO_PDEV_ID GENMASK(9, 8)
+
+struct ath12k_htt_mlo_offset_msg {
+ __le32 info;
+ __le32 sync_timestamp_lo_us;
+ __le32 sync_timestamp_hi_us;
+ __le32 mlo_offset_hi;
+ __le32 mlo_offset_lo;
+ __le32 mlo_offset_clks;
+ __le32 mlo_comp_clks;
+ __le32 mlo_comp_timer;
+} __packed;
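+
+/* Example (illustrative sketch, not part of this patch): the 64-bit MLO
+ * offset (in us) is reassembled from the _lo/_hi halves:
+ *
+ * u64 mlo_offset_us = ((u64)__le32_to_cpu(msg->mlo_offset_hi) << 32) |
+ *                     __le32_to_cpu(msg->mlo_offset_lo);
+ */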
+
+/* @brief host -> target FW extended statistics retrieve
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW extended stats retrieve message.
+ * The message specifies the type of stats the host wants to retrieve.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | reserved | stats type | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * | config param [0] |
+ * |-----------------------------------------------------------|
+ * | config param [1] |
+ * |-----------------------------------------------------------|
+ * | config param [2] |
+ * |-----------------------------------------------------------|
+ * | config param [3] |
+ * |-----------------------------------------------------------|
+ * | reserved |
+ * |-----------------------------------------------------------|
+ * | cookie LSBs |
+ * |-----------------------------------------------------------|
+ * | cookie MSBs |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this is an extended stats upload request message
+ * Value: 0x10
+ * - PDEV_MASK
+ * Bits 15:8
+ * Purpose: identifies the mask of PDEVs to retrieve stats from
+ * Value: This is an overloaded field; refer to the usage and
+ * interpretation of PDEV in the interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bits 15:9 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - STATS_TYPE
+ * Bits 23:16
+ * Purpose: identifies which FW statistics to upload
+ * Value: Defined by htt_dbg_ext_stats_type (see htt_stats.h)
+ * - Reserved
+ * Bits 31:24
+ * - CONFIG_PARAM [0]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [1]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [2]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [3]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - Reserved [31:0] for future use.
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ */
+
+struct htt_ext_stats_cfg_hdr {
+ u8 msg_type;
+ u8 pdev_mask;
+ u8 stats_type;
+ u8 reserved;
+} __packed;
+
+struct htt_ext_stats_cfg_cmd {
+ struct htt_ext_stats_cfg_hdr hdr;
+ __le32 cfg_param0;
+ __le32 cfg_param1;
+ __le32 cfg_param2;
+ __le32 cfg_param3;
+ __le32 reserved;
+ __le32 cookie_lsb;
+ __le32 cookie_msb;
+} __packed;
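+
+/* Illustrative sketch, not part of the interface: assuming an
+ * HTT_H2T_MSG_TYPE_EXT_STATS_CFG message type define (0x10 per the
+ * layout above), a request buffer could be populated roughly as
+ *
+ *	struct htt_ext_stats_cfg_cmd *cmd = (void *)skb->data;
+ *
+ *	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
+ *	cmd->hdr.pdev_mask = 1 << pdev_idx;
+ *	cmd->hdr.stats_type = type;
+ *	cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
+ *	cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));
+ */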
+
+/* htt stats config default params */
+#define HTT_STAT_DEFAULT_RESET_START_OFFSET 0
+#define HTT_STAT_DEFAULT_CFG0_ALL_HWQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_TXQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_CMDQS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_RINGS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS 0xff
+#define HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE 0x00
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS 0x00
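+
+/* Illustrative sketch, not part of the interface: assuming a TX HWQ
+ * stats type in htt_stats.h, config param [0] would typically select
+ * every hardware queue:
+ *
+ *	cfg_param0 = HTT_STAT_DEFAULT_CFG0_ALL_HWQS;
+ */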
+
+/* HTT_DBG_EXT_STATS_PEER_INFO
+ * PARAMS:
+ * @config_param0:
+ * [Bit0] - [0] for sw_peer_id, [1] for mac_addr based request
+ * [Bit15 : Bit 1] htt_peer_stats_req_mode_t
+ * [Bit31 : Bit16] sw_peer_id
+ * @config_param1:
+ * peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum)
+ * 0 bit htt_peer_stats_cmn_tlv
+ * 1 bit htt_peer_details_tlv
+ * 2 bit htt_tx_peer_rate_stats_tlv
+ * 3 bit htt_rx_peer_rate_stats_tlv
+ * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv
+ * 5 bit htt_rx_tid_stats_tlv
+ * 6 bit htt_msdu_flow_stats_tlv
+ * @config_param2: [Bit31 : Bit0] mac_addr31to0
+ * @config_param3: [Bit15 : Bit0] mac_addr47to32
+ * [Bit31 : Bit16] reserved
+ */
+#define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0)
+#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f
+
+/* Used to set different configs to the specified stats type. */
+struct htt_ext_stats_cfg_params {
+ u32 cfg0;
+ u32 cfg1;
+ u32 cfg2;
+ u32 cfg3;
+};
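+
+/* Illustrative sketch, not part of the interface: a MAC address based
+ * HTT_DBG_EXT_STATS_PEER_INFO request covering all seven TLV types
+ * listed above could seed the params as
+ *
+ *	struct htt_ext_stats_cfg_params cfg = {};
+ *
+ *	cfg.cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ *	cfg.cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
+ *	cfg.cfg2 = ...;  lower four MAC octets, packed as in htt_mac_addr
+ *	cfg.cfg3 = ...;  upper two MAC octets
+ */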
+
+enum vdev_stats_offload_timer_duration {
+ ATH12K_STATS_TIMER_DUR_500MS = 1,
+ ATH12K_STATS_TIMER_DUR_1SEC = 2,
+ ATH12K_STATS_TIMER_DUR_2SEC = 3,
+};
+
+#define ATH12K_HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
+#define ATH12K_HTT_MAC_ADDR_L32_1 GENMASK(15, 8)
+#define ATH12K_HTT_MAC_ADDR_L32_2 GENMASK(23, 16)
+#define ATH12K_HTT_MAC_ADDR_L32_3 GENMASK(31, 24)
+#define ATH12K_HTT_MAC_ADDR_H16_0 GENMASK(7, 0)
+#define ATH12K_HTT_MAC_ADDR_H16_1 GENMASK(15, 8)
+
+struct htt_mac_addr {
+ __le32 mac_addr_l32;
+ __le32 mac_addr_h16;
+} __packed;
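+
+/* Illustrative sketch, not part of the interface: the L32/H16 masks
+ * above pack a MAC address octet by octet, e.g.
+ *
+ *	u32 l32 = u32_encode_bits(mac[0], ATH12K_HTT_MAC_ADDR_L32_0) |
+ *		  u32_encode_bits(mac[1], ATH12K_HTT_MAC_ADDR_L32_1) |
+ *		  u32_encode_bits(mac[2], ATH12K_HTT_MAC_ADDR_L32_2) |
+ *		  u32_encode_bits(mac[3], ATH12K_HTT_MAC_ADDR_L32_3);
+ *
+ *	addr->mac_addr_l32 = cpu_to_le32(l32);
+ */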
+
+int ath12k_dp_htt_connect(struct ath12k_dp *dp);
+int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type);
+
+void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
+ struct sk_buff *skb);
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data);
+int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
+int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask);
+int
+ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie);
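+
+/* Illustrative sketch, not part of the interface: a caller could
+ * combine the pieces above roughly as
+ *
+ *	struct htt_ext_stats_cfg_params params = {};
+ *	u64 cookie = 0;  opaque; echoed back in the stats confirmation
+ *
+ *	ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar, type, &params, cookie);
+ */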
+int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset);
+
+int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter);
+int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int tx_buf_size,
+ struct htt_tx_ring_tlv_filter *htt_tlv_filter);
+int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 39d1967584db..737287a9aa46 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -6,1756 +6,11 @@
#include "dp_mon.h"
#include "debug.h"
-#include "dp_rx.h"
#include "dp_tx.h"
#include "peer.h"
-#define ATH12K_LE32_DEC_ENC(value, dec_bits, enc_bits) \
- u32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)
-
-#define ATH12K_LE64_DEC_ENC(value, dec_bits, enc_bits) \
- u32_encode_bits(le64_get_bits(value, dec_bits), enc_bits)
-
-static void
-ath12k_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user,
- struct hal_rx_user_status *rx_user_status)
-{
- rx_user_status->ul_ofdma_user_v0_word0 =
- __le32_to_cpu(ppdu_end_user->usr_resp_ref);
- rx_user_status->ul_ofdma_user_v0_word1 =
- __le32_to_cpu(ppdu_end_user->usr_resp_ref_ext);
-}
-
-static void
-ath12k_dp_mon_rx_populate_byte_count(const struct hal_rx_ppdu_end_user_stats *stats,
- void *ppduinfo,
- struct hal_rx_user_status *rx_user_status)
-{
- rx_user_status->mpdu_ok_byte_count =
- le32_get_bits(stats->info7,
- HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT);
- rx_user_status->mpdu_err_byte_count =
- le32_get_bits(stats->info8,
- HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT);
-}
-
-static void
-ath12k_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats *rx_tlv,
- struct hal_rx_mon_ppdu_info *ppdu_info,
- struct hal_rx_user_status *rx_user_status)
-{
- rx_user_status->ast_index = ppdu_info->ast_index;
- rx_user_status->tid = ppdu_info->tid;
- rx_user_status->tcp_ack_msdu_count =
- ppdu_info->tcp_ack_msdu_count;
- rx_user_status->tcp_msdu_count =
- ppdu_info->tcp_msdu_count;
- rx_user_status->udp_msdu_count =
- ppdu_info->udp_msdu_count;
- rx_user_status->other_msdu_count =
- ppdu_info->other_msdu_count;
- rx_user_status->frame_control = ppdu_info->frame_control;
- rx_user_status->frame_control_info_valid =
- ppdu_info->frame_control_info_valid;
- rx_user_status->data_sequence_control_info_valid =
- ppdu_info->data_sequence_control_info_valid;
- rx_user_status->first_data_seq_ctrl =
- ppdu_info->first_data_seq_ctrl;
- rx_user_status->preamble_type = ppdu_info->preamble_type;
- rx_user_status->ht_flags = ppdu_info->ht_flags;
- rx_user_status->vht_flags = ppdu_info->vht_flags;
- rx_user_status->he_flags = ppdu_info->he_flags;
- rx_user_status->rs_flags = ppdu_info->rs_flags;
-
- rx_user_status->mpdu_cnt_fcs_ok =
- ppdu_info->num_mpdu_fcs_ok;
- rx_user_status->mpdu_cnt_fcs_err =
- ppdu_info->num_mpdu_fcs_err;
- memcpy(&rx_user_status->mpdu_fcs_ok_bitmap[0], &ppdu_info->mpdu_fcs_ok_bitmap[0],
- HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
- sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0]));
-
- ath12k_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
-}
-
-static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- u32 nsts, info0, info1;
- u8 gi_setting;
-
- info0 = __le32_to_cpu(vht_sig->info0);
- info1 = __le32_to_cpu(vht_sig->info1);
-
- ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
- ppdu_info->mcs = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_MCS);
- gi_setting = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING);
- switch (gi_setting) {
- case HAL_RX_VHT_SIG_A_NORMAL_GI:
- ppdu_info->gi = HAL_RX_GI_0_8_US;
- break;
- case HAL_RX_VHT_SIG_A_SHORT_GI:
- case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY:
- ppdu_info->gi = HAL_RX_GI_0_4_US;
- break;
- }
-
- ppdu_info->is_stbc = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_STBC);
- nsts = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS);
- if (ppdu_info->is_stbc && nsts > 0)
- nsts = ((nsts + 1) >> 1) - 1;
-
- ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK) + 1;
- ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW);
- ppdu_info->beamformed = u32_get_bits(info1,
- HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED);
- ppdu_info->vht_flag_values5 = u32_get_bits(info0,
- HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID);
- ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
- ppdu_info->nss);
- ppdu_info->vht_flag_values2 = ppdu_info->bw;
- ppdu_info->vht_flag_values4 =
- u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
-}
-
-static void ath12k_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- u32 info0 = __le32_to_cpu(ht_sig->info0);
- u32 info1 = __le32_to_cpu(ht_sig->info1);
-
- ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_MCS);
- ppdu_info->bw = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_BW);
- ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_STBC);
- ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING);
- ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI);
- ppdu_info->nss = (ppdu_info->mcs >> 3) + 1;
-}
-
-static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- u32 info0 = __le32_to_cpu(lsigb->info0);
- u8 rate;
-
- rate = u32_get_bits(info0, HAL_RX_LSIG_B_INFO_INFO0_RATE);
- switch (rate) {
- case 1:
- rate = HAL_RX_LEGACY_RATE_1_MBPS;
- break;
- case 2:
- case 5:
- rate = HAL_RX_LEGACY_RATE_2_MBPS;
- break;
- case 3:
- case 6:
- rate = HAL_RX_LEGACY_RATE_5_5_MBPS;
- break;
- case 4:
- case 7:
- rate = HAL_RX_LEGACY_RATE_11_MBPS;
- break;
- default:
- rate = HAL_RX_LEGACY_RATE_INVALID;
- }
-
- ppdu_info->rate = rate;
- ppdu_info->cck_flag = 1;
-}
-
-static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- u32 info0 = __le32_to_cpu(lsiga->info0);
- u8 rate;
-
- rate = u32_get_bits(info0, HAL_RX_LSIG_A_INFO_INFO0_RATE);
- switch (rate) {
- case 8:
- rate = HAL_RX_LEGACY_RATE_48_MBPS;
- break;
- case 9:
- rate = HAL_RX_LEGACY_RATE_24_MBPS;
- break;
- case 10:
- rate = HAL_RX_LEGACY_RATE_12_MBPS;
- break;
- case 11:
- rate = HAL_RX_LEGACY_RATE_6_MBPS;
- break;
- case 12:
- rate = HAL_RX_LEGACY_RATE_54_MBPS;
- break;
- case 13:
- rate = HAL_RX_LEGACY_RATE_36_MBPS;
- break;
- case 14:
- rate = HAL_RX_LEGACY_RATE_18_MBPS;
- break;
- case 15:
- rate = HAL_RX_LEGACY_RATE_9_MBPS;
- break;
- default:
- rate = HAL_RX_LEGACY_RATE_INVALID;
- }
-
- ppdu_info->rate = rate;
-}
-
static void
-ath12k_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *ofdma,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- u32 info0, value;
-
- info0 = __le32_to_cpu(ofdma->info0);
-
- ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN;
-
- /* HE-data2 */
- ppdu_info->he_data2 |= HE_TXBF_KNOWN;
-
- ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS);
- value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT;
- ppdu_info->he_data3 |= value;
-
- value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM);
- value = value << HE_DCM_SHIFT;
- ppdu_info->he_data3 |= value;
-
- value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING);
- ppdu_info->ldpc = value;
- value = value << HE_CODING_SHIFT;
- ppdu_info->he_data3 |= value;
-
- /* HE-data4 */
- value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID);
- value = value << HE_STA_ID_SHIFT;
- ppdu_info->he_data4 |= value;
-
- ppdu_info->nss =
- u32_get_bits(info0,
- HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS) + 1;
- ppdu_info->beamformed = u32_get_bits(info0,
- HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF);
-}
-
-static void
-ath12k_dp_mon_parse_he_sig_b2_mu(const struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- u32 info0, value;
-
- info0 = __le32_to_cpu(he_sig_b2_mu->info0);
-
- ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_CODING_KNOWN;
-
- ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS);
- value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT;
- ppdu_info->he_data3 |= value;
-
- value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING);
- ppdu_info->ldpc = value;
- value = value << HE_CODING_SHIFT;
- ppdu_info->he_data3 |= value;
-
- value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID);
- value = value << HE_STA_ID_SHIFT;
- ppdu_info->he_data4 |= value;
-
- ppdu_info->nss =
- u32_get_bits(info0,
- HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS) + 1;
-}
-
-static void
-ath12k_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0);
- u16 ru_tones;
-
- ru_tones = u32_get_bits(info0,
- HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION);
- ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
- ppdu_info->he_RU[0] = ru_tones;
-}
-
-static void
-ath12k_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- u32 info0, info1, value;
- u16 he_gi = 0, he_ltf = 0;
-
- info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
- info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
-
- ppdu_info->he_mu_flags = 1;
-
- ppdu_info->he_data1 = HE_MU_FORMAT_TYPE;
- ppdu_info->he_data1 |=
- HE_BSS_COLOR_KNOWN |
- HE_DL_UL_KNOWN |
- HE_LDPC_EXTRA_SYMBOL_KNOWN |
- HE_STBC_KNOWN |
- HE_DATA_BW_RU_KNOWN |
- HE_DOPPLER_KNOWN;
-
- ppdu_info->he_data2 =
- HE_GI_KNOWN |
- HE_LTF_SYMBOLS_KNOWN |
- HE_PRE_FEC_PADDING_KNOWN |
- HE_PE_DISAMBIGUITY_KNOWN |
- HE_TXOP_KNOWN |
- HE_MIDABLE_PERIODICITY_KNOWN;
-
- /* data3 */
- ppdu_info->he_data3 = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_BSS_COLOR);
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_UL_FLAG);
- value = value << HE_DL_UL_SHIFT;
- ppdu_info->he_data3 |= value;
-
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA);
- value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT;
- ppdu_info->he_data3 |= value;
-
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC);
- value = value << HE_STBC_SHIFT;
- ppdu_info->he_data3 |= value;
-
- /* data4 */
- ppdu_info->he_data4 = u32_get_bits(info0,
- HAL_RX_HE_SIG_A_MU_DL_INFO0_SPATIAL_REUSE);
- ppdu_info->he_data4 = value;
-
- /* data5 */
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW);
- ppdu_info->he_data5 = value;
- ppdu_info->bw = value;
-
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_CP_LTF_SIZE);
- switch (value) {
- case 0:
- he_gi = HE_GI_0_8;
- he_ltf = HE_LTF_4_X;
- break;
- case 1:
- he_gi = HE_GI_0_8;
- he_ltf = HE_LTF_2_X;
- break;
- case 2:
- he_gi = HE_GI_1_6;
- he_ltf = HE_LTF_2_X;
- break;
- case 3:
- he_gi = HE_GI_3_2;
- he_ltf = HE_LTF_4_X;
- break;
- }
-
- ppdu_info->gi = he_gi;
- value = he_gi << HE_GI_SHIFT;
- ppdu_info->he_data5 |= value;
-
- value = he_ltf << HE_LTF_SIZE_SHIFT;
- ppdu_info->he_data5 |= value;
-
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB);
- value = (value << HE_LTF_SYM_SHIFT);
- ppdu_info->he_data5 |= value;
-
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR);
- value = value << HE_PRE_FEC_PAD_SHIFT;
- ppdu_info->he_data5 |= value;
-
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM);
- value = value << HE_PE_DISAMBIGUITY_SHIFT;
- ppdu_info->he_data5 |= value;
-
- /*data6*/
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION);
- value = value << HE_DOPPLER_SHIFT;
- ppdu_info->he_data6 |= value;
-
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION);
- value = value << HE_TXOP_SHIFT;
- ppdu_info->he_data6 |= value;
-
- /* HE-MU Flags */
- /* HE-MU-flags1 */
- ppdu_info->he_flags1 =
- HE_SIG_B_MCS_KNOWN |
- HE_SIG_B_DCM_KNOWN |
- HE_SIG_B_COMPRESSION_FLAG_1_KNOWN |
- HE_SIG_B_SYM_NUM_KNOWN |
- HE_RU_0_KNOWN;
-
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_MCS_OF_SIGB);
- ppdu_info->he_flags1 |= value;
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DCM_OF_SIGB);
- value = value << HE_DCM_FLAG_1_SHIFT;
- ppdu_info->he_flags1 |= value;
-
- /* HE-MU-flags2 */
- ppdu_info->he_flags2 = HE_BW_KNOWN;
-
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW);
- ppdu_info->he_flags2 |= value;
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_COMP_MODE_SIGB);
- value = value << HE_SIG_B_COMPRESSION_FLAG_2_SHIFT;
- ppdu_info->he_flags2 |= value;
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_NUM_SIGB_SYMB);
- value = value - 1;
- value = value << HE_NUM_SIG_B_SYMBOLS_SHIFT;
- ppdu_info->he_flags2 |= value;
-
- ppdu_info->is_stbc = info1 &
- HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC;
-}
-
-static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- u32 info0, info1, value;
- u32 dcm;
- u8 he_dcm = 0, he_stbc = 0;
- u16 he_gi = 0, he_ltf = 0;
-
- ppdu_info->he_flags = 1;
-
- info0 = __le32_to_cpu(he_sig_a->info0);
- info1 = __le32_to_cpu(he_sig_a->info1);
-
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND);
- if (value == 0)
- ppdu_info->he_data1 = HE_TRIG_FORMAT_TYPE;
- else
- ppdu_info->he_data1 = HE_SU_FORMAT_TYPE;
-
- ppdu_info->he_data1 |=
- HE_BSS_COLOR_KNOWN |
- HE_BEAM_CHANGE_KNOWN |
- HE_DL_UL_KNOWN |
- HE_MCS_KNOWN |
- HE_DCM_KNOWN |
- HE_CODING_KNOWN |
- HE_LDPC_EXTRA_SYMBOL_KNOWN |
- HE_STBC_KNOWN |
- HE_DATA_BW_RU_KNOWN |
- HE_DOPPLER_KNOWN;
-
- ppdu_info->he_data2 |=
- HE_GI_KNOWN |
- HE_TXBF_KNOWN |
- HE_PE_DISAMBIGUITY_KNOWN |
- HE_TXOP_KNOWN |
- HE_LTF_SYMBOLS_KNOWN |
- HE_PRE_FEC_PADDING_KNOWN |
- HE_MIDABLE_PERIODICITY_KNOWN;
-
- ppdu_info->he_data3 = u32_get_bits(info0,
- HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR);
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE);
- value = value << HE_BEAM_CHANGE_SHIFT;
- ppdu_info->he_data3 |= value;
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG);
- value = value << HE_DL_UL_SHIFT;
- ppdu_info->he_data3 |= value;
-
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS);
- ppdu_info->mcs = value;
- value = value << HE_TRANSMIT_MCS_SHIFT;
- ppdu_info->he_data3 |= value;
-
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
- he_dcm = value;
- value = value << HE_DCM_SHIFT;
- ppdu_info->he_data3 |= value;
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING);
- value = value << HE_CODING_SHIFT;
- ppdu_info->he_data3 |= value;
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA);
- value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT;
- ppdu_info->he_data3 |= value;
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC);
- he_stbc = value;
- value = value << HE_STBC_SHIFT;
- ppdu_info->he_data3 |= value;
-
- /* data4 */
- ppdu_info->he_data4 = u32_get_bits(info0,
- HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE);
-
- /* data5 */
- value = u32_get_bits(info0,
- HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW);
- ppdu_info->he_data5 = value;
- ppdu_info->bw = value;
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE);
- switch (value) {
- case 0:
- he_gi = HE_GI_0_8;
- he_ltf = HE_LTF_1_X;
- break;
- case 1:
- he_gi = HE_GI_0_8;
- he_ltf = HE_LTF_2_X;
- break;
- case 2:
- he_gi = HE_GI_1_6;
- he_ltf = HE_LTF_2_X;
- break;
- case 3:
- if (he_dcm && he_stbc) {
- he_gi = HE_GI_0_8;
- he_ltf = HE_LTF_4_X;
- } else {
- he_gi = HE_GI_3_2;
- he_ltf = HE_LTF_4_X;
- }
- break;
- }
- ppdu_info->gi = he_gi;
- value = he_gi << HE_GI_SHIFT;
- ppdu_info->he_data5 |= value;
- value = he_ltf << HE_LTF_SIZE_SHIFT;
- ppdu_info->ltf_size = he_ltf;
- ppdu_info->he_data5 |= value;
-
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
- value = (value << HE_LTF_SYM_SHIFT);
- ppdu_info->he_data5 |= value;
-
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR);
- value = value << HE_PRE_FEC_PAD_SHIFT;
- ppdu_info->he_data5 |= value;
-
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF);
- value = value << HE_TXBF_SHIFT;
- ppdu_info->he_data5 |= value;
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM);
- value = value << HE_PE_DISAMBIGUITY_SHIFT;
- ppdu_info->he_data5 |= value;
-
- /* data6 */
- value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
- value++;
- ppdu_info->he_data6 = value;
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND);
- value = value << HE_DOPPLER_SHIFT;
- ppdu_info->he_data6 |= value;
- value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION);
- value = value << HE_TXOP_SHIFT;
- ppdu_info->he_data6 |= value;
-
- ppdu_info->mcs =
- u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS);
- ppdu_info->bw =
- u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW);
- ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING);
- ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC);
- ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF);
- dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
- ppdu_info->nss = u32_get_bits(info0,
- HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS) + 1;
- ppdu_info->dcm = dcm;
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_u_sig_cmn(const struct hal_mon_usig_cmn *cmn,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- u32 common;
-
- ppdu_info->u_sig_info.bw = le32_get_bits(cmn->info0,
- HAL_RX_USIG_CMN_INFO0_BW);
- ppdu_info->u_sig_info.ul_dl = le32_get_bits(cmn->info0,
- HAL_RX_USIG_CMN_INFO0_UL_DL);
-
- common = __le32_to_cpu(ppdu_info->u_sig_info.usig.common);
- common |= IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN |
- IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN |
- IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
- IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN |
- IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN |
- ATH12K_LE32_DEC_ENC(cmn->info0,
- HAL_RX_USIG_CMN_INFO0_PHY_VERSION,
- IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) |
- u32_encode_bits(ppdu_info->u_sig_info.bw,
- IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) |
- u32_encode_bits(ppdu_info->u_sig_info.ul_dl,
- IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL) |
- ATH12K_LE32_DEC_ENC(cmn->info0,
- HAL_RX_USIG_CMN_INFO0_BSS_COLOR,
- IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR) |
- ATH12K_LE32_DEC_ENC(cmn->info0,
- HAL_RX_USIG_CMN_INFO0_TXOP,
- IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
- ppdu_info->u_sig_info.usig.common = cpu_to_le32(common);
-
- switch (ppdu_info->u_sig_info.bw) {
- default:
- fallthrough;
- case HAL_EHT_BW_20:
- ppdu_info->bw = HAL_RX_BW_20MHZ;
- break;
- case HAL_EHT_BW_40:
- ppdu_info->bw = HAL_RX_BW_40MHZ;
- break;
- case HAL_EHT_BW_80:
- ppdu_info->bw = HAL_RX_BW_80MHZ;
- break;
- case HAL_EHT_BW_160:
- ppdu_info->bw = HAL_RX_BW_160MHZ;
- break;
- case HAL_EHT_BW_320_1:
- case HAL_EHT_BW_320_2:
- ppdu_info->bw = HAL_RX_BW_320MHZ;
- break;
- }
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_u_sig_tb(const struct hal_mon_usig_tb *usig_tb,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig;
- enum ieee80211_radiotap_eht_usig_tb spatial_reuse1, spatial_reuse2;
- u32 common, value, mask;
-
- spatial_reuse1 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1;
- spatial_reuse2 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2;
-
- common = __le32_to_cpu(usig->common);
- value = __le32_to_cpu(usig->value);
- mask = __le32_to_cpu(usig->mask);
-
- ppdu_info->u_sig_info.ppdu_type_comp_mode =
- le32_get_bits(usig_tb->info0,
- HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE);
-
- common |= ATH12K_LE32_DEC_ENC(usig_tb->info0,
- HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS,
- IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
-
- value |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD |
- u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode,
- IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE) |
- IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE |
- ATH12K_LE32_DEC_ENC(usig_tb->info0,
- HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1,
- spatial_reuse1) |
- ATH12K_LE32_DEC_ENC(usig_tb->info0,
- HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2,
- spatial_reuse2) |
- IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD |
- ATH12K_LE32_DEC_ENC(usig_tb->info0,
- HAL_RX_USIG_TB_INFO0_CRC,
- IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC) |
- ATH12K_LE32_DEC_ENC(usig_tb->info0,
- HAL_RX_USIG_TB_INFO0_TAIL,
- IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL);
-
- mask |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD |
- IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE |
- IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE |
- spatial_reuse1 | spatial_reuse2 |
- IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD |
- IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC |
- IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL;
-
- usig->common = cpu_to_le32(common);
- usig->value = cpu_to_le32(value);
- usig->mask = cpu_to_le32(mask);
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_u_sig_mu(const struct hal_mon_usig_mu *usig_mu,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig;
- enum ieee80211_radiotap_eht_usig_mu sig_symb, punc;
- u32 common, value, mask;
-
- sig_symb = IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS;
- punc = IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO;
-
- common = __le32_to_cpu(usig->common);
- value = __le32_to_cpu(usig->value);
- mask = __le32_to_cpu(usig->mask);
-
- ppdu_info->u_sig_info.ppdu_type_comp_mode =
- le32_get_bits(usig_mu->info0,
- HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE);
- ppdu_info->u_sig_info.eht_sig_mcs =
- le32_get_bits(usig_mu->info0,
- HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS);
- ppdu_info->u_sig_info.num_eht_sig_sym =
- le32_get_bits(usig_mu->info0,
- HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM);
-
- common |= ATH12K_LE32_DEC_ENC(usig_mu->info0,
- HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS,
- IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
-
- value |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD |
- IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE |
- u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode,
- IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE) |
- IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE |
- ATH12K_LE32_DEC_ENC(usig_mu->info0,
- HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO,
- punc) |
- IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE |
- u32_encode_bits(ppdu_info->u_sig_info.eht_sig_mcs,
- IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS) |
- u32_encode_bits(ppdu_info->u_sig_info.num_eht_sig_sym,
- sig_symb) |
- ATH12K_LE32_DEC_ENC(usig_mu->info0,
- HAL_RX_USIG_MU_INFO0_CRC,
- IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC) |
- ATH12K_LE32_DEC_ENC(usig_mu->info0,
- HAL_RX_USIG_MU_INFO0_TAIL,
- IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL);
-
- mask |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD |
- IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE |
- IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE |
- IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE |
- punc |
- IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE |
- IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS |
- sig_symb |
- IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC |
- IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL;
-
- usig->common = cpu_to_le32(common);
- usig->value = cpu_to_le32(value);
- usig->mask = cpu_to_le32(mask);
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_u_sig_hdr(const struct hal_mon_usig_hdr *usig,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- u8 comp_mode;
-
- ppdu_info->eht_usig = true;
-
- ath12k_dp_mon_hal_rx_parse_u_sig_cmn(&usig->cmn, ppdu_info);
-
- comp_mode = le32_get_bits(usig->non_cmn.mu.info0,
- HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE);
-
- if (comp_mode == 0 && ppdu_info->u_sig_info.ul_dl)
- ath12k_dp_mon_hal_rx_parse_u_sig_tb(&usig->non_cmn.tb, ppdu_info);
- else
- ath12k_dp_mon_hal_rx_parse_u_sig_mu(&usig->non_cmn.mu, ppdu_info);
-}
-
-static void
-ath12k_dp_mon_hal_aggr_tlv(struct hal_rx_mon_ppdu_info *ppdu_info,
- u16 tlv_len, const void *tlv_data)
-{
- if (tlv_len <= HAL_RX_MON_MAX_AGGR_SIZE - ppdu_info->tlv_aggr.cur_len) {
- memcpy(ppdu_info->tlv_aggr.buf + ppdu_info->tlv_aggr.cur_len,
- tlv_data, tlv_len);
- ppdu_info->tlv_aggr.cur_len += tlv_len;
- }
-}
-
-static inline bool
-ath12k_dp_mon_hal_rx_is_frame_type_ndp(const struct hal_rx_u_sig_info *usig_info)
-{
- if (usig_info->ppdu_type_comp_mode == 1 &&
- usig_info->eht_sig_mcs == 0 &&
- usig_info->num_eht_sig_sym == 0)
- return true;
-
- return false;
-}
-
-static inline bool
-ath12k_dp_mon_hal_rx_is_non_ofdma(const struct hal_rx_u_sig_info *usig_info)
-{
- u32 ppdu_type_comp_mode = usig_info->ppdu_type_comp_mode;
- u32 ul_dl = usig_info->ul_dl;
-
- if ((ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 0) ||
- (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_OFDMA && ul_dl == 0) ||
- (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 1))
- return true;
-
- return false;
-}
-
-static inline bool
-ath12k_dp_mon_hal_rx_is_ofdma(const struct hal_rx_u_sig_info *usig_info)
-{
- if (usig_info->ppdu_type_comp_mode == 0 && usig_info->ul_dl == 0)
- return true;
-
- return false;
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(const struct hal_eht_sig_ndp_cmn_eb *eht_sig_ndp,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
- u32 known, data;
-
- known = __le32_to_cpu(eht->known);
- known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE |
- IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF |
- IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
- IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S |
- IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_S |
- IEEE80211_RADIOTAP_EHT_KNOWN_CRC1 |
- IEEE80211_RADIOTAP_EHT_KNOWN_TAIL1;
- eht->known = cpu_to_le32(known);
-
- data = __le32_to_cpu(eht->data[0]);
- data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
- HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE,
- IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
- /* GI and LTF size are separately indicated in radiotap header
- * and hence will be parsed from other TLV
- */
- data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
- HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM,
- IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
-
- data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
- HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC,
- IEEE80211_RADIOTAP_EHT_DATA0_CRC1_O);
-
- data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
- HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD,
- IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_S);
- eht->data[0] = cpu_to_le32(data);
-
- data = __le32_to_cpu(eht->data[7]);
- data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
- HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS,
- IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
-
- data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
- HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED,
- IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
- eht->data[7] = cpu_to_le32(data);
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_usig_overflow(const struct hal_eht_sig_usig_overflow *ovflow,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
- u32 known, data;
-
- known = __le32_to_cpu(eht->known);
- known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE |
- IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF |
- IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM |
- IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM |
- IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM |
- IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_O;
- eht->known = cpu_to_le32(known);
-
- data = __le32_to_cpu(eht->data[0]);
- data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
- HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE,
- IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
-
- /* GI and LTF size are separately indicated in radiotap header
- * and hence will be parsed from other TLV
- */
- data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
- HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM,
- IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
-
- data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
- HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM,
- IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);
-
- data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
- HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR,
- IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);
-
- data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
- HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY,
- IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);
-
- data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
- HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD,
- IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_O);
- eht->data[0] = cpu_to_le32(data);
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_non_ofdma_users(const struct hal_eht_sig_non_ofdma_cmn_eb *eb,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
- u32 known, data;
-
- known = __le32_to_cpu(eht->known);
- known |= IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M;
- eht->known = cpu_to_le32(known);
-
- data = __le32_to_cpu(eht->data[7]);
- data |= ATH12K_LE32_DEC_ENC(eb->info0,
- HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS,
- IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);
- eht->data[7] = cpu_to_le32(data);
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(const struct hal_eht_sig_mu_mimo *user,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info;
- u32 user_idx;
-
- if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info))
- return;
-
- user_idx = eht_info->num_user_info++;
-
- eht_info->user_info[user_idx] |=
- IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN |
- IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
- IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
- IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_KNOWN_M |
- ATH12K_LE32_DEC_ENC(user->info0,
- HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID,
- IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) |
- ATH12K_LE32_DEC_ENC(user->info0,
- HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING,
- IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) |
- ATH12K_LE32_DEC_ENC(user->info0,
- HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS,
- IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
- ATH12K_LE32_DEC_ENC(user->info0,
- HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING,
- IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_M);
-
- ppdu_info->mcs = le32_get_bits(user->info0,
- HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS);
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(const struct hal_eht_sig_non_mu_mimo *user,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info;
- u32 user_idx;
-
- if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info))
- return;
-
- user_idx = eht_info->num_user_info++;
-
- eht_info->user_info[user_idx] |=
- IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN |
- IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
- IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
- IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
- IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
- ATH12K_LE32_DEC_ENC(user->info0,
- HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID,
- IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) |
- ATH12K_LE32_DEC_ENC(user->info0,
- HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING,
- IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) |
- ATH12K_LE32_DEC_ENC(user->info0,
- HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS,
- IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
- ATH12K_LE32_DEC_ENC(user->info0,
- HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS,
- IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O) |
- ATH12K_LE32_DEC_ENC(user->info0,
- HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED,
- IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);
-
- ppdu_info->mcs = le32_get_bits(user->info0,
- HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS);
-
- ppdu_info->nss = le32_get_bits(user->info0,
- HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS) + 1;
-}
-
-static inline bool
-ath12k_dp_mon_hal_rx_is_mu_mimo_user(const struct hal_rx_u_sig_info *usig_info)
-{
- if (usig_info->ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_SU &&
- usig_info->ul_dl == 1)
- return true;
-
- return false;
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(const void *tlv,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- const struct hal_eht_sig_non_ofdma_cmn_eb *eb = tlv;
-
- ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info);
- ath12k_dp_mon_hal_rx_parse_non_ofdma_users(eb, ppdu_info);
-
- if (ath12k_dp_mon_hal_rx_is_mu_mimo_user(&ppdu_info->u_sig_info))
- ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(&eb->user_field.mu_mimo,
- ppdu_info);
- else
- ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&eb->user_field.n_mu_mimo,
- ppdu_info);
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_ru_allocation(const struct hal_eht_sig_ofdma_cmn_eb *eb,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- const struct hal_eht_sig_ofdma_cmn_eb1 *ofdma_cmn_eb1 = &eb->eb1;
- const struct hal_eht_sig_ofdma_cmn_eb2 *ofdma_cmn_eb2 = &eb->eb2;
- struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
- enum ieee80211_radiotap_eht_data ru_123, ru_124, ru_125, ru_126;
- enum ieee80211_radiotap_eht_data ru_121, ru_122, ru_112, ru_111;
- u32 data;
-
- ru_123 = IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3;
- ru_124 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4;
- ru_125 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5;
- ru_126 = IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6;
- ru_121 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1;
- ru_122 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2;
- ru_112 = IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2;
- ru_111 = IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1;
-
- switch (ppdu_info->u_sig_info.bw) {
- case HAL_EHT_BW_320_2:
- case HAL_EHT_BW_320_1:
- data = __le32_to_cpu(eht->data[4]);
- /* CC1 2::3 */
- data |= IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN |
- ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
- HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3,
- ru_123);
- eht->data[4] = cpu_to_le32(data);
-
- data = __le32_to_cpu(eht->data[5]);
- /* CC1 2::4 */
- data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN |
- ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
- HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4,
- ru_124);
-
- /* CC1 2::5 */
- data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN |
- ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
- HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5,
- ru_125);
- eht->data[5] = cpu_to_le32(data);
-
- data = __le32_to_cpu(eht->data[6]);
- /* CC1 2::6 */
- data |= IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN |
- ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
- HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6,
- ru_126);
- eht->data[6] = cpu_to_le32(data);
-
- fallthrough;
- case HAL_EHT_BW_160:
- data = __le32_to_cpu(eht->data[3]);
- /* CC1 2::1 */
- data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN |
- ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
- HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1,
- ru_121);
- /* CC1 2::2 */
- data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2_KNOWN |
- ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
- HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2,
- ru_122);
- eht->data[3] = cpu_to_le32(data);
-
- fallthrough;
- case HAL_EHT_BW_80:
- data = __le32_to_cpu(eht->data[2]);
- /* CC1 1::2 */
- data |= IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2_KNOWN |
- ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0,
- HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2,
- ru_112);
- eht->data[2] = cpu_to_le32(data);
-
- fallthrough;
- case HAL_EHT_BW_40:
- fallthrough;
- case HAL_EHT_BW_20:
- data = __le32_to_cpu(eht->data[1]);
- /* CC1 1::1 */
- data |= IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN |
- ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0,
- HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1,
- ru_111);
- eht->data[1] = cpu_to_le32(data);
- break;
- default:
- break;
- }
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(const void *tlv,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- const struct hal_eht_sig_ofdma_cmn_eb *ofdma = tlv;
-
- ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info);
- ath12k_dp_mon_hal_rx_parse_ru_allocation(ofdma, ppdu_info);
-
- ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&ofdma->user_field.n_mu_mimo,
- ppdu_info);
-}
-
-static void
-ath12k_dp_mon_parse_eht_sig_hdr(struct hal_rx_mon_ppdu_info *ppdu_info,
- const void *tlv_data)
-{
- ppdu_info->is_eht = true;
-
- if (ath12k_dp_mon_hal_rx_is_frame_type_ndp(&ppdu_info->u_sig_info))
- ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(tlv_data, ppdu_info);
- else if (ath12k_dp_mon_hal_rx_is_non_ofdma(&ppdu_info->u_sig_info))
- ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(tlv_data, ppdu_info);
- else if (ath12k_dp_mon_hal_rx_is_ofdma(&ppdu_info->u_sig_info))
- ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(tlv_data, ppdu_info);
-}
-
-static inline enum ath12k_eht_ru_size
-hal_rx_mon_hal_ru_size_to_ath12k_ru_size(u32 hal_ru_size)
-{
- switch (hal_ru_size) {
- case HAL_EHT_RU_26:
- return ATH12K_EHT_RU_26;
- case HAL_EHT_RU_52:
- return ATH12K_EHT_RU_52;
- case HAL_EHT_RU_78:
- return ATH12K_EHT_RU_52_26;
- case HAL_EHT_RU_106:
- return ATH12K_EHT_RU_106;
- case HAL_EHT_RU_132:
- return ATH12K_EHT_RU_106_26;
- case HAL_EHT_RU_242:
- return ATH12K_EHT_RU_242;
- case HAL_EHT_RU_484:
- return ATH12K_EHT_RU_484;
- case HAL_EHT_RU_726:
- return ATH12K_EHT_RU_484_242;
- case HAL_EHT_RU_996:
- return ATH12K_EHT_RU_996;
- case HAL_EHT_RU_996x2:
- return ATH12K_EHT_RU_996x2;
- case HAL_EHT_RU_996x3:
- return ATH12K_EHT_RU_996x3;
- case HAL_EHT_RU_996x4:
- return ATH12K_EHT_RU_996x4;
- case HAL_EHT_RU_NONE:
- return ATH12K_EHT_RU_INVALID;
- case HAL_EHT_RU_996_484:
- return ATH12K_EHT_RU_996_484;
- case HAL_EHT_RU_996x2_484:
- return ATH12K_EHT_RU_996x2_484;
- case HAL_EHT_RU_996x3_484:
- return ATH12K_EHT_RU_996x3_484;
- case HAL_EHT_RU_996_484_242:
- return ATH12K_EHT_RU_996_484_242;
- default:
- return ATH12K_EHT_RU_INVALID;
- }
-}
-
-static inline u32
-hal_rx_ul_ofdma_ru_size_to_width(enum ath12k_eht_ru_size ru_size)
-{
- switch (ru_size) {
- case ATH12K_EHT_RU_26:
- return RU_26;
- case ATH12K_EHT_RU_52:
- return RU_52;
- case ATH12K_EHT_RU_52_26:
- return RU_52_26;
- case ATH12K_EHT_RU_106:
- return RU_106;
- case ATH12K_EHT_RU_106_26:
- return RU_106_26;
- case ATH12K_EHT_RU_242:
- return RU_242;
- case ATH12K_EHT_RU_484:
- return RU_484;
- case ATH12K_EHT_RU_484_242:
- return RU_484_242;
- case ATH12K_EHT_RU_996:
- return RU_996;
- case ATH12K_EHT_RU_996_484:
- return RU_996_484;
- case ATH12K_EHT_RU_996_484_242:
- return RU_996_484_242;
- case ATH12K_EHT_RU_996x2:
- return RU_2X996;
- case ATH12K_EHT_RU_996x2_484:
- return RU_2X996_484;
- case ATH12K_EHT_RU_996x3:
- return RU_3X996;
- case ATH12K_EHT_RU_996x3_484:
- return RU_3X996_484;
- case ATH12K_EHT_RU_996x4:
- return RU_4X996;
- default:
- return RU_INVALID;
- }
-}
-
-static void
-ath12k_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_info,
- u16 user_id,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- struct hal_rx_user_status *mon_rx_user_status = NULL;
- struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
- enum ath12k_eht_ru_size rtap_ru_size = ATH12K_EHT_RU_INVALID;
- u32 ru_width, reception_type, ru_index = HAL_EHT_RU_INVALID;
- u32 ru_type_80_0, ru_start_index_80_0;
- u32 ru_type_80_1, ru_start_index_80_1;
- u32 ru_type_80_2, ru_start_index_80_2;
- u32 ru_type_80_3, ru_start_index_80_3;
- u32 ru_size = 0, num_80mhz_with_ru = 0;
- u64 ru_index_320mhz = 0;
- u32 ru_index_per80mhz;
-
- reception_type = le32_get_bits(rx_usr_info->info0,
- HAL_RX_USR_INFO0_RECEPTION_TYPE);
-
- switch (reception_type) {
- case HAL_RECEPTION_TYPE_SU:
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
- break;
- case HAL_RECEPTION_TYPE_DL_MU_MIMO:
- case HAL_RECEPTION_TYPE_UL_MU_MIMO:
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
- break;
- case HAL_RECEPTION_TYPE_DL_MU_OFMA:
- case HAL_RECEPTION_TYPE_UL_MU_OFDMA:
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
- break;
- case HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO:
- case HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO:
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO;
- }
-
- ppdu_info->is_stbc = le32_get_bits(rx_usr_info->info0, HAL_RX_USR_INFO0_STBC);
- ppdu_info->ldpc = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_LDPC);
- ppdu_info->dcm = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_STA_DCM);
- ppdu_info->bw = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_RX_BW);
- ppdu_info->mcs = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_MCS);
- ppdu_info->nss = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_NSS) + 1;
-
- if (user_id < HAL_MAX_UL_MU_USERS) {
- mon_rx_user_status = &ppdu_info->userstats[user_id];
- mon_rx_user_status->mcs = ppdu_info->mcs;
- mon_rx_user_status->nss = ppdu_info->nss;
- }
-
- if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO ||
- ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
- ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO))
- return;
-
- /* RU allocation present only for OFDMA reception */
- ru_type_80_0 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_0);
- ru_start_index_80_0 = le32_get_bits(rx_usr_info->info3,
- HAL_RX_USR_INFO3_RU_START_IDX_80_0);
- if (ru_type_80_0 != HAL_EHT_RU_NONE) {
- ru_size += ru_type_80_0;
- ru_index_per80mhz = ru_start_index_80_0;
- ru_index = ru_index_per80mhz;
- ru_index_320mhz |= HAL_RU_PER80(ru_type_80_0, 0, ru_index_per80mhz);
- num_80mhz_with_ru++;
- }
-
- ru_type_80_1 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_1);
- ru_start_index_80_1 = le32_get_bits(rx_usr_info->info3,
- HAL_RX_USR_INFO3_RU_START_IDX_80_1);
- if (ru_type_80_1 != HAL_EHT_RU_NONE) {
- ru_size += ru_type_80_1;
- ru_index_per80mhz = ru_start_index_80_1;
- ru_index = ru_index_per80mhz;
- ru_index_320mhz |= HAL_RU_PER80(ru_type_80_1, 1, ru_index_per80mhz);
- num_80mhz_with_ru++;
- }
-
- ru_type_80_2 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_2);
- ru_start_index_80_2 = le32_get_bits(rx_usr_info->info3,
- HAL_RX_USR_INFO3_RU_START_IDX_80_2);
- if (ru_type_80_2 != HAL_EHT_RU_NONE) {
- ru_size += ru_type_80_2;
- ru_index_per80mhz = ru_start_index_80_2;
- ru_index = ru_index_per80mhz;
- ru_index_320mhz |= HAL_RU_PER80(ru_type_80_2, 2, ru_index_per80mhz);
- num_80mhz_with_ru++;
- }
-
- ru_type_80_3 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_3);
- ru_start_index_80_3 = le32_get_bits(rx_usr_info->info2,
- HAL_RX_USR_INFO3_RU_START_IDX_80_3);
- if (ru_type_80_3 != HAL_EHT_RU_NONE) {
- ru_size += ru_type_80_3;
- ru_index_per80mhz = ru_start_index_80_3;
- ru_index = ru_index_per80mhz;
- ru_index_320mhz |= HAL_RU_PER80(ru_type_80_3, 3, ru_index_per80mhz);
- num_80mhz_with_ru++;
- }
-
- if (num_80mhz_with_ru > 1) {
- /* Calculate the MRU index */
- switch (ru_index_320mhz) {
- case HAL_EHT_RU_996_484_0:
- case HAL_EHT_RU_996x2_484_0:
- case HAL_EHT_RU_996x3_484_0:
- ru_index = 0;
- break;
- case HAL_EHT_RU_996_484_1:
- case HAL_EHT_RU_996x2_484_1:
- case HAL_EHT_RU_996x3_484_1:
- ru_index = 1;
- break;
- case HAL_EHT_RU_996_484_2:
- case HAL_EHT_RU_996x2_484_2:
- case HAL_EHT_RU_996x3_484_2:
- ru_index = 2;
- break;
- case HAL_EHT_RU_996_484_3:
- case HAL_EHT_RU_996x2_484_3:
- case HAL_EHT_RU_996x3_484_3:
- ru_index = 3;
- break;
- case HAL_EHT_RU_996_484_4:
- case HAL_EHT_RU_996x2_484_4:
- case HAL_EHT_RU_996x3_484_4:
- ru_index = 4;
- break;
- case HAL_EHT_RU_996_484_5:
- case HAL_EHT_RU_996x2_484_5:
- case HAL_EHT_RU_996x3_484_5:
- ru_index = 5;
- break;
- case HAL_EHT_RU_996_484_6:
- case HAL_EHT_RU_996x2_484_6:
- case HAL_EHT_RU_996x3_484_6:
- ru_index = 6;
- break;
- case HAL_EHT_RU_996_484_7:
- case HAL_EHT_RU_996x2_484_7:
- case HAL_EHT_RU_996x3_484_7:
- ru_index = 7;
- break;
- case HAL_EHT_RU_996x2_484_8:
- ru_index = 8;
- break;
- case HAL_EHT_RU_996x2_484_9:
- ru_index = 9;
- break;
- case HAL_EHT_RU_996x2_484_10:
- ru_index = 10;
- break;
- case HAL_EHT_RU_996x2_484_11:
- ru_index = 11;
- break;
- default:
- ru_index = HAL_EHT_RU_INVALID;
- break;
- }
-
- ru_size += 4;
- }
-
- rtap_ru_size = hal_rx_mon_hal_ru_size_to_ath12k_ru_size(ru_size);
- if (rtap_ru_size != ATH12K_EHT_RU_INVALID) {
- u32 known, data;
-
- known = __le32_to_cpu(eht->known);
- known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_SIZE_OM;
- eht->known = cpu_to_le32(known);
-
- data = __le32_to_cpu(eht->data[1]);
- data |= u32_encode_bits(rtap_ru_size,
- IEEE80211_RADIOTAP_EHT_DATA1_RU_SIZE);
- eht->data[1] = cpu_to_le32(data);
- }
-
- if (ru_index != HAL_EHT_RU_INVALID) {
- u32 known, data;
-
- known = __le32_to_cpu(eht->known);
- known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_INDEX_OM;
- eht->known = cpu_to_le32(known);
-
- data = __le32_to_cpu(eht->data[1]);
- data |= u32_encode_bits(rtap_ru_size,
- IEEE80211_RADIOTAP_EHT_DATA1_RU_INDEX);
- eht->data[1] = cpu_to_le32(data);
- }
-
- if (mon_rx_user_status && ru_index != HAL_EHT_RU_INVALID &&
- rtap_ru_size != ATH12K_EHT_RU_INVALID) {
- mon_rx_user_status->ul_ofdma_ru_start_index = ru_index;
- mon_rx_user_status->ul_ofdma_ru_size = rtap_ru_size;
-
- ru_width = hal_rx_ul_ofdma_ru_size_to_width(rtap_ru_size);
-
- mon_rx_user_status->ul_ofdma_ru_width = ru_width;
- mon_rx_user_status->ofdma_info_valid = 1;
- }
-}
-
-static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap)
-{
- if (info & RX_MSDU_END_INFO13_FCS_ERR)
- *errmap |= HAL_RX_MPDU_ERR_FCS;
-
- if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
- *errmap |= HAL_RX_MPDU_ERR_DECRYPT;
-
- if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
- *errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
-
- if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
- *errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
-
- if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
- *errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
-
- if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
- *errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
-
- if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
- *errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
-}
-
-static void
-ath12k_parse_cmn_usr_info(const struct hal_phyrx_common_user_info *cmn_usr_info,
- struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
- u32 known, data, cp_setting, ltf_size;
-
- known = __le32_to_cpu(eht->known);
- known |= IEEE80211_RADIOTAP_EHT_KNOWN_GI |
- IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF;
- eht->known = cpu_to_le32(known);
-
- cp_setting = le32_get_bits(cmn_usr_info->info0,
- HAL_RX_CMN_USR_INFO0_CP_SETTING);
- ltf_size = le32_get_bits(cmn_usr_info->info0,
- HAL_RX_CMN_USR_INFO0_LTF_SIZE);
-
- data = __le32_to_cpu(eht->data[0]);
- data |= u32_encode_bits(cp_setting, IEEE80211_RADIOTAP_EHT_DATA0_GI);
- data |= u32_encode_bits(ltf_size, IEEE80211_RADIOTAP_EHT_DATA0_LTF);
- eht->data[0] = cpu_to_le32(data);
-
- if (!ppdu_info->ltf_size)
- ppdu_info->ltf_size = ltf_size;
- if (!ppdu_info->gi)
- ppdu_info->gi = cp_setting;
-}
-
-static void
-ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon,
- const struct hal_rx_msdu_end *msdu_end)
-{
- ath12k_dp_mon_parse_rx_msdu_end_err(__le32_to_cpu(msdu_end->info2),
- &pmon->err_bitmap);
- pmon->decap_format = le32_get_bits(msdu_end->info1,
- RX_MSDU_END_INFO11_DECAP_FORMAT);
-}
-
-static enum hal_rx_mon_status
-ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
- struct ath12k_mon_data *pmon,
- const struct hal_tlv_64_hdr *tlv)
-{
- struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- const void *tlv_data = tlv->value;
- u32 info[7], userid;
- u16 tlv_tag, tlv_len;
-
- tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
- tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
- userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID);
-
- if (ppdu_info->tlv_aggr.in_progress && ppdu_info->tlv_aggr.tlv_tag != tlv_tag) {
- ath12k_dp_mon_parse_eht_sig_hdr(ppdu_info, ppdu_info->tlv_aggr.buf);
-
- ppdu_info->tlv_aggr.in_progress = false;
- ppdu_info->tlv_aggr.cur_len = 0;
- }
-
- switch (tlv_tag) {
- case HAL_RX_PPDU_START: {
- const struct hal_rx_ppdu_start *ppdu_start = tlv_data;
-
- u64 ppdu_ts = ath12k_le32hilo_to_u64(ppdu_start->ppdu_start_ts_63_32,
- ppdu_start->ppdu_start_ts_31_0);
-
- info[0] = __le32_to_cpu(ppdu_start->info0);
-
- ppdu_info->ppdu_id = u32_get_bits(info[0],
- HAL_RX_PPDU_START_INFO0_PPDU_ID);
-
- info[1] = __le32_to_cpu(ppdu_start->info1);
- ppdu_info->chan_num = u32_get_bits(info[1],
- HAL_RX_PPDU_START_INFO1_CHAN_NUM);
- ppdu_info->freq = u32_get_bits(info[1],
- HAL_RX_PPDU_START_INFO1_CHAN_FREQ);
- ppdu_info->ppdu_ts = ppdu_ts;
-
- if (ppdu_info->ppdu_id != ppdu_info->last_ppdu_id) {
- ppdu_info->last_ppdu_id = ppdu_info->ppdu_id;
- ppdu_info->num_users = 0;
- memset(&ppdu_info->mpdu_fcs_ok_bitmap, 0,
- HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
- sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0]));
- }
- break;
- }
- case HAL_RX_PPDU_END_USER_STATS: {
- const struct hal_rx_ppdu_end_user_stats *eu_stats = tlv_data;
- u32 tid_bitmap;
-
- info[0] = __le32_to_cpu(eu_stats->info0);
- info[1] = __le32_to_cpu(eu_stats->info1);
- info[2] = __le32_to_cpu(eu_stats->info2);
- info[4] = __le32_to_cpu(eu_stats->info4);
- info[5] = __le32_to_cpu(eu_stats->info5);
- info[6] = __le32_to_cpu(eu_stats->info6);
-
- ppdu_info->ast_index =
- u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX);
- ppdu_info->fc_valid =
- u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID);
- tid_bitmap = u32_get_bits(info[6],
- HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP);
- ppdu_info->tid = ffs(tid_bitmap) - 1;
- ppdu_info->tcp_msdu_count =
- u32_get_bits(info[4],
- HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT);
- ppdu_info->udp_msdu_count =
- u32_get_bits(info[4],
- HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT);
- ppdu_info->other_msdu_count =
- u32_get_bits(info[5],
- HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT);
- ppdu_info->tcp_ack_msdu_count =
- u32_get_bits(info[5],
- HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT);
- ppdu_info->preamble_type =
- u32_get_bits(info[1],
- HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE);
- ppdu_info->num_mpdu_fcs_ok =
- u32_get_bits(info[1],
- HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK);
- ppdu_info->num_mpdu_fcs_err =
- u32_get_bits(info[0],
- HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR);
- ppdu_info->peer_id =
- u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID);
-
- switch (ppdu_info->preamble_type) {
- case HAL_RX_PREAMBLE_11N:
- ppdu_info->ht_flags = 1;
- break;
- case HAL_RX_PREAMBLE_11AC:
- ppdu_info->vht_flags = 1;
- break;
- case HAL_RX_PREAMBLE_11AX:
- ppdu_info->he_flags = 1;
- break;
- case HAL_RX_PREAMBLE_11BE:
- ppdu_info->is_eht = true;
- break;
- default:
- break;
- }
-
- if (userid < HAL_MAX_UL_MU_USERS) {
- struct hal_rx_user_status *rxuser_stats =
- &ppdu_info->userstats[userid];
-
- if (ppdu_info->num_mpdu_fcs_ok > 1 ||
- ppdu_info->num_mpdu_fcs_err > 1)
- ppdu_info->userstats[userid].ampdu_present = true;
-
- ppdu_info->num_users += 1;
-
- ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats);
- ath12k_dp_mon_rx_populate_mu_user_info(eu_stats, ppdu_info,
- rxuser_stats);
- }
- ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]);
- ppdu_info->mpdu_fcs_ok_bitmap[1] = __le32_to_cpu(eu_stats->rsvd1[1]);
- break;
- }
- case HAL_RX_PPDU_END_USER_STATS_EXT: {
- const struct hal_rx_ppdu_end_user_stats_ext *eu_stats = tlv_data;
-
- ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1);
- ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2);
- ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3);
- ppdu_info->mpdu_fcs_ok_bitmap[5] = __le32_to_cpu(eu_stats->info4);
- ppdu_info->mpdu_fcs_ok_bitmap[6] = __le32_to_cpu(eu_stats->info5);
- ppdu_info->mpdu_fcs_ok_bitmap[7] = __le32_to_cpu(eu_stats->info6);
- break;
- }
- case HAL_PHYRX_HT_SIG:
- ath12k_dp_mon_parse_ht_sig(tlv_data, ppdu_info);
- break;
-
- case HAL_PHYRX_L_SIG_B:
- ath12k_dp_mon_parse_l_sig_b(tlv_data, ppdu_info);
- break;
-
- case HAL_PHYRX_L_SIG_A:
- ath12k_dp_mon_parse_l_sig_a(tlv_data, ppdu_info);
- break;
-
- case HAL_PHYRX_VHT_SIG_A:
- ath12k_dp_mon_parse_vht_sig_a(tlv_data, ppdu_info);
- break;
-
- case HAL_PHYRX_HE_SIG_A_SU:
- ath12k_dp_mon_parse_he_sig_su(tlv_data, ppdu_info);
- break;
-
- case HAL_PHYRX_HE_SIG_A_MU_DL:
- ath12k_dp_mon_parse_he_sig_mu(tlv_data, ppdu_info);
- break;
-
- case HAL_PHYRX_HE_SIG_B1_MU:
- ath12k_dp_mon_parse_he_sig_b1_mu(tlv_data, ppdu_info);
- break;
-
- case HAL_PHYRX_HE_SIG_B2_MU:
- ath12k_dp_mon_parse_he_sig_b2_mu(tlv_data, ppdu_info);
- break;
-
- case HAL_PHYRX_HE_SIG_B2_OFDMA:
- ath12k_dp_mon_parse_he_sig_b2_ofdma(tlv_data, ppdu_info);
- break;
-
- case HAL_PHYRX_RSSI_LEGACY: {
- const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data;
-
- info[0] = __le32_to_cpu(rssi->info0);
- info[2] = __le32_to_cpu(rssi->info2);
-
- /* TODO: The combined RSSI is not accurate in the MU case; RSSI for
-  * MU must be retrieved from the PHYRX_OTHER_RECEIVE_INFO TLV.
-  */
- ppdu_info->rssi_comb =
- u32_get_bits(info[2],
- HAL_RX_RSSI_LEGACY_INFO_INFO2_RSSI_COMB_PPDU);
-
- ppdu_info->bw = u32_get_bits(info[0],
- HAL_RX_RSSI_LEGACY_INFO_INFO0_RX_BW);
- break;
- }
- case HAL_PHYRX_COMMON_USER_INFO: {
- ath12k_parse_cmn_usr_info(tlv_data, ppdu_info);
- break;
- }
- case HAL_RX_PPDU_START_USER_INFO:
- ath12k_dp_mon_hal_rx_parse_user_info(tlv_data, userid, ppdu_info);
- break;
-
- case HAL_RXPCU_PPDU_END_INFO: {
- const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data;
-
- info[0] = __le32_to_cpu(ppdu_rx_duration->info0);
- ppdu_info->rx_duration =
- u32_get_bits(info[0], HAL_RX_PPDU_END_DURATION);
- ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
- ppdu_info->tsft = (ppdu_info->tsft << 32) |
- __le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
- break;
- }
- case HAL_RX_MPDU_START: {
- const struct hal_rx_mpdu_start *mpdu_start = tlv_data;
- u16 peer_id;
-
- info[1] = __le32_to_cpu(mpdu_start->info1);
- peer_id = u32_get_bits(info[1], HAL_RX_MPDU_START_INFO1_PEERID);
- if (peer_id)
- ppdu_info->peer_id = peer_id;
-
- ppdu_info->mpdu_len += u32_get_bits(info[1],
- HAL_RX_MPDU_START_INFO2_MPDU_LEN);
- if (userid < HAL_MAX_UL_MU_USERS) {
- info[0] = __le32_to_cpu(mpdu_start->info0);
- ppdu_info->userid = userid;
- ppdu_info->userstats[userid].ampdu_id =
- u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID);
- }
-
- return HAL_RX_MON_STATUS_MPDU_START;
- }
- case HAL_RX_MSDU_START:
- /* TODO: add msdu start parsing logic */
- break;
- case HAL_MON_BUF_ADDR:
- return HAL_RX_MON_STATUS_BUF_ADDR;
- case HAL_RX_MSDU_END:
- ath12k_dp_mon_parse_status_msdu_end(pmon, tlv_data);
- return HAL_RX_MON_STATUS_MSDU_END;
- case HAL_RX_MPDU_END:
- return HAL_RX_MON_STATUS_MPDU_END;
- case HAL_PHYRX_GENERIC_U_SIG:
- ath12k_dp_mon_hal_rx_parse_u_sig_hdr(tlv_data, ppdu_info);
- break;
- case HAL_PHYRX_GENERIC_EHT_SIG:
- /* Handle the case where aggregation is in progress or the
-  * current TLV is one of the TLVs that should be aggregated.
-  */
- if (!ppdu_info->tlv_aggr.in_progress) {
- ppdu_info->tlv_aggr.in_progress = true;
- ppdu_info->tlv_aggr.tlv_tag = tlv_tag;
- ppdu_info->tlv_aggr.cur_len = 0;
- }
-
- ppdu_info->is_eht = true;
-
- ath12k_dp_mon_hal_aggr_tlv(ppdu_info, tlv_len, tlv_data);
- break;
- case HAL_DUMMY:
- return HAL_RX_MON_STATUS_BUF_DONE;
- case HAL_RX_PPDU_END_STATUS_DONE:
- case 0:
- return HAL_RX_MON_STATUS_PPDU_DONE;
- default:
- break;
- }
-
- return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
-}
-
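
Every case in the removed parser above follows the same decode idiom: convert each little-endian info word to CPU order once, then pull fields out with mask helpers, occasionally joining a hi/lo pair into a 64-bit value (as for the PPDU start timestamp). A standalone sketch of that idiom; the mask value and helper names are hypothetical stand-ins, not the driver's HAL_* definitions:

#include <stdint.h>

/* hypothetical field mask; the driver uses GENMASK()-built HAL_* masks */
#define EX_INFO0_PPDU_ID_MASK	0x0000ffffu

/* u32_get_bits() analogue: extract the field under a contiguous mask */
static inline uint32_t ex_get_bits(uint32_t word, uint32_t mask)
{
	return (word & mask) / (mask & -mask);
}

/* join two 32-bit halves, as done for ppdu_start_ts_63_32/_31_0 */
static inline uint64_t ex_le32hilo_to_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

static uint32_t ex_ppdu_id(uint32_t info0)
{
	return ex_get_bits(info0, EX_INFO0_PPDU_ID_MASK);
}
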
-static void
-ath12k_dp_mon_fill_rx_stats_info(struct ath12k *ar,
- struct hal_rx_mon_ppdu_info *ppdu_info,
+ath12k_dp_mon_fill_rx_stats_info(struct hal_rx_mon_ppdu_info *ppdu_info,
struct ieee80211_rx_status *rx_status)
{
u32 center_freq = ppdu_info->freq;
@@ -1781,7 +36,7 @@ ath12k_dp_mon_fill_rx_stats_info(struct ath12k *ar,
}
}
-static struct sk_buff
+struct sk_buff
*ath12k_dp_rx_alloc_mon_status_buf(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *rx_ring,
int *buf_id)
@@ -1824,48 +79,9 @@ fail_free_skb:
fail_alloc_skb:
return NULL;
}
+EXPORT_SYMBOL(ath12k_dp_rx_alloc_mon_status_buf);
-static enum dp_mon_status_buf_state
-ath12k_dp_rx_mon_buf_done(struct ath12k_base *ab, struct hal_srng *srng,
- struct dp_rxdma_mon_ring *rx_ring)
-{
- struct ath12k_skb_rxcb *rxcb;
- struct hal_tlv_64_hdr *tlv;
- struct sk_buff *skb;
- void *status_desc;
- dma_addr_t paddr;
- u32 cookie;
- int buf_id;
- u8 rbm;
-
- status_desc = ath12k_hal_srng_src_next_peek(ab, srng);
- if (!status_desc)
- return DP_MON_STATUS_NO_DMA;
-
- ath12k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
-
- buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
-
- spin_lock_bh(&rx_ring->idr_lock);
- skb = idr_find(&rx_ring->bufs_idr, buf_id);
- spin_unlock_bh(&rx_ring->idr_lock);
-
- if (!skb)
- return DP_MON_STATUS_NO_DMA;
-
- rxcb = ATH12K_SKB_RXCB(skb);
- dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
-
- tlv = (struct hal_tlv_64_hdr *)skb->data;
- if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) != HAL_RX_STATUS_BUFFER_DONE)
- return DP_MON_STATUS_NO_DMA;
-
- return DP_MON_STATUS_REPLINISH;
-}
-
-static u32 ath12k_dp_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id)
+u32 ath12k_dp_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id)
{
u32 ret = 0;
@@ -1884,26 +100,15 @@ static u32 ath12k_dp_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id)
}
return ret;
}
-
-static
-void ath12k_dp_mon_next_link_desc_get(struct hal_rx_msdu_link *msdu_link,
- dma_addr_t *paddr, u32 *sw_cookie, u8 *rbm,
- struct ath12k_buffer_addr **pp_buf_addr_info)
-{
- struct ath12k_buffer_addr *buf_addr_info;
-
- buf_addr_info = &msdu_link->buf_addr_info;
-
- ath12k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
-
- *pp_buf_addr_info = buf_addr_info;
-}
+EXPORT_SYMBOL(ath12k_dp_mon_comp_ppduid);
static void
-ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
+ath12k_dp_mon_fill_rx_rate(struct ath12k_pdev_dp *dp_pdev,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct ieee80211_rx_status *rx_status)
{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_base *ab = dp->ab;
struct ieee80211_supported_band *sband;
enum rx_msdu_start_pkt_type pkt_type;
u8 rate_mcs, nss, sgi;
@@ -1919,6 +124,8 @@ ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
case RX_MSDU_START_PKT_TYPE_11B:
is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
if (rx_status->band < NUM_NL80211_BANDS) {
+ struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
+
sband = &ar->mac.sbands[rx_status->band];
rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
is_cck);
@@ -1927,7 +134,7 @@ ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
case RX_MSDU_START_PKT_TYPE_11N:
rx_status->encoding = RX_ENC_HT;
if (rate_mcs > ATH12K_HT_MCS_MAX) {
- ath12k_warn(ar->ab,
+ ath12k_warn(ab,
"Received with invalid mcs in HT mode %d\n",
rate_mcs);
break;
@@ -1940,7 +147,7 @@ ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
rx_status->encoding = RX_ENC_VHT;
rx_status->rate_idx = rate_mcs;
if (rate_mcs > ATH12K_VHT_MCS_MAX) {
- ath12k_warn(ar->ab,
+ ath12k_warn(ab,
"Received with invalid mcs in VHT mode %d\n",
rate_mcs);
break;
@@ -1951,7 +158,7 @@ ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
case RX_MSDU_START_PKT_TYPE_11AX:
rx_status->rate_idx = rate_mcs;
if (rate_mcs > ATH12K_HE_MCS_MAX) {
- ath12k_warn(ar->ab,
+ ath12k_warn(ab,
"Received with invalid mcs in HE mode %d\n",
rate_mcs);
break;
@@ -1962,7 +169,7 @@ ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
case RX_MSDU_START_PKT_TYPE_11BE:
rx_status->rate_idx = rate_mcs;
if (rate_mcs > ATH12K_EHT_MCS_MAX) {
- ath12k_warn(ar->ab,
+ ath12k_warn(ab,
"Received with invalid mcs in EHT mode %d\n",
rate_mcs);
break;
@@ -1971,24 +178,24 @@ ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
break;
default:
- ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
"monitor receives invalid preamble type %d",
pkt_type);
break;
}
}
-static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar,
+static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k_base *ab,
struct sk_buff *head_msdu,
struct sk_buff *tail_msdu)
{
u32 rx_pkt_offset, l2_hdr_offset, total_offset;
- rx_pkt_offset = ar->ab->hal.hal_desc_sz;
+ rx_pkt_offset = ab->hal.hal_desc_sz;
l2_hdr_offset =
- ath12k_dp_rx_h_l3pad(ar->ab, (struct hal_rx_desc *)tail_msdu->data);
+ ath12k_dp_rx_h_l3pad(ab, (struct hal_rx_desc *)tail_msdu->data);
- if (ar->ab->hw_params->rxdma1_enable)
+ if (ab->hw_params->rxdma1_enable)
total_offset = ATH12K_MON_RX_PKT_OFFSET;
else
total_offset = rx_pkt_offset + l2_hdr_offset;
@@ -1996,13 +203,14 @@ static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar,
skb_pull(head_msdu, total_offset);
}
-static struct sk_buff *
-ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
+struct sk_buff *
+ath12k_dp_mon_rx_merg_msdus(struct ath12k_pdev_dp *dp_pdev,
struct dp_mon_mpdu *mon_mpdu,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct ieee80211_rx_status *rxs)
{
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_base *ab = dp->ab;
struct sk_buff *msdu, *mpdu_buf, *prev_buf, *head_frag_list;
struct sk_buff *head_msdu, *tail_msdu;
struct hal_rx_desc *rx_desc;
@@ -2019,11 +227,13 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
if (!head_msdu || !tail_msdu)
goto err_merge_fail;
- ath12k_dp_mon_fill_rx_stats_info(ar, ppdu_info, rxs);
+ ath12k_dp_mon_fill_rx_stats_info(ppdu_info, rxs);
if (unlikely(rxs->band == NUM_NL80211_BANDS ||
- !ath12k_ar_to_hw(ar)->wiphy->bands[rxs->band])) {
- ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ !ath12k_pdev_dp_to_hw(dp_pdev)->wiphy->bands[rxs->band])) {
+ struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
+
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
"sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
rxs->band, channel_num, ppdu_info->freq, ar->pdev_idx);
@@ -2041,17 +251,17 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
rxs->freq = ieee80211_channel_to_frequency(channel_num,
rxs->band);
- ath12k_dp_mon_fill_rx_rate(ar, ppdu_info, rxs);
+ ath12k_dp_mon_fill_rx_rate(dp_pdev, ppdu_info, rxs);
if (decap_format == DP_RX_DECAP_TYPE_RAW) {
- ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
+ ath12k_dp_mon_rx_msdus_set_payload(ab, head_msdu, tail_msdu);
prev_buf = head_msdu;
msdu = head_msdu->next;
head_frag_list = NULL;
while (msdu) {
- ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
+ ath12k_dp_mon_rx_msdus_set_payload(ab, head_msdu, tail_msdu);
if (!head_frag_list)
head_frag_list = msdu;
@@ -2075,7 +285,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
rx_desc = (struct hal_rx_desc *)head_msdu->data;
hdr_desc =
- ab->hal_rx_ops->rx_desc_get_msdu_payload(rx_desc);
+ ab->hal.ops->rx_desc_get_msdu_payload(rx_desc);
/* Base size */
wh = (struct ieee80211_hdr_3addr *)hdr_desc;
@@ -2086,7 +296,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
msdu = head_msdu;
while (msdu) {
- ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
+ ath12k_dp_mon_rx_msdus_set_payload(ab, head_msdu, tail_msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
@@ -2121,6 +331,7 @@ err_merge_fail:
}
return NULL;
}
+EXPORT_SYMBOL(ath12k_dp_mon_rx_merg_msdus);
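
In the raw-decap branch above, the MSDUs of one MPDU end up chained on the head skb's frag_list. A reduced sketch of that linking step, assuming only the standard <linux/skbuff.h> API; the driver's real merge also rewrites headers per decap format:

#include <linux/skbuff.h>

static void ex_chain_msdus(struct sk_buff *head)
{
	struct sk_buff *frag_list = head->next, *msdu;
	unsigned int extra = 0;

	if (!frag_list)
		return;

	/* account for every chained MSDU in the head skb's length */
	for (msdu = frag_list; msdu; msdu = msdu->next)
		extra += msdu->len;

	skb_shinfo(head)->frag_list = frag_list;
	head->next = NULL;
	head->len += extra;
	head->data_len += extra;
}
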
static void
ath12k_dp_mon_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
@@ -2170,11 +381,12 @@ ath12k_dp_mon_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
rtap_buf[rtap_len] = rx_status->he_RU[3];
}
-static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
- struct hal_rx_mon_ppdu_info *ppduinfo,
- struct sk_buff *mon_skb,
- struct ieee80211_rx_status *rxs)
+void ath12k_dp_mon_update_radiotap(struct ath12k_pdev_dp *dp_pdev,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct sk_buff *mon_skb,
+ struct ieee80211_rx_status *rxs)
{
+ struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
struct ieee80211_supported_band *sband;
s32 noise_floor;
u8 *ptr = NULL;
@@ -2267,6 +479,9 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
rxs->encoding = RX_ENC_HT;
rxs->rate_idx = ppduinfo->rate;
} else {
+ struct ath12k *ar;
+
+ ar = ath12k_pdev_dp_to_ar(dp_pdev);
rxs->encoding = RX_ENC_LEGACY;
sband = &ar->mac.sbands[rxs->band];
rxs->rate_idx = ath12k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
@@ -2275,13 +490,17 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
rxs->mactime = ppduinfo->tsft;
}
+EXPORT_SYMBOL(ath12k_dp_mon_update_radiotap);
-static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
- struct sk_buff *msdu,
- const struct hal_rx_mon_ppdu_info *ppduinfo,
- struct ieee80211_rx_status *status,
- u8 decap)
+void ath12k_dp_mon_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev,
+ struct napi_struct *napi,
+ struct sk_buff *msdu,
+ const struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct ieee80211_rx_status *status,
+ u8 decap)
{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_base *ab = dp->ab;
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
@@ -2290,10 +509,13 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
struct ieee80211_rx_status *rx_status;
struct ieee80211_radiotap_he *he = NULL;
struct ieee80211_sta *pubsta = NULL;
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct hal_rx_desc_data rx_info;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol_tkip = rxcb->is_eapol;
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
+ u8 addr[ETH_ALEN] = {};
status->link_valid = 0;
@@ -2304,23 +526,28 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
status->flag |= RX_FLAG_RADIOTAP_HE;
}
- spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_peer_find_by_id(ar->ab, ppduinfo->peer_id);
+ ath12k_dp_extract_rx_desc_data(dp->hal, &rx_info, rx_desc, rx_desc);
+
+ rcu_read_lock();
+ spin_lock_bh(&dp->dp_lock);
+ peer = ath12k_dp_rx_h_find_link_peer(dp_pdev, msdu, &rx_info);
if (peer && peer->sta) {
pubsta = peer->sta;
+ memcpy(addr, peer->addr, ETH_ALEN);
if (pubsta->valid_links) {
status->link_valid = 1;
status->link_id = peer->link_id;
}
}
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
+ rcu_read_unlock();
- ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
"rx skb %p len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
- peer ? peer->addr : NULL,
+ addr,
rxcb->tid,
(is_mcbc) ? "mcast" : "ucast",
(status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
@@ -2340,7 +567,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
!!(status->flag & RX_FLAG_MMIC_ERROR),
!!(status->flag & RX_FLAG_AMSDU_MORE));
- ath12k_dbg_dump(ar->ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
+ ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
msdu->data, msdu->len);
rx_status = IEEE80211_SKB_RXCB(msdu);
*rx_status = *status;
@@ -2356,66 +583,11 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
- ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
+ ieee80211_rx_napi(ath12k_pdev_dp_to_hw(dp_pdev), pubsta, msdu, napi);
}
+EXPORT_SYMBOL(ath12k_dp_mon_rx_deliver_msdu);
-static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
- struct dp_mon_mpdu *mon_mpdu,
- struct hal_rx_mon_ppdu_info *ppduinfo,
- struct napi_struct *napi)
-{
- struct ath12k_pdev_dp *dp = &ar->dp;
- struct sk_buff *mon_skb, *skb_next, *header;
- struct ieee80211_rx_status *rxs = &dp->rx_status;
- u8 decap = DP_RX_DECAP_TYPE_RAW;
-
- mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mon_mpdu, ppduinfo, rxs);
- if (!mon_skb)
- goto mon_deliver_fail;
-
- header = mon_skb;
- rxs->flag = 0;
-
- if (mon_mpdu->err_bitmap & HAL_RX_MPDU_ERR_FCS)
- rxs->flag = RX_FLAG_FAILED_FCS_CRC;
-
- do {
- skb_next = mon_skb->next;
- if (!skb_next)
- rxs->flag &= ~RX_FLAG_AMSDU_MORE;
- else
- rxs->flag |= RX_FLAG_AMSDU_MORE;
-
- if (mon_skb == header) {
- header = NULL;
- rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
- } else {
- rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
- }
- rxs->flag |= RX_FLAG_ONLY_MONITOR;
-
- if (!(rxs->flag & RX_FLAG_ONLY_MONITOR))
- decap = mon_mpdu->decap_format;
-
- ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
- ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, ppduinfo, rxs, decap);
- mon_skb = skb_next;
- } while (mon_skb);
- rxs->flag = 0;
-
- return 0;
-
-mon_deliver_fail:
- mon_skb = mon_mpdu->head;
- while (mon_skb) {
- skb_next = mon_skb->next;
- dev_kfree_skb_any(mon_skb);
- mon_skb = skb_next;
- }
- return -EINVAL;
-}
-
-static int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
+int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
{
if (skb->len > len) {
skb_trim(skb, len);
@@ -2432,40 +604,16 @@ static int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
return 0;
}
+EXPORT_SYMBOL(ath12k_dp_pkt_set_pktlen);
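
ath12k_dp_pkt_set_pktlen() above normalizes an skb to an exact length: trim if too long, grow into tailroom if too short. A sketch of the same pattern, assuming only <linux/skbuff.h>; the driver's exact error handling is partly outside this excerpt:

#include <linux/errno.h>
#include <linux/skbuff.h>

static int ex_set_pktlen(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len) {
		skb_trim(skb, len);		/* too long: cut the tail */
	} else {
		if (skb_tailroom(skb) < len - skb->len)
			return -ENOMEM;		/* cannot grow past tailroom */
		skb_put(skb, len - skb->len);	/* too short: extend */
	}

	return 0;
}
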
-/* The hardware fills buffers in 128-byte aligned blocks, so they must
- * be reaped with 128-byte alignment as well.
- */
-#define RXDMA_DATA_DMA_BLOCK_SIZE 128
-
-static void
-ath12k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
- bool *is_frag, u32 *total_len,
- u32 *frag_len, u32 *msdu_cnt)
-{
- if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
- *is_frag = true;
- *frag_len = (RX_MON_STATUS_BASE_BUF_SIZE -
- sizeof(struct hal_rx_desc)) &
- ~(RXDMA_DATA_DMA_BLOCK_SIZE - 1);
- *total_len += *frag_len;
- } else {
- if (*is_frag)
- *frag_len = info->msdu_len - *total_len;
- else
- *frag_len = info->msdu_len;
-
- *msdu_cnt -= 1;
- }
-}
-
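
The frag_len computation above rounds the usable buffer size down to a whole number of 128-byte DMA blocks with the `& ~(block - 1)` mask. A standalone check of that arithmetic, with assumed sizes (2048-byte status buffer, 136-byte descriptor header; the real RX_MON_STATUS_BASE_BUF_SIZE and hal_rx_desc sizes differ per chip):

#include <assert.h>

#define EX_BLOCK	128u
#define EX_BUF_SIZE	2048u
#define EX_DESC_SZ	136u	/* hypothetical hal_rx_desc size */

int main(void)
{
	unsigned int frag_len = (EX_BUF_SIZE - EX_DESC_SZ) & ~(EX_BLOCK - 1);

	/* 1912 rounded down to the 128-byte grid is 1792 */
	assert(frag_len == 1792);
	return 0;
}
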
-static int
-ath12k_dp_mon_parse_status_buf(struct ath12k *ar,
+int
+ath12k_dp_mon_parse_status_buf(struct ath12k_pdev_dp *dp_pdev,
struct ath12k_mon_data *pmon,
const struct dp_mon_packet_info *packet_info)
{
- struct ath12k_base *ab = ar->ab;
- struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_base *ab = dp->ab;
+ struct dp_rxdma_mon_ring *buf_ring = &dp->rxdma_mon_buf_ring;
struct sk_buff *msdu;
int buf_id;
u32 offset;
@@ -2503,121 +651,7 @@ dest_replenish:
return 0;
}
-
-static int
-ath12k_dp_mon_parse_rx_dest_tlv(struct ath12k *ar,
- struct ath12k_mon_data *pmon,
- enum hal_rx_mon_status hal_status,
- const void *tlv_data)
-{
- switch (hal_status) {
- case HAL_RX_MON_STATUS_MPDU_START:
- if (WARN_ON_ONCE(pmon->mon_mpdu))
- break;
-
- pmon->mon_mpdu = kzalloc(sizeof(*pmon->mon_mpdu), GFP_ATOMIC);
- if (!pmon->mon_mpdu)
- return -ENOMEM;
- break;
- case HAL_RX_MON_STATUS_BUF_ADDR:
- return ath12k_dp_mon_parse_status_buf(ar, pmon, tlv_data);
- case HAL_RX_MON_STATUS_MPDU_END:
- /* If there is no MSDU, free the empty MPDU */
- if (pmon->mon_mpdu->tail) {
- pmon->mon_mpdu->tail->next = NULL;
- list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
- } else {
- kfree(pmon->mon_mpdu);
- }
- pmon->mon_mpdu = NULL;
- break;
- case HAL_RX_MON_STATUS_MSDU_END:
- pmon->mon_mpdu->decap_format = pmon->decap_format;
- pmon->mon_mpdu->err_bitmap = pmon->err_bitmap;
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static enum hal_rx_mon_status
-ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon,
- struct sk_buff *skb)
-{
- struct hal_tlv_64_hdr *tlv;
- struct ath12k_skb_rxcb *rxcb;
- enum hal_rx_mon_status hal_status;
- u16 tlv_tag, tlv_len;
- u8 *ptr = skb->data;
-
- do {
- tlv = (struct hal_tlv_64_hdr *)ptr;
- tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
-
- /* The actual length of PPDU_END is the combined length of the many
-  * PHY TLVs that follow. Skip the TLV header and the
-  * rx_rxpcu_classification_overview that follows it to get to the
-  * next TLV.
-  */
-
- if (tlv_tag == HAL_RX_PPDU_END)
- tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
- else
- tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
-
- hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv);
-
- if (ar->monitor_started && ar->ab->hw_params->rxdma1_enable &&
- ath12k_dp_mon_parse_rx_dest_tlv(ar, pmon, hal_status, tlv->value))
- return HAL_RX_MON_STATUS_PPDU_DONE;
-
- ptr += sizeof(*tlv) + tlv_len;
- ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);
-
- if ((ptr - skb->data) > skb->len)
- break;
-
- } while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) ||
- (hal_status == HAL_RX_MON_STATUS_BUF_ADDR) ||
- (hal_status == HAL_RX_MON_STATUS_MPDU_START) ||
- (hal_status == HAL_RX_MON_STATUS_MPDU_END) ||
- (hal_status == HAL_RX_MON_STATUS_MSDU_END));
-
- rxcb = ATH12K_SKB_RXCB(skb);
- if (rxcb->is_end_of_ppdu)
- hal_status = HAL_RX_MON_STATUS_PPDU_DONE;
-
- return hal_status;
-}
-
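
The removed ath12k_dp_mon_parse_rx_dest() above is a classic TLV walk: read the tag and length out of a packed 64-bit header, dispatch, then advance past header plus payload and re-align the cursor (with HAL_RX_PPDU_END special-cased because its header length covers many following PHY TLVs). A schematic walker under assumed bit positions; the real HAL_TLV_64_HDR_TAG/LEN masks may differ:

#include <stdint.h>
#include <stddef.h>

#define EX_TLV_ALIGN	8u

struct ex_tlv64 {
	uint64_t tl;		/* tag in bits 9:1, len in bits 25:10 (assumed) */
	uint8_t value[];
};

static size_t ex_align(size_t off)
{
	return (off + EX_TLV_ALIGN - 1) & ~(size_t)(EX_TLV_ALIGN - 1);
}

static void ex_walk_tlvs(const uint8_t *buf, size_t len,
			 void (*handle)(uint16_t tag, const uint8_t *val,
					uint16_t vlen))
{
	size_t off = 0;

	while (off + sizeof(struct ex_tlv64) <= len) {
		const struct ex_tlv64 *tlv = (const void *)(buf + off);
		uint16_t tag = (tlv->tl >> 1) & 0x1ff;
		uint16_t vlen = (tlv->tl >> 10) & 0xffff;

		if (off + sizeof(*tlv) + vlen > len)
			break;	/* truncated TLV: stop, as the driver does */

		handle(tag, tlv->value, vlen);
		off = ex_align(off + sizeof(*tlv) + vlen);
	}
}
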
-enum hal_rx_mon_status
-ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
- struct ath12k_mon_data *pmon,
- struct sk_buff *skb,
- struct napi_struct *napi)
-{
- struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- struct dp_mon_mpdu *tmp;
- struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
- enum hal_rx_mon_status hal_status;
-
- hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
- if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE)
- return hal_status;
-
- list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
- list_del(&mon_mpdu->list);
-
- if (mon_mpdu->head && mon_mpdu->tail)
- ath12k_dp_mon_rx_deliver(ar, mon_mpdu, ppdu_info, napi);
-
- kfree(mon_mpdu);
- }
-
- return hal_status;
-}
+EXPORT_SYMBOL(ath12k_dp_mon_parse_status_buf);
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *buf_ring,
@@ -2693,13 +727,14 @@ fail_alloc_skb:
spin_unlock_bh(&srng->lock);
return -ENOMEM;
}
+EXPORT_SYMBOL(ath12k_dp_mon_buf_replenish);
int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *rx_ring,
int req_entries)
{
enum hal_rx_buf_return_buf_manager mgr =
- ab->hw_params->hal_params->rx_buf_rbm;
+ ab->hal.hal_params->rx_buf_rbm;
int num_free, num_remain, buf_id;
struct ath12k_buffer_addr *desc;
struct hal_srng *srng;
@@ -2756,7 +791,7 @@ int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
num_remain--;
- ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ ath12k_hal_rx_buf_addr_info_set(&ab->hal, desc, paddr, cookie, mgr);
}
ath12k_hal_srng_access_end(ab, srng);
@@ -2782,676 +817,6 @@ fail_free_skb:
return req_entries - num_remain;
}
-static struct dp_mon_tx_ppdu_info *
-ath12k_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon,
- unsigned int ppdu_id,
- enum dp_mon_tx_ppdu_info_type type)
-{
- struct dp_mon_tx_ppdu_info *tx_ppdu_info;
-
- if (type == DP_MON_TX_PROT_PPDU_INFO) {
- tx_ppdu_info = pmon->tx_prot_ppdu_info;
-
- if (tx_ppdu_info && !tx_ppdu_info->is_used)
- return tx_ppdu_info;
- kfree(tx_ppdu_info);
- } else {
- tx_ppdu_info = pmon->tx_data_ppdu_info;
-
- if (tx_ppdu_info && !tx_ppdu_info->is_used)
- return tx_ppdu_info;
- kfree(tx_ppdu_info);
- }
-
- /* allocate new tx_ppdu_info */
- tx_ppdu_info = kzalloc(sizeof(*tx_ppdu_info), GFP_ATOMIC);
- if (!tx_ppdu_info)
- return NULL;
-
- tx_ppdu_info->is_used = 0;
- tx_ppdu_info->ppdu_id = ppdu_id;
-
- if (type == DP_MON_TX_PROT_PPDU_INFO)
- pmon->tx_prot_ppdu_info = tx_ppdu_info;
- else
- pmon->tx_data_ppdu_info = tx_ppdu_info;
-
- return tx_ppdu_info;
-}
-
-static struct dp_mon_tx_ppdu_info *
-ath12k_dp_mon_hal_tx_ppdu_info(struct ath12k_mon_data *pmon,
- u16 tlv_tag)
-{
- switch (tlv_tag) {
- case HAL_TX_FES_SETUP:
- case HAL_TX_FLUSH:
- case HAL_PCU_PPDU_SETUP_INIT:
- case HAL_TX_PEER_ENTRY:
- case HAL_TX_QUEUE_EXTENSION:
- case HAL_TX_MPDU_START:
- case HAL_TX_MSDU_START:
- case HAL_TX_DATA:
- case HAL_MON_BUF_ADDR:
- case HAL_TX_MPDU_END:
- case HAL_TX_LAST_MPDU_FETCHED:
- case HAL_TX_LAST_MPDU_END:
- case HAL_COEX_TX_REQ:
- case HAL_TX_RAW_OR_NATIVE_FRAME_SETUP:
- case HAL_SCH_CRITICAL_TLV_REFERENCE:
- case HAL_TX_FES_SETUP_COMPLETE:
- case HAL_TQM_MPDU_GLOBAL_START:
- case HAL_SCHEDULER_END:
- case HAL_TX_FES_STATUS_USER_PPDU:
- break;
- case HAL_TX_FES_STATUS_PROT: {
- if (!pmon->tx_prot_ppdu_info->is_used)
- pmon->tx_prot_ppdu_info->is_used = true;
-
- return pmon->tx_prot_ppdu_info;
- }
- }
-
- if (!pmon->tx_data_ppdu_info->is_used)
- pmon->tx_data_ppdu_info->is_used = true;
-
- return pmon->tx_data_ppdu_info;
-}
-
-#define MAX_MONITOR_HEADER 512
-#define MAX_DUMMY_FRM_BODY 128
-
-struct sk_buff *ath12k_dp_mon_tx_alloc_skb(void)
-{
- struct sk_buff *skb;
-
- skb = dev_alloc_skb(MAX_MONITOR_HEADER + MAX_DUMMY_FRM_BODY);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, MAX_MONITOR_HEADER);
-
- if (!IS_ALIGNED((unsigned long)skb->data, 4))
- skb_pull(skb, PTR_ALIGN(skb->data, 4) - skb->data);
-
- return skb;
-}
-
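
ath12k_dp_mon_tx_alloc_skb() above reserves headroom and then pulls up to three bytes so skb->data lands on a 4-byte boundary. The same alignment step shown on a plain pointer:

#include <stdint.h>

/* advance 0..3 bytes so the payload starts on a 4-byte boundary,
 * mirroring the skb_pull(skb, PTR_ALIGN(skb->data, 4) - skb->data) step
 */
static uint8_t *ex_align4(uint8_t *data)
{
	uintptr_t p = (uintptr_t)data;

	return (uint8_t *)((p + 3) & ~(uintptr_t)3);
}
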
-static int
-ath12k_dp_mon_tx_gen_cts2self_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
-{
- struct sk_buff *skb;
- struct ieee80211_cts *cts;
-
- skb = ath12k_dp_mon_tx_alloc_skb();
- if (!skb)
- return -ENOMEM;
-
- cts = (struct ieee80211_cts *)skb->data;
- memset(cts, 0, MAX_DUMMY_FRM_BODY);
- cts->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
- cts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
- memcpy(cts->ra, tx_ppdu_info->rx_status.addr1, sizeof(cts->ra));
-
- skb_put(skb, sizeof(*cts));
- tx_ppdu_info->tx_mon_mpdu->head = skb;
- tx_ppdu_info->tx_mon_mpdu->tail = NULL;
- list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
- &tx_ppdu_info->dp_tx_mon_mpdu_list);
-
- return 0;
-}
-
-static int
-ath12k_dp_mon_tx_gen_rts_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
-{
- struct sk_buff *skb;
- struct ieee80211_rts *rts;
-
- skb = ath12k_dp_mon_tx_alloc_skb();
- if (!skb)
- return -ENOMEM;
-
- rts = (struct ieee80211_rts *)skb->data;
- memset(rts, 0, MAX_DUMMY_FRM_BODY);
- rts->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
- rts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
- memcpy(rts->ra, tx_ppdu_info->rx_status.addr1, sizeof(rts->ra));
- memcpy(rts->ta, tx_ppdu_info->rx_status.addr2, sizeof(rts->ta));
-
- skb_put(skb, sizeof(*rts));
- tx_ppdu_info->tx_mon_mpdu->head = skb;
- tx_ppdu_info->tx_mon_mpdu->tail = NULL;
- list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
- &tx_ppdu_info->dp_tx_mon_mpdu_list);
-
- return 0;
-}
-
-static int
-ath12k_dp_mon_tx_gen_3addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
-{
- struct sk_buff *skb;
- struct ieee80211_qos_hdr *qhdr;
-
- skb = ath12k_dp_mon_tx_alloc_skb();
- if (!skb)
- return -ENOMEM;
-
- qhdr = (struct ieee80211_qos_hdr *)skb->data;
- memset(qhdr, 0, MAX_DUMMY_FRM_BODY);
- qhdr->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
- qhdr->duration_id = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
- memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
- memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN);
- memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN);
-
- skb_put(skb, sizeof(*qhdr));
- tx_ppdu_info->tx_mon_mpdu->head = skb;
- tx_ppdu_info->tx_mon_mpdu->tail = NULL;
- list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
- &tx_ppdu_info->dp_tx_mon_mpdu_list);
-
- return 0;
-}
-
-static int
-ath12k_dp_mon_tx_gen_4addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
-{
- struct sk_buff *skb;
- struct dp_mon_qosframe_addr4 *qhdr;
-
- skb = ath12k_dp_mon_tx_alloc_skb();
- if (!skb)
- return -ENOMEM;
-
- qhdr = (struct dp_mon_qosframe_addr4 *)skb->data;
- memset(qhdr, 0, MAX_DUMMY_FRM_BODY);
- qhdr->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
- qhdr->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
- memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
- memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN);
- memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN);
- memcpy(qhdr->addr4, tx_ppdu_info->rx_status.addr4, ETH_ALEN);
-
- skb_put(skb, sizeof(*qhdr));
- tx_ppdu_info->tx_mon_mpdu->head = skb;
- tx_ppdu_info->tx_mon_mpdu->tail = NULL;
- list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
- &tx_ppdu_info->dp_tx_mon_mpdu_list);
-
- return 0;
-}
-
-static int
-ath12k_dp_mon_tx_gen_ack_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
-{
- struct sk_buff *skb;
- struct dp_mon_frame_min_one *fbmhdr;
-
- skb = ath12k_dp_mon_tx_alloc_skb();
- if (!skb)
- return -ENOMEM;
-
- fbmhdr = (struct dp_mon_frame_min_one *)skb->data;
- memset(fbmhdr, 0, MAX_DUMMY_FRM_BODY);
- fbmhdr->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_CFACK);
- memcpy(fbmhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
-
- /* set duration zero for ack frame */
- fbmhdr->duration = 0;
-
- skb_put(skb, sizeof(*fbmhdr));
- tx_ppdu_info->tx_mon_mpdu->head = skb;
- tx_ppdu_info->tx_mon_mpdu->tail = NULL;
- list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
- &tx_ppdu_info->dp_tx_mon_mpdu_list);
-
- return 0;
-}
-
-static int
-ath12k_dp_mon_tx_gen_prot_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
-{
- int ret = 0;
-
- switch (tx_ppdu_info->rx_status.medium_prot_type) {
- case DP_MON_TX_MEDIUM_RTS_LEGACY:
- case DP_MON_TX_MEDIUM_RTS_11AC_STATIC_BW:
- case DP_MON_TX_MEDIUM_RTS_11AC_DYNAMIC_BW:
- ret = ath12k_dp_mon_tx_gen_rts_frame(tx_ppdu_info);
- break;
- case DP_MON_TX_MEDIUM_CTS2SELF:
- ret = ath12k_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info);
- break;
- case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_3ADDR:
- ret = ath12k_dp_mon_tx_gen_3addr_qos_null_frame(tx_ppdu_info);
- break;
- case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_4ADDR:
- ret = ath12k_dp_mon_tx_gen_4addr_qos_null_frame(tx_ppdu_info);
- break;
- }
-
- return ret;
-}
-
-static enum dp_mon_tx_tlv_status
-ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
- struct ath12k_mon_data *pmon,
- u16 tlv_tag, const void *tlv_data, u32 userid)
-{
- struct dp_mon_tx_ppdu_info *tx_ppdu_info;
- enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
- u32 info[7];
-
- tx_ppdu_info = ath12k_dp_mon_hal_tx_ppdu_info(pmon, tlv_tag);
-
- switch (tlv_tag) {
- case HAL_TX_FES_SETUP: {
- const struct hal_tx_fes_setup *tx_fes_setup = tlv_data;
-
- info[0] = __le32_to_cpu(tx_fes_setup->info0);
- tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id);
- tx_ppdu_info->num_users =
- u32_get_bits(info[0], HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS);
- status = DP_MON_TX_FES_SETUP;
- break;
- }
-
- case HAL_TX_FES_STATUS_END: {
- const struct hal_tx_fes_status_end *tx_fes_status_end = tlv_data;
- u32 tst_15_0, tst_31_16;
-
- info[0] = __le32_to_cpu(tx_fes_status_end->info0);
- tst_15_0 =
- u32_get_bits(info[0],
- HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_15_0);
- tst_31_16 =
- u32_get_bits(info[0],
- HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_31_16);
-
- tx_ppdu_info->rx_status.ppdu_ts = (tst_15_0 | (tst_31_16 << 16));
- status = DP_MON_TX_FES_STATUS_END;
- break;
- }
-
- case HAL_RX_RESPONSE_REQUIRED_INFO: {
- const struct hal_rx_resp_req_info *rx_resp_req_info = tlv_data;
- u32 addr_32;
- u16 addr_16;
-
- info[0] = __le32_to_cpu(rx_resp_req_info->info0);
- info[1] = __le32_to_cpu(rx_resp_req_info->info1);
- info[2] = __le32_to_cpu(rx_resp_req_info->info2);
- info[3] = __le32_to_cpu(rx_resp_req_info->info3);
- info[4] = __le32_to_cpu(rx_resp_req_info->info4);
- info[5] = __le32_to_cpu(rx_resp_req_info->info5);
-
- tx_ppdu_info->rx_status.ppdu_id =
- u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_PPDU_ID);
- tx_ppdu_info->rx_status.reception_type =
- u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_RECEPTION_TYPE);
- tx_ppdu_info->rx_status.rx_duration =
- u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_DURATION);
- tx_ppdu_info->rx_status.mcs =
- u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_RATE_MCS);
- tx_ppdu_info->rx_status.sgi =
- u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_SGI);
- tx_ppdu_info->rx_status.is_stbc =
- u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_STBC);
- tx_ppdu_info->rx_status.ldpc =
- u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_LDPC);
- tx_ppdu_info->rx_status.is_ampdu =
- u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_IS_AMPDU);
- tx_ppdu_info->rx_status.num_users =
- u32_get_bits(info[2], HAL_RX_RESP_REQ_INFO2_NUM_USER);
-
- addr_32 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO3_ADDR1_31_0);
- addr_16 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO4_ADDR1_47_32);
- ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
-
- addr_16 = u32_get_bits(info[4], HAL_RX_RESP_REQ_INFO4_ADDR1_15_0);
- addr_32 = u32_get_bits(info[5], HAL_RX_RESP_REQ_INFO5_ADDR1_47_16);
- ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2);
-
- if (tx_ppdu_info->rx_status.reception_type == 0)
- ath12k_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info);
- status = DP_MON_RX_RESPONSE_REQUIRED_INFO;
- break;
- }
-
- case HAL_PCU_PPDU_SETUP_INIT: {
- const struct hal_tx_pcu_ppdu_setup_init *ppdu_setup = tlv_data;
- u32 addr_32;
- u16 addr_16;
-
- info[0] = __le32_to_cpu(ppdu_setup->info0);
- info[1] = __le32_to_cpu(ppdu_setup->info1);
- info[2] = __le32_to_cpu(ppdu_setup->info2);
- info[3] = __le32_to_cpu(ppdu_setup->info3);
- info[4] = __le32_to_cpu(ppdu_setup->info4);
- info[5] = __le32_to_cpu(ppdu_setup->info5);
- info[6] = __le32_to_cpu(ppdu_setup->info6);
-
- /* protection frame address 1 */
- addr_32 = u32_get_bits(info[1],
- HAL_TX_PPDU_SETUP_INFO1_PROT_FRAME_ADDR1_31_0);
- addr_16 = u32_get_bits(info[2],
- HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR1_47_32);
- ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
-
- /* protection frame address 2 */
- addr_16 = u32_get_bits(info[2],
- HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR2_15_0);
- addr_32 = u32_get_bits(info[3],
- HAL_TX_PPDU_SETUP_INFO3_PROT_FRAME_ADDR2_47_16);
- ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2);
-
- /* protection frame address 3 */
- addr_32 = u32_get_bits(info[4],
- HAL_TX_PPDU_SETUP_INFO4_PROT_FRAME_ADDR3_31_0);
- addr_16 = u32_get_bits(info[5],
- HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR3_47_32);
- ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr3);
-
- /* protection frame address 4 */
- addr_16 = u32_get_bits(info[5],
- HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR4_15_0);
- addr_32 = u32_get_bits(info[6],
- HAL_TX_PPDU_SETUP_INFO6_PROT_FRAME_ADDR4_47_16);
- ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr4);
-
- status = u32_get_bits(info[0],
- HAL_TX_PPDU_SETUP_INFO0_MEDIUM_PROT_TYPE);
- break;
- }
-
- case HAL_TX_QUEUE_EXTENSION: {
- const struct hal_tx_queue_exten *tx_q_exten = tlv_data;
-
- info[0] = __le32_to_cpu(tx_q_exten->info0);
-
- tx_ppdu_info->rx_status.frame_control =
- u32_get_bits(info[0],
- HAL_TX_Q_EXT_INFO0_FRAME_CTRL);
- tx_ppdu_info->rx_status.fc_valid = true;
- break;
- }
-
- case HAL_TX_FES_STATUS_START: {
- const struct hal_tx_fes_status_start *tx_fes_start = tlv_data;
-
- info[0] = __le32_to_cpu(tx_fes_start->info0);
-
- tx_ppdu_info->rx_status.medium_prot_type =
- u32_get_bits(info[0],
- HAL_TX_FES_STATUS_START_INFO0_MEDIUM_PROT_TYPE);
- break;
- }
-
- case HAL_TX_FES_STATUS_PROT: {
- const struct hal_tx_fes_status_prot *tx_fes_status = tlv_data;
- u32 start_timestamp;
- u32 end_timestamp;
-
- info[0] = __le32_to_cpu(tx_fes_status->info0);
- info[1] = __le32_to_cpu(tx_fes_status->info1);
-
- start_timestamp =
- u32_get_bits(info[0],
- HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_15_0);
- start_timestamp |=
- u32_get_bits(info[0],
- HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_31_16) << 15;
- end_timestamp =
- u32_get_bits(info[1],
- HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_15_0);
- end_timestamp |=
- u32_get_bits(info[1],
- HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_31_16) << 15;
- tx_ppdu_info->rx_status.rx_duration = end_timestamp - start_timestamp;
-
- ath12k_dp_mon_tx_gen_prot_frame(tx_ppdu_info);
- break;
- }
-
- case HAL_TX_FES_STATUS_START_PPDU:
- case HAL_TX_FES_STATUS_START_PROT: {
- const struct hal_tx_fes_status_start_prot *tx_fes_stat_start = tlv_data;
- u64 ppdu_ts;
-
- info[0] = __le32_to_cpu(tx_fes_stat_start->info0);
-
- tx_ppdu_info->rx_status.ppdu_ts =
- u32_get_bits(info[0],
- HAL_TX_FES_STAT_STRT_INFO0_PROT_TS_LOWER_32);
- ppdu_ts = (u32_get_bits(info[1],
- HAL_TX_FES_STAT_STRT_INFO1_PROT_TS_UPPER_32));
- tx_ppdu_info->rx_status.ppdu_ts |= ppdu_ts << 32;
- break;
- }
-
- case HAL_TX_FES_STATUS_USER_PPDU: {
- const struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu = tlv_data;
-
- info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0);
-
- tx_ppdu_info->rx_status.rx_duration =
- u32_get_bits(info[0],
- HAL_TX_FES_STAT_USR_PPDU_INFO0_DURATION);
- break;
- }
-
- case HAL_MACTX_HE_SIG_A_SU:
- ath12k_dp_mon_parse_he_sig_su(tlv_data, &tx_ppdu_info->rx_status);
- break;
-
- case HAL_MACTX_HE_SIG_A_MU_DL:
- ath12k_dp_mon_parse_he_sig_mu(tlv_data, &tx_ppdu_info->rx_status);
- break;
-
- case HAL_MACTX_HE_SIG_B1_MU:
- ath12k_dp_mon_parse_he_sig_b1_mu(tlv_data, &tx_ppdu_info->rx_status);
- break;
-
- case HAL_MACTX_HE_SIG_B2_MU:
- ath12k_dp_mon_parse_he_sig_b2_mu(tlv_data, &tx_ppdu_info->rx_status);
- break;
-
- case HAL_MACTX_HE_SIG_B2_OFDMA:
- ath12k_dp_mon_parse_he_sig_b2_ofdma(tlv_data, &tx_ppdu_info->rx_status);
- break;
-
- case HAL_MACTX_VHT_SIG_A:
- ath12k_dp_mon_parse_vht_sig_a(tlv_data, &tx_ppdu_info->rx_status);
- break;
-
- case HAL_MACTX_L_SIG_A:
- ath12k_dp_mon_parse_l_sig_a(tlv_data, &tx_ppdu_info->rx_status);
- break;
-
- case HAL_MACTX_L_SIG_B:
- ath12k_dp_mon_parse_l_sig_b(tlv_data, &tx_ppdu_info->rx_status);
- break;
-
- case HAL_RX_FRAME_BITMAP_ACK: {
- const struct hal_rx_frame_bitmap_ack *fbm_ack = tlv_data;
- u32 addr_32;
- u16 addr_16;
-
- info[0] = __le32_to_cpu(fbm_ack->info0);
- info[1] = __le32_to_cpu(fbm_ack->info1);
-
- addr_32 = u32_get_bits(info[0],
- HAL_RX_FBM_ACK_INFO0_ADDR1_31_0);
- addr_16 = u32_get_bits(info[1],
- HAL_RX_FBM_ACK_INFO1_ADDR1_47_32);
- ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
-
- ath12k_dp_mon_tx_gen_ack_frame(tx_ppdu_info);
- break;
- }
-
- case HAL_MACTX_PHY_DESC: {
- const struct hal_tx_phy_desc *tx_phy_desc = tlv_data;
-
- info[0] = __le32_to_cpu(tx_phy_desc->info0);
- info[1] = __le32_to_cpu(tx_phy_desc->info1);
- info[2] = __le32_to_cpu(tx_phy_desc->info2);
- info[3] = __le32_to_cpu(tx_phy_desc->info3);
-
- tx_ppdu_info->rx_status.beamformed =
- u32_get_bits(info[0],
- HAL_TX_PHY_DESC_INFO0_BF_TYPE);
- tx_ppdu_info->rx_status.preamble_type =
- u32_get_bits(info[0],
- HAL_TX_PHY_DESC_INFO0_PREAMBLE_11B);
- tx_ppdu_info->rx_status.mcs =
- u32_get_bits(info[1],
- HAL_TX_PHY_DESC_INFO1_MCS);
- tx_ppdu_info->rx_status.ltf_size =
- u32_get_bits(info[3],
- HAL_TX_PHY_DESC_INFO3_LTF_SIZE);
- tx_ppdu_info->rx_status.nss =
- u32_get_bits(info[2],
- HAL_TX_PHY_DESC_INFO2_NSS);
- tx_ppdu_info->rx_status.chan_num =
- u32_get_bits(info[3],
- HAL_TX_PHY_DESC_INFO3_ACTIVE_CHANNEL);
- tx_ppdu_info->rx_status.bw =
- u32_get_bits(info[0],
- HAL_TX_PHY_DESC_INFO0_BANDWIDTH);
- break;
- }
-
- case HAL_TX_MPDU_START: {
- struct dp_mon_mpdu *mon_mpdu;
-
- mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
- if (!mon_mpdu)
- return DP_MON_TX_STATUS_PPDU_NOT_DONE;
-
- /* keep the new MPDU on the ppdu_info so HAL_TX_MPDU_END
-  * links it instead of leaking the allocation
-  */
- tx_ppdu_info->tx_mon_mpdu = mon_mpdu;
- status = DP_MON_TX_MPDU_START;
- break;
- }
-
- case HAL_TX_MPDU_END:
- list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
- &tx_ppdu_info->dp_tx_mon_mpdu_list);
- break;
- }
-
- return status;
-}
-
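
Several TLVs above carry MAC addresses split into a 32-bit low half and a 16-bit high half, rejoined by ath12k_dp_get_mac_addr(). A plausible equivalent, assuming the halves are already in CPU order and the address bytes are laid out low half first (not verified against the driver's helper):

#include <stdint.h>
#include <string.h>

static void ex_get_mac_addr(uint32_t addr_l32, uint16_t addr_h16, uint8_t *addr)
{
	/* bytes 0..3 from the low word, bytes 4..5 from the high half-word */
	memcpy(addr, &addr_l32, 4);
	memcpy(addr + 4, &addr_h16, 2);
}
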
-enum dp_mon_tx_tlv_status
-ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
- struct hal_tlv_hdr *tx_tlv,
- u8 *num_users)
-{
- u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
- u32 info0;
-
- switch (tlv_tag) {
- case HAL_TX_FES_SETUP: {
- struct hal_tx_fes_setup *tx_fes_setup =
- (struct hal_tx_fes_setup *)tx_tlv;
-
- info0 = __le32_to_cpu(tx_fes_setup->info0);
-
- *num_users = u32_get_bits(info0, HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS);
- tlv_status = DP_MON_TX_FES_SETUP;
- break;
- }
-
- case HAL_RX_RESPONSE_REQUIRED_INFO: {
- /* TODO: need to update *num_users */
- tlv_status = DP_MON_RX_RESPONSE_REQUIRED_INFO;
- break;
- }
- }
-
- return tlv_status;
-}
-
-static void
-ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar,
- struct napi_struct *napi,
- struct dp_mon_tx_ppdu_info *tx_ppdu_info)
-{
- struct dp_mon_mpdu *tmp, *mon_mpdu;
-
- list_for_each_entry_safe(mon_mpdu, tmp,
- &tx_ppdu_info->dp_tx_mon_mpdu_list, list) {
- list_del(&mon_mpdu->list);
-
- if (mon_mpdu->head)
- ath12k_dp_mon_rx_deliver(ar, mon_mpdu,
- &tx_ppdu_info->rx_status, napi);
-
- kfree(mon_mpdu);
- }
-}
-
-enum hal_rx_mon_status
-ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
- struct ath12k_mon_data *pmon,
- struct sk_buff *skb,
- struct napi_struct *napi,
- u32 ppdu_id)
-{
- struct ath12k_base *ab = ar->ab;
- struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info, *tx_data_ppdu_info;
- struct hal_tlv_hdr *tlv;
- u8 *ptr = skb->data;
- u16 tlv_tag;
- u16 tlv_len;
- u32 tlv_userid = 0;
- u8 num_user;
- u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
-
- tx_prot_ppdu_info = ath12k_dp_mon_tx_get_ppdu_info(pmon, ppdu_id,
- DP_MON_TX_PROT_PPDU_INFO);
- if (!tx_prot_ppdu_info)
- return -ENOMEM;
-
- tlv = (struct hal_tlv_hdr *)ptr;
- tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
-
- tlv_status = ath12k_dp_mon_tx_status_get_num_user(tlv_tag, tlv, &num_user);
- if (tlv_status == DP_MON_TX_STATUS_PPDU_NOT_DONE || !num_user)
- return -EINVAL;
-
- tx_data_ppdu_info = ath12k_dp_mon_tx_get_ppdu_info(pmon, ppdu_id,
- DP_MON_TX_DATA_PPDU_INFO);
- if (!tx_data_ppdu_info)
- return -ENOMEM;
-
- do {
- tlv = (struct hal_tlv_hdr *)ptr;
- tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
- tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
- tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
-
- tlv_status = ath12k_dp_mon_tx_parse_status_tlv(ab, pmon,
- tlv_tag, ptr,
- tlv_userid);
- ptr += tlv_len;
- ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
- if ((ptr - skb->data) >= DP_TX_MONITOR_BUF_SIZE)
- break;
- } while (tlv_status != DP_MON_TX_FES_STATUS_END);
-
- ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_data_ppdu_info);
- ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_prot_ppdu_info);
-
- return tlv_status;
-}
-
static void
ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats,
struct hal_rx_mon_ppdu_info *ppdu_info,
@@ -3465,6 +830,9 @@ ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_st
u32 gi_idx = ppdu_info->gi;
u32 len;
+ if (!rx_stats)
+ return;
+
if (mcs_idx > HAL_RX_MAX_MCS_HT || nss_idx >= HAL_RX_MAX_NSS ||
bw_idx >= HAL_RX_BW_MAX || gi_idx >= HAL_RX_GI_MAX) {
return;
@@ -3485,15 +853,14 @@ ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_st
stats->rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += len;
}
-static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
- struct ath12k_link_sta *arsta,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k_dp_link_peer *peer,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ struct ath12k_rx_peer_stats *rx_stats = peer->peer_stats.rx_stats;
u32 num_msdu;
- arsta->rssi_comb = ppdu_info->rssi_comb;
- ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+ peer->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&peer->avg_rssi, ppdu_info->rssi_comb);
if (!rx_stats)
return;
@@ -3541,7 +908,7 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
rx_stats->dcm_count += ppdu_info->dcm;
rx_stats->rx_duration += ppdu_info->rx_duration;
- arsta->rx_duration = rx_stats->rx_duration;
+ peer->rx_duration = rx_stats->rx_duration;
if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) {
rx_stats->pkt_stats.nss_count[ppdu_info->nss - 1] += num_msdu;
@@ -3594,6 +961,7 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
ath12k_dp_mon_rx_update_peer_rate_table_stats(rx_stats, ppdu_info,
NULL, num_msdu);
}
+EXPORT_SYMBOL(ath12k_dp_mon_rx_update_peer_su_stats);
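
The ewma_avg_rssi_add() call above comes from the kernel's DECLARE_EWMA() generator in <linux/average.h>, which keeps a moving average in fixed point. A reduced version of the same idea; the precision and weight values here are illustrative, not the driver's declaration:

#include <stdint.h>

#define EX_EWMA_PREC	10	/* fixed-point fractional bits (illustrative) */
#define EX_EWMA_WEIGHT	8	/* each sample contributes 1/8 (illustrative) */

struct ex_ewma {
	uint64_t internal;	/* average << EX_EWMA_PREC; 0 means unseeded */
};

static void ex_ewma_add(struct ex_ewma *e, uint32_t sample)
{
	uint64_t scaled = (uint64_t)sample << EX_EWMA_PREC;

	if (!e->internal)
		e->internal = scaled;	/* first sample seeds the average */
	else
		e->internal = (e->internal * (EX_EWMA_WEIGHT - 1) + scaled) /
			      EX_EWMA_WEIGHT;
}

static uint32_t ex_ewma_read(const struct ex_ewma *e)
{
	return e->internal >> EX_EWMA_PREC;
}
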
void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info)
{
@@ -3642,39 +1010,33 @@ void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info)
}
ppdu_info->ldpc = 1;
}
+EXPORT_SYMBOL(ath12k_dp_mon_rx_process_ulofdma);
static void
-ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
+ath12k_dp_mon_rx_update_user_stats(struct ath12k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
u32 uid)
{
- struct ath12k_link_sta *arsta;
struct ath12k_rx_peer_stats *rx_stats = NULL;
struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid];
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
u32 num_msdu;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
if (user_stats->ast_index == 0 || user_stats->ast_index == 0xFFFF)
return;
- peer = ath12k_peer_find_by_ast(ar->ab, user_stats->ast_index);
+ peer = ath12k_dp_link_peer_find_by_ast(dp, user_stats->ast_index);
if (!peer) {
- ath12k_warn(ar->ab, "peer ast idx %d can't be found\n",
+ ath12k_warn(ab, "peer ast idx %d can't be found\n",
user_stats->ast_index);
return;
}
- arsta = ath12k_peer_get_link_sta(ar->ab, peer);
- if (!arsta) {
- ath12k_warn(ar->ab, "link sta not found on peer %pM id %d\n",
- peer->addr, peer->peer_id);
- return;
- }
-
- arsta->rssi_comb = ppdu_info->rssi_comb;
- ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
- rx_stats = arsta->rx_stats;
+ peer->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&peer->avg_rssi, ppdu_info->rssi_comb);
+ rx_stats = peer->peer_stats.rx_stats;
if (!rx_stats)
return;
@@ -3718,7 +1080,7 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
rx_stats->ru_alloc_cnt[user_stats->ul_ofdma_ru_size] += num_msdu;
rx_stats->rx_duration += ppdu_info->rx_duration;
- arsta->rx_duration = rx_stats->rx_duration;
+ peer->rx_duration = rx_stats->rx_duration;
if (user_stats->nss > 0 && user_stats->nss <= HAL_RX_MAX_NSS) {
rx_stats->pkt_stats.nss_count[user_stats->nss - 1] += num_msdu;
@@ -3749,8 +1111,8 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
user_stats, num_msdu);
}
-static void
-ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k *ar,
+void
+ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
u32 num_users, i;
@@ -3760,679 +1122,6 @@ ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k *ar,
num_users = HAL_MAX_UL_MU_USERS;
for (i = 0; i < num_users; i++)
- ath12k_dp_mon_rx_update_user_stats(ar, ppdu_info, i);
-}
-
-static void
-ath12k_dp_mon_rx_memset_ppdu_info(struct hal_rx_mon_ppdu_info *ppdu_info)
-{
- memset(ppdu_info, 0, sizeof(*ppdu_info));
- ppdu_info->peer_id = HAL_INVALID_PEERID;
-}
-
-int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget,
- struct napi_struct *napi)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_pdev_dp *pdev_dp = &ar->dp;
- struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
- struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- struct ath12k_dp *dp = &ab->dp;
- struct hal_mon_dest_desc *mon_dst_desc;
- struct sk_buff *skb;
- struct ath12k_skb_rxcb *rxcb;
- struct dp_srng *mon_dst_ring;
- struct hal_srng *srng;
- struct dp_rxdma_mon_ring *buf_ring;
- struct ath12k_link_sta *arsta;
- struct ath12k_peer *peer;
- struct sk_buff_head skb_list;
- u64 cookie;
- int num_buffs_reaped = 0, srng_id, buf_id;
- u32 hal_status, end_offset, info0, end_reason;
- u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, ar->pdev_idx);
-
- __skb_queue_head_init(&skb_list);
- srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, pdev_idx);
- mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
- buf_ring = &dp->rxdma_mon_buf_ring;
-
- srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
- spin_lock_bh(&srng->lock);
- ath12k_hal_srng_access_begin(ab, srng);
-
- while (likely(*budget)) {
- mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
- if (unlikely(!mon_dst_desc))
- break;
-
- /* In case of empty descriptor, the cookie in the ring descriptor
- * is invalid. Therefore, this entry is skipped, and ring processing
- * continues.
- */
- info0 = le32_to_cpu(mon_dst_desc->info0);
- if (u32_get_bits(info0, HAL_MON_DEST_INFO0_EMPTY_DESC))
- goto move_next;
-
- cookie = le32_to_cpu(mon_dst_desc->cookie);
- buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
-
- spin_lock_bh(&buf_ring->idr_lock);
- skb = idr_remove(&buf_ring->bufs_idr, buf_id);
- spin_unlock_bh(&buf_ring->idr_lock);
-
- if (unlikely(!skb)) {
- ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
- buf_id);
- goto move_next;
- }
-
- rxcb = ATH12K_SKB_RXCB(skb);
- dma_unmap_single(ab->dev, rxcb->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
-
- end_reason = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_REASON);
-
- /* HAL_MON_FLUSH_DETECTED implies that an rx flush was received at
-  * the end of the rx PPDU, and HAL_MON_PPDU_TRUNCATED implies that
-  * the PPDU got truncated due to a system-level error. In both cases
-  * the buffer data can be discarded.
-  */
- if ((end_reason == HAL_MON_FLUSH_DETECTED) ||
- (end_reason == HAL_MON_PPDU_TRUNCATED)) {
- ath12k_dbg(ab, ATH12K_DBG_DATA,
- "Monitor dest descriptor end reason %d", end_reason);
- dev_kfree_skb_any(skb);
- goto move_next;
- }
-
- /* Charge the budget only for ring descriptors marked with
-  * HAL_MON_END_OF_PPDU, to ensure that one PPDU worth of data is
-  * always reaped. This helps to utilize the NAPI budget efficiently.
-  */
- if (end_reason == HAL_MON_END_OF_PPDU) {
- *budget -= 1;
- rxcb->is_end_of_ppdu = true;
- }
-
- end_offset = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_OFFSET);
- if (likely(end_offset <= DP_RX_BUFFER_SIZE)) {
- skb_put(skb, end_offset);
- } else {
- ath12k_warn(ab,
- "invalid offset on mon stats destination %u\n",
- end_offset);
- skb_put(skb, DP_RX_BUFFER_SIZE);
- }
-
- __skb_queue_tail(&skb_list, skb);
-
-move_next:
- ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
- ath12k_hal_srng_dst_get_next_entry(ab, srng);
- num_buffs_reaped++;
- }
-
- ath12k_hal_srng_access_end(ab, srng);
- spin_unlock_bh(&srng->lock);
-
- if (!num_buffs_reaped)
- return 0;
-
- /* In some cases, one PPDU worth of data can be spread across multiple
-  * NAPI schedules. To avoid losing the already parsed ppdu_info state,
-  * skip the memset of the ppdu_info structure and continue processing it.
-  */
- if (!ppdu_info->ppdu_continuation)
- ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
-
- while ((skb = __skb_dequeue(&skb_list))) {
- hal_status = ath12k_dp_mon_rx_parse_mon_status(ar, pmon, skb, napi);
- if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
- ppdu_info->ppdu_continuation = true;
- dev_kfree_skb_any(skb);
- continue;
- }
-
- if (ppdu_info->peer_id == HAL_INVALID_PEERID)
- goto free_skb;
-
- rcu_read_lock();
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
- if (!peer || !peer->sta) {
- ath12k_dbg(ab, ATH12K_DBG_DATA,
- "failed to find the peer with monitor peer_id %d\n",
- ppdu_info->peer_id);
- goto next_skb;
- }
-
- if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
- arsta = ath12k_peer_get_link_sta(ar->ab, peer);
- if (!arsta) {
- ath12k_warn(ar->ab, "link sta not found on peer %pM id %d\n",
- peer->addr, peer->peer_id);
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
- dev_kfree_skb_any(skb);
- continue;
- }
- ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
- ppdu_info);
- } else if ((ppdu_info->fc_valid) &&
- (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
- ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
- ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
- }
-
-next_skb:
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
-free_skb:
- dev_kfree_skb_any(skb);
- ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
- }
-
- return num_buffs_reaped;
-}
-
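
The removed ath12k_dp_mon_srng_process() follows the driver's usual destination-ring reap shape: take the SRNG lock, begin ring access, peek and queue entries while budget remains, then do the heavy per-skb parsing after the lock is dropped. A skeleton of that shape; every ex_* helper is a placeholder for the hal/srng calls in the excerpt:

struct ex_srng;
struct ex_entry;

extern void ex_srng_lock(struct ex_srng *s);
extern void ex_srng_unlock(struct ex_srng *s);
extern void ex_access_begin(struct ex_srng *s);
extern void ex_access_end(struct ex_srng *s);
extern struct ex_entry *ex_dst_peek(struct ex_srng *s);
extern void ex_dst_advance(struct ex_srng *s);
extern void ex_queue_for_later(struct ex_entry *e);

static int ex_reap(struct ex_srng *srng, int *budget)
{
	struct ex_entry *ent;
	int reaped = 0;

	ex_srng_lock(srng);
	ex_access_begin(srng);

	while (*budget && (ent = ex_dst_peek(srng))) {
		ex_queue_for_later(ent);
		ex_dst_advance(srng);
		reaped++;
		/* the real loop only charges budget on END_OF_PPDU entries */
		*budget -= 1;
	}

	ex_access_end(srng);
	ex_srng_unlock(srng);

	/* heavy per-skb processing happens here, outside the ring lock */
	return reaped;
}
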
-static int ath12k_dp_rx_reap_mon_status_ring(struct ath12k_base *ab, int mac_id,
- int *budget, struct sk_buff_head *skb_list)
-{
- const struct ath12k_hw_hal_params *hal_params;
- int buf_id, srng_id, num_buffs_reaped = 0;
- enum dp_mon_status_buf_state reap_status;
- struct dp_rxdma_mon_ring *rx_ring;
- struct ath12k_mon_data *pmon;
- struct ath12k_skb_rxcb *rxcb;
- struct hal_tlv_64_hdr *tlv;
- void *rx_mon_status_desc;
- struct hal_srng *srng;
- struct ath12k_dp *dp;
- struct sk_buff *skb;
- struct ath12k *ar;
- dma_addr_t paddr;
- u32 cookie;
- u8 rbm;
-
- ar = ab->pdevs[ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id)].ar;
- dp = &ab->dp;
- pmon = &ar->dp.mon_data;
- srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
- rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
-
- srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
-
- spin_lock_bh(&srng->lock);
-
- ath12k_hal_srng_access_begin(ab, srng);
-
- while (*budget) {
- *budget -= 1;
- rx_mon_status_desc = ath12k_hal_srng_src_peek(ab, srng);
- if (!rx_mon_status_desc) {
- pmon->buf_state = DP_MON_STATUS_REPLINISH;
- break;
- }
- ath12k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
- &cookie, &rbm);
- if (paddr) {
- buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
-
- spin_lock_bh(&rx_ring->idr_lock);
- skb = idr_find(&rx_ring->bufs_idr, buf_id);
- spin_unlock_bh(&rx_ring->idr_lock);
-
- if (!skb) {
- ath12k_warn(ab, "rx monitor status with invalid buf_id %d\n",
- buf_id);
- pmon->buf_state = DP_MON_STATUS_REPLINISH;
- goto move_next;
- }
-
- rxcb = ATH12K_SKB_RXCB(skb);
-
- dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
-
- tlv = (struct hal_tlv_64_hdr *)skb->data;
- if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) !=
- HAL_RX_STATUS_BUFFER_DONE) {
- pmon->buf_state = DP_MON_STATUS_NO_DMA;
- ath12k_warn(ab,
- "mon status DONE not set %llx, buf_id %d\n",
- le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG),
- buf_id);
- /* The RxDMA status-done bit might not be set even
-  * though TP has already been moved by the HW.
-  */
-
- /* If the done status is missing:
-  * 1. As per the MAC team's suggestion, when the HP + 1 entry
-  *    is peeked and its DMA is not done but the HP + 2 entry's
-  *    DMA done is set, skip the HP + 1 entry and start
-  *    processing it in the next interrupt.
-  * 2. If the HP + 2 entry's DMA done is not set either, keep
-  *    polling for the HP + 1 entry's DMA done bit and check the
-  *    same buffer again on the next
-  *    dp_rx_mon_status_srng_process pass.
-  */
- reap_status = ath12k_dp_rx_mon_buf_done(ab, srng,
- rx_ring);
- if (reap_status == DP_MON_STATUS_NO_DMA)
- continue;
-
- spin_lock_bh(&rx_ring->idr_lock);
- idr_remove(&rx_ring->bufs_idr, buf_id);
- spin_unlock_bh(&rx_ring->idr_lock);
-
- dma_unmap_single(ab->dev, rxcb->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
-
- dev_kfree_skb_any(skb);
- pmon->buf_state = DP_MON_STATUS_REPLINISH;
- goto move_next;
- }
-
- spin_lock_bh(&rx_ring->idr_lock);
- idr_remove(&rx_ring->bufs_idr, buf_id);
- spin_unlock_bh(&rx_ring->idr_lock);
-
- dma_unmap_single(ab->dev, rxcb->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
-
- if (ath12k_dp_pkt_set_pktlen(skb, RX_MON_STATUS_BUF_SIZE)) {
- dev_kfree_skb_any(skb);
- goto move_next;
- }
- __skb_queue_tail(skb_list, skb);
- } else {
- pmon->buf_state = DP_MON_STATUS_REPLINISH;
- }
-move_next:
- skb = ath12k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
- &buf_id);
-
- if (!skb) {
- ath12k_warn(ab, "failed to alloc buffer for status ring\n");
- hal_params = ab->hw_params->hal_params;
- ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
- hal_params->rx_buf_rbm);
- num_buffs_reaped++;
- break;
- }
- rxcb = ATH12K_SKB_RXCB(skb);
-
- cookie = u32_encode_bits(mac_id, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
- u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
-
- ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
- cookie,
- ab->hw_params->hal_params->rx_buf_rbm);
- ath12k_hal_srng_src_get_next_entry(ab, srng);
- num_buffs_reaped++;
- }
- ath12k_hal_srng_access_end(ab, srng);
- spin_unlock_bh(&srng->lock);
-
- return num_buffs_reaped;
-}
-
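
The status ring above round-trips buffers through an IDR by packing the pdev id and buf_id into the descriptor cookie with u32_encode_bits()/u32_get_bits(). A standalone equivalent with assumed field widths (the real DP_RXDMA_BUF_COOKIE_* masks may differ):

#include <stdint.h>

#define EX_COOKIE_BUF_ID_MASK	0x0003ffffu		/* bits 17:0 (assumed) */
#define EX_COOKIE_PDEV_SHIFT	18
#define EX_COOKIE_PDEV_MASK	(0x3u << EX_COOKIE_PDEV_SHIFT)

static uint32_t ex_cookie_pack(uint32_t pdev_id, uint32_t buf_id)
{
	return ((pdev_id << EX_COOKIE_PDEV_SHIFT) & EX_COOKIE_PDEV_MASK) |
	       (buf_id & EX_COOKIE_BUF_ID_MASK);
}

static uint32_t ex_cookie_buf_id(uint32_t cookie)
{
	return cookie & EX_COOKIE_BUF_ID_MASK;
}
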
-static u32
-ath12k_dp_rx_mon_mpdu_pop(struct ath12k *ar, int mac_id,
- void *ring_entry, struct sk_buff **head_msdu,
- struct sk_buff **tail_msdu,
- struct list_head *used_list,
- u32 *npackets, u32 *ppdu_id)
-{
- struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
- struct ath12k_buffer_addr *p_buf_addr_info, *p_last_buf_addr_info;
- u32 msdu_ppdu_id = 0, msdu_cnt = 0, total_len = 0, frag_len = 0;
- u32 rx_buf_size, rx_pkt_offset, sw_cookie;
- bool is_frag, is_first_msdu, drop_mpdu = false;
- struct hal_reo_entrance_ring *ent_desc =
- (struct hal_reo_entrance_ring *)ring_entry;
- u32 rx_bufs_used = 0, i = 0, desc_bank = 0;
- struct hal_rx_desc *rx_desc, *tail_rx_desc;
- struct hal_rx_msdu_link *msdu_link_desc;
- struct sk_buff *msdu = NULL, *last = NULL;
- struct ath12k_rx_desc_info *desc_info;
- struct ath12k_buffer_addr buf_info;
- struct hal_rx_msdu_list msdu_list;
- struct ath12k_skb_rxcb *rxcb;
- u16 num_msdus = 0;
- dma_addr_t paddr;
- u8 rbm;
-
- ath12k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
- &sw_cookie,
- &p_last_buf_addr_info, &rbm,
- &msdu_cnt);
-
- spin_lock_bh(&pmon->mon_lock);
-
- if (le32_get_bits(ent_desc->info1,
- HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON) ==
- HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
- u8 rxdma_err = le32_get_bits(ent_desc->info1,
- HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE);
- if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
- rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
- rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
- drop_mpdu = true;
- pmon->rx_mon_stats.dest_mpdu_drop++;
- }
- }
-
- is_frag = false;
- is_first_msdu = true;
- rx_pkt_offset = sizeof(struct hal_rx_desc);
-
- do {
- if (pmon->mon_last_linkdesc_paddr == paddr) {
- pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
- spin_unlock_bh(&pmon->mon_lock);
- return rx_bufs_used;
- }
-
- desc_bank = u32_get_bits(sw_cookie, DP_LINK_DESC_BANK_MASK);
- msdu_link_desc =
- ar->ab->dp.link_desc_banks[desc_bank].vaddr +
- (paddr - ar->ab->dp.link_desc_banks[desc_bank].paddr);
-
- ath12k_hal_rx_msdu_list_get(ar, msdu_link_desc, &msdu_list,
- &num_msdus);
- desc_info = ath12k_dp_get_rx_desc(ar->ab,
- msdu_list.sw_cookie[num_msdus - 1]);
- tail_rx_desc = (struct hal_rx_desc *)(desc_info->skb)->data;
-
- for (i = 0; i < num_msdus; i++) {
- u32 l2_hdr_offset;
-
- if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
- ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
- "i %d last_cookie %d is same\n",
- i, pmon->mon_last_buf_cookie);
- drop_mpdu = true;
- pmon->rx_mon_stats.dup_mon_buf_cnt++;
- continue;
- }
-
- desc_info =
- ath12k_dp_get_rx_desc(ar->ab, msdu_list.sw_cookie[i]);
- msdu = desc_info->skb;
-
- if (!msdu) {
- ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
- "msdu_pop: invalid msdu (%d/%d)\n",
- i + 1, num_msdus);
- goto next_msdu;
- }
- rxcb = ATH12K_SKB_RXCB(msdu);
- if (rxcb->paddr != msdu_list.paddr[i]) {
- ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
- "i %d paddr %lx != %lx\n",
- i, (unsigned long)rxcb->paddr,
- (unsigned long)msdu_list.paddr[i]);
- drop_mpdu = true;
- continue;
- }
- if (!rxcb->unmapped) {
- dma_unmap_single(ar->ab->dev, rxcb->paddr,
- msdu->len +
- skb_tailroom(msdu),
- DMA_FROM_DEVICE);
- rxcb->unmapped = 1;
- }
- if (drop_mpdu) {
- ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
- "i %d drop msdu %p *ppdu_id %x\n",
- i, msdu, *ppdu_id);
- dev_kfree_skb_any(msdu);
- msdu = NULL;
- goto next_msdu;
- }
-
- rx_desc = (struct hal_rx_desc *)msdu->data;
- l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, tail_rx_desc);
- if (is_first_msdu) {
- if (!ath12k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
- drop_mpdu = true;
- dev_kfree_skb_any(msdu);
- msdu = NULL;
- pmon->mon_last_linkdesc_paddr = paddr;
- goto next_msdu;
- }
- msdu_ppdu_id =
- ath12k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
-
- if (ath12k_dp_mon_comp_ppduid(msdu_ppdu_id,
- ppdu_id)) {
- spin_unlock_bh(&pmon->mon_lock);
- return rx_bufs_used;
- }
- pmon->mon_last_linkdesc_paddr = paddr;
- is_first_msdu = false;
- }
- ath12k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
- &is_frag, &total_len,
- &frag_len, &msdu_cnt);
- rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
-
- if (ath12k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
- dev_kfree_skb_any(msdu);
- goto next_msdu;
- }
-
- if (!(*head_msdu))
- *head_msdu = msdu;
- else if (last)
- last->next = msdu;
-
- last = msdu;
-next_msdu:
- pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
- rx_bufs_used++;
- desc_info->skb = NULL;
- list_add_tail(&desc_info->list, used_list);
- }
-
- ath12k_hal_rx_buf_addr_info_set(&buf_info, paddr, sw_cookie, rbm);
-
- ath12k_dp_mon_next_link_desc_get(msdu_link_desc, &paddr,
- &sw_cookie, &rbm,
- &p_buf_addr_info);
-
- ath12k_dp_rx_link_desc_return(ar->ab, &buf_info,
- HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
-
- p_last_buf_addr_info = p_buf_addr_info;
-
- } while (paddr && msdu_cnt);
-
- spin_unlock_bh(&pmon->mon_lock);
-
- if (last)
- last->next = NULL;
-
- *tail_msdu = msdu;
-
- if (msdu_cnt == 0)
- *npackets = 1;
-
- return rx_bufs_used;
-}
-
-/* Destination ring processing is considered stuck if the destination
- * ring does not move while the status ring moves 16 PPDUs. As a
- * workaround, destination ring processing skips the stuck PPDU.
- */
-#define MON_DEST_RING_STUCK_MAX_CNT 16
-
-static void ath12k_dp_rx_mon_dest_process(struct ath12k *ar, int mac_id,
- u32 quota, struct napi_struct *napi)
-{
- struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
- struct ath12k_pdev_mon_stats *rx_mon_stats;
- u32 ppdu_id, rx_bufs_used = 0, ring_id;
- u32 mpdu_rx_bufs_used, npackets = 0;
- struct ath12k_dp *dp = &ar->ab->dp;
- struct ath12k_base *ab = ar->ab;
- void *ring_entry, *mon_dst_srng;
- struct dp_mon_mpdu *tmp_mpdu;
- LIST_HEAD(rx_desc_used_list);
- struct hal_srng *srng;
-
- ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
- srng = &ab->hal.srng_list[ring_id];
-
- mon_dst_srng = &ab->hal.srng_list[ring_id];
-
- spin_lock_bh(&srng->lock);
-
- ath12k_hal_srng_access_begin(ab, mon_dst_srng);
-
- ppdu_id = pmon->mon_ppdu_info.ppdu_id;
- rx_mon_stats = &pmon->rx_mon_stats;
-
- while ((ring_entry = ath12k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
- struct sk_buff *head_msdu, *tail_msdu;
-
- head_msdu = NULL;
- tail_msdu = NULL;
-
- mpdu_rx_bufs_used = ath12k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
- &head_msdu, &tail_msdu,
- &rx_desc_used_list,
- &npackets, &ppdu_id);
-
- rx_bufs_used += mpdu_rx_bufs_used;
-
- if (mpdu_rx_bufs_used) {
- dp->mon_dest_ring_stuck_cnt = 0;
- } else {
- dp->mon_dest_ring_stuck_cnt++;
- rx_mon_stats->dest_mon_not_reaped++;
- }
-
- if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
- rx_mon_stats->dest_mon_stuck++;
- ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
- "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
- pmon->mon_ppdu_info.ppdu_id, ppdu_id,
- dp->mon_dest_ring_stuck_cnt,
- rx_mon_stats->dest_mon_not_reaped,
- rx_mon_stats->dest_mon_stuck);
- spin_lock_bh(&pmon->mon_lock);
- pmon->mon_ppdu_info.ppdu_id = ppdu_id;
- spin_unlock_bh(&pmon->mon_lock);
- continue;
- }
-
- if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
- spin_lock_bh(&pmon->mon_lock);
- pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
- spin_unlock_bh(&pmon->mon_lock);
- ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
- "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
- ppdu_id, pmon->mon_ppdu_info.ppdu_id,
- rx_mon_stats->dest_mon_not_reaped,
- rx_mon_stats->dest_mon_stuck);
- break;
- }
-
- if (head_msdu && tail_msdu) {
- tmp_mpdu = kzalloc(sizeof(*tmp_mpdu), GFP_ATOMIC);
- if (!tmp_mpdu)
- break;
-
- tmp_mpdu->head = head_msdu;
- tmp_mpdu->tail = tail_msdu;
- tmp_mpdu->err_bitmap = pmon->err_bitmap;
- tmp_mpdu->decap_format = pmon->decap_format;
- ath12k_dp_mon_rx_deliver(ar, tmp_mpdu,
- &pmon->mon_ppdu_info, napi);
- rx_mon_stats->dest_mpdu_done++;
- kfree(tmp_mpdu);
- }
-
- ring_entry = ath12k_hal_srng_dst_get_next_entry(ar->ab,
- mon_dst_srng);
- }
- ath12k_hal_srng_access_end(ar->ab, mon_dst_srng);
-
- spin_unlock_bh(&srng->lock);
-
- if (rx_bufs_used) {
- rx_mon_stats->dest_ppdu_done++;
- ath12k_dp_rx_bufs_replenish(ar->ab,
- &dp->rx_refill_buf_ring,
- &rx_desc_used_list,
- rx_bufs_used);
- }
-}
-
-static int
-__ath12k_dp_mon_process_ring(struct ath12k *ar, int mac_id,
- struct napi_struct *napi, int *budget)
-{
- struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
- struct ath12k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
- struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- enum hal_rx_mon_status hal_status;
- struct sk_buff_head skb_list;
- int num_buffs_reaped;
- struct sk_buff *skb;
-
- __skb_queue_head_init(&skb_list);
-
- num_buffs_reaped = ath12k_dp_rx_reap_mon_status_ring(ar->ab, mac_id,
- budget, &skb_list);
- if (!num_buffs_reaped)
- goto exit;
-
- while ((skb = __skb_dequeue(&skb_list))) {
- memset(ppdu_info, 0, sizeof(*ppdu_info));
- ppdu_info->peer_id = HAL_INVALID_PEERID;
-
- hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
-
- if (ar->monitor_started &&
- pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
- hal_status == HAL_TLV_STATUS_PPDU_DONE) {
- rx_mon_stats->status_ppdu_done++;
- pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
- ath12k_dp_rx_mon_dest_process(ar, mac_id, *budget, napi);
- pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
- }
-
- dev_kfree_skb_any(skb);
- }
-
-exit:
- return num_buffs_reaped;
-}
-
-int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
- struct napi_struct *napi, int budget,
- enum dp_monitor_mode monitor_mode)
-{
- struct ath12k *ar = ath12k_ab_to_ar(ab, mac_id);
- int num_buffs_reaped = 0;
-
- if (ab->hw_params->rxdma1_enable) {
- if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
- num_buffs_reaped = ath12k_dp_mon_srng_process(ar, &budget, napi);
- } else {
- if (ar->monitor_started)
- num_buffs_reaped =
- __ath12k_dp_mon_process_ring(ar, mac_id, napi, &budget);
- }
-
- return num_buffs_reaped;
+ ath12k_dp_mon_rx_update_user_stats(ab, ppdu_info, i);
}
+EXPORT_SYMBOL(ath12k_dp_mon_rx_update_peer_mu_stats);
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.h b/drivers/net/wireless/ath/ath12k/dp_mon.h
index e25595cbdcf3..167028d27513 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.h
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_DP_MON_H
@@ -12,6 +12,12 @@
#define ATH12K_MON_RX_DOT11_OFFSET 5
#define ATH12K_MON_RX_PKT_OFFSET 8
+#define ATH12K_LE32_DEC_ENC(value, dec_bits, enc_bits) \
+ u32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)
+
+#define ATH12K_LE64_DEC_ENC(value, dec_bits, enc_bits) \
+ u32_encode_bits(le64_get_bits(value, dec_bits), enc_bits)
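+
+/*
+ * Illustrative use (a sketch; the field masks below are hypothetical,
+ * not defined in this patch): decode a bitfield from a little-endian
+ * descriptor word and re-encode it into a host-order word in one step:
+ *
+ *	info = ATH12K_LE32_DEC_ENC(desc->info0, HW_MCS_MASK, SW_MCS_MASK);
+ *
+ * which expands to
+ *
+ *	u32_encode_bits(le32_get_bits(desc->info0, HW_MCS_MASK),
+ *			SW_MCS_MASK);
+ */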
+
enum dp_monitor_mode {
ATH12K_DP_TX_MONITOR_MODE,
ATH12K_DP_RX_MONITOR_MODE
@@ -77,31 +83,41 @@ struct dp_mon_tx_ppdu_info {
struct dp_mon_mpdu *tx_mon_mpdu;
};
-enum hal_rx_mon_status
-ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
- struct ath12k_mon_data *pmon,
- struct sk_buff *skb,
- struct napi_struct *napi);
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *buf_ring,
int req_entries);
int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *rx_ring,
int req_entries);
-int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
- struct napi_struct *napi, int budget,
- enum dp_monitor_mode monitor_mode);
-struct sk_buff *ath12k_dp_mon_tx_alloc_skb(void);
-enum dp_mon_tx_tlv_status
-ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
- struct hal_tlv_hdr *tx_tlv,
- u8 *num_users);
-enum hal_rx_mon_status
-ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
- struct ath12k_mon_data *pmon,
- struct sk_buff *skb,
- struct napi_struct *napi,
- u32 ppdu_id);
void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info);
-int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget, struct napi_struct *napi);
+void
+ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info);
+void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k_dp_link_peer *peer,
+ struct hal_rx_mon_ppdu_info *ppdu_info);
+int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len);
+struct sk_buff *
+ath12k_dp_rx_alloc_mon_status_buf(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring,
+ int *buf_id);
+u32 ath12k_dp_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id);
+int
+ath12k_dp_mon_parse_status_buf(struct ath12k_pdev_dp *dp_pdev,
+ struct ath12k_mon_data *pmon,
+ const struct dp_mon_packet_info *packet_info);
+void ath12k_dp_mon_update_radiotap(struct ath12k_pdev_dp *dp_pdev,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct sk_buff *mon_skb,
+ struct ieee80211_rx_status *rxs);
+void ath12k_dp_mon_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev,
+ struct napi_struct *napi,
+ struct sk_buff *msdu,
+ const struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct ieee80211_rx_status *status,
+ u8 decap);
+struct sk_buff *
+ath12k_dp_mon_rx_merg_msdus(struct ath12k_pdev_dp *dp_pdev,
+ struct dp_mon_mpdu *mon_mpdu,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct ieee80211_rx_status *rxs);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_peer.c b/drivers/net/wireless/ath/ath12k/dp_peer.c
new file mode 100644
index 000000000000..2e66872b5572
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_peer.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "core.h"
+#include "dp_peer.h"
+#include "debug.h"
+#include "debugfs.h"
+
+void ath12k_dp_link_peer_free(struct ath12k_dp_link_peer *peer)
+{
+ list_del(&peer->list);
+
+ kfree(peer->peer_stats.rx_stats);
+ kfree(peer);
+}
+
+struct ath12k_dp_link_peer *
+ath12k_dp_link_peer_find_by_vdev_and_addr(struct ath12k_dp *dp,
+ int vdev_id, const u8 *addr)
+{
+ struct ath12k_dp_link_peer *peer;
+
+ lockdep_assert_held(&dp->dp_lock);
+
+ list_for_each_entry(peer, &dp->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath12k_dp_link_peer *
+ath12k_dp_link_peer_find_by_pdev_and_addr(struct ath12k_dp *dp, u8 pdev_idx,
+ const u8 *addr)
+{
+ struct ath12k_dp_link_peer *peer;
+
+ lockdep_assert_held(&dp->dp_lock);
+
+ list_for_each_entry(peer, &dp->peers, list) {
+ if (peer->pdev_idx != pdev_idx)
+ continue;
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath12k_dp_link_peer *
+ath12k_dp_link_peer_find_by_addr(struct ath12k_dp *dp, const u8 *addr)
+{
+ lockdep_assert_held(&dp->dp_lock);
+
+ return rhashtable_lookup_fast(dp->rhead_peer_addr, addr,
+ dp->rhash_peer_addr_param);
+}
+EXPORT_SYMBOL(ath12k_dp_link_peer_find_by_addr);
+
+static struct ath12k_dp_link_peer *
+ath12k_dp_link_peer_find_by_ml_id(struct ath12k_dp *dp, int ml_peer_id)
+{
+ struct ath12k_dp_link_peer *peer;
+
+ lockdep_assert_held(&dp->dp_lock);
+
+ list_for_each_entry(peer, &dp->peers, list)
+ if (ml_peer_id == peer->ml_id)
+ return peer;
+
+ return NULL;
+}
+
+static struct ath12k_dp_link_peer *
+ath12k_dp_link_peer_search_by_id(struct ath12k_dp *dp, int peer_id)
+{
+ struct ath12k_dp_link_peer *peer;
+
+ lockdep_assert_held(&dp->dp_lock);
+
+ if (peer_id == HAL_INVALID_PEERID)
+ return NULL;
+
+ if (peer_id & ATH12K_PEER_ML_ID_VALID)
+ return ath12k_dp_link_peer_find_by_ml_id(dp, peer_id);
+
+ list_for_each_entry(peer, &dp->peers, list)
+ if (peer_id == peer->peer_id)
+ return peer;
+
+ return NULL;
+}
+
+bool ath12k_dp_link_peer_exist_by_vdev_id(struct ath12k_dp *dp, int vdev_id)
+{
+ struct ath12k_dp_link_peer *peer;
+
+ spin_lock_bh(&dp->dp_lock);
+
+ list_for_each_entry(peer, &dp->peers, list) {
+ if (vdev_id == peer->vdev_id) {
+ spin_unlock_bh(&dp->dp_lock);
+ return true;
+ }
+ }
+ spin_unlock_bh(&dp->dp_lock);
+ return false;
+}
+
+struct ath12k_dp_link_peer *
+ath12k_dp_link_peer_find_by_ast(struct ath12k_dp *dp, int ast_hash)
+{
+ struct ath12k_dp_link_peer *peer;
+
+ lockdep_assert_held(&dp->dp_lock);
+
+ list_for_each_entry(peer, &dp->peers, list)
+ if (ast_hash == peer->ast_hash)
+ return peer;
+
+ return NULL;
+}
+
+void ath12k_dp_link_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
+{
+ struct ath12k_dp_link_peer *peer;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+
+ spin_lock_bh(&dp->dp_lock);
+
+ peer = ath12k_dp_link_peer_search_by_id(dp, peer_id);
+ if (!peer) {
+ ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
+ peer_id);
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+ peer->vdev_id, peer->addr, peer_id);
+
+ ath12k_dp_link_peer_free(peer);
+ wake_up(&ab->peer_mapping_wq);
+
+exit:
+ spin_unlock_bh(&dp->dp_lock);
+}
+
+void ath12k_dp_link_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
+{
+ struct ath12k_dp_link_peer *peer;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k *ar;
+
+ spin_lock_bh(&dp->dp_lock);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, mac_addr);
+ if (!peer) {
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ goto exit;
+
+ peer->vdev_id = vdev_id;
+ peer->peer_id = peer_id;
+ peer->ast_hash = ast_hash;
+ peer->hw_peer_id = hw_peer_id;
+ ether_addr_copy(peer->addr, mac_addr);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (ar && ath12k_debugfs_is_extd_rx_stats_enabled(ar) &&
+ !peer->peer_stats.rx_stats) {
+ peer->peer_stats.rx_stats =
+ kzalloc(sizeof(*peer->peer_stats.rx_stats), GFP_ATOMIC);
+ }
+ rcu_read_unlock();
+
+ list_add(&peer->list, &dp->peers);
+ wake_up(&ab->peer_mapping_wq);
+ ewma_avg_rssi_init(&peer->avg_rssi);
+ }
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
+ vdev_id, mac_addr, peer_id);
+
+exit:
+ spin_unlock_bh(&dp->dp_lock);
+}
+
+struct ath12k_link_sta *ath12k_dp_link_peer_to_link_sta(struct ath12k_base *ab,
+ struct ath12k_dp_link_peer *peer)
+{
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "ath12k_dp_link_peer to ath12k_link_sta called without rcu lock");
+
+ if (!peer->sta)
+ return NULL;
+
+ ahsta = ath12k_sta_to_ahsta(peer->sta);
+ if (peer->ml_id & ATH12K_PEER_ML_ID_VALID) {
+ if (!(ahsta->links_map & BIT(peer->link_id))) {
+ ath12k_warn(ab, "peer %pM id %d link_id %d can't found in STA link_map 0x%x\n",
+ peer->addr, peer->peer_id, peer->link_id,
+ ahsta->links_map);
+ return NULL;
+ }
+ arsta = rcu_dereference(ahsta->link[peer->link_id]);
+ if (!arsta)
+ return NULL;
+ } else {
+ arsta = &ahsta->deflink;
+ }
+ return arsta;
+}
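+
+/*
+ * Illustrative caller sketch (not part of this patch): the returned
+ * link-sta pointer is RCU protected, so the lookup and every use of
+ * the result must stay inside one RCU read-side section:
+ *
+ *	rcu_read_lock();
+ *	arsta = ath12k_dp_link_peer_to_link_sta(ab, peer);
+ *	if (arsta)
+ *		consume(arsta);	(hypothetical consumer)
+ *	rcu_read_unlock();
+ */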
+
+static int ath12k_dp_link_peer_rhash_addr_tbl_init(struct ath12k_dp *dp)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct rhashtable_params *param;
+ struct rhashtable *rhash_addr_tbl;
+ int ret;
+
+ lockdep_assert_held(&dp->link_peer_rhash_tbl_lock);
+
+ rhash_addr_tbl = kzalloc(sizeof(*dp->rhead_peer_addr), GFP_KERNEL);
+ if (!rhash_addr_tbl)
+ return -ENOMEM;
+
+ param = &dp->rhash_peer_addr_param;
+
+ param->key_offset = offsetof(struct ath12k_dp_link_peer, addr);
+ param->head_offset = offsetof(struct ath12k_dp_link_peer, rhash_addr);
+ param->key_len = sizeof_field(struct ath12k_dp_link_peer, addr);
+ param->automatic_shrinking = true;
+ param->nelem_hint = ab->num_radios * ath12k_core_get_max_peers_per_radio(ab);
+
+ ret = rhashtable_init(rhash_addr_tbl, param);
+ if (ret) {
+ ath12k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
+ goto err_free;
+ }
+
+ dp->rhead_peer_addr = rhash_addr_tbl;
+
+ return 0;
+
+err_free:
+ kfree(rhash_addr_tbl);
+
+ return ret;
+}
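+
+/*
+ * Note on the resulting lookup semantics (illustrative): with
+ * key_offset and key_len describing the 6-byte MAC address inside the
+ * peer struct, rhashtable hashes the raw address bytes, so a lookup
+ * can pass the address itself as the key:
+ *
+ *	peer = rhashtable_lookup_fast(dp->rhead_peer_addr, addr,
+ *				      dp->rhash_peer_addr_param);
+ */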
+
+int ath12k_dp_link_peer_rhash_tbl_init(struct ath12k_dp *dp)
+{
+ int ret;
+
+ mutex_lock(&dp->link_peer_rhash_tbl_lock);
+ ret = ath12k_dp_link_peer_rhash_addr_tbl_init(dp);
+ mutex_unlock(&dp->link_peer_rhash_tbl_lock);
+
+ return ret;
+}
+
+void ath12k_dp_link_peer_rhash_tbl_destroy(struct ath12k_dp *dp)
+{
+ mutex_lock(&dp->link_peer_rhash_tbl_lock);
+ rhashtable_destroy(dp->rhead_peer_addr);
+ kfree(dp->rhead_peer_addr);
+ dp->rhead_peer_addr = NULL;
+ mutex_unlock(&dp->link_peer_rhash_tbl_lock);
+}
+
+static int ath12k_dp_link_peer_rhash_insert(struct ath12k_dp *dp,
+ struct ath12k_dp_link_peer *peer)
+{
+ struct ath12k_dp_link_peer *tmp;
+
+ lockdep_assert_held(&dp->dp_lock);
+
+ tmp = rhashtable_lookup_get_insert_fast(dp->rhead_peer_addr, &peer->rhash_addr,
+ dp->rhash_peer_addr_param);
+ if (!tmp)
+ return 0;
+ else if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ else
+ return -EEXIST;
+}
+
+static int ath12k_dp_link_peer_rhash_remove(struct ath12k_dp *dp,
+ struct ath12k_dp_link_peer *peer)
+{
+ int ret;
+
+ lockdep_assert_held(&dp->dp_lock);
+
+ ret = rhashtable_remove_fast(dp->rhead_peer_addr, &peer->rhash_addr,
+ dp->rhash_peer_addr_param);
+ if (ret && ret != -ENOENT)
+ return ret;
+
+ return 0;
+}
+
+int ath12k_dp_link_peer_rhash_add(struct ath12k_dp *dp,
+ struct ath12k_dp_link_peer *peer)
+{
+ int ret;
+
+ lockdep_assert_held(&dp->dp_lock);
+
+ ret = ath12k_dp_link_peer_rhash_insert(dp, peer);
+ if (ret)
+ ath12k_warn(dp, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
+ peer->addr, peer->peer_id, ret);
+
+ return ret;
+}
+
+void ath12k_dp_link_peer_rhash_delete(struct ath12k_dp *dp,
+ struct ath12k_dp_link_peer *peer)
+{
+ /* No failure handling and hence return type is void */
+ int ret;
+
+ lockdep_assert_held(&dp->dp_lock);
+
+ ret = ath12k_dp_link_peer_rhash_remove(dp, peer);
+ if (ret)
+ ath12k_warn(dp, "failed to remove peer %pM with id %d in rhash_addr ret %d\n",
+ peer->addr, peer->peer_id, ret);
+}
+
+struct ath12k_dp_peer *ath12k_dp_peer_find_by_addr(struct ath12k_dp_hw *dp_hw, u8 *addr)
+{
+ struct ath12k_dp_peer *peer;
+
+ lockdep_assert_held(&dp_hw->peer_lock);
+
+ list_for_each_entry(peer, &dp_hw->dp_peers_list, list) {
+ if (ether_addr_equal(peer->addr, addr))
+ return peer;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(ath12k_dp_peer_find_by_addr);
+
+struct ath12k_dp_peer *ath12k_dp_peer_find_by_addr_and_sta(struct ath12k_dp_hw *dp_hw,
+ u8 *addr,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_dp_peer *dp_peer;
+
+ lockdep_assert_held(&dp_hw->peer_lock);
+
+ list_for_each_entry(dp_peer, &dp_hw->dp_peers_list, list) {
+ if (ether_addr_equal(dp_peer->addr, addr) && (dp_peer->sta == sta))
+ return dp_peer;
+ }
+
+ return NULL;
+}
+
+static struct ath12k_dp_peer *ath12k_dp_peer_create_find(struct ath12k_dp_hw *dp_hw,
+ u8 *addr,
+ struct ieee80211_sta *sta,
+ bool mlo_peer)
+{
+ struct ath12k_dp_peer *dp_peer;
+
+ lockdep_assert_held(&dp_hw->peer_lock);
+
+ list_for_each_entry(dp_peer, &dp_hw->dp_peers_list, list) {
+ if (ether_addr_equal(dp_peer->addr, addr)) {
+ if (!sta || mlo_peer || dp_peer->is_mlo ||
+ dp_peer->sta == sta)
+ return dp_peer;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * The index of an ath12k_dp_peer for an MLO client is the same as the
+ * peer id of the ath12k_dp_peer, while for an ath12k_dp_link_peer (MLO
+ * and non-MLO) and for the ath12k_dp_peer of a non-MLO client it is
+ * derived as ((DEVICE_ID << 10) | (10 bits of peer id)).
+ *
+ * This is done because ml_peer_id and the peer_id table are at hw
+ * granularity, while link_peer_id is at device granularity; this
+ * derivation keeps the two id spaces from conflicting.
+ */
+#define ATH12K_DP_PEER_TABLE_DEVICE_ID_SHIFT 10
+
+u16 ath12k_dp_peer_get_peerid_index(struct ath12k_dp *dp, u16 peer_id)
+{
+ return (peer_id & ATH12K_PEER_ML_ID_VALID) ? peer_id :
+ ((dp->device_id << ATH12K_DP_PEER_TABLE_DEVICE_ID_SHIFT) | peer_id);
+}
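+
+/*
+ * Worked example (illustrative): on device 1, firmware link peer id 5
+ * maps to index (1 << 10) | 5 = 1029, while a host-assigned ML peer id
+ * has ATH12K_PEER_ML_ID_VALID (BIT(13)) set and is used as the index
+ * unchanged, so the two id spaces never collide.
+ */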
+
+struct ath12k_dp_peer *ath12k_dp_peer_find_by_peerid(struct ath12k_pdev_dp *dp_pdev,
+ u16 peer_id)
+{
+ u16 index;
+ struct ath12k_dp *dp = dp_pdev->dp;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "ath12k dp peer find by peerid index called without rcu lock");
+
+ if (!peer_id || peer_id >= ATH12K_DP_PEER_ID_INVALID)
+ return NULL;
+
+ index = ath12k_dp_peer_get_peerid_index(dp, peer_id);
+
+ return rcu_dereference(dp_pdev->dp_hw->dp_peers[index]);
+}
+EXPORT_SYMBOL(ath12k_dp_peer_find_by_peerid);
+
+struct ath12k_dp_link_peer *
+ath12k_dp_link_peer_find_by_peerid(struct ath12k_pdev_dp *dp_pdev, u16 peer_id)
+{
+ struct ath12k_dp_peer *dp_peer = NULL;
+ u8 link_id;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "ath12k dp link peer find by peerid index called without rcu lock");
+
+ if (dp_pdev->hw_link_id >= ATH12K_GROUP_MAX_RADIO)
+ return NULL;
+
+ dp_peer = ath12k_dp_peer_find_by_peerid(dp_pdev, peer_id);
+ if (!dp_peer)
+ return NULL;
+
+ link_id = dp_peer->hw_links[dp_pdev->hw_link_id];
+
+ return rcu_dereference(dp_peer->link_peers[link_id]);
+}
+EXPORT_SYMBOL(ath12k_dp_link_peer_find_by_peerid);
+
+int ath12k_dp_peer_create(struct ath12k_dp_hw *dp_hw, u8 *addr,
+ struct ath12k_dp_peer_create_params *params)
+{
+ struct ath12k_dp_peer *dp_peer;
+
+ spin_lock_bh(&dp_hw->peer_lock);
+ dp_peer = ath12k_dp_peer_create_find(dp_hw, addr, params->sta, params->is_mlo);
+ if (dp_peer) {
+ spin_unlock_bh(&dp_hw->peer_lock);
+ return -EEXIST;
+ }
+ spin_unlock_bh(&dp_hw->peer_lock);
+
+ dp_peer = kzalloc(sizeof(*dp_peer), GFP_ATOMIC);
+ if (!dp_peer)
+ return -ENOMEM;
+
+ ether_addr_copy(dp_peer->addr, addr);
+ dp_peer->sta = params->sta;
+ dp_peer->is_mlo = params->is_mlo;
+
+ /*
+ * For an MLO client, the host assigns the ML peer ID, so set peer_id
+ * in dp_peer here. For a non-MLO client, the host gets the link peer
+ * ID from firmware and assigns it at link peer creation time.
+ */
+ dp_peer->peer_id = params->is_mlo ? params->peer_id : ATH12K_DP_PEER_ID_INVALID;
+ dp_peer->ucast_ra_only = params->ucast_ra_only;
+
+ dp_peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
+ dp_peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+
+ spin_lock_bh(&dp_hw->peer_lock);
+
+ list_add(&dp_peer->list, &dp_hw->dp_peers_list);
+
+ /*
+ * For an MLO client, the host allocates the peer_id for the
+ * ath12k_dp_peer and it is already known at this point, so the
+ * ath12k_dp_peer can be added to the RCU table using that peer_id.
+ * For a non-MLO client, the addition to the RCU table is done when
+ * the ath12k_dp_link_peer is assigned to the ath12k_dp_peer.
+ */
+ if (dp_peer->is_mlo)
+ rcu_assign_pointer(dp_hw->dp_peers[dp_peer->peer_id], dp_peer);
+
+ spin_unlock_bh(&dp_hw->peer_lock);
+
+ return 0;
+}
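+
+/*
+ * Illustrative caller sketch (field names inferred from the usage
+ * above, not a definitive API):
+ *
+ *	struct ath12k_dp_peer_create_params params = {
+ *		.sta = sta,
+ *		.is_mlo = true,
+ *		.peer_id = ml_peer_id,	(host-assigned for MLO clients)
+ *	};
+ *	ret = ath12k_dp_peer_create(dp_hw, sta->addr, &params);
+ */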
+
+void ath12k_dp_peer_delete(struct ath12k_dp_hw *dp_hw, u8 *addr,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_dp_peer *dp_peer;
+
+ spin_lock_bh(&dp_hw->peer_lock);
+
+ dp_peer = ath12k_dp_peer_find_by_addr_and_sta(dp_hw, addr, sta);
+ if (!dp_peer) {
+ spin_unlock_bh(&dp_hw->peer_lock);
+ return;
+ }
+
+ if (dp_peer->is_mlo)
+ rcu_assign_pointer(dp_hw->dp_peers[dp_peer->peer_id], NULL);
+
+ list_del(&dp_peer->list);
+
+ spin_unlock_bh(&dp_hw->peer_lock);
+
+ synchronize_rcu();
+ kfree(dp_peer);
+}
+
+int ath12k_dp_link_peer_assign(struct ath12k_dp *dp, struct ath12k_dp_hw *dp_hw,
+ u8 vdev_id, struct ieee80211_sta *sta, u8 *addr,
+ u8 link_id, u32 hw_link_id)
+{
+ struct ath12k_dp_peer *dp_peer;
+ struct ath12k_dp_link_peer *peer, *temp_peer;
+ u16 peerid_index;
+ int ret = -EINVAL;
+ u8 *dp_peer_mac = !sta ? addr : sta->addr;
+
+ spin_lock_bh(&dp->dp_lock);
+
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, addr);
+ if (!peer) {
+ ath12k_warn(dp, "failed to find dp_link_peer with mac %pM on vdev %u\n",
+ addr, vdev_id);
+ ret = -ENOENT;
+ goto err_peer;
+ }
+
+ spin_lock_bh(&dp_hw->peer_lock);
+
+ dp_peer = ath12k_dp_peer_find_by_addr_and_sta(dp_hw, dp_peer_mac, sta);
+ if (!dp_peer) {
+ ath12k_warn(dp, "failed to find dp_peer with mac %pM\n", dp_peer_mac);
+ ret = -ENOENT;
+ goto err_dp_peer;
+ }
+
+ /*
+ * Set peer_id in dp_peer for a non-MLO client; for an MLO client,
+ * peer_id was already set during dp_peer creation.
+ */
+ if (!dp_peer->is_mlo)
+ dp_peer->peer_id = peer->peer_id;
+
+ peer->dp_peer = dp_peer;
+ peer->hw_link_id = hw_link_id;
+
+ dp_peer->hw_links[peer->hw_link_id] = link_id;
+
+ peerid_index = ath12k_dp_peer_get_peerid_index(dp, peer->peer_id);
+
+ rcu_assign_pointer(dp_peer->link_peers[peer->link_id], peer);
+
+ rcu_assign_pointer(dp_hw->dp_peers[peerid_index], dp_peer);
+
+ spin_unlock_bh(&dp_hw->peer_lock);
+
+ /*
+ * In a split-PHY or roaming scenario the pdev idx might
+ * differ, but both pdevs share the same rhash table. In
+ * that case update the rhash table if the link_peer is
+ * already present.
+ */
+ temp_peer = ath12k_dp_link_peer_find_by_addr(dp, addr);
+ if (temp_peer && temp_peer->hw_link_id != hw_link_id)
+ ath12k_dp_link_peer_rhash_delete(dp, temp_peer);
+
+ ret = ath12k_dp_link_peer_rhash_add(dp, peer);
+ if (ret) {
+ /*
+ * If adding the new entry failed, add back the old entry.
+ * If re-adding the old entry also fails, nothing more can
+ * be done; simply proceed.
+ */
+ if (temp_peer)
+ ath12k_dp_link_peer_rhash_add(dp, temp_peer);
+ }
+
+ spin_unlock_bh(&dp->dp_lock);
+
+ return ret;
+
+err_dp_peer:
+ spin_unlock_bh(&dp_hw->peer_lock);
+
+err_peer:
+ spin_unlock_bh(&dp->dp_lock);
+
+ return ret;
+}
+
+void ath12k_dp_link_peer_unassign(struct ath12k_dp *dp, struct ath12k_dp_hw *dp_hw,
+ u8 vdev_id, u8 *addr, u32 hw_link_id)
+{
+ struct ath12k_dp_peer *dp_peer;
+ struct ath12k_dp_link_peer *peer, *temp_peer;
+ u16 peerid_index;
+
+ spin_lock_bh(&dp->dp_lock);
+
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, addr);
+ if (!peer || !peer->dp_peer) {
+ spin_unlock_bh(&dp->dp_lock);
+ return;
+ }
+
+ spin_lock_bh(&dp_hw->peer_lock);
+
+ dp_peer = peer->dp_peer;
+ dp_peer->hw_links[peer->hw_link_id] = 0;
+
+ peerid_index = ath12k_dp_peer_get_peerid_index(dp, peer->peer_id);
+
+ rcu_assign_pointer(dp_peer->link_peers[peer->link_id], NULL);
+
+ rcu_assign_pointer(dp_hw->dp_peers[peerid_index], NULL);
+
+ spin_unlock_bh(&dp_hw->peer_lock);
+
+ /* To handle roaming and split phy scenario */
+ temp_peer = ath12k_dp_link_peer_find_by_addr(dp, addr);
+ if (temp_peer && temp_peer->hw_link_id == hw_link_id)
+ ath12k_dp_link_peer_rhash_delete(dp, peer);
+
+ spin_unlock_bh(&dp->dp_lock);
+
+ synchronize_rcu();
+}
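+
+/*
+ * Note: the synchronize_rcu() above guarantees that every reader that
+ * looked up the link peer via rcu_dereference() has left its RCU
+ * read-side section before the caller may free the peer.
+ */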
+
+void
+ath12k_dp_link_peer_get_sta_rate_info_stats(struct ath12k_dp *dp, const u8 *addr,
+ struct ath12k_dp_link_peer_rate_info *info)
+{
+ struct ath12k_dp_link_peer *link_peer;
+
+ guard(spinlock_bh)(&dp->dp_lock);
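+ /* guard() from <linux/cleanup.h>: dp_lock is dropped automatically
+ * on every return path from this function.
+ */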
+
+ link_peer = ath12k_dp_link_peer_find_by_addr(dp, addr);
+ if (!link_peer)
+ return;
+
+ info->rx_duration = link_peer->rx_duration;
+ info->tx_duration = link_peer->tx_duration;
+ info->txrate.legacy = link_peer->txrate.legacy;
+ info->txrate.mcs = link_peer->txrate.mcs;
+ info->txrate.nss = link_peer->txrate.nss;
+ info->txrate.bw = link_peer->txrate.bw;
+ info->txrate.he_gi = link_peer->txrate.he_gi;
+ info->txrate.he_dcm = link_peer->txrate.he_dcm;
+ info->txrate.he_ru_alloc = link_peer->txrate.he_ru_alloc;
+ info->txrate.flags = link_peer->txrate.flags;
+ info->rssi_comb = link_peer->rssi_comb;
+ info->signal_avg = ewma_avg_rssi_read(&link_peer->avg_rssi);
+}
+
+void ath12k_dp_link_peer_reset_rx_stats(struct ath12k_dp *dp, const u8 *addr)
+{
+ struct ath12k_rx_peer_stats *rx_stats;
+ struct ath12k_dp_link_peer *link_peer;
+
+ guard(spinlock_bh)(&dp->dp_lock);
+
+ link_peer = ath12k_dp_link_peer_find_by_addr(dp, addr);
+ if (!link_peer || !link_peer->peer_stats.rx_stats)
+ return;
+
+ rx_stats = link_peer->peer_stats.rx_stats;
+ memset(rx_stats, 0, sizeof(*rx_stats));
+}
diff --git a/drivers/net/wireless/ath/ath12k/dp_peer.h b/drivers/net/wireless/ath/ath12k/dp_peer.h
new file mode 100644
index 000000000000..20294ff09513
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_peer.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_DP_PEER_H
+#define ATH12K_DP_PEER_H
+
+#include "dp_rx.h"
+
+#define ATH12K_DP_PEER_ID_INVALID 0x3FFF
+
+struct ppdu_user_delayba {
+ u16 sw_peer_id;
+ u32 info0;
+ u16 ru_end;
+ u16 ru_start;
+ u32 info1;
+ u32 rate_flags;
+ u32 resp_rate_flags;
+};
+
+#define ATH12K_PEER_ML_ID_VALID BIT(13)
+
+struct ath12k_rx_peer_rate_stats {
+ u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1];
+ u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1];
+ u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1];
+ u64 be_mcs_count[HAL_RX_MAX_MCS_BE + 1];
+ u64 nss_count[HAL_RX_MAX_NSS];
+ u64 bw_count[HAL_RX_BW_MAX];
+ u64 gi_count[HAL_RX_GI_MAX];
+ u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES];
+ u64 rx_rate[HAL_RX_BW_MAX][HAL_RX_GI_MAX][HAL_RX_MAX_NSS][HAL_RX_MAX_MCS_HT + 1];
+};
+
+struct ath12k_rx_peer_stats {
+ u64 num_msdu;
+ u64 num_mpdu_fcs_ok;
+ u64 num_mpdu_fcs_err;
+ u64 tcp_msdu_count;
+ u64 udp_msdu_count;
+ u64 other_msdu_count;
+ u64 ampdu_msdu_count;
+ u64 non_ampdu_msdu_count;
+ u64 stbc_count;
+ u64 beamformed_count;
+ u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
+ u64 tid_count[IEEE80211_NUM_TIDS + 1];
+ u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
+ u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX];
+ u64 rx_duration;
+ u64 dcm_count;
+ u64 ru_alloc_cnt[HAL_RX_RU_ALLOC_TYPE_MAX];
+ struct ath12k_rx_peer_rate_stats pkt_stats;
+ struct ath12k_rx_peer_rate_stats byte_stats;
+};
+
+struct ath12k_wbm_tx_stats {
+ u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX];
+};
+
+struct ath12k_dp_peer_stats {
+ struct ath12k_rx_peer_stats *rx_stats;
+ struct ath12k_wbm_tx_stats *wbm_tx_stats;
+};
+
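+/*
+ * EWMA with 10 bits of fractional precision and a weight reciprocal of
+ * 8 (see <linux/average.h>): each new RSSI sample contributes 1/8 to
+ * the average, roughly avg = (7 * avg + sample) / 8.
+ */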
+DECLARE_EWMA(avg_rssi, 10, 8)
+
+struct ath12k_dp_link_peer {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+ struct ath12k_dp_peer *dp_peer;
+ int vdev_id;
+ u8 addr[ETH_ALEN];
+ int peer_id;
+ u16 ast_hash;
+ u8 pdev_idx;
+ u16 hw_peer_id;
+
+ struct ppdu_user_delayba ppdu_stats_delayba;
+ bool delayba_flag;
+ bool is_authorized;
+ bool mlo;
+ /* protected by ab->data_lock */
+
+ u16 ml_id;
+
+ /* Any other ML info common to all partners can be added
+ * here and would be the same for all partner peers.
+ */
+ u8 ml_addr[ETH_ALEN];
+
+ /* To ensure only certain work related to dp is done once */
+ bool primary_link;
+
+ /* for reference to ath12k_link_sta */
+ u8 link_id;
+
+ /* peer addr based rhashtable list pointer */
+ struct rhash_head rhash_addr;
+
+ u8 hw_link_id;
+ u32 rx_tid_active_bitmask;
+
+ /* link stats */
+ struct rate_info txrate;
+ struct rate_info last_txrate;
+ u64 rx_duration;
+ u64 tx_duration;
+ u8 rssi_comb;
+ struct ewma_avg_rssi avg_rssi;
+ struct ath12k_dp_peer_stats peer_stats;
+ u32 tx_retry_failed;
+ u32 tx_retry_count;
+};
+
+void ath12k_dp_link_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
+void ath12k_dp_link_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash, u16 hw_peer_id);
+
+struct ath12k_dp_peer {
+ struct list_head list;
+ bool is_mlo;
+ bool dp_setup_done;
+
+ u8 ucast_keyidx;
+ u8 addr[ETH_ALEN];
+
+ u8 mcast_keyidx;
+ bool ucast_ra_only;
+ int peer_id;
+ struct ieee80211_sta *sta;
+
+ u8 hw_links[ATH12K_GROUP_MAX_RADIO];
+
+ u16 sec_type_grp;
+ u16 sec_type;
+
+ /* Info used in MMIC verification of RX fragments */
+ struct crypto_shash *tfm_mmic;
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+ struct ath12k_dp_link_peer __rcu *link_peers[ATH12K_NUM_MAX_LINKS];
+ struct ath12k_reoq_buf reoq_bufs[IEEE80211_NUM_TIDS + 1];
+ struct ath12k_dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
+};
+
+struct ath12k_dp_link_peer *
+ath12k_dp_link_peer_find_by_vdev_and_addr(struct ath12k_dp *dp,
+ int vdev_id, const u8 *addr);
+struct ath12k_dp_link_peer *
+ath12k_dp_link_peer_find_by_addr(struct ath12k_dp *dp, const u8 *addr);
+bool ath12k_dp_link_peer_exist_by_vdev_id(struct ath12k_dp *dp, int vdev_id);
+struct ath12k_dp_link_peer *
+ath12k_dp_link_peer_find_by_ast(struct ath12k_dp *dp, int ast_hash);
+struct ath12k_dp_link_peer *
+ath12k_dp_link_peer_find_by_pdev_and_addr(struct ath12k_dp *dp, u8 pdev_idx,
+ const u8 *addr);
+struct ath12k_link_sta *ath12k_dp_link_peer_to_link_sta(struct ath12k_base *ab,
+ struct ath12k_dp_link_peer *peer);
+int ath12k_dp_link_peer_rhash_tbl_init(struct ath12k_dp *dp);
+void ath12k_dp_link_peer_rhash_tbl_destroy(struct ath12k_dp *dp);
+int ath12k_dp_link_peer_rhash_add(struct ath12k_dp *dp,
+ struct ath12k_dp_link_peer *peer);
+void ath12k_dp_link_peer_rhash_delete(struct ath12k_dp *dp,
+ struct ath12k_dp_link_peer *peer);
+int ath12k_dp_peer_create(struct ath12k_dp_hw *dp_hw, u8 *addr,
+ struct ath12k_dp_peer_create_params *params);
+void ath12k_dp_peer_delete(struct ath12k_dp_hw *dp_hw, u8 *addr,
+ struct ieee80211_sta *sta);
+struct ath12k_dp_peer *ath12k_dp_peer_find_by_addr(struct ath12k_dp_hw *dp_hw, u8 *addr);
+struct ath12k_dp_peer *ath12k_dp_peer_find_by_addr_and_sta(struct ath12k_dp_hw *dp_hw,
+ u8 *addr,
+ struct ieee80211_sta *sta);
+u16 ath12k_dp_peer_get_peerid_index(struct ath12k_dp *dp, u16 peer_id);
+struct ath12k_dp_peer *ath12k_dp_peer_find_by_peerid(struct ath12k_pdev_dp *dp_pdev,
+ u16 peer_id);
+struct ath12k_dp_link_peer *
+ath12k_dp_link_peer_find_by_peerid(struct ath12k_pdev_dp *dp_pdev, u16 peer_id);
+void ath12k_dp_link_peer_free(struct ath12k_dp_link_peer *peer);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index d28d8ffec0f8..a32ee9f8061a 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -10,257 +10,16 @@
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
-#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
-#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
#include "debugfs_htt_stats.h"
-#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
-
static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
struct ath12k_dp_rx_tid_rxq *rx_tid);
-static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
- return HAL_ENCRYPT_TYPE_OPEN;
-
- return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
-}
-
-u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
-}
-
-static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
-}
-
-static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
-}
-
-static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
-}
-
-static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
- struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr;
-
- hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
- return ieee80211_has_morefrags(hdr->frame_control);
-}
-
-static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
- struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr;
-
- hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
- return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
-}
-
-static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
-}
-
-static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
-}
-
-static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
-}
-
-static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
-}
-
-static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
-}
-
-u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
-}
-
-static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
-}
-
-static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
-}
-
-static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
-}
-
-static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
-}
-
-static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
-}
-
-static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
-}
-
-static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
-}
-
-static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
-}
-
-static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
-}
-
-u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
-}
-
-static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
-}
-
-static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
-}
-
-static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
- struct hal_rx_desc *fdesc,
- struct hal_rx_desc *ldesc)
-{
- ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
-}
-
-static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
- struct hal_rx_desc *desc,
- u16 len)
-{
- ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
-}
-
-u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
- struct hal_rx_desc *rx_desc)
-{
- return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
-}
-
-bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
- struct hal_rx_desc *rx_desc)
-{
- u32 tlv_tag;
-
- tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc);
-
- return tlv_tag == HAL_RX_MPDU_START;
-}
-
-static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
- ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
-}
-
-static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
-}
-
-static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
-}
-
-static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
- struct hal_rx_desc *desc,
- struct ieee80211_hdr *hdr)
-{
- ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
-}
-
-static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
- struct hal_rx_desc *desc,
- u8 *crypto_hdr,
- enum hal_encrypt_type enctype)
-{
- ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
-}
-
-static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
-}
-
-static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
-{
- struct sk_buff *skb;
-
- while ((skb = __skb_dequeue(skb_list)))
- dev_kfree_skb_any(skb);
-}
-
static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
struct list_head *head,
size_t count)
@@ -305,11 +64,12 @@ static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
}
/* Returns number of Rx buffers replenished */
-int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
+int ath12k_dp_rx_bufs_replenish(struct ath12k_dp *dp,
struct dp_rxdma_ring *rx_ring,
struct list_head *used_list,
int req_entries)
{
+ struct ath12k_base *ab = dp->ab;
struct ath12k_buffer_addr *desc;
struct hal_srng *srng;
struct sk_buff *skb;
@@ -317,13 +77,12 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
int num_remain;
u32 cookie;
dma_addr_t paddr;
- struct ath12k_dp *dp = &ab->dp;
struct ath12k_rx_desc_info *rx_desc;
- enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm;
+ enum hal_rx_buf_return_buf_manager mgr = dp->hal->hal_params->rx_buf_rbm;
req_entries = min(req_entries, rx_ring->bufs_max);
- srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+ srng = &dp->hal->srng_list[rx_ring->refill_buf_ring.ring_id];
spin_lock_bh(&srng->lock);
@@ -362,10 +121,10 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
skb->data);
}
- paddr = dma_map_single(ab->dev, skb->data,
+ paddr = dma_map_single(dp->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
- if (dma_mapping_error(ab->dev, paddr))
+ if (dma_mapping_error(dp->dev, paddr))
goto fail_free_skb;
rx_desc = list_first_entry_or_null(used_list,
@@ -386,13 +145,14 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
num_remain--;
- ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ ath12k_hal_rx_buf_addr_info_set(dp->hal, desc, paddr, cookie,
+ mgr);
}
goto out;
fail_dma_unmap:
- dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ dma_unmap_single(dp->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
@@ -406,6 +166,7 @@ out:
return req_entries - num_remain;
}
+EXPORT_SYMBOL(ath12k_dp_rx_bufs_replenish);
static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *rx_ring)
@@ -432,7 +193,7 @@ static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
int i;
ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
@@ -475,14 +236,14 @@ static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
+ ath12k_dp_rx_bufs_replenish(ath12k_ab_to_dp(ab), rx_ring, &list, 0);
return 0;
}
static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct dp_rxdma_mon_ring *mon_ring;
int ret, i;
@@ -529,7 +290,7 @@ static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
int i;
for (i = 0; i < DP_REO_DST_RING_MAX; i++)
@@ -538,7 +299,7 @@ void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
int ret;
int i;
@@ -584,13 +345,15 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
return 0;
}
-static void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq,
- struct ath12k_dp_rx_tid *rx_tid)
+void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq,
+ struct ath12k_dp_rx_tid *rx_tid,
+ bool active)
{
rx_tid_rxq->tid = rx_tid->tid;
- rx_tid_rxq->active = rx_tid->active;
+ rx_tid_rxq->active = active;
rx_tid_rxq->qbuf = rx_tid->qbuf;
}
+EXPORT_SYMBOL(ath12k_dp_init_rx_tid_rxq);
static void ath12k_dp_rx_tid_cleanup(struct ath12k_base *ab,
struct ath12k_reoq_buf *tid_qbuf)
@@ -605,7 +368,7 @@ static void ath12k_dp_rx_tid_cleanup(struct ath12k_base *ab,
void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
struct dp_reo_update_rx_queue_elem *cmd_queue, *tmp_queue;
@@ -635,8 +398,8 @@ void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
spin_unlock_bh(&dp->reo_cmd_lock);
}
-static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
- enum hal_reo_cmd_status status)
+void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
{
struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;
@@ -646,109 +409,9 @@ static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
ath12k_dp_rx_tid_cleanup(dp->ab, &rx_tid->qbuf);
}
+EXPORT_SYMBOL(ath12k_dp_reo_cmd_free);
-static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab,
- struct ath12k_dp_rx_tid_rxq *rx_tid,
- enum hal_reo_cmd_type type,
- struct ath12k_hal_reo_cmd *cmd,
- void (*cb)(struct ath12k_dp *dp, void *ctx,
- enum hal_reo_cmd_status status))
-{
- struct ath12k_dp *dp = &ab->dp;
- struct ath12k_dp_rx_reo_cmd *dp_cmd;
- struct hal_srng *cmd_ring;
- int cmd_num;
-
- cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
- cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
-
- /* cmd_num should start from 1; on failure, return the error code */
- if (cmd_num < 0)
- return cmd_num;
-
- /* reo cmd ring descriptors has cmd_num starting from 1 */
- if (cmd_num == 0)
- return -EINVAL;
-
- if (!cb)
- return 0;
-
- /* Can this be optimized so that we keep the pending command list only
- * for tid delete command to free up the resource on the command status
- * indication?
- */
- dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
-
- if (!dp_cmd)
- return -ENOMEM;
-
- memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
- dp_cmd->cmd_num = cmd_num;
- dp_cmd->handler = cb;
-
- spin_lock_bh(&dp->reo_cmd_lock);
- list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
- spin_unlock_bh(&dp->reo_cmd_lock);
-
- return 0;
-}
-
-static int ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
- struct ath12k_dp_rx_tid_rxq *rx_tid)
-{
- struct ath12k_hal_reo_cmd cmd = {};
- int ret;
-
- cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
- /* HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS - all pending MPDUs
- * in the bitmap will be forwarded/flushed to REO output rings
- */
- cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS |
- HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS;
-
- /* For all QoS TIDs (except NON_QOS), the driver allocates a maximum
- * window size of 1024. In such cases, the driver can issue a single
- * 1KB descriptor flush command instead of sending multiple 128-byte
- * flush commands for each QoS TID, improving efficiency.
- */
-
- if (rx_tid->tid != HAL_DESC_REO_NON_QOS_TID)
- cmd.flag |= HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC;
-
- ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
- HAL_REO_CMD_FLUSH_CACHE,
- &cmd, ath12k_dp_reo_cmd_free);
- return ret;
-}
-
-static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
-{
- struct ath12k_reo_queue_ref *qref;
- struct ath12k_dp *dp = &ab->dp;
- bool ml_peer = false;
-
- if (!ab->hw_params->reoq_lut_support)
- return;
-
- if (peer_id & ATH12K_PEER_ML_ID_VALID) {
- peer_id &= ~ATH12K_PEER_ML_ID_VALID;
- ml_peer = true;
- }
-
- if (ml_peer)
- qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
- else
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
-
- qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
- qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
- u32_encode_bits(tid, DP_REO_QREF_NUM);
-}
-
-static void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp)
+void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp)
{
struct ath12k_base *ab = dp->ab;
struct dp_reo_update_rx_queue_elem *elem, *tmp;
@@ -762,10 +425,10 @@ static void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *
if (ath12k_dp_rx_tid_delete_handler(ab, &elem->rx_tid))
break;
- ath12k_peer_rx_tid_qref_reset(ab,
- elem->is_ml_peer ? elem->ml_peer_id :
- elem->peer_id,
- elem->rx_tid.tid);
+ ath12k_dp_arch_peer_rx_tid_qref_reset(dp,
+ elem->is_ml_peer ?
+ elem->ml_peer_id : elem->peer_id,
+ elem->rx_tid.tid);
if (ab->hw_params->reoq_lut_support)
ath12k_hal_reo_shared_qaddr_cache_clear(ab);
@@ -776,9 +439,10 @@ static void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *
spin_unlock_bh(&dp->reo_rxq_flush_lock);
}
+EXPORT_SYMBOL(ath12k_dp_rx_process_reo_cmd_update_rx_queue_list);
-static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
- enum hal_reo_cmd_status status)
+void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
{
struct ath12k_base *ab = dp->ab;
struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;
@@ -796,9 +460,9 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
/* Retry the HAL_REO_CMD_UPDATE_RX_QUEUE command for entries
* in the pending queue list marked TID as inactive
*/
- spin_lock_bh(&dp->ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
- spin_unlock_bh(&dp->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
if (!elem)
@@ -825,11 +489,12 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
* will be called during core destroy.
*/
- if (ath12k_dp_reo_cache_flush(ab, &elem->data))
+ if (ath12k_dp_arch_reo_cache_flush(dp, &elem->data))
break;
list_del(&elem->list);
dp->reo_cmd_cache_flush_count--;
+
kfree(elem);
}
}
@@ -839,55 +504,17 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
free_desc:
ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
}
+EXPORT_SYMBOL(ath12k_dp_rx_tid_del_func);
static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
struct ath12k_dp_rx_tid_rxq *rx_tid)
{
- struct ath12k_hal_reo_cmd cmd = {};
-
- cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
- cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
- /* Observed flush cache failure, to avoid that set vld bit during delete */
- cmd.upd1 |= HAL_REO_CMD_UPD1_VLD;
-
- return ath12k_dp_reo_cmd_send(ab, rx_tid,
- HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
- ath12k_dp_rx_tid_del_func);
-}
-
-static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
- dma_addr_t paddr)
-{
- struct ath12k_reo_queue_ref *qref;
- struct ath12k_dp *dp = &ab->dp;
- bool ml_peer = false;
-
- if (!ab->hw_params->reoq_lut_support)
- return;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
- if (peer_id & ATH12K_PEER_ML_ID_VALID) {
- peer_id &= ~ATH12K_PEER_ML_ID_VALID;
- ml_peer = true;
- }
-
- if (ml_peer)
- qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
- else
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
-
- qref->info0 = u32_encode_bits(lower_32_bits(paddr),
- BUFFER_ADDR_INFO0_ADDR);
- qref->info1 = u32_encode_bits(upper_32_bits(paddr),
- BUFFER_ADDR_INFO1_ADDR) |
- u32_encode_bits(tid, DP_REO_QREF_NUM);
- ath12k_hal_reo_shared_qaddr_cache_clear(ab);
+ return ath12k_dp_arch_rx_tid_delete_handler(dp, rx_tid);
}
-static void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid)
+void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid)
{
struct dp_reo_update_rx_queue_elem *elem;
struct ath12k_dp_rx_tid_rxq *rx_tid;
@@ -904,192 +531,39 @@ static void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8
}
spin_unlock_bh(&dp->reo_rxq_flush_lock);
}
+EXPORT_SYMBOL(ath12k_dp_mark_tid_as_inactive);
-void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
- struct ath12k_peer *peer, u8 tid)
-{
- struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
- struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
-
- if (!rx_tid->active)
- return;
-
- rx_tid->active = false;
-
- ath12k_dp_mark_tid_as_inactive(dp, peer->peer_id, tid);
- ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
-}
-
-int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
- struct ath12k_buffer_addr *buf_addr_info,
- enum hal_wbm_rel_bm_act action)
-{
- struct hal_wbm_release_ring *desc;
- struct ath12k_dp *dp = &ab->dp;
- struct hal_srng *srng;
- int ret = 0;
-
- srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
-
- spin_lock_bh(&srng->lock);
-
- ath12k_hal_srng_access_begin(ab, srng);
-
- desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
- if (!desc) {
- ret = -ENOBUFS;
- goto exit;
- }
-
- ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action);
-
-exit:
- ath12k_hal_srng_access_end(ab, srng);
-
- spin_unlock_bh(&srng->lock);
-
- return ret;
-}
-
-static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
- bool rel_link_desc)
-{
- struct ath12k_buffer_addr *buf_addr_info;
- struct ath12k_base *ab = rx_tid->ab;
-
- lockdep_assert_held(&ab->base_lock);
-
- if (rx_tid->dst_ring_desc) {
- if (rel_link_desc) {
- buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info;
- ath12k_dp_rx_link_desc_return(ab, buf_addr_info,
- HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
- }
- kfree(rx_tid->dst_ring_desc);
- rx_tid->dst_ring_desc = NULL;
- }
-
- rx_tid->cur_sn = 0;
- rx_tid->last_frag_no = 0;
- rx_tid->rx_frag_bitmap = 0;
- __skb_queue_purge(&rx_tid->rx_frags);
-}
-
-void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
+void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_dp_link_peer *peer)
{
struct ath12k_dp_rx_tid *rx_tid;
int i;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+
+ lockdep_assert_held(&dp->dp_lock);
- lockdep_assert_held(&ar->ab->base_lock);
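+ /* rx TID state is maintained only for the primary link peer */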
+ if (!peer->primary_link)
+ return;
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
- rx_tid = &peer->rx_tid[i];
+ rx_tid = &peer->dp_peer->rx_tid[i];
- ath12k_dp_rx_peer_tid_delete(ar, peer, i);
- ath12k_dp_rx_frags_cleanup(rx_tid, true);
+ ath12k_dp_arch_rx_peer_tid_delete(dp, peer, i);
+ ath12k_dp_arch_rx_frags_cleanup(dp, rx_tid, true);
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
timer_delete_sync(&rx_tid->frag_timer);
- spin_lock_bh(&ar->ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
}
}
-static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
- struct ath12k_peer *peer,
- struct ath12k_dp_rx_tid *rx_tid,
- u32 ba_win_sz, u16 ssn,
- bool update_ssn)
-{
- struct ath12k_hal_reo_cmd cmd = {};
- int ret;
- struct ath12k_dp_rx_tid_rxq rx_tid_rxq;
-
- ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid);
-
- cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
- cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
- cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
- cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
- cmd.ba_window_size = ba_win_sz;
-
- if (update_ssn) {
- cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
- cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
- }
-
- ret = ath12k_dp_reo_cmd_send(ar->ab, &rx_tid_rxq,
- HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
- NULL);
- if (ret) {
- ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
- rx_tid_rxq.tid, ret);
- return ret;
- }
-
- rx_tid->ba_win_sz = ba_win_sz;
-
- return 0;
-}
-
-static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
- struct ath12k_sta *ahsta,
- struct ath12k_dp_rx_tid *rx_tid,
- u16 ssn, enum hal_pn_type pn_type)
-{
- u32 ba_win_sz = rx_tid->ba_win_sz;
- struct ath12k_reoq_buf *buf;
- void *vaddr, *vaddr_aligned;
- dma_addr_t paddr_aligned;
- u8 tid = rx_tid->tid;
- u32 hw_desc_sz;
- int ret;
-
- buf = &ahsta->reoq_bufs[tid];
- if (!buf->vaddr) {
- /* TODO: Optimize the memory allocation for qos tid based on
- * the actual BA window size in REO tid update path.
- */
- if (tid == HAL_DESC_REO_NON_QOS_TID)
- hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
- else
- hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
-
- vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
- if (!vaddr)
- return -ENOMEM;
-
- vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
-
- ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
- ssn, pn_type);
-
- paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
- DMA_BIDIRECTIONAL);
- ret = dma_mapping_error(ab->dev, paddr_aligned);
- if (ret) {
- kfree(vaddr);
- return ret;
- }
-
- buf->vaddr = vaddr;
- buf->paddr_aligned = paddr_aligned;
- buf->size = hw_desc_sz;
- }
-
- rx_tid->qbuf = *buf;
- rx_tid->active = true;
-
- return 0;
-}
-
static int ath12k_dp_prepare_reo_update_elem(struct ath12k_dp *dp,
- struct ath12k_peer *peer,
+ struct ath12k_dp_link_peer *peer,
struct ath12k_dp_rx_tid *rx_tid)
{
struct dp_reo_update_rx_queue_elem *elem;
- lockdep_assert_held(&dp->ab->base_lock);
+ lockdep_assert_held(&dp->dp_lock);
elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
if (!elem)
@@ -1099,7 +573,8 @@ static int ath12k_dp_prepare_reo_update_elem(struct ath12k_dp *dp,
elem->is_ml_peer = peer->mlo;
elem->ml_peer_id = peer->ml_id;
- ath12k_dp_init_rx_tid_rxq(&elem->rx_tid, rx_tid);
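+ /* capture the TID's current active state from the link peer bitmask */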
+ ath12k_dp_init_rx_tid_rxq(&elem->rx_tid, rx_tid,
+ (peer->rx_tid_active_bitmask & (1 << rx_tid->tid)));
spin_lock_bh(&dp->reo_rxq_flush_lock);
list_add_tail(&elem->list, &dp->reo_cmd_update_rx_queue_list);
@@ -1113,31 +588,30 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
enum hal_pn_type pn_type)
{
struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
- struct ath12k_peer *peer;
- struct ath12k_sta *ahsta;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_dp_link_peer *peer;
struct ath12k_dp_rx_tid *rx_tid;
dma_addr_t paddr_aligned;
int ret;
- spin_lock_bh(&ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
- peer = ath12k_peer_find(ab, vdev_id, peer_mac);
- if (!peer) {
- spin_unlock_bh(&ab->base_lock);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, peer_mac);
+ if (!peer || !peer->dp_peer) {
+ spin_unlock_bh(&dp->dp_lock);
ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
return -ENOENT;
}
if (ab->hw_params->dp_primary_link_only &&
!peer->primary_link) {
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
return 0;
}
if (ab->hw_params->reoq_lut_support &&
(!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
ath12k_warn(ab, "reo qref table is not setup\n");
return -EINVAL;
}
@@ -1145,16 +619,16 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
peer->peer_id, tid);
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
return -EINVAL;
}
- rx_tid = &peer->rx_tid[tid];
+ rx_tid = &peer->dp_peer->rx_tid[tid];
/* Update the tid queue if it is already setup */
- if (rx_tid->active) {
- ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
- ba_win_sz, ssn, true);
- spin_unlock_bh(&ab->base_lock);
+ if (peer->rx_tid_active_bitmask & (1 << tid)) {
+ ret = ath12k_dp_arch_peer_rx_tid_reo_update(dp, peer, rx_tid,
+ ba_win_sz, ssn, true);
+ spin_unlock_bh(&dp->dp_lock);
if (ret) {
ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
return ret;
@@ -1180,14 +654,15 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
rx_tid->ba_win_sz = ba_win_sz;
- ahsta = ath12k_sta_to_ahsta(peer->sta);
- ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
+ ret = ath12k_dp_arch_rx_assign_reoq(dp, peer->dp_peer, rx_tid, ssn, pn_type);
if (ret) {
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
return ret;
}
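+ /* per-TID activity is tracked in the link peer's bitmask, not in rx_tid */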
+ peer->rx_tid_active_bitmask |= (1 << tid);
+
/* Pre-allocate the update_rxq_list for the corresponding tid
* This will be used during the tid delete. The reason we are not
* allocating during tid delete is that, if any alloc fail in update_rxq_list
@@ -1197,7 +672,7 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
if (ret) {
ath12k_warn(ab, "failed to alloc update_rxq_list for rx tid %u\n", tid);
ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
return ret;
}
@@ -1207,15 +682,15 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
* and tid with qaddr.
*/
if (peer->mlo)
- ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid,
- paddr_aligned);
+ ath12k_dp_arch_peer_rx_tid_qref_setup(dp, peer->ml_id, tid,
+ paddr_aligned);
else
- ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid,
- paddr_aligned);
+ ath12k_dp_arch_peer_rx_tid_qref_setup(dp, peer->peer_id, tid,
+ paddr_aligned);
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
} else {
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
paddr_aligned, tid, 1,
ba_win_sz);
@@ -1257,7 +732,8 @@ int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
u8 link_id)
{
struct ath12k_base *ab = ar->ab;
- struct ath12k_peer *peer;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_dp_link_peer *peer;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
struct ath12k_link_sta *arsta;
int vdev_id;
@@ -1273,24 +749,30 @@ int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
vdev_id = arsta->arvif->vdev_id;
- spin_lock_bh(&ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
- peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
- if (!peer) {
- spin_unlock_bh(&ab->base_lock);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, arsta->addr);
+ if (!peer || !peer->dp_peer) {
+ spin_unlock_bh(&dp->dp_lock);
ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
return -ENOENT;
}
- active = peer->rx_tid[params->tid].active;
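+ /* with dp_primary_link_only, rx TID state exists only on the primary link */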
+ if (ab->hw_params->dp_primary_link_only &&
+ !peer->primary_link) {
+ spin_unlock_bh(&dp->dp_lock);
+ return 0;
+ }
+ active = peer->rx_tid_active_bitmask & (1 << params->tid);
if (!active) {
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
return 0;
}
- ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
- spin_unlock_bh(&ab->base_lock);
+ ret = ath12k_dp_arch_peer_rx_tid_reo_update(dp, peer, peer->dp_peer->rx_tid,
+ 1, 0, false);
+ spin_unlock_bh(&dp->dp_lock);
if (ret) {
ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
params->tid, ret);
@@ -1307,8 +789,9 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
{
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct ath12k_hal_reo_cmd cmd = {};
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
struct ath12k_dp_rx_tid *rx_tid;
struct ath12k_dp_rx_tid_rxq rx_tid_rxq;
u8 tid;
@@ -1321,49 +804,29 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return 0;
- cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
- cmd.upd0 = HAL_REO_CMD_UPD0_PN |
- HAL_REO_CMD_UPD0_PN_SIZE |
- HAL_REO_CMD_UPD0_PN_VALID |
- HAL_REO_CMD_UPD0_PN_CHECK |
- HAL_REO_CMD_UPD0_SVLD;
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_TKIP:
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_CCMP_256:
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- if (key_cmd == SET_KEY) {
- cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
- cmd.pn_size = 48;
- }
- break;
- default:
- break;
- }
-
- spin_lock_bh(&ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
- peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
- if (!peer) {
- spin_unlock_bh(&ab->base_lock);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arvif->vdev_id,
+ peer_addr);
+ if (!peer || !peer->dp_peer) {
+ spin_unlock_bh(&dp->dp_lock);
ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
peer_addr);
return -ENOENT;
}
for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
- rx_tid = &peer->rx_tid[tid];
- if (!rx_tid->active)
+ if (!(peer->rx_tid_active_bitmask & (1 << tid)))
continue;
- ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid);
- cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
- cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
- ret = ath12k_dp_reo_cmd_send(ab, &rx_tid_rxq,
- HAL_REO_CMD_UPDATE_RX_QUEUE,
- &cmd, NULL);
+ rx_tid = &peer->dp_peer->rx_tid[tid];
+ ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid,
+ (peer->rx_tid_active_bitmask & (1 << tid)));
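+ /* the arch hook fills cipher-specific PN-check fields into the REO command */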
+ ath12k_dp_arch_setup_pn_check_reo_cmd(dp, &cmd, rx_tid, key->cipher,
+ key_cmd);
+ ret = ath12k_dp_arch_reo_cmd_send(dp, &rx_tid_rxq,
+ HAL_REO_CMD_UPDATE_RX_QUEUE,
+ &cmd, NULL);
if (ret) {
ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
tid, peer_addr, ret);
@@ -1371,732 +834,14 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
}
}
- spin_unlock_bh(&ab->base_lock);
-
- return ret;
-}
-
-static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
- u16 peer_id)
-{
- int i;
-
- for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
- if (ppdu_stats->user_stats[i].is_valid_peer_id) {
- if (peer_id == ppdu_stats->user_stats[i].peer_id)
- return i;
- } else {
- return i;
- }
- }
-
- return -EINVAL;
-}
-
-static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
- u16 tag, u16 len, const void *ptr,
- void *data)
-{
- const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
- const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
- const struct htt_ppdu_stats_user_rate *user_rate;
- struct htt_ppdu_stats_info *ppdu_info;
- struct htt_ppdu_user_stats *user_stats;
- int cur_user;
- u16 peer_id;
-
- ppdu_info = data;
-
- switch (tag) {
- case HTT_PPDU_STATS_TAG_COMMON:
- if (len < sizeof(struct htt_ppdu_stats_common)) {
- ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
- len, tag);
- return -EINVAL;
- }
- memcpy(&ppdu_info->ppdu_stats.common, ptr,
- sizeof(struct htt_ppdu_stats_common));
- break;
- case HTT_PPDU_STATS_TAG_USR_RATE:
- if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
- ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
- len, tag);
- return -EINVAL;
- }
- user_rate = ptr;
- peer_id = le16_to_cpu(user_rate->sw_peer_id);
- cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
- peer_id);
- if (cur_user < 0)
- return -EINVAL;
- user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
- user_stats->peer_id = peer_id;
- user_stats->is_valid_peer_id = true;
- memcpy(&user_stats->rate, ptr,
- sizeof(struct htt_ppdu_stats_user_rate));
- user_stats->tlv_flags |= BIT(tag);
- break;
- case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
- if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
- ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
- len, tag);
- return -EINVAL;
- }
-
- cmplt_cmn = ptr;
- peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
- cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
- peer_id);
- if (cur_user < 0)
- return -EINVAL;
- user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
- user_stats->peer_id = peer_id;
- user_stats->is_valid_peer_id = true;
- memcpy(&user_stats->cmpltn_cmn, ptr,
- sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
- user_stats->tlv_flags |= BIT(tag);
- break;
- case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
- if (len <
- sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
- ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
- len, tag);
- return -EINVAL;
- }
-
- ba_status = ptr;
- peer_id = le16_to_cpu(ba_status->sw_peer_id);
- cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
- peer_id);
- if (cur_user < 0)
- return -EINVAL;
- user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
- user_stats->peer_id = peer_id;
- user_stats->is_valid_peer_id = true;
- memcpy(&user_stats->ack_ba, ptr,
- sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
- user_stats->tlv_flags |= BIT(tag);
- break;
- }
- return 0;
-}
-
-int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
- int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
- const void *ptr, void *data),
- void *data)
-{
- const struct htt_tlv *tlv;
- const void *begin = ptr;
- u16 tlv_tag, tlv_len;
- int ret = -EINVAL;
-
- while (len > 0) {
- if (len < sizeof(*tlv)) {
- ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
- ptr - begin, len, sizeof(*tlv));
- return -EINVAL;
- }
- tlv = (struct htt_tlv *)ptr;
- tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
- tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
- ptr += sizeof(*tlv);
- len -= sizeof(*tlv);
-
- if (tlv_len > len) {
- ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
- tlv_tag, ptr - begin, len, tlv_len);
- return -EINVAL;
- }
- ret = iter(ab, tlv_tag, tlv_len, ptr, data);
- if (ret == -ENOMEM)
- return ret;
-
- ptr += tlv_len;
- len -= tlv_len;
- }
- return 0;
-}
-
-static void
-ath12k_update_per_peer_tx_stats(struct ath12k *ar,
- struct htt_ppdu_stats *ppdu_stats, u8 user)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_peer *peer;
- struct ath12k_link_sta *arsta;
- struct htt_ppdu_stats_user_rate *user_rate;
- struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
- struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
- struct htt_ppdu_stats_common *common = &ppdu_stats->common;
- int ret;
- u8 flags, mcs, nss, bw, sgi, dcm, ppdu_type, rate_idx = 0;
- u32 v, succ_bytes = 0;
- u16 tones, rate = 0, succ_pkts = 0;
- u32 tx_duration = 0;
- u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
- u16 tx_retry_failed = 0, tx_retry_count = 0;
- bool is_ampdu = false, is_ofdma;
-
- if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
- return;
-
- if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) {
- is_ampdu =
- HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
- tx_retry_failed =
- __le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_tried) -
- __le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_success);
- tx_retry_count =
- HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
- HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
- }
-
- if (usr_stats->tlv_flags &
- BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
- succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
- succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
- HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
- tid = le32_get_bits(usr_stats->ack_ba.info,
- HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
- }
-
- if (common->fes_duration_us)
- tx_duration = le32_to_cpu(common->fes_duration_us);
-
- user_rate = &usr_stats->rate;
- flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
- bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
- nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
- mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
- sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
- dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
-
- ppdu_type = HTT_USR_RATE_PPDU_TYPE(user_rate->info1);
- is_ofdma = (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA) ||
- (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA);
-
- /* Note: If host configured fixed rates and in some other special
- * cases, the broadcast/management frames are sent in different rates.
- * Firmware rate's control to be skipped for this?
- */
-
- if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
- ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
- return;
- }
-
- if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
- ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
- return;
- }
-
- if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
- ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
- mcs, nss);
- return;
- }
-
- if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
- ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
- flags,
- &rate_idx,
- &rate);
- if (ret < 0)
- return;
- }
-
- rcu_read_lock();
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);
-
- if (!peer || !peer->sta) {
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
- return;
- }
-
- arsta = ath12k_peer_get_link_sta(ab, peer);
- if (!arsta) {
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
- return;
- }
-
- memset(&arsta->txrate, 0, sizeof(arsta->txrate));
-
- arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
-
- switch (flags) {
- case WMI_RATE_PREAMBLE_OFDM:
- arsta->txrate.legacy = rate;
- break;
- case WMI_RATE_PREAMBLE_CCK:
- arsta->txrate.legacy = rate;
- break;
- case WMI_RATE_PREAMBLE_HT:
- arsta->txrate.mcs = mcs + 8 * (nss - 1);
- arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
- if (sgi)
- arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- break;
- case WMI_RATE_PREAMBLE_VHT:
- arsta->txrate.mcs = mcs;
- arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
- if (sgi)
- arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- break;
- case WMI_RATE_PREAMBLE_HE:
- arsta->txrate.mcs = mcs;
- arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
- arsta->txrate.he_dcm = dcm;
- arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
- tones = le16_to_cpu(user_rate->ru_end) -
- le16_to_cpu(user_rate->ru_start) + 1;
- v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
- arsta->txrate.he_ru_alloc = v;
- if (is_ofdma)
- arsta->txrate.bw = RATE_INFO_BW_HE_RU;
- break;
- case WMI_RATE_PREAMBLE_EHT:
- arsta->txrate.mcs = mcs;
- arsta->txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
- arsta->txrate.he_dcm = dcm;
- arsta->txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
- tones = le16_to_cpu(user_rate->ru_end) -
- le16_to_cpu(user_rate->ru_start) + 1;
- v = ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(tones);
- arsta->txrate.eht_ru_alloc = v;
- if (is_ofdma)
- arsta->txrate.bw = RATE_INFO_BW_EHT_RU;
- break;
- }
-
- arsta->tx_retry_failed += tx_retry_failed;
- arsta->tx_retry_count += tx_retry_count;
- arsta->txrate.nss = nss;
- arsta->tx_duration += tx_duration;
- memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
-
- /* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
- * So skip peer stats update for mgmt packets.
- */
- if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
- memset(peer_stats, 0, sizeof(*peer_stats));
- peer_stats->succ_pkts = succ_pkts;
- peer_stats->succ_bytes = succ_bytes;
- peer_stats->is_ampdu = is_ampdu;
- peer_stats->duration = tx_duration;
- peer_stats->ba_fails =
- HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
- HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
- }
-
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
-}
-
-static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
- struct htt_ppdu_stats *ppdu_stats)
-{
- u8 user;
-
- for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
- ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
-}
-
-static
-struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
- u32 ppdu_id)
-{
- struct htt_ppdu_stats_info *ppdu_info;
-
- lockdep_assert_held(&ar->data_lock);
- if (!list_empty(&ar->ppdu_stats_info)) {
- list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
- if (ppdu_info->ppdu_id == ppdu_id)
- return ppdu_info;
- }
-
- if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
- ppdu_info = list_first_entry(&ar->ppdu_stats_info,
- typeof(*ppdu_info), list);
- list_del(&ppdu_info->list);
- ar->ppdu_stat_list_depth--;
- ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
- kfree(ppdu_info);
- }
- }
-
- ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
- if (!ppdu_info)
- return NULL;
-
- list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
- ar->ppdu_stat_list_depth++;
-
- return ppdu_info;
-}
-
-static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
- struct htt_ppdu_user_stats *usr_stats)
-{
- peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
- peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
- peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
- peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
- peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
- peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
- peer->ppdu_stats_delayba.resp_rate_flags =
- le32_to_cpu(usr_stats->rate.resp_rate_flags);
-
- peer->delayba_flag = true;
-}
-
-static void ath12k_copy_to_bar(struct ath12k_peer *peer,
- struct htt_ppdu_user_stats *usr_stats)
-{
- usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
- usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
- usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
- usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
- usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
- usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
- usr_stats->rate.resp_rate_flags =
- cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);
-
- peer->delayba_flag = false;
-}
-
-static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
- struct sk_buff *skb)
-{
- struct ath12k_htt_ppdu_stats_msg *msg;
- struct htt_ppdu_stats_info *ppdu_info;
- struct ath12k_peer *peer = NULL;
- struct htt_ppdu_user_stats *usr_stats = NULL;
- u32 peer_id = 0;
- struct ath12k *ar;
- int ret, i;
- u8 pdev_id;
- u32 ppdu_id, len;
-
- msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
- len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
- if (len > (skb->len - struct_size(msg, data, 0))) {
- ath12k_warn(ab,
- "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
- len, skb->len);
- return -EINVAL;
- }
-
- pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
- ppdu_id = le32_to_cpu(msg->ppdu_id);
-
- rcu_read_lock();
- ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
- if (!ar) {
- ret = -EINVAL;
- goto exit;
- }
-
- spin_lock_bh(&ar->data_lock);
- ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
- if (!ppdu_info) {
- spin_unlock_bh(&ar->data_lock);
- ret = -EINVAL;
- goto exit;
- }
-
- ppdu_info->ppdu_id = ppdu_id;
- ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
- ath12k_htt_tlv_ppdu_stats_parse,
- (void *)ppdu_info);
- if (ret) {
- spin_unlock_bh(&ar->data_lock);
- ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
- goto exit;
- }
-
- if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
- spin_unlock_bh(&ar->data_lock);
- ath12k_warn(ab,
- "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
- ppdu_info->ppdu_stats.common.num_users,
- HTT_PPDU_STATS_MAX_USERS);
- ret = -EINVAL;
- goto exit;
- }
-
- /* back up data rate tlv for all peers */
- if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
- (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
- ppdu_info->delay_ba) {
- for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
- peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find_by_id(ab, peer_id);
- if (!peer) {
- spin_unlock_bh(&ab->base_lock);
- continue;
- }
-
- usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
- if (usr_stats->delay_ba)
- ath12k_copy_to_delay_stats(peer, usr_stats);
- spin_unlock_bh(&ab->base_lock);
- }
- }
-
- /* restore all peers' data rate tlv to mu-bar tlv */
- if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
- (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
- for (i = 0; i < ppdu_info->bar_num_users; i++) {
- peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find_by_id(ab, peer_id);
- if (!peer) {
- spin_unlock_bh(&ab->base_lock);
- continue;
- }
-
- usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
- if (peer->delayba_flag)
- ath12k_copy_to_bar(peer, usr_stats);
- spin_unlock_bh(&ab->base_lock);
- }
- }
-
- spin_unlock_bh(&ar->data_lock);
-
-exit:
- rcu_read_unlock();
+ spin_unlock_bh(&dp->dp_lock);
return ret;
}
-static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
- struct sk_buff *skb)
-{
- struct ath12k_htt_mlo_offset_msg *msg;
- struct ath12k_pdev *pdev;
- struct ath12k *ar;
- u8 pdev_id;
-
- msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
- pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
- HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
-
- rcu_read_lock();
- ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
- if (!ar) {
- /* It is possible that the ar is not yet active (started).
- * The above function will only look for the active pdev
- * and hence %NULL return is possible. Just silently
- * discard this message
- */
- goto exit;
- }
-
- spin_lock_bh(&ar->data_lock);
- pdev = ar->pdev;
-
- pdev->timestamp.info = __le32_to_cpu(msg->info);
- pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
- pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
- pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
- pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
- pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
- pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
- pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
-
- spin_unlock_bh(&ar->data_lock);
-exit:
- rcu_read_unlock();
-}
-
-void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
- struct sk_buff *skb)
-{
- struct ath12k_dp *dp = &ab->dp;
- struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
- enum htt_t2h_msg_type type;
- u16 peer_id;
- u8 vdev_id;
- u8 mac_addr[ETH_ALEN];
- u16 peer_mac_h16;
- u16 ast_hash = 0;
- u16 hw_peer_id;
-
- type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);
-
- ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
-
- switch (type) {
- case HTT_T2H_MSG_TYPE_VERSION_CONF:
- dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
- HTT_T2H_VERSION_CONF_MAJOR);
- dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
- HTT_T2H_VERSION_CONF_MINOR);
- complete(&dp->htt_tgt_version_received);
- break;
- /* TODO: remove unused peer map versions after testing */
- case HTT_T2H_MSG_TYPE_PEER_MAP:
- vdev_id = le32_get_bits(resp->peer_map_ev.info,
- HTT_T2H_PEER_MAP_INFO_VDEV_ID);
- peer_id = le32_get_bits(resp->peer_map_ev.info,
- HTT_T2H_PEER_MAP_INFO_PEER_ID);
- peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
- HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
- ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
- peer_mac_h16, mac_addr);
- ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
- break;
- case HTT_T2H_MSG_TYPE_PEER_MAP2:
- vdev_id = le32_get_bits(resp->peer_map_ev.info,
- HTT_T2H_PEER_MAP_INFO_VDEV_ID);
- peer_id = le32_get_bits(resp->peer_map_ev.info,
- HTT_T2H_PEER_MAP_INFO_PEER_ID);
- peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
- HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
- ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
- peer_mac_h16, mac_addr);
- ast_hash = le32_get_bits(resp->peer_map_ev.info2,
- HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
- hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
- HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
- ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
- hw_peer_id);
- break;
- case HTT_T2H_MSG_TYPE_PEER_MAP3:
- vdev_id = le32_get_bits(resp->peer_map_ev.info,
- HTT_T2H_PEER_MAP_INFO_VDEV_ID);
- peer_id = le32_get_bits(resp->peer_map_ev.info,
- HTT_T2H_PEER_MAP_INFO_PEER_ID);
- peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
- HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
- ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
- peer_mac_h16, mac_addr);
- ast_hash = le32_get_bits(resp->peer_map_ev.info2,
- HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
- hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
- HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
- ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
- hw_peer_id);
- break;
- case HTT_T2H_MSG_TYPE_PEER_UNMAP:
- case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
- peer_id = le32_get_bits(resp->peer_unmap_ev.info,
- HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
- ath12k_peer_unmap_event(ab, peer_id);
- break;
- case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
- ath12k_htt_pull_ppdu_stats(ab, skb);
- break;
- case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
- ath12k_debugfs_htt_ext_stats_handler(ab, skb);
- break;
- case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
- ath12k_htt_mlo_offset_event_handler(ab, skb);
- break;
- default:
- ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
- type);
- break;
- }
-
- dev_kfree_skb_any(skb);
-}
-
-static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
- struct sk_buff_head *msdu_list,
- struct sk_buff *first, struct sk_buff *last,
- u8 l3pad_bytes, int msdu_len)
-{
- struct ath12k_base *ab = ar->ab;
- struct sk_buff *skb;
- struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
- int buf_first_hdr_len, buf_first_len;
- struct hal_rx_desc *ldesc;
- int space_extra, rem_len, buf_len;
- u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
- bool is_continuation;
-
- /* As the msdu is spread across multiple rx buffers,
- * find the offset to the start of msdu for computing
- * the length of the msdu in the first buffer.
- */
- buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
- buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
-
- if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
- skb_put(first, buf_first_hdr_len + msdu_len);
- skb_pull(first, buf_first_hdr_len);
- return 0;
- }
-
- ldesc = (struct hal_rx_desc *)last->data;
- rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
- rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
-
- /* MSDU spans over multiple buffers because the length of the MSDU
- * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
- * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
- */
- skb_put(first, DP_RX_BUFFER_SIZE);
- skb_pull(first, buf_first_hdr_len);
-
- /* When an MSDU spread over multiple buffers MSDU_END
- * tlvs are valid only in the last buffer. Copy those tlvs.
- */
- ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
-
- space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
- if (space_extra > 0 &&
- (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
- /* Free up all buffers of the MSDU */
- while ((skb = __skb_dequeue(msdu_list)) != NULL) {
- rxcb = ATH12K_SKB_RXCB(skb);
- if (!rxcb->is_continuation) {
- dev_kfree_skb_any(skb);
- break;
- }
- dev_kfree_skb_any(skb);
- }
- return -ENOMEM;
- }
-
- rem_len = msdu_len - buf_first_len;
- while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
- rxcb = ATH12K_SKB_RXCB(skb);
- is_continuation = rxcb->is_continuation;
- if (is_continuation)
- buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
- else
- buf_len = rem_len;
-
- if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
- WARN_ON_ONCE(1);
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-
- skb_put(skb, buf_len + hal_rx_desc_sz);
- skb_pull(skb, hal_rx_desc_sz);
- skb_copy_from_linear_data(skb, skb_put(first, buf_len),
- buf_len);
- dev_kfree_skb_any(skb);
-
- rem_len -= buf_len;
- if (!is_continuation)
- break;
- }
-
- return 0;
-}
-
-static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
- struct sk_buff *first)
+struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
+ struct sk_buff *first)
{
struct sk_buff *skb;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
@@ -2113,14 +858,7 @@ static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_
return NULL;
}
+EXPORT_SYMBOL(ath12k_dp_rx_get_msdu_last_buf);
-static void ath12k_dp_rx_h_csum_offload(struct sk_buff *msdu,
- struct ath12k_dp_rx_info *rx_info)
-{
- msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ?
- CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
-}
-
-int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype)
+int ath12k_dp_rx_crypto_mic_len(struct ath12k_dp *dp, enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
@@ -2142,11 +880,11 @@ int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype
break;
}
- ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
+ ath12k_warn(dp->ab, "unsupported encryption type %d for mic len\n", enctype);
return 0;
}
-static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
+static int ath12k_dp_rx_crypto_param_len(struct ath12k_pdev_dp *dp_pdev,
enum hal_encrypt_type enctype)
{
switch (enctype) {
@@ -2170,11 +908,11 @@ static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
break;
}
- ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ ath12k_warn(dp_pdev->dp->ab, "unsupported encryption type %d\n", enctype);
return 0;
}
-static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
+static int ath12k_dp_rx_crypto_icv_len(struct ath12k_pdev_dp *dp_pdev,
enum hal_encrypt_type enctype)
{
switch (enctype) {
@@ -2195,16 +933,15 @@ static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
break;
}
- ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ ath12k_warn(dp_pdev->dp->ab, "unsupported encryption type %d\n", enctype);
return 0;
}
-static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
+static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k_pdev_dp *dp_pdev,
struct sk_buff *msdu,
enum hal_encrypt_type enctype,
- struct ieee80211_rx_status *status)
+ struct hal_rx_desc_data *rx_info)
{
- struct ath12k_base *ab = ar->ab;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
struct ieee80211_hdr *hdr;
@@ -2225,7 +962,7 @@ static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
qos_ctl = rxcb->tid;
- if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
+ if (rx_info->mesh_ctrl_present)
qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
/* TODO: Add other QoS ctl fields when required */
@@ -2234,9 +971,10 @@ static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
memcpy(decap_hdr, hdr, hdr_len);
/* Rebuild crypto header for mac80211 use */
- if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
- crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
- ath12k_dp_rx_desc_get_crypto_header(ar->ab,
+ if (!(rx_info->rx_status->flag & RX_FLAG_IV_STRIPPED)) {
+ crypto_hdr = skb_push(msdu,
+ ath12k_dp_rx_crypto_param_len(dp_pdev, enctype));
+ ath12k_dp_rx_desc_get_crypto_header(dp_pdev->dp->hal,
rxcb->rx_desc, crypto_hdr,
enctype);
}
@@ -2247,11 +985,13 @@ static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
}
-static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
+static void ath12k_dp_rx_h_undecap_raw(struct ath12k_pdev_dp *dp_pdev,
+ struct sk_buff *msdu,
enum hal_encrypt_type enctype,
struct ieee80211_rx_status *status,
bool decrypted)
{
+ struct ath12k_dp *dp = dp_pdev->dp;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ieee80211_hdr *hdr;
size_t hdr_len;
@@ -2273,20 +1013,20 @@ static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
/* Tail */
if (status->flag & RX_FLAG_IV_STRIPPED) {
skb_trim(msdu, msdu->len -
- ath12k_dp_rx_crypto_mic_len(ar, enctype));
+ ath12k_dp_rx_crypto_mic_len(dp, enctype));
skb_trim(msdu, msdu->len -
- ath12k_dp_rx_crypto_icv_len(ar, enctype));
+ ath12k_dp_rx_crypto_icv_len(dp_pdev, enctype));
} else {
/* MIC */
if (status->flag & RX_FLAG_MIC_STRIPPED)
skb_trim(msdu, msdu->len -
- ath12k_dp_rx_crypto_mic_len(ar, enctype));
+ ath12k_dp_rx_crypto_mic_len(dp, enctype));
/* ICV */
if (status->flag & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
- ath12k_dp_rx_crypto_icv_len(ar, enctype));
+ ath12k_dp_rx_crypto_icv_len(dp_pdev, enctype));
}
/* MMIC */
@@ -2298,58 +1038,59 @@ static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
/* Head */
if (status->flag & RX_FLAG_IV_STRIPPED) {
hdr_len = ieee80211_hdrlen(hdr->frame_control);
- crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
+ crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype);
memmove(msdu->data + crypto_len, msdu->data, hdr_len);
skb_pull(msdu, crypto_len);
}
}
-static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
+static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k_pdev_dp *dp_pdev,
struct sk_buff *msdu,
struct ath12k_skb_rxcb *rxcb,
- struct ieee80211_rx_status *status,
- enum hal_encrypt_type enctype)
+ enum hal_encrypt_type enctype,
+ struct hal_rx_desc_data *rx_info)
{
struct hal_rx_desc *rx_desc = rxcb->rx_desc;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_hal *hal = dp->hal;
size_t hdr_len, crypto_len;
struct ieee80211_hdr hdr;
__le16 qos_ctl;
- u8 *crypto_hdr, mesh_ctrl;
+ u8 *crypto_hdr;
- ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr);
+ ath12k_dp_rx_desc_get_dot11_hdr(hal, rx_desc, &hdr);
hdr_len = ieee80211_hdrlen(hdr.frame_control);
- mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc);
- if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
- crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
+ if (!(rx_info->rx_status->flag & RX_FLAG_IV_STRIPPED)) {
+ crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype);
crypto_hdr = skb_push(msdu, crypto_len);
- ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
+ ath12k_dp_rx_desc_get_crypto_header(dp->hal, rx_desc, crypto_hdr,
+ enctype);
}
skb_push(msdu, hdr_len);
memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr)));
if (rxcb->is_mcbc)
- status->flag &= ~RX_FLAG_PN_VALIDATED;
+ rx_info->rx_status->flag &= ~RX_FLAG_PN_VALIDATED;
/* Add QOS header */
if (ieee80211_is_data_qos(hdr.frame_control)) {
struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data;
qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK);
- if (mesh_ctrl)
+ if (rx_info->mesh_ctrl_present)
qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT);
memcpy(ieee80211_get_qos_ctl(qos_ptr), &qos_ctl, IEEE80211_QOS_CTL_LEN);
}
}
-static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
+static void ath12k_dp_rx_h_undecap_eth(struct ath12k_pdev_dp *dp_pdev,
struct sk_buff *msdu,
enum hal_encrypt_type enctype,
- struct ieee80211_rx_status *status)
+ struct hal_rx_desc_data *rx_info)
{
struct ieee80211_hdr *hdr;
struct ethhdr *eth;
@@ -2365,7 +1106,7 @@ static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
skb_pull(msdu, sizeof(*eth));
memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
sizeof(rfc));
- ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);
+ ath12k_get_dot11_hdr_from_rx_desc(dp_pdev, msdu, rxcb, enctype, rx_info);
/* original 802.11 header has a different DA and in
* case of 4addr it may also have different SA
@@ -2375,24 +1116,20 @@ static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
-static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
- struct hal_rx_desc *rx_desc,
- enum hal_encrypt_type enctype,
- struct ieee80211_rx_status *status,
- bool decrypted)
+void ath12k_dp_rx_h_undecap(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ enum hal_encrypt_type enctype,
+ bool decrypted,
+ struct hal_rx_desc_data *rx_info)
{
- struct ath12k_base *ab = ar->ab;
- u8 decap;
struct ethhdr *ehdr;
- decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);
-
- switch (decap) {
+ switch (rx_info->decap_type) {
case DP_RX_DECAP_TYPE_NATIVE_WIFI:
- ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
+ ath12k_dp_rx_h_undecap_nwifi(dp_pdev, msdu, enctype, rx_info);
break;
case DP_RX_DECAP_TYPE_RAW:
- ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
+ ath12k_dp_rx_h_undecap_raw(dp_pdev, msdu, enctype, rx_info->rx_status,
decrypted);
break;
case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
@@ -2401,7 +1138,7 @@ static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
/* mac80211 allows fast path only for authorized STA */
if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
ATH12K_SKB_RXCB(msdu)->is_eapol = true;
- ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
+ ath12k_dp_rx_h_undecap_eth(dp_pdev, msdu, enctype, rx_info);
break;
}
@@ -2409,125 +1146,53 @@ static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
* remove eth header and add 802.11 header.
*/
if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
- ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
+ ath12k_dp_rx_h_undecap_eth(dp_pdev, msdu, enctype, rx_info);
break;
case DP_RX_DECAP_TYPE_8023:
/* TODO: Handle undecap for these formats */
break;
}
}
+EXPORT_SYMBOL(ath12k_dp_rx_h_undecap);
-struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
- struct ath12k_dp_rx_info *rx_info)
+struct ath12k_dp_link_peer *
+ath12k_dp_rx_h_find_link_peer(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
+ struct hal_rx_desc_data *rx_info)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct ath12k_peer *peer = NULL;
+ struct ath12k_dp_link_peer *peer = NULL;
+ struct ath12k_dp *dp = dp_pdev->dp;
- lockdep_assert_held(&ab->base_lock);
+ lockdep_assert_held(&dp->dp_lock);
if (rxcb->peer_id)
- peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);
+ peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, rxcb->peer_id);
if (peer)
return peer;
if (rx_info->addr2_present)
- peer = ath12k_peer_find_by_addr(ab, rx_info->addr2);
+ peer = ath12k_dp_link_peer_find_by_addr(dp, rx_info->addr2);
return peer;
}
-static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
- struct sk_buff *msdu,
- struct hal_rx_desc *rx_desc,
- struct ath12k_dp_rx_info *rx_info)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_skb_rxcb *rxcb;
- enum hal_encrypt_type enctype;
- bool is_decrypted = false;
- struct ieee80211_hdr *hdr;
- struct ath12k_peer *peer;
- struct ieee80211_rx_status *rx_status = rx_info->rx_status;
- u32 err_bitmap;
-
- /* PN for multicast packets will be checked in mac80211 */
- rxcb = ATH12K_SKB_RXCB(msdu);
- rxcb->is_mcbc = rx_info->is_mcbc;
-
- if (rxcb->is_mcbc)
- rxcb->peer_id = rx_info->peer_id;
-
- spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info);
- if (peer) {
- /* resetting mcbc bit because mcbc packets are unicast
- * packets only for AP as STA sends unicast packets.
- */
- rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only;
-
- if (rxcb->is_mcbc)
- enctype = peer->sec_type_grp;
- else
- enctype = peer->sec_type;
- } else {
- enctype = HAL_ENCRYPT_TYPE_OPEN;
- }
- spin_unlock_bh(&ar->ab->base_lock);
-
- err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
- if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
- is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
-
- /* Clear per-MPDU flags while leaving per-PPDU flags intact */
- rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
- RX_FLAG_MMIC_ERROR |
- RX_FLAG_DECRYPTED |
- RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED);
-
- if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
- rx_status->flag |= RX_FLAG_MMIC_ERROR;
-
- if (is_decrypted) {
- rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
-
- if (rx_info->is_mcbc)
- rx_status->flag |= RX_FLAG_MIC_STRIPPED |
- RX_FLAG_ICV_STRIPPED;
- else
- rx_status->flag |= RX_FLAG_IV_STRIPPED |
- RX_FLAG_PN_VALIDATED;
- }
-
- ath12k_dp_rx_h_csum_offload(msdu, rx_info);
- ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
- enctype, rx_status, is_decrypted);
-
- if (!is_decrypted || rx_info->is_mcbc)
- return;
-
- if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
- hdr = (void *)msdu->data;
- hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
- }
-}
-
-static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
+static void ath12k_dp_rx_h_rate(struct ath12k_pdev_dp *dp_pdev,
+ struct hal_rx_desc_data *rx_info)
{
+ struct ath12k_dp *dp = dp_pdev->dp;
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *rx_status = rx_info->rx_status;
enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type;
u8 bw = rx_info->bw, sgi = rx_info->sgi;
u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss;
bool is_cck;
+ struct ath12k *ar;
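+ /* ar is only needed to look up the sband for legacy CCK/OFDM rates */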
switch (pkt_type) {
case RX_MSDU_START_PKT_TYPE_11A:
case RX_MSDU_START_PKT_TYPE_11B:
+ ar = ath12k_pdev_dp_to_ar(dp_pdev);
is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
sband = &ar->mac.sbands[rx_status->band];
rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
@@ -2536,7 +1201,7 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_
case RX_MSDU_START_PKT_TYPE_11N:
rx_status->encoding = RX_ENC_HT;
if (rate_mcs > ATH12K_HT_MCS_MAX) {
- ath12k_warn(ar->ab,
+ ath12k_warn(dp->ab,
"Received with invalid mcs in HT mode %d\n",
rate_mcs);
break;
@@ -2550,7 +1215,7 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_
rx_status->encoding = RX_ENC_VHT;
rx_status->rate_idx = rate_mcs;
if (rate_mcs > ATH12K_VHT_MCS_MAX) {
- ath12k_warn(ar->ab,
+ ath12k_warn(dp->ab,
"Received with invalid mcs in VHT mode %d\n",
rate_mcs);
break;
@@ -2563,7 +1228,7 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_
case RX_MSDU_START_PKT_TYPE_11AX:
rx_status->rate_idx = rate_mcs;
if (rate_mcs > ATH12K_HE_MCS_MAX) {
- ath12k_warn(ar->ab,
+ ath12k_warn(dp->ab,
"Received with invalid mcs in HE mode %d\n",
rate_mcs);
break;
@@ -2577,7 +1242,7 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_
rx_status->rate_idx = rate_mcs;
if (rate_mcs > ATH12K_EHT_MCS_MAX) {
- ath12k_warn(ar->ab,
+ ath12k_warn(dp->ab,
"Received with invalid mcs in EHT mode %d\n",
rate_mcs);
break;
@@ -2593,33 +1258,8 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_
}
}
-void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc,
- struct ath12k_dp_rx_info *rx_info)
-{
- rx_info->ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rx_desc);
- rx_info->l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rx_desc);
- rx_info->is_mcbc = ath12k_dp_rx_h_is_da_mcbc(ab, rx_desc);
- rx_info->decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
- rx_info->pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
- rx_info->sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
- rx_info->rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
- rx_info->bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
- rx_info->nss = ath12k_dp_rx_h_nss(ab, rx_desc);
- rx_info->tid = ath12k_dp_rx_h_tid(ab, rx_desc);
- rx_info->peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
- rx_info->phy_meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
-
- if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) {
- ether_addr_copy(rx_info->addr2,
- ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc));
- rx_info->addr2_present = true;
- }
-
- ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
- rx_desc, sizeof(*rx_desc));
-}
-
-void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
+void ath12k_dp_rx_h_ppdu(struct ath12k_pdev_dp *dp_pdev,
+ struct hal_rx_desc_data *rx_info)
{
struct ieee80211_rx_status *rx_status = rx_info->rx_status;
u8 channel_num;
@@ -2652,7 +1292,9 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
}
if (unlikely(rx_status->band == NUM_NL80211_BANDS ||
- !ath12k_ar_to_hw(ar)->wiphy->bands[rx_status->band])) {
+ !ath12k_pdev_dp_to_hw(dp_pdev)->wiphy->bands[rx_status->band])) {
+ struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
+
ath12k_warn(ar->ab, "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
rx_status->band, channel_num, center_freq, ar->pdev_idx);
@@ -2676,43 +1318,41 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
rx_status->band);
h_rate:
- ath12k_dp_rx_h_rate(ar, rx_info);
+ ath12k_dp_rx_h_rate(dp_pdev, rx_info);
}
+EXPORT_SYMBOL(ath12k_dp_rx_h_ppdu);
-static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
- struct sk_buff *msdu,
- struct ath12k_dp_rx_info *rx_info)
+void ath12k_dp_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev, struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct hal_rx_desc_data *rx_info)
{
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = dp_pdev->dp;
struct ieee80211_rx_status *rx_status;
struct ieee80211_sta *pubsta;
- struct ath12k_peer *peer;
+ struct ath12k_dp_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ieee80211_rx_status *status = rx_info->rx_status;
u8 decap = rx_info->decap_type;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol = rxcb->is_eapol;
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info);
+ peer = ath12k_dp_peer_find_by_peerid(dp_pdev, rx_info->peer_id);
pubsta = peer ? peer->sta : NULL;
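+ /* for an MLO station, map the receiving hw link to its mac80211 link id */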
if (pubsta && pubsta->valid_links) {
status->link_valid = 1;
- status->link_id = peer->link_id;
+ status->link_id = peer->hw_links[rxcb->hw_link_id];
}
- spin_unlock_bh(&ab->base_lock);
-
- ath12k_dbg(ab, ATH12K_DBG_DATA,
+ ath12k_dbg(dp->ab, ATH12K_DBG_DATA,
"rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
rxcb->tid,
is_mcbc ? "mcast" : "ucast",
- ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
+ rx_info->seq_no,
(status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
@@ -2731,7 +1371,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
!!(status->flag & RX_FLAG_MMIC_ERROR),
!!(status->flag & RX_FLAG_AMSDU_MORE));
- ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
+ ath12k_dbg_dump(dp->ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
msdu->data, msdu->len);
rx_status = IEEE80211_SKB_RXCB(msdu);
@@ -2748,19 +1388,19 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
- ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
+ ieee80211_rx_napi(ath12k_pdev_dp_to_hw(dp_pdev), pubsta, msdu, napi);
}
+EXPORT_SYMBOL(ath12k_dp_rx_deliver_msdu);
-static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
- struct hal_rx_desc *rx_desc,
- struct sk_buff *msdu)
+bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_dp *dp,
+ struct hal_rx_desc *rx_desc,
+ struct sk_buff *msdu,
+ struct hal_rx_desc_data *rx_info)
{
struct ieee80211_hdr *hdr;
- u8 decap_type;
u32 hdr_len;
- decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
- if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI)
+ if (rx_info->decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI)
return true;
hdr = (struct ieee80211_hdr *)msdu->data;
@@ -2769,385 +1409,73 @@ static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN)))
return true;
- ab->device_stats.invalid_rbm++;
+ dp->device_stats.invalid_rbm++;
WARN_ON_ONCE(1);
return false;
}
-
-static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
- struct sk_buff *msdu,
- struct sk_buff_head *msdu_list,
- struct ath12k_dp_rx_info *rx_info)
-{
- struct ath12k_base *ab = ar->ab;
- struct hal_rx_desc *rx_desc, *lrx_desc;
- struct ath12k_skb_rxcb *rxcb;
- struct sk_buff *last_buf;
- u8 l3_pad_bytes;
- u16 msdu_len;
- int ret;
- u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
-
- last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
- if (!last_buf) {
- ath12k_warn(ab,
- "No valid Rx buffer to access MSDU_END tlv\n");
- ret = -EIO;
- goto free_out;
- }
-
- rx_desc = (struct hal_rx_desc *)msdu->data;
- lrx_desc = (struct hal_rx_desc *)last_buf->data;
- if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
- ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
- ret = -EIO;
- goto free_out;
- }
-
- rxcb = ATH12K_SKB_RXCB(msdu);
- rxcb->rx_desc = rx_desc;
- msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
- l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);
-
- if (rxcb->is_frag) {
- skb_pull(msdu, hal_rx_desc_sz);
- } else if (!rxcb->is_continuation) {
- if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
- ret = -EINVAL;
- ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
- ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
- sizeof(*rx_desc));
- goto free_out;
- }
- skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
- skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
- } else {
- ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
- msdu, last_buf,
- l3_pad_bytes, msdu_len);
- if (ret) {
- ath12k_warn(ab,
- "failed to coalesce msdu rx buffer%d\n", ret);
- goto free_out;
- }
- }
-
- if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) {
- ret = -EINVAL;
- goto free_out;
- }
-
- ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
- ath12k_dp_rx_h_ppdu(ar, rx_info);
- ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info);
-
- rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
-
- return 0;
-
-free_out:
- return ret;
-}
-
-static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
- struct napi_struct *napi,
- struct sk_buff_head *msdu_list,
- int ring_id)
-{
- struct ath12k_hw_group *ag = ab->ag;
- struct ieee80211_rx_status rx_status = {};
- struct ath12k_skb_rxcb *rxcb;
- struct sk_buff *msdu;
- struct ath12k *ar;
- struct ath12k_hw_link *hw_links = ag->hw_links;
- struct ath12k_base *partner_ab;
- struct ath12k_dp_rx_info rx_info;
- u8 hw_link_id, pdev_id;
- int ret;
-
- if (skb_queue_empty(msdu_list))
- return;
-
- rx_info.addr2_present = false;
- rx_info.rx_status = &rx_status;
-
- rcu_read_lock();
-
- while ((msdu = __skb_dequeue(msdu_list))) {
- rxcb = ATH12K_SKB_RXCB(msdu);
- hw_link_id = rxcb->hw_link_id;
- partner_ab = ath12k_ag_to_ab(ag,
- hw_links[hw_link_id].device_id);
- pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
- hw_links[hw_link_id].pdev_idx);
- ar = partner_ab->pdevs[pdev_id].ar;
- if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) {
- dev_kfree_skb_any(msdu);
- continue;
- }
-
- if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
- dev_kfree_skb_any(msdu);
- continue;
- }
-
- ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info);
- if (ret) {
- ath12k_dbg(ab, ATH12K_DBG_DATA,
- "Unable to process msdu %d", ret);
- dev_kfree_skb_any(msdu);
- continue;
- }
-
- ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
- }
-
- rcu_read_unlock();
-}
-
-static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
- enum ath12k_peer_metadata_version ver,
- __le32 peer_metadata)
-{
- switch (ver) {
- default:
- ath12k_warn(ab, "Unknown peer metadata version: %d", ver);
- fallthrough;
- case ATH12K_PEER_METADATA_V0:
- return le32_get_bits(peer_metadata,
- RX_MPDU_DESC_META_DATA_V0_PEER_ID);
- case ATH12K_PEER_METADATA_V1:
- return le32_get_bits(peer_metadata,
- RX_MPDU_DESC_META_DATA_V1_PEER_ID);
- case ATH12K_PEER_METADATA_V1A:
- return le32_get_bits(peer_metadata,
- RX_MPDU_DESC_META_DATA_V1A_PEER_ID);
- case ATH12K_PEER_METADATA_V1B:
- return le32_get_bits(peer_metadata,
- RX_MPDU_DESC_META_DATA_V1B_PEER_ID);
- }
-}
-
-int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
- struct napi_struct *napi, int budget)
-{
- struct ath12k_hw_group *ag = ab->ag;
- struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
- struct ath12k_hw_link *hw_links = ag->hw_links;
- int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
- struct ath12k_rx_desc_info *desc_info;
- struct ath12k_dp *dp = &ab->dp;
- struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
- struct hal_reo_dest_ring *desc;
- struct ath12k_base *partner_ab;
- struct sk_buff_head msdu_list;
- struct ath12k_skb_rxcb *rxcb;
- int total_msdu_reaped = 0;
- u8 hw_link_id, device_id;
- struct hal_srng *srng;
- struct sk_buff *msdu;
- bool done = false;
- u64 desc_va;
-
- __skb_queue_head_init(&msdu_list);
-
- for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
- INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
-
- srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
-
- spin_lock_bh(&srng->lock);
-
-try_again:
- ath12k_hal_srng_access_begin(ab, srng);
-
- while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
- struct rx_mpdu_desc *mpdu_info;
- struct rx_msdu_desc *msdu_info;
- enum hal_reo_dest_ring_push_reason push_reason;
- u32 cookie;
-
- cookie = le32_get_bits(desc->buf_addr_info.info1,
- BUFFER_ADDR_INFO1_SW_COOKIE);
-
- hw_link_id = le32_get_bits(desc->info0,
- HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
-
- desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
- le32_to_cpu(desc->buf_va_lo));
- desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
-
- device_id = hw_links[hw_link_id].device_id;
- partner_ab = ath12k_ag_to_ab(ag, device_id);
- if (unlikely(!partner_ab)) {
- if (desc_info->skb) {
- dev_kfree_skb_any(desc_info->skb);
- desc_info->skb = NULL;
- }
-
- continue;
- }
-
- /* retry manual desc retrieval */
- if (!desc_info) {
- desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie);
- if (!desc_info) {
- ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
- cookie);
- continue;
- }
- }
-
- if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
- ath12k_warn(ab, "Check HW CC implementation");
-
- msdu = desc_info->skb;
- desc_info->skb = NULL;
-
- list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
-
- rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(partner_ab->dev, rxcb->paddr,
- msdu->len + skb_tailroom(msdu),
- DMA_FROM_DEVICE);
-
- num_buffs_reaped[device_id]++;
- ab->device_stats.reo_rx[ring_id][ab->device_id]++;
-
- push_reason = le32_get_bits(desc->info0,
- HAL_REO_DEST_RING_INFO0_PUSH_REASON);
- if (push_reason !=
- HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
- dev_kfree_skb_any(msdu);
- ab->device_stats.hal_reo_error[ring_id]++;
- continue;
- }
-
- msdu_info = &desc->rx_msdu_info;
- mpdu_info = &desc->rx_mpdu_info;
-
- rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) &
- RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) &
- RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
- RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
- rxcb->hw_link_id = hw_link_id;
- rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
- mpdu_info->peer_meta_data);
- rxcb->tid = le32_get_bits(mpdu_info->info0,
- RX_MPDU_DESC_INFO0_TID);
-
- __skb_queue_tail(&msdu_list, msdu);
-
- if (!rxcb->is_continuation) {
- total_msdu_reaped++;
- done = true;
- } else {
- done = false;
- }
-
- if (total_msdu_reaped >= budget)
- break;
- }
-
- /* HW might have updated the head pointer after we cached it.
- * In that case, even though there are entries in the ring, we'll
- * get a NULL rx_desc. Give the read another try with the updated
- * cached head pointer so that we can reap the complete MPDU in the
- * current rx processing.
- */
- if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
- ath12k_hal_srng_access_end(ab, srng);
- goto try_again;
- }
-
- ath12k_hal_srng_access_end(ab, srng);
-
- spin_unlock_bh(&srng->lock);
-
- if (!total_msdu_reaped)
- goto exit;
-
- for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
- if (!num_buffs_reaped[device_id])
- continue;
-
- partner_ab = ath12k_ag_to_ab(ag, device_id);
- rx_ring = &partner_ab->dp.rx_refill_buf_ring;
-
- ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
- &rx_desc_used_list[device_id],
- num_buffs_reaped[device_id]);
- }
-
- ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
- ring_id);
-
-exit:
- return total_msdu_reaped;
-}
+EXPORT_SYMBOL(ath12k_dp_rx_check_nwifi_hdr_len_valid);
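/* Editorial sketch, not part of the patch: how a caller might use the
 * refactored header-length check. The dp/rx_info-based signature comes
 * from the hunk above; the caller itself is hypothetical.
 */
static int example_process_msdu(struct ath12k_dp *dp, struct hal_rx_desc *rx_desc,
				struct sk_buff *msdu, struct hal_rx_desc_data *rx_info)
{
	/* rx_info->decap_type is assumed to have been extracted from the
	 * rx descriptor already, as the new signature expects.
	 */
	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, rx_desc,
							     msdu, rx_info)))
		return -EINVAL;	/* caller drops the skb */

	return 0;
}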
static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
{
struct ath12k_dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,
frag_timer);
- spin_lock_bh(&rx_tid->ab->base_lock);
+ spin_lock_bh(&rx_tid->dp->dp_lock);
if (rx_tid->last_frag_no &&
rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
- spin_unlock_bh(&rx_tid->ab->base_lock);
+ spin_unlock_bh(&rx_tid->dp->dp_lock);
return;
}
- ath12k_dp_rx_frags_cleanup(rx_tid, true);
- spin_unlock_bh(&rx_tid->ab->base_lock);
+ ath12k_dp_arch_rx_frags_cleanup(rx_tid->dp, rx_tid, true);
+ spin_unlock_bh(&rx_tid->dp->dp_lock);
}
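/* Sketch, not part of the patch: the timer callback above relies on a
 * bitmap completeness test. The fragment sequence is complete exactly
 * when every bit up to last_frag_no is set; field types follow
 * struct ath12k_dp_rx_tid from this patch.
 */
static bool example_frags_complete(u16 rx_frag_bitmap, u16 last_frag_no)
{
	/* e.g. last_frag_no = 3 requires rx_frag_bitmap == 0xf */
	return last_frag_no && rx_frag_bitmap == GENMASK(last_frag_no, 0);
}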
int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
{
struct ath12k_base *ab = ar->ab;
struct crypto_shash *tfm;
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
struct ath12k_dp_rx_tid *rx_tid;
int i;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
tfm = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
- spin_lock_bh(&ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
- peer = ath12k_peer_find(ab, vdev_id, peer_mac);
- if (!peer) {
- spin_unlock_bh(&ab->base_lock);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, peer_mac);
+ if (!peer || !peer->dp_peer) {
+ spin_unlock_bh(&dp->dp_lock);
crypto_free_shash(tfm);
ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
return -ENOENT;
}
if (!peer->primary_link) {
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
crypto_free_shash(tfm);
return 0;
}
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
- rx_tid = &peer->rx_tid[i];
- rx_tid->ab = ab;
+ rx_tid = &peer->dp_peer->rx_tid[i];
+ rx_tid->dp = dp;
timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
skb_queue_head_init(&rx_tid->rx_frags);
}
- peer->tfm_mmic = tfm;
- peer->dp_setup_done = true;
- spin_unlock_bh(&ab->base_lock);
+ peer->dp_peer->tfm_mmic = tfm;
+ peer->dp_peer->dp_setup_done = true;
+ spin_unlock_bh(&dp->dp_lock);
return 0;
}
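/* Sketch, not part of the patch, of the per-TID fragment timer lifecycle
 * set up above. Note that ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS is
 * jiffies-valued (defined as 2 * HZ later in this patch, despite the
 * _MS suffix).
 */
timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);

/* on each fragment while the sequence is still incomplete: */
mod_timer(&rx_tid->frag_timer, jiffies + ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);

/* once all fragments are present, before reassembly: */
timer_delete_sync(&rx_tid->frag_timer);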
-static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
- struct ieee80211_hdr *hdr, u8 *data,
- size_t data_len, u8 *mic)
+int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
+ struct ieee80211_hdr *hdr, u8 *data,
+ size_t data_len, u8 *mic)
{
SHASH_DESC_ON_STACK(desc, tfm);
u8 mic_hdr[16] = {};
@@ -3185,78 +1513,16 @@ out:
shash_desc_zero(desc);
return ret;
}
+EXPORT_SYMBOL(ath12k_dp_rx_h_michael_mic);
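/* Usage sketch, not part of the patch, mirroring the TKIP verification
 * path removed below: the helper writes an 8-byte Michael MIC, which is
 * then compared against the MIC trailing the data.
 */
u8 mic[IEEE80211_CCMP_MIC_LEN];	/* 8 bytes, as in the removed code */

ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
	return -EINVAL;	/* MMIC failure: report RX_FLAG_MMIC_ERROR to mac80211 */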
-static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer,
- struct sk_buff *msdu)
-{
- struct ath12k_base *ab = ar->ab;
- struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
- struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
- struct ieee80211_key_conf *key_conf;
- struct ieee80211_hdr *hdr;
- struct ath12k_dp_rx_info rx_info;
- u8 mic[IEEE80211_CCMP_MIC_LEN];
- int head_len, tail_len, ret;
- size_t data_len;
- u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
- u8 *key, *data;
- u8 key_idx;
-
- if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
- return 0;
-
- rx_info.addr2_present = false;
- rx_info.rx_status = rxs;
-
- hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
- tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
-
- if (!is_multicast_ether_addr(hdr->addr1))
- key_idx = peer->ucast_keyidx;
- else
- key_idx = peer->mcast_keyidx;
-
- key_conf = peer->keys[key_idx];
-
- data = msdu->data + head_len;
- data_len = msdu->len - head_len - tail_len;
- key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
-
- ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
- if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
- goto mic_fail;
-
- return 0;
-
-mic_fail:
- (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
- (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
-
- ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info);
-
- rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
- RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
- skb_pull(msdu, hal_rx_desc_sz);
-
- if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu)))
- return -EINVAL;
-
- ath12k_dp_rx_h_ppdu(ar, &rx_info);
- ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
- HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
- ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
- return -EINVAL;
-}
-
-static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
- enum hal_encrypt_type enctype, u32 flags)
+void ath12k_dp_rx_h_undecap_frag(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype, u32 flags)
{
+ struct ath12k_dp *dp = dp_pdev->dp;
struct ieee80211_hdr *hdr;
size_t hdr_len;
size_t crypto_len;
- u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
+ u32 hal_rx_desc_sz = dp->ab->hal.hal_desc_sz;
if (!flags)
return;
@@ -3265,258 +1531,43 @@ static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
if (flags & RX_FLAG_MIC_STRIPPED)
skb_trim(msdu, msdu->len -
- ath12k_dp_rx_crypto_mic_len(ar, enctype));
+ ath12k_dp_rx_crypto_mic_len(dp, enctype));
if (flags & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
- ath12k_dp_rx_crypto_icv_len(ar, enctype));
+ ath12k_dp_rx_crypto_icv_len(dp_pdev, enctype));
if (flags & RX_FLAG_IV_STRIPPED) {
hdr_len = ieee80211_hdrlen(hdr->frame_control);
- crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
+ crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype);
memmove(msdu->data + hal_rx_desc_sz + crypto_len,
msdu->data + hal_rx_desc_sz, hdr_len);
skb_pull(msdu, crypto_len);
}
}
+EXPORT_SYMBOL(ath12k_dp_rx_h_undecap_frag);
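/* Context sketch, not part of the patch: the flags that drive this
 * helper are computed per fragment in the (removed) defrag path below.
 * Only decrypted fragments are stripped; the first keeps its IV and the
 * last keeps its ICV/MIC.
 */
u32 flags = 0;

if (is_decrypted) {
	if (skb != first_frag)
		flags |= RX_FLAG_IV_STRIPPED;
	if (skb != last_frag)
		flags |= RX_FLAG_ICV_STRIPPED | RX_FLAG_MIC_STRIPPED;
}
ath12k_dp_rx_h_undecap_frag(dp_pdev, skb, enctype, flags);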
-static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
- struct ath12k_peer *peer,
- struct ath12k_dp_rx_tid *rx_tid,
- struct sk_buff **defrag_skb)
-{
- struct ath12k_base *ab = ar->ab;
- struct hal_rx_desc *rx_desc;
- struct sk_buff *skb, *first_frag, *last_frag;
- struct ieee80211_hdr *hdr;
- enum hal_encrypt_type enctype;
- bool is_decrypted = false;
- int msdu_len = 0;
- int extra_space;
- u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
-
- first_frag = skb_peek(&rx_tid->rx_frags);
- last_frag = skb_peek_tail(&rx_tid->rx_frags);
-
- skb_queue_walk(&rx_tid->rx_frags, skb) {
- flags = 0;
- rx_desc = (struct hal_rx_desc *)skb->data;
- hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
-
- enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
- if (enctype != HAL_ENCRYPT_TYPE_OPEN)
- is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
- rx_desc);
-
- if (is_decrypted) {
- if (skb != first_frag)
- flags |= RX_FLAG_IV_STRIPPED;
- if (skb != last_frag)
- flags |= RX_FLAG_ICV_STRIPPED |
- RX_FLAG_MIC_STRIPPED;
- }
-
- /* RX fragments are always raw packets */
- if (skb != last_frag)
- skb_trim(skb, skb->len - FCS_LEN);
- ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
-
- if (skb != first_frag)
- skb_pull(skb, hal_rx_desc_sz +
- ieee80211_hdrlen(hdr->frame_control));
- msdu_len += skb->len;
- }
-
- extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
- if (extra_space > 0 &&
- (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
- return -ENOMEM;
-
- __skb_unlink(first_frag, &rx_tid->rx_frags);
- while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
- skb_put_data(first_frag, skb->data, skb->len);
- dev_kfree_skb_any(skb);
- }
-
- hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
- hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
- ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
-
- if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
- first_frag = NULL;
-
- *defrag_skb = first_frag;
- return 0;
-}
-
-static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
- struct ath12k_dp_rx_tid *rx_tid,
- struct sk_buff *defrag_skb)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
- struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
- struct hal_reo_entrance_ring *reo_ent_ring;
- struct hal_reo_dest_ring *reo_dest_ring;
- struct dp_link_desc_bank *link_desc_banks;
- struct hal_rx_msdu_link *msdu_link;
- struct hal_rx_msdu_details *msdu0;
- struct hal_srng *srng;
- dma_addr_t link_paddr, buf_paddr;
- u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
- u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi;
- int ret;
- struct ath12k_rx_desc_info *desc_info;
- enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm;
- u8 dst_ind;
-
- hal_rx_desc_sz = ab->hal.hal_desc_sz;
- link_desc_banks = dp->link_desc_banks;
- reo_dest_ring = rx_tid->dst_ring_desc;
-
- ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
- &link_paddr, &cookie);
- desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
-
- msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
- (link_paddr - link_desc_banks[desc_bank].paddr));
- msdu0 = &msdu_link->msdu_link[0];
- msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
- dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
-
- memset(msdu0, 0, sizeof(*msdu0));
-
- msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
- u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
- u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
- u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
- RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
- u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
- u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
- msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
- msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
-
- /* change msdu len in hal rx desc */
- ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
-
- buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
- defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(ab->dev, buf_paddr))
- return -ENOMEM;
-
- spin_lock_bh(&dp->rx_desc_lock);
- desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
- struct ath12k_rx_desc_info,
- list);
- if (!desc_info) {
- spin_unlock_bh(&dp->rx_desc_lock);
- ath12k_warn(ab, "failed to find rx desc for reinject\n");
- ret = -ENOMEM;
- goto err_unmap_dma;
- }
-
- desc_info->skb = defrag_skb;
- desc_info->in_use = true;
-
- list_del(&desc_info->list);
- spin_unlock_bh(&dp->rx_desc_lock);
-
- ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
-
- ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
- desc_info->cookie,
- HAL_RX_BUF_RBM_SW3_BM);
-
- /* Fill mpdu details into reo entrance ring */
- srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
-
- spin_lock_bh(&srng->lock);
- ath12k_hal_srng_access_begin(ab, srng);
-
- reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
- if (!reo_ent_ring) {
- ath12k_hal_srng_access_end(ab, srng);
- spin_unlock_bh(&srng->lock);
- ret = -ENOSPC;
- goto err_free_desc;
- }
- memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
-
- ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
- cookie,
- idle_link_rbm);
-
- mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
- u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
- u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
- u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
- u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
-
- reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
- reo_ent_ring->rx_mpdu_info.peer_meta_data =
- reo_dest_ring->rx_mpdu_info.peer_meta_data;
-
- if (ab->hw_params->reoq_lut_support) {
- reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
- queue_addr_hi = 0;
- } else {
- reo_ent_ring->queue_addr_lo =
- cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned));
- queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
- }
-
- reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
- HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
- le32_encode_bits(dst_ind,
- HAL_REO_ENTR_RING_INFO0_DEST_IND);
-
- reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
- HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
- dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
- HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
- reo_ent_ring->info2 =
- cpu_to_le32(u32_get_bits(dest_ring_info0,
- HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));
-
- ath12k_hal_srng_access_end(ab, srng);
- spin_unlock_bh(&srng->lock);
-
- return 0;
-
-err_free_desc:
- spin_lock_bh(&dp->rx_desc_lock);
- desc_info->in_use = false;
- desc_info->skb = NULL;
- list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
- spin_unlock_bh(&dp->rx_desc_lock);
-err_unmap_dma:
- dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_TO_DEVICE);
- return ret;
-}
-
-static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
+static int ath12k_dp_rx_h_cmp_frags(struct ath12k_hal *hal,
struct sk_buff *a, struct sk_buff *b)
{
int frag1, frag2;
- frag1 = ath12k_dp_rx_h_frag_no(ab, a);
- frag2 = ath12k_dp_rx_h_frag_no(ab, b);
+ frag1 = ath12k_dp_rx_h_frag_no(hal, a);
+ frag2 = ath12k_dp_rx_h_frag_no(hal, b);
return frag1 - frag2;
}
-static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
- struct sk_buff_head *frag_list,
- struct sk_buff *cur_frag)
+void ath12k_dp_rx_h_sort_frags(struct ath12k_hal *hal,
+ struct sk_buff_head *frag_list,
+ struct sk_buff *cur_frag)
{
struct sk_buff *skb;
int cmp;
skb_queue_walk(frag_list, skb) {
- cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
+ cmp = ath12k_dp_rx_h_cmp_frags(hal, skb, cur_frag);
if (cmp < 0)
continue;
__skb_queue_before(frag_list, skb, cur_frag);
@@ -3524,13 +1575,14 @@ static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
}
__skb_queue_tail(frag_list, cur_frag);
}
+EXPORT_SYMBOL(ath12k_dp_rx_h_sort_frags);
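/* Policy sketch, not part of the patch: the sorted insert is only needed
 * for out-of-order fragments. The removed ath12k_dp_rx_frag_h_mpdu()
 * below appends in O(1) whenever the new fragment number is above
 * everything received so far.
 */
if (!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))
	__skb_queue_tail(&rx_tid->rx_frags, msdu);
else
	ath12k_dp_rx_h_sort_frags(hal, &rx_tid->rx_frags, msdu);

rx_tid->rx_frag_bitmap |= BIT(frag_no);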
-static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
+u64 ath12k_dp_rx_h_get_pn(struct ath12k_dp *dp, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
- u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
+ u32 hal_rx_desc_sz = dp->ab->hal.hal_desc_sz;
hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
@@ -3544,956 +1596,11 @@ static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
return pn;
}
-
-static bool
-ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)
-{
- struct ath12k_base *ab = ar->ab;
- enum hal_encrypt_type encrypt_type;
- struct sk_buff *first_frag, *skb;
- struct hal_rx_desc *desc;
- u64 last_pn;
- u64 cur_pn;
-
- first_frag = skb_peek(&rx_tid->rx_frags);
- desc = (struct hal_rx_desc *)first_frag->data;
-
- encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);
- if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
- encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
- encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
- encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
- return true;
-
- last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
- skb_queue_walk(&rx_tid->rx_frags, skb) {
- if (skb == first_frag)
- continue;
-
- cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);
- if (cur_pn != last_pn + 1)
- return false;
- last_pn = cur_pn;
- }
- return true;
-}
-
-static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
- struct sk_buff *msdu,
- struct hal_reo_dest_ring *ring_desc)
-{
- struct ath12k_base *ab = ar->ab;
- struct hal_rx_desc *rx_desc;
- struct ath12k_peer *peer;
- struct ath12k_dp_rx_tid *rx_tid;
- struct sk_buff *defrag_skb = NULL;
- u32 peer_id;
- u16 seqno, frag_no;
- u8 tid;
- int ret = 0;
- bool more_frags;
-
- rx_desc = (struct hal_rx_desc *)msdu->data;
- peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
- tid = ath12k_dp_rx_h_tid(ab, rx_desc);
- seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);
- frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
- more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);
-
- if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||
- !ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||
- tid > IEEE80211_NUM_TIDS)
- return -EINVAL;
-
- /* An unfragmented packet was received in the REO
- * exception ring; this shouldn't happen, as such
- * packets normally arrive via the reo2sw srngs.
- */
- if (WARN_ON_ONCE(!frag_no && !more_frags))
- return -EINVAL;
-
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find_by_id(ab, peer_id);
- if (!peer) {
- ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
- peer_id);
- ret = -ENOENT;
- goto out_unlock;
- }
-
- if (!peer->dp_setup_done) {
- ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
- peer->addr, peer_id);
- ret = -ENOENT;
- goto out_unlock;
- }
-
- rx_tid = &peer->rx_tid[tid];
-
- if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
- skb_queue_empty(&rx_tid->rx_frags)) {
- /* Flush stored fragments and start a new sequence */
- ath12k_dp_rx_frags_cleanup(rx_tid, true);
- rx_tid->cur_sn = seqno;
- }
-
- if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
- /* Fragment already present */
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap)))
- __skb_queue_tail(&rx_tid->rx_frags, msdu);
- else
- ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
-
- rx_tid->rx_frag_bitmap |= BIT(frag_no);
- if (!more_frags)
- rx_tid->last_frag_no = frag_no;
-
- if (frag_no == 0) {
- rx_tid->dst_ring_desc = kmemdup(ring_desc,
- sizeof(*rx_tid->dst_ring_desc),
- GFP_ATOMIC);
- if (!rx_tid->dst_ring_desc) {
- ret = -ENOMEM;
- goto out_unlock;
- }
- } else {
- ath12k_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info,
- HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
- }
-
- if (!rx_tid->last_frag_no ||
- rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
- mod_timer(&rx_tid->frag_timer, jiffies +
- ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
- goto out_unlock;
- }
-
- spin_unlock_bh(&ab->base_lock);
- timer_delete_sync(&rx_tid->frag_timer);
- spin_lock_bh(&ab->base_lock);
-
- peer = ath12k_peer_find_by_id(ab, peer_id);
- if (!peer)
- goto err_frags_cleanup;
-
- if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
- goto err_frags_cleanup;
-
- if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
- goto err_frags_cleanup;
-
- if (!defrag_skb)
- goto err_frags_cleanup;
-
- if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
- goto err_frags_cleanup;
-
- ath12k_dp_rx_frags_cleanup(rx_tid, false);
- goto out_unlock;
-
-err_frags_cleanup:
- dev_kfree_skb_any(defrag_skb);
- ath12k_dp_rx_frags_cleanup(rx_tid, true);
-out_unlock:
- spin_unlock_bh(&ab->base_lock);
- return ret;
-}
-
-static int
-ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
- struct list_head *used_list,
- bool drop, u32 cookie)
-{
- struct ath12k_base *ab = ar->ab;
- struct sk_buff *msdu;
- struct ath12k_skb_rxcb *rxcb;
- struct hal_rx_desc *rx_desc;
- u16 msdu_len;
- u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
- struct ath12k_rx_desc_info *desc_info;
- u64 desc_va;
-
- desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
- le32_to_cpu(desc->buf_va_lo));
- desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
-
- /* retry manual desc retrieval */
- if (!desc_info) {
- desc_info = ath12k_dp_get_rx_desc(ab, cookie);
- if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
- cookie);
- return -EINVAL;
- }
- }
-
- if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
- ath12k_warn(ab, " RX Exception, Check HW CC implementation");
-
- msdu = desc_info->skb;
- desc_info->skb = NULL;
-
- list_add_tail(&desc_info->list, used_list);
-
- rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ar->ab->dev, rxcb->paddr,
- msdu->len + skb_tailroom(msdu),
- DMA_FROM_DEVICE);
-
- if (drop) {
- dev_kfree_skb_any(msdu);
- return 0;
- }
-
- rcu_read_lock();
- if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
- dev_kfree_skb_any(msdu);
- goto exit;
- }
-
- if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
- dev_kfree_skb_any(msdu);
- goto exit;
- }
-
- rx_desc = (struct hal_rx_desc *)msdu->data;
- msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);
- if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
- ath12k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
- ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
- sizeof(*rx_desc));
- dev_kfree_skb_any(msdu);
- goto exit;
- }
-
- skb_put(msdu, hal_rx_desc_sz + msdu_len);
-
- if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
- dev_kfree_skb_any(msdu);
- ath12k_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info,
- HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
- }
-exit:
- rcu_read_unlock();
- return 0;
-}
-
-static int ath12k_dp_h_msdu_buffer_type(struct ath12k_base *ab,
- struct list_head *list,
- struct hal_reo_dest_ring *desc)
-{
- struct ath12k_rx_desc_info *desc_info;
- struct ath12k_skb_rxcb *rxcb;
- struct sk_buff *msdu;
- u64 desc_va;
-
- ab->device_stats.reo_excep_msdu_buf_type++;
-
- desc_va = (u64)le32_to_cpu(desc->buf_va_hi) << 32 |
- le32_to_cpu(desc->buf_va_lo);
- desc_info = (struct ath12k_rx_desc_info *)(uintptr_t)desc_va;
- if (!desc_info) {
- u32 cookie;
-
- cookie = le32_get_bits(desc->buf_addr_info.info1,
- BUFFER_ADDR_INFO1_SW_COOKIE);
- desc_info = ath12k_dp_get_rx_desc(ab, cookie);
- if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
- cookie);
- return -EINVAL;
- }
- }
-
- if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) {
- ath12k_warn(ab, "rx exception, magic check failed with value: %u\n",
- desc_info->magic);
- return -EINVAL;
- }
-
- msdu = desc_info->skb;
- desc_info->skb = NULL;
- list_add_tail(&desc_info->list, list);
- rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu),
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(msdu);
-
- return 0;
-}
-
-int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
- int budget)
-{
- struct ath12k_hw_group *ag = ab->ag;
- struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
- u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
- int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
- struct dp_link_desc_bank *link_desc_banks;
- enum hal_rx_buf_return_buf_manager rbm;
- struct hal_rx_msdu_link *link_desc_va;
- int tot_n_bufs_reaped, quota, ret, i;
- struct hal_reo_dest_ring *reo_desc;
- struct dp_rxdma_ring *rx_ring;
- struct dp_srng *reo_except;
- struct ath12k_hw_link *hw_links = ag->hw_links;
- struct ath12k_base *partner_ab;
- u8 hw_link_id, device_id;
- u32 desc_bank, num_msdus;
- struct hal_srng *srng;
- struct ath12k *ar;
- dma_addr_t paddr;
- bool is_frag;
- bool drop;
- int pdev_id;
-
- tot_n_bufs_reaped = 0;
- quota = budget;
-
- for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
- INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
-
- reo_except = &ab->dp.reo_except_ring;
-
- srng = &ab->hal.srng_list[reo_except->ring_id];
-
- spin_lock_bh(&srng->lock);
-
- ath12k_hal_srng_access_begin(ab, srng);
-
- while (budget &&
- (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
- drop = false;
- ab->device_stats.err_ring_pkts++;
-
- hw_link_id = le32_get_bits(reo_desc->info0,
- HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
- device_id = hw_links[hw_link_id].device_id;
- partner_ab = ath12k_ag_to_ab(ag, device_id);
-
- /* The case below handles data packets from un-associated clients,
- * since AST lookup is expected to fail for an un-associated
- * station's data packets.
- */
- if (le32_get_bits(reo_desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE) ==
- HAL_REO_DEST_RING_BUFFER_TYPE_MSDU) {
- if (!ath12k_dp_h_msdu_buffer_type(partner_ab,
- &rx_desc_used_list[device_id],
- reo_desc)) {
- num_buffs_reaped[device_id]++;
- tot_n_bufs_reaped++;
- }
- goto next_desc;
- }
-
- ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
- &desc_bank);
- if (ret) {
- ath12k_warn(ab, "failed to parse error reo desc %d\n",
- ret);
- continue;
- }
-
- pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
- hw_links[hw_link_id].pdev_idx);
- ar = partner_ab->pdevs[pdev_id].ar;
-
- link_desc_banks = partner_ab->dp.link_desc_banks;
- link_desc_va = link_desc_banks[desc_bank].vaddr +
- (paddr - link_desc_banks[desc_bank].paddr);
- ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
- &rbm);
- if (rbm != partner_ab->dp.idle_link_rbm &&
- rbm != HAL_RX_BUF_RBM_SW3_BM &&
- rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
- ab->device_stats.invalid_rbm++;
- ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
- ath12k_dp_rx_link_desc_return(partner_ab,
- &reo_desc->buf_addr_info,
- HAL_WBM_REL_BM_ACT_REL_MSDU);
- continue;
- }
-
- is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
- RX_MPDU_DESC_INFO0_FRAG_FLAG);
-
- /* Below, process only rx fragments with one MSDU per link desc,
- * and drop MSDUs indicated due to error reasons.
- * Dynamic fragmentation is not supported for multi-link clients,
- * so drop the partner device buffers.
- */
- if (!is_frag || num_msdus > 1 ||
- partner_ab->device_id != ab->device_id) {
- drop = true;
-
- /* Return the link desc back to wbm idle list */
- ath12k_dp_rx_link_desc_return(partner_ab,
- &reo_desc->buf_addr_info,
- HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
- }
-
- for (i = 0; i < num_msdus; i++) {
- if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
- &rx_desc_used_list[device_id],
- drop,
- msdu_cookies[i])) {
- num_buffs_reaped[device_id]++;
- tot_n_bufs_reaped++;
- }
- }
-
-next_desc:
- if (tot_n_bufs_reaped >= quota) {
- tot_n_bufs_reaped = quota;
- goto exit;
- }
-
- budget = quota - tot_n_bufs_reaped;
- }
-
-exit:
- ath12k_hal_srng_access_end(ab, srng);
-
- spin_unlock_bh(&srng->lock);
-
- for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
- if (!num_buffs_reaped[device_id])
- continue;
-
- partner_ab = ath12k_ag_to_ab(ag, device_id);
- rx_ring = &partner_ab->dp.rx_refill_buf_ring;
-
- ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
- &rx_desc_used_list[device_id],
- num_buffs_reaped[device_id]);
- }
-
- return tot_n_bufs_reaped;
-}
-
-static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
- int msdu_len,
- struct sk_buff_head *msdu_list)
-{
- struct sk_buff *skb, *tmp;
- struct ath12k_skb_rxcb *rxcb;
- int n_buffs;
-
- n_buffs = DIV_ROUND_UP(msdu_len,
- (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz));
-
- skb_queue_walk_safe(msdu_list, skb, tmp) {
- rxcb = ATH12K_SKB_RXCB(skb);
- if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
- rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
- if (!n_buffs)
- break;
- __skb_unlink(skb, msdu_list);
- dev_kfree_skb_any(skb);
- n_buffs--;
- }
- }
-}
-
-static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
- struct ath12k_dp_rx_info *rx_info,
- struct sk_buff_head *msdu_list)
-{
- struct ath12k_base *ab = ar->ab;
- u16 msdu_len;
- struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
- u8 l3pad_bytes;
- struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
-
- msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
-
- if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
- /* First buffer will be freed by the caller, so deduct its length */
- msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
- ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
- return -EINVAL;
- }
-
- /* Even after cleaning up the sg buffers in the msdu list with above check
- * any msdu received with continuation flag needs to be dropped as invalid.
- * This protects against some random err frame with continuation flag.
- */
- if (rxcb->is_continuation)
- return -EINVAL;
-
- if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
- ath12k_warn(ar->ab,
- "msdu_done bit not set in null_q_des processing\n");
- __skb_queue_purge(msdu_list);
- return -EIO;
- }
-
- /* Handle NULL queue descriptor violations arising out of a missing
- * REO queue for a given peer or a given TID. This typically
- * happens if a packet is received on a QoS-enabled TID before the
- * ADDBA negotiation for that TID has set up the TID queue. It
- * may also happen for MC/BC frames if they are not routed to the
- * non-QoS TID queue, in the absence of any other default TID queue.
- * This error can show up in both the REO destination and WBM release rings.
- */
-
- if (rxcb->is_frag) {
- skb_pull(msdu, hal_rx_desc_sz);
- } else {
- l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
-
- if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
- return -EINVAL;
-
- skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
- skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
- }
- if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
- return -EINVAL;
-
- ath12k_dp_rx_h_fetch_info(ab, desc, rx_info);
- ath12k_dp_rx_h_ppdu(ar, rx_info);
- ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info);
-
- rxcb->tid = rx_info->tid;
-
- /* Note that the caller retains access to the msdu and completes
- * rx with mac80211, so there is no need to clean up the amsdu_list.
- */
-
- return 0;
-}
-
-static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
- struct ath12k_dp_rx_info *rx_info,
- struct sk_buff_head *msdu_list)
-{
- struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- bool drop = false;
-
- ar->ab->device_stats.reo_error[rxcb->err_code]++;
-
- switch (rxcb->err_code) {
- case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
- if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list))
- drop = true;
- break;
- case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
- /* TODO: Do not drop PN failed packets in the driver;
- * instead, it is good to drop such packets in mac80211
- * after incrementing the replay counters.
- */
- fallthrough;
- default:
- /* TODO: Review other errors and process them to mac80211
- * as appropriate.
- */
- drop = true;
- break;
- }
-
- return drop;
-}
-
-static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
- struct ath12k_dp_rx_info *rx_info)
-{
- struct ath12k_base *ab = ar->ab;
- u16 msdu_len;
- struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
- u8 l3pad_bytes;
- struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
-
- rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
- rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
-
- l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
- msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
-
- if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
- ath12k_dbg(ab, ATH12K_DBG_DATA,
- "invalid msdu len in tkip mic err %u\n", msdu_len);
- ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
- sizeof(*desc));
- return true;
- }
-
- skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
- skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
-
- if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
- return true;
-
- ath12k_dp_rx_h_ppdu(ar, rx_info);
-
- rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
- RX_FLAG_DECRYPTED);
-
- ath12k_dp_rx_h_undecap(ar, msdu, desc,
- HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false);
- return false;
-}
-
-static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
- struct ath12k_dp_rx_info *rx_info)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
- bool drop = false;
- u32 err_bitmap;
-
- ar->ab->device_stats.rxdma_error[rxcb->err_code]++;
-
- switch (rxcb->err_code) {
- case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
- case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
- err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
- if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
- ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
- drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info);
- break;
- }
- fallthrough;
- default:
- /* TODO: Review other rxdma error code to check if anything is
- * worth reporting to mac80211
- */
- drop = true;
- break;
- }
-
- return drop;
-}
-
-static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
- struct napi_struct *napi,
- struct sk_buff *msdu,
- struct sk_buff_head *msdu_list)
-{
- struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct ieee80211_rx_status rxs = {};
- struct ath12k_dp_rx_info rx_info;
- bool drop = true;
-
- rx_info.addr2_present = false;
- rx_info.rx_status = &rxs;
-
- switch (rxcb->err_rel_src) {
- case HAL_WBM_REL_SRC_MODULE_REO:
- drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list);
- break;
- case HAL_WBM_REL_SRC_MODULE_RXDMA:
- drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info);
- break;
- default:
- /* msdu will get freed */
- break;
- }
-
- if (drop) {
- dev_kfree_skb_any(msdu);
- return;
- }
-
- rx_info.rx_status->flag |= RX_FLAG_SKIP_MONITOR;
-
- ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
-}
-
-int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
- struct napi_struct *napi, int budget)
-{
- struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
- struct ath12k_hw_group *ag = ab->ag;
- struct ath12k *ar;
- struct ath12k_dp *dp = &ab->dp;
- struct dp_rxdma_ring *rx_ring;
- struct hal_rx_wbm_rel_info err_info;
- struct hal_srng *srng;
- struct sk_buff *msdu;
- struct sk_buff_head msdu_list, scatter_msdu_list;
- struct ath12k_skb_rxcb *rxcb;
- void *rx_desc;
- int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
- int total_num_buffs_reaped = 0;
- struct ath12k_rx_desc_info *desc_info;
- struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
- struct ath12k_hw_link *hw_links = ag->hw_links;
- struct ath12k_base *partner_ab;
- u8 hw_link_id, device_id;
- int ret, pdev_id;
- struct hal_rx_desc *msdu_data;
-
- __skb_queue_head_init(&msdu_list);
- __skb_queue_head_init(&scatter_msdu_list);
-
- for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
- INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
-
- srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
- spin_lock_bh(&srng->lock);
-
- ath12k_hal_srng_access_begin(ab, srng);
-
- while (budget) {
- rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
- if (!rx_desc)
- break;
-
- ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
- if (ret) {
- ath12k_warn(ab,
- "failed to parse rx error in wbm_rel ring desc %d\n",
- ret);
- continue;
- }
-
- desc_info = err_info.rx_desc;
-
- /* retry manual desc retrieval if hw cc is not done */
- if (!desc_info) {
- desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
- if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
- err_info.cookie);
- continue;
- }
- }
-
- if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
- ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
-
- msdu = desc_info->skb;
- desc_info->skb = NULL;
-
- device_id = desc_info->device_id;
- partner_ab = ath12k_ag_to_ab(ag, device_id);
- if (unlikely(!partner_ab)) {
- dev_kfree_skb_any(msdu);
-
- /* In case the continuation bit was set in the
- * previous record, clean up scatter_msdu_list
- */
- ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
- continue;
- }
-
- list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
-
- rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(partner_ab->dev, rxcb->paddr,
- msdu->len + skb_tailroom(msdu),
- DMA_FROM_DEVICE);
-
- num_buffs_reaped[device_id]++;
- total_num_buffs_reaped++;
-
- if (!err_info.continuation)
- budget--;
-
- if (err_info.push_reason !=
- HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
- dev_kfree_skb_any(msdu);
- continue;
- }
-
- msdu_data = (struct hal_rx_desc *)msdu->data;
- rxcb->err_rel_src = err_info.err_rel_src;
- rxcb->err_code = err_info.err_code;
- rxcb->is_first_msdu = err_info.first_msdu;
- rxcb->is_last_msdu = err_info.last_msdu;
- rxcb->is_continuation = err_info.continuation;
- rxcb->rx_desc = msdu_data;
-
- if (err_info.continuation) {
- __skb_queue_tail(&scatter_msdu_list, msdu);
- continue;
- }
-
- hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab,
- msdu_data);
- if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
- dev_kfree_skb_any(msdu);
-
- /* In case the continuation bit was set in the
- * previous record, clean up scatter_msdu_list
- */
- ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
- continue;
- }
-
- if (!skb_queue_empty(&scatter_msdu_list)) {
- struct sk_buff *msdu;
-
- skb_queue_walk(&scatter_msdu_list, msdu) {
- rxcb = ATH12K_SKB_RXCB(msdu);
- rxcb->hw_link_id = hw_link_id;
- }
-
- skb_queue_splice_tail_init(&scatter_msdu_list,
- &msdu_list);
- }
-
- rxcb = ATH12K_SKB_RXCB(msdu);
- rxcb->hw_link_id = hw_link_id;
- __skb_queue_tail(&msdu_list, msdu);
- }
-
- /* In case the continuation bit was set in the
- * last record, clean up scatter_msdu_list
- */
- ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
-
- ath12k_hal_srng_access_end(ab, srng);
-
- spin_unlock_bh(&srng->lock);
-
- if (!total_num_buffs_reaped)
- goto done;
-
- for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
- if (!num_buffs_reaped[device_id])
- continue;
-
- partner_ab = ath12k_ag_to_ab(ag, device_id);
- rx_ring = &partner_ab->dp.rx_refill_buf_ring;
-
- ath12k_dp_rx_bufs_replenish(ab, rx_ring,
- &rx_desc_used_list[device_id],
- num_buffs_reaped[device_id]);
- }
-
- rcu_read_lock();
- while ((msdu = __skb_dequeue(&msdu_list))) {
- rxcb = ATH12K_SKB_RXCB(msdu);
- hw_link_id = rxcb->hw_link_id;
-
- device_id = hw_links[hw_link_id].device_id;
- partner_ab = ath12k_ag_to_ab(ag, device_id);
- if (unlikely(!partner_ab)) {
- ath12k_dbg(ab, ATH12K_DBG_DATA,
- "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
- hw_link_id, device_id);
- dev_kfree_skb_any(msdu);
- continue;
- }
-
- pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
- hw_links[hw_link_id].pdev_idx);
- ar = partner_ab->pdevs[pdev_id].ar;
-
- if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) {
- dev_kfree_skb_any(msdu);
- continue;
- }
-
- if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
- dev_kfree_skb_any(msdu);
- continue;
- }
-
- if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) {
- device_id = ar->ab->device_id;
- device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++;
- }
-
- ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
- }
- rcu_read_unlock();
-done:
- return total_num_buffs_reaped;
-}
-
-void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
-{
- struct ath12k_dp *dp = &ab->dp;
- struct hal_tlv_64_hdr *hdr;
- struct hal_srng *srng;
- struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
- bool found = false;
- u16 tag;
- struct hal_reo_status reo_status;
-
- srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
-
- memset(&reo_status, 0, sizeof(reo_status));
-
- spin_lock_bh(&srng->lock);
-
- ath12k_hal_srng_access_begin(ab, srng);
-
- while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
- tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
-
- switch (tag) {
- case HAL_REO_GET_QUEUE_STATS_STATUS:
- ath12k_hal_reo_status_queue_stats(ab, hdr,
- &reo_status);
- break;
- case HAL_REO_FLUSH_QUEUE_STATUS:
- ath12k_hal_reo_flush_queue_status(ab, hdr,
- &reo_status);
- break;
- case HAL_REO_FLUSH_CACHE_STATUS:
- ath12k_hal_reo_flush_cache_status(ab, hdr,
- &reo_status);
- break;
- case HAL_REO_UNBLOCK_CACHE_STATUS:
- ath12k_hal_reo_unblk_cache_status(ab, hdr,
- &reo_status);
- break;
- case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
- ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
- &reo_status);
- break;
- case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
- ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
- &reo_status);
- break;
- case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
- ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
- &reo_status);
- break;
- default:
- ath12k_warn(ab, "Unknown reo status type %d\n", tag);
- continue;
- }
-
- spin_lock_bh(&dp->reo_cmd_lock);
- list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
- if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
- found = true;
- list_del(&cmd->list);
- break;
- }
- }
- spin_unlock_bh(&dp->reo_cmd_lock);
-
- if (found) {
- cmd->handler(dp, (void *)&cmd->data,
- reo_status.uniform_hdr.cmd_status);
- kfree(cmd);
- }
-
- found = false;
- }
-
- ath12k_hal_srng_access_end(ab, srng);
-
- spin_unlock_bh(&srng->lock);
-}
+EXPORT_SYMBOL(ath12k_dp_rx_h_get_pn);
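/* Reference sketch, not part of the patch: the body of
 * ath12k_dp_rx_h_get_pn() is elided by the hunk above. Assuming the
 * standard CCMP/GCMP header layout, the 48-bit PN is reassembled from
 * the extended IV; byte 2 is reserved and byte 3 carries key ID/ExtIV.
 */
pn  = ehdr[0];			/* PN0 */
pn |= (u64)ehdr[1] << 8;	/* PN1 */
pn |= (u64)ehdr[4] << 16;	/* PN2 */
pn |= (u64)ehdr[5] << 24;	/* PN3 */
pn |= (u64)ehdr[6] << 32;	/* PN4 */
pn |= (u64)ehdr[7] << 40;	/* PN5 */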
void ath12k_dp_rx_free(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct dp_srng *srng;
int i;
@@ -4523,92 +1630,9 @@ void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
ath12k_dp_rx_pdev_srng_free(ar);
}
-int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
-{
- struct ath12k_dp *dp = &ab->dp;
- struct htt_rx_ring_tlv_filter tlv_filter = {};
- u32 ring_id;
- int ret;
- u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
-
- ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
-
- tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
- tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
- tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
- HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
- HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
- tlv_filter.offset_valid = true;
- tlv_filter.rx_packet_offset = hal_rx_desc_sz;
-
- tlv_filter.rx_mpdu_start_offset =
- ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
- tlv_filter.rx_msdu_end_offset =
- ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
-
- if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
- tlv_filter.rx_mpdu_start_wmask =
- ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
- tlv_filter.rx_msdu_end_wmask =
- ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
- ath12k_dbg(ab, ATH12K_DBG_DATA,
- "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
- tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
- }
-
- ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
- HAL_RXDMA_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
-
- return ret;
-}
-
-int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
-{
- struct ath12k_dp *dp = &ab->dp;
- struct htt_rx_ring_tlv_filter tlv_filter = {};
- u32 ring_id;
- int ret = 0;
- u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
- int i;
-
- ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
-
- tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
- tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
- tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
- HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
- HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
- tlv_filter.offset_valid = true;
- tlv_filter.rx_packet_offset = hal_rx_desc_sz;
-
- tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
-
- tlv_filter.rx_mpdu_start_offset =
- ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
- tlv_filter.rx_msdu_end_offset =
- ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
-
- /* TODO: Selectively subscribe to the required qwords within
- * msdu_end and mpdu_start, set up the mask in the message below,
- * and modify the rx_desc struct accordingly.
- */
-
- for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
- ring_id = dp->rx_mac_buf_ring[i].ring_id;
- ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
- HAL_RXDMA_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
- }
-
- return ret;
-}
-
int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
u32 ring_id;
int i, ret;
@@ -4680,7 +1704,7 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
- struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct dp_srng *srng;
int i, ret;
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index 69d0a36a91d8..1ec5382f5995 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -1,21 +1,26 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_DP_RX_H
#define ATH12K_DP_RX_H
+#include <crypto/hash.h>
#include "core.h"
-#include "rx_desc.h"
#include "debug.h"
#define DP_MAX_NWIFI_HDR_LEN 30
+struct ath12k_reoq_buf {
+ void *vaddr;
+ dma_addr_t paddr_aligned;
+ u32 size;
+};
+
struct ath12k_dp_rx_tid {
u8 tid;
u32 ba_win_sz;
- bool active;
struct ath12k_reoq_buf qbuf;
/* Info related to rx fragments */
@@ -28,7 +33,7 @@ struct ath12k_dp_rx_tid {
/* Timer info related to fragments */
struct timer_list frag_timer;
- struct ath12k_base *ab;
+ struct ath12k_dp *dp;
};
struct ath12k_dp_rx_tid_rxq {
@@ -59,6 +64,8 @@ struct ath12k_dp_rx_reo_cmd {
enum hal_reo_cmd_status status);
};
+#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
+
#define ATH12K_DP_RX_REO_DESC_FREE_THRES 64
#define ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS 1000
@@ -77,24 +84,6 @@ struct ath12k_dp_rx_rfc1042_hdr {
__be16 snap_type;
} __packed;
-struct ath12k_dp_rx_info {
- struct ieee80211_rx_status *rx_status;
- u32 phy_meta_data;
- u16 peer_id;
- u8 decap_type;
- u8 pkt_type;
- u8 sgi;
- u8 rate_mcs;
- u8 bw;
- u8 nss;
- u8 addr2[ETH_ALEN];
- u8 tid;
- bool ip_csum_fail;
- bool l4_csum_fail;
- bool is_mcbc;
- bool addr2_present;
-};
-
static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
{
u32 ret = 0;
@@ -117,6 +106,109 @@ static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
return ret;
}
+static inline bool ath12k_dp_rx_h_more_frags(struct ath12k_hal *hal,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + hal->hal_desc_sz);
+ return ieee80211_has_morefrags(hdr->frame_control);
+}
+
+static inline u16 ath12k_dp_rx_h_frag_no(struct ath12k_hal *hal,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + hal->hal_desc_sz);
+ return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+}
+
+static inline u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hal.ops->rx_desc_get_l3_pad_bytes(desc);
+}
+
+static inline void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_hal *hal,
+ struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ hal->ops->rx_desc_copy_end_tlv(fdesc, ldesc);
+}
+
+static inline void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_hal *hal,
+ struct hal_rx_desc *desc,
+ u16 len)
+{
+ hal->ops->rx_desc_set_msdu_len(desc, len);
+}
+
+static inline u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ return ab->hal.ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
+}
+
+static inline void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_hal *hal,
+ struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ hal->ops->rx_desc_get_dot11_hdr(desc, hdr);
+}
+
+static inline void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_hal *hal,
+ struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ hal->ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
+}
+
+static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_hal *hal,
+ struct hal_rx_desc *desc)
+{
+ return hal->ops->rx_desc_get_msdu_src_link_id(desc);
+}
+
+static inline void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(skb_list)))
+ dev_kfree_skb_any(skb);
+}
+
+static inline
+void ath12k_dp_extract_rx_desc_data(struct ath12k_hal *hal,
+ struct hal_rx_desc_data *rx_info,
+ struct hal_rx_desc *rx_desc,
+ struct hal_rx_desc *ldesc)
+{
+ hal->ops->extract_rx_desc_data(rx_info, rx_desc, ldesc);
+}
+
+void ath12k_dp_rx_h_undecap(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ enum hal_encrypt_type enctype,
+ bool decrypted,
+ struct hal_rx_desc_data *rx_info);
+void ath12k_dp_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev, struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct hal_rx_desc_data *rx_info);
+bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_dp *dp,
+ struct hal_rx_desc *rx_desc,
+ struct sk_buff *msdu,
+ struct hal_rx_desc_data *rx_info);
+u64 ath12k_dp_rx_h_get_pn(struct ath12k_dp *dp, struct sk_buff *skb);
+void ath12k_dp_rx_h_sort_frags(struct ath12k_hal *hal,
+ struct sk_buff_head *frag_list,
+ struct sk_buff *cur_frag);
+void ath12k_dp_rx_h_undecap_frag(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype, u32 flags);
+int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
+ struct ieee80211_hdr *hdr, u8 *data,
+ size_t data_len, u8 *mic);
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
struct ieee80211_ampdu_params *params,
u8 link_id);
@@ -127,14 +219,12 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key);
-void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer);
+void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_dp_link_peer *peer);
void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
- struct ath12k_peer *peer, u8 tid);
+ struct ath12k_dp_link_peer *peer, u8 tid);
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
u8 tid, u32 ba_win_sz, u16 ssn,
enum hal_pn_type pn_type);
-void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
- struct sk_buff *skb);
int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab);
void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab);
int ath12k_dp_rx_htt_setup(struct ath12k_base *ab);
@@ -143,15 +233,7 @@ void ath12k_dp_rx_free(struct ath12k_base *ab);
int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int pdev_idx);
void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int pdev_idx);
void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab);
-void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab);
-int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
- struct napi_struct *napi, int budget);
-int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
- int budget);
-int ath12k_dp_rx_process(struct ath12k_base *ab, int mac_id,
- struct napi_struct *napi,
- int budget);
-int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
+int ath12k_dp_rx_bufs_replenish(struct ath12k_dp *dp,
struct dp_rxdma_ring *rx_ring,
struct list_head *used_list,
int req_entries);
@@ -160,32 +242,27 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
struct hal_rx_desc *desc);
-struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
- struct ath12k_dp_rx_info *rx_info);
+struct ath12k_dp_link_peer *
+ath12k_dp_rx_h_find_link_peer(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
+ struct hal_rx_desc_data *rx_info);
u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
struct hal_rx_desc *desc);
u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
struct hal_rx_desc *desc);
-void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info);
-int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
-int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
-
-int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
- int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
- const void *ptr, void *data),
- void *data);
-void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc,
- struct ath12k_dp_rx_info *rx_info);
-
-int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype);
+int ath12k_dp_rx_crypto_mic_len(struct ath12k_dp *dp, enum hal_encrypt_type enctype);
u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
struct hal_rx_desc *rx_desc);
-bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
- struct hal_rx_desc *rx_desc);
-int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
- struct ath12k_buffer_addr *buf_addr_info,
- enum hal_wbm_rel_bm_act action);
-bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
- struct hal_rx_desc *rx_desc);
+void ath12k_dp_rx_h_ppdu(struct ath12k_pdev_dp *dp_pdev,
+ struct hal_rx_desc_data *rx_info);
+struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
+ struct sk_buff *first);
+void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status);
+void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status);
+void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp);
+void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq,
+ struct ath12k_dp_rx_tid *rx_tid,
+ bool active);
+void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid);
#endif /* ATH12K_DP_RX_H */
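The new ath12k_dp_clean_up_skb_list() helper declared above simply drains a queue and frees every buffer. A minimal stand-alone usage sketch, not part of the patch; the list name and the allocation are illustrative only:

	/* Build a private skb queue, then drop everything on an error path.
	 * dev_kfree_skb_any() inside the helper makes this safe in both
	 * process and IRQ context.
	 */
	static void example_drop_queue(void)
	{
		struct sk_buff_head list;
		struct sk_buff *skb;

		__skb_queue_head_init(&list);

		skb = alloc_skb(128, GFP_ATOMIC);
		if (skb)
			__skb_queue_tail(&list, skb);

		ath12k_dp_clean_up_skb_list(&list);	/* list is now empty */
	}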
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index abc84ca8467a..c10da6195c9c 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
@@ -12,7 +12,7 @@
#include "peer.h"
#include "mac.h"
-static enum hal_tcl_encap_type
+enum hal_tcl_encap_type
ath12k_dp_tx_get_encap_type(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -25,8 +25,9 @@ ath12k_dp_tx_get_encap_type(struct ath12k_base *ab, struct sk_buff *skb)
return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}
+EXPORT_SYMBOL(ath12k_dp_tx_get_encap_type);
-static void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb)
+void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
u8 *qos_ctl;
@@ -42,8 +43,9 @@ static void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb)
hdr = (void *)skb->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}
+EXPORT_SYMBOL(ath12k_dp_tx_encap_nwifi);
-static u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
+u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb);
@@ -55,6 +57,7 @@ static u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
else
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}
+EXPORT_SYMBOL(ath12k_dp_tx_get_tid);
enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher)
{
@@ -77,19 +80,21 @@ enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher)
return HAL_ENCRYPT_TYPE_OPEN;
}
}
+EXPORT_SYMBOL(ath12k_dp_tx_get_encrypt_type);
-static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
- struct ath12k_tx_desc_info *tx_desc,
- u8 pool_id)
+void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
+ struct ath12k_tx_desc_info *tx_desc,
+ u8 pool_id)
{
spin_lock_bh(&dp->tx_desc_lock[pool_id]);
tx_desc->skb_ext_desc = NULL;
list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}
+EXPORT_SYMBOL(ath12k_dp_tx_release_txbuf);
-static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
- u8 pool_id)
+struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
+ u8 pool_id)
{
struct ath12k_tx_desc_info *desc;
@@ -108,28 +113,9 @@ static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *
return desc;
}
+EXPORT_SYMBOL(ath12k_dp_tx_assign_buffer);
-static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
- struct hal_tx_msdu_ext_desc *tcl_ext_cmd,
- struct hal_tx_info *ti)
-{
- tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr,
- HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO);
- tcl_ext_cmd->info1 = le32_encode_bits(0x0,
- HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI) |
- le32_encode_bits(ti->data_len,
- HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
-
- tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
- le32_encode_bits(ti->encap_type,
- HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
- le32_encode_bits(ti->encrypt_type,
- HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
-}
-
-#define HTT_META_DATA_ALIGNMENT 0x8
-
-static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
+void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
{
struct sk_buff *tail;
void *metadata;
@@ -141,29 +127,7 @@ static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
memset(metadata, 0, tail_len);
return metadata;
}
-
-/* Preparing HTT Metadata when utilized with ext MSDU */
-static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
-{
- struct hal_tx_msdu_metadata *desc_ext;
- u8 htt_desc_size;
- /* Size rounded up to a multiple of 8 bytes */
- u8 htt_desc_size_aligned;
-
- htt_desc_size = sizeof(struct hal_tx_msdu_metadata);
- htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);
-
- desc_ext = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
- if (!desc_ext)
- return -ENOMEM;
-
- desc_ext->info0 = le32_encode_bits(1, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG) |
- le32_encode_bits(0, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE) |
- le32_encode_bits(1,
- HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL);
-
- return 0;
-}
+EXPORT_SYMBOL(ath12k_dp_metadata_align_skb);
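The now-exported ath12k_dp_metadata_align_skb() reserves zeroed tail room for HTT metadata; the removed ath12k_dp_prepare_htt_metadata() above shows the intended call pattern. A hedged sketch of that pattern, with HTT_META_DATA_ALIGNMENT (8) from the removed hunk and the kernel's ALIGN() macro:

	static int example_reserve_htt_meta(struct sk_buff *skb)
	{
		/* round the descriptor size up to the 8-byte HTT alignment
		 * before reserving tail room
		 */
		u8 size = sizeof(struct hal_tx_msdu_metadata);
		u8 size_aligned = ALIGN(size, 8);
		void *meta = ath12k_dp_metadata_align_skb(skb, size_aligned);

		if (!meta)
			return -ENOMEM;

		/* fill HAL_TX_MSDU_METADATA_* bit fields here */
		return 0;
	}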
static void ath12k_dp_tx_move_payload(struct sk_buff *skb,
unsigned long delta,
@@ -182,10 +146,9 @@ static void ath12k_dp_tx_move_payload(struct sk_buff *skb,
}
}
-static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
- struct sk_buff **pskb)
+int ath12k_dp_tx_align_payload(struct ath12k_dp *dp, struct sk_buff **pskb)
{
- u32 iova_mask = ab->hw_params->iova_mask;
+ u32 iova_mask = dp->hw_params->iova_mask;
unsigned long offset, delta1, delta2;
struct sk_buff *skb2, *skb = *pskb;
unsigned int headroom = skb_headroom(skb);
@@ -218,1564 +181,33 @@ static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
out:
return ret;
}
+EXPORT_SYMBOL(ath12k_dp_tx_align_payload);
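A caller-side sketch of how the exported alignment helper was used by the (now removed) ath12k_dp_tx(): realign only when skb->data violates the target's IOVA mask, and keep the original buffer as a last resort. This mirrors the removed logic rather than adding new driver behaviour:

	static void example_align(struct ath12k_dp *dp, struct sk_buff **pskb,
				  struct ieee80211_hdr **phdr)
	{
		u32 iova_mask = dp->hw_params->iova_mask;
		struct sk_buff *skb = *pskb;

		if (!iova_mask || !((unsigned long)skb->data & iova_mask))
			return;		/* already acceptable for DMA */

		if (ath12k_dp_tx_align_payload(dp, pskb)) {
			/* don't bail out; DMA may still accept the
			 * unaligned buffer
			 */
			return;
		}

		/* skb->data may have moved; refresh cached pointers */
		*phdr = (void *)(*pskb)->data;
	}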
-int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
- struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
- bool is_mcast)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
- struct hal_tx_info ti = {};
- struct ath12k_tx_desc_info *tx_desc;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
- struct hal_tcl_data_cmd *hal_tcl_desc;
- struct hal_tx_msdu_ext_desc *msg;
- struct sk_buff *skb_ext_desc = NULL;
- struct hal_srng *tcl_ring;
- struct ieee80211_hdr *hdr = (void *)skb->data;
- struct ath12k_vif *ahvif = arvif->ahvif;
- struct dp_tx_ring *tx_ring;
- u8 pool_id;
- u8 hal_ring_id;
- int ret;
- u8 ring_selector, ring_map = 0;
- bool tcl_ring_retry;
- bool msdu_ext_desc = false;
- bool add_htt_metadata = false;
- u32 iova_mask = ab->hw_params->iova_mask;
- bool is_diff_encap = false;
- bool is_null_frame = false;
-
- if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
- return -ESHUTDOWN;
-
- if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
- !ieee80211_is_data(hdr->frame_control))
- return -EOPNOTSUPP;
-
- pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
-
- /* Let the default ring selection be based on the current processor
- * number: one of the 3 tcl rings is selected based on
- * smp_processor_id(). In case that ring is full or busy, we
- * resort to the other available rings.
- * If all rings are full, we drop the packet.
- * TODO: Add throttling logic when all rings are full
- */
- ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);
-
-tcl_ring_sel:
- tcl_ring_retry = false;
- ti.ring_id = ring_selector % ab->hw_params->max_tx_ring;
-
- ring_map |= BIT(ti.ring_id);
- ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;
-
- tx_ring = &dp->tx_ring[ti.ring_id];
-
- tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
- if (!tx_desc)
- return -ENOMEM;
-
- ti.bank_id = arvif->bank_id;
- ti.meta_data_flags = arvif->tcl_metadata;
-
- if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
- test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
- if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
- ti.encrypt_type =
- ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
-
- if (ieee80211_has_protected(hdr->frame_control))
- skb_put(skb, IEEE80211_CCMP_MIC_LEN);
- } else {
- ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
- }
-
- msdu_ext_desc = true;
- }
-
- if (gsn_valid) {
- /* Reset and initialize meta_data_flags with Global Sequence
- * Number (GSN) info.
- */
- ti.meta_data_flags =
- u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM,
- HTT_TCL_META_DATA_TYPE) |
- u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
- }
-
- ti.encap_type = ath12k_dp_tx_get_encap_type(ab, skb);
- ti.addr_search_flags = arvif->hal_addr_search_flags;
- ti.search_type = arvif->search_type;
- ti.type = HAL_TCL_DESC_TYPE_BUFFER;
- ti.pkt_offset = 0;
- ti.lmac_id = ar->lmac_id;
-
- ti.vdev_id = arvif->vdev_id;
- if (gsn_valid)
- ti.vdev_id += HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID;
-
- ti.bss_ast_hash = arvif->ast_hash;
- ti.bss_ast_idx = arvif->ast_idx;
- ti.dscp_tid_tbl_idx = 0;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL &&
- ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
- ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN) |
- u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN) |
- u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN) |
- u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN) |
- u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN);
- }
-
- ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE);
-
- ti.tid = ath12k_dp_tx_get_tid(skb);
-
- switch (ti.encap_type) {
- case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
- is_null_frame = ieee80211_is_nullfunc(hdr->frame_control);
- if (ahvif->vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) {
- if (skb->protocol == cpu_to_be16(ETH_P_PAE) || is_null_frame)
- is_diff_encap = true;
-
- /* Firmware expects an msdu ext descriptor for nwifi/raw packets
- * received in ETH mode. Without it, tx failures were observed
- * for multicast packets in ETH mode.
- */
- msdu_ext_desc = true;
- } else {
- ath12k_dp_tx_encap_nwifi(skb);
- }
- break;
- case HAL_TCL_ENCAP_TYPE_RAW:
- if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
- ret = -EINVAL;
- goto fail_remove_tx_buf;
- }
- break;
- case HAL_TCL_ENCAP_TYPE_ETHERNET:
- /* no need to encap */
- break;
- case HAL_TCL_ENCAP_TYPE_802_3:
- default:
- /* TODO: Take care of other encap modes as well */
- ret = -EINVAL;
- atomic_inc(&ab->device_stats.tx_err.misc_fail);
- goto fail_remove_tx_buf;
- }
-
- if (iova_mask &&
- (unsigned long)skb->data & iova_mask) {
- ret = ath12k_dp_tx_align_payload(ab, &skb);
- if (ret) {
- ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
- /* don't bail out; give the original buffer a
- * chance even though it is unaligned.
- */
- goto map;
- }
-
- /* hdr points to the wrong place after realignment,
- * so refresh it for later use.
- */
- hdr = (void *)skb->data;
- }
-map:
- ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ab->dev, ti.paddr)) {
- atomic_inc(&ab->device_stats.tx_err.misc_fail);
- ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
- ret = -ENOMEM;
- goto fail_remove_tx_buf;
- }
-
- if ((!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
- !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
- !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
- ieee80211_has_protected(hdr->frame_control)) ||
- is_diff_encap) {
- /* Firmware does not expect metadata for QoS null
- * nwifi packets received in ETH encap mode.
- */
- if (is_null_frame && msdu_ext_desc)
- goto skip_htt_meta;
-
- /* Add metadata for sw-encrypted vlan group traffic
- * and EAPOL nwifi packets received in ETH encap mode.
- */
- add_htt_metadata = true;
- msdu_ext_desc = true;
- ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
-skip_htt_meta:
- ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
- ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
- ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
- }
-
- tx_desc->skb = skb;
- tx_desc->mac_id = ar->pdev_idx;
- ti.desc_id = tx_desc->desc_id;
- ti.data_len = skb->len;
- skb_cb->paddr = ti.paddr;
- skb_cb->vif = ahvif->vif;
- skb_cb->ar = ar;
-
- if (msdu_ext_desc) {
- skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc));
- if (!skb_ext_desc) {
- ret = -ENOMEM;
- goto fail_unmap_dma;
- }
-
- skb_put(skb_ext_desc, sizeof(struct hal_tx_msdu_ext_desc));
- memset(skb_ext_desc->data, 0, skb_ext_desc->len);
-
- msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
- ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
-
- if (add_htt_metadata) {
- ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc);
- if (ret < 0) {
- ath12k_dbg(ab, ATH12K_DBG_DP_TX,
- "Failed to add HTT meta data, dropping packet\n");
- goto fail_free_ext_skb;
- }
- }
-
- ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
- skb_ext_desc->len, DMA_TO_DEVICE);
- ret = dma_mapping_error(ab->dev, ti.paddr);
- if (ret)
- goto fail_free_ext_skb;
-
- ti.data_len = skb_ext_desc->len;
- ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
-
- skb_cb->paddr_ext_desc = ti.paddr;
- tx_desc->skb_ext_desc = skb_ext_desc;
- }
-
- hal_ring_id = tx_ring->tcl_data_ring.ring_id;
- tcl_ring = &ab->hal.srng_list[hal_ring_id];
-
- spin_lock_bh(&tcl_ring->lock);
-
- ath12k_hal_srng_access_begin(ab, tcl_ring);
-
- hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
- if (!hal_tcl_desc) {
- /* NOTE: It is highly unlikely we'll be running out of tcl_ring
- * desc because the desc is directly enqueued onto hw queue.
- */
- ath12k_hal_srng_access_end(ab, tcl_ring);
- ab->device_stats.tx_err.desc_na[ti.ring_id]++;
- spin_unlock_bh(&tcl_ring->lock);
- ret = -ENOMEM;
-
- /* When the tcl ring has just turned out to be full, checking
- * another ring for available descriptors is cheaper than
- * checking this ring before every packet tx.
- * Restart ring selection if some rings have not been checked yet.
- */
- if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
- ab->hw_params->tcl_ring_retry) {
- tcl_ring_retry = true;
- ring_selector++;
- }
-
- goto fail_unmap_dma_ext;
- }
-
- spin_lock_bh(&arvif->link_stats_lock);
- arvif->link_stats.tx_encap_type[ti.encap_type]++;
- arvif->link_stats.tx_encrypt_type[ti.encrypt_type]++;
- arvif->link_stats.tx_desc_type[ti.type]++;
-
- if (is_mcast)
- arvif->link_stats.tx_bcast_mcast++;
- else
- arvif->link_stats.tx_enqueued++;
- spin_unlock_bh(&arvif->link_stats_lock);
-
- ab->device_stats.tx_enqueued[ti.ring_id]++;
-
- ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
-
- ath12k_hal_srng_access_end(ab, tcl_ring);
-
- spin_unlock_bh(&tcl_ring->lock);
-
- ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ",
- skb->data, skb->len);
-
- atomic_inc(&ar->dp.num_tx_pending);
-
- return 0;
-
-fail_unmap_dma_ext:
- if (skb_cb->paddr_ext_desc)
- dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- skb_ext_desc->len,
- DMA_TO_DEVICE);
-fail_free_ext_skb:
- kfree_skb(skb_ext_desc);
-
-fail_unmap_dma:
- dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
-
-fail_remove_tx_buf:
- ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
-
- spin_lock_bh(&arvif->link_stats_lock);
- arvif->link_stats.tx_dropped++;
- spin_unlock_bh(&arvif->link_stats_lock);
-
- if (tcl_ring_retry)
- goto tcl_ring_sel;
-
- return ret;
-}
-
-static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
- struct dp_tx_ring *tx_ring,
- struct ath12k_tx_desc_params *desc_params)
+void ath12k_dp_tx_free_txbuf(struct ath12k_dp *dp,
+ struct dp_tx_ring *tx_ring,
+ struct ath12k_tx_desc_params *desc_params)
{
- struct ath12k *ar;
+ struct ath12k_pdev_dp *dp_pdev;
struct sk_buff *msdu = desc_params->skb;
struct ath12k_skb_cb *skb_cb;
- u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params->mac_id);
+ u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(dp->hw_params, desc_params->mac_id);
skb_cb = ATH12K_SKB_CB(msdu);
- ar = ab->pdevs[pdev_id].ar;
- dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ dma_unmap_single(dp->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->paddr_ext_desc) {
- dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ dma_unmap_single(dp->dev, skb_cb->paddr_ext_desc,
desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
dev_kfree_skb_any(desc_params->skb_ext_desc);
}
- ieee80211_free_txskb(ar->ah->hw, msdu);
-
- if (atomic_dec_and_test(&ar->dp.num_tx_pending))
- wake_up(&ar->dp.tx_empty_waitq);
-}
-
-static void
-ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
- struct ath12k_tx_desc_params *desc_params,
- struct dp_tx_ring *tx_ring,
- struct ath12k_dp_htt_wbm_tx_status *ts,
- u16 peer_id)
-{
- struct ieee80211_tx_info *info;
- struct ath12k_link_vif *arvif;
- struct ath12k_skb_cb *skb_cb;
- struct ieee80211_vif *vif;
- struct ath12k_vif *ahvif;
- struct ath12k *ar;
- struct sk_buff *msdu = desc_params->skb;
- s32 noise_floor;
- struct ieee80211_tx_status status = {};
- struct ath12k_peer *peer;
-
- skb_cb = ATH12K_SKB_CB(msdu);
- info = IEEE80211_SKB_CB(msdu);
-
- ar = skb_cb->ar;
- ab->device_stats.tx_completed[tx_ring->tcl_data_ring_id]++;
-
- if (atomic_dec_and_test(&ar->dp.num_tx_pending))
- wake_up(&ar->dp.tx_empty_waitq);
-
- dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->paddr_ext_desc) {
- dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(desc_params->skb_ext_desc);
- }
-
- vif = skb_cb->vif;
- if (vif) {
- ahvif = ath12k_vif_to_ahvif(vif);
- rcu_read_lock();
- arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
- if (arvif) {
- spin_lock_bh(&arvif->link_stats_lock);
- arvif->link_stats.tx_completed++;
- spin_unlock_bh(&arvif->link_stats_lock);
- }
- rcu_read_unlock();
- }
-
- memset(&info->status, 0, sizeof(info->status));
-
- if (ts->acked) {
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ts->ack_rssi;
-
- if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
- ab->wmi_ab.svc_map)) {
- spin_lock_bh(&ar->data_lock);
- noise_floor = ath12k_pdev_get_noise_floor(ar);
- spin_unlock_bh(&ar->data_lock);
-
- info->status.ack_signal += noise_floor;
- }
-
- info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
- } else {
- info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
- }
- }
- rcu_read_lock();
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find_by_id(ab, peer_id);
- if (!peer || !peer->sta) {
- ath12k_dbg(ab, ATH12K_DBG_DATA,
- "dp_tx: failed to find the peer with peer_id %d\n", peer_id);
- spin_unlock_bh(&ab->base_lock);
- ieee80211_free_txskb(ath12k_ar_to_hw(ar), msdu);
- goto exit;
- } else {
- status.sta = peer->sta;
- }
- spin_unlock_bh(&ab->base_lock);
-
- status.info = info;
- status.skb = msdu;
- ieee80211_tx_status_ext(ath12k_ar_to_hw(ar), &status);
-exit:
- rcu_read_unlock();
-}
-
-static void
-ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, void *desc,
- struct dp_tx_ring *tx_ring,
- struct ath12k_tx_desc_params *desc_params)
-{
- struct htt_tx_wbm_completion *status_desc;
- struct ath12k_dp_htt_wbm_tx_status ts = {};
- enum hal_wbm_htt_tx_comp_status wbm_status;
- u16 peer_id;
-
- status_desc = desc;
-
- wbm_status = le32_get_bits(status_desc->info0,
- HTT_TX_WBM_COMP_INFO0_STATUS);
- ab->device_stats.fw_tx_status[wbm_status]++;
-
- switch (wbm_status) {
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
- ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
- ts.ack_rssi = le32_get_bits(status_desc->info2,
- HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
-
- peer_id = le32_get_bits(((struct hal_wbm_completion_ring_tx *)desc)->
- info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
-
- ath12k_dp_tx_htt_tx_complete_buf(ab, desc_params, tx_ring, &ts, peer_id);
- break;
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH:
- ath12k_dp_tx_free_txbuf(ab, tx_ring, desc_params);
- break;
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
- /* This event is to be handled only when the driver decides to
- * use WDS offload functionality.
- */
- break;
- default:
- ath12k_warn(ab, "Unknown htt wbm tx status %d\n", wbm_status);
- break;
- }
-}
-
-static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_peer *peer;
- struct ieee80211_sta *sta;
- struct ath12k_sta *ahsta;
- struct ath12k_link_sta *arsta;
- struct rate_info txrate = {};
- u16 rate, ru_tones;
- u8 rate_idx = 0;
- int ret;
-
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find_by_id(ab, ts->peer_id);
- if (!peer || !peer->sta) {
- ath12k_dbg(ab, ATH12K_DBG_DP_TX,
- "failed to find the peer by id %u\n", ts->peer_id);
- spin_unlock_bh(&ab->base_lock);
- return;
- }
- sta = peer->sta;
- ahsta = ath12k_sta_to_ahsta(sta);
- arsta = &ahsta->deflink;
-
- /* Prefer the real NSS value in arsta->last_txrate.nss; if it is
- * invalid, fall back to the NSS value negotiated at assoc.
- */
- if (arsta->last_txrate.nss)
- txrate.nss = arsta->last_txrate.nss;
- else
- txrate.nss = arsta->peer_nss;
- spin_unlock_bh(&ab->base_lock);
-
- switch (ts->pkt_type) {
- case HAL_TX_RATE_STATS_PKT_TYPE_11A:
- case HAL_TX_RATE_STATS_PKT_TYPE_11B:
- ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs,
- ts->pkt_type,
- &rate_idx,
- &rate);
- if (ret < 0) {
- ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret);
- return;
- }
-
- txrate.legacy = rate;
- break;
- case HAL_TX_RATE_STATS_PKT_TYPE_11N:
- if (ts->mcs > ATH12K_HT_MCS_MAX) {
- ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs);
- return;
- }
-
- if (txrate.nss != 0)
- txrate.mcs = ts->mcs + 8 * (txrate.nss - 1);
-
- txrate.flags = RATE_INFO_FLAGS_MCS;
-
- if (ts->sgi)
- txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- break;
- case HAL_TX_RATE_STATS_PKT_TYPE_11AC:
- if (ts->mcs > ATH12K_VHT_MCS_MAX) {
- ath12k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs);
- return;
- }
-
- txrate.mcs = ts->mcs;
- txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
-
- if (ts->sgi)
- txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- break;
- case HAL_TX_RATE_STATS_PKT_TYPE_11AX:
- if (ts->mcs > ATH12K_HE_MCS_MAX) {
- ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs);
- return;
- }
-
- txrate.mcs = ts->mcs;
- txrate.flags = RATE_INFO_FLAGS_HE_MCS;
- txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi);
- break;
- case HAL_TX_RATE_STATS_PKT_TYPE_11BE:
- if (ts->mcs > ATH12K_EHT_MCS_MAX) {
- ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs);
- return;
- }
-
- txrate.mcs = ts->mcs;
- txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
- txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi);
- break;
- default:
- ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
- return;
- }
-
- txrate.bw = ath12k_mac_bw_to_mac80211_bw(ts->bw);
-
- if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
- txrate.bw = RATE_INFO_BW_HE_RU;
- ru_tones = ath12k_mac_he_convert_tones_to_ru_tones(ts->tones);
- txrate.he_ru_alloc =
- ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
- }
-
- if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) {
- txrate.bw = RATE_INFO_BW_EHT_RU;
- txrate.eht_ru_alloc =
- ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(ts->tones);
- }
-
- spin_lock_bh(&ab->base_lock);
- arsta->txrate = txrate;
- spin_unlock_bh(&ab->base_lock);
-}
-
-static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
- struct ath12k_tx_desc_params *desc_params,
- struct hal_tx_status *ts,
- int ring)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_hw *ah = ar->ah;
- struct ieee80211_tx_info *info;
- struct ath12k_link_vif *arvif;
- struct ath12k_skb_cb *skb_cb;
- struct ieee80211_vif *vif;
- struct ath12k_vif *ahvif;
- struct sk_buff *msdu = desc_params->skb;
- s32 noise_floor;
- struct ieee80211_tx_status status = {};
- struct ieee80211_rate_status status_rate = {};
- struct ath12k_peer *peer;
- struct ath12k_link_sta *arsta;
- struct ath12k_sta *ahsta;
- struct rate_info rate;
-
- if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
- /* Must not happen */
- return;
- }
-
- skb_cb = ATH12K_SKB_CB(msdu);
- ab->device_stats.tx_completed[ring]++;
-
- dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->paddr_ext_desc) {
- dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(desc_params->skb_ext_desc);
- }
-
- rcu_read_lock();
-
- if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
- ieee80211_free_txskb(ah->hw, msdu);
- goto exit;
- }
-
- if (!skb_cb->vif) {
- ieee80211_free_txskb(ah->hw, msdu);
- goto exit;
- }
-
- vif = skb_cb->vif;
- if (vif) {
- ahvif = ath12k_vif_to_ahvif(vif);
- arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
- if (arvif) {
- spin_lock_bh(&arvif->link_stats_lock);
- arvif->link_stats.tx_completed++;
- spin_unlock_bh(&arvif->link_stats_lock);
- }
- }
-
- info = IEEE80211_SKB_CB(msdu);
- memset(&info->status, 0, sizeof(info->status));
-
- /* skip tx rate update from ieee80211_status */
- info->status.rates[0].idx = -1;
-
- switch (ts->status) {
- case HAL_WBM_TQM_REL_REASON_FRAME_ACKED:
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ts->ack_rssi;
-
- if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
- ab->wmi_ab.svc_map)) {
- spin_lock_bh(&ar->data_lock);
- noise_floor = ath12k_pdev_get_noise_floor(ar);
- spin_unlock_bh(&ar->data_lock);
-
- info->status.ack_signal += noise_floor;
- }
-
- info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
- }
- break;
- case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
- if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
- info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
- break;
- }
- fallthrough;
- case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
- case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
- case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
- /* The failure status is due to an internal firmware tx failure;
- * drop the frame and do not report its status to the upper
- * layer.
- */
- ieee80211_free_txskb(ah->hw, msdu);
- goto exit;
- default:
- ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n",
- ts->status);
- break;
- }
-
- /* NOTE: Tx rate status reporting. The tx completion status does
- * not carry the information (for example, nss) needed to build
- * the tx rate. It might end up being reported out-of-band via
- * HTT stats.
- */
-
- ath12k_dp_tx_update_txcompl(ar, ts);
-
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find_by_id(ab, ts->peer_id);
- if (!peer || !peer->sta) {
- ath12k_err(ab,
- "dp_tx: failed to find the peer with peer_id %d\n",
- ts->peer_id);
- spin_unlock_bh(&ab->base_lock);
- ieee80211_free_txskb(ath12k_ar_to_hw(ar), msdu);
- goto exit;
- }
- ahsta = ath12k_sta_to_ahsta(peer->sta);
- arsta = &ahsta->deflink;
-
- spin_unlock_bh(&ab->base_lock);
-
- status.sta = peer->sta;
- status.info = info;
- status.skb = msdu;
- rate = arsta->last_txrate;
-
- status_rate.rate_idx = rate;
- status_rate.try_count = 1;
-
- status.rates = &status_rate;
- status.n_rates = 1;
- ieee80211_tx_status_ext(ath12k_ar_to_hw(ar), &status);
-
-exit:
- rcu_read_unlock();
-}
-
-static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
- struct hal_wbm_completion_ring_tx *desc,
- struct hal_tx_status *ts)
-{
- u32 info0 = le32_to_cpu(desc->rate_stats.info0);
-
- ts->buf_rel_source =
- le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
- if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
- ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
- return;
-
- if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
- return;
-
- ts->status = le32_get_bits(desc->info0,
- HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
-
- ts->ppdu_id = le32_get_bits(desc->info1,
- HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
-
- ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
-
- ts->ack_rssi = le32_get_bits(desc->info2,
- HAL_WBM_COMPL_TX_INFO2_ACK_FRAME_RSSI);
-
- if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) {
- ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE);
- ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS);
- ts->sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI);
- ts->bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW);
- ts->tones = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_TONES_IN_RU);
- ts->ofdma = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_OFDMA_TX);
- }
-}
-
-void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
-{
- struct ath12k *ar;
- struct ath12k_dp *dp = &ab->dp;
- int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
- struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
- struct ath12k_tx_desc_info *tx_desc = NULL;
- struct hal_tx_status ts = {};
- struct ath12k_tx_desc_params desc_params;
- struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
- struct hal_wbm_release_ring *desc;
- u8 pdev_id;
- u64 desc_va;
- enum hal_wbm_rel_src_module buf_rel_source;
- enum hal_wbm_tqm_rel_reason rel_status;
-
- spin_lock_bh(&status_ring->lock);
-
- ath12k_hal_srng_access_begin(ab, status_ring);
-
- while (ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head) !=
- tx_ring->tx_status_tail) {
- desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
- if (!desc)
- break;
-
- memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
- desc, sizeof(*desc));
- tx_ring->tx_status_head =
- ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head);
- }
-
- if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
- (ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head) ==
- tx_ring->tx_status_tail)) {
- /* TODO: Process pending tx_status messages when kfifo_is_full() */
- ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
- }
-
- ath12k_hal_srng_access_end(ab, status_ring);
-
- spin_unlock_bh(&status_ring->lock);
-
- while (ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_tail) !=
- tx_ring->tx_status_head) {
- struct hal_wbm_completion_ring_tx *tx_status;
- u32 desc_id;
-
- tx_ring->tx_status_tail =
- ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_tail);
- tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
- ath12k_dp_tx_status_parse(ab, tx_status, &ts);
-
- if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) {
- /* HW done cookie conversion */
- desc_va = ((u64)le32_to_cpu(tx_status->buf_va_hi) << 32 |
- le32_to_cpu(tx_status->buf_va_lo));
- tx_desc = (struct ath12k_tx_desc_info *)((unsigned long)desc_va);
- } else {
- /* SW does cookie conversion to VA */
- desc_id = le32_get_bits(tx_status->buf_va_hi,
- BUFFER_ADDR_INFO1_SW_COOKIE);
-
- tx_desc = ath12k_dp_get_tx_desc(ab, desc_id);
- }
- if (!tx_desc) {
- ath12k_warn(ab, "unable to retrieve tx_desc!");
- continue;
- }
-
- desc_params.mac_id = tx_desc->mac_id;
- desc_params.skb = tx_desc->skb;
- desc_params.skb_ext_desc = tx_desc->skb_ext_desc;
-
- /* Find the HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE value */
- buf_rel_source = le32_get_bits(tx_status->info0,
- HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE);
- ab->device_stats.tx_wbm_rel_source[buf_rel_source]++;
-
- rel_status = le32_get_bits(tx_status->info0,
- HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
- ab->device_stats.tqm_rel_reason[rel_status]++;
-
- /* Release the descriptor as soon as the necessary info has been
- * extracted, to reduce contention.
- */
- ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
- if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
- ath12k_dp_tx_process_htt_tx_complete(ab, (void *)tx_status,
- tx_ring, &desc_params);
- continue;
- }
-
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params.mac_id);
- ar = ab->pdevs[pdev_id].ar;
-
- if (atomic_dec_and_test(&ar->dp.num_tx_pending))
- wake_up(&ar->dp.tx_empty_waitq);
-
- ath12k_dp_tx_complete_msdu(ar, &desc_params, &ts,
- tx_ring->tcl_data_ring_id);
- }
-}
-
-static int
-ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
- int mac_id, u32 ring_id,
- enum hal_ring_type ring_type,
- enum htt_srng_ring_type *htt_ring_type,
- enum htt_srng_ring_id *htt_ring_id)
-{
- int ret = 0;
-
- switch (ring_type) {
- case HAL_RXDMA_BUF:
- /* for some targets, the host hands rx buffers to the fw and the
- * fw fills the rxbuf ring for each rxdma
- */
- if (!ab->hw_params->rx_mac_buf_ring) {
- if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
- ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
- ret = -EINVAL;
- }
- *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
- *htt_ring_type = HTT_SW_TO_HW_RING;
- } else {
- if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
- *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
- *htt_ring_type = HTT_SW_TO_SW_RING;
- } else {
- *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
- *htt_ring_type = HTT_SW_TO_HW_RING;
- }
- }
- break;
- case HAL_RXDMA_DST:
- *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
- *htt_ring_type = HTT_HW_TO_SW_RING;
- break;
- case HAL_RXDMA_MONITOR_BUF:
- *htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
- *htt_ring_type = HTT_SW_TO_HW_RING;
- break;
- case HAL_RXDMA_MONITOR_STATUS:
- *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
- *htt_ring_type = HTT_SW_TO_HW_RING;
- break;
- case HAL_RXDMA_MONITOR_DST:
- *htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
- *htt_ring_type = HTT_HW_TO_SW_RING;
- break;
- case HAL_RXDMA_MONITOR_DESC:
- *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
- *htt_ring_type = HTT_SW_TO_HW_RING;
- break;
- default:
- ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
- ret = -EINVAL;
- }
- return ret;
-}
-
-int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
- int mac_id, enum hal_ring_type ring_type)
-{
- struct htt_srng_setup_cmd *cmd;
- struct hal_srng *srng = &ab->hal.srng_list[ring_id];
- struct hal_srng_params params;
- struct sk_buff *skb;
- u32 ring_entry_sz;
- int len = sizeof(*cmd);
- dma_addr_t hp_addr, tp_addr;
- enum htt_srng_ring_type htt_ring_type;
- enum htt_srng_ring_id htt_ring_id;
- int ret;
-
- skb = ath12k_htc_alloc_skb(ab, len);
- if (!skb)
- return -ENOMEM;
-
- memset(&params, 0, sizeof(params));
- ath12k_hal_srng_get_params(ab, srng, &params);
-
- hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
- tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
-
- ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
- ring_type, &htt_ring_type,
- &htt_ring_id);
- if (ret)
- goto err_free;
+ guard(rcu)();
- skb_put(skb, len);
- cmd = (struct htt_srng_setup_cmd *)skb->data;
- cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
- HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
- if (htt_ring_type == HTT_SW_TO_HW_RING ||
- htt_ring_type == HTT_HW_TO_SW_RING)
- cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
- HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
- else
- cmd->info0 |= le32_encode_bits(mac_id,
- HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
- cmd->info0 |= le32_encode_bits(htt_ring_type,
- HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
- cmd->info0 |= le32_encode_bits(htt_ring_id,
- HTT_SRNG_SETUP_CMD_INFO0_RING_ID);
-
- cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
- HAL_ADDR_LSB_REG_MASK);
-
- cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
- HAL_ADDR_MSB_REG_SHIFT);
-
- ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
- if (ret < 0)
- goto err_free;
-
- ring_entry_sz = ret;
-
- ring_entry_sz >>= 2;
- cmd->info1 = le32_encode_bits(ring_entry_sz,
- HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
- cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
- HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
- cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
- HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
- cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
- HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
- cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
- HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
- if (htt_ring_type == HTT_SW_TO_HW_RING)
- cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);
-
- cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
- cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));
-
- cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
- cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));
-
- cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
- cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
- cmd->msi_data = cpu_to_le32(params.msi_data);
-
- cmd->intr_info =
- le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
- HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
- cmd->intr_info |=
- le32_encode_bits(params.intr_timer_thres_us >> 3,
- HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);
-
- cmd->info2 = 0;
- if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
- cmd->info2 = le32_encode_bits(params.low_threshold,
- HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
- }
+ dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx);
- ath12k_dbg(ab, ATH12K_DBG_HAL,
- "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
- __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
- cmd->msi_data);
+ ieee80211_free_txskb(ath12k_pdev_dp_to_hw(dp_pdev), msdu);
- ath12k_dbg(ab, ATH12K_DBG_HAL,
- "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
- ring_id, ring_type, cmd->intr_info, cmd->info2);
-
- ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
- if (ret)
- goto err_free;
-
- return 0;
-
-err_free:
- dev_kfree_skb_any(skb);
-
- return ret;
-}
-
-#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
-
-int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
-{
- struct ath12k_dp *dp = &ab->dp;
- struct sk_buff *skb;
- struct htt_ver_req_cmd *cmd;
- int len = sizeof(*cmd);
- u32 metadata_version;
- int ret;
-
- init_completion(&dp->htt_tgt_version_received);
-
- skb = ath12k_htc_alloc_skb(ab, len);
- if (!skb)
- return -ENOMEM;
-
- skb_put(skb, len);
- cmd = (struct htt_ver_req_cmd *)skb->data;
- cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
- HTT_OPTION_TAG);
- metadata_version = ath12k_ftm_mode ? HTT_OPTION_TCL_METADATA_VER_V1 :
- HTT_OPTION_TCL_METADATA_VER_V2;
-
- cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
- HTT_OPTION_TAG) |
- le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
- HTT_OPTION_LEN) |
- le32_encode_bits(metadata_version,
- HTT_OPTION_VALUE);
-
- ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
- if (ret) {
- dev_kfree_skb_any(skb);
- return ret;
- }
-
- ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
- HTT_TARGET_VERSION_TIMEOUT_HZ);
- if (ret == 0) {
- ath12k_warn(ab, "htt target version request timed out\n");
- return -ETIMEDOUT;
- }
-
- if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
- ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
- dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
- struct sk_buff *skb;
- struct htt_ppdu_stats_cfg_cmd *cmd;
- int len = sizeof(*cmd);
- u8 pdev_mask;
- int ret;
- int i;
-
- for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
- skb = ath12k_htc_alloc_skb(ab, len);
- if (!skb)
- return -ENOMEM;
-
- skb_put(skb, len);
- cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
- cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
- HTT_PPDU_STATS_CFG_MSG_TYPE);
-
- pdev_mask = 1 << (i + ar->pdev_idx);
- cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
- cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);
-
- ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
- if (ret) {
- dev_kfree_skb_any(skb);
- return ret;
- }
- }
-
- return 0;
-}
-
-int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
- int mac_id, enum hal_ring_type ring_type,
- int rx_buf_size,
- struct htt_rx_ring_tlv_filter *tlv_filter)
-{
- struct htt_rx_ring_selection_cfg_cmd *cmd;
- struct hal_srng *srng = &ab->hal.srng_list[ring_id];
- struct hal_srng_params params;
- struct sk_buff *skb;
- int len = sizeof(*cmd);
- enum htt_srng_ring_type htt_ring_type;
- enum htt_srng_ring_id htt_ring_id;
- int ret;
-
- skb = ath12k_htc_alloc_skb(ab, len);
- if (!skb)
- return -ENOMEM;
-
- memset(&params, 0, sizeof(params));
- ath12k_hal_srng_get_params(ab, srng, &params);
-
- ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
- ring_type, &htt_ring_type,
- &htt_ring_id);
- if (ret)
- goto err_free;
-
- skb_put(skb, len);
- cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
- cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
- if (htt_ring_type == HTT_SW_TO_HW_RING ||
- htt_ring_type == HTT_HW_TO_SW_RING)
- cmd->info0 |=
- le32_encode_bits(DP_SW2HW_MACID(mac_id),
- HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
- else
- cmd->info0 |=
- le32_encode_bits(mac_id,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
- cmd->info0 |= le32_encode_bits(htt_ring_id,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
- cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
- HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
- cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
- HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
- cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID);
- cmd->info0 |=
- le32_encode_bits(tlv_filter->drop_threshold_valid,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL);
- cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON);
-
- cmd->info1 = le32_encode_bits(rx_buf_size,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
- cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
- cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
- cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
- cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
- cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
- cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
- cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
- cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
-
- cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD);
- cmd->info2 |=
- le32_encode_bits(tlv_filter->enable_log_mgmt_type,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE);
- cmd->info2 |=
- le32_encode_bits(tlv_filter->enable_log_ctrl_type,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE);
- cmd->info2 |=
- le32_encode_bits(tlv_filter->enable_log_data_type,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE);
-
- cmd->info3 =
- le32_encode_bits(tlv_filter->enable_rx_tlv_offset,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET);
- cmd->info3 |=
- le32_encode_bits(tlv_filter->rx_tlv_offset,
- HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET);
-
- if (tlv_filter->offset_valid) {
- cmd->rx_packet_offset =
- le32_encode_bits(tlv_filter->rx_packet_offset,
- HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);
-
- cmd->rx_packet_offset |=
- le32_encode_bits(tlv_filter->rx_header_offset,
- HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);
-
- cmd->rx_mpdu_offset =
- le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
- HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);
-
- cmd->rx_mpdu_offset |=
- le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
- HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);
-
- cmd->rx_msdu_offset =
- le32_encode_bits(tlv_filter->rx_msdu_end_offset,
- HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);
-
- cmd->rx_msdu_offset |=
- le32_encode_bits(tlv_filter->rx_msdu_start_offset,
- HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);
-
- cmd->rx_attn_offset =
- le32_encode_bits(tlv_filter->rx_attn_offset,
- HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
- }
-
- if (tlv_filter->rx_mpdu_start_wmask > 0 &&
- tlv_filter->rx_msdu_end_wmask > 0) {
- cmd->info2 |=
- le32_encode_bits(true,
- HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
- cmd->rx_mpdu_start_end_mask =
- le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
- HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
- /* mpdu_end is not used by any hardware so far;
- * assign it in the future if any chip starts
- * using it through hal ops
- */
- cmd->rx_mpdu_start_end_mask |=
- le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
- HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
- cmd->rx_msdu_end_word_mask =
- le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
- HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
- }
-
- ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
- if (ret)
- goto err_free;
-
- return 0;
-
-err_free:
- dev_kfree_skb_any(skb);
-
- return ret;
-}
-
-int
-ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
- struct htt_ext_stats_cfg_params *cfg_params,
- u64 cookie)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
- struct sk_buff *skb;
- struct htt_ext_stats_cfg_cmd *cmd;
- int len = sizeof(*cmd);
- int ret;
- u32 pdev_id;
-
- skb = ath12k_htc_alloc_skb(ab, len);
- if (!skb)
- return -ENOMEM;
-
- skb_put(skb, len);
-
- cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
- memset(cmd, 0, sizeof(*cmd));
- cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
-
- pdev_id = ath12k_mac_get_target_pdev_id(ar);
- cmd->hdr.pdev_mask = 1 << pdev_id;
-
- cmd->hdr.stats_type = type;
- cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
- cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
- cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
- cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
- cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
- cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));
-
- ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
- if (ret) {
- ath12k_warn(ab, "failed to send htt type stats request: %d",
- ret);
- dev_kfree_skb_any(skb);
- return ret;
- }
-
- return 0;
-}
-
-int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
-{
- struct ath12k_base *ab = ar->ab;
- int ret;
-
- ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
- if (ret) {
- ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
-{
- struct ath12k_base *ab = ar->ab;
- struct htt_rx_ring_tlv_filter tlv_filter = {};
- int ret, ring_id, i;
-
- tlv_filter.offset_valid = false;
-
- if (!reset) {
- tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;
-
- tlv_filter.drop_threshold_valid = true;
- tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE;
-
- tlv_filter.enable_log_mgmt_type = true;
- tlv_filter.enable_log_ctrl_type = true;
- tlv_filter.enable_log_data_type = true;
-
- tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
- tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
- tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;
-
- tlv_filter.enable_rx_tlv_offset = true;
- tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET;
-
- tlv_filter.pkt_filter_flags0 =
- HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
- HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
- tlv_filter.pkt_filter_flags1 =
- HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
- HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
- tlv_filter.pkt_filter_flags2 =
- HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
- HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
- tlv_filter.pkt_filter_flags3 =
- HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
- HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
- HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
- HTT_RX_MON_MO_DATA_FILTER_FLASG3;
- } else {
- tlv_filter = ath12k_mac_mon_status_filter_default;
-
- if (ath12k_debugfs_is_extd_rx_stats_enabled(ar))
- tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
- }
-
- if (ab->hw_params->rxdma1_enable) {
- for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
- ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
- ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
- ar->dp.mac_id + i,
- HAL_RXDMA_MONITOR_DST,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
- if (ret) {
- ath12k_err(ab,
- "failed to setup filter for monitor buf %d\n",
- ret);
- return ret;
- }
- }
- return 0;
- }
-
- if (!reset) {
- for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
- ring_id = ab->dp.rx_mac_buf_ring[i].ring_id;
- ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
- i,
- HAL_RXDMA_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
- if (ret) {
- ath12k_err(ab,
- "failed to setup filter for mon rx buf %d\n",
- ret);
- return ret;
- }
- }
- }
-
- for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
- ring_id = ab->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
- if (!reset) {
- tlv_filter.rx_filter =
- HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
- }
-
- ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
- i,
- HAL_RXDMA_MONITOR_STATUS,
- RX_MON_STATUS_BUF_SIZE,
- &tlv_filter);
- if (ret) {
- ath12k_err(ab,
- "failed to setup filter for mon status buf %d\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
- int mac_id, enum hal_ring_type ring_type,
- int tx_buf_size,
- struct htt_tx_ring_tlv_filter *htt_tlv_filter)
-{
- struct htt_tx_ring_selection_cfg_cmd *cmd;
- struct hal_srng *srng = &ab->hal.srng_list[ring_id];
- struct hal_srng_params params;
- struct sk_buff *skb;
- int len = sizeof(*cmd);
- enum htt_srng_ring_type htt_ring_type;
- enum htt_srng_ring_id htt_ring_id;
- int ret;
-
- skb = ath12k_htc_alloc_skb(ab, len);
- if (!skb)
- return -ENOMEM;
-
- memset(&params, 0, sizeof(params));
- ath12k_hal_srng_get_params(ab, srng, &params);
-
- ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
- ring_type, &htt_ring_type,
- &htt_ring_id);
-
- if (ret)
- goto err_free;
-
- skb_put(skb, len);
- cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;
- cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
- if (htt_ring_type == HTT_SW_TO_HW_RING ||
- htt_ring_type == HTT_HW_TO_SW_RING)
- cmd->info0 |=
- le32_encode_bits(DP_SW2HW_MACID(mac_id),
- HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
- else
- cmd->info0 |=
- le32_encode_bits(mac_id,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
- cmd->info0 |= le32_encode_bits(htt_ring_id,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
- cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
- HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
- cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
- HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);
-
- cmd->info1 |=
- le32_encode_bits(tx_buf_size,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);
-
- if (htt_tlv_filter->tx_mon_mgmt_filter) {
- cmd->info1 |=
- le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
- cmd->info1 |=
- le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
- cmd->info2 |=
- le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
- }
-
- if (htt_tlv_filter->tx_mon_data_filter) {
- cmd->info1 |=
- le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
- cmd->info1 |=
- le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
- cmd->info2 |=
- le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
- }
-
- if (htt_tlv_filter->tx_mon_ctrl_filter) {
- cmd->info1 |=
- le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
- cmd->info1 |=
- le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
- cmd->info2 |=
- le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
- HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
- }
-
- cmd->tlv_filter_mask_in0 =
- cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
- cmd->tlv_filter_mask_in1 =
- cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
- cmd->tlv_filter_mask_in2 =
- cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
- cmd->tlv_filter_mask_in3 =
- cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);
-
- ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
- if (ret)
- goto err_free;
-
- return 0;
-
-err_free:
- dev_kfree_skb_any(skb);
- return ret;
+ if (atomic_dec_and_test(&dp_pdev->num_tx_pending))
+ wake_up(&dp_pdev->tx_empty_waitq);
}
+EXPORT_SYMBOL(ath12k_dp_tx_free_txbuf);
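The reworked ath12k_dp_tx_free_txbuf() above switches from explicit rcu_read_lock()/rcu_read_unlock() to guard(rcu)() from the kernel's scope-based cleanup machinery (<linux/cleanup.h>), so the read-side lock is released automatically on every return path. A minimal stand-alone sketch of the idiom, using a hypothetical RCU-protected pointer:

	struct foo {
		int value;
	};

	static struct foo __rcu *foo_slot;

	static int foo_read_value(void)
	{
		guard(rcu)();	/* rcu_read_lock() held until function exit */

		struct foo *f = rcu_dereference(foo_slot);

		if (!f)
			return -ENOENT;	/* unlock happens automatically here */

		return f->value;	/* ... and here as well */
	}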
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h
index 10acdcf1fa8f..7cef20540179 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.h
@@ -1,41 +1,32 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_DP_TX_H
#define ATH12K_DP_TX_H
#include "core.h"
-#include "hal_tx.h"
struct ath12k_dp_htt_wbm_tx_status {
bool acked;
s8 ack_rssi;
};
-int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
-int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
- struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
- bool is_mcast);
-void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);
-
-int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask);
-int
-ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
- struct htt_ext_stats_cfg_params *cfg_params,
- u64 cookie);
-int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset);
-
-int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
- int mac_id, enum hal_ring_type ring_type,
- int rx_buf_size,
- struct htt_rx_ring_tlv_filter *tlv_filter);
void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id);
-int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
- int mac_id, enum hal_ring_type ring_type,
- int tx_buf_size,
- struct htt_tx_ring_tlv_filter *htt_tlv_filter);
-int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset);
+enum hal_tcl_encap_type
+ath12k_dp_tx_get_encap_type(struct ath12k_base *ab, struct sk_buff *skb);
+void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb);
+u8 ath12k_dp_tx_get_tid(struct sk_buff *skb);
+void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len);
+int ath12k_dp_tx_align_payload(struct ath12k_dp *dp, struct sk_buff **pskb);
+void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
+ struct ath12k_tx_desc_info *tx_desc,
+ u8 pool_id);
+struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
+ u8 pool_id);
+void ath12k_dp_tx_free_txbuf(struct ath12k_dp *dp,
+ struct dp_tx_ring *tx_ring,
+ struct ath12k_tx_desc_params *desc_params);
#endif
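The header now exports the descriptor pool helpers directly. A hedged sketch of the expected pairing, condensed from the removed ath12k_dp_tx() flow; the function name is illustrative and the DMA mapping stands in for any setup step that can fail:

	static int example_tx_prepare(struct ath12k_dp *dp, struct sk_buff *skb)
	{
		u8 pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
		struct ath12k_tx_desc_info *tx_desc;
		dma_addr_t paddr;

		tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
		if (!tx_desc)
			return -ENOMEM;		/* pool exhausted */

		paddr = dma_map_single(dp->dev, skb->data, skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(dp->dev, paddr)) {
			/* every error path must hand the descriptor back */
			ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
			return -ENOMEM;
		}

		tx_desc->skb = skb;
		return 0;
	}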
diff --git a/drivers/net/wireless/ath/ath12k/fw.c b/drivers/net/wireless/ath/ath12k/fw.c
index 5ac497f80cad..22074653cbb8 100644
--- a/drivers/net/wireless/ath/ath12k/fw.c
+++ b/drivers/net/wireless/ath/ath12k/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
- * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
@@ -121,6 +121,14 @@ static int ath12k_fw_request_firmware_api_n(struct ath12k_base *ab,
ab->fw.m3_data = data;
ab->fw.m3_len = ie_len;
break;
+ case ATH12K_FW_IE_AUX_UC_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found aux_uc image ie (%zd B)\n",
+ ie_len);
+
+ ab->fw.aux_uc_data = data;
+ ab->fw.aux_uc_len = ie_len;
+ break;
case ATH12K_FW_IE_AMSS_DUALMAC_IMAGE:
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"found dualmac fw image ie (%zd B)\n",
diff --git a/drivers/net/wireless/ath/ath12k/fw.h b/drivers/net/wireless/ath/ath12k/fw.h
index 7afaefed5086..e146d24dfea4 100644
--- a/drivers/net/wireless/ath/ath12k/fw.h
+++ b/drivers/net/wireless/ath/ath12k/fw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
- * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_FW_H
@@ -15,6 +15,7 @@ enum ath12k_fw_ie_type {
ATH12K_FW_IE_AMSS_IMAGE = 2,
ATH12K_FW_IE_M3_IMAGE = 3,
ATH12K_FW_IE_AMSS_DUALMAC_IMAGE = 4,
+ ATH12K_FW_IE_AUX_UC_IMAGE = 5,
};
enum ath12k_fw_features {
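The IE type values are part of the firmware-N.bin on-disk ABI, so a new image type takes the next free number (5 here) and existing values are never renumbered. A hedged sketch of how a boot path might consume the image recorded by the fw.c hunk above (the downloader helper is hypothetical, named only to show the hand-off point):

/* aux_uc_data/aux_uc_len are filled in while parsing firmware-N.bin */
if (ab->fw.aux_uc_data && ab->fw.aux_uc_len) {
	/* ath12k_load_aux_uc() is a hypothetical downloader */
	ret = ath12k_load_aux_uc(ab, ab->fw.aux_uc_data,
				 ab->fw.aux_uc_len);
	if (ret)
		return ret;
}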
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index 6406fcf5d69f..a164563fff28 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -1,1573 +1,162 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/dma-mapping.h>
-#include "hal_tx.h"
-#include "hal_rx.h"
#include "debug.h"
-#include "hal_desc.h"
#include "hif.h"
-static const struct hal_srng_config hw_srng_config_template[] = {
- /* TODO: max_rings can be populated by querying HW capabilities */
- [HAL_REO_DST] = {
- .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
- .max_rings = 8,
- .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_DST,
- .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_REO_EXCEPTION] = {
- /* Designating REO2SW0 ring as exception ring.
- * Any of the REO2SW rings can be used as the exception ring.
- */
- .start_ring_id = HAL_SRNG_RING_ID_REO2SW0,
- .max_rings = 1,
- .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_DST,
- .max_size = HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_REO_REINJECT] = {
- .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
- .max_rings = 4,
- .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_REO_CMD] = {
- .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
- .max_rings = 1,
- .entry_size = (sizeof(struct hal_tlv_64_hdr) +
- sizeof(struct hal_reo_get_queue_stats)) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_REO_STATUS] = {
- .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
- .max_rings = 1,
- .entry_size = (sizeof(struct hal_tlv_64_hdr) +
- sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_DST,
- .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_TCL_DATA] = {
- .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
- .max_rings = 6,
- .entry_size = sizeof(struct hal_tcl_data_cmd) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_TCL_CMD] = {
- .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
- .max_rings = 1,
- .entry_size = sizeof(struct hal_tcl_gse_cmd) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_TCL_STATUS] = {
- .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
- .max_rings = 1,
- .entry_size = (sizeof(struct hal_tlv_hdr) +
- sizeof(struct hal_tcl_status_ring)) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_DST,
- .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_CE_SRC] = {
- .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
- .max_rings = 16,
- .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_CE_DST] = {
- .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
- .max_rings = 16,
- .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_CE_DST_STATUS] = {
- .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
- .max_rings = 16,
- .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_DST,
- .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_WBM_IDLE_LINK] = {
- .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
- .max_rings = 1,
- .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_SW2WBM_RELEASE] = {
- .start_ring_id = HAL_SRNG_RING_ID_WBM_SW0_RELEASE,
- .max_rings = 2,
- .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_WBM2SW_RELEASE] = {
- .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
- .max_rings = 8,
- .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
- .mac_type = ATH12K_HAL_SRNG_UMAC,
- .ring_dir = HAL_SRNG_DIR_DST,
- .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_RXDMA_BUF] = {
- .start_ring_id = HAL_SRNG_SW2RXDMA_BUF0,
- .max_rings = 1,
- .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
- .mac_type = ATH12K_HAL_SRNG_DMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
- },
- [HAL_RXDMA_DST] = {
- .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
- .max_rings = 0,
- .entry_size = 0,
- .mac_type = ATH12K_HAL_SRNG_PMAC,
- .ring_dir = HAL_SRNG_DIR_DST,
- .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
- },
- [HAL_RXDMA_MONITOR_BUF] = {
- .start_ring_id = HAL_SRNG_SW2RXMON_BUF0,
- .max_rings = 1,
- .entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
- .mac_type = ATH12K_HAL_SRNG_PMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
- },
- [HAL_RXDMA_MONITOR_STATUS] = {
- .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
- .max_rings = 1,
- .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
- .mac_type = ATH12K_HAL_SRNG_PMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
- },
- [HAL_RXDMA_MONITOR_DESC] = { 0, },
- [HAL_RXDMA_DIR_BUF] = {
- .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
- .max_rings = 2,
- .entry_size = 8 >> 2, /* TODO: Define the struct */
- .mac_type = ATH12K_HAL_SRNG_PMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
- },
- [HAL_PPE2TCL] = {
- .start_ring_id = HAL_SRNG_RING_ID_PPE2TCL1,
- .max_rings = 1,
- .entry_size = sizeof(struct hal_tcl_entrance_from_ppe_ring) >> 2,
- .mac_type = ATH12K_HAL_SRNG_PMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_PPE_RELEASE] = {
- .start_ring_id = HAL_SRNG_RING_ID_WBM_PPE_RELEASE,
- .max_rings = 1,
- .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
- .mac_type = ATH12K_HAL_SRNG_PMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE,
- },
- [HAL_TX_MONITOR_BUF] = {
- .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
- .max_rings = 1,
- .entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
- .mac_type = ATH12K_HAL_SRNG_PMAC,
- .ring_dir = HAL_SRNG_DIR_SRC,
- .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
- },
- [HAL_RXDMA_MONITOR_DST] = {
- .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0,
- .max_rings = 1,
- .entry_size = sizeof(struct hal_mon_dest_desc) >> 2,
- .mac_type = ATH12K_HAL_SRNG_PMAC,
- .ring_dir = HAL_SRNG_DIR_DST,
- .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
- },
- [HAL_TX_MONITOR_DST] = {
- .start_ring_id = HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
- .max_rings = 1,
- .entry_size = sizeof(struct hal_mon_dest_desc) >> 2,
- .mac_type = ATH12K_HAL_SRNG_PMAC,
- .ring_dir = HAL_SRNG_DIR_DST,
- .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
- }
-};
-
-static const struct ath12k_hal_tcl_to_wbm_rbm_map
-ath12k_hal_qcn9274_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = {
- {
- .wbm_ring_num = 0,
- .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
- },
- {
- .wbm_ring_num = 1,
- .rbm_id = HAL_RX_BUF_RBM_SW1_BM,
- },
- {
- .wbm_ring_num = 2,
- .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
- },
- {
- .wbm_ring_num = 4,
- .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
- }
-};
-
-static const struct ath12k_hal_tcl_to_wbm_rbm_map
-ath12k_hal_wcn7850_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = {
- {
- .wbm_ring_num = 0,
- .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
- },
- {
- .wbm_ring_num = 2,
- .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
- },
- {
- .wbm_ring_num = 4,
- .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
- },
-};
-
-static unsigned int ath12k_hal_reo1_ring_id_offset(struct ath12k_base *ab)
-{
- return HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab);
-}
-
-static unsigned int ath12k_hal_reo1_ring_msi1_base_lsb_offset(struct ath12k_base *ab)
-{
- return HAL_REO1_RING_MSI1_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
-}
-
-static unsigned int ath12k_hal_reo1_ring_msi1_base_msb_offset(struct ath12k_base *ab)
-{
- return HAL_REO1_RING_MSI1_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
-}
-
-static unsigned int ath12k_hal_reo1_ring_msi1_data_offset(struct ath12k_base *ab)
-{
- return HAL_REO1_RING_MSI1_DATA(ab) - HAL_REO1_RING_BASE_LSB(ab);
-}
-
-static unsigned int ath12k_hal_reo1_ring_base_msb_offset(struct ath12k_base *ab)
-{
- return HAL_REO1_RING_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
-}
-
-static unsigned int ath12k_hal_reo1_ring_producer_int_setup_offset(struct ath12k_base *ab)
-{
- return HAL_REO1_RING_PRODUCER_INT_SETUP(ab) - HAL_REO1_RING_BASE_LSB(ab);
-}
-
-static unsigned int ath12k_hal_reo1_ring_hp_addr_lsb_offset(struct ath12k_base *ab)
-{
- return HAL_REO1_RING_HP_ADDR_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
-}
-
-static unsigned int ath12k_hal_reo1_ring_hp_addr_msb_offset(struct ath12k_base *ab)
-{
- return HAL_REO1_RING_HP_ADDR_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
-}
-
-static unsigned int ath12k_hal_reo1_ring_misc_offset(struct ath12k_base *ab)
-{
- return HAL_REO1_RING_MISC(ab) - HAL_REO1_RING_BASE_LSB(ab);
-}
-
-static bool ath12k_hw_qcn9274_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
-{
- return !!le16_get_bits(desc->u.qcn9274.msdu_end.info5,
- RX_MSDU_END_INFO5_FIRST_MSDU);
-}
-
-static bool ath12k_hw_qcn9274_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
-{
- return !!le16_get_bits(desc->u.qcn9274.msdu_end.info5,
- RX_MSDU_END_INFO5_LAST_MSDU);
-}
-
-static u8 ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
-{
- return le16_get_bits(desc->u.qcn9274.msdu_end.info5,
- RX_MSDU_END_INFO5_L3_HDR_PADDING);
-}
-
-static bool ath12k_hw_qcn9274_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
- RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
-}
-
-static u32 ath12k_hw_qcn9274_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274.mpdu_start.info2,
- RX_MPDU_START_INFO2_ENC_TYPE);
-}
-
-static u8 ath12k_hw_qcn9274_rx_desc_get_decap_type(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274.msdu_end.info11,
- RX_MSDU_END_INFO11_DECAP_FORMAT);
-}
-
-static u8 ath12k_hw_qcn9274_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274.msdu_end.info11,
- RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
-}
-
-static bool ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
- RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
-}
-
-static bool ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
- RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
-}
-
-static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
- RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
-}
-
-static u16 ath12k_hw_qcn9274_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274.msdu_end.info10,
- RX_MSDU_END_INFO10_MSDU_LENGTH);
-}
-
-static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
- RX_MSDU_END_INFO12_SGI);
-}
-
-static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
- RX_MSDU_END_INFO12_RATE_MCS);
-}
-
-static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
- RX_MSDU_END_INFO12_RECV_BW);
-}
-
-static u32 ath12k_hw_qcn9274_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
-{
- return __le32_to_cpu(desc->u.qcn9274.msdu_end.phy_meta_data);
-}
-
-static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
- RX_MSDU_END_INFO12_PKT_TYPE);
-}
-
-static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
- RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
-}
-
-static u8 ath12k_hw_qcn9274_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
-{
- return le16_get_bits(desc->u.qcn9274.msdu_end.info5,
- RX_MSDU_END_INFO5_TID);
-}
-
-static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
-{
- return __le16_to_cpu(desc->u.qcn9274.mpdu_start.sw_peer_id);
-}
-
-static void ath12k_hw_qcn9274_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
- struct hal_rx_desc *ldesc)
-{
- memcpy(&fdesc->u.qcn9274.msdu_end, &ldesc->u.qcn9274.msdu_end,
- sizeof(struct rx_msdu_end_qcn9274));
-}
-
-static u32 ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
-{
- return __le16_to_cpu(desc->u.qcn9274.mpdu_start.phy_ppdu_id);
-}
-
-static void ath12k_hw_qcn9274_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
-{
- u32 info = __le32_to_cpu(desc->u.qcn9274.msdu_end.info10);
-
- info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH;
- info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH);
-
- desc->u.qcn9274.msdu_end.info10 = __cpu_to_le32(info);
-}
-
-static u8 *ath12k_hw_qcn9274_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
-{
- return &desc->u.qcn9274.msdu_payload[0];
-}
-
-static u32 ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset(void)
-{
- return offsetof(struct hal_rx_desc_qcn9274, mpdu_start);
-}
-
-static u32 ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset(void)
-{
- return offsetof(struct hal_rx_desc_qcn9274, msdu_end);
-}
-
-static bool ath12k_hw_qcn9274_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
-{
- return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info4) &
- RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
-}
-
-static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
-{
- return desc->u.qcn9274.mpdu_start.addr2;
-}
-
-static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
-{
- return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
- RX_MSDU_END_INFO5_DA_IS_MCBC;
-}
-
-static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
- struct ieee80211_hdr *hdr)
-{
- hdr->frame_control = desc->u.qcn9274.mpdu_start.frame_ctrl;
- hdr->duration_id = desc->u.qcn9274.mpdu_start.duration;
- ether_addr_copy(hdr->addr1, desc->u.qcn9274.mpdu_start.addr1);
- ether_addr_copy(hdr->addr2, desc->u.qcn9274.mpdu_start.addr2);
- ether_addr_copy(hdr->addr3, desc->u.qcn9274.mpdu_start.addr3);
- if (__le32_to_cpu(desc->u.qcn9274.mpdu_start.info4) &
- RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
- ether_addr_copy(hdr->addr4, desc->u.qcn9274.mpdu_start.addr4);
- }
- hdr->seq_ctrl = desc->u.qcn9274.mpdu_start.seq_ctrl;
-}
-
-static void ath12k_hw_qcn9274_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
- u8 *crypto_hdr,
- enum hal_encrypt_type enctype)
-{
- unsigned int key_id;
-
- switch (enctype) {
- case HAL_ENCRYPT_TYPE_OPEN:
- return;
- case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
- case HAL_ENCRYPT_TYPE_TKIP_MIC:
- crypto_hdr[0] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[0]);
- crypto_hdr[1] = 0;
- crypto_hdr[2] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[0]);
- break;
- case HAL_ENCRYPT_TYPE_CCMP_128:
- case HAL_ENCRYPT_TYPE_CCMP_256:
- case HAL_ENCRYPT_TYPE_GCMP_128:
- case HAL_ENCRYPT_TYPE_AES_GCMP_256:
- crypto_hdr[0] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[0]);
- crypto_hdr[1] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[0]);
- crypto_hdr[2] = 0;
- break;
- case HAL_ENCRYPT_TYPE_WEP_40:
- case HAL_ENCRYPT_TYPE_WEP_104:
- case HAL_ENCRYPT_TYPE_WEP_128:
- case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
- case HAL_ENCRYPT_TYPE_WAPI:
- return;
- }
- key_id = le32_get_bits(desc->u.qcn9274.mpdu_start.info5,
- RX_MPDU_START_INFO5_KEY_ID);
- crypto_hdr[3] = 0x20 | (key_id << 6);
- crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274.mpdu_start.pn[0]);
- crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274.mpdu_start.pn[0]);
- crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[1]);
- crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[1]);
-}
-
-static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
-{
- struct ath12k_hal *hal = &ab->hal;
- struct hal_srng_config *s;
-
- hal->srng_config = kmemdup(hw_srng_config_template,
- sizeof(hw_srng_config_template),
- GFP_KERNEL);
- if (!hal->srng_config)
- return -ENOMEM;
-
- s = &hal->srng_config[HAL_REO_DST];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP;
- s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
- s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP;
-
- s = &hal->srng_config[HAL_REO_EXCEPTION];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP;
-
- s = &hal->srng_config[HAL_REO_REINJECT];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
- s->reg_size[0] = HAL_SW2REO1_RING_BASE_LSB(ab) - HAL_SW2REO_RING_BASE_LSB(ab);
- s->reg_size[1] = HAL_SW2REO1_RING_HP - HAL_SW2REO_RING_HP;
-
- s = &hal->srng_config[HAL_REO_CMD];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
-
- s = &hal->srng_config[HAL_REO_STATUS];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
-
- s = &hal->srng_config[HAL_TCL_DATA];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
- s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
- s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
-
- s = &hal->srng_config[HAL_TCL_CMD];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
-
- s = &hal->srng_config[HAL_TCL_STATUS];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
-
- s = &hal->srng_config[HAL_CE_SRC];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
-
- s = &hal->srng_config[HAL_CE_DST];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
-
- s = &hal->srng_config[HAL_CE_DST_STATUS];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
- HAL_CE_DST_STATUS_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
-
- s = &hal->srng_config[HAL_WBM_IDLE_LINK];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
-
- s = &hal->srng_config[HAL_SW2WBM_RELEASE];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP;
- s->reg_size[0] = HAL_WBM_SW1_RELEASE_RING_BASE_LSB(ab) -
- HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab);
- s->reg_size[1] = HAL_WBM_SW1_RELEASE_RING_HP - HAL_WBM_SW_RELEASE_RING_HP;
-
- s = &hal->srng_config[HAL_WBM2SW_RELEASE];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
- s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
- HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
- s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
-
- /* Some LMAC rings are not accessed from the host:
- * RXDMA_BUF, RXDMA_DST, RXDMA_MONITOR_BUF, RXDMA_MONITOR_STATUS,
- * RXDMA_MONITOR_DST, RXDMA_MONITOR_DESC, RXDMA_DIR_BUF_SRC,
- * RXDMA_RX_MONITOR_BUF, TX_MONITOR_BUF, TX_MONITOR_DST, SW2RXDMA
- */
- s = &hal->srng_config[HAL_PPE2TCL];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_HP;
-
- s = &hal->srng_config[HAL_PPE_RELEASE];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_PPE_RELEASE_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_PPE_RELEASE_RING_HP;
-
- return 0;
-}
-
-static u16 ath12k_hal_qcn9274_rx_mpdu_start_wmask_get(void)
-{
- return QCN9274_MPDU_START_WMASK;
-}
-
-static u32 ath12k_hal_qcn9274_rx_msdu_end_wmask_get(void)
-{
- return QCN9274_MSDU_END_WMASK;
-}
-
-static const struct hal_rx_ops *ath12k_hal_qcn9274_get_hal_rx_compact_ops(void)
-{
- return &hal_rx_qcn9274_compact_ops;
-}
-
-static bool ath12k_hw_qcn9274_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.qcn9274.msdu_end.info14,
- RX_MSDU_END_INFO14_MSDU_DONE);
-}
-
-static bool ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.qcn9274.msdu_end.info13,
- RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
-}
-
-static bool ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.qcn9274.msdu_end.info13,
- RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
-}
-
-static bool ath12k_hw_qcn9274_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
-{
- return (le32_get_bits(desc->u.qcn9274.msdu_end.info14,
- RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
- RX_DESC_DECRYPT_STATUS_CODE_OK);
-}
-
-static u32 ath12k_hw_qcn9274_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
-{
- u32 info = __le32_to_cpu(desc->u.qcn9274.msdu_end.info13);
- u32 errmap = 0;
-
- if (info & RX_MSDU_END_INFO13_FCS_ERR)
- errmap |= HAL_RX_MPDU_ERR_FCS;
-
- if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
- errmap |= HAL_RX_MPDU_ERR_DECRYPT;
-
- if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
- errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
-
- if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
- errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
-
- if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
- errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
-
- if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
- errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
-
- if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
- errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
-
- return errmap;
-}
-
-static u32 ath12k_hw_qcn9274_get_rx_desc_size(void)
-{
- return sizeof(struct hal_rx_desc_qcn9274);
-}
-
-static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
-{
- return 0;
-}
-
-const struct hal_rx_ops hal_rx_qcn9274_ops = {
- .rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu,
- .rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu,
- .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes,
- .rx_desc_encrypt_valid = ath12k_hw_qcn9274_rx_desc_encrypt_valid,
- .rx_desc_get_encrypt_type = ath12k_hw_qcn9274_rx_desc_get_encrypt_type,
- .rx_desc_get_decap_type = ath12k_hw_qcn9274_rx_desc_get_decap_type,
- .rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_rx_desc_get_mesh_ctl,
- .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld,
- .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid,
- .rx_desc_get_mpdu_start_seq_no = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no,
- .rx_desc_get_msdu_len = ath12k_hw_qcn9274_rx_desc_get_msdu_len,
- .rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_rx_desc_get_msdu_sgi,
- .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs,
- .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw,
- .rx_desc_get_msdu_freq = ath12k_hw_qcn9274_rx_desc_get_msdu_freq,
- .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type,
- .rx_desc_get_msdu_nss = ath12k_hw_qcn9274_rx_desc_get_msdu_nss,
- .rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_rx_desc_get_mpdu_tid,
- .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id,
- .rx_desc_copy_end_tlv = ath12k_hw_qcn9274_rx_desc_copy_end_tlv,
- .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id,
- .rx_desc_set_msdu_len = ath12k_hw_qcn9274_rx_desc_set_msdu_len,
- .rx_desc_get_msdu_payload = ath12k_hw_qcn9274_rx_desc_get_msdu_payload,
- .rx_desc_get_mpdu_start_offset = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset,
- .rx_desc_get_msdu_end_offset = ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset,
- .rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_rx_desc_mac_addr2_valid,
- .rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2,
- .rx_desc_is_da_mcbc = ath12k_hw_qcn9274_rx_desc_is_da_mcbc,
- .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
- .rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
- .dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
- .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
- .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
- .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted,
- .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err,
- .rx_desc_get_desc_size = ath12k_hw_qcn9274_get_rx_desc_size,
- .rx_desc_get_msdu_src_link_id = ath12k_hw_qcn9274_rx_desc_get_msdu_src_link,
-};
-
-static bool ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
-{
- return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
- RX_MSDU_END_INFO5_FIRST_MSDU);
-}
-
-static bool ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
-{
- return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
- RX_MSDU_END_INFO5_LAST_MSDU);
-}
-
-static u8 ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
-{
- return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
- RX_MSDU_END_INFO5_L3_HDR_PADDING);
-}
-
-static bool ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
- RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
-}
-
-static u32 ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info2,
- RX_MPDU_START_INFO2_ENC_TYPE);
-}
-
-static u8 ath12k_hw_qcn9274_compact_rx_desc_get_decap_type(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11,
- RX_MSDU_END_INFO11_DECAP_FORMAT);
-}
-
-static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274.msdu_end.info11,
- RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
-}
-
-static bool
-ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
- RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
-}
-
-static bool ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
- RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
-}
-
-static u16
-ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
- RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
-}
-
-static u16 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info10,
- RX_MSDU_END_INFO10_MSDU_LENGTH);
-}
-
-static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
- RX_MSDU_END_INFO12_SGI);
-}
-
-static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
- RX_MSDU_END_INFO12_RATE_MCS);
-}
-
-static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
- RX_MSDU_END_INFO12_RECV_BW);
-}
-
-static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
-{
- return __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.phy_meta_data);
-}
-
-static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
- RX_MSDU_END_INFO12_PKT_TYPE);
-}
-
-static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
- RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
-}
-
-static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
-{
- return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
- RX_MSDU_END_INFO5_TID);
-}
-
-static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
-{
- return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.sw_peer_id);
-}
-
-static void ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
- struct hal_rx_desc *ldesc)
-{
- fdesc->u.qcn9274_compact.msdu_end = ldesc->u.qcn9274_compact.msdu_end;
-}
-
-static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
-{
- return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.phy_ppdu_id);
-}
-
-static void
-ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
-{
- u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info10);
-
- info = u32_replace_bits(info, len, RX_MSDU_END_INFO10_MSDU_LENGTH);
- desc->u.qcn9274_compact.msdu_end.info10 = __cpu_to_le32(info);
-}
-
-static u8 *ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
-{
- return &desc->u.qcn9274_compact.msdu_payload[0];
-}
-
-static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset(void)
-{
- return offsetof(struct hal_rx_desc_qcn9274_compact, mpdu_start);
-}
-
-static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset(void)
-{
- return offsetof(struct hal_rx_desc_qcn9274_compact, msdu_end);
-}
-
-static bool ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
-{
- return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
- RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
-}
-
-static u8 *ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
-{
- return desc->u.qcn9274_compact.mpdu_start.addr2;
-}
-
-static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
-{
- return __le16_to_cpu(desc->u.qcn9274_compact.msdu_end.info5) &
- RX_MSDU_END_INFO5_DA_IS_MCBC;
-}
-
-static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
- struct ieee80211_hdr *hdr)
-{
- hdr->frame_control = desc->u.qcn9274_compact.mpdu_start.frame_ctrl;
- hdr->duration_id = desc->u.qcn9274_compact.mpdu_start.duration;
- ether_addr_copy(hdr->addr1, desc->u.qcn9274_compact.mpdu_start.addr1);
- ether_addr_copy(hdr->addr2, desc->u.qcn9274_compact.mpdu_start.addr2);
- ether_addr_copy(hdr->addr3, desc->u.qcn9274_compact.mpdu_start.addr3);
- if (__le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
- RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
- ether_addr_copy(hdr->addr4, desc->u.qcn9274_compact.mpdu_start.addr4);
- }
- hdr->seq_ctrl = desc->u.qcn9274_compact.mpdu_start.seq_ctrl;
-}
-
-static void
-ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
- u8 *crypto_hdr,
- enum hal_encrypt_type enctype)
-{
- unsigned int key_id;
-
- switch (enctype) {
- case HAL_ENCRYPT_TYPE_OPEN:
- return;
- case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
- case HAL_ENCRYPT_TYPE_TKIP_MIC:
- crypto_hdr[0] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
- crypto_hdr[1] = 0;
- crypto_hdr[2] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
- break;
- case HAL_ENCRYPT_TYPE_CCMP_128:
- case HAL_ENCRYPT_TYPE_CCMP_256:
- case HAL_ENCRYPT_TYPE_GCMP_128:
- case HAL_ENCRYPT_TYPE_AES_GCMP_256:
- crypto_hdr[0] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
- crypto_hdr[1] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
- crypto_hdr[2] = 0;
- break;
- case HAL_ENCRYPT_TYPE_WEP_40:
- case HAL_ENCRYPT_TYPE_WEP_104:
- case HAL_ENCRYPT_TYPE_WEP_128:
- case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
- case HAL_ENCRYPT_TYPE_WAPI:
- return;
- }
- key_id = le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info5,
- RX_MPDU_START_INFO5_KEY_ID);
- crypto_hdr[3] = 0x20 | (key_id << 6);
- crypto_hdr[4] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274_compact.mpdu_start.pn[0]);
- crypto_hdr[5] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274_compact.mpdu_start.pn[0]);
- crypto_hdr[6] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[1]);
- crypto_hdr[7] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]);
-}
-
-static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
- RX_MSDU_END_INFO14_MSDU_DONE);
-}
-
-static bool ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
- RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
-}
-
-static bool ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
- RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
-}
-
-static bool ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
-{
- return (le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
- RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
- RX_DESC_DECRYPT_STATUS_CODE_OK);
-}
-
-static u32 ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
-{
- u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info13);
- u32 errmap = 0;
-
- if (info & RX_MSDU_END_INFO13_FCS_ERR)
- errmap |= HAL_RX_MPDU_ERR_FCS;
-
- if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
- errmap |= HAL_RX_MPDU_ERR_DECRYPT;
-
- if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
- errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
-
- if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
- errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
-
- if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
- errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
-
- if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
- errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
-
- if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
- errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
-
- return errmap;
-}
-
-static u32 ath12k_hw_qcn9274_compact_get_rx_desc_size(void)
-{
- return sizeof(struct hal_rx_desc_qcn9274_compact);
-}
-
-static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
-{
- return le64_get_bits(desc->u.qcn9274_compact.msdu_end.msdu_end_tag,
- RX_MSDU_END_64_TLV_SRC_LINK_ID);
-}
-
-const struct hal_rx_ops hal_rx_qcn9274_compact_ops = {
- .rx_desc_get_first_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu,
- .rx_desc_get_last_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu,
- .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes,
- .rx_desc_encrypt_valid = ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid,
- .rx_desc_get_encrypt_type = ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type,
- .rx_desc_get_decap_type = ath12k_hw_qcn9274_compact_rx_desc_get_decap_type,
- .rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl,
- .rx_desc_get_mpdu_seq_ctl_vld =
- ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld,
- .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid,
- .rx_desc_get_mpdu_start_seq_no =
- ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no,
- .rx_desc_get_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len,
- .rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi,
- .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs,
- .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw,
- .rx_desc_get_msdu_freq = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq,
- .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type,
- .rx_desc_get_msdu_nss = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss,
- .rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid,
- .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id,
- .rx_desc_copy_end_tlv = ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv,
- .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id,
- .rx_desc_set_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len,
- .rx_desc_get_msdu_payload = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload,
- .rx_desc_get_mpdu_start_offset =
- ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset,
- .rx_desc_get_msdu_end_offset =
- ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset,
- .rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid,
- .rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2,
- .rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc,
- .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr,
- .rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr,
- .dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done,
- .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail,
- .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail,
- .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted,
- .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err,
- .rx_desc_get_desc_size = ath12k_hw_qcn9274_compact_get_rx_desc_size,
- .rx_desc_get_msdu_src_link_id =
- ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link,
-};
-
-const struct hal_ops hal_qcn9274_ops = {
- .create_srng_config = ath12k_hal_srng_create_config_qcn9274,
- .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
- .rxdma_ring_wmask_rx_mpdu_start = ath12k_hal_qcn9274_rx_mpdu_start_wmask_get,
- .rxdma_ring_wmask_rx_msdu_end = ath12k_hal_qcn9274_rx_msdu_end_wmask_get,
- .get_hal_rx_compact_ops = ath12k_hal_qcn9274_get_hal_rx_compact_ops,
-};
-
-static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
-{
- return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5,
- RX_MSDU_END_INFO5_FIRST_MSDU);
-}
-
-static bool ath12k_hw_wcn7850_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
-{
- return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5,
- RX_MSDU_END_INFO5_LAST_MSDU);
-}
-
-static u8 ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
-{
- return le16_get_bits(desc->u.wcn7850.msdu_end.info5,
- RX_MSDU_END_INFO5_L3_HDR_PADDING);
-}
-
-static bool ath12k_hw_wcn7850_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
- RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
-}
-
-static u32 ath12k_hw_wcn7850_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.wcn7850.mpdu_start.info2,
- RX_MPDU_START_INFO2_ENC_TYPE);
-}
-
-static u8 ath12k_hw_wcn7850_rx_desc_get_decap_type(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.wcn7850.msdu_end.info11,
- RX_MSDU_END_INFO11_DECAP_FORMAT);
-}
-
-static u8 ath12k_hw_wcn7850_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.wcn7850.msdu_end.info11,
- RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
-}
-
-static bool ath12k_hw_wcn7850_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
- RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
-}
-
-static bool ath12k_hw_wcn7850_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
-{
- return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
- RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
-}
-
-static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
- RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
-}
-
-static u16 ath12k_hw_wcn7850_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.wcn7850.msdu_end.info10,
- RX_MSDU_END_INFO10_MSDU_LENGTH);
-}
-
-static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
- RX_MSDU_END_INFO12_SGI);
-}
-
-static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
- RX_MSDU_END_INFO12_RATE_MCS);
-}
-
-static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
- RX_MSDU_END_INFO12_RECV_BW);
-}
-
-static u32 ath12k_hw_wcn7850_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
-{
- return __le32_to_cpu(desc->u.wcn7850.msdu_end.phy_meta_data);
-}
-
-static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
-{
- return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
- RX_MSDU_END_INFO12_PKT_TYPE);
-}
-
-static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
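+/* The wrappers below are thin dispatchers into the chip-specific ops
+ * table (ab->hal.ops / hal->ops); the QCN9274/WCN7850 implementations
+ * removed from this file are expected to back those ops elsewhere.
+ */
+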
+static void ath12k_hal_ce_dst_setup(struct ath12k_base *ab,
+ struct hal_srng *srng, int ring_num)
{
- return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
- RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
+ ab->hal.ops->ce_dst_setup(ab, srng, ring_num);
}
-static u8 ath12k_hw_wcn7850_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+static void ath12k_hal_srng_src_hw_init(struct ath12k_base *ab,
+ struct hal_srng *srng)
{
- return le32_get_bits(desc->u.wcn7850.mpdu_start.info2,
- RX_MPDU_START_INFO2_TID);
+ ab->hal.ops->srng_src_hw_init(ab, srng);
}
-static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+static void ath12k_hal_srng_dst_hw_init(struct ath12k_base *ab,
+ struct hal_srng *srng)
{
- return __le16_to_cpu(desc->u.wcn7850.mpdu_start.sw_peer_id);
+ ab->hal.ops->srng_dst_hw_init(ab, srng);
}
-static void ath12k_hw_wcn7850_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
- struct hal_rx_desc *ldesc)
+static void ath12k_hal_set_umac_srng_ptr_addr(struct ath12k_base *ab,
+ struct hal_srng *srng)
{
- memcpy(&fdesc->u.wcn7850.msdu_end, &ldesc->u.wcn7850.msdu_end,
- sizeof(struct rx_msdu_end_qcn9274));
+ ab->hal.ops->set_umac_srng_ptr_addr(ab, srng);
}
-static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+static int ath12k_hal_srng_get_ring_id(struct ath12k_hal *hal,
+ enum hal_ring_type type,
+ int ring_num, int mac_id)
{
- return le64_get_bits(desc->u.wcn7850.mpdu_start_tag,
- HAL_TLV_HDR_TAG);
+ return hal->ops->srng_get_ring_id(hal, type, ring_num, mac_id);
}
-static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num)
{
- return __le16_to_cpu(desc->u.wcn7850.mpdu_start.phy_ppdu_id);
+ return ab->hal.ops->srng_update_shadow_config(ab, ring_type,
+ ring_num);
}
-static void ath12k_hw_wcn7850_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+u32 ath12k_hal_ce_get_desc_size(struct ath12k_hal *hal, enum hal_ce_desc type)
{
- u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info10);
-
- info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH;
- info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH);
-
- desc->u.wcn7850.msdu_end.info10 = __cpu_to_le32(info);
+ return hal->ops->ce_get_desc_size(type);
}
-static u8 *ath12k_hw_wcn7850_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id)
{
- return &desc->u.wcn7850.msdu_payload[0];
+ ab->hal.ops->tx_set_dscp_tid_map(ab, id);
}
-static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_offset(void)
+void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab,
+ u32 bank_config, u8 bank_id)
{
- return offsetof(struct hal_rx_desc_wcn7850, mpdu_start_tag);
+ ab->hal.ops->tx_configure_bank_register(ab, bank_config, bank_id);
}
-static u32 ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset(void)
+void ath12k_hal_reoq_lut_addr_read_enable(struct ath12k_base *ab)
{
- return offsetof(struct hal_rx_desc_wcn7850, msdu_end_tag);
+ ab->hal.ops->reoq_lut_addr_read_enable(ab);
}
-static bool ath12k_hw_wcn7850_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+void ath12k_hal_reoq_lut_set_max_peerid(struct ath12k_base *ab)
{
- return __le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) &
- RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
+ ab->hal.ops->reoq_lut_set_max_peerid(ab);
}
-static u8 *ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+void ath12k_hal_write_ml_reoq_lut_addr(struct ath12k_base *ab, dma_addr_t paddr)
{
- return desc->u.wcn7850.mpdu_start.addr2;
+ ab->hal.ops->write_ml_reoq_lut_addr(ab, paddr);
}
-static bool ath12k_hw_wcn7850_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
+void ath12k_hal_write_reoq_lut_addr(struct ath12k_base *ab, dma_addr_t paddr)
{
- return __le32_to_cpu(desc->u.wcn7850.msdu_end.info13) &
- RX_MSDU_END_INFO13_MCAST_BCAST;
+ ab->hal.ops->write_reoq_lut_addr(ab, paddr);
}
-static void ath12k_hw_wcn7850_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
- struct ieee80211_hdr *hdr)
+void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset)
{
- hdr->frame_control = desc->u.wcn7850.mpdu_start.frame_ctrl;
- hdr->duration_id = desc->u.wcn7850.mpdu_start.duration;
- ether_addr_copy(hdr->addr1, desc->u.wcn7850.mpdu_start.addr1);
- ether_addr_copy(hdr->addr2, desc->u.wcn7850.mpdu_start.addr2);
- ether_addr_copy(hdr->addr3, desc->u.wcn7850.mpdu_start.addr3);
- if (__le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) &
- RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
- ether_addr_copy(hdr->addr4, desc->u.wcn7850.mpdu_start.addr4);
- }
- hdr->seq_ctrl = desc->u.wcn7850.mpdu_start.seq_ctrl;
+ ab->hal.ops->setup_link_idle_list(ab, sbuf, nsbufs, tot_link_desc,
+ end_offset);
}
-static void ath12k_hw_wcn7850_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
- u8 *crypto_hdr,
- enum hal_encrypt_type enctype)
+void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
{
- unsigned int key_id;
-
- switch (enctype) {
- case HAL_ENCRYPT_TYPE_OPEN:
- return;
- case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
- case HAL_ENCRYPT_TYPE_TKIP_MIC:
- crypto_hdr[0] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]);
- crypto_hdr[1] = 0;
- crypto_hdr[2] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]);
- break;
- case HAL_ENCRYPT_TYPE_CCMP_128:
- case HAL_ENCRYPT_TYPE_CCMP_256:
- case HAL_ENCRYPT_TYPE_GCMP_128:
- case HAL_ENCRYPT_TYPE_AES_GCMP_256:
- crypto_hdr[0] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]);
- crypto_hdr[1] =
- HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]);
- crypto_hdr[2] = 0;
- break;
- case HAL_ENCRYPT_TYPE_WEP_40:
- case HAL_ENCRYPT_TYPE_WEP_104:
- case HAL_ENCRYPT_TYPE_WEP_128:
- case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
- case HAL_ENCRYPT_TYPE_WAPI:
- return;
- }
- key_id = u32_get_bits(__le32_to_cpu(desc->u.wcn7850.mpdu_start.info5),
- RX_MPDU_START_INFO5_KEY_ID);
- crypto_hdr[3] = 0x20 | (key_id << 6);
- crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.wcn7850.mpdu_start.pn[0]);
- crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.wcn7850.mpdu_start.pn[0]);
- crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[1]);
- crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]);
+ ab->hal.ops->reo_hw_setup(ab, ring_hash_map);
}
-static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
+void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab, struct hal_srng *srng)
{
- struct ath12k_hal *hal = &ab->hal;
- struct hal_srng_config *s;
-
- hal->srng_config = kmemdup(hw_srng_config_template,
- sizeof(hw_srng_config_template),
- GFP_KERNEL);
- if (!hal->srng_config)
- return -ENOMEM;
-
- s = &hal->srng_config[HAL_REO_DST];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP;
- s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
- s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP;
-
- s = &hal->srng_config[HAL_REO_EXCEPTION];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP;
-
- s = &hal->srng_config[HAL_REO_REINJECT];
- s->max_rings = 1;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
-
- s = &hal->srng_config[HAL_REO_CMD];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
-
- s = &hal->srng_config[HAL_REO_STATUS];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
-
- s = &hal->srng_config[HAL_TCL_DATA];
- s->max_rings = 5;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
- s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
- s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
-
- s = &hal->srng_config[HAL_TCL_CMD];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
-
- s = &hal->srng_config[HAL_TCL_STATUS];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
-
- s = &hal->srng_config[HAL_CE_SRC];
- s->max_rings = 12;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
-
- s = &hal->srng_config[HAL_CE_DST];
- s->max_rings = 12;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
-
- s = &hal->srng_config[HAL_CE_DST_STATUS];
- s->max_rings = 12;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
- HAL_CE_DST_STATUS_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
-
- s = &hal->srng_config[HAL_WBM_IDLE_LINK];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
-
- s = &hal->srng_config[HAL_SW2WBM_RELEASE];
- s->max_rings = 1;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP;
-
- s = &hal->srng_config[HAL_WBM2SW_RELEASE];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
- s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
- HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
- s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
-
- s = &hal->srng_config[HAL_RXDMA_BUF];
- s->max_rings = 2;
- s->mac_type = ATH12K_HAL_SRNG_PMAC;
-
- s = &hal->srng_config[HAL_RXDMA_DST];
- s->max_rings = 1;
- s->entry_size = sizeof(struct hal_reo_entrance_ring) >> 2;
-
- /* below rings are not used */
- s = &hal->srng_config[HAL_RXDMA_DIR_BUF];
- s->max_rings = 0;
-
- s = &hal->srng_config[HAL_PPE2TCL];
- s->max_rings = 0;
-
- s = &hal->srng_config[HAL_PPE_RELEASE];
- s->max_rings = 0;
-
- s = &hal->srng_config[HAL_TX_MONITOR_BUF];
- s->max_rings = 0;
-
- s = &hal->srng_config[HAL_TX_MONITOR_DST];
- s->max_rings = 0;
-
- return 0;
+ ab->hal.ops->reo_init_cmd_ring(ab, srng);
}
-static bool ath12k_hw_wcn7850_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
+void ath12k_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab)
{
- return !!le32_get_bits(desc->u.wcn7850.msdu_end.info14,
- RX_MSDU_END_INFO14_MSDU_DONE);
+ ab->hal.ops->reo_shared_qaddr_cache_clear(ab);
}
+EXPORT_SYMBOL(ath12k_hal_reo_shared_qaddr_cache_clear);
-static bool ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
+void ath12k_hal_rx_buf_addr_info_set(struct ath12k_hal *hal,
+ struct ath12k_buffer_addr *binfo,
+ dma_addr_t paddr, u32 cookie, u8 manager)
{
- return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13,
- RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+ hal->ops->rx_buf_addr_info_set(binfo, paddr, cookie, manager);
}
-static bool ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
+void ath12k_hal_rx_buf_addr_info_get(struct ath12k_hal *hal,
+ struct ath12k_buffer_addr *binfo,
+ dma_addr_t *paddr, u32 *msdu_cookies,
+ u8 *rbm)
{
- return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13,
- RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+ hal->ops->rx_buf_addr_info_get(binfo, paddr, msdu_cookies, rbm);
}
-static bool ath12k_hw_wcn7850_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
+void ath12k_hal_rx_msdu_list_get(struct ath12k_hal *hal, struct ath12k *ar,
+ void *link_desc,
+ void *msdu_list,
+ u16 *num_msdus)
{
- return (le32_get_bits(desc->u.wcn7850.msdu_end.info14,
- RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
- RX_DESC_DECRYPT_STATUS_CODE_OK);
+ hal->ops->rx_msdu_list_get(ar, link_desc, msdu_list, num_msdus);
}
-static u32 ath12k_hw_wcn7850_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
+void ath12k_hal_rx_reo_ent_buf_paddr_get(struct ath12k_hal *hal, void *rx_desc,
+ dma_addr_t *paddr,
+ u32 *sw_cookie,
+ struct ath12k_buffer_addr **pp_buf_addr,
+ u8 *rbm, u32 *msdu_cnt)
{
- u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info13);
- u32 errmap = 0;
-
- if (info & RX_MSDU_END_INFO13_FCS_ERR)
- errmap |= HAL_RX_MPDU_ERR_FCS;
-
- if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
- errmap |= HAL_RX_MPDU_ERR_DECRYPT;
-
- if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
- errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
-
- if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
- errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
-
- if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
- errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
-
- if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
- errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
-
- if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
- errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
-
- return errmap;
+ hal->ops->rx_reo_ent_buf_paddr_get(rx_desc, paddr, sw_cookie,
+ pp_buf_addr, rbm, msdu_cnt);
}
-static u32 ath12k_hw_wcn7850_get_rx_desc_size(void)
+void ath12k_hal_cc_config(struct ath12k_base *ab)
{
- return sizeof(struct hal_rx_desc_wcn7850);
+ ab->hal.ops->cc_config(ab);
}
-static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+enum hal_rx_buf_return_buf_manager
+ath12k_hal_get_idle_link_rbm(struct ath12k_hal *hal, u8 device_id)
{
- return 0;
+ return hal->ops->get_idle_link_rbm(hal, device_id);
}
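The wrappers above (and the similar ones that follow) replace open-coded descriptor and register manipulation with dispatch through a per-device ops table. A minimal sketch of that indirection, using hypothetical example_ names rather than the real ath12k types:

struct example_hal_ops;

struct example_hal {
	/* installed by the chip-specific HAL at probe time */
	const struct example_hal_ops *ops;
};

struct example_hal_ops {
	/* one pointer per chip-specific operation */
	void (*cc_config)(struct example_hal *hal);
};

/* Thin wrapper: callers stay chip-agnostic. */
static void example_hal_cc_config(struct example_hal *hal)
{
	hal->ops->cc_config(hal);
}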
-const struct hal_rx_ops hal_rx_wcn7850_ops = {
- .rx_desc_get_first_msdu = ath12k_hw_wcn7850_rx_desc_get_first_msdu,
- .rx_desc_get_last_msdu = ath12k_hw_wcn7850_rx_desc_get_last_msdu,
- .rx_desc_get_l3_pad_bytes = ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes,
- .rx_desc_encrypt_valid = ath12k_hw_wcn7850_rx_desc_encrypt_valid,
- .rx_desc_get_encrypt_type = ath12k_hw_wcn7850_rx_desc_get_encrypt_type,
- .rx_desc_get_decap_type = ath12k_hw_wcn7850_rx_desc_get_decap_type,
- .rx_desc_get_mesh_ctl = ath12k_hw_wcn7850_rx_desc_get_mesh_ctl,
- .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_wcn7850_rx_desc_get_mpdu_seq_ctl_vld,
- .rx_desc_get_mpdu_fc_valid = ath12k_hw_wcn7850_rx_desc_get_mpdu_fc_valid,
- .rx_desc_get_mpdu_start_seq_no = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_seq_no,
- .rx_desc_get_msdu_len = ath12k_hw_wcn7850_rx_desc_get_msdu_len,
- .rx_desc_get_msdu_sgi = ath12k_hw_wcn7850_rx_desc_get_msdu_sgi,
- .rx_desc_get_msdu_rate_mcs = ath12k_hw_wcn7850_rx_desc_get_msdu_rate_mcs,
- .rx_desc_get_msdu_rx_bw = ath12k_hw_wcn7850_rx_desc_get_msdu_rx_bw,
- .rx_desc_get_msdu_freq = ath12k_hw_wcn7850_rx_desc_get_msdu_freq,
- .rx_desc_get_msdu_pkt_type = ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type,
- .rx_desc_get_msdu_nss = ath12k_hw_wcn7850_rx_desc_get_msdu_nss,
- .rx_desc_get_mpdu_tid = ath12k_hw_wcn7850_rx_desc_get_mpdu_tid,
- .rx_desc_get_mpdu_peer_id = ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id,
- .rx_desc_copy_end_tlv = ath12k_hw_wcn7850_rx_desc_copy_end_tlv,
- .rx_desc_get_mpdu_start_tag = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag,
- .rx_desc_get_mpdu_ppdu_id = ath12k_hw_wcn7850_rx_desc_get_mpdu_ppdu_id,
- .rx_desc_set_msdu_len = ath12k_hw_wcn7850_rx_desc_set_msdu_len,
- .rx_desc_get_msdu_payload = ath12k_hw_wcn7850_rx_desc_get_msdu_payload,
- .rx_desc_get_mpdu_start_offset = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_offset,
- .rx_desc_get_msdu_end_offset = ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset,
- .rx_desc_mac_addr2_valid = ath12k_hw_wcn7850_rx_desc_mac_addr2_valid,
- .rx_desc_mpdu_start_addr2 = ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2,
- .rx_desc_is_da_mcbc = ath12k_hw_wcn7850_rx_desc_is_da_mcbc,
- .rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
- .rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
- .dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
- .dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
- .dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
- .dp_rx_h_is_decrypted = ath12k_hw_wcn7850_dp_rx_h_is_decrypted,
- .dp_rx_h_mpdu_err = ath12k_hw_wcn7850_dp_rx_h_mpdu_err,
- .rx_desc_get_desc_size = ath12k_hw_wcn7850_get_rx_desc_size,
- .rx_desc_get_msdu_src_link_id = ath12k_hw_wcn7850_rx_desc_get_msdu_src_link,
-};
-
-const struct hal_ops hal_wcn7850_ops = {
- .create_srng_config = ath12k_hal_srng_create_config_wcn7850,
- .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
- .rxdma_ring_wmask_rx_mpdu_start = NULL,
- .rxdma_ring_wmask_rx_msdu_end = NULL,
- .get_hal_rx_compact_ops = NULL,
-};
-
-static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab)
+static int ath12k_hal_alloc_cont_rdp(struct ath12k_hal *hal)
{
- struct ath12k_hal *hal = &ab->hal;
size_t size;
size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
- hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
+ hal->rdp.vaddr = dma_alloc_coherent(hal->dev, size, &hal->rdp.paddr,
GFP_KERNEL);
if (!hal->rdp.vaddr)
return -ENOMEM;
@@ -1575,27 +164,25 @@ static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab)
return 0;
}
-static void ath12k_hal_free_cont_rdp(struct ath12k_base *ab)
+static void ath12k_hal_free_cont_rdp(struct ath12k_hal *hal)
{
- struct ath12k_hal *hal = &ab->hal;
size_t size;
if (!hal->rdp.vaddr)
return;
size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
- dma_free_coherent(ab->dev, size,
+ dma_free_coherent(hal->dev, size,
hal->rdp.vaddr, hal->rdp.paddr);
hal->rdp.vaddr = NULL;
}
-static int ath12k_hal_alloc_cont_wrp(struct ath12k_base *ab)
+static int ath12k_hal_alloc_cont_wrp(struct ath12k_hal *hal)
{
- struct ath12k_hal *hal = &ab->hal;
size_t size;
size = sizeof(u32) * (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS);
- hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
+ hal->wrp.vaddr = dma_alloc_coherent(hal->dev, size, &hal->wrp.paddr,
GFP_KERNEL);
if (!hal->wrp.vaddr)
return -ENOMEM;
@@ -1603,209 +190,19 @@ static int ath12k_hal_alloc_cont_wrp(struct ath12k_base *ab)
return 0;
}
-static void ath12k_hal_free_cont_wrp(struct ath12k_base *ab)
+static void ath12k_hal_free_cont_wrp(struct ath12k_hal *hal)
{
- struct ath12k_hal *hal = &ab->hal;
size_t size;
if (!hal->wrp.vaddr)
return;
size = sizeof(u32) * (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS);
- dma_free_coherent(ab->dev, size,
+ dma_free_coherent(hal->dev, size,
hal->wrp.vaddr, hal->wrp.paddr);
hal->wrp.vaddr = NULL;
}
-static void ath12k_hal_ce_dst_setup(struct ath12k_base *ab,
- struct hal_srng *srng, int ring_num)
-{
- struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
- u32 addr;
- u32 val;
-
- addr = HAL_CE_DST_RING_CTRL +
- srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
- ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
-
- val = ath12k_hif_read32(ab, addr);
- val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
- val |= u32_encode_bits(srng->u.dst_ring.max_buffer_length,
- HAL_CE_DST_R0_DEST_CTRL_MAX_LEN);
- ath12k_hif_write32(ab, addr, val);
-}
-
-static void ath12k_hal_srng_dst_hw_init(struct ath12k_base *ab,
- struct hal_srng *srng)
-{
- struct ath12k_hal *hal = &ab->hal;
- u32 val;
- u64 hp_addr;
- u32 reg_base;
-
- reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
-
- if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
- ath12k_hif_write32(ab, reg_base +
- ath12k_hal_reo1_ring_msi1_base_lsb_offset(ab),
- srng->msi_addr);
-
- val = u32_encode_bits(((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT),
- HAL_REO1_RING_MSI1_BASE_MSB_ADDR) |
- HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
- ath12k_hif_write32(ab, reg_base +
- ath12k_hal_reo1_ring_msi1_base_msb_offset(ab), val);
-
- ath12k_hif_write32(ab,
- reg_base + ath12k_hal_reo1_ring_msi1_data_offset(ab),
- srng->msi_data);
- }
-
- ath12k_hif_write32(ab, reg_base, srng->ring_base_paddr);
-
- val = u32_encode_bits(((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT),
- HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
- u32_encode_bits((srng->entry_size * srng->num_entries),
- HAL_REO1_RING_BASE_MSB_RING_SIZE);
- ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_base_msb_offset(ab), val);
-
- val = u32_encode_bits(srng->ring_id, HAL_REO1_RING_ID_RING_ID) |
- u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
- ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_id_offset(ab), val);
-
- /* interrupt setup */
- val = u32_encode_bits((srng->intr_timer_thres_us >> 3),
- HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD);
-
- val |= u32_encode_bits((srng->intr_batch_cntr_thres_entries * srng->entry_size),
- HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD);
-
- ath12k_hif_write32(ab,
- reg_base + ath12k_hal_reo1_ring_producer_int_setup_offset(ab),
- val);
-
- hp_addr = hal->rdp.paddr +
- ((unsigned long)srng->u.dst_ring.hp_addr -
- (unsigned long)hal->rdp.vaddr);
- ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_hp_addr_lsb_offset(ab),
- hp_addr & HAL_ADDR_LSB_REG_MASK);
- ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_hp_addr_msb_offset(ab),
- hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
-
- /* Initialize head and tail pointers to indicate ring is empty */
- reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
- ath12k_hif_write32(ab, reg_base, 0);
- ath12k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
- *srng->u.dst_ring.hp_addr = 0;
-
- reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
- val = 0;
- if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
- val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
- if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
- val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
- if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
- val |= HAL_REO1_RING_MISC_MSI_SWAP;
- val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
-
- ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_misc_offset(ab), val);
-}
-
-static void ath12k_hal_srng_src_hw_init(struct ath12k_base *ab,
- struct hal_srng *srng)
-{
- struct ath12k_hal *hal = &ab->hal;
- u32 val;
- u64 tp_addr;
- u32 reg_base;
-
- reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
-
- if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
- ath12k_hif_write32(ab, reg_base +
- HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
- srng->msi_addr);
-
- val = u32_encode_bits(((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT),
- HAL_TCL1_RING_MSI1_BASE_MSB_ADDR) |
- HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
- ath12k_hif_write32(ab, reg_base +
- HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
- val);
-
- ath12k_hif_write32(ab, reg_base +
- HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
- srng->msi_data);
- }
-
- ath12k_hif_write32(ab, reg_base, srng->ring_base_paddr);
-
- val = u32_encode_bits(((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT),
- HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
- u32_encode_bits((srng->entry_size * srng->num_entries),
- HAL_TCL1_RING_BASE_MSB_RING_SIZE);
- ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
-
- val = u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
- ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
-
- val = u32_encode_bits(srng->intr_timer_thres_us,
- HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD);
-
- val |= u32_encode_bits((srng->intr_batch_cntr_thres_entries * srng->entry_size),
- HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD);
-
- ath12k_hif_write32(ab,
- reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
- val);
-
- val = 0;
- if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
- val |= u32_encode_bits(srng->u.src_ring.low_threshold,
- HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD);
- }
- ath12k_hif_write32(ab,
- reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
- val);
-
- if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
- tp_addr = hal->rdp.paddr +
- ((unsigned long)srng->u.src_ring.tp_addr -
- (unsigned long)hal->rdp.vaddr);
- ath12k_hif_write32(ab,
- reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
- tp_addr & HAL_ADDR_LSB_REG_MASK);
- ath12k_hif_write32(ab,
- reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
- tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
- }
-
- /* Initialize head and tail pointers to indicate ring is empty */
- reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
- ath12k_hif_write32(ab, reg_base, 0);
- ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
- *srng->u.src_ring.tp_addr = 0;
-
- reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
- val = 0;
- if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
- val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
- if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
- val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
- if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
- val |= HAL_TCL1_RING_MISC_MSI_SWAP;
-
- /* Loop count is not used for SRC rings */
- val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
-
- val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
-
- if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK)
- val |= HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE;
-
- ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
-}
-
static void ath12k_hal_srng_hw_init(struct ath12k_base *ab,
struct hal_srng *srng)
{
@@ -1815,28 +212,6 @@ static void ath12k_hal_srng_hw_init(struct ath12k_base *ab,
ath12k_hal_srng_dst_hw_init(ab, srng);
}
-static int ath12k_hal_srng_get_ring_id(struct ath12k_base *ab,
- enum hal_ring_type type,
- int ring_num, int mac_id)
-{
- struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
- int ring_id;
-
- if (ring_num >= srng_config->max_rings) {
- ath12k_warn(ab, "invalid ring number :%d\n", ring_num);
- return -EINVAL;
- }
-
- ring_id = srng_config->start_ring_id + ring_num;
- if (srng_config->mac_type == ATH12K_HAL_SRNG_PMAC)
- ring_id += mac_id * HAL_SRNG_RINGS_PER_PMAC;
-
- if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
- return -EINVAL;
-
- return ring_id;
-}
-
int ath12k_hal_srng_get_entrysize(struct ath12k_base *ab, u32 ring_type)
{
struct hal_srng_config *srng_config;
@@ -1848,6 +223,7 @@ int ath12k_hal_srng_get_entrysize(struct ath12k_base *ab, u32 ring_type)
return (srng_config->entry_size << 2);
}
+EXPORT_SYMBOL(ath12k_hal_srng_get_entrysize);
int ath12k_hal_srng_get_max_entries(struct ath12k_base *ab, u32 ring_type)
{
@@ -1877,6 +253,7 @@ void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng,
params->msi2_data = srng->msi2_data;
params->flags = srng->flags;
}
+EXPORT_SYMBOL(ath12k_hal_srng_get_params);
dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
struct hal_srng *srng)
@@ -1910,63 +287,32 @@ dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab,
(unsigned long)ab->hal.wrp.vaddr);
}
-u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type)
-{
- switch (type) {
- case HAL_CE_DESC_SRC:
- return sizeof(struct hal_ce_srng_src_desc);
- case HAL_CE_DESC_DST:
- return sizeof(struct hal_ce_srng_dest_desc);
- case HAL_CE_DESC_DST_STATUS:
- return sizeof(struct hal_ce_srng_dst_status_desc);
- }
-
- return 0;
-}
-
-void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr,
- u32 len, u32 id, u8 byte_swap_data)
+void ath12k_hal_ce_src_set_desc(struct ath12k_hal *hal,
+ struct hal_ce_srng_src_desc *desc,
+ dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data)
{
- desc->buffer_addr_low = cpu_to_le32(paddr & HAL_ADDR_LSB_REG_MASK);
- desc->buffer_addr_info =
- le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
- HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI) |
- le32_encode_bits(byte_swap_data,
- HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP) |
- le32_encode_bits(0, HAL_CE_SRC_DESC_ADDR_INFO_GATHER) |
- le32_encode_bits(len, HAL_CE_SRC_DESC_ADDR_INFO_LEN);
- desc->meta_info = le32_encode_bits(id, HAL_CE_SRC_DESC_META_INFO_DATA);
+ hal->ops->ce_src_set_desc(desc, paddr, len, id, byte_swap_data);
}
-void ath12k_hal_ce_dst_set_desc(struct hal_ce_srng_dest_desc *desc, dma_addr_t paddr)
+void ath12k_hal_ce_dst_set_desc(struct ath12k_hal *hal,
+ struct hal_ce_srng_dest_desc *desc,
+ dma_addr_t paddr)
{
- desc->buffer_addr_low = cpu_to_le32(paddr & HAL_ADDR_LSB_REG_MASK);
- desc->buffer_addr_info =
- le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
- HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI);
+ hal->ops->ce_dst_set_desc(desc, paddr);
}
-u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc)
+u32 ath12k_hal_ce_dst_status_get_length(struct ath12k_hal *hal,
+ struct hal_ce_srng_dst_status_desc *desc)
{
- u32 len;
-
- len = le32_get_bits(desc->flags, HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
- desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
-
- return len;
+ return hal->ops->ce_dst_status_get_length(desc);
}
-void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
- dma_addr_t paddr,
- enum hal_rx_buf_return_buf_manager rbm)
+void ath12k_hal_set_link_desc_addr(struct ath12k_hal *hal,
+ struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr, int rbm)
{
- desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK),
- BUFFER_ADDR_INFO0_ADDR);
- desc->buf_addr_info.info1 =
- le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
- BUFFER_ADDR_INFO1_ADDR) |
- le32_encode_bits(rbm, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
- le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE);
+ hal->ops->set_link_desc_addr(desc, cookie, paddr, rbm);
}
void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng)
@@ -1978,6 +324,7 @@ void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng)
return NULL;
}
+EXPORT_SYMBOL(ath12k_hal_srng_dst_peek);
void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng)
@@ -1996,6 +343,7 @@ void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab,
return desc;
}
+EXPORT_SYMBOL(ath12k_hal_srng_dst_get_next_entry);
int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr)
@@ -2018,6 +366,7 @@ int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng,
else
return (srng->ring_size - tp + hp) / srng->entry_size;
}
+EXPORT_SYMBOL(ath12k_hal_srng_dst_num_free);
/* Returns number of available entries in src ring */
int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,
@@ -2059,6 +408,7 @@ void *ath12k_hal_srng_src_next_peek(struct ath12k_base *ab,
return desc;
}
+EXPORT_SYMBOL(ath12k_hal_srng_src_next_peek);
void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng)
@@ -2092,6 +442,7 @@ void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
return desc;
}
+EXPORT_SYMBOL(ath12k_hal_srng_src_get_next_entry);
void *ath12k_hal_srng_src_peek(struct ath12k_base *ab, struct hal_srng *srng)
{
@@ -2103,6 +454,7 @@ void *ath12k_hal_srng_src_peek(struct ath12k_base *ab, struct hal_srng *srng)
return srng->ring_base_vaddr + srng->u.src_ring.hp;
}
+EXPORT_SYMBOL(ath12k_hal_srng_src_peek);
void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
struct hal_srng *srng)
@@ -2162,6 +514,7 @@ void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng)
}
}
}
+EXPORT_SYMBOL(ath12k_hal_srng_access_begin);
/* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()
* should have been called before this.
@@ -2217,112 +570,7 @@ void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng)
srng->timestamp = jiffies;
}
-
-void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab,
- struct hal_wbm_idle_scatter_list *sbuf,
- u32 nsbufs, u32 tot_link_desc,
- u32 end_offset)
-{
- struct ath12k_buffer_addr *link_addr;
- int i;
- u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
- u32 val;
-
- link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
-
- for (i = 1; i < nsbufs; i++) {
- link_addr->info0 = cpu_to_le32(sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK);
-
- link_addr->info1 =
- le32_encode_bits((u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT,
- HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
- le32_encode_bits(BASE_ADDR_MATCH_TAG_VAL,
- HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG);
-
- link_addr = (void *)sbuf[i].vaddr +
- HAL_WBM_IDLE_SCATTER_BUF_SIZE;
- }
-
- val = u32_encode_bits(reg_scatter_buf_sz, HAL_WBM_SCATTER_BUFFER_SIZE) |
- u32_encode_bits(0x1, HAL_WBM_LINK_DESC_IDLE_LIST_MODE);
-
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(ab),
- val);
-
- val = u32_encode_bits(reg_scatter_buf_sz * nsbufs,
- HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST);
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(ab),
- val);
-
- val = u32_encode_bits(sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK,
- BUFFER_ADDR_INFO0_ADDR);
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_SCATTERED_RING_BASE_LSB(ab),
- val);
-
- val = u32_encode_bits(BASE_ADDR_MATCH_TAG_VAL,
- HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG) |
- u32_encode_bits((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT,
- HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32);
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_SCATTERED_RING_BASE_MSB(ab),
- val);
-
- /* Setup head and tail pointers for the idle list */
- val = u32_encode_bits(sbuf[nsbufs - 1].paddr, BUFFER_ADDR_INFO0_ADDR);
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab),
- val);
-
- val = u32_encode_bits(((u64)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT),
- HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
- u32_encode_bits((end_offset >> 2),
- HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1);
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(ab),
- val);
-
- val = u32_encode_bits(sbuf[0].paddr, BUFFER_ADDR_INFO0_ADDR);
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab),
- val);
-
- val = u32_encode_bits(sbuf[0].paddr, BUFFER_ADDR_INFO0_ADDR);
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(ab),
- val);
-
- val = u32_encode_bits(((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT),
- HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
- u32_encode_bits(0, HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1);
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(ab),
- val);
-
- val = 2 * tot_link_desc;
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(ab),
- val);
-
- /* Enable the SRNG */
- val = u32_encode_bits(1, HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE) |
- u32_encode_bits(1, HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE);
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab),
- val);
-}
+EXPORT_SYMBOL(ath12k_hal_srng_access_end);
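The begin/end pair exported here brackets every ring access. A consumer-side usage sketch, assuming (as elsewhere in ath12k) that the caller serializes on srng->lock; descriptor parsing and error handling are elided:

static void example_drain_dst_ring(struct ath12k_base *ab,
				   struct hal_srng *srng)
{
	void *desc;

	spin_lock_bh(&srng->lock);
	ath12k_hal_srng_access_begin(ab, srng);

	/* Reap entries until the cached head pointer is reached. */
	while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng)))
		;	/* process desc here */

	/* Publish the updated tail pointer back to HW. */
	ath12k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);
}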
int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
int ring_num, int mac_id,
@@ -2334,9 +582,8 @@ int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
int ring_id;
u32 idx;
int i;
- u32 reg_base;
- ring_id = ath12k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
+ ring_id = ath12k_hal_srng_get_ring_id(hal, type, ring_num, mac_id);
if (ring_id < 0)
return ring_id;
@@ -2369,8 +616,6 @@ int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
memset(srng->ring_base_vaddr, 0,
(srng->entry_size * srng->num_entries) << 2);
- reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
-
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.hp = 0;
srng->u.src_ring.cached_tp = 0;
@@ -2379,16 +624,7 @@ int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
srng->u.src_ring.low_threshold = params->low_threshold *
srng->entry_size;
if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
- if (!ab->hw_params->supports_shadow_regs)
- srng->u.src_ring.hp_addr =
- (u32 *)((unsigned long)ab->mem + reg_base);
- else
- ath12k_dbg(ab, ATH12K_DBG_HAL,
- "hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
- type, ring_num,
- reg_base,
- (unsigned long)srng->u.src_ring.hp_addr -
- (unsigned long)ab->mem);
+ ath12k_hal_set_umac_srng_ptr_addr(ab, srng);
} else {
idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
@@ -2409,17 +645,7 @@ int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
srng->u.dst_ring.cached_hp = 0;
srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
- if (!ab->hw_params->supports_shadow_regs)
- srng->u.dst_ring.tp_addr =
- (u32 *)((unsigned long)ab->mem + reg_base +
- (HAL_REO1_RING_TP - HAL_REO1_RING_HP));
- else
- ath12k_dbg(ab, ATH12K_DBG_HAL,
- "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
- type, ring_num,
- reg_base + HAL_REO1_RING_TP - HAL_REO1_RING_HP,
- (unsigned long)srng->u.dst_ring.tp_addr -
- (unsigned long)ab->mem);
+ ath12k_hal_set_umac_srng_ptr_addr(ab, srng);
} else {
/* For PMAC & DMAC rings, tail pointer updates will be done
* through FW by writing to a shared memory location
@@ -2444,68 +670,6 @@ int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
return ring_id;
}
-static void ath12k_hal_srng_update_hp_tp_addr(struct ath12k_base *ab,
- int shadow_cfg_idx,
- enum hal_ring_type ring_type,
- int ring_num)
-{
- struct hal_srng *srng;
- struct ath12k_hal *hal = &ab->hal;
- int ring_id;
- struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
-
- ring_id = ath12k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
- if (ring_id < 0)
- return;
-
- srng = &hal->srng_list[ring_id];
-
- if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
- srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
- (unsigned long)ab->mem);
- else
- srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
- (unsigned long)ab->mem);
-}
-
-int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab,
- enum hal_ring_type ring_type,
- int ring_num)
-{
- struct ath12k_hal *hal = &ab->hal;
- struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
- int shadow_cfg_idx = hal->num_shadow_reg_configured;
- u32 target_reg;
-
- if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
- return -EINVAL;
-
- hal->num_shadow_reg_configured++;
-
- target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
- target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
- ring_num;
-
- /* For destination ring, shadow the TP */
- if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
- target_reg += HAL_OFFSET_FROM_HP_TO_TP;
-
- hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
-
- /* update hp/tp addr to hal structure*/
- ath12k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
- ring_num);
-
- ath12k_dbg(ab, ATH12K_DBG_HAL,
- "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
- target_reg,
- HAL_SHADOW_REG(shadow_cfg_idx),
- shadow_cfg_idx,
- ring_type, ring_num);
-
- return 0;
-}
-
void ath12k_hal_srng_shadow_config(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
@@ -2551,18 +715,16 @@ void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
ath12k_hal_srng_access_end(ab, srng);
}
-static void ath12k_hal_register_srng_lock_keys(struct ath12k_base *ab)
+static void ath12k_hal_register_srng_lock_keys(struct ath12k_hal *hal)
{
- struct ath12k_hal *hal = &ab->hal;
u32 ring_id;
for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
lockdep_register_key(&hal->srng_list[ring_id].lock_key);
}
-static void ath12k_hal_unregister_srng_lock_keys(struct ath12k_base *ab)
+static void ath12k_hal_unregister_srng_lock_keys(struct ath12k_hal *hal)
{
- struct ath12k_hal *hal = &ab->hal;
u32 ring_id;
for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
@@ -2574,26 +736,26 @@ int ath12k_hal_srng_init(struct ath12k_base *ab)
struct ath12k_hal *hal = &ab->hal;
int ret;
- memset(hal, 0, sizeof(*hal));
-
- ret = ab->hw_params->hal_ops->create_srng_config(ab);
+ ret = hal->ops->create_srng_config(hal);
if (ret)
goto err_hal;
- ret = ath12k_hal_alloc_cont_rdp(ab);
+ hal->dev = ab->dev;
+
+ ret = ath12k_hal_alloc_cont_rdp(hal);
if (ret)
goto err_hal;
- ret = ath12k_hal_alloc_cont_wrp(ab);
+ ret = ath12k_hal_alloc_cont_wrp(hal);
if (ret)
goto err_free_cont_rdp;
- ath12k_hal_register_srng_lock_keys(ab);
+ ath12k_hal_register_srng_lock_keys(hal);
return 0;
err_free_cont_rdp:
- ath12k_hal_free_cont_rdp(ab);
+ ath12k_hal_free_cont_rdp(hal);
err_hal:
return ret;
@@ -2603,9 +765,9 @@ void ath12k_hal_srng_deinit(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
- ath12k_hal_unregister_srng_lock_keys(ab);
- ath12k_hal_free_cont_rdp(ab);
- ath12k_hal_free_cont_wrp(ab);
+ ath12k_hal_unregister_srng_lock_keys(hal);
+ ath12k_hal_free_cont_rdp(hal);
+ ath12k_hal_free_cont_wrp(hal);
kfree(hal->srng_config);
hal->srng_config = NULL;
}
@@ -2661,3 +823,49 @@ void ath12k_hal_dump_srng_stats(struct ath12k_base *ab)
jiffies_to_msecs(jiffies - srng->timestamp));
}
}
+
+void *ath12k_hal_encode_tlv64_hdr(void *tlv, u64 tag, u64 len)
+{
+ struct hal_tlv_64_hdr *tlv64 = tlv;
+
+ tlv64->tl = le64_encode_bits(tag, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(len, HAL_TLV_HDR_LEN);
+
+ return tlv64->value;
+}
+EXPORT_SYMBOL(ath12k_hal_encode_tlv64_hdr);
+
+void *ath12k_hal_encode_tlv32_hdr(void *tlv, u64 tag, u64 len)
+{
+ struct hal_tlv_hdr *tlv32 = tlv;
+
+ tlv32->tl = le32_encode_bits(tag, HAL_TLV_HDR_TAG) |
+ le32_encode_bits(len, HAL_TLV_HDR_LEN);
+
+ return tlv32->value;
+}
+EXPORT_SYMBOL(ath12k_hal_encode_tlv32_hdr);
+
+u16 ath12k_hal_decode_tlv64_hdr(void *tlv, void **desc)
+{
+ struct hal_tlv_64_hdr *tlv64 = tlv;
+ u16 tag;
+
+ tag = le64_get_bits(tlv64->tl, HAL_SRNG_TLV_HDR_TAG);
+ *desc = tlv64->value;
+
+ return tag;
+}
+EXPORT_SYMBOL(ath12k_hal_decode_tlv64_hdr);
+
+u16 ath12k_hal_decode_tlv32_hdr(void *tlv, void **desc)
+{
+ struct hal_tlv_hdr *tlv32 = tlv;
+ u16 tag;
+
+ tag = le32_get_bits(tlv32->tl, HAL_SRNG_TLV_HDR_TAG);
+ *desc = tlv32->value;
+
+ return tag;
+}
+EXPORT_SYMBOL(ath12k_hal_decode_tlv32_hdr);
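A round-trip sketch for the 32-bit TLV helpers added above; the tag and payload values are placeholders:

static void example_tlv32_roundtrip(void *buf)
{
	void *payload, *desc;
	u16 tag;

	/* Write the header; the return value points at the payload. */
	payload = ath12k_hal_encode_tlv32_hdr(buf, 0x1, sizeof(u32));
	*(u32 *)payload = 0xcafe;

	/* Decode: the tag comes back, desc points at the same payload. */
	tag = ath12k_hal_decode_tlv32_hdr(buf, &desc);
	WARN_ON(tag != 0x1 || desc != payload);
}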
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index efe00e167998..43e3880f8257 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -1,16 +1,43 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_HAL_H
#define ATH12K_HAL_H
-#include "hal_desc.h"
-#include "rx_desc.h"
+#include "hw.h"
struct ath12k_base;
+
+#define HAL_DESC_REO_NON_QOS_TID 16
+
+#define HAL_INVALID_PEERID 0x3fff
+#define VHT_SIG_SU_NSS_MASK 0x7
+
+#define HAL_TX_ADDRX_EN 1
+#define HAL_TX_ADDRY_EN 2
+
+#define HAL_TX_ADDR_SEARCH_DEFAULT 0
+#define HAL_TX_ADDR_SEARCH_INDEX 1
+
+#define HAL_RX_MAX_MPDU 256
+#define HAL_RX_NUM_WORDS_PER_PPDU_BITMAP (HAL_RX_MAX_MPDU >> 5)
+
+/* TODO: 16 entries per radio times MAX_VAPS_SUPPORTED */
+#define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX 32
+#define HAL_DSCP_TID_TBL_SIZE 24
+
+#define EHT_MAX_USER_INFO 4
+#define HAL_RX_MON_MAX_AGGR_SIZE 128
+#define HAL_MAX_UL_MU_USERS 37
+
+#define MAX_USER_POS 8
+#define MAX_MU_GROUP_ID 64
+#define MAX_MU_GROUP_SHOW 16
+#define MAX_MU_GROUP_LENGTH (6 * MAX_MU_GROUP_SHOW)
+
#define HAL_CE_REMAP_REG_BASE (ab->ce_remap_base_addr)
#define HAL_LINK_DESC_SIZE (32 << 2)
@@ -24,364 +51,37 @@ struct ath12k_base;
#define HAL_RING_BASE_ALIGN 8
#define HAL_REO_QLUT_ADDR_ALIGN 256
+#define HAL_ADDR_LSB_REG_MASK 0xffffffff
+#define HAL_ADDR_MSB_REG_SHIFT 32
+
+#define HAL_WBM2SW_REL_ERR_RING_NUM 3
+
+#define HAL_SHADOW_NUM_REGS_MAX 40
+
#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704
/* TODO: Check with hw team on the supported scatter buf size */
#define HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE 8
#define HAL_WBM_IDLE_SCATTER_BUF_SIZE (HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX - \
HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE)
-/* TODO: 16 entries per radio times MAX_VAPS_SUPPORTED */
-#define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX 32
-#define HAL_DSCP_TID_TBL_SIZE 24
-
-/* calculate the register address from bar0 of shadow register x */
-#define HAL_SHADOW_BASE_ADDR 0x000008fc
-#define HAL_SHADOW_NUM_REGS 40
-#define HAL_HP_OFFSET_IN_REG_START 1
-#define HAL_OFFSET_FROM_HP_TO_TP 4
-
-#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
-#define HAL_REO_QDESC_MAX_PEERID 8191
-
-/* WCSS Relative address */
-#define HAL_SEQ_WCSS_CMEM_OFFSET 0x00100000
-#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
-#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
-#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
-#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) \
- ((ab)->hw_params->regs->hal_umac_ce0_src_reg_base)
-#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) \
- ((ab)->hw_params->regs->hal_umac_ce0_dest_reg_base)
-#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) \
- ((ab)->hw_params->regs->hal_umac_ce1_src_reg_base)
-#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) \
- ((ab)->hw_params->regs->hal_umac_ce1_dest_reg_base)
-#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
-
-#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000
-
-#define HAL_TCL_SW_CONFIG_BANK_ADDR 0x00a4408c
-
-/* SW2TCL(x) R0 ring configuration address */
-#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000020
-#define HAL_TCL1_RING_DSCP_TID_MAP 0x00000240
-#define HAL_TCL1_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_tcl1_ring_base_lsb)
-#define HAL_TCL1_RING_BASE_MSB(ab) \
- ((ab)->hw_params->regs->hal_tcl1_ring_base_msb)
-#define HAL_TCL1_RING_ID(ab) ((ab)->hw_params->regs->hal_tcl1_ring_id)
-#define HAL_TCL1_RING_MISC(ab) \
- ((ab)->hw_params->regs->hal_tcl1_ring_misc)
-#define HAL_TCL1_RING_TP_ADDR_LSB(ab) \
- ((ab)->hw_params->regs->hal_tcl1_ring_tp_addr_lsb)
-#define HAL_TCL1_RING_TP_ADDR_MSB(ab) \
- ((ab)->hw_params->regs->hal_tcl1_ring_tp_addr_msb)
-#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) \
- ((ab)->hw_params->regs->hal_tcl1_ring_consumer_int_setup_ix0)
-#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) \
- ((ab)->hw_params->regs->hal_tcl1_ring_consumer_int_setup_ix1)
-#define HAL_TCL1_RING_MSI1_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_lsb)
-#define HAL_TCL1_RING_MSI1_BASE_MSB(ab) \
- ((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_msb)
-#define HAL_TCL1_RING_MSI1_DATA(ab) \
- ((ab)->hw_params->regs->hal_tcl1_ring_msi1_data)
-#define HAL_TCL2_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_tcl2_ring_base_lsb)
-#define HAL_TCL_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_tcl_ring_base_lsb)
-
-#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
- (HAL_TCL1_RING_MSI1_BASE_LSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
-#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
- (HAL_TCL1_RING_MSI1_BASE_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
-#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
- (HAL_TCL1_RING_MSI1_DATA(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
-#define HAL_TCL1_RING_BASE_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
- (HAL_TCL1_RING_BASE_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
-#define HAL_TCL1_RING_ID_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
- (HAL_TCL1_RING_ID(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
- (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
- (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
-#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
- (HAL_TCL1_RING_TP_ADDR_LSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
-#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
- (HAL_TCL1_RING_TP_ADDR_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
-#define HAL_TCL1_RING_MISC_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
- (HAL_TCL1_RING_MISC(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
-
-/* SW2TCL(x) R2 ring pointers (head/tail) address */
-#define HAL_TCL1_RING_HP 0x00002000
-#define HAL_TCL1_RING_TP 0x00002004
-#define HAL_TCL2_RING_HP 0x00002008
-#define HAL_TCL_RING_HP 0x00002028
-
-#define HAL_TCL1_RING_TP_OFFSET \
- (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
-
-/* TCL STATUS ring address */
-#define HAL_TCL_STATUS_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_tcl_status_ring_base_lsb)
-#define HAL_TCL_STATUS_RING_HP 0x00002048
-
-/* PPE2TCL1 Ring address */
-#define HAL_TCL_PPE2TCL1_RING_BASE_LSB 0x00000c48
-#define HAL_TCL_PPE2TCL1_RING_HP 0x00002038
-
-/* WBM PPE Release Ring address */
-#define HAL_WBM_PPE_RELEASE_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_ppe_rel_ring_base)
-#define HAL_WBM_PPE_RELEASE_RING_HP 0x00003020
-
-/* REO2SW(x) R0 ring configuration address */
-#define HAL_REO1_GEN_ENABLE 0x00000000
-#define HAL_REO1_MISC_CTRL_ADDR(ab) \
- ((ab)->hw_params->regs->hal_reo1_misc_ctrl_addr)
-#define HAL_REO1_DEST_RING_CTRL_IX_0 0x00000004
-#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
-#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
-#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
-#define HAL_REO1_QDESC_ADDR(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_addr)
-#define HAL_REO1_QDESC_MAX_PEERID(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_max_peerid)
-#define HAL_REO1_SW_COOKIE_CFG0(ab) ((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg0)
-#define HAL_REO1_SW_COOKIE_CFG1(ab) ((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg1)
-#define HAL_REO1_QDESC_LUT_BASE0(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_lut_base0)
-#define HAL_REO1_QDESC_LUT_BASE1(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_lut_base1)
-#define HAL_REO1_RING_BASE_LSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_base_lsb)
-#define HAL_REO1_RING_BASE_MSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_base_msb)
-#define HAL_REO1_RING_ID(ab) ((ab)->hw_params->regs->hal_reo1_ring_id)
-#define HAL_REO1_RING_MISC(ab) ((ab)->hw_params->regs->hal_reo1_ring_misc)
-#define HAL_REO1_RING_HP_ADDR_LSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_hp_addr_lsb)
-#define HAL_REO1_RING_HP_ADDR_MSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_hp_addr_msb)
-#define HAL_REO1_RING_PRODUCER_INT_SETUP(ab) \
- ((ab)->hw_params->regs->hal_reo1_ring_producer_int_setup)
-#define HAL_REO1_RING_MSI1_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_reo1_ring_msi1_base_lsb)
-#define HAL_REO1_RING_MSI1_BASE_MSB(ab) \
- ((ab)->hw_params->regs->hal_reo1_ring_msi1_base_msb)
-#define HAL_REO1_RING_MSI1_DATA(ab) ((ab)->hw_params->regs->hal_reo1_ring_msi1_data)
-#define HAL_REO2_RING_BASE_LSB(ab) ((ab)->hw_params->regs->hal_reo2_ring_base)
-#define HAL_REO1_AGING_THRESH_IX_0(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix0)
-#define HAL_REO1_AGING_THRESH_IX_1(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix1)
-#define HAL_REO1_AGING_THRESH_IX_2(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix2)
-#define HAL_REO1_AGING_THRESH_IX_3(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix3)
-
-/* REO2SW(x) R2 ring pointers (head/tail) address */
-#define HAL_REO1_RING_HP 0x00003048
-#define HAL_REO1_RING_TP 0x0000304c
-#define HAL_REO2_RING_HP 0x00003050
-
-#define HAL_REO1_RING_TP_OFFSET (HAL_REO1_RING_TP - HAL_REO1_RING_HP)
-
-/* REO2SW0 ring configuration address */
-#define HAL_REO_SW0_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_reo2_sw0_ring_base)
-
-/* REO2SW0 R2 ring pointer (head/tail) address */
-#define HAL_REO_SW0_RING_HP 0x00003088
-
-/* REO CMD R0 address */
-#define HAL_REO_CMD_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_reo_cmd_ring_base)
-
-/* REO CMD R2 address */
-#define HAL_REO_CMD_HP 0x00003020
-
-/* SW2REO R0 address */
-#define HAL_SW2REO_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_sw2reo_ring_base)
-#define HAL_SW2REO1_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_sw2reo1_ring_base)
-
-/* SW2REO R2 address */
-#define HAL_SW2REO_RING_HP 0x00003028
-#define HAL_SW2REO1_RING_HP 0x00003030
-
-/* CE ring R0 address */
-#define HAL_CE_SRC_RING_BASE_LSB 0x00000000
-#define HAL_CE_DST_RING_BASE_LSB 0x00000000
-#define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058
-#define HAL_CE_DST_RING_CTRL 0x000000b0
-
-/* CE ring R2 address */
-#define HAL_CE_DST_RING_HP 0x00000400
-#define HAL_CE_DST_STATUS_RING_HP 0x00000408
-
-/* REO status address */
-#define HAL_REO_STATUS_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_reo_status_ring_base)
-#define HAL_REO_STATUS_HP 0x000030a8
-
-/* WBM Idle R0 address */
-#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_wbm_idle_ring_base_lsb)
-#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab) \
- ((ab)->hw_params->regs->hal_wbm_idle_ring_misc_addr)
-#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(ab) \
- ((ab)->hw_params->regs->hal_wbm_r0_idle_list_cntl_addr)
-#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(ab) \
- ((ab)->hw_params->regs->hal_wbm_r0_idle_list_size_addr)
-#define HAL_WBM_SCATTERED_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_wbm_scattered_ring_base_lsb)
-#define HAL_WBM_SCATTERED_RING_BASE_MSB(ab) \
- ((ab)->hw_params->regs->hal_wbm_scattered_ring_base_msb)
-#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab) \
- ((ab)->hw_params->regs->hal_wbm_scattered_desc_head_info_ix0)
-#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(ab) \
- ((ab)->hw_params->regs->hal_wbm_scattered_desc_head_info_ix1)
-#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(ab) \
- ((ab)->hw_params->regs->hal_wbm_scattered_desc_tail_info_ix0)
-#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(ab) \
- ((ab)->hw_params->regs->hal_wbm_scattered_desc_tail_info_ix1)
-#define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(ab) \
- ((ab)->hw_params->regs->hal_wbm_scattered_desc_ptr_hp_addr)
-
-/* WBM Idle R2 address */
-#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b8
-
-/* SW2WBM R0 release address */
-#define HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_wbm_sw_release_ring_base_lsb)
-#define HAL_WBM_SW1_RELEASE_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_wbm_sw1_release_ring_base_lsb)
-
-/* SW2WBM R2 release address */
-#define HAL_WBM_SW_RELEASE_RING_HP 0x00003010
-#define HAL_WBM_SW1_RELEASE_RING_HP 0x00003018
-
-/* WBM2SW R0 release address */
-#define HAL_WBM0_RELEASE_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_wbm0_release_ring_base_lsb)
-
-#define HAL_WBM1_RELEASE_RING_BASE_LSB(ab) \
- ((ab)->hw_params->regs->hal_wbm1_release_ring_base_lsb)
-
-/* WBM2SW R2 release address */
-#define HAL_WBM0_RELEASE_RING_HP 0x000030c8
-#define HAL_WBM1_RELEASE_RING_HP 0x000030d0
-
-/* WBM cookie config address and mask */
-#define HAL_WBM_SW_COOKIE_CFG0 0x00000040
-#define HAL_WBM_SW_COOKIE_CFG1 0x00000044
-#define HAL_WBM_SW_COOKIE_CFG2 0x00000090
-#define HAL_WBM_SW_COOKIE_CONVERT_CFG 0x00000094
-
-#define HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB GENMASK(7, 0)
-#define HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB GENMASK(12, 8)
-#define HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB GENMASK(17, 13)
-#define HAL_WBM_SW_COOKIE_CFG_ALIGN BIT(18)
-#define HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN BIT(0)
-#define HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN BIT(1)
-#define HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN BIT(3)
-
-#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN BIT(1)
-#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN BIT(2)
-#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN BIT(3)
-#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN BIT(4)
-#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN BIT(5)
-#define HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN BIT(8)
-
-/* TCL ring field mask and offset */
-#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
-#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
-#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
-#define HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE BIT(0)
-#define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1)
-#define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3)
-#define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4)
-#define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5)
-#define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6)
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16)
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0)
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0)
-#define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
-#define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
-#define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(23)
-#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0)
-#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0)
-#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3)
-#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6)
-#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9)
-#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12)
-#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15)
-#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
-#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
-
-/* REO ring field mask and offset */
-#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
-#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
-#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
-#define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
-#define HAL_REO1_RING_MISC_MSI_SWAP BIT(3)
-#define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4)
-#define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5)
-#define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6)
-#define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16)
-#define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0)
-#define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
-#define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
-#define HAL_REO1_MISC_CTL_FRAG_DST_RING GENMASK(20, 17)
-#define HAL_REO1_MISC_CTL_BAR_DST_RING GENMASK(24, 21)
-#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
-#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
-#define HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB GENMASK(7, 0)
-#define HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB GENMASK(12, 8)
-#define HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB GENMASK(17, 13)
-#define HAL_REO1_SW_COOKIE_CFG_ALIGN BIT(18)
-#define HAL_REO1_SW_COOKIE_CFG_ENABLE BIT(19)
-#define HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE BIT(20)
-#define HAL_REO_QDESC_ADDR_READ_LUT_ENABLE BIT(7)
-#define HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY BIT(6)
-
-/* CE ring bit field mask and shift */
-#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
-
-#define HAL_ADDR_LSB_REG_MASK 0xffffffff
-
-#define HAL_ADDR_MSB_REG_SHIFT 32
-
-/* WBM ring bit field mask and shift */
-#define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1)
-#define HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2)
-#define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16)
-#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0)
-#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8)
-
-#define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8)
-#define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8)
-
-#define HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE BIT(6)
-#define HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE BIT(0)
-
-#define BASE_ADDR_MATCH_TAG_VAL 0x5
-
-#define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff
-#define HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE 0x000fffff
-#define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff
-#define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff
-#define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
-#define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff
-#define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff
-#define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
-#define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff
-#define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff
-#define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
-#define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x000fffff
-#define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff
-#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
-#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
-#define HAL_RXDMA_RING_MAX_SIZE_BE 0x000fffff
-#define HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
-
-#define HAL_WBM2SW_REL_ERR_RING_NUM 3
-/* Add any other errors here and return them in
- * ath12k_hal_rx_desc_get_err().
- */
-
-#define HAL_IPQ5332_CE_WFSS_REG_BASE 0x740000
-#define HAL_IPQ5332_CE_SIZE 0x100000
+#define HAL_AST_IDX_INVALID 0xFFFF
+#define HAL_RX_MAX_MCS 12
+#define HAL_RX_MAX_MCS_HT 31
+#define HAL_RX_MAX_MCS_VHT 9
+#define HAL_RX_MAX_MCS_HE 11
+#define HAL_RX_MAX_MCS_BE 15
+#define HAL_RX_MAX_NSS 8
+#define HAL_RX_MAX_NUM_LEGACY_RATES 12
+
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VALID BIT(30)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VER BIT(31)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_NSS GENMASK(2, 0)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_MCS GENMASK(6, 3)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_LDPC BIT(7)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_DCM BIT(8)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_START GENMASK(15, 9)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE GENMASK(18, 16)
+#define HAL_RX_FCS_LEN 4
enum hal_srng_ring_id {
HAL_SRNG_RING_ID_REO2SW0 = 0,
@@ -525,6 +225,64 @@ enum hal_srng_ring_id {
#define HAL_SRNG_RING_ID_MAX (HAL_SRNG_RING_ID_DMAC_CMN_ID_END + \
HAL_SRNG_NUM_PMAC_RINGS)
+enum hal_rx_su_mu_coding {
+ HAL_RX_SU_MU_CODING_BCC,
+ HAL_RX_SU_MU_CODING_LDPC,
+ HAL_RX_SU_MU_CODING_MAX,
+};
+
+enum hal_rx_gi {
+ HAL_RX_GI_0_8_US,
+ HAL_RX_GI_0_4_US,
+ HAL_RX_GI_1_6_US,
+ HAL_RX_GI_3_2_US,
+ HAL_RX_GI_MAX,
+};
+
+enum hal_rx_bw {
+ HAL_RX_BW_20MHZ,
+ HAL_RX_BW_40MHZ,
+ HAL_RX_BW_80MHZ,
+ HAL_RX_BW_160MHZ,
+ HAL_RX_BW_320MHZ,
+ HAL_RX_BW_MAX,
+};
+
+enum hal_rx_preamble {
+ HAL_RX_PREAMBLE_11A,
+ HAL_RX_PREAMBLE_11B,
+ HAL_RX_PREAMBLE_11N,
+ HAL_RX_PREAMBLE_11AC,
+ HAL_RX_PREAMBLE_11AX,
+ HAL_RX_PREAMBLE_11BA,
+ HAL_RX_PREAMBLE_11BE,
+ HAL_RX_PREAMBLE_MAX,
+};
+
+enum hal_rx_reception_type {
+ HAL_RX_RECEPTION_TYPE_SU,
+ HAL_RX_RECEPTION_TYPE_MU_MIMO,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO,
+ HAL_RX_RECEPTION_TYPE_MAX,
+};
+
+enum hal_rx_legacy_rate {
+ HAL_RX_LEGACY_RATE_1_MBPS,
+ HAL_RX_LEGACY_RATE_2_MBPS,
+ HAL_RX_LEGACY_RATE_5_5_MBPS,
+ HAL_RX_LEGACY_RATE_6_MBPS,
+ HAL_RX_LEGACY_RATE_9_MBPS,
+ HAL_RX_LEGACY_RATE_11_MBPS,
+ HAL_RX_LEGACY_RATE_12_MBPS,
+ HAL_RX_LEGACY_RATE_18_MBPS,
+ HAL_RX_LEGACY_RATE_24_MBPS,
+ HAL_RX_LEGACY_RATE_36_MBPS,
+ HAL_RX_LEGACY_RATE_48_MBPS,
+ HAL_RX_LEGACY_RATE_54_MBPS,
+ HAL_RX_LEGACY_RATE_INVALID,
+};
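One way a caller might consume the enum above is a lookup table in 100 kbps units (so 5.5 Mbps stays representable); this table is illustrative and not part of the patch:

static const u16 example_legacy_rate_100kbps[] = {
	[HAL_RX_LEGACY_RATE_1_MBPS]	= 10,
	[HAL_RX_LEGACY_RATE_2_MBPS]	= 20,
	[HAL_RX_LEGACY_RATE_5_5_MBPS]	= 55,
	[HAL_RX_LEGACY_RATE_6_MBPS]	= 60,
	[HAL_RX_LEGACY_RATE_9_MBPS]	= 90,
	[HAL_RX_LEGACY_RATE_11_MBPS]	= 110,
	[HAL_RX_LEGACY_RATE_12_MBPS]	= 120,
	[HAL_RX_LEGACY_RATE_18_MBPS]	= 180,
	[HAL_RX_LEGACY_RATE_24_MBPS]	= 240,
	[HAL_RX_LEGACY_RATE_36_MBPS]	= 360,
	[HAL_RX_LEGACY_RATE_48_MBPS]	= 480,
	[HAL_RX_LEGACY_RATE_54_MBPS]	= 540,
};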
+
enum hal_ring_type {
HAL_REO_DST,
HAL_REO_EXCEPTION,
@@ -554,11 +312,6 @@ enum hal_ring_type {
HAL_MAX_RING_TYPES,
};
-#define HAL_RX_MAX_BA_WINDOW 256
-
-#define HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC (100 * 1000)
-#define HAL_DEFAULT_VO_REO_TIMEOUT_USEC (40 * 1000)
-
/**
* enum hal_reo_cmd_type: Enum for REO command type
* @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
@@ -597,6 +350,128 @@ enum hal_reo_cmd_status {
HAL_REO_CMD_DRAIN = 0xff,
};
+enum hal_tcl_encap_type {
+ HAL_TCL_ENCAP_TYPE_RAW,
+ HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
+ HAL_TCL_ENCAP_TYPE_ETHERNET,
+ HAL_TCL_ENCAP_TYPE_802_3 = 3,
+ HAL_TCL_ENCAP_TYPE_MAX
+};
+
+enum hal_tcl_desc_type {
+ HAL_TCL_DESC_TYPE_BUFFER,
+ HAL_TCL_DESC_TYPE_EXT_DESC,
+ HAL_TCL_DESC_TYPE_MAX,
+};
+
+enum hal_reo_dest_ring_buffer_type {
+ HAL_REO_DEST_RING_BUFFER_TYPE_MSDU,
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC,
+};
+
+enum hal_reo_dest_ring_push_reason {
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION,
+};
+
+enum hal_reo_entr_rxdma_push_reason {
+ HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_ROUTING_INSTRUCTION,
+ HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_RX_FLUSH,
+};
+
+enum hal_reo_dest_ring_error_code {
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID,
+ HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA,
+ HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED,
+ HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED,
+ HAL_REO_DEST_RING_ERROR_CODE_MAX,
+};
+
+enum hal_reo_entr_rxdma_ecode {
+ HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MULTICAST_ECHO_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_MISMATCH_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_GRPCAST_AMSDU_WDS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
+};
+
+enum hal_wbm_htt_tx_comp_status {
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_OK,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX,
+};
+
+enum hal_encrypt_type {
+ HAL_ENCRYPT_TYPE_WEP_40,
+ HAL_ENCRYPT_TYPE_WEP_104,
+ HAL_ENCRYPT_TYPE_TKIP_NO_MIC,
+ HAL_ENCRYPT_TYPE_WEP_128,
+ HAL_ENCRYPT_TYPE_TKIP_MIC,
+ HAL_ENCRYPT_TYPE_WAPI,
+ HAL_ENCRYPT_TYPE_CCMP_128,
+ HAL_ENCRYPT_TYPE_OPEN,
+ HAL_ENCRYPT_TYPE_CCMP_256,
+ HAL_ENCRYPT_TYPE_GCMP_128,
+ HAL_ENCRYPT_TYPE_AES_GCMP_256,
+ HAL_ENCRYPT_TYPE_WAPI_GCM_SM4,
+};
+
+enum hal_tx_rate_stats_bw {
+ HAL_TX_RATE_STATS_BW_20,
+ HAL_TX_RATE_STATS_BW_40,
+ HAL_TX_RATE_STATS_BW_80,
+ HAL_TX_RATE_STATS_BW_160,
+};
+
+enum hal_tx_rate_stats_pkt_type {
+ HAL_TX_RATE_STATS_PKT_TYPE_11A,
+ HAL_TX_RATE_STATS_PKT_TYPE_11B,
+ HAL_TX_RATE_STATS_PKT_TYPE_11N,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AC,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AX,
+ HAL_TX_RATE_STATS_PKT_TYPE_11BA,
+ HAL_TX_RATE_STATS_PKT_TYPE_11BE,
+};
+
+enum hal_tx_rate_stats_sgi {
+ HAL_TX_RATE_STATS_SGI_08US,
+ HAL_TX_RATE_STATS_SGI_04US,
+ HAL_TX_RATE_STATS_SGI_16US,
+ HAL_TX_RATE_STATS_SGI_32US,
+};
+
struct hal_wbm_idle_scatter_list {
dma_addr_t paddr;
struct hal_wbm_link_desc *vaddr;
@@ -625,6 +500,339 @@ enum hal_srng_dir {
HAL_SRNG_DIR_DST
};
+enum rx_msdu_start_pkt_type {
+ RX_MSDU_START_PKT_TYPE_11A,
+ RX_MSDU_START_PKT_TYPE_11B,
+ RX_MSDU_START_PKT_TYPE_11N,
+ RX_MSDU_START_PKT_TYPE_11AC,
+ RX_MSDU_START_PKT_TYPE_11AX,
+ RX_MSDU_START_PKT_TYPE_11BA,
+ RX_MSDU_START_PKT_TYPE_11BE,
+};
+
+enum rx_msdu_start_sgi {
+ RX_MSDU_START_SGI_0_8_US,
+ RX_MSDU_START_SGI_0_4_US,
+ RX_MSDU_START_SGI_1_6_US,
+ RX_MSDU_START_SGI_3_2_US,
+};
+
+enum rx_msdu_start_recv_bw {
+ RX_MSDU_START_RECV_BW_20MHZ,
+ RX_MSDU_START_RECV_BW_40MHZ,
+ RX_MSDU_START_RECV_BW_80MHZ,
+ RX_MSDU_START_RECV_BW_160MHZ,
+};
+
+enum rx_msdu_start_reception_type {
+ RX_MSDU_START_RECEPTION_TYPE_SU,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+
+enum rx_desc_decap_type {
+ RX_DESC_DECAP_TYPE_RAW,
+ RX_DESC_DECAP_TYPE_NATIVE_WIFI,
+ RX_DESC_DECAP_TYPE_ETHERNET2_DIX,
+ RX_DESC_DECAP_TYPE_8023,
+};
+
+struct hal_rx_user_status {
+ u32 mcs:4,
+ nss:3,
+ ofdma_info_valid:1,
+ ul_ofdma_ru_start_index:7,
+ ul_ofdma_ru_width:7,
+ ul_ofdma_ru_size:8;
+ u32 ul_ofdma_user_v0_word0;
+ u32 ul_ofdma_user_v0_word1;
+ u32 ast_index;
+ u32 tid;
+ u16 tcp_msdu_count;
+ u16 tcp_ack_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 frame_control;
+ u8 frame_control_info_valid;
+ u8 data_sequence_control_info_valid;
+ u16 first_data_seq_ctrl;
+ u32 preamble_type;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u8 rs_flags;
+ u8 ldpc;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
+ u32 mpdu_ok_byte_count;
+ u32 mpdu_err_byte_count;
+ bool ampdu_present;
+ u16 ampdu_id;
+};
+
+struct hal_rx_u_sig_info {
+ bool ul_dl;
+ u8 bw;
+ u8 ppdu_type_comp_mode;
+ u8 eht_sig_mcs;
+ u8 num_eht_sig_sym;
+ struct ieee80211_radiotap_eht_usig usig;
+};
+
+struct hal_rx_tlv_aggr_info {
+ bool in_progress;
+ u16 cur_len;
+ u16 tlv_tag;
+ u8 buf[HAL_RX_MON_MAX_AGGR_SIZE];
+};
+
+struct hal_rx_radiotap_eht {
+ __le32 known;
+ __le32 data[9];
+};
+
+struct hal_rx_eht_info {
+ u8 num_user_info;
+ struct hal_rx_radiotap_eht eht;
+ u32 user_info[EHT_MAX_USER_INFO];
+};
+
+struct hal_rx_msdu_desc_info {
+ u32 msdu_flags;
+ u16 msdu_len; /* 14 bits for length */
+};
+
+/* hal_mon_buf_ring
+ * Producer : SW
+ * Consumer : Monitor
+ *
+ * paddr_lo
+ * Lower 32-bit physical address of the buffer pointer from the source ring.
+ * paddr_hi
+ * bit range 7-0 : upper 8 bits of the physical address.
+ * bit range 31-8 : reserved.
+ * cookie
+ * Consumer: RxMon/TxMon. 64-bit cookie of the buffer.
+ */
+struct hal_mon_buf_ring {
+ __le32 paddr_lo;
+ __le32 paddr_hi;
+ __le64 cookie;
+};
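
Per the layout comment above, SW (the producer) places the low 32 address bits in paddr_lo and only the next 8 bits in paddr_hi, leaving the cookie entirely to software. A hedged fill-helper sketch (name assumed, not part of this patch):

/* Hypothetical sketch: populate one monitor buffer ring entry;
 * paddr_hi bits 31-8 stay reserved/zero as documented above.
 */
static void ath12k_hal_mon_buf_fill(struct hal_mon_buf_ring *entry,
				    dma_addr_t paddr, u64 cookie)
{
	entry->paddr_lo = cpu_to_le32(lower_32_bits(paddr));
	entry->paddr_hi = cpu_to_le32(upper_32_bits(paddr) & 0xff);
	entry->cookie = cpu_to_le64(cookie);
}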
+
+struct hal_rx_mon_ppdu_info {
+ u32 ppdu_id;
+ u32 last_ppdu_id;
+ u64 ppdu_ts;
+ u32 num_mpdu_fcs_ok;
+ u32 num_mpdu_fcs_err;
+ u32 preamble_type;
+ u32 mpdu_len;
+ u16 chan_num;
+ u16 freq;
+ u16 tcp_msdu_count;
+ u16 tcp_ack_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 peer_id;
+ u8 rate;
+ u8 mcs;
+ u8 nss;
+ u8 bw;
+ u8 vht_flag_values1;
+ u8 vht_flag_values2;
+ u8 vht_flag_values3[4];
+ u8 vht_flag_values4;
+ u8 vht_flag_values5;
+ u16 vht_flag_values6;
+ u8 is_stbc;
+ u8 gi;
+ u8 sgi;
+ u8 ldpc;
+ u8 beamformed;
+ u8 rssi_comb;
+ u16 tid;
+ u8 fc_valid;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u16 he_mu_flags;
+ u8 dcm;
+ u8 ru_alloc;
+ u8 reception_type;
+ u64 tsft;
+ u64 rx_duration;
+ u16 frame_control;
+ u32 ast_index;
+ u8 rs_fcs_err;
+ u8 rs_flags;
+ u8 cck_flag;
+ u8 ofdm_flag;
+ u8 ulofdma_flag;
+ u8 frame_control_info_valid;
+ u16 he_per_user_1;
+ u16 he_per_user_2;
+ u8 he_per_user_position;
+ u8 he_per_user_known;
+ u16 he_flags1;
+ u16 he_flags2;
+ u8 he_RU[4];
+ u16 he_data1;
+ u16 he_data2;
+ u16 he_data3;
+ u16 he_data4;
+ u16 he_data5;
+ u16 he_data6;
+ u32 ppdu_len;
+ u32 prev_ppdu_id;
+ u32 device_id;
+ u16 first_data_seq_ctrl;
+ u8 monitor_direct_used;
+ u8 data_sequence_control_info_valid;
+ u8 ltf_size;
+ u8 rxpcu_filter_pass;
+ s8 rssi_chain[8][8];
+ u32 num_users;
+ u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u8 addr4[ETH_ALEN];
+ struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS];
+ u8 userid;
+ bool first_msdu_in_mpdu;
+ bool is_ampdu;
+ u8 medium_prot_type;
+ bool ppdu_continuation;
+ bool eht_usig;
+ struct hal_rx_u_sig_info u_sig_info;
+ bool is_eht;
+ struct hal_rx_eht_info eht_info;
+ struct hal_rx_tlv_aggr_info tlv_aggr;
+};
+
+struct hal_rx_desc_data {
+ struct ieee80211_rx_status *rx_status;
+ u32 phy_meta_data;
+ u32 err_bitmap;
+ u32 enctype;
+ u32 msdu_done:1,
+ is_decrypted:1,
+ ip_csum_fail:1,
+ l4_csum_fail:1,
+ is_first_msdu:1,
+ is_last_msdu:1,
+ mesh_ctrl_present:1,
+ addr2_present:1,
+ is_mcbc:1,
+ seq_ctl_valid:1,
+ fc_valid:1;
+ u16 msdu_len;
+ u16 peer_id;
+ u16 seq_no;
+ u8 *addr2;
+ u8 pkt_type;
+ u8 l3_pad_bytes;
+ u8 decap_type;
+ u8 bw;
+ u8 rate_mcs;
+ u8 nss;
+ u8 sgi;
+ u8 tid;
+};
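
struct hal_rx_desc_data is the chip-neutral cache that the new extract_rx_desc_data() hal op (declared below) fills once from the raw descriptor, so the rx fast path no longer needs per-chip accessors. A hypothetical consumer sketch under that assumption:

/* Hypothetical sketch: gate MSDU delivery on the extracted,
 * chip-neutral fields instead of raw hal_rx_desc accessors.
 */
static bool ath12k_rx_desc_data_ok(const struct hal_rx_desc_data *d)
{
	return d->msdu_done && !d->ip_csum_fail && !d->l4_csum_fail;
}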
+
+#define BUFFER_ADDR_INFO0_ADDR GENMASK(31, 0)
+
+#define BUFFER_ADDR_INFO1_ADDR GENMASK(7, 0)
+#define BUFFER_ADDR_INFO1_RET_BUF_MGR GENMASK(11, 8)
+#define BUFFER_ADDR_INFO1_SW_COOKIE GENMASK(31, 12)
+
+struct ath12k_buffer_addr {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+/* ath12k_buffer_addr
+ *
+ * buffer_addr_31_0
+ * Address (lower 32 bits) of the MSDU buffer or MSDU_EXTENSION
+ * descriptor or Link descriptor
+ *
+ * buffer_addr_39_32
+ * Address (upper 8 bits) of the MSDU buffer or MSDU_EXTENSION
+ * descriptor or Link descriptor
+ *
+ * return_buffer_manager (RBM)
+ * Consumer: WBM
+ * Producer: SW/FW
+ * Indicates to which buffer manager the buffer, MSDU_EXTENSION
+ * descriptor, or link descriptor being pointed to shall be
+ * returned after the frame has been processed. WBM uses it for
+ * routing purposes.
+ *
+ * Values are defined in enum %HAL_RX_BUF_RBM_
+ *
+ * sw_buffer_cookie
+ * Cookie field exclusively used by SW. HW ignores the contents,
+ * except that it passes the programmed value on to other
+ * descriptors together with the physical address.
+ *
+ * SW can use this field, for example, to associate the buffer's
+ * physical address with its virtual address.
+ *
+ * NOTE1:
+ * The three most significant bits can have a special meaning
+ * in case this struct is embedded in a TX_MPDU_DETAILS STRUCT,
+ * and field transmit_bw_restriction is set
+ *
+ * In case of NON punctured transmission:
+ * Sw_buffer_cookie[19:17] = 3'b000: 20 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b001: 40 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b010: 80 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b011: 160 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b101: 240 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b100: 320 MHz TX only
+ * Sw_buffer_cookie[19:18] = 2'b11: reserved
+ *
+ * In case of punctured transmission:
+ * Sw_buffer_cookie[19:16] = 4'b0000: pattern 0 only
+ * Sw_buffer_cookie[19:16] = 4'b0001: pattern 1 only
+ * Sw_buffer_cookie[19:16] = 4'b0010: pattern 2 only
+ * Sw_buffer_cookie[19:16] = 4'b0011: pattern 3 only
+ * Sw_buffer_cookie[19:16] = 4'b0100: pattern 4 only
+ * Sw_buffer_cookie[19:16] = 4'b0101: pattern 5 only
+ * Sw_buffer_cookie[19:16] = 4'b0110: pattern 6 only
+ * Sw_buffer_cookie[19:16] = 4'b0111: pattern 7 only
+ * Sw_buffer_cookie[19:16] = 4'b1000: pattern 8 only
+ * Sw_buffer_cookie[19:16] = 4'b1001: pattern 9 only
+ * Sw_buffer_cookie[19:16] = 4'b1010: pattern 10 only
+ * Sw_buffer_cookie[19:16] = 4'b1011: pattern 11 only
+ * Sw_buffer_cookie[19:18] = 2'b11: reserved
+ *
+ * Note: a punctured transmission is indicated by the presence
+ * of TLV TX_PUNCTURE_SETUP embedded in the scheduler TLV
+ *
+ * Sw_buffer_cookie[20:17]: Tid: The TID field in the QoS control
+ * field
+ *
+ * Sw_buffer_cookie[16]: Mpdu_qos_control_valid: This field
+ * indicates MPDUs with a QoS control field.
+ *
+ */
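
A hedged sketch of packing these fields with the masks defined above; the patch itself routes this through the rx_buf_addr_info_set() hal op, so the helper name here is assumed:

/* Hypothetical sketch: pack a 40-bit DMA address, SW cookie and
 * return-buffer-manager id with the BUFFER_ADDR_INFO* masks.
 */
static void ath12k_buf_addr_pack(struct ath12k_buffer_addr *binfo,
				 dma_addr_t paddr, u32 cookie, u8 rbm)
{
	binfo->info0 = le32_encode_bits(lower_32_bits(paddr),
					BUFFER_ADDR_INFO0_ADDR);
	binfo->info1 = le32_encode_bits(upper_32_bits(paddr),
					BUFFER_ADDR_INFO1_ADDR) |
		       le32_encode_bits(rbm, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
		       le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE);
}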
+
+struct hal_ce_srng_dest_desc;
+struct hal_ce_srng_dst_status_desc;
+struct hal_ce_srng_src_desc;
+
+struct hal_wbm_link_desc {
+ struct ath12k_buffer_addr buf_addr_info;
+} __packed;
+
/* srng flags */
#define HAL_SRNG_FLAGS_MSI_SWAP 0x00000008
#define HAL_SRNG_FLAGS_RING_PTR_SWAP 0x00000010
@@ -634,9 +842,6 @@ enum hal_srng_dir {
#define HAL_SRNG_FLAGS_HIGH_THRESH_INTR_EN 0x00080000
#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
-#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
-#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
-
/* Common SRNG ring structure for source and destination rings */
struct hal_srng {
/* Unique SRNG ring ID */
@@ -758,6 +963,51 @@ struct hal_srng {
} u;
};
+/* hal_wbm_link_desc
+ *
+ * Producer: WBM
+ * Consumer: WBM
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ */
+
+enum hal_wbm_rel_src_module {
+ HAL_WBM_REL_SRC_MODULE_TQM,
+ HAL_WBM_REL_SRC_MODULE_RXDMA,
+ HAL_WBM_REL_SRC_MODULE_REO,
+ HAL_WBM_REL_SRC_MODULE_FW,
+ HAL_WBM_REL_SRC_MODULE_SW,
+ HAL_WBM_REL_SRC_MODULE_MAX,
+};
+
+/* hal_wbm_rel_desc_type
+ *
+ * msdu_buffer
+ * The address points to an MSDU buffer
+ *
+ * msdu_link_descriptor
+ * The address points to a Tx MSDU link descriptor
+ *
+ * mpdu_link_descriptor
+ * The address points to an MPDU link descriptor
+ *
+ * msdu_ext_descriptor
+ * The address points to an MSDU extension descriptor
+ *
+ * queue_ext_descriptor
+ * The address points to a TQM queue extension descriptor. WBM should
+ * treat this in the same way as a link descriptor.
+ */
+enum hal_wbm_rel_desc_type {
+ HAL_WBM_REL_DESC_TYPE_REL_MSDU,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MPDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MSDU_EXT,
+ HAL_WBM_REL_DESC_TYPE_QUEUE_EXT,
+};
+
/* Interrupt mitigation - Batch threshold in terms of number of frames */
#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
@@ -821,66 +1071,6 @@ enum hal_rx_buf_return_buf_manager {
HAL_RX_BUF_RBM_SW6_BM,
};
-#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
-
-#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0)
-#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1)
-#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2)
-#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
-#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4)
-#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5)
-#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
-#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
-#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
-#define HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC BIT(9)
-
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
-#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
-#define HAL_REO_CMD_UPD0_VLD BIT(9)
-#define HAL_REO_CMD_UPD0_ALDC BIT(10)
-#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11)
-#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12)
-#define HAL_REO_CMD_UPD0_AC BIT(13)
-#define HAL_REO_CMD_UPD0_BAR BIT(14)
-#define HAL_REO_CMD_UPD0_RETRY BIT(15)
-#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16)
-#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17)
-#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18)
-#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19)
-#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20)
-#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21)
-#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22)
-#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23)
-#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24)
-#define HAL_REO_CMD_UPD0_SVLD BIT(25)
-#define HAL_REO_CMD_UPD0_SSN BIT(26)
-#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27)
-#define HAL_REO_CMD_UPD0_PN_ERR BIT(28)
-#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
-#define HAL_REO_CMD_UPD0_PN BIT(30)
-
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
-#define HAL_REO_CMD_UPD1_VLD BIT(16)
-#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
-#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
-#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20)
-#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21)
-#define HAL_REO_CMD_UPD1_BAR BIT(23)
-#define HAL_REO_CMD_UPD1_RETRY BIT(24)
-#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25)
-#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26)
-#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27)
-#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28)
-#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29)
-#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
-#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
-
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
-#define HAL_REO_CMD_UPD2_SVLD BIT(10)
-#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
-#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
-#define HAL_REO_CMD_UPD2_PN_ERR BIT(24)
-
struct ath12k_hal_reo_cmd {
u32 addr_lo;
u32 flag;
@@ -926,91 +1116,93 @@ struct hal_reo_status_header {
u32 timestamp;
};
-struct hal_reo_status_queue_stats {
- u16 ssn;
- u16 curr_idx;
- u32 pn[4];
- u32 last_rx_queue_ts;
- u32 last_rx_dequeue_ts;
- u32 rx_bitmap[8]; /* Bitmap from 0-255 */
- u32 curr_mpdu_cnt;
- u32 curr_msdu_cnt;
- u16 fwd_due_to_bar_cnt;
- u16 dup_cnt;
- u32 frames_in_order_cnt;
- u32 num_mpdu_processed_cnt;
- u32 num_msdu_processed_cnt;
- u32 total_num_processed_byte_cnt;
- u32 late_rx_mpdu_cnt;
- u32 reorder_hole_cnt;
- u8 timeout_cnt;
- u8 bar_rx_cnt;
- u8 num_window_2k_jump_cnt;
-};
-
-struct hal_reo_status_flush_queue {
- bool err_detected;
+struct ath12k_hw_hal_params {
+ enum hal_rx_buf_return_buf_manager rx_buf_rbm;
+ u32 wbm2sw_cc_enable;
};
-enum hal_reo_status_flush_cache_err_code {
- HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS,
- HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE,
- HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND,
-};
-
-struct hal_reo_status_flush_cache {
- bool err_detected;
- enum hal_reo_status_flush_cache_err_code err_code;
- bool cache_controller_flush_status_hit;
- u8 cache_controller_flush_status_desc_type;
- u8 cache_controller_flush_status_client_id;
- u8 cache_controller_flush_status_err;
- u8 cache_controller_flush_status_cnt;
-};
-
-enum hal_reo_status_unblock_cache_type {
- HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE,
- HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE,
-};
-
-struct hal_reo_status_unblock_cache {
- bool err_detected;
- enum hal_reo_status_unblock_cache_type unblock_type;
-};
-
-struct hal_reo_status_flush_timeout_list {
- bool err_detected;
- bool list_empty;
- u16 release_desc_cnt;
- u16 fwd_buf_cnt;
-};
-
-enum hal_reo_threshold_idx {
- HAL_REO_THRESHOLD_IDX_DESC_COUNTER0,
- HAL_REO_THRESHOLD_IDX_DESC_COUNTER1,
- HAL_REO_THRESHOLD_IDX_DESC_COUNTER2,
- HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM,
-};
-
-struct hal_reo_status_desc_thresh_reached {
- enum hal_reo_threshold_idx threshold_idx;
- u32 link_desc_counter0;
- u32 link_desc_counter1;
- u32 link_desc_counter2;
- u32 link_desc_counter_sum;
-};
-
-struct hal_reo_status {
- struct hal_reo_status_header uniform_hdr;
- u8 loop_cnt;
- union {
- struct hal_reo_status_queue_stats queue_stats;
- struct hal_reo_status_flush_queue flush_queue;
- struct hal_reo_status_flush_cache flush_cache;
- struct hal_reo_status_unblock_cache unblock_cache;
- struct hal_reo_status_flush_timeout_list timeout_list;
- struct hal_reo_status_desc_thresh_reached desc_thresh_reached;
- } u;
+#define ATH12K_HW_REG_UNDEFINED 0xdeadbeaf
+
+struct ath12k_hw_regs {
+ u32 tcl1_ring_id;
+ u32 tcl1_ring_misc;
+ u32 tcl1_ring_tp_addr_lsb;
+ u32 tcl1_ring_tp_addr_msb;
+ u32 tcl1_ring_consumer_int_setup_ix0;
+ u32 tcl1_ring_consumer_int_setup_ix1;
+ u32 tcl1_ring_msi1_base_lsb;
+ u32 tcl1_ring_msi1_base_msb;
+ u32 tcl1_ring_msi1_data;
+ u32 tcl_ring_base_lsb;
+ u32 tcl1_ring_base_lsb;
+ u32 tcl1_ring_base_msb;
+ u32 tcl2_ring_base_lsb;
+
+ u32 tcl_status_ring_base_lsb;
+
+ u32 reo1_qdesc_addr;
+ u32 reo1_qdesc_max_peerid;
+
+ u32 wbm_idle_ring_base_lsb;
+ u32 wbm_idle_ring_misc_addr;
+ u32 wbm_r0_idle_list_cntl_addr;
+ u32 wbm_r0_idle_list_size_addr;
+ u32 wbm_scattered_ring_base_lsb;
+ u32 wbm_scattered_ring_base_msb;
+ u32 wbm_scattered_desc_head_info_ix0;
+ u32 wbm_scattered_desc_head_info_ix1;
+ u32 wbm_scattered_desc_tail_info_ix0;
+ u32 wbm_scattered_desc_tail_info_ix1;
+ u32 wbm_scattered_desc_ptr_hp_addr;
+
+ u32 wbm_sw_release_ring_base_lsb;
+ u32 wbm_sw1_release_ring_base_lsb;
+ u32 wbm0_release_ring_base_lsb;
+ u32 wbm1_release_ring_base_lsb;
+
+ u32 pcie_qserdes_sysclk_en_sel;
+ u32 pcie_pcs_osc_dtct_config_base;
+
+ u32 umac_ce0_src_reg_base;
+ u32 umac_ce0_dest_reg_base;
+ u32 umac_ce1_src_reg_base;
+ u32 umac_ce1_dest_reg_base;
+
+ u32 ppe_rel_ring_base;
+
+ u32 reo2_ring_base;
+ u32 reo1_misc_ctrl_addr;
+ u32 reo1_sw_cookie_cfg0;
+ u32 reo1_sw_cookie_cfg1;
+ u32 reo1_qdesc_lut_base0;
+ u32 reo1_qdesc_lut_base1;
+ u32 reo1_ring_base_lsb;
+ u32 reo1_ring_base_msb;
+ u32 reo1_ring_id;
+ u32 reo1_ring_misc;
+ u32 reo1_ring_hp_addr_lsb;
+ u32 reo1_ring_hp_addr_msb;
+ u32 reo1_ring_producer_int_setup;
+ u32 reo1_ring_msi1_base_lsb;
+ u32 reo1_ring_msi1_base_msb;
+ u32 reo1_ring_msi1_data;
+ u32 reo1_aging_thres_ix0;
+ u32 reo1_aging_thres_ix1;
+ u32 reo1_aging_thres_ix2;
+ u32 reo1_aging_thres_ix3;
+
+ u32 reo2_sw0_ring_base;
+
+ u32 sw2reo_ring_base;
+ u32 sw2reo1_ring_base;
+
+ u32 reo_cmd_ring_base;
+
+ u32 reo_status_ring_base;
+
+ u32 gcc_gcc_pcie_hot_rst;
+
+ u32 qrtr_node_id;
};
/* HAL context to be used to access SRNG APIs (currently used by data path
@@ -1036,16 +1228,23 @@ struct ath12k_hal {
dma_addr_t paddr;
} wrp;
+ struct device *dev;
+ const struct hal_ops *ops;
+ const struct ath12k_hw_regs *regs;
+ const struct ath12k_hw_hal_params *hal_params;
/* Available REO blocking resources bitmap */
u8 avail_blk_resource;
u8 current_blk_index;
/* shadow register configuration */
- u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
+ u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS_MAX];
int num_shadow_reg_configured;
u32 hal_desc_sz;
+ u32 hal_wbm_release_ring_tx_size;
+
+ const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
};
/* Maps WBM ring number and Return Buffer Manager Id per TCL ring */
@@ -1054,92 +1253,223 @@ struct ath12k_hal_tcl_to_wbm_rbm_map {
u8 rbm_id;
};
-struct hal_rx_ops {
- bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
- bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
- u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
- u8 *(*rx_desc_get_hdr_status)(struct hal_rx_desc *desc);
- bool (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc);
- u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
- u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
- u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
- bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
- bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
- u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
- u16 (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc);
- u8 (*rx_desc_get_msdu_sgi)(struct hal_rx_desc *desc);
- u8 (*rx_desc_get_msdu_rate_mcs)(struct hal_rx_desc *desc);
- u8 (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc);
- u32 (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc);
- u8 (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc);
- u8 (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc);
- u8 (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc);
- u16 (*rx_desc_get_mpdu_peer_id)(struct hal_rx_desc *desc);
- void (*rx_desc_copy_end_tlv)(struct hal_rx_desc *fdesc,
- struct hal_rx_desc *ldesc);
- u32 (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc);
- u32 (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc);
+enum hal_wbm_rel_bm_act {
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE,
+ HAL_WBM_REL_BM_ACT_REL_MSDU,
+};
+
+/* hal_wbm_rel_bm_act
+ *
+ * put_in_idle_list
+ * Put the buffer or descriptor back in the idle list. In case of MSDU or
+ * MPDU link descriptor, BM does not need to check to release any
+ * individual MSDU buffers.
+ *
+ * release_msdu_list
+ * This BM action can only be used in combination with desc_type being
+ * msdu_link_descriptor. Field first_msdu_index points out which MSDU
+ * pointer in the MSDU link descriptor is the first of an MPDU that is
+ * released. BM shall release all the MSDU buffers linked to this first
+ * MSDU buffer pointer. All related MSDU buffer pointer entries shall be
+ * set to value 0, which represents the 'NULL' pointer. When all MSDU
+ * buffer pointers in the MSDU link descriptor are 'NULL', the MSDU link
+ * descriptor itself shall also be released.
+ */
+
+#define RU_INVALID 0
+#define RU_26 1
+#define RU_52 2
+#define RU_106 4
+#define RU_242 9
+#define RU_484 18
+#define RU_996 37
+#define RU_2X996 74
+#define RU_3X996 111
+#define RU_4X996 148
+#define RU_52_26 (RU_52 + RU_26)
+#define RU_106_26 (RU_106 + RU_26)
+#define RU_484_242 (RU_484 + RU_242)
+#define RU_996_484 (RU_996 + RU_484)
+#define RU_996_484_242 (RU_996 + RU_484_242)
+#define RU_2X996_484 (RU_2X996 + RU_484)
+#define RU_3X996_484 (RU_3X996 + RU_484)
+
+enum ath12k_eht_ru_size {
+ ATH12K_EHT_RU_26,
+ ATH12K_EHT_RU_52,
+ ATH12K_EHT_RU_106,
+ ATH12K_EHT_RU_242,
+ ATH12K_EHT_RU_484,
+ ATH12K_EHT_RU_996,
+ ATH12K_EHT_RU_996x2,
+ ATH12K_EHT_RU_996x4,
+ ATH12K_EHT_RU_52_26,
+ ATH12K_EHT_RU_106_26,
+ ATH12K_EHT_RU_484_242,
+ ATH12K_EHT_RU_996_484,
+ ATH12K_EHT_RU_996_484_242,
+ ATH12K_EHT_RU_996x2_484,
+ ATH12K_EHT_RU_996x3,
+ ATH12K_EHT_RU_996x3_484,
+
+ /* Keep last */
+ ATH12K_EHT_RU_INVALID,
+};
+
+#define HAL_RX_RU_ALLOC_TYPE_MAX ATH12K_EHT_RU_INVALID
+
+static inline
+enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
+{
+ enum nl80211_he_ru_alloc ret;
+
+ switch (ru_tones) {
+ case RU_52:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ break;
+ case RU_106:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ case RU_242:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ case RU_484:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case RU_996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case RU_2X996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ case RU_26:
+ fallthrough;
+ default:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ }
+ return ret;
+}
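
For example, ath12k_he_ru_tones_to_nl80211_he_ru_alloc(RU_484) yields NL80211_RATE_INFO_HE_RU_ALLOC_484, and any tone count the switch does not recognize deliberately falls back to the smallest (26-tone) allocation instead of failing.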
+
+struct ath12k_hw_version_map {
+ const struct hal_ops *hal_ops;
+ u32 hal_desc_sz;
+ const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
+ const struct ath12k_hw_hal_params *hal_params;
+ const struct ath12k_hw_regs *hw_regs;
+};
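
With ops, regs and hal_params now carried in struct ath12k_hal, bringing up a chip variant reduces to filling in one of these map entries. A hedged sketch, with every symbol name assumed rather than taken from this patch:

/* Hypothetical sketch of a per-chip version-map entry; all
 * referenced symbols are assumptions for illustration.
 */
static const struct ath12k_hw_version_map ath12k_qcn9274_v2_map = {
	.hal_ops		= &hal_qcn9274_ops,
	.hal_desc_sz		= sizeof(struct hal_rx_desc_qcn9274),
	.tcl_to_wbm_rbm_map	= ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
	.hal_params		= &ath12k_hw_hal_params_qcn9274,
	.hw_regs		= &qcn9274_v2_regs,
};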
+
+struct hal_ops {
+ int (*create_srng_config)(struct ath12k_hal *hal);
void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, u16 len);
- struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
- u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
- u32 (*rx_desc_get_mpdu_start_offset)(void);
- u32 (*rx_desc_get_msdu_end_offset)(void);
- bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
- u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
- bool (*rx_desc_is_da_mcbc)(struct hal_rx_desc *desc);
void (*rx_desc_get_dot11_hdr)(struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr);
void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype);
- bool (*dp_rx_h_msdu_done)(struct hal_rx_desc *desc);
- bool (*dp_rx_h_l4_cksum_fail)(struct hal_rx_desc *desc);
- bool (*dp_rx_h_ip_cksum_fail)(struct hal_rx_desc *desc);
- bool (*dp_rx_h_is_decrypted)(struct hal_rx_desc *desc);
- u32 (*dp_rx_h_mpdu_err)(struct hal_rx_desc *desc);
- u32 (*rx_desc_get_desc_size)(void);
+ void (*rx_desc_copy_end_tlv)(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc);
u8 (*rx_desc_get_msdu_src_link_id)(struct hal_rx_desc *desc);
+ void (*extract_rx_desc_data)(struct hal_rx_desc_data *rx_desc_data,
+ struct hal_rx_desc *rx_desc,
+ struct hal_rx_desc *ldesc);
+ u32 (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
+ void (*ce_dst_setup)(struct ath12k_base *ab,
+ struct hal_srng *srng, int ring_num);
+ void (*set_umac_srng_ptr_addr)(struct ath12k_base *ab,
+ struct hal_srng *srng);
+ void (*srng_src_hw_init)(struct ath12k_base *ab, struct hal_srng *srng);
+ void (*srng_dst_hw_init)(struct ath12k_base *ab, struct hal_srng *srng);
+ int (*srng_update_shadow_config)(struct ath12k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num);
+ int (*srng_get_ring_id)(struct ath12k_hal *hal, enum hal_ring_type type,
+ int ring_num, int mac_id);
+ u32 (*ce_get_desc_size)(enum hal_ce_desc type);
+ void (*ce_src_set_desc)(struct hal_ce_srng_src_desc *desc,
+ dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data);
+ void (*ce_dst_set_desc)(struct hal_ce_srng_dest_desc *desc,
+ dma_addr_t paddr);
+ u32 (*ce_dst_status_get_length)(struct hal_ce_srng_dst_status_desc *desc);
+ void (*set_link_desc_addr)(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr,
+ enum hal_rx_buf_return_buf_manager rbm);
+ void (*tx_set_dscp_tid_map)(struct ath12k_base *ab, int id);
+ void (*tx_configure_bank_register)(struct ath12k_base *ab,
+ u32 bank_config, u8 bank_id);
+ void (*reoq_lut_addr_read_enable)(struct ath12k_base *ab);
+ void (*reoq_lut_set_max_peerid)(struct ath12k_base *ab);
+ void (*write_ml_reoq_lut_addr)(struct ath12k_base *ab,
+ dma_addr_t paddr);
+ void (*write_reoq_lut_addr)(struct ath12k_base *ab, dma_addr_t paddr);
+ void (*setup_link_idle_list)(struct ath12k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset);
+ void (*reo_init_cmd_ring)(struct ath12k_base *ab,
+ struct hal_srng *srng);
+ void (*reo_shared_qaddr_cache_clear)(struct ath12k_base *ab);
+ void (*reo_hw_setup)(struct ath12k_base *ab, u32 ring_hash_map);
+ void (*rx_buf_addr_info_set)(struct ath12k_buffer_addr *binfo,
+ dma_addr_t paddr, u32 cookie, u8 manager);
+ void (*rx_buf_addr_info_get)(struct ath12k_buffer_addr *binfo,
+ dma_addr_t *paddr, u32 *msdu_cookies,
+ u8 *rbm);
+ void (*cc_config)(struct ath12k_base *ab);
+ enum hal_rx_buf_return_buf_manager
+ (*get_idle_link_rbm)(struct ath12k_hal *hal, u8 device_id);
+ void (*rx_msdu_list_get)(struct ath12k *ar,
+ void *link_desc,
+ void *msdu_list,
+ u16 *num_msdus);
+ void (*rx_reo_ent_buf_paddr_get)(void *rx_desc, dma_addr_t *paddr,
+ u32 *sw_cookie,
+ struct ath12k_buffer_addr **pp_buf_addr,
+ u8 *rbm, u32 *msdu_cnt);
+ void *(*reo_cmd_enc_tlv_hdr)(void *tlv, u64 tag, u64 len);
+ u16 (*reo_status_dec_tlv_hdr)(void *tlv, void **desc);
};
-struct hal_ops {
- int (*create_srng_config)(struct ath12k_base *ab);
- u16 (*rxdma_ring_wmask_rx_mpdu_start)(void);
- u32 (*rxdma_ring_wmask_rx_msdu_end)(void);
- const struct hal_rx_ops *(*get_hal_rx_compact_ops)(void);
- const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
-};
+#define HAL_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_TLV_HDR_LEN GENMASK(25, 10)
+#define HAL_TLV_USR_ID GENMASK(31, 26)
-extern const struct hal_ops hal_qcn9274_ops;
-extern const struct hal_ops hal_wcn7850_ops;
+#define HAL_TLV_ALIGN 4
-extern const struct hal_rx_ops hal_rx_qcn9274_ops;
-extern const struct hal_rx_ops hal_rx_qcn9274_compact_ops;
-extern const struct hal_rx_ops hal_rx_wcn7850_ops;
+struct hal_tlv_hdr {
+ __le32 tl;
+ u8 value[];
+} __packed;
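
The tl word packs the tag and length using the masks just above. A hedged encode/decode sketch (the patch dispatches the real versions through the reo_cmd_enc_tlv_hdr/reo_status_dec_tlv_hdr ops and the ath12k_hal_encode_tlv32_hdr/ath12k_hal_decode_tlv32_hdr helpers declared below, so these local names are assumptions):

/* Hypothetical sketch: 32-bit TLV header encode/decode using the
 * HAL_TLV_HDR_* masks; decode returns the tag and the payload.
 */
static void *tlv32_encode(void *tlv, u32 tag, u32 len)
{
	struct hal_tlv_hdr *hdr = tlv;

	hdr->tl = le32_encode_bits(tag, HAL_TLV_HDR_TAG) |
		  le32_encode_bits(len, HAL_TLV_HDR_LEN);
	return hdr->value;
}

static u16 tlv32_decode(void *tlv, void **body)
{
	struct hal_tlv_hdr *hdr = tlv;

	*body = hdr->value;
	return le32_get_bits(hdr->tl, HAL_TLV_HDR_TAG);
}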
-u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
-void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
- int tid, u32 ba_window_size,
- u32 start_seq, enum hal_pn_type type);
-void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab,
- struct hal_srng *srng);
-void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map);
-void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab,
- struct hal_wbm_idle_scatter_list *sbuf,
- u32 nsbufs, u32 tot_link_desc,
- u32 end_offset);
+#define HAL_TLV_64_HDR_TAG GENMASK(9, 1)
+#define HAL_TLV_64_HDR_LEN GENMASK(21, 10)
+#define HAL_TLV_64_USR_ID GENMASK(31, 26)
+#define HAL_TLV_64_ALIGN 8
+
+struct hal_tlv_64_hdr {
+ __le64 tl;
+ u8 value[];
+} __packed;
+
+#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab,
struct hal_srng *srng);
dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
struct hal_srng *srng);
-void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
- dma_addr_t paddr,
- enum hal_rx_buf_return_buf_manager rbm);
-u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type);
-void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr,
- u32 len, u32 id, u8 byte_swap_data);
-void ath12k_hal_ce_dst_set_desc(struct hal_ce_srng_dest_desc *desc, dma_addr_t paddr);
-u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc);
+u32 ath12k_hal_ce_get_desc_size(struct ath12k_hal *hal, enum hal_ce_desc type);
+void ath12k_hal_ce_dst_set_desc(struct ath12k_hal *hal,
+ struct hal_ce_srng_dest_desc *desc,
+ dma_addr_t paddr);
+void ath12k_hal_ce_src_set_desc(struct ath12k_hal *hal,
+ struct hal_ce_srng_src_desc *desc,
+ dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data);
int ath12k_hal_srng_get_entrysize(struct ath12k_base *ab, u32 ring_type);
int ath12k_hal_srng_get_max_entries(struct ath12k_base *ab, u32 ring_type);
void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng,
@@ -1178,4 +1508,45 @@ void ath12k_hal_srng_shadow_config(struct ath12k_base *ab);
void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
struct hal_srng *srng);
void ath12k_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab);
+void ath12k_hal_set_link_desc_addr(struct ath12k_hal *hal,
+ struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr, int rbm);
+void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset);
+u32
+ath12k_hal_ce_dst_status_get_length(struct ath12k_hal *hal,
+ struct hal_ce_srng_dst_status_desc *desc);
+void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id);
+void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab,
+ u32 bank_config, u8 bank_id);
+void ath12k_hal_reoq_lut_addr_read_enable(struct ath12k_base *ab);
+void ath12k_hal_reoq_lut_set_max_peerid(struct ath12k_base *ab);
+void ath12k_hal_write_reoq_lut_addr(struct ath12k_base *ab, dma_addr_t paddr);
+void
+ath12k_hal_write_ml_reoq_lut_addr(struct ath12k_base *ab, dma_addr_t paddr);
+void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab, struct hal_srng *srng);
+void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map);
+void ath12k_hal_rx_buf_addr_info_set(struct ath12k_hal *hal,
+ struct ath12k_buffer_addr *binfo,
+ dma_addr_t paddr, u32 cookie, u8 manager);
+void ath12k_hal_rx_buf_addr_info_get(struct ath12k_hal *hal,
+ struct ath12k_buffer_addr *binfo,
+ dma_addr_t *paddr, u32 *msdu_cookies,
+ u8 *rbm);
+void ath12k_hal_cc_config(struct ath12k_base *ab);
+enum hal_rx_buf_return_buf_manager
+ath12k_hal_get_idle_link_rbm(struct ath12k_hal *hal, u8 device_id);
+void ath12k_hal_rx_msdu_list_get(struct ath12k_hal *hal, struct ath12k *ar,
+ void *link_desc, void *msdu_list,
+ u16 *num_msdus);
+void ath12k_hal_rx_reo_ent_buf_paddr_get(struct ath12k_hal *hal, void *rx_desc,
+ dma_addr_t *paddr, u32 *sw_cookie,
+ struct ath12k_buffer_addr **pp_buf_addr,
+ u8 *rbm, u32 *msdu_cnt);
+void *ath12k_hal_encode_tlv64_hdr(void *tlv, u64 tag, u64 len);
+void *ath12k_hal_encode_tlv32_hdr(void *tlv, u64 tag, u64 len);
+u16 ath12k_hal_decode_tlv64_hdr(void *tlv, void **desc);
+u16 ath12k_hal_decode_tlv32_hdr(void *tlv, void **desc);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/htc.c b/drivers/net/wireless/ath/ath12k/htc.c
index d13616bf07f4..92138caa2a82 100644
--- a/drivers/net/wireless/ath/ath12k/htc.c
+++ b/drivers/net/wireless/ath/ath12k/htc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -376,6 +376,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
out:
kfree_skb(skb);
}
+EXPORT_SYMBOL(ath12k_htc_rx_completion_handler);
static void ath12k_htc_control_rx_complete(struct ath12k_base *ab,
struct sk_buff *skb)
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
deleted file mode 100644
index 6791ae1d64e5..000000000000
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ /dev/null
@@ -1,1680 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause-Clear
-/*
- * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/bitfield.h>
-
-#include "debug.h"
-#include "core.h"
-#include "ce.h"
-#include "hw.h"
-#include "mhi.h"
-#include "dp_rx.h"
-#include "peer.h"
-
-static const guid_t wcn7850_uuid = GUID_INIT(0xf634f534, 0x6147, 0x11ec,
- 0x90, 0xd6, 0x02, 0x42,
- 0xac, 0x12, 0x00, 0x03);
-
-static u8 ath12k_hw_qcn9274_mac_from_pdev_id(int pdev_idx)
-{
- return pdev_idx;
-}
-
-static int ath12k_hw_mac_id_to_pdev_id_qcn9274(const struct ath12k_hw_params *hw,
- int mac_id)
-{
- return mac_id;
-}
-
-static int ath12k_hw_mac_id_to_srng_id_qcn9274(const struct ath12k_hw_params *hw,
- int mac_id)
-{
- return 0;
-}
-
-static u8 ath12k_hw_get_ring_selector_qcn9274(struct sk_buff *skb)
-{
- return smp_processor_id();
-}
-
-static bool ath12k_dp_srng_is_comp_ring_qcn9274(int ring_num)
-{
- if (ring_num < 3 || ring_num == 4)
- return true;
-
- return false;
-}
-
-static bool ath12k_is_frame_link_agnostic_qcn9274(struct ath12k_link_vif *arvif,
- struct ieee80211_mgmt *mgmt)
-{
- return ieee80211_is_action(mgmt->frame_control);
-}
-
-static int ath12k_hw_mac_id_to_pdev_id_wcn7850(const struct ath12k_hw_params *hw,
- int mac_id)
-{
- return 0;
-}
-
-static int ath12k_hw_mac_id_to_srng_id_wcn7850(const struct ath12k_hw_params *hw,
- int mac_id)
-{
- return mac_id;
-}
-
-static u8 ath12k_hw_get_ring_selector_wcn7850(struct sk_buff *skb)
-{
- return skb_get_queue_mapping(skb);
-}
-
-static bool ath12k_dp_srng_is_comp_ring_wcn7850(int ring_num)
-{
- if (ring_num == 0 || ring_num == 2 || ring_num == 4)
- return true;
-
- return false;
-}
-
-static bool ath12k_is_addba_resp_action_code(struct ieee80211_mgmt *mgmt)
-{
- if (!ieee80211_is_action(mgmt->frame_control))
- return false;
-
- if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
- return false;
-
- if (mgmt->u.action.u.addba_resp.action_code != WLAN_ACTION_ADDBA_RESP)
- return false;
-
- return true;
-}
-
-static bool ath12k_is_frame_link_agnostic_wcn7850(struct ath12k_link_vif *arvif,
- struct ieee80211_mgmt *mgmt)
-{
- struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
- struct ath12k_hw *ah = ath12k_ar_to_ah(arvif->ar);
- struct ath12k_base *ab = arvif->ar->ab;
- __le16 fc = mgmt->frame_control;
-
- spin_lock_bh(&ab->base_lock);
- if (!ath12k_peer_find_by_addr(ab, mgmt->da) &&
- !ath12k_peer_ml_find(ah, mgmt->da)) {
- spin_unlock_bh(&ab->base_lock);
- return false;
- }
- spin_unlock_bh(&ab->base_lock);
-
- if (vif->type == NL80211_IFTYPE_STATION)
- return arvif->is_up &&
- (vif->valid_links == vif->active_links) &&
- !ieee80211_is_probe_req(fc) &&
- !ieee80211_is_auth(fc) &&
- !ieee80211_is_deauth(fc) &&
- !ath12k_is_addba_resp_action_code(mgmt);
-
- if (vif->type == NL80211_IFTYPE_AP)
- return !(ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
- ieee80211_is_assoc_resp(fc) || ieee80211_is_reassoc_resp(fc) ||
- ath12k_is_addba_resp_action_code(mgmt));
-
- return false;
-}
-
-static const struct ath12k_hw_ops qcn9274_ops = {
- .get_hw_mac_from_pdev_id = ath12k_hw_qcn9274_mac_from_pdev_id,
- .mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_qcn9274,
- .mac_id_to_srng_id = ath12k_hw_mac_id_to_srng_id_qcn9274,
- .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_qcn9274,
- .get_ring_selector = ath12k_hw_get_ring_selector_qcn9274,
- .dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_qcn9274,
- .is_frame_link_agnostic = ath12k_is_frame_link_agnostic_qcn9274,
-};
-
-static const struct ath12k_hw_ops wcn7850_ops = {
- .get_hw_mac_from_pdev_id = ath12k_hw_qcn9274_mac_from_pdev_id,
- .mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_wcn7850,
- .mac_id_to_srng_id = ath12k_hw_mac_id_to_srng_id_wcn7850,
- .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_wcn7850,
- .get_ring_selector = ath12k_hw_get_ring_selector_wcn7850,
- .dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_wcn7850,
- .is_frame_link_agnostic = ath12k_is_frame_link_agnostic_wcn7850,
-};
-
-#define ATH12K_TX_RING_MASK_0 0x1
-#define ATH12K_TX_RING_MASK_1 0x2
-#define ATH12K_TX_RING_MASK_2 0x4
-#define ATH12K_TX_RING_MASK_3 0x8
-#define ATH12K_TX_RING_MASK_4 0x10
-
-#define ATH12K_RX_RING_MASK_0 0x1
-#define ATH12K_RX_RING_MASK_1 0x2
-#define ATH12K_RX_RING_MASK_2 0x4
-#define ATH12K_RX_RING_MASK_3 0x8
-
-#define ATH12K_RX_ERR_RING_MASK_0 0x1
-
-#define ATH12K_RX_WBM_REL_RING_MASK_0 0x1
-
-#define ATH12K_REO_STATUS_RING_MASK_0 0x1
-
-#define ATH12K_HOST2RXDMA_RING_MASK_0 0x1
-
-#define ATH12K_RX_MON_RING_MASK_0 0x1
-#define ATH12K_RX_MON_RING_MASK_1 0x2
-#define ATH12K_RX_MON_RING_MASK_2 0x4
-
-#define ATH12K_TX_MON_RING_MASK_0 0x1
-#define ATH12K_TX_MON_RING_MASK_1 0x2
-
-#define ATH12K_RX_MON_STATUS_RING_MASK_0 0x1
-#define ATH12K_RX_MON_STATUS_RING_MASK_1 0x2
-#define ATH12K_RX_MON_STATUS_RING_MASK_2 0x4
-
-/* Target firmware's Copy Engine configuration. */
-static const struct ce_pipe_config ath12k_target_ce_config_wlan_qcn9274[] = {
- /* CE0: host->target HTC control and raw streams */
- {
- .pipenum = __cpu_to_le32(0),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE1: target->host HTT + HTC control */
- {
- .pipenum = __cpu_to_le32(1),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE2: target->host WMI */
- {
- .pipenum = __cpu_to_le32(2),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE3: host->target WMI (mac0) */
- {
- .pipenum = __cpu_to_le32(3),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE4: host->target HTT */
- {
- .pipenum = __cpu_to_le32(4),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(256),
- .nbytes_max = __cpu_to_le32(256),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE5: target->host Pktlog */
- {
- .pipenum = __cpu_to_le32(5),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE6: Reserved for target autonomous hif_memcpy */
- {
- .pipenum = __cpu_to_le32(6),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(16384),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE7: host->target WMI (mac1) */
- {
- .pipenum = __cpu_to_le32(7),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE8: Reserved for target autonomous hif_memcpy */
- {
- .pipenum = __cpu_to_le32(8),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(16384),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE9, 10 and 11: Reserved for MHI */
-
- /* CE12: Target CV prefetch */
- {
- .pipenum = __cpu_to_le32(12),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE13: Target CV prefetch */
- {
- .pipenum = __cpu_to_le32(13),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE14: WMI logging/CFR/Spectral/Radar */
- {
- .pipenum = __cpu_to_le32(14),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE15: Reserved */
-};
-
-/* Target firmware's Copy Engine configuration. */
-static const struct ce_pipe_config ath12k_target_ce_config_wlan_wcn7850[] = {
- /* CE0: host->target HTC control and raw streams */
- {
- .pipenum = __cpu_to_le32(0),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE1: target->host HTT + HTC control */
- {
- .pipenum = __cpu_to_le32(1),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE2: target->host WMI */
- {
- .pipenum = __cpu_to_le32(2),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE3: host->target WMI */
- {
- .pipenum = __cpu_to_le32(3),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE4: host->target HTT */
- {
- .pipenum = __cpu_to_le32(4),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(256),
- .nbytes_max = __cpu_to_le32(256),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE5: target->host Pktlog */
- {
- .pipenum = __cpu_to_le32(5),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE6: Reserved for target autonomous hif_memcpy */
- {
- .pipenum = __cpu_to_le32(6),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(16384),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE7 used only by Host */
- {
- .pipenum = __cpu_to_le32(7),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
- .nentries = __cpu_to_le32(0),
- .nbytes_max = __cpu_to_le32(0),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE8 target->host used only by IPA */
- {
- .pipenum = __cpu_to_le32(8),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(16384),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
- /* CE 9, 10, 11 are used by MHI driver */
-};
-
-/* Map from service/endpoint to Copy Engine.
- * This table is derived from the CE_PCI TABLE, above.
- * It is passed to the Target at startup for use by firmware.
- */
-static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_qcn9274[] = {
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(0),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(1),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(0),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(1),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(4),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(1),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(7),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_PKT_LOG),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(5),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(14),
- },
-
- /* (Additions here) */
-
- { /* must be last */
- __cpu_to_le32(0),
- __cpu_to_le32(0),
- __cpu_to_le32(0),
- },
-};
-
-static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_wcn7850[] = {
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(0),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
- __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- __cpu_to_le32(4),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
- __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- __cpu_to_le32(1),
- },
-
- /* (Additions here) */
-
- { /* must be last */
- __cpu_to_le32(0),
- __cpu_to_le32(0),
- __cpu_to_le32(0),
- },
-};
-
-static const struct ce_pipe_config ath12k_target_ce_config_wlan_ipq5332[] = {
- /* host->target HTC control and raw streams */
- {
- .pipenum = __cpu_to_le32(0),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
- /* target->host HTT */
- {
- .pipenum = __cpu_to_le32(1),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
- /* target->host WMI + HTC control */
- {
- .pipenum = __cpu_to_le32(2),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
- /* host->target WMI */
- {
- .pipenum = __cpu_to_le32(3),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
- /* host->target HTT */
- {
- .pipenum = __cpu_to_le32(4),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(256),
- .nbytes_max = __cpu_to_le32(256),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
- .reserved = __cpu_to_le32(0),
- },
- /* Target -> host PKTLOG */
- {
- .pipenum = __cpu_to_le32(5),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
- /* Reserved for target autonomous HIF_memcpy */
- {
- .pipenum = __cpu_to_le32(6),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(16384),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
- /* CE7 Reserved for CV Prefetch */
- {
- .pipenum = __cpu_to_le32(7),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
- /* CE8 Reserved for target generic HIF memcpy */
- {
- .pipenum = __cpu_to_le32(8),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(16384),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
- /* CE9 WMI logging/CFR/Spectral/Radar/ */
- {
- .pipenum = __cpu_to_le32(9),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
- /* Unused TBD */
- {
- .pipenum = __cpu_to_le32(10),
- .pipedir = __cpu_to_le32(PIPEDIR_NONE),
- .nentries = __cpu_to_le32(0),
- .nbytes_max = __cpu_to_le32(0),
- .flags = __cpu_to_le32(0),
- .reserved = __cpu_to_le32(0),
- },
- /* Unused TBD */
- {
- .pipenum = __cpu_to_le32(11),
- .pipedir = __cpu_to_le32(PIPEDIR_NONE),
- .nentries = __cpu_to_le32(0),
- .nbytes_max = __cpu_to_le32(0),
- .flags = __cpu_to_le32(0),
- .reserved = __cpu_to_le32(0),
- },
-};
-
-static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_ipq5332[] = {
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
- __cpu_to_le32(PIPEDIR_OUT),
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
- __cpu_to_le32(PIPEDIR_IN),
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
- __cpu_to_le32(PIPEDIR_OUT),
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
- __cpu_to_le32(PIPEDIR_IN),
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
- __cpu_to_le32(PIPEDIR_OUT),
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
- __cpu_to_le32(PIPEDIR_IN),
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
- __cpu_to_le32(PIPEDIR_OUT),
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
- __cpu_to_le32(PIPEDIR_IN),
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
- __cpu_to_le32(PIPEDIR_OUT),
- __cpu_to_le32(3),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
- __cpu_to_le32(PIPEDIR_IN),
- __cpu_to_le32(2),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
- __cpu_to_le32(PIPEDIR_OUT),
- __cpu_to_le32(0),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
- __cpu_to_le32(PIPEDIR_IN),
- __cpu_to_le32(1),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
- __cpu_to_le32(PIPEDIR_OUT),
- __cpu_to_le32(0),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
- __cpu_to_le32(PIPEDIR_IN),
- __cpu_to_le32(1),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
- __cpu_to_le32(PIPEDIR_OUT),
- __cpu_to_le32(4),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
- __cpu_to_le32(PIPEDIR_IN),
- __cpu_to_le32(1),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_PKT_LOG),
- __cpu_to_le32(PIPEDIR_IN),
- __cpu_to_le32(5),
- },
- {
- __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG),
- __cpu_to_le32(PIPEDIR_IN),
- __cpu_to_le32(9),
- },
- /* (Additions here) */
-
- { /* must be last */
- __cpu_to_le32(0),
- __cpu_to_le32(0),
- __cpu_to_le32(0),
- },
-};
-
-static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
- .tx = {
- ATH12K_TX_RING_MASK_0,
- ATH12K_TX_RING_MASK_1,
- ATH12K_TX_RING_MASK_2,
- ATH12K_TX_RING_MASK_3,
- },
- .rx_mon_dest = {
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- ATH12K_RX_MON_RING_MASK_0,
- ATH12K_RX_MON_RING_MASK_1,
- ATH12K_RX_MON_RING_MASK_2,
- },
- .rx = {
- 0, 0, 0, 0,
- ATH12K_RX_RING_MASK_0,
- ATH12K_RX_RING_MASK_1,
- ATH12K_RX_RING_MASK_2,
- ATH12K_RX_RING_MASK_3,
- },
- .rx_err = {
- 0, 0, 0,
- ATH12K_RX_ERR_RING_MASK_0,
- },
- .rx_wbm_rel = {
- 0, 0, 0,
- ATH12K_RX_WBM_REL_RING_MASK_0,
- },
- .reo_status = {
- 0, 0, 0,
- ATH12K_REO_STATUS_RING_MASK_0,
- },
- .host2rxdma = {
- 0, 0, 0,
- ATH12K_HOST2RXDMA_RING_MASK_0,
- },
- .tx_mon_dest = {
- 0, 0, 0,
- },
-};
-
-static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_ipq5332 = {
- .tx = {
- ATH12K_TX_RING_MASK_0,
- ATH12K_TX_RING_MASK_1,
- ATH12K_TX_RING_MASK_2,
- ATH12K_TX_RING_MASK_3,
- },
- .rx_mon_dest = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- ATH12K_RX_MON_RING_MASK_0,
- },
- .rx = {
- 0, 0, 0, 0,
- ATH12K_RX_RING_MASK_0,
- ATH12K_RX_RING_MASK_1,
- ATH12K_RX_RING_MASK_2,
- ATH12K_RX_RING_MASK_3,
- },
- .rx_err = {
- 0, 0, 0,
- ATH12K_RX_ERR_RING_MASK_0,
- },
- .rx_wbm_rel = {
- 0, 0, 0,
- ATH12K_RX_WBM_REL_RING_MASK_0,
- },
- .reo_status = {
- 0, 0, 0,
- ATH12K_REO_STATUS_RING_MASK_0,
- },
- .host2rxdma = {
- 0, 0, 0,
- ATH12K_HOST2RXDMA_RING_MASK_0,
- },
- .tx_mon_dest = {
- ATH12K_TX_MON_RING_MASK_0,
- ATH12K_TX_MON_RING_MASK_1,
- },
-};
-
-static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
- .tx = {
- ATH12K_TX_RING_MASK_0,
- ATH12K_TX_RING_MASK_1,
- ATH12K_TX_RING_MASK_2,
- },
- .rx_mon_dest = {
- },
- .rx_mon_status = {
- 0, 0, 0, 0,
- ATH12K_RX_MON_STATUS_RING_MASK_0,
- ATH12K_RX_MON_STATUS_RING_MASK_1,
- ATH12K_RX_MON_STATUS_RING_MASK_2,
- },
- .rx = {
- 0, 0, 0,
- ATH12K_RX_RING_MASK_0,
- ATH12K_RX_RING_MASK_1,
- ATH12K_RX_RING_MASK_2,
- ATH12K_RX_RING_MASK_3,
- },
- .rx_err = {
- ATH12K_RX_ERR_RING_MASK_0,
- },
- .rx_wbm_rel = {
- ATH12K_RX_WBM_REL_RING_MASK_0,
- },
- .reo_status = {
- ATH12K_REO_STATUS_RING_MASK_0,
- },
- .host2rxdma = {
- },
- .tx_mon_dest = {
- },
-};
-
-static const struct ath12k_hw_regs qcn9274_v1_regs = {
- /* SW2TCL(x) R0 ring configuration address */
- .hal_tcl1_ring_id = 0x00000908,
- .hal_tcl1_ring_misc = 0x00000910,
- .hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
- .hal_tcl1_ring_tp_addr_msb = 0x00000920,
- .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
- .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
- .hal_tcl1_ring_msi1_base_lsb = 0x00000948,
- .hal_tcl1_ring_msi1_base_msb = 0x0000094c,
- .hal_tcl1_ring_msi1_data = 0x00000950,
- .hal_tcl_ring_base_lsb = 0x00000b58,
- .hal_tcl1_ring_base_lsb = 0x00000900,
- .hal_tcl1_ring_base_msb = 0x00000904,
- .hal_tcl2_ring_base_lsb = 0x00000978,
-
- /* TCL STATUS ring address */
- .hal_tcl_status_ring_base_lsb = 0x00000d38,
-
- .hal_wbm_idle_ring_base_lsb = 0x00000d0c,
- .hal_wbm_idle_ring_misc_addr = 0x00000d1c,
- .hal_wbm_r0_idle_list_cntl_addr = 0x00000210,
- .hal_wbm_r0_idle_list_size_addr = 0x00000214,
- .hal_wbm_scattered_ring_base_lsb = 0x00000220,
- .hal_wbm_scattered_ring_base_msb = 0x00000224,
- .hal_wbm_scattered_desc_head_info_ix0 = 0x00000230,
- .hal_wbm_scattered_desc_head_info_ix1 = 0x00000234,
- .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000240,
- .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000244,
- .hal_wbm_scattered_desc_ptr_hp_addr = 0x0000024c,
-
- .hal_wbm_sw_release_ring_base_lsb = 0x0000034c,
- .hal_wbm_sw1_release_ring_base_lsb = 0x000003c4,
- .hal_wbm0_release_ring_base_lsb = 0x00000dd8,
- .hal_wbm1_release_ring_base_lsb = 0x00000e50,
-
- /* PCIe base address */
- .pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
- .pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
-
- /* PPE release ring address */
- .hal_ppe_rel_ring_base = 0x0000043c,
-
- /* REO DEST ring address */
- .hal_reo2_ring_base = 0x0000055c,
- .hal_reo1_misc_ctrl_addr = 0x00000b7c,
- .hal_reo1_sw_cookie_cfg0 = 0x00000050,
- .hal_reo1_sw_cookie_cfg1 = 0x00000054,
- .hal_reo1_qdesc_lut_base0 = 0x00000058,
- .hal_reo1_qdesc_lut_base1 = 0x0000005c,
- .hal_reo1_ring_base_lsb = 0x000004e4,
- .hal_reo1_ring_base_msb = 0x000004e8,
- .hal_reo1_ring_id = 0x000004ec,
- .hal_reo1_ring_misc = 0x000004f4,
- .hal_reo1_ring_hp_addr_lsb = 0x000004f8,
- .hal_reo1_ring_hp_addr_msb = 0x000004fc,
- .hal_reo1_ring_producer_int_setup = 0x00000508,
- .hal_reo1_ring_msi1_base_lsb = 0x0000052C,
- .hal_reo1_ring_msi1_base_msb = 0x00000530,
- .hal_reo1_ring_msi1_data = 0x00000534,
- .hal_reo1_aging_thres_ix0 = 0x00000b08,
- .hal_reo1_aging_thres_ix1 = 0x00000b0c,
- .hal_reo1_aging_thres_ix2 = 0x00000b10,
- .hal_reo1_aging_thres_ix3 = 0x00000b14,
-
- /* REO Exception ring address */
- .hal_reo2_sw0_ring_base = 0x000008a4,
-
- /* REO Reinject ring address */
- .hal_sw2reo_ring_base = 0x00000304,
- .hal_sw2reo1_ring_base = 0x0000037c,
-
- /* REO cmd ring address */
- .hal_reo_cmd_ring_base = 0x0000028c,
-
- /* REO status ring address */
- .hal_reo_status_ring_base = 0x00000a84,
-
- /* CE base address */
- .hal_umac_ce0_src_reg_base = 0x01b80000,
- .hal_umac_ce0_dest_reg_base = 0x01b81000,
- .hal_umac_ce1_src_reg_base = 0x01b82000,
- .hal_umac_ce1_dest_reg_base = 0x01b83000,
-
- .gcc_gcc_pcie_hot_rst = 0x1e38338,
-};
-
-static const struct ath12k_hw_regs qcn9274_v2_regs = {
- /* SW2TCL(x) R0 ring configuration address */
- .hal_tcl1_ring_id = 0x00000908,
- .hal_tcl1_ring_misc = 0x00000910,
- .hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
- .hal_tcl1_ring_tp_addr_msb = 0x00000920,
- .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
- .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
- .hal_tcl1_ring_msi1_base_lsb = 0x00000948,
- .hal_tcl1_ring_msi1_base_msb = 0x0000094c,
- .hal_tcl1_ring_msi1_data = 0x00000950,
- .hal_tcl_ring_base_lsb = 0x00000b58,
- .hal_tcl1_ring_base_lsb = 0x00000900,
- .hal_tcl1_ring_base_msb = 0x00000904,
- .hal_tcl2_ring_base_lsb = 0x00000978,
-
- /* TCL STATUS ring address */
- .hal_tcl_status_ring_base_lsb = 0x00000d38,
-
- /* WBM idle link ring address */
- .hal_wbm_idle_ring_base_lsb = 0x00000d3c,
- .hal_wbm_idle_ring_misc_addr = 0x00000d4c,
- .hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
- .hal_wbm_r0_idle_list_size_addr = 0x00000244,
- .hal_wbm_scattered_ring_base_lsb = 0x00000250,
- .hal_wbm_scattered_ring_base_msb = 0x00000254,
- .hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
- .hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
- .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
- .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
- .hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
-
- /* SW2WBM release ring address */
- .hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
- .hal_wbm_sw1_release_ring_base_lsb = 0x000003f4,
-
- /* WBM2SW release ring address */
- .hal_wbm0_release_ring_base_lsb = 0x00000e08,
- .hal_wbm1_release_ring_base_lsb = 0x00000e80,
-
- /* PCIe base address */
- .pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
- .pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
-
- /* PPE release ring address */
- .hal_ppe_rel_ring_base = 0x0000046c,
-
- /* REO DEST ring address */
- .hal_reo2_ring_base = 0x00000578,
- .hal_reo1_misc_ctrl_addr = 0x00000b9c,
- .hal_reo1_sw_cookie_cfg0 = 0x0000006c,
- .hal_reo1_sw_cookie_cfg1 = 0x00000070,
- .hal_reo1_qdesc_lut_base0 = 0x00000074,
- .hal_reo1_qdesc_lut_base1 = 0x00000078,
- .hal_reo1_qdesc_addr = 0x0000007c,
- .hal_reo1_qdesc_max_peerid = 0x00000088,
- .hal_reo1_ring_base_lsb = 0x00000500,
- .hal_reo1_ring_base_msb = 0x00000504,
- .hal_reo1_ring_id = 0x00000508,
- .hal_reo1_ring_misc = 0x00000510,
- .hal_reo1_ring_hp_addr_lsb = 0x00000514,
- .hal_reo1_ring_hp_addr_msb = 0x00000518,
- .hal_reo1_ring_producer_int_setup = 0x00000524,
- .hal_reo1_ring_msi1_base_lsb = 0x00000548,
- .hal_reo1_ring_msi1_base_msb = 0x0000054C,
- .hal_reo1_ring_msi1_data = 0x00000550,
- .hal_reo1_aging_thres_ix0 = 0x00000B28,
- .hal_reo1_aging_thres_ix1 = 0x00000B2C,
- .hal_reo1_aging_thres_ix2 = 0x00000B30,
- .hal_reo1_aging_thres_ix3 = 0x00000B34,
-
- /* REO Exception ring address */
- .hal_reo2_sw0_ring_base = 0x000008c0,
-
- /* REO Reinject ring address */
- .hal_sw2reo_ring_base = 0x00000320,
- .hal_sw2reo1_ring_base = 0x00000398,
-
- /* REO cmd ring address */
- .hal_reo_cmd_ring_base = 0x000002A8,
-
- /* REO status ring address */
- .hal_reo_status_ring_base = 0x00000aa0,
-
- /* CE base address */
- .hal_umac_ce0_src_reg_base = 0x01b80000,
- .hal_umac_ce0_dest_reg_base = 0x01b81000,
- .hal_umac_ce1_src_reg_base = 0x01b82000,
- .hal_umac_ce1_dest_reg_base = 0x01b83000,
-
- .gcc_gcc_pcie_hot_rst = 0x1e38338,
-};
-
-static const struct ath12k_hw_regs ipq5332_regs = {
- /* SW2TCL(x) R0 ring configuration address */
- .hal_tcl1_ring_id = 0x00000918,
- .hal_tcl1_ring_misc = 0x00000920,
- .hal_tcl1_ring_tp_addr_lsb = 0x0000092c,
- .hal_tcl1_ring_tp_addr_msb = 0x00000930,
- .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000940,
- .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000944,
- .hal_tcl1_ring_msi1_base_lsb = 0x00000958,
- .hal_tcl1_ring_msi1_base_msb = 0x0000095c,
- .hal_tcl1_ring_base_lsb = 0x00000910,
- .hal_tcl1_ring_base_msb = 0x00000914,
- .hal_tcl1_ring_msi1_data = 0x00000960,
- .hal_tcl2_ring_base_lsb = 0x00000988,
- .hal_tcl_ring_base_lsb = 0x00000b68,
-
- /* TCL STATUS ring address */
- .hal_tcl_status_ring_base_lsb = 0x00000d48,
-
- /* REO DEST ring address */
- .hal_reo2_ring_base = 0x00000578,
- .hal_reo1_misc_ctrl_addr = 0x00000b9c,
- .hal_reo1_sw_cookie_cfg0 = 0x0000006c,
- .hal_reo1_sw_cookie_cfg1 = 0x00000070,
- .hal_reo1_qdesc_lut_base0 = 0x00000074,
- .hal_reo1_qdesc_lut_base1 = 0x00000078,
- .hal_reo1_ring_base_lsb = 0x00000500,
- .hal_reo1_ring_base_msb = 0x00000504,
- .hal_reo1_ring_id = 0x00000508,
- .hal_reo1_ring_misc = 0x00000510,
- .hal_reo1_ring_hp_addr_lsb = 0x00000514,
- .hal_reo1_ring_hp_addr_msb = 0x00000518,
- .hal_reo1_ring_producer_int_setup = 0x00000524,
- .hal_reo1_ring_msi1_base_lsb = 0x00000548,
- .hal_reo1_ring_msi1_base_msb = 0x0000054C,
- .hal_reo1_ring_msi1_data = 0x00000550,
- .hal_reo1_aging_thres_ix0 = 0x00000B28,
- .hal_reo1_aging_thres_ix1 = 0x00000B2C,
- .hal_reo1_aging_thres_ix2 = 0x00000B30,
- .hal_reo1_aging_thres_ix3 = 0x00000B34,
-
- /* REO Exception ring address */
- .hal_reo2_sw0_ring_base = 0x000008c0,
-
- /* REO Reinject ring address */
- .hal_sw2reo_ring_base = 0x00000320,
- .hal_sw2reo1_ring_base = 0x00000398,
-
- /* REO cmd ring address */
- .hal_reo_cmd_ring_base = 0x000002A8,
-
- /* REO status ring address */
- .hal_reo_status_ring_base = 0x00000aa0,
-
- /* WBM idle link ring address */
- .hal_wbm_idle_ring_base_lsb = 0x00000d3c,
- .hal_wbm_idle_ring_misc_addr = 0x00000d4c,
- .hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
- .hal_wbm_r0_idle_list_size_addr = 0x00000244,
- .hal_wbm_scattered_ring_base_lsb = 0x00000250,
- .hal_wbm_scattered_ring_base_msb = 0x00000254,
- .hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
- .hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
- .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
- .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
- .hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
-
- /* SW2WBM release ring address */
- .hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
-
- /* WBM2SW release ring address */
- .hal_wbm0_release_ring_base_lsb = 0x00000e08,
- .hal_wbm1_release_ring_base_lsb = 0x00000e80,
-
- /* PPE release ring address */
- .hal_ppe_rel_ring_base = 0x0000046c,
-
- /* CE address */
- .hal_umac_ce0_src_reg_base = 0x00740000 -
- HAL_IPQ5332_CE_WFSS_REG_BASE,
- .hal_umac_ce0_dest_reg_base = 0x00741000 -
- HAL_IPQ5332_CE_WFSS_REG_BASE,
- .hal_umac_ce1_src_reg_base = 0x00742000 -
- HAL_IPQ5332_CE_WFSS_REG_BASE,
- .hal_umac_ce1_dest_reg_base = 0x00743000 -
- HAL_IPQ5332_CE_WFSS_REG_BASE,
-};
-
-static const struct ath12k_hw_regs wcn7850_regs = {
- /* SW2TCL(x) R0 ring configuration address */
- .hal_tcl1_ring_id = 0x00000908,
- .hal_tcl1_ring_misc = 0x00000910,
- .hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
- .hal_tcl1_ring_tp_addr_msb = 0x00000920,
- .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
- .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
- .hal_tcl1_ring_msi1_base_lsb = 0x00000948,
- .hal_tcl1_ring_msi1_base_msb = 0x0000094c,
- .hal_tcl1_ring_msi1_data = 0x00000950,
- .hal_tcl_ring_base_lsb = 0x00000b58,
- .hal_tcl1_ring_base_lsb = 0x00000900,
- .hal_tcl1_ring_base_msb = 0x00000904,
- .hal_tcl2_ring_base_lsb = 0x00000978,
-
- /* TCL STATUS ring address */
- .hal_tcl_status_ring_base_lsb = 0x00000d38,
-
- .hal_wbm_idle_ring_base_lsb = 0x00000d3c,
- .hal_wbm_idle_ring_misc_addr = 0x00000d4c,
- .hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
- .hal_wbm_r0_idle_list_size_addr = 0x00000244,
- .hal_wbm_scattered_ring_base_lsb = 0x00000250,
- .hal_wbm_scattered_ring_base_msb = 0x00000254,
- .hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
- .hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
- .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
- .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
-	.hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
-
- .hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
- .hal_wbm_sw1_release_ring_base_lsb = 0x00000284,
- .hal_wbm0_release_ring_base_lsb = 0x00000e08,
- .hal_wbm1_release_ring_base_lsb = 0x00000e80,
-
- /* PCIe base address */
- .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
- .pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
-
- /* PPE release ring address */
- .hal_ppe_rel_ring_base = 0x0000043c,
-
- /* REO DEST ring address */
- .hal_reo2_ring_base = 0x0000055c,
- .hal_reo1_misc_ctrl_addr = 0x00000b7c,
- .hal_reo1_sw_cookie_cfg0 = 0x00000050,
- .hal_reo1_sw_cookie_cfg1 = 0x00000054,
- .hal_reo1_qdesc_lut_base0 = 0x00000058,
- .hal_reo1_qdesc_lut_base1 = 0x0000005c,
- .hal_reo1_ring_base_lsb = 0x000004e4,
- .hal_reo1_ring_base_msb = 0x000004e8,
- .hal_reo1_ring_id = 0x000004ec,
- .hal_reo1_ring_misc = 0x000004f4,
- .hal_reo1_ring_hp_addr_lsb = 0x000004f8,
- .hal_reo1_ring_hp_addr_msb = 0x000004fc,
- .hal_reo1_ring_producer_int_setup = 0x00000508,
- .hal_reo1_ring_msi1_base_lsb = 0x0000052C,
- .hal_reo1_ring_msi1_base_msb = 0x00000530,
- .hal_reo1_ring_msi1_data = 0x00000534,
- .hal_reo1_aging_thres_ix0 = 0x00000b08,
- .hal_reo1_aging_thres_ix1 = 0x00000b0c,
- .hal_reo1_aging_thres_ix2 = 0x00000b10,
- .hal_reo1_aging_thres_ix3 = 0x00000b14,
-
- /* REO Exception ring address */
- .hal_reo2_sw0_ring_base = 0x000008a4,
-
- /* REO Reinject ring address */
- .hal_sw2reo_ring_base = 0x00000304,
- .hal_sw2reo1_ring_base = 0x0000037c,
-
- /* REO cmd ring address */
- .hal_reo_cmd_ring_base = 0x0000028c,
-
- /* REO status ring address */
- .hal_reo_status_ring_base = 0x00000a84,
-
- /* CE base address */
- .hal_umac_ce0_src_reg_base = 0x01b80000,
- .hal_umac_ce0_dest_reg_base = 0x01b81000,
- .hal_umac_ce1_src_reg_base = 0x01b82000,
- .hal_umac_ce1_dest_reg_base = 0x01b83000,
-
- .gcc_gcc_pcie_hot_rst = 0x1e40304,
-};
-
-static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = {
- .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
- .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
- HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN |
- HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
- HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
- HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
-};
-
-static const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850 = {
- .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
- .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
- HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
- HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
- HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
-};
-
-static const struct ath12k_hw_hal_params ath12k_hw_hal_params_ipq5332 = {
- .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
- .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
- HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN |
- HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
- HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
- HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
-};
-
-static const struct ce_ie_addr ath12k_ce_ie_addr_ipq5332 = {
- .ie1_reg_addr = CE_HOST_IE_ADDRESS - HAL_IPQ5332_CE_WFSS_REG_BASE,
- .ie2_reg_addr = CE_HOST_IE_2_ADDRESS - HAL_IPQ5332_CE_WFSS_REG_BASE,
- .ie3_reg_addr = CE_HOST_IE_3_ADDRESS - HAL_IPQ5332_CE_WFSS_REG_BASE,
-};
-
-static const struct ce_remap ath12k_ce_remap_ipq5332 = {
- .base = HAL_IPQ5332_CE_WFSS_REG_BASE,
- .size = HAL_IPQ5332_CE_SIZE,
-};
-
-static const struct ath12k_hw_params ath12k_hw_params[] = {
- {
- .name = "qcn9274 hw1.0",
- .hw_rev = ATH12K_HW_QCN9274_HW10,
- .fw = {
- .dir = "QCN9274/hw1.0",
- .board_size = 256 * 1024,
- .cal_offset = 128 * 1024,
- .m3_loader = ath12k_m3_fw_loader_driver,
- },
- .max_radios = 1,
- .single_pdev_only = false,
- .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
- .internal_sleep_clock = false,
-
- .hw_ops = &qcn9274_ops,
- .ring_mask = &ath12k_hw_ring_mask_qcn9274,
- .regs = &qcn9274_v1_regs,
-
- .host_ce_config = ath12k_host_ce_config_qcn9274,
- .ce_count = 16,
- .target_ce_config = ath12k_target_ce_config_wlan_qcn9274,
- .target_ce_count = 12,
- .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qcn9274,
- .svc_to_ce_map_len = 18,
-
- .hal_params = &ath12k_hw_hal_params_qcn9274,
-
- .rxdma1_enable = false,
- .num_rxdma_per_pdev = 1,
- .num_rxdma_dst_ring = 0,
- .rx_mac_buf_ring = false,
- .vdev_start_delay = false,
-
- .interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT) |
- BIT(NL80211_IFTYPE_AP_VLAN),
- .supports_monitor = false,
-
- .idle_ps = false,
- .download_calib = true,
- .supports_suspend = false,
- .tcl_ring_retry = true,
- .reoq_lut_support = true,
- .supports_shadow_regs = false,
-
- .num_tcl_banks = 48,
- .max_tx_ring = 4,
-
- .mhi_config = &ath12k_mhi_config_qcn9274,
-
- .wmi_init = ath12k_wmi_init_qcn9274,
-
- .hal_ops = &hal_qcn9274_ops,
-
- .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01),
-
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
-
- .rddm_size = 0x600000,
-
- .def_num_link = 0,
- .max_mlo_peer = 256,
-
- .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
-
- .supports_sta_ps = false,
-
- .acpi_guid = NULL,
- .supports_dynamic_smps_6ghz = true,
-
- .iova_mask = 0,
-
- .supports_aspm = false,
-
- .ce_ie_addr = NULL,
- .ce_remap = NULL,
- .bdf_addr_offset = 0,
-
- .current_cc_support = false,
-
- .dp_primary_link_only = true,
- },
- {
- .name = "wcn7850 hw2.0",
- .hw_rev = ATH12K_HW_WCN7850_HW20,
-
- .fw = {
- .dir = "WCN7850/hw2.0",
- .board_size = 256 * 1024,
- .cal_offset = 256 * 1024,
- .m3_loader = ath12k_m3_fw_loader_driver,
- },
-
- .max_radios = 1,
- .single_pdev_only = true,
- .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850,
- .internal_sleep_clock = true,
-
- .hw_ops = &wcn7850_ops,
- .ring_mask = &ath12k_hw_ring_mask_wcn7850,
- .regs = &wcn7850_regs,
-
- .host_ce_config = ath12k_host_ce_config_wcn7850,
- .ce_count = 9,
- .target_ce_config = ath12k_target_ce_config_wlan_wcn7850,
- .target_ce_count = 9,
- .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_wcn7850,
- .svc_to_ce_map_len = 14,
-
- .hal_params = &ath12k_hw_hal_params_wcn7850,
-
- .rxdma1_enable = false,
- .num_rxdma_per_pdev = 2,
- .num_rxdma_dst_ring = 1,
- .rx_mac_buf_ring = true,
- .vdev_start_delay = true,
-
- .interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_DEVICE) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO),
- .supports_monitor = true,
-
- .idle_ps = true,
- .download_calib = false,
- .supports_suspend = true,
- .tcl_ring_retry = false,
- .reoq_lut_support = false,
- .supports_shadow_regs = true,
-
- .num_tcl_banks = 7,
- .max_tx_ring = 3,
-
- .mhi_config = &ath12k_mhi_config_wcn7850,
-
- .wmi_init = ath12k_wmi_init_wcn7850,
-
- .hal_ops = &hal_wcn7850_ops,
-
- .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01) |
- BIT(CNSS_PCIE_PERST_NO_PULL_V01),
-
- .rfkill_pin = 48,
- .rfkill_cfg = 0,
- .rfkill_on_level = 1,
-
- .rddm_size = 0x780000,
-
- .def_num_link = 2,
- .max_mlo_peer = 32,
-
- .otp_board_id_register = 0,
-
- .supports_sta_ps = true,
-
- .acpi_guid = &wcn7850_uuid,
- .supports_dynamic_smps_6ghz = false,
-
- .iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1,
-
- .supports_aspm = true,
-
- .ce_ie_addr = NULL,
- .ce_remap = NULL,
- .bdf_addr_offset = 0,
-
- .current_cc_support = true,
-
- .dp_primary_link_only = false,
- },
- {
- .name = "qcn9274 hw2.0",
- .hw_rev = ATH12K_HW_QCN9274_HW20,
- .fw = {
- .dir = "QCN9274/hw2.0",
- .board_size = 256 * 1024,
- .cal_offset = 128 * 1024,
- .m3_loader = ath12k_m3_fw_loader_driver,
- },
- .max_radios = 2,
- .single_pdev_only = false,
- .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
- .internal_sleep_clock = false,
-
- .hw_ops = &qcn9274_ops,
- .ring_mask = &ath12k_hw_ring_mask_qcn9274,
- .regs = &qcn9274_v2_regs,
-
- .host_ce_config = ath12k_host_ce_config_qcn9274,
- .ce_count = 16,
- .target_ce_config = ath12k_target_ce_config_wlan_qcn9274,
- .target_ce_count = 12,
- .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qcn9274,
- .svc_to_ce_map_len = 18,
-
- .hal_params = &ath12k_hw_hal_params_qcn9274,
-
- .rxdma1_enable = true,
- .num_rxdma_per_pdev = 1,
- .num_rxdma_dst_ring = 0,
- .rx_mac_buf_ring = false,
- .vdev_start_delay = false,
-
- .interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT) |
- BIT(NL80211_IFTYPE_AP_VLAN),
- .supports_monitor = true,
-
- .idle_ps = false,
- .download_calib = true,
- .supports_suspend = false,
- .tcl_ring_retry = true,
- .reoq_lut_support = true,
- .supports_shadow_regs = false,
-
- .num_tcl_banks = 48,
- .max_tx_ring = 4,
-
- .mhi_config = &ath12k_mhi_config_qcn9274,
-
- .wmi_init = ath12k_wmi_init_qcn9274,
-
- .hal_ops = &hal_qcn9274_ops,
-
- .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01),
-
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
-
- .rddm_size = 0x600000,
-
- .def_num_link = 0,
- .max_mlo_peer = 256,
-
- .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
-
- .supports_sta_ps = false,
-
- .acpi_guid = NULL,
- .supports_dynamic_smps_6ghz = true,
-
- .iova_mask = 0,
-
- .supports_aspm = false,
-
- .ce_ie_addr = NULL,
- .ce_remap = NULL,
- .bdf_addr_offset = 0,
-
- .current_cc_support = false,
-
- .dp_primary_link_only = true,
- },
- {
- .name = "ipq5332 hw1.0",
- .hw_rev = ATH12K_HW_IPQ5332_HW10,
- .fw = {
- .dir = "IPQ5332/hw1.0",
- .board_size = 256 * 1024,
- .cal_offset = 128 * 1024,
- .m3_loader = ath12k_m3_fw_loader_remoteproc,
- },
- .max_radios = 1,
- .single_pdev_only = false,
- .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ5332,
- .internal_sleep_clock = false,
-
- .hw_ops = &qcn9274_ops,
- .regs = &ipq5332_regs,
- .ring_mask = &ath12k_hw_ring_mask_ipq5332,
-
- .host_ce_config = ath12k_host_ce_config_ipq5332,
- .ce_count = 12,
- .target_ce_config = ath12k_target_ce_config_wlan_ipq5332,
- .target_ce_count = 12,
- .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_ipq5332,
- .svc_to_ce_map_len = 18,
-
- .hal_params = &ath12k_hw_hal_params_ipq5332,
-
- .rxdma1_enable = false,
- .num_rxdma_per_pdev = 1,
- .num_rxdma_dst_ring = 0,
- .rx_mac_buf_ring = false,
- .vdev_start_delay = false,
-
- .interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT),
- .supports_monitor = false,
-
- .idle_ps = false,
- .download_calib = true,
- .supports_suspend = false,
- .tcl_ring_retry = true,
- .reoq_lut_support = false,
- .supports_shadow_regs = false,
-
- .num_tcl_banks = 48,
- .max_tx_ring = 4,
-
- .wmi_init = &ath12k_wmi_init_qcn9274,
-
- .hal_ops = &hal_qcn9274_ops,
-
- .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01),
-
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
-
- .rddm_size = 0,
-
- .def_num_link = 0,
- .max_mlo_peer = 256,
-
- .otp_board_id_register = 0,
-
- .supports_sta_ps = false,
-
- .acpi_guid = NULL,
- .supports_dynamic_smps_6ghz = false,
- .iova_mask = 0,
- .supports_aspm = false,
-
- .ce_ie_addr = &ath12k_ce_ie_addr_ipq5332,
- .ce_remap = &ath12k_ce_remap_ipq5332,
- .bdf_addr_offset = 0xC00000,
-
- .dp_primary_link_only = true,
- },
-};
-
-int ath12k_hw_init(struct ath12k_base *ab)
-{
- const struct ath12k_hw_params *hw_params = NULL;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ath12k_hw_params); i++) {
- hw_params = &ath12k_hw_params[i];
-
- if (hw_params->hw_rev == ab->hw_rev)
- break;
- }
-
- if (i == ARRAY_SIZE(ath12k_hw_params)) {
- ath12k_err(ab, "Unsupported hardware version: 0x%x\n", ab->hw_rev);
- return -EINVAL;
- }
-
- ab->hw_params = hw_params;
-
- ath12k_info(ab, "Hardware name: %s\n", ab->hw_params->name);
-
- return 0;
-}
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 8ce11c3e6d5c..a9888e0521a1 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_HW_H
@@ -78,6 +78,7 @@
#define ATH12K_DEFAULT_CAL_FILE "caldata.bin"
#define ATH12K_AMSS_FILE "amss.bin"
#define ATH12K_M3_FILE "m3.bin"
+#define ATH12K_AUX_UC_FILE "aux_ucode.bin"
#define ATH12K_REGDB_FILE_NAME "regdb.bin"
#define ATH12K_PCIE_MAX_PAYLOAD_SIZE 128
@@ -128,11 +129,6 @@ struct ath12k_hw_ring_mask {
u8 tx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
};
-struct ath12k_hw_hal_params {
- enum hal_rx_buf_return_buf_manager rx_buf_rbm;
- u32 wbm2sw_cc_enable;
-};
-
enum ath12k_m3_fw_loaders {
ath12k_m3_fw_loader_driver,
ath12k_m3_fw_loader_remoteproc,
@@ -147,6 +143,7 @@ struct ath12k_hw_params {
size_t board_size;
size_t cal_offset;
enum ath12k_m3_fw_loaders m3_loader;
+ bool download_aux_ucode:1;
} fw;
u8 max_radios;
@@ -156,7 +153,6 @@ struct ath12k_hw_params {
const struct ath12k_hw_ops *hw_ops;
const struct ath12k_hw_ring_mask *ring_mask;
- const struct ath12k_hw_regs *regs;
const struct ce_attr *host_ce_config;
u32 ce_count;
@@ -165,8 +161,6 @@ struct ath12k_hw_params {
const struct service_to_pipe *svc_to_ce_map;
u32 svc_to_ce_map_len;
- const struct ath12k_hw_hal_params *hal_params;
-
bool rxdma1_enable:1;
int num_rxdma_per_pdev;
int num_rxdma_dst_ring;
@@ -193,8 +187,6 @@ struct ath12k_hw_params {
void (*wmi_init)(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
- const struct hal_ops *hal_ops;
-
u64 qmi_cnss_feature_bitmap;
u32 rfkill_pin;
@@ -285,86 +277,6 @@ enum ath12k_bd_ie_type {
ATH12K_BD_IE_REGDB = 1,
};
-struct ath12k_hw_regs {
- u32 hal_tcl1_ring_id;
- u32 hal_tcl1_ring_misc;
- u32 hal_tcl1_ring_tp_addr_lsb;
- u32 hal_tcl1_ring_tp_addr_msb;
- u32 hal_tcl1_ring_consumer_int_setup_ix0;
- u32 hal_tcl1_ring_consumer_int_setup_ix1;
- u32 hal_tcl1_ring_msi1_base_lsb;
- u32 hal_tcl1_ring_msi1_base_msb;
- u32 hal_tcl1_ring_msi1_data;
- u32 hal_tcl_ring_base_lsb;
- u32 hal_tcl1_ring_base_lsb;
- u32 hal_tcl1_ring_base_msb;
- u32 hal_tcl2_ring_base_lsb;
-
- u32 hal_tcl_status_ring_base_lsb;
-
- u32 hal_reo1_qdesc_addr;
- u32 hal_reo1_qdesc_max_peerid;
-
- u32 hal_wbm_idle_ring_base_lsb;
- u32 hal_wbm_idle_ring_misc_addr;
- u32 hal_wbm_r0_idle_list_cntl_addr;
- u32 hal_wbm_r0_idle_list_size_addr;
- u32 hal_wbm_scattered_ring_base_lsb;
- u32 hal_wbm_scattered_ring_base_msb;
- u32 hal_wbm_scattered_desc_head_info_ix0;
- u32 hal_wbm_scattered_desc_head_info_ix1;
- u32 hal_wbm_scattered_desc_tail_info_ix0;
- u32 hal_wbm_scattered_desc_tail_info_ix1;
- u32 hal_wbm_scattered_desc_ptr_hp_addr;
-
- u32 hal_wbm_sw_release_ring_base_lsb;
- u32 hal_wbm_sw1_release_ring_base_lsb;
- u32 hal_wbm0_release_ring_base_lsb;
- u32 hal_wbm1_release_ring_base_lsb;
-
- u32 pcie_qserdes_sysclk_en_sel;
- u32 pcie_pcs_osc_dtct_config_base;
-
- u32 hal_umac_ce0_src_reg_base;
- u32 hal_umac_ce0_dest_reg_base;
- u32 hal_umac_ce1_src_reg_base;
- u32 hal_umac_ce1_dest_reg_base;
-
- u32 hal_ppe_rel_ring_base;
-
- u32 hal_reo2_ring_base;
- u32 hal_reo1_misc_ctrl_addr;
- u32 hal_reo1_sw_cookie_cfg0;
- u32 hal_reo1_sw_cookie_cfg1;
- u32 hal_reo1_qdesc_lut_base0;
- u32 hal_reo1_qdesc_lut_base1;
- u32 hal_reo1_ring_base_lsb;
- u32 hal_reo1_ring_base_msb;
- u32 hal_reo1_ring_id;
- u32 hal_reo1_ring_misc;
- u32 hal_reo1_ring_hp_addr_lsb;
- u32 hal_reo1_ring_hp_addr_msb;
- u32 hal_reo1_ring_producer_int_setup;
- u32 hal_reo1_ring_msi1_base_lsb;
- u32 hal_reo1_ring_msi1_base_msb;
- u32 hal_reo1_ring_msi1_data;
- u32 hal_reo1_aging_thres_ix0;
- u32 hal_reo1_aging_thres_ix1;
- u32 hal_reo1_aging_thres_ix2;
- u32 hal_reo1_aging_thres_ix3;
-
- u32 hal_reo2_sw0_ring_base;
-
- u32 hal_sw2reo_ring_base;
- u32 hal_sw2reo1_ring_base;
-
- u32 hal_reo_cmd_ring_base;
-
- u32 hal_reo_status_ring_base;
-
- u32 gcc_gcc_pcie_hot_rst;
-};
-
static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type)
{
switch (type) {
@@ -377,6 +289,4 @@ static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type)
return "unknown";
}
-int ath12k_hw_init(struct ath12k_base *ab);
-
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index e0e49f782bf8..68431a0e128e 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -21,6 +21,8 @@
#include "hif.h"
#include "wow.h"
#include "debugfs_sta.h"
+#include "dp.h"
+#include "dp_cmn.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
@@ -361,6 +363,7 @@ u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones)
return RU_26;
}
}
+EXPORT_SYMBOL(ath12k_mac_he_convert_tones_to_ru_tones);
enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi)
{
@@ -375,6 +378,7 @@ enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi)
return NL80211_RATE_INFO_EHT_GI_0_8;
}
}
+EXPORT_SYMBOL(ath12k_mac_eht_gi_to_nl80211_eht_gi);
enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones)
{
@@ -415,6 +419,7 @@ enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru
return NL80211_RATE_INFO_EHT_RU_ALLOC_26;
}
}
+EXPORT_SYMBOL(ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc);
enum rate_info_bw
ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw)
@@ -441,6 +446,7 @@ ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw)
return ret;
}
+EXPORT_SYMBOL(ath12k_mac_bw_to_mac80211_bw);
enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw)
{
@@ -484,6 +490,7 @@ int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
return -EINVAL;
}
+EXPORT_SYMBOL(ath12k_mac_hw_ratecode_to_legacy_rate);
u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
u32 bitrate)
@@ -1167,26 +1174,119 @@ static int ath12k_mac_set_kickout(struct ath12k_link_vif *arvif)
return 0;
}
+static void ath12k_mac_link_sta_rhash_cleanup(void *data, struct ieee80211_sta *sta)
+{
+ u8 link_id;
+ unsigned long links_map;
+ struct ath12k_sta *ahsta;
+ struct ath12k *ar = data;
+ struct ath12k_link_sta *arsta;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_base *ab = ar->ab;
+
+ ahsta = ath12k_sta_to_ahsta(sta);
+ links_map = ahsta->links_map;
+
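+	/* Drop the rhash entry of every link of this station that belongs to this radio */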
+ rcu_read_lock();
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arsta = rcu_dereference(ahsta->link[link_id]);
+ if (!arsta)
+ continue;
+ arvif = arsta->arvif;
+		if (arvif->ar != ar)
+ continue;
+
+ spin_lock_bh(&ab->base_lock);
+ ath12k_link_sta_rhash_delete(ab, arsta);
+ spin_unlock_bh(&ab->base_lock);
+ }
+ rcu_read_unlock();
+}
+
void ath12k_mac_peer_cleanup_all(struct ath12k *ar)
{
- struct ath12k_peer *peer, *tmp;
+ struct ath12k_dp_link_peer *peer, *tmp;
struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_link_vif *arvif, *tmp_vif;
+ struct ath12k_dp_hw *dp_hw = &ar->ah->dp_hw;
+ struct ath12k_dp_peer *dp_peer = NULL;
+ u16 peerid_index;
+ struct list_head peers;
+
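+	/* Peers are unlinked under dp_lock onto this local list and freed only after an RCU grace period */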
+ INIT_LIST_HEAD(&peers);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- spin_lock_bh(&ab->base_lock);
- list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ spin_lock_bh(&dp->dp_lock);
+ list_for_each_entry_safe(peer, tmp, &dp->peers, list) {
/* Skip Rx TID cleanup for self peer */
- if (peer->sta)
+ if (peer->sta && peer->dp_peer)
ath12k_dp_rx_peer_tid_cleanup(ar, peer);
- list_del(&peer->list);
- kfree(peer);
+		/* Unlink the dp peer from the RCU-visible lookup tables */
+ spin_lock_bh(&dp_hw->peer_lock);
+ dp_peer = peer->dp_peer;
+ peerid_index = ath12k_dp_peer_get_peerid_index(dp, peer->peer_id);
+ rcu_assign_pointer(dp_peer->link_peers[peer->link_id], NULL);
+ rcu_assign_pointer(dp_hw->dp_peers[peerid_index], NULL);
+ spin_unlock_bh(&dp_hw->peer_lock);
+
+ ath12k_dp_link_peer_rhash_delete(dp, peer);
+
+ list_move(&peer->list, &peers);
+ }
+ spin_unlock_bh(&dp->dp_lock);
+
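+	/* Wait out in-flight RCU readers before freeing the unlinked peers */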
+ synchronize_rcu();
+
+ list_for_each_entry_safe(peer, tmp, &peers, list) {
+ ath12k_dp_link_peer_free(peer);
}
- spin_unlock_bh(&ab->base_lock);
ar->num_peers = 0;
ar->num_stations = 0;
+
+	/* Clean up the arsta rhash entries by iterating over all stations */
+ ieee80211_iterate_stations_mtx(ar->ah->hw, ath12k_mac_link_sta_rhash_cleanup,
+ ar);
+
+ /* Delete all the self dp_peers on asserted radio */
+ list_for_each_entry_safe_reverse(arvif, tmp_vif, &ar->arvifs, list) {
+		if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP &&
+		    arvif->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
+ ath12k_dp_peer_delete(dp_hw, arvif->bssid, NULL);
+ arvif->num_stations = 0;
+ }
+ }
+}
+
+void ath12k_mac_dp_peer_cleanup(struct ath12k_hw *ah)
+{
+ struct list_head peers;
+ struct ath12k_dp_peer *dp_peer, *tmp;
+ struct ath12k_dp_hw *dp_hw = &ah->dp_hw;
+
+ INIT_LIST_HEAD(&peers);
+
+ spin_lock_bh(&dp_hw->peer_lock);
+ list_for_each_entry_safe(dp_peer, tmp, &dp_hw->dp_peers_list, list) {
+ if (dp_peer->is_mlo) {
+ rcu_assign_pointer(dp_hw->dp_peers[dp_peer->peer_id], NULL);
+ clear_bit(dp_peer->peer_id, ah->free_ml_peer_id_map);
+ }
+
+ list_move(&dp_peer->list, &peers);
+ }
+
+ spin_unlock_bh(&dp_hw->peer_lock);
+
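+	/* Wait for RCU readers to finish before the dp_peers are freed below */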
+ synchronize_rcu();
+
+ list_for_each_entry_safe(dp_peer, tmp, &peers, list) {
+ list_del(&dp_peer->list);
+ kfree(dp_peer);
+ }
}
static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
@@ -1458,10 +1558,11 @@ err:
return ret;
}
-static int ath12k_mac_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
+int ath12k_mac_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_config);
static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_link_vif *arvif,
struct sk_buff *bcn)
@@ -3728,10 +3829,11 @@ static void ath12k_bss_assoc(struct ath12k *ar,
struct ath12k_link_sta *arsta;
struct ieee80211_sta *ap_sta;
struct ath12k_sta *ahsta;
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
bool is_auth = false;
u32 hemode = 0;
int ret;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ar->ab);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -3843,13 +3945,14 @@ static void ath12k_bss_assoc(struct ath12k *ar,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
- spin_lock_bh(&ar->ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arvif->vdev_id,
+ arvif->bssid);
if (peer && peer->is_authorized)
is_auth = true;
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
/* Authorize BSS Peer */
if (is_auth) {
@@ -4085,6 +4188,9 @@ static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
if (ret)
ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d",
arvif->vdev_id, arvif->link_id, ret);
+
+ if (arvif->link_id < IEEE80211_MLD_MAX_NUM_LINKS)
+ ath12k_dp_peer_delete(&ah->dp_hw, arvif->bssid, NULL);
}
ath12k_mac_vdev_delete(ar, arvif);
}
@@ -4138,7 +4244,7 @@ static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif)
memset(arvif, 0, sizeof(*arvif));
}
-static int
+int
ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u16 old_links, u16 new_links,
@@ -4175,8 +4281,10 @@ ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw,
if (WARN_ON(!arvif))
return -EINVAL;
- if (!arvif->is_created)
+ if (!arvif->is_created) {
+ ath12k_mac_unassign_link_vif(arvif);
continue;
+ }
if (WARN_ON(!arvif->ar))
return -EINVAL;
@@ -4187,6 +4295,7 @@ ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw,
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_change_vif_links);
static int ath12k_mac_fils_discovery(struct ath12k_link_vif *arvif,
struct ieee80211_bss_conf *info)
@@ -4235,9 +4344,9 @@ static int ath12k_mac_fils_discovery(struct ath12k_link_vif *arvif,
return ret;
}
-static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- u64 changed)
+void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 changed)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
unsigned long links = ahvif->links_map;
@@ -4305,6 +4414,7 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
}
}
}
+EXPORT_SYMBOL(ath12k_mac_op_vif_cfg_changed);
static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
{
@@ -4397,6 +4507,166 @@ static void ath12k_wmi_vdev_params_up(struct ath12k *ar,
arvif->vdev_id, ret);
}
+static int ath12k_mac_config_obss_pd(struct ath12k_link_vif *arvif,
+ const struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct ath12k_wmi_obss_pd_arg obss_pd_arg = {};
+ u32 srg_bitmap[2], non_srg_bitmap[2];
+ struct ath12k *ar = arvif->ar;
+ u32 param_id, pdev_id;
+ u32 param_val;
+ int ret;
+
+ if (ar->ab->hw_params->single_pdev_only)
+ pdev_id = ath12k_mac_get_target_pdev_id_from_vif(arvif);
+ else
+ pdev_id = ar->pdev->pdev_id;
+
+ /* Set and enable SRG/non-SRG OBSS PD threshold */
+ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD;
+ if (ar->monitor_started || !he_obss_pd->enable) {
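+		/* A zero threshold value disables OBSS PD in firmware */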
+ ret = ath12k_wmi_pdev_set_param(ar, param_id, 0, pdev_id);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to set OBSS PD threshold for pdev %u: %d\n",
+ pdev_id, ret);
+ return ret;
+ }
+
+ /*
+ * This service flag indicates firmware support for SRG/SRP-based
+ * spatial reuse. It also specifies whether OBSS PD threshold values
+ * should be interpreted as dB (offset) or dBm (absolute) units.
+ */
+ obss_pd_arg.srp_support = test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
+ ar->ab->wmi_ab.svc_map);
+
+ if (!(he_obss_pd->sr_ctrl &
+ IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED)) {
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+ obss_pd_arg.non_srg_th = ATH12K_OBSS_PD_MAX_THRESHOLD +
+ he_obss_pd->non_srg_max_offset;
+ else
+ obss_pd_arg.non_srg_th = ATH12K_OBSS_PD_NON_SRG_MAX_THRESHOLD;
+
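+		/* Without SRG/SRP service support the firmware takes an absolute dBm threshold, so fold in the default noise floor */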
+ if (!obss_pd_arg.srp_support)
+ obss_pd_arg.non_srg_th -= ATH12K_DEFAULT_NOISE_FLOOR;
+
+ obss_pd_arg.non_srg_enabled = true;
+ }
+
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
+ obss_pd_arg.srg_th = ATH12K_OBSS_PD_MAX_THRESHOLD +
+ he_obss_pd->max_offset;
+ obss_pd_arg.srg_enabled = true;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "pdev %u OBSS PD sr_ctrl 0x%x srg_th %d dBm non_srg_th %d dBm\n",
+ pdev_id, he_obss_pd->sr_ctrl,
+ obss_pd_arg.srg_th, obss_pd_arg.non_srg_th);
+
+ param_val = ath12k_wmi_build_obss_pd(&obss_pd_arg);
+ ret = ath12k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to set OBSS PD threshold for pdev %u: %d\n",
+ pdev_id, ret);
+ return ret;
+ }
+
+	/* Enable OBSS PD for all four WMM access categories */
+ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC;
+ param_val = 0xf;
+ ret = ath12k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to set OBSS PD per ac for pdev %u: %d\n",
+ pdev_id, ret);
+ return ret;
+ }
+
+ /* Set SR prohibit */
+ param_id = WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT;
+ param_val = !!(he_obss_pd->sr_ctrl &
+ IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED);
+ ret = ath12k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set SR prohibit for pdev %u: %d\n",
+ pdev_id, ret);
+ return ret;
+ }
+
+ if (!obss_pd_arg.srp_support)
+ return 0;
+
+ memcpy(srg_bitmap, he_obss_pd->bss_color_bitmap, sizeof(srg_bitmap));
+ /* Set SRG BSS color bitmap */
+ ret = ath12k_wmi_pdev_set_srg_bss_color_bitmap(ar, pdev_id, srg_bitmap);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to set SRG bss color bitmap for pdev %u: %d\n",
+ pdev_id, ret);
+ return ret;
+ }
+
+ /* Enable BSS colors for SRG */
+ ret = ath12k_wmi_pdev_srg_obss_color_enable_bitmap(ar, pdev_id, srg_bitmap);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to enable SRG bss color bitmap pdev %u: %d\n",
+ pdev_id, ret);
+ return ret;
+ }
+
+ memcpy(srg_bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(srg_bitmap));
+ /* Set SRG partial bssid bitmap */
+ ret = ath12k_wmi_pdev_set_srg_partial_bssid_bitmap(ar, pdev_id, srg_bitmap);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to set SRG partial bssid bitmap for pdev %u: %d\n",
+ pdev_id, ret);
+ return ret;
+ }
+
+ /* Enable partial bssid mask for SRG */
+ ret = ath12k_wmi_pdev_srg_obss_bssid_enable_bitmap(ar, pdev_id, srg_bitmap);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to enable SRG bssid bitmap pdev %u: %d\n",
+ pdev_id, ret);
+ return ret;
+ }
+
+ /*
+ * No explicit non-SRG bitmap from mac80211; enable all colors/bssids
+ * as non-SRG candidates. Actual SRG members are filtered by SRG bitmaps.
+ */
+ memset(non_srg_bitmap, 0xff, sizeof(non_srg_bitmap));
+
+ /* Enable BSS colors for non-SRG */
+ ret = ath12k_wmi_pdev_non_srg_obss_color_enable_bitmap(ar, pdev_id,
+ non_srg_bitmap);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to enable non SRG color bitmap pdev %u: %d\n",
+ pdev_id, ret);
+ return ret;
+ }
+
+ /* Enable partial bssid mask for non-SRG */
+ ret = ath12k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(ar, pdev_id,
+ non_srg_bitmap);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to enable non SRG bssid bitmap pdev %u: %d\n",
+ pdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void ath12k_mac_bss_info_changed(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ieee80211_bss_conf *info,
@@ -4688,9 +4958,13 @@ skip_vdev_up:
ath12k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
}
- if (changed & BSS_CHANGED_HE_OBSS_PD)
- ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
- &info->he_obss_pd);
+ if (changed & BSS_CHANGED_HE_OBSS_PD) {
+ if (vif->type == NL80211_IFTYPE_AP)
+ ath12k_mac_config_obss_pd(arvif, &info->he_obss_pd);
+ else
+ ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &info->he_obss_pd);
+ }
if (changed & BSS_CHANGED_HE_BSS_COLOR) {
if (vif->type == NL80211_IFTYPE_AP) {
@@ -4780,10 +5054,10 @@ static void ath12k_ahvif_put_link_cache(struct ath12k_vif *ahvif, u8 link_id)
ahvif->cache[link_id] = NULL;
}
-static void ath12k_mac_op_link_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u64 changed)
+void ath12k_mac_op_link_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
{
struct ath12k *ar;
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
@@ -4813,6 +5087,7 @@ static void ath12k_mac_op_link_info_changed(struct ieee80211_hw *hw,
ath12k_mac_bss_info_changed(ar, arvif, info, changed);
}
+EXPORT_SYMBOL(ath12k_mac_op_link_info_changed);
static struct ath12k*
ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
@@ -5114,10 +5389,10 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar,
return 0;
}
-static int ath12k_mac_op_get_txpower(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- unsigned int link_id,
- int *dbm)
+int ath12k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id,
+ int *dbm)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_fw_stats_req_params params = {};
@@ -5191,6 +5466,7 @@ err_fallback:
*dbm);
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_get_txpower);
static u8
ath12k_mac_find_link_id_by_ar(struct ath12k_vif *ahvif, struct ath12k *ar)
@@ -5402,9 +5678,9 @@ exit:
return ret;
}
-static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_scan_request *hw_req)
+int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ieee80211_channel **chan_list, *chan;
@@ -5482,9 +5758,10 @@ abort:
kfree(chan_list);
return ret;
}
+EXPORT_SYMBOL(ath12k_mac_op_hw_scan);
-static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
unsigned long link_id, links_map = ahvif->links_map;
@@ -5506,6 +5783,7 @@ static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
cancel_delayed_work_sync(&ar->scan.timeout);
}
}
+EXPORT_SYMBOL(ath12k_mac_op_cancel_hw_scan);
static int ath12k_install_key(struct ath12k_link_vif *arvif,
struct ieee80211_key_conf *key,
@@ -5613,7 +5891,7 @@ install:
return -ETIMEDOUT;
if (ether_addr_equal(arg.macaddr, arvif->bssid))
- ahvif->key_cipher = arg.ieee80211_key_cipher;
+ ahvif->dp_vif.key_cipher = arg.ieee80211_key_cipher;
if (ar->install_key_status) {
ret = -EINVAL;
@@ -5656,27 +5934,40 @@ static int ath12k_clear_peer_keys(struct ath12k_link_vif *arvif,
{
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
int first_errno = 0;
int ret;
- int i;
+ int i, len;
u32 flags = 0;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1] = {};
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find(ab, arvif->vdev_id, addr);
- spin_unlock_bh(&ab->base_lock);
-
- if (!peer)
+ spin_lock_bh(&dp->dp_lock);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arvif->vdev_id, addr);
+ if (!peer || !peer->dp_peer) {
+ spin_unlock_bh(&dp->dp_lock);
return -ENOENT;
+ }
+
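+	/* Snapshot and clear the keys under dp_lock; the key deletion below sends WMI commands and may sleep */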
+ len = ARRAY_SIZE(peer->dp_peer->keys);
+ for (i = 0; i < len; i++) {
+ if (!peer->dp_peer->keys[i])
+ continue;
- for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
- if (!peer->keys[i])
+ keys[i] = peer->dp_peer->keys[i];
+ peer->dp_peer->keys[i] = NULL;
+ }
+
+ spin_unlock_bh(&dp->dp_lock);
+
+ for (i = 0; i < len; i++) {
+ if (!keys[i])
continue;
/* key flags are not required to delete the key */
- ret = ath12k_install_key(arvif, peer->keys[i],
+ ret = ath12k_install_key(arvif, keys[i],
DISABLE_KEY, addr, flags);
if (ret < 0 && first_errno == 0)
first_errno = ret;
@@ -5684,10 +5975,6 @@ static int ath12k_clear_peer_keys(struct ath12k_link_vif *arvif,
if (ret < 0)
ath12k_warn(ab, "failed to remove peer key %d: %d\n",
i, ret);
-
- spin_lock_bh(&ab->base_lock);
- peer->keys[i] = NULL;
- spin_unlock_bh(&ab->base_lock);
}
return first_errno;
@@ -5700,11 +5987,12 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
{
struct ieee80211_sta *sta = NULL;
struct ath12k_base *ab = ar->ab;
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
struct ath12k_sta *ahsta;
const u8 *peer_addr;
int ret;
u32 flags = 0;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -5724,11 +6012,12 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
/* the peer should not disappear midway (unless FW goes awry) since
* we already hold the wiphy lock. we just make sure it's there now.
*/
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
- spin_unlock_bh(&ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arvif->vdev_id,
+ peer_addr);
+ if (!peer || !peer->dp_peer) {
+ spin_unlock_bh(&dp->dp_lock);
- if (!peer) {
if (cmd == SET_KEY) {
ath12k_warn(ab, "cannot install key for non-existent peer %pM\n",
peer_addr);
@@ -5741,6 +6030,8 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
return 0;
}
+ spin_unlock_bh(&dp->dp_lock);
+
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
flags = WMI_KEY_PAIRWISE;
else
@@ -5758,23 +6049,26 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
return ret;
}
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
- if (peer && cmd == SET_KEY) {
- peer->keys[key->keyidx] = key;
+ spin_lock_bh(&dp->dp_lock);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arvif->vdev_id,
+ peer_addr);
+ if (peer && peer->dp_peer && cmd == SET_KEY) {
+ peer->dp_peer->keys[key->keyidx] = key;
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
- peer->ucast_keyidx = key->keyidx;
- peer->sec_type = ath12k_dp_tx_get_encrypt_type(key->cipher);
+ peer->dp_peer->ucast_keyidx = key->keyidx;
+ peer->dp_peer->sec_type =
+ ath12k_dp_tx_get_encrypt_type(key->cipher);
} else {
- peer->mcast_keyidx = key->keyidx;
- peer->sec_type_grp = ath12k_dp_tx_get_encrypt_type(key->cipher);
+ peer->dp_peer->mcast_keyidx = key->keyidx;
+ peer->dp_peer->sec_type_grp =
+ ath12k_dp_tx_get_encrypt_type(key->cipher);
}
- } else if (peer && cmd == DISABLE_KEY) {
- peer->keys[key->keyidx] = NULL;
+ } else if (peer && peer->dp_peer && cmd == DISABLE_KEY) {
+ peer->dp_peer->keys[key->keyidx] = NULL;
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
- peer->ucast_keyidx = 0;
+ peer->dp_peer->ucast_keyidx = 0;
else
- peer->mcast_keyidx = 0;
+ peer->dp_peer->mcast_keyidx = 0;
} else if (!peer)
/* impossible unless FW goes crazy */
ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr);
@@ -5799,7 +6093,7 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
}
}
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
return 0;
}
@@ -5843,9 +6137,9 @@ static int ath12k_mac_update_key_cache(struct ath12k_vif_cache *cache,
return 0;
}
-static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif;
@@ -5932,6 +6226,7 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_set_key);
static int
ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar,
@@ -6557,46 +6852,47 @@ static void ath12k_mac_station_post_remove(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ar->ab);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
ath12k_mac_dec_num_stations(arvif, arsta);
- spin_lock_bh(&ar->ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arvif->vdev_id,
+ arsta->addr);
if (peer && peer->sta == sta) {
ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
vif->addr, arvif->vdev_id);
peer->sta = NULL;
- list_del(&peer->list);
- kfree(peer);
+
+ ath12k_dp_link_peer_free(peer);
ar->num_peers--;
}
- spin_unlock_bh(&ar->ab->base_lock);
-
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
+ spin_unlock_bh(&dp->dp_lock);
}
static int ath12k_mac_station_unauthorize(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta)
{
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
int ret;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ar->ab);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- spin_lock_bh(&ar->ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arvif->vdev_id,
+ arsta->addr);
if (peer)
peer->is_authorized = false;
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
/* Driver must clear the keys during the state change from
* IEEE80211_STA_AUTHORIZED to IEEE80211_STA_ASSOC, since after
@@ -6618,19 +6914,21 @@ static int ath12k_mac_station_authorize(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta)
{
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
int ret;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ar->ab);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- spin_lock_bh(&ar->ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arvif->vdev_id,
+ arsta->addr);
if (peer)
peer->is_authorized = true;
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
@@ -6654,6 +6952,7 @@ static int ath12k_mac_station_remove(struct ath12k *ar,
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ath12k_vif *ahvif = arvif->ahvif;
int ret = 0;
+ struct ath12k_link_sta *temp_arsta;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -6682,6 +6981,15 @@ static int ath12k_mac_station_remove(struct ath12k *ar,
ath12k_mac_station_post_remove(ar, arvif, arsta);
+ spin_lock_bh(&ar->ab->base_lock);
+
+	/* Handle the roaming and split-PHY scenarios */
+ temp_arsta = ath12k_link_sta_find_by_addr(ar->ab, arsta->addr);
+ if (temp_arsta && temp_arsta->arvif->ar == ar)
+ ath12k_link_sta_rhash_delete(ar->ab, arsta);
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
if (sta->valid_links)
ath12k_mac_free_unassign_link_sta(ahvif->ah,
arsta->ahsta, arsta->link_id);
@@ -6698,6 +7006,7 @@ static int ath12k_mac_station_add(struct ath12k *ar,
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ath12k_wmi_peer_create_arg peer_param = {};
int ret;
+ struct ath12k_link_sta *temp_arsta;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -6708,12 +7017,24 @@ static int ath12k_mac_station_add(struct ath12k *ar,
goto exit;
}
- if (ath12k_debugfs_is_extd_rx_stats_enabled(ar) && !arsta->rx_stats) {
- arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
- if (!arsta->rx_stats) {
- ret = -ENOMEM;
- goto dec_num_station;
- }
+ spin_lock_bh(&ab->base_lock);
+
+ /*
+	 * In split-PHY and roaming scenarios the pdev index
+	 * might differ, but both pdevs share the same rhash
+	 * table. In that case update the rhash table if the
+	 * link_sta is already present.
+ */
+ temp_arsta = ath12k_link_sta_find_by_addr(ab, arsta->addr);
+ if (temp_arsta && temp_arsta->arvif->ar != ar)
+ ath12k_link_sta_rhash_delete(ab, temp_arsta);
+
+ ret = ath12k_link_sta_rhash_add(ab, arsta);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath12k_warn(ab, "Failed to add arsta: %pM to hash table, ret: %d",
+ arsta->addr, ret);
+ goto dec_num_station;
}
peer_param.vdev_id = arvif->vdev_id;
@@ -6759,13 +7080,13 @@ static int ath12k_mac_station_add(struct ath12k *ar,
}
}
- ewma_avg_rssi_init(&arsta->avg_rssi);
return 0;
free_peer:
ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
+ spin_lock_bh(&ab->base_lock);
+ ath12k_link_sta_rhash_delete(ab, arsta);
+ spin_unlock_bh(&ab->base_lock);
dec_num_station:
ath12k_mac_dec_num_stations(arvif, arsta);
exit:
@@ -6842,10 +7163,17 @@ static void ath12k_mac_ml_station_remove(struct ath12k_vif *ahvif,
ath12k_mac_station_post_remove(ar, arvif, arsta);
+ spin_lock_bh(&ar->ab->base_lock);
+ ath12k_link_sta_rhash_delete(ar->ab, arsta);
+ spin_unlock_bh(&ar->ab->base_lock);
+
ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
}
- ath12k_peer_ml_delete(ah, sta);
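+	/* Release the ML peer ID allocated when the station was added */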
+ if (sta->mlo) {
+ clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
+ ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+ }
}
static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
@@ -7268,11 +7596,11 @@ static int ath12k_mac_select_links(struct ath12k_base *ab,
return 0;
}
-static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
+int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
@@ -7284,7 +7612,8 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
u16 selected_links = 0;
u8 link_id = 0, i;
struct ath12k *ar;
- int ret;
+ int ret = -EINVAL;
+ struct ath12k_dp_peer_create_params dp_params = {};
lockdep_assert_wiphy(hw->wiphy);
@@ -7307,12 +7636,28 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
/* ML sta */
if (sta->mlo && !ahsta->links_map &&
(hweight16(sta->valid_links) == 1)) {
- ret = ath12k_peer_ml_create(ah, sta);
- if (ret) {
- ath12k_hw_warn(ah, "unable to create ML peer for sta %pM",
+ ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);
+ if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+ ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM",
sta->addr);
goto exit;
}
+
+ dp_params.is_mlo = true;
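+		/* Flag the allocated ID as an ML peer ID for the DP layer */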
+ dp_params.peer_id = ahsta->ml_peer_id | ATH12K_PEER_ML_ID_VALID;
+ }
+
+ dp_params.sta = sta;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ dp_params.ucast_ra_only = true;
+
+ ret = ath12k_dp_peer_create(&ah->dp_hw, sta->addr, &dp_params);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable to create ath12k_dp_peer for sta %pM, ret: %d",
+ sta->addr, ret);
+
+ goto ml_peer_id_clear;
}
ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif,
@@ -7320,7 +7665,7 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
if (ret) {
ath12k_hw_warn(ah, "unable assign link %d for sta %pM",
link_id, sta->addr);
- goto exit;
+ goto peer_delete;
}
/* above arsta will get memset, hence do this after assign
@@ -7390,7 +7735,12 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
if (ret) {
ath12k_hw_warn(ah, "unable to move link sta %d of sta %pM from state %d to %d",
link_id, arsta->addr, old_state, new_state);
- goto exit;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ goto peer_delete;
+ else
+ goto exit;
}
}
@@ -7418,11 +7768,23 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
* handler below
*/
if (old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST && sta->mlo)
- ath12k_mac_ml_station_remove(ahvif, ahsta);
+ new_state == IEEE80211_STA_NOTEXIST) {
+ if (sta->mlo)
+ ath12k_mac_ml_station_remove(ahvif, ahsta);
+
+ ath12k_dp_peer_delete(&ah->dp_hw, sta->addr, sta);
+ }
ret = 0;
+ goto exit;
+peer_delete:
+ ath12k_dp_peer_delete(&ah->dp_hw, sta->addr, sta);
+ml_peer_id_clear:
+ if (sta->mlo) {
+ clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
+ ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+ }
exit:
/* update the state if everything went well */
if (!ret)
@@ -7430,10 +7792,11 @@ exit:
return ret;
}
+EXPORT_SYMBOL(ath12k_mac_op_sta_state);
-static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k *ar;
@@ -7480,11 +7843,12 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
out:
return ret;
}
+EXPORT_SYMBOL(ath12k_mac_op_sta_set_txpwr);
-static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_link_sta *link_sta,
- u32 changed)
+void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
{
struct ieee80211_sta *sta = link_sta->sta;
struct ath12k *ar;
@@ -7493,8 +7857,9 @@ static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_link_sta *arsta;
struct ath12k_link_vif *arvif;
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
u32 bw, smps;
+ struct ath12k_dp *dp;
rcu_read_lock();
arvif = rcu_dereference(ahvif->link[link_sta->link_id]);
@@ -7506,6 +7871,7 @@ static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
}
ar = arvif->ar;
+ dp = ath12k_ab_to_dp(ar->ab);
arsta = rcu_dereference(ahsta->link[link_sta->link_id]);
if (!arsta) {
@@ -7514,18 +7880,19 @@ static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
link_sta->link_id, sta->addr);
return;
}
- spin_lock_bh(&ar->ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arvif->vdev_id,
+ arsta->addr);
if (!peer) {
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
rcu_read_unlock();
ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
arsta->addr, arvif->vdev_id);
return;
}
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
rcu_read_unlock();
@@ -7588,6 +7955,7 @@ static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
rcu_read_unlock();
}
+EXPORT_SYMBOL(ath12k_mac_op_link_sta_rc_update);
static struct ath12k_link_sta *ath12k_mac_alloc_assign_link_sta(struct ath12k_hw *ah,
struct ath12k_sta *ahsta,
@@ -7619,10 +7987,10 @@ static struct ath12k_link_sta *ath12k_mac_alloc_assign_link_sta(struct ath12k_hw
return arsta;
}
-static int ath12k_mac_op_change_sta_links(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- u16 old_links, u16 new_links)
+int ath12k_mac_op_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
@@ -7683,15 +8051,17 @@ static int ath12k_mac_op_change_sta_links(struct ieee80211_hw *hw,
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_change_sta_links);
-static bool ath12k_mac_op_can_activate_links(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- u16 active_links)
+bool ath12k_mac_op_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 active_links)
{
/* TODO: Handle recovery case */
return true;
}
+EXPORT_SYMBOL(ath12k_mac_op_can_activate_links);
static int ath12k_conf_tx_uapsd(struct ath12k_link_vif *arvif,
u16 ac, bool enable)
@@ -7803,10 +8173,10 @@ exit:
return ret;
}
-static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- unsigned int link_id, u16 ac,
- const struct ieee80211_tx_queue_params *params)
+int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif;
@@ -7835,6 +8205,7 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
return ret;
}
+EXPORT_SYMBOL(ath12k_mac_op_conf_tx);
static struct ieee80211_sta_ht_cap
ath12k_create_ht_cap(struct ath12k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
@@ -8686,7 +9057,7 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
enctype = ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
- mic_len = ath12k_dp_rx_crypto_mic_len(ar, enctype);
+ mic_len = ath12k_dp_rx_crypto_mic_len(ab->dp, enctype);
skb_put(skb, mic_len);
}
}
@@ -8957,8 +9328,8 @@ static void ath12k_mgmt_over_wmi_tx_work(struct wiphy *wiphy, struct wiphy_work
}
}
-static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
- bool is_prb_rsp)
+int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
+ bool is_prb_rsp)
{
struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
@@ -8988,11 +9359,12 @@ static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_mgmt_tx);
-static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct sk_buff *skb,
- bool is_prb_rsp)
+void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb,
+ bool is_prb_rsp)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
@@ -9009,11 +9381,12 @@ static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
spin_unlock_bh(&ar->data_lock);
}
+EXPORT_SYMBOL(ath12k_mac_add_p2p_noa_ie);
/* Note: called under rcu_read_lock() */
-static void ath12k_mlo_mcast_update_tx_link_address(struct ieee80211_vif *vif,
- u8 link_id, struct sk_buff *skb,
- u32 info_flags)
+void ath12k_mlo_mcast_update_tx_link_address(struct ieee80211_vif *vif,
+ u8 link_id, struct sk_buff *skb,
+ u32 info_flags)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_bss_conf *bss_conf;
@@ -9025,10 +9398,11 @@ static void ath12k_mlo_mcast_update_tx_link_address(struct ieee80211_vif *vif,
if (bss_conf)
ether_addr_copy(hdr->addr2, bss_conf->addr);
}
+EXPORT_SYMBOL(ath12k_mlo_mcast_update_tx_link_address);
/* Note: called under rcu_read_lock() */
-static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
- u8 link, struct sk_buff *skb, u32 info_flags)
+u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ u8 link, struct sk_buff *skb, u32 info_flags)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
@@ -9123,193 +9497,7 @@ static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif
return link;
}
-
-/* Note: called under rcu_read_lock() */
-static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
- struct ieee80211_tx_control *control,
- struct sk_buff *skb)
-{
- struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_vif *vif = info->control.vif;
- struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_link_vif *arvif = &ahvif->deflink;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct ieee80211_sta *sta = control->sta;
- struct ath12k_link_vif *tmp_arvif;
- u32 info_flags = info->flags;
- struct sk_buff *msdu_copied;
- struct ath12k *ar, *tmp_ar;
- struct ath12k_peer *peer;
- unsigned long links_map;
- bool is_mcast = false;
- bool is_dvlan = false;
- struct ethhdr *eth;
- bool is_prb_rsp;
- u16 mcbc_gsn;
- u8 link_id;
- int ret;
-
- if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
- ieee80211_free_txskb(hw, skb);
- return;
- }
-
- link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
- memset(skb_cb, 0, sizeof(*skb_cb));
- skb_cb->vif = vif;
-
- if (key) {
- skb_cb->cipher = key->cipher;
- skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
- }
-
- /* handle only for MLO case, use deflink for non MLO case */
- if (ieee80211_vif_is_mld(vif)) {
- link_id = ath12k_mac_get_tx_link(sta, vif, link_id, skb, info_flags);
- if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
- ieee80211_free_txskb(hw, skb);
- return;
- }
- } else {
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
- link_id = ATH12K_FIRST_SCAN_LINK;
- else
- link_id = 0;
- }
-
- arvif = rcu_dereference(ahvif->link[link_id]);
- if (!arvif || !arvif->ar) {
- ath12k_warn(ahvif->ah, "failed to find arvif link id %u for frame transmission",
- link_id);
- ieee80211_free_txskb(hw, skb);
- return;
- }
-
- ar = arvif->ar;
- skb_cb->link_id = link_id;
- is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
-
- if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
- eth = (struct ethhdr *)skb->data;
- is_mcast = is_multicast_ether_addr(eth->h_dest);
-
- skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
- } else if (ieee80211_is_mgmt(hdr->frame_control)) {
- if (sta && sta->mlo)
- skb_cb->flags |= ATH12K_SKB_MLO_STA;
-
- ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
- if (ret) {
- ath12k_warn(ar->ab, "failed to queue management frame %d\n",
- ret);
- ieee80211_free_txskb(hw, skb);
- }
- return;
- }
-
- if (!(info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
- is_mcast = is_multicast_ether_addr(hdr->addr1);
-
- /* This is case only for P2P_GO */
- if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
- ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
-
- /* Checking if it is a DVLAN frame */
- if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
- !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
- !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
- ieee80211_has_protected(hdr->frame_control))
- is_dvlan = true;
-
- if (!vif->valid_links || !is_mcast || is_dvlan ||
- (skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) ||
- test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) {
- ret = ath12k_dp_tx(ar, arvif, skb, false, 0, is_mcast);
- if (unlikely(ret)) {
- ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
- ieee80211_free_txskb(ar->ah->hw, skb);
- return;
- }
- } else {
- mcbc_gsn = atomic_inc_return(&ahvif->mcbc_gsn) & 0xfff;
-
- links_map = ahvif->links_map;
- for_each_set_bit(link_id, &links_map,
- IEEE80211_MLD_MAX_NUM_LINKS) {
- tmp_arvif = rcu_dereference(ahvif->link[link_id]);
- if (!tmp_arvif || !tmp_arvif->is_up)
- continue;
-
- tmp_ar = tmp_arvif->ar;
- msdu_copied = skb_copy(skb, GFP_ATOMIC);
- if (!msdu_copied) {
- ath12k_err(ar->ab,
- "skb copy failure link_id 0x%X vdevid 0x%X\n",
- link_id, tmp_arvif->vdev_id);
- continue;
- }
-
- ath12k_mlo_mcast_update_tx_link_address(vif, link_id,
- msdu_copied,
- info_flags);
-
- skb_cb = ATH12K_SKB_CB(msdu_copied);
- skb_cb->link_id = link_id;
-
- /* For open mode, skip peer find logic */
- if (unlikely(!ahvif->key_cipher))
- goto skip_peer_find;
-
- spin_lock_bh(&tmp_ar->ab->base_lock);
- peer = ath12k_peer_find_by_addr(tmp_ar->ab, tmp_arvif->bssid);
- if (!peer) {
- spin_unlock_bh(&tmp_ar->ab->base_lock);
- ath12k_warn(tmp_ar->ab,
- "failed to find peer for vdev_id 0x%X addr %pM link_map 0x%X\n",
- tmp_arvif->vdev_id, tmp_arvif->bssid,
- ahvif->links_map);
- dev_kfree_skb_any(msdu_copied);
- continue;
- }
-
- key = peer->keys[peer->mcast_keyidx];
- if (key) {
- skb_cb->cipher = key->cipher;
- skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
-
- hdr = (struct ieee80211_hdr *)msdu_copied->data;
- if (!ieee80211_has_protected(hdr->frame_control))
- hdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_PROTECTED);
- }
- spin_unlock_bh(&tmp_ar->ab->base_lock);
-
-skip_peer_find:
- ret = ath12k_dp_tx(tmp_ar, tmp_arvif,
- msdu_copied, true, mcbc_gsn, is_mcast);
- if (unlikely(ret)) {
- if (ret == -ENOMEM) {
- /* Drops are expected during heavy multicast
- * frame flood. Print with debug log
- * level to avoid lot of console prints
- */
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
- "failed to transmit frame %d\n",
- ret);
- } else {
- ath12k_warn(ar->ab,
- "failed to transmit frame %d\n",
- ret);
- }
-
- dev_kfree_skb_any(msdu_copied);
- }
- }
- ieee80211_free_txskb(ar->ah->hw, skb);
- }
-}
+EXPORT_SYMBOL(ath12k_mac_get_tx_link);
void ath12k_mac_drain_tx(struct ath12k *ar)
{
@@ -9484,7 +9672,7 @@ static void ath12k_drain_tx(struct ath12k_hw *ah)
ath12k_mac_drain_tx(ar);
}
-static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+int ath12k_mac_op_start(struct ieee80211_hw *hw)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
@@ -9537,6 +9725,7 @@ fail_start:
return ret;
}
+EXPORT_SYMBOL(ath12k_mac_op_start);
int ath12k_mac_rfkill_config(struct ath12k *ar)
{
@@ -9597,6 +9786,7 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable)
static void ath12k_mac_stop(struct ath12k *ar)
{
+ struct ath12k_pdev_dp *dp_pdev = &ar->dp;
struct ath12k_hw *ah = ar->ah;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
struct ath12k_wmi_scan_chan_list_arg *arg;
@@ -9621,13 +9811,14 @@ static void ath12k_mac_stop(struct ath12k *ar)
ar->state_11d = ATH12K_11D_IDLE;
complete(&ar->completed_11d_scan);
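+	/* PPDU stats live on the per-pdev DP context and are protected by
+	 * ppdu_list_lock rather than ar->data_lock.
+	 */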
- spin_lock_bh(&ar->data_lock);
-
- list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
+ spin_lock_bh(&dp_pdev->ppdu_list_lock);
+ list_for_each_entry_safe(ppdu_stats, tmp, &dp_pdev->ppdu_stats_info, list) {
list_del(&ppdu_stats->list);
kfree(ppdu_stats);
}
+ spin_unlock_bh(&dp_pdev->ppdu_list_lock);
+ spin_lock_bh(&ar->data_lock);
while ((arg = list_first_entry_or_null(&ar->regd_channel_update_queue,
struct ath12k_wmi_scan_chan_list_arg,
list))) {
@@ -9643,7 +9834,7 @@ static void ath12k_mac_stop(struct ath12k *ar)
atomic_set(&ar->num_pending_mgmt_tx, 0);
}
-static void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
+void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
@@ -9662,6 +9853,7 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
mutex_unlock(&ah->hw_mutex);
}
+EXPORT_SYMBOL(ath12k_mac_op_stop);
static u8
ath12k_mac_get_vdev_stats_id(struct ath12k_link_vif *arvif)
@@ -9795,14 +9987,14 @@ static void ath12k_mac_update_vif_offload(struct ath12k_link_vif *arvif)
IEEE80211_OFFLOAD_DECAP_ENABLED);
if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
- ahvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET;
+ ahvif->dp_vif.tx_encap_type = ATH12K_HW_TXRX_ETHERNET;
else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
- ahvif->tx_encap_type = ATH12K_HW_TXRX_RAW;
+ ahvif->dp_vif.tx_encap_type = ATH12K_HW_TXRX_RAW;
else
- ahvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI;
+ ahvif->dp_vif.tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- param_id, ahvif->tx_encap_type);
+ param_id, ahvif->dp_vif.tx_encap_type);
if (ret) {
ath12k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
arvif->vdev_id, ret);
@@ -9826,8 +10018,8 @@ static void ath12k_mac_update_vif_offload(struct ath12k_link_vif *arvif)
}
}
-static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif;
@@ -9851,6 +10043,7 @@ static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
ath12k_mac_update_vif_offload(&ahvif->deflink);
}
+EXPORT_SYMBOL(ath12k_mac_op_update_vif_offload);
static bool ath12k_mac_vif_ap_active_any(struct ath12k_base *ab)
{
@@ -10016,6 +10209,9 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
int i;
int ret, vdev_id;
u8 link_id;
+ struct ath12k_dp_link_vif *dp_link_vif = NULL;
+ struct ath12k_dp_peer_create_params params = {};
+ bool dp_peer_created = false;
lockdep_assert_wiphy(hw->wiphy);
@@ -10097,8 +10293,26 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
goto err_vdev_del;
}
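+	/* Mirror the vdev, lmac and pdev identifiers into the DP link vif
+	 * so the datapath can address this link directly.
+	 */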
+ dp_link_vif = ath12k_dp_vif_to_dp_link_vif(&ahvif->dp_vif, arvif->link_id);
+
+ dp_link_vif->vdev_id = arvif->vdev_id;
+ dp_link_vif->lmac_id = ar->lmac_id;
+ dp_link_vif->pdev_idx = ar->pdev_idx;
+
switch (ahvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
+ params.ucast_ra_only = true;
+
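+		/* Create a DP peer for the AP BSSID; only real MLD links
+		 * (link_id < IEEE80211_MLD_MAX_NUM_LINKS) get one, scan
+		 * links are skipped.
+		 */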
+ if (arvif->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
+ ret = ath12k_dp_peer_create(&ah->dp_hw, arvif->bssid, &params);
+ if (ret) {
+ ath12k_warn(ab, "failed to vdev %d create dp_peer for AP: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+ dp_peer_created = true;
+ }
+
peer_param.vdev_id = arvif->vdev_id;
peer_param.peer_addr = arvif->bssid;
peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
@@ -10106,7 +10320,7 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
if (ret) {
ath12k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
arvif->vdev_id, ret);
- goto err_vdev_del;
+ goto err_dp_peer_del;
}
ret = ath12k_mac_set_kickout(arvif);
@@ -10212,6 +10426,10 @@ err_peer_del:
ar->num_peers--;
}
+err_dp_peer_del:
+ if (dp_peer_created)
+ ath12k_dp_peer_delete(&ah->dp_hw, arvif->bssid, NULL);
+
err_vdev_del:
if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ar->monitor_vdev_id = -1;
@@ -10406,8 +10624,8 @@ unlock:
return arvif->ar;
}
-static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
@@ -10454,6 +10672,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
*/
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_add_interface);
static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif)
{
@@ -10484,6 +10703,7 @@ static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ath12k_link_vif *arv
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ath12k_dp_link_vif *dp_link_vif;
struct ath12k_base *ab = ar->ab;
unsigned long time_left;
int ret;
@@ -10529,8 +10749,10 @@ err_vdev_del:
idr_for_each(&ar->txmgmt_idr,
ath12k_mac_vif_txmgmt_idr_remove, vif);
- ath12k_mac_vif_unref(&ab->dp, vif);
- ath12k_dp_tx_put_bank_profile(&ab->dp, arvif->bank_id);
+ ath12k_mac_vif_unref(ath12k_ab_to_dp(ab), vif);
+
+ dp_link_vif = ath12k_dp_vif_to_dp_link_vif(&ahvif->dp_vif, arvif->link_id);
+ ath12k_dp_tx_put_bank_profile(ath12k_ab_to_dp(ab), dp_link_vif->bank_id);
/* Recalc txpower for remaining vdev */
ath12k_mac_txpower_recalc(ar);
@@ -10542,8 +10764,8 @@ err_vdev_del:
return ret;
}
-static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif;
@@ -10590,6 +10812,7 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
ath12k_mac_unassign_link_vif(arvif);
}
}
+EXPORT_SYMBOL(ath12k_mac_op_remove_interface);
/* FIXME: Has to be verified. */
#define SUPPORTED_FILTERS \
@@ -10601,10 +10824,10 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
FIF_PROBE_REQ | \
FIF_FCSFAIL)
-static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
+void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
@@ -10616,9 +10839,10 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
}
+EXPORT_SYMBOL(ath12k_mac_op_configure_filter);
-static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, int radio_idx,
- u32 *tx_ant, u32 *rx_ant)
+int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
int antennas_rx = 0, antennas_tx = 0;
@@ -10637,9 +10861,10 @@ static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, int radio_idx,
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_get_antenna);
-static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, int radio_idx,
- u32 tx_ant, u32 rx_ant)
+int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
@@ -10656,6 +10881,7 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, int radio_idx,
return ret;
}
+EXPORT_SYMBOL(ath12k_mac_op_set_antenna);
static int ath12k_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -10697,9 +10923,9 @@ static int ath12k_mac_ampdu_action(struct ieee80211_hw *hw,
return ret;
}
-static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
+int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
{
struct ieee80211_sta *sta = params->sta;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
@@ -10720,9 +10946,10 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_ampdu_action);
-static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
- struct ieee80211_chanctx_conf *ctx)
+int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
{
struct ath12k *ar;
struct ath12k_base *ab;
@@ -10749,9 +10976,10 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_add_chanctx);
-static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
- struct ieee80211_chanctx_conf *ctx)
+void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
{
struct ath12k *ar;
struct ath12k_base *ab;
@@ -10776,6 +11004,7 @@ static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
}
+EXPORT_SYMBOL(ath12k_mac_op_remove_chanctx);
static enum wmi_phy_mode
ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar,
@@ -11334,9 +11563,9 @@ ath12k_mac_update_active_vif_chan(struct ath12k *ar,
kfree(arg.vifs);
}
-static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
- struct ieee80211_chanctx_conf *ctx,
- u32 changed)
+void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
{
struct ath12k *ar;
struct ath12k_base *ab;
@@ -11366,6 +11595,7 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
/* TODO: Recalc radar detection */
}
+EXPORT_SYMBOL(ath12k_mac_op_change_chanctx);
static int ath12k_start_vdev_delay(struct ath12k *ar,
struct ath12k_link_vif *arvif)
@@ -11807,7 +12037,7 @@ static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
}
}
-static int
+int
ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
@@ -11856,7 +12086,7 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
if (ab->hw_params->vdev_start_delay &&
ahvif->vdev_type != WMI_VDEV_TYPE_AP &&
ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
- !ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) {
+ !ath12k_dp_link_peer_exist_by_vdev_id(ath12k_ab_to_dp(ab), arvif->vdev_id)) {
ret = 0;
goto out;
}
@@ -11892,8 +12122,9 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
out:
return ret;
}
+EXPORT_SYMBOL(ath12k_mac_op_assign_vif_chanctx);
-static void
+void
ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
@@ -11960,8 +12191,9 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
ar->scan.arvif = NULL;
}
}
+EXPORT_SYMBOL(ath12k_mac_op_unassign_vif_chanctx);
-static int
+int
ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs,
@@ -11986,6 +12218,7 @@ ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_switch_vif_chanctx);
static int
ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
@@ -12014,8 +12247,8 @@ ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
/* mac80211 stores device specific RTS/Fragmentation threshold value,
* this is set interface specific to firmware from ath12k driver
*/
-static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
- int radio_idx, u32 value)
+int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct wiphy *wiphy = hw->wiphy;
@@ -12074,9 +12307,10 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
return ret;
}
+EXPORT_SYMBOL(ath12k_mac_op_set_rts_threshold);
-static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw,
- int radio_idx, u32 value)
+int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
/* Even though there's a WMI vdev param for fragmentation threshold no
* known firmware actually implements it. Moreover it is not possible to
@@ -12093,6 +12327,7 @@ static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
}
+EXPORT_SYMBOL(ath12k_mac_op_set_frag_threshold);
static int ath12k_mac_flush(struct ath12k *ar)
{
@@ -12130,8 +12365,8 @@ int ath12k_mac_wait_tx_complete(struct ath12k *ar)
return ath12k_mac_flush(ar);
}
-static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop)
+void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_link_vif *arvif;
@@ -12166,6 +12401,7 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
ath12k_mac_flush(arvif->ar);
}
}
+EXPORT_SYMBOL(ath12k_mac_op_flush);
static int
ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar,
@@ -12729,9 +12965,11 @@ ath12k_mac_validate_fixed_rate_settings(struct ath12k *ar, enum nl80211_band ban
bool eht_fixed_rate = false, he_fixed_rate = false, vht_fixed_rate = false;
const u16 *vht_mcs_mask, *he_mcs_mask, *eht_mcs_mask;
struct ieee80211_link_sta *link_sta;
- struct ath12k_peer *peer, *tmp;
+ struct ath12k_dp_link_peer *peer, *tmp;
u8 vht_nss, he_nss, eht_nss;
int ret = true;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
vht_mcs_mask = mask->control[band].vht_mcs;
he_mcs_mask = mask->control[band].he_mcs;
@@ -12754,8 +12992,8 @@ ath12k_mac_validate_fixed_rate_settings(struct ath12k *ar, enum nl80211_band ban
eht_nss = ath12k_mac_max_eht_nss(eht_mcs_mask);
rcu_read_lock();
- spin_lock_bh(&ar->ab->base_lock);
- list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) {
+ spin_lock_bh(&dp->dp_lock);
+ list_for_each_entry_safe(peer, tmp, &dp->peers, list) {
if (peer->sta) {
link_sta = rcu_dereference(peer->sta->link[link_id]);
if (!link_sta) {
@@ -12781,12 +13019,12 @@ ath12k_mac_validate_fixed_rate_settings(struct ath12k *ar, enum nl80211_band ban
}
}
exit:
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
rcu_read_unlock();
return ret;
}
-static int
+int
ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
@@ -12965,8 +13203,9 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
out:
return ret;
}
+EXPORT_SYMBOL(ath12k_mac_op_set_bitrate_mask);
-static void
+void
ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
@@ -13027,7 +13266,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
ahvif = arvif->ahvif;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"reconfig cipher %d up %d vdev type %d\n",
- ahvif->key_cipher,
+ ahvif->dp_vif.key_cipher,
arvif->is_up,
ahvif->vdev_type);
@@ -13047,6 +13286,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
}
}
}
+EXPORT_SYMBOL(ath12k_mac_op_reconfig_complete);
static void
ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
@@ -13080,8 +13320,8 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
ath12k_warn(ar->ab, "bss channel survey timed out\n");
}
-static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
- struct survey_info *survey)
+int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
{
struct ath12k *ar;
struct ieee80211_supported_band *sband;
@@ -13135,6 +13375,7 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_get_survey);
static void ath12k_mac_put_chain_rssi(struct station_info *sinfo,
struct ath12k_link_sta *arsta)
@@ -13157,15 +13398,18 @@ static void ath12k_mac_put_chain_rssi(struct station_info *sinfo,
}
}
-static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct station_info *sinfo)
+void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
{
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_dp_link_peer_rate_info rate_info = {};
struct ath12k_fw_stats_req_params params = {};
+ struct ath12k_dp_link_peer *peer;
struct ath12k_link_sta *arsta;
s8 signal, noise_floor;
+ struct ath12k_dp *dp;
struct ath12k *ar;
bool db2dbm;
@@ -13176,34 +13420,37 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
if (!ar)
return;
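+	/* Snapshot the DP peer's rate and duration stats up front; the
+	 * retry counters are read separately under dp_lock below.
+	 */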
+ dp = ath12k_ab_to_dp(ar->ab);
+ ath12k_dp_link_peer_get_sta_rate_info_stats(dp, arsta->addr, &rate_info);
+
db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
ar->ab->wmi_ab.svc_map);
- sinfo->rx_duration = arsta->rx_duration;
+ sinfo->rx_duration = rate_info.rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
- sinfo->tx_duration = arsta->tx_duration;
+ sinfo->tx_duration = rate_info.tx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
- if (arsta->txrate.legacy || arsta->txrate.nss) {
- if (arsta->txrate.legacy) {
- sinfo->txrate.legacy = arsta->txrate.legacy;
+ if (rate_info.txrate.legacy || rate_info.txrate.nss) {
+ if (rate_info.txrate.legacy) {
+ sinfo->txrate.legacy = rate_info.txrate.legacy;
} else {
- sinfo->txrate.mcs = arsta->txrate.mcs;
- sinfo->txrate.nss = arsta->txrate.nss;
- sinfo->txrate.bw = arsta->txrate.bw;
- sinfo->txrate.he_gi = arsta->txrate.he_gi;
- sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
- sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
- sinfo->txrate.eht_gi = arsta->txrate.eht_gi;
- sinfo->txrate.eht_ru_alloc = arsta->txrate.eht_ru_alloc;
- }
- sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->txrate.mcs = rate_info.txrate.mcs;
+ sinfo->txrate.nss = rate_info.txrate.nss;
+ sinfo->txrate.bw = rate_info.txrate.bw;
+ sinfo->txrate.he_gi = rate_info.txrate.he_gi;
+ sinfo->txrate.he_dcm = rate_info.txrate.he_dcm;
+ sinfo->txrate.he_ru_alloc = rate_info.txrate.he_ru_alloc;
+ sinfo->txrate.eht_gi = rate_info.txrate.eht_gi;
+ sinfo->txrate.eht_ru_alloc = rate_info.txrate.eht_ru_alloc;
+ }
+ sinfo->txrate.flags = rate_info.txrate.flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
/* TODO: Use real NF instead of default one. */
- signal = arsta->rssi_comb;
+ signal = rate_info.rssi_comb;
params.pdev_id = ar->pdev->pdev_id;
params.vdev_id = 0;
@@ -13233,26 +13480,37 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
- sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi);
+ sinfo->signal_avg = rate_info.signal_avg;
if (!db2dbm)
sinfo->signal_avg += noise_floor;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
- sinfo->tx_retries = arsta->tx_retry_count;
- sinfo->tx_failed = arsta->tx_retry_failed;
+ spin_lock_bh(&dp->dp_lock);
+ peer = ath12k_dp_link_peer_find_by_addr(dp, arsta->addr);
+ if (!peer) {
+ spin_unlock_bh(&dp->dp_lock);
+ return;
+ }
+
+ sinfo->tx_retries = peer->tx_retry_count;
+ sinfo->tx_failed = peer->tx_retry_failed;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+
+ spin_unlock_bh(&dp->dp_lock);
}
+EXPORT_SYMBOL(ath12k_mac_op_sta_statistics);
-static void ath12k_mac_op_link_sta_statistics(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_link_sta *link_sta,
- struct link_station_info *link_sinfo)
+void ath12k_mac_op_link_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct link_station_info *link_sinfo)
{
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta);
struct ath12k_fw_stats_req_params params = {};
+ struct ath12k_dp_link_peer *peer;
+ struct ath12k_dp *dp;
struct ath12k_link_sta *arsta;
struct ath12k *ar;
s8 signal;
@@ -13272,43 +13530,64 @@ static void ath12k_mac_op_link_sta_statistics(struct ieee80211_hw *hw,
db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
ar->ab->wmi_ab.svc_map);
- link_sinfo->rx_duration = arsta->rx_duration;
+ dp = ath12k_ab_to_dp(ar->ab);
+
+ spin_lock_bh(&dp->dp_lock);
+ peer = ath12k_dp_link_peer_find_by_addr(dp, arsta->addr);
+ if (!peer) {
+ spin_unlock_bh(&dp->dp_lock);
+ return;
+ }
+
+ link_sinfo->rx_duration = peer->rx_duration;
link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
- link_sinfo->tx_duration = arsta->tx_duration;
+ link_sinfo->tx_duration = peer->tx_duration;
link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
- if (arsta->txrate.legacy || arsta->txrate.nss) {
- if (arsta->txrate.legacy) {
- link_sinfo->txrate.legacy = arsta->txrate.legacy;
+ if (peer->txrate.legacy || peer->txrate.nss) {
+ if (peer->txrate.legacy) {
+ link_sinfo->txrate.legacy = peer->txrate.legacy;
} else {
- link_sinfo->txrate.mcs = arsta->txrate.mcs;
- link_sinfo->txrate.nss = arsta->txrate.nss;
- link_sinfo->txrate.bw = arsta->txrate.bw;
- link_sinfo->txrate.he_gi = arsta->txrate.he_gi;
- link_sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
+ link_sinfo->txrate.mcs = peer->txrate.mcs;
+ link_sinfo->txrate.nss = peer->txrate.nss;
+ link_sinfo->txrate.bw = peer->txrate.bw;
+ link_sinfo->txrate.he_gi = peer->txrate.he_gi;
+ link_sinfo->txrate.he_dcm = peer->txrate.he_dcm;
link_sinfo->txrate.he_ru_alloc =
- arsta->txrate.he_ru_alloc;
- link_sinfo->txrate.eht_gi = arsta->txrate.eht_gi;
+ peer->txrate.he_ru_alloc;
+ link_sinfo->txrate.eht_gi = peer->txrate.eht_gi;
link_sinfo->txrate.eht_ru_alloc =
- arsta->txrate.eht_ru_alloc;
+ peer->txrate.eht_ru_alloc;
}
- link_sinfo->txrate.flags = arsta->txrate.flags;
+ link_sinfo->txrate.flags = peer->txrate.flags;
link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
+ link_sinfo->signal_avg = ewma_avg_rssi_read(&peer->avg_rssi);
+
+ if (!db2dbm)
+ link_sinfo->signal_avg += ATH12K_DEFAULT_NOISE_FLOOR;
+
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+
+ link_sinfo->tx_retries = peer->tx_retry_count;
+ link_sinfo->tx_failed = peer->tx_retry_failed;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+
/* TODO: Use real NF instead of default one. */
- signal = arsta->rssi_comb;
+ signal = peer->rssi_comb;
- params.pdev_id = ar->pdev->pdev_id;
- params.vdev_id = 0;
- params.stats_id = WMI_REQUEST_VDEV_STAT;
+ spin_unlock_bh(&dp->dp_lock);
- if (!signal &&
- ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
- !(ath12k_mac_get_fw_stats(ar, &params))) {
- signal = arsta->rssi_beacon;
- ath12k_fw_stats_reset(ar);
+ if (!signal && ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ params.pdev_id = ar->pdev->pdev_id;
+ params.vdev_id = 0;
+ params.stats_id = WMI_REQUEST_VDEV_STAT;
+
+ if (!ath12k_mac_get_fw_stats(ar, &params)) {
+ signal = arsta->rssi_beacon;
+ ath12k_fw_stats_reset(ar);
+ }
}
if (signal) {
@@ -13316,22 +13595,11 @@ static void ath12k_mac_op_link_sta_statistics(struct ieee80211_hw *hw,
db2dbm ? signal : signal + ATH12K_DEFAULT_NOISE_FLOOR;
link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
-
- link_sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi);
-
- if (!db2dbm)
- link_sinfo->signal_avg += ATH12K_DEFAULT_NOISE_FLOOR;
-
- link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
-
- link_sinfo->tx_retries = arsta->tx_retry_count;
- link_sinfo->tx_failed = arsta->tx_retry_failed;
- link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
- link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
}
+EXPORT_SYMBOL(ath12k_mac_op_link_sta_statistics);
-static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
@@ -13351,12 +13619,13 @@ static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_cancel_remain_on_channel);
-static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_channel *chan,
- int duration,
- enum ieee80211_roc_type type)
+int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
@@ -13491,10 +13760,11 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
return 0;
}
+EXPORT_SYMBOL(ath12k_mac_op_remain_on_channel);
-static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *data)
+void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_rekey_data *rekey_data;
@@ -13527,63 +13797,7 @@ static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "replay ctr", NULL,
&rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr));
}
-
-static const struct ieee80211_ops ath12k_ops = {
- .tx = ath12k_mac_op_tx,
- .wake_tx_queue = ieee80211_handle_wake_tx_queue,
- .start = ath12k_mac_op_start,
- .stop = ath12k_mac_op_stop,
- .reconfig_complete = ath12k_mac_op_reconfig_complete,
- .add_interface = ath12k_mac_op_add_interface,
- .remove_interface = ath12k_mac_op_remove_interface,
- .update_vif_offload = ath12k_mac_op_update_vif_offload,
- .config = ath12k_mac_op_config,
- .link_info_changed = ath12k_mac_op_link_info_changed,
- .vif_cfg_changed = ath12k_mac_op_vif_cfg_changed,
- .change_vif_links = ath12k_mac_op_change_vif_links,
- .configure_filter = ath12k_mac_op_configure_filter,
- .hw_scan = ath12k_mac_op_hw_scan,
- .cancel_hw_scan = ath12k_mac_op_cancel_hw_scan,
- .set_key = ath12k_mac_op_set_key,
- .set_rekey_data = ath12k_mac_op_set_rekey_data,
- .sta_state = ath12k_mac_op_sta_state,
- .sta_set_txpwr = ath12k_mac_op_sta_set_txpwr,
- .link_sta_rc_update = ath12k_mac_op_link_sta_rc_update,
- .conf_tx = ath12k_mac_op_conf_tx,
- .set_antenna = ath12k_mac_op_set_antenna,
- .get_antenna = ath12k_mac_op_get_antenna,
- .ampdu_action = ath12k_mac_op_ampdu_action,
- .add_chanctx = ath12k_mac_op_add_chanctx,
- .remove_chanctx = ath12k_mac_op_remove_chanctx,
- .change_chanctx = ath12k_mac_op_change_chanctx,
- .assign_vif_chanctx = ath12k_mac_op_assign_vif_chanctx,
- .unassign_vif_chanctx = ath12k_mac_op_unassign_vif_chanctx,
- .switch_vif_chanctx = ath12k_mac_op_switch_vif_chanctx,
- .get_txpower = ath12k_mac_op_get_txpower,
- .set_rts_threshold = ath12k_mac_op_set_rts_threshold,
- .set_frag_threshold = ath12k_mac_op_set_frag_threshold,
- .set_bitrate_mask = ath12k_mac_op_set_bitrate_mask,
- .get_survey = ath12k_mac_op_get_survey,
- .flush = ath12k_mac_op_flush,
- .sta_statistics = ath12k_mac_op_sta_statistics,
- .link_sta_statistics = ath12k_mac_op_link_sta_statistics,
- .remain_on_channel = ath12k_mac_op_remain_on_channel,
- .cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
- .change_sta_links = ath12k_mac_op_change_sta_links,
- .can_activate_links = ath12k_mac_op_can_activate_links,
-#ifdef CONFIG_PM
- .suspend = ath12k_wow_op_suspend,
- .resume = ath12k_wow_op_resume,
- .set_wakeup = ath12k_wow_op_set_wakeup,
-#endif
-#ifdef CONFIG_ATH12K_DEBUGFS
- .vif_add_debugfs = ath12k_debugfs_op_vif_add,
-#endif
- CFG80211_TESTMODE_CMD(ath12k_tm_cmd)
-#ifdef CONFIG_ATH12K_DEBUGFS
- .link_sta_add_debugfs = ath12k_debugfs_link_sta_op_add,
-#endif
-};
+EXPORT_SYMBOL(ath12k_mac_op_set_rekey_data);
void ath12k_mac_update_freq_range(struct ath12k *ar,
u32 freq_low, u32 freq_high)
@@ -14483,7 +14697,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
if (is_monitor_disable)
/* There's a race between calling ieee80211_register_hw()
* and here where the monitor mode is enabled for a little
- * while. But that time is so short and in practise it make
+ * while. But that time is so short and in practice it doesn't make
* a difference in real life.
*/
wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
@@ -14562,8 +14776,9 @@ static void ath12k_mac_setup(struct ath12k *ar)
ar->vdev_id_11d_scan = ATH12K_11D_INVALID_VDEV_ID;
spin_lock_init(&ar->data_lock);
+ spin_lock_init(&ar->dp.ppdu_list_lock);
INIT_LIST_HEAD(&ar->arvifs);
- INIT_LIST_HEAD(&ar->ppdu_stats_info);
+ INIT_LIST_HEAD(&ar->dp.ppdu_stats_info);
init_completion(&ar->vdev_setup_done);
init_completion(&ar->vdev_delete_done);
@@ -14802,7 +15017,7 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_hw_group *ag,
u8 pdev_idx;
hw = ieee80211_alloc_hw(struct_size(ah, radio, num_pdev_map),
- &ath12k_ops);
+ pdev_map->ab->ath12k_ops);
if (!hw)
return NULL;
@@ -14811,7 +15026,9 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_hw_group *ag,
ah->num_radio = num_pdev_map;
mutex_init(&ah->hw_mutex);
- INIT_LIST_HEAD(&ah->ml_peers);
+
+ spin_lock_init(&ah->dp_hw.peer_lock);
+ INIT_LIST_HEAD(&ah->dp_hw.dp_peers_list);
for (i = 0; i < num_pdev_map; i++) {
ab = pdev_map[i].ab;
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 1f689e367c8a..7b50c5976384 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -54,9 +54,6 @@ struct ath12k_generic_iter {
* for driver usage purpose.
*/
#define ATH12K_FIRST_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS
-#define ATH12K_SCAN_MAX_LINKS ATH12K_GROUP_MAX_RADIO
-/* Define 1 scan link for each radio for parallel scan purposes */
-#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + ATH12K_SCAN_MAX_LINKS)
#define ATH12K_SCAN_LINKS_MASK GENMASK(ATH12K_NUM_MAX_LINKS, IEEE80211_MLD_MAX_NUM_LINKS)
#define ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE 2
@@ -141,6 +138,9 @@ struct ath12k_reg_tpc_power_info {
struct ath12k_chan_power_info chan_power_info[ATH12K_NUM_PWR_LEVELS];
};
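+/* OBSS packet-detect thresholds in dBm used for 802.11ax spatial reuse */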
+#define ATH12K_OBSS_PD_MAX_THRESHOLD (-82)
+#define ATH12K_OBSS_PD_NON_SRG_MAX_THRESHOLD (-62)
+
extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
#define ATH12K_SCAN_11D_INTERVAL 600000
@@ -172,6 +172,7 @@ struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
void ath12k_mac_drain_tx(struct ath12k *ar);
void ath12k_mac_peer_cleanup_all(struct ath12k *ar);
+void ath12k_mac_dp_peer_cleanup(struct ath12k_hw *ah);
int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
enum rate_info_bw ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw);
enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw);
@@ -206,4 +207,139 @@ void ath12k_mac_update_freq_range(struct ath12k *ar,
void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ieee80211_chanctx_conf *ctx);
+int ath12k_mac_op_start(struct ieee80211_hw *hw);
+void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend);
+void
+ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type);
+int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int ath12k_mac_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed);
+void ath12k_mac_op_link_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed);
+void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 changed);
+int
+ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *ol[IEEE80211_MLD_MAX_NUM_LINKS]);
+void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast);
+int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req);
+void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data);
+int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state);
+int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed);
+int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params);
+int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant);
+int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant);
+int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed);
+int
+ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx);
+void
+ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx);
+int
+ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode);
+int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value);
+int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value);
+int
+ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask);
+int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey);
+void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
+void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo);
+void ath12k_mac_op_link_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct link_station_info *link_sinfo);
+int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type);
+int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int ath12k_mac_op_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links);
+bool ath12k_mac_op_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 active_links);
+int ath12k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id,
+ int *dbm);
+int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
+ bool is_prb_rsp);
+void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb,
+ bool is_prb_rsp);
+u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ u8 link, struct sk_buff *skb, u32 info_flags);
+
+void ath12k_mlo_mcast_update_tx_link_address(struct ieee80211_vif *vif,
+ u8 link_id, struct sk_buff *skb,
+ u32 info_flags);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index 08f44baf182a..45c0f66dcc5e 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/msi.h>
@@ -18,136 +18,6 @@
#define OTP_VALID_DUALMAC_BOARD_ID_MASK 0x1000
#define MHI_CB_INVALID 0xff
-static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
- {
- .num = 20,
- .name = "IPCR",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 21,
- .name = "IPCR",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = true,
- },
-};
-
-static struct mhi_event_config ath12k_mhi_events_qcn9274[] = {
- {
- .num_elements = 32,
- .irq_moderation_ms = 0,
- .irq = 1,
- .data_type = MHI_ER_CTRL,
- .mode = MHI_DB_BRST_DISABLE,
- .hardware_event = false,
- .client_managed = false,
- .offload_channel = false,
- },
- {
- .num_elements = 256,
- .irq_moderation_ms = 1,
- .irq = 2,
- .mode = MHI_DB_BRST_DISABLE,
- .priority = 1,
- .hardware_event = false,
- .client_managed = false,
- .offload_channel = false,
- },
-};
-
-const struct mhi_controller_config ath12k_mhi_config_qcn9274 = {
- .max_channels = 30,
- .timeout_ms = 10000,
- .use_bounce_buf = false,
- .buf_len = 0,
- .num_channels = ARRAY_SIZE(ath12k_mhi_channels_qcn9274),
- .ch_cfg = ath12k_mhi_channels_qcn9274,
- .num_events = ARRAY_SIZE(ath12k_mhi_events_qcn9274),
- .event_cfg = ath12k_mhi_events_qcn9274,
-};
-
-static const struct mhi_channel_config ath12k_mhi_channels_wcn7850[] = {
- {
- .num = 20,
- .name = "IPCR",
- .num_elements = 64,
- .event_ring = 1,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 21,
- .name = "IPCR",
- .num_elements = 64,
- .event_ring = 1,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = true,
- },
-};
-
-static struct mhi_event_config ath12k_mhi_events_wcn7850[] = {
- {
- .num_elements = 32,
- .irq_moderation_ms = 0,
- .irq = 1,
- .mode = MHI_DB_BRST_DISABLE,
- .data_type = MHI_ER_CTRL,
- .hardware_event = false,
- .client_managed = false,
- .offload_channel = false,
- },
- {
- .num_elements = 256,
- .irq_moderation_ms = 1,
- .irq = 2,
- .mode = MHI_DB_BRST_DISABLE,
- .priority = 1,
- .hardware_event = false,
- .client_managed = false,
- .offload_channel = false,
- },
-};
-
-const struct mhi_controller_config ath12k_mhi_config_wcn7850 = {
- .max_channels = 128,
- .timeout_ms = 2000,
- .use_bounce_buf = false,
- .buf_len = 8192,
- .num_channels = ARRAY_SIZE(ath12k_mhi_channels_wcn7850),
- .ch_cfg = ath12k_mhi_channels_wcn7850,
- .num_events = ARRAY_SIZE(ath12k_mhi_events_wcn7850),
- .event_cfg = ath12k_mhi_events_wcn7850,
-};
-
void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab)
{
u32 val;
diff --git a/drivers/net/wireless/ath/ath12k/mhi.h b/drivers/net/wireless/ath/ath12k/mhi.h
index 7358b8477536..367432676385 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.h
+++ b/drivers/net/wireless/ath/ath12k/mhi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef _ATH12K_MHI_H
#define _ATH12K_MHI_H
@@ -31,9 +31,6 @@ enum ath12k_mhi_state {
ATH12K_MHI_RDDM_DONE,
};
-extern const struct mhi_controller_config ath12k_mhi_config_qcn9274;
-extern const struct mhi_controller_config ath12k_mhi_config_wcn7850;
-
int ath12k_mhi_start(struct ath12k_pci *ar_pci);
void ath12k_mhi_stop(struct ath12k_pci *ar_pci, bool is_suspend);
int ath12k_mhi_register(struct ath12k_pci *ar_pci);
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index a12c8379cb46..375277ca2b89 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -15,6 +15,7 @@
#include "hif.h"
#include "mhi.h"
#include "debug.h"
+#include "hal.h"
#define ATH12K_PCI_BAR_NUM 0
#define ATH12K_PCI_DMA_MASK 36
@@ -22,50 +23,18 @@
#define ATH12K_PCI_IRQ_CE0_OFFSET 3
#define WINDOW_ENABLE_BIT 0x40000000
-#define WINDOW_REG_ADDRESS 0x310c
#define WINDOW_VALUE_MASK GENMASK(24, 19)
#define WINDOW_START 0x80000
#define WINDOW_RANGE_MASK GENMASK(18, 0)
#define WINDOW_STATIC_MASK GENMASK(31, 6)
-#define TCSR_SOC_HW_VERSION 0x1B00000
-#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
-#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 4)
-
/* BAR0 + 4k is always accessible, and no
* need to force wakeup.
* 4K - 32 = 0xFE0
*/
#define ACCESS_ALWAYS_OFF 0xFE0
-#define QCN9274_DEVICE_ID 0x1109
-#define WCN7850_DEVICE_ID 0x1107
-
-#define PCIE_LOCAL_REG_QRTR_NODE_ID 0x1E03164
-#define DOMAIN_NUMBER_MASK GENMASK(7, 4)
-#define BUS_NUMBER_MASK GENMASK(3, 0)
-
-static const struct pci_device_id ath12k_pci_id_table[] = {
- { PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
- { PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table);
-
-/* TODO: revisit IRQ mapping for new SRNG's */
-static const struct ath12k_msi_config ath12k_msi_config[] = {
- {
- .total_vectors = 16,
- .total_users = 3,
- .users = (struct ath12k_msi_user[]) {
- { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
- { .name = "CE", .num_vectors = 5, .base_vector = 3 },
- { .name = "DP", .num_vectors = 8, .base_vector = 8 },
- },
- },
-};
-
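+/* Per-family PCI drivers, indexed by enum ath12k_device_family; probe
+ * matches a device against each entry's id_table via
+ * ath12k_get_device_family().
+ */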
+static struct ath12k_pci_driver *ath12k_pci_family_drivers[ATH12K_DEVICE_FAMILY_MAX];
static const struct ath12k_msi_config msi_config_one_msi = {
.total_vectors = 1,
.total_users = 4,
@@ -136,30 +105,6 @@ static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
"tcl2host-status-ring",
};
-static int ath12k_pci_bus_wake_up(struct ath12k_base *ab)
-{
- struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
-
- return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
-}
-
-static void ath12k_pci_bus_release(struct ath12k_base *ab)
-{
- struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
-
- mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
-}
-
-static const struct ath12k_pci_ops ath12k_pci_ops_qcn9274 = {
- .wakeup = NULL,
- .release = NULL,
-};
-
-static const struct ath12k_pci_ops ath12k_pci_ops_wcn7850 = {
- .wakeup = ath12k_pci_bus_wake_up,
- .release = ath12k_pci_bus_release,
-};
-
static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset)
{
struct ath12k_base *ab = ab_pci->ab;
@@ -175,37 +120,40 @@ static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset)
if (window != ab_pci->register_window) {
iowrite32(WINDOW_ENABLE_BIT | window,
- ab->mem + WINDOW_REG_ADDRESS);
- ioread32(ab->mem + WINDOW_REG_ADDRESS);
+ ab->mem + ab_pci->window_reg_addr);
+ ioread32(ab->mem + ab_pci->window_reg_addr);
ab_pci->register_window = window;
}
}
static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci)
{
- u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK);
- u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK);
+ u32 umac_window;
+ u32 ce_window;
u32 window;
+ umac_window = u32_get_bits(ab_pci->reg_base->umac_base, WINDOW_VALUE_MASK);
+ ce_window = u32_get_bits(ab_pci->reg_base->ce_reg_base, WINDOW_VALUE_MASK);
window = (umac_window << 12) | (ce_window << 6);
spin_lock_bh(&ab_pci->window_lock);
ab_pci->register_window = window;
spin_unlock_bh(&ab_pci->window_lock);
- iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
+ iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + ab_pci->window_reg_addr);
}
static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
u32 offset)
{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
u32 window_start;
/* If offset lies within DP register range, use 3rd window */
- if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
+ if ((offset ^ ab_pci->reg_base->umac_base) < WINDOW_RANGE_MASK)
window_start = 3 * WINDOW_START;
/* If offset lies within CE register range, use 2nd window */
- else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
+ else if ((offset ^ ab_pci->reg_base->ce_reg_base) < WINDOW_RANGE_MASK)
window_start = 2 * WINDOW_START;
else
window_start = WINDOW_START;
@@ -225,8 +173,8 @@ static void ath12k_pci_restore_window(struct ath12k_base *ab)
spin_lock_bh(&ab_pci->window_lock);
iowrite32(WINDOW_ENABLE_BIT | ab_pci->register_window,
- ab->mem + WINDOW_REG_ADDRESS);
- ioread32(ab->mem + WINDOW_REG_ADDRESS);
+ ab->mem + ab_pci->window_reg_addr);
+ ioread32(ab->mem + ab_pci->window_reg_addr);
spin_unlock_bh(&ab_pci->window_lock);
}
@@ -544,10 +492,11 @@ static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
struct ath12k_ext_irq_grp,
napi);
struct ath12k_base *ab = irq_grp->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
int work_done;
int i;
- work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
+ work_done = ath12k_dp_service_srng(dp, irq_grp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
for (i = 0; i < irq_grp->num_irq; i++)
@@ -965,7 +914,7 @@ static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
* writes to the given register, it is available for firmware when the QMI service
* is spawned.
*/
- reg = PCIE_LOCAL_REG_QRTR_NODE_ID & WINDOW_RANGE_MASK;
+ reg = PCIE_LOCAL_REG_QRTR_NODE_ID(ab) & WINDOW_RANGE_MASK;
ath12k_pci_write32(ab, reg, ab_pci->qmi_instance);
ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n",
@@ -1244,6 +1193,7 @@ u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
ab_pci->pci_ops->release(ab);
return val;
}
+EXPORT_SYMBOL(ath12k_pci_read32);
void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
{
@@ -1549,28 +1499,34 @@ static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
#endif
};
-static
-void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor)
+static enum ath12k_device_family
+ath12k_get_device_family(const struct pci_device_id *pci_dev)
{
- u32 soc_hw_version;
+ enum ath12k_device_family device_family_id;
+ const struct pci_device_id *id;
+
+ for (device_family_id = ATH12K_DEVICE_FAMILY_START;
+ device_family_id < ATH12K_DEVICE_FAMILY_MAX; device_family_id++) {
+ if (!ath12k_pci_family_drivers[device_family_id])
+ continue;
- soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION);
- *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
- soc_hw_version);
- *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
- soc_hw_version);
+ id = ath12k_pci_family_drivers[device_family_id]->id_table;
+ while (id->device) {
+ if (id->device == pci_dev->device)
+ return device_family_id;
+ id += 1;
+ }
+ }
- ath12k_dbg(ab, ATH12K_DBG_PCI,
- "pci tcsr_soc_hw_version major %d minor %d\n",
- *major, *minor);
+ return ATH12K_DEVICE_FAMILY_MAX;
}
static int ath12k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_dev)
{
- struct ath12k_base *ab;
+ enum ath12k_device_family device_id;
struct ath12k_pci *ab_pci;
- u32 soc_hw_version_major, soc_hw_version_minor;
+ struct ath12k_base *ab;
int ret;
ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI);
@@ -1580,7 +1536,6 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
}
ab->dev = &pdev->dev;
- pci_set_drvdata(pdev, ab);
ab_pci = ath12k_pci_priv(ab);
ab_pci->dev_id = pci_dev->device;
ab_pci->ab = ab;
@@ -1605,56 +1560,25 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
ab->id.subsystem_vendor = pdev->subsystem_vendor;
ab->id.subsystem_device = pdev->subsystem_device;
- switch (pci_dev->device) {
- case QCN9274_DEVICE_ID:
- ab_pci->msi_config = &ath12k_msi_config[0];
- ab->static_window_map = true;
- ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
- ab->hal_rx_ops = &hal_rx_qcn9274_ops;
- ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
- &soc_hw_version_minor);
- ab->target_mem_mode = ath12k_core_get_memory_mode(ab);
- switch (soc_hw_version_major) {
- case ATH12K_PCI_SOC_HW_VERSION_2:
- ab->hw_rev = ATH12K_HW_QCN9274_HW20;
- break;
- case ATH12K_PCI_SOC_HW_VERSION_1:
- ab->hw_rev = ATH12K_HW_QCN9274_HW10;
- break;
- default:
- dev_err(&pdev->dev,
- "Unknown hardware version found for QCN9274: 0x%x\n",
- soc_hw_version_major);
- ret = -EOPNOTSUPP;
- goto err_pci_free_region;
- }
- break;
- case WCN7850_DEVICE_ID:
- ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD;
- ab_pci->msi_config = &ath12k_msi_config[0];
- ab->static_window_map = false;
- ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
- ab->hal_rx_ops = &hal_rx_wcn7850_ops;
- ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
- &soc_hw_version_minor);
- ab->target_mem_mode = ATH12K_QMI_MEMORY_MODE_DEFAULT;
- switch (soc_hw_version_major) {
- case ATH12K_PCI_SOC_HW_VERSION_2:
- ab->hw_rev = ATH12K_HW_WCN7850_HW20;
- break;
- default:
- dev_err(&pdev->dev,
- "Unknown hardware version found for WCN7850: 0x%x\n",
- soc_hw_version_major);
- ret = -EOPNOTSUPP;
- goto err_pci_free_region;
- }
- break;
+ device_id = ath12k_get_device_family(pci_dev);
+ if (device_id >= ATH12K_DEVICE_FAMILY_MAX) {
+ ath12k_err(ab, "failed to get device family id\n");
+ ret = -EINVAL;
+ goto err_pci_free_region;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "pci device family id %d\n", device_id);
- default:
- dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
- pci_dev->device);
- ret = -EOPNOTSUPP;
+ ab_pci->device_family_ops = &ath12k_pci_family_drivers[device_id]->ops;
+ ab_pci->reg_base = ath12k_pci_family_drivers[device_id]->reg_base;
+
+ /* Call the device-specific probe. This callback can be used to
+ * override any ops in the future. probe is validated for NULL
+ * during registration.
+ */
+ ret = ab_pci->device_family_ops->probe(pdev, pci_dev);
+ if (ret) {
+ ath12k_err(ab, "failed to probe device: %d\n", ret);
goto err_pci_free_region;
}
@@ -1709,13 +1633,25 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
goto err_free_irq;
}
+ /* Invoke arch_init here so that arch-specific init operations
+ * can utilize already initialized ab fields, such as HAL SRNGs.
+ */
+ ret = ab_pci->device_family_ops->arch_init(ab);
+ if (ret) {
+ ath12k_err(ab, "PCI arch_init failed %d\n", ret);
+ goto err_pci_msi_free;
+ }
+
ret = ath12k_core_init(ab);
if (ret) {
ath12k_err(ab, "failed to init core: %d\n", ret);
- goto err_free_irq;
+ goto err_deinit_arch;
}
return 0;
+err_deinit_arch:
+ ab_pci->device_family_ops->arch_deinit(ab);
+
err_free_irq:
/* __free_irq() expects the caller to have cleared the affinity hint */
ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
@@ -1774,6 +1710,9 @@ qmi_fail:
ath12k_hal_srng_deinit(ab);
ath12k_ce_free_pipes(ab);
+
+ ab_pci->device_family_ops->arch_deinit(ab);
+
ath12k_core_free(ab);
}
@@ -1862,33 +1801,47 @@ static const struct dev_pm_ops __maybe_unused ath12k_pci_pm_ops = {
ath12k_pci_pm_resume_early)
};
-static struct pci_driver ath12k_pci_driver = {
- .name = "ath12k_pci",
- .id_table = ath12k_pci_id_table,
- .probe = ath12k_pci_probe,
- .remove = ath12k_pci_remove,
- .shutdown = ath12k_pci_shutdown,
- .driver.pm = &ath12k_pci_pm_ops,
-};
-
-int ath12k_pci_init(void)
+int ath12k_pci_register_driver(const enum ath12k_device_family device_id,
+ struct ath12k_pci_driver *driver)
{
- int ret;
+ struct pci_driver *pci_driver;
- ret = pci_register_driver(&ath12k_pci_driver);
- if (ret) {
- pr_err("failed to register ath12k pci driver: %d\n",
- ret);
- return ret;
+ if (device_id >= ATH12K_DEVICE_FAMILY_MAX)
+ return -EINVAL;
+
+ if (!driver || !driver->ops.probe ||
+ !driver->ops.arch_init || !driver->ops.arch_deinit)
+ return -EINVAL;
+
+ if (ath12k_pci_family_drivers[device_id]) {
+ pr_err("Driver already registered for %d\n", device_id);
+ return -EALREADY;
}
- return 0;
+ ath12k_pci_family_drivers[device_id] = driver;
+
+ pci_driver = &ath12k_pci_family_drivers[device_id]->driver;
+ pci_driver->name = driver->name;
+ pci_driver->id_table = driver->id_table;
+ pci_driver->probe = ath12k_pci_probe;
+ pci_driver->remove = ath12k_pci_remove;
+ pci_driver->shutdown = ath12k_pci_shutdown;
+ pci_driver->driver.pm = &ath12k_pci_pm_ops;
+
+ return pci_register_driver(pci_driver);
}
+EXPORT_SYMBOL(ath12k_pci_register_driver);
-void ath12k_pci_exit(void)
+void ath12k_pci_unregister_driver(const enum ath12k_device_family device_id)
{
- pci_unregister_driver(&ath12k_pci_driver);
+ if (device_id >= ATH12K_DEVICE_FAMILY_MAX ||
+ !ath12k_pci_family_drivers[device_id])
+ return;
+
+ pci_unregister_driver(&ath12k_pci_family_drivers[device_id]->driver);
+ ath12k_pci_family_drivers[device_id] = NULL;
}
+EXPORT_SYMBOL(ath12k_pci_unregister_driver);
/* firmware files */
MODULE_FIRMWARE(ATH12K_FW_DIR "/QCN9274/hw2.0/*");
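
The register/unregister pair above turns pci.c into a shared layer that per-family modules plug into. A minimal sketch of how a family module is expected to use it, mirroring the AHB registration shown later in this patch; the ath12k_wifi7_pci_* identifiers here are assumptions, not part of this diff:

static struct ath12k_pci_driver ath12k_wifi7_pci_driver = {
	.name = "ath12k_wifi7_pci",
	.id_table = ath12k_wifi7_pci_id_table,	/* family's PCI ids */
	.ops.probe = ath12k_wifi7_pci_probe,
	.ops.arch_init = ath12k_wifi7_arch_init,
	.ops.arch_deinit = ath12k_wifi7_arch_deinit,
	.reg_base = &ath12k_wifi7_pci_reg_base,	/* umac/ce bases */
};

int ath12k_wifi7_pci_init(void)
{
	/* Returns -EINVAL if a mandatory op is missing and -EALREADY
	 * if the family slot is already taken.
	 */
	return ath12k_pci_register_driver(ATH12K_DEVICE_FAMILY_WIFI7,
					  &ath12k_wifi7_pci_driver);
}

void ath12k_wifi7_pci_exit(void)
{
	ath12k_pci_unregister_driver(ATH12K_DEVICE_FAMILY_WIFI7);
}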
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index d1ec8aad7f6c..0e0e2020c6ae 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_PCI_H
#define ATH12K_PCI_H
#include <linux/mhi.h>
+#include <linux/pci.h>
#include "core.h"
@@ -29,7 +30,7 @@
#define PARM_LTSSM_VALUE 0x111
#define GCC_GCC_PCIE_HOT_RST(ab) \
- ((ab)->hw_params->regs->gcc_gcc_pcie_hot_rst)
+ ((ab)->hal.regs->gcc_gcc_pcie_hot_rst)
#define GCC_GCC_PCIE_HOT_RST_VAL 0x10
@@ -38,17 +39,17 @@
#define PCIE_INT_CLEAR_ALL 0xffffffff
#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab) \
- ((ab)->hw_params->regs->pcie_qserdes_sysclk_en_sel)
+ ((ab)->hal.regs->pcie_qserdes_sysclk_en_sel)
#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10
#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff
#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab) \
- ((ab)->hw_params->regs->pcie_pcs_osc_dtct_config_base)
+ ((ab)->hal.regs->pcie_pcs_osc_dtct_config_base)
#define PCIE_PCS_OSC_DTCT_CONFIG1_VAL 0x02
#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab) \
- ((ab)->hw_params->regs->pcie_pcs_osc_dtct_config_base + 0x4)
+ ((ab)->hal.regs->pcie_pcs_osc_dtct_config_base + 0x4)
#define PCIE_PCS_OSC_DTCT_CONFIG2_VAL 0x52
#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab) \
- ((ab)->hw_params->regs->pcie_pcs_osc_dtct_config_base + 0xc)
+ ((ab)->hal.regs->pcie_pcs_osc_dtct_config_base + 0xc)
#define PCIE_PCS_OSC_DTCT_CONFIG4_VAL 0xff
#define PCIE_PCS_OSC_DTCT_CONFIG_MSK 0x000000ff
@@ -58,6 +59,11 @@
#define QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB 0x1E20338
#define OTP_BOARD_ID_MASK GENMASK(15, 0)
+#define PCIE_LOCAL_REG_QRTR_NODE_ID(ab) \
+ ((ab)->hal.regs->qrtr_node_id)
+#define DOMAIN_NUMBER_MASK GENMASK(7, 4)
+#define BUS_NUMBER_MASK GENMASK(3, 0)
+
#define PCI_BAR_WINDOW0_BASE 0x1E00000
#define PCI_BAR_WINDOW0_END 0x1E7FFFC
#define PCI_SOC_RANGE_MASK 0x3FFF
@@ -70,9 +76,6 @@
#define QRTR_PCI_DOMAIN_NR_MASK GENMASK(7, 4)
#define QRTR_PCI_BUS_NUMBER_MASK GENMASK(3, 0)
-#define ATH12K_PCI_SOC_HW_VERSION_1 1
-#define ATH12K_PCI_SOC_HW_VERSION_2 2
-
struct ath12k_msi_user {
const char *name;
int num_vectors;
@@ -97,6 +100,17 @@ struct ath12k_pci_ops {
void (*release)(struct ath12k_base *ab);
};
+struct ath12k_pci_device_family_ops {
+ int (*probe)(struct pci_dev *pdev, const struct pci_device_id *pci_dev);
+ int (*arch_init)(struct ath12k_base *ab);
+ void (*arch_deinit)(struct ath12k_base *ab);
+};
+
+struct ath12k_pci_reg_base {
+ u32 umac_base;
+ u32 ce_reg_base;
+};
+
struct ath12k_pci {
struct pci_dev *pdev;
struct ath12k_base *ab;
@@ -119,6 +133,18 @@ struct ath12k_pci {
const struct ath12k_pci_ops *pci_ops;
u32 qmi_instance;
u64 dma_mask;
+ const struct ath12k_pci_device_family_ops *device_family_ops;
+ const struct ath12k_pci_reg_base *reg_base;
+
+ u32 window_reg_addr;
+};
+
+struct ath12k_pci_driver {
+ const char *name;
+ const struct pci_device_id *id_table;
+ struct ath12k_pci_device_family_ops ops;
+ struct pci_driver driver;
+ const struct ath12k_pci_reg_base *reg_base;
};
static inline struct ath12k_pci *ath12k_pci_priv(struct ath12k_base *ab)
@@ -148,6 +174,7 @@ void ath12k_pci_stop(struct ath12k_base *ab);
int ath12k_pci_start(struct ath12k_base *ab);
int ath12k_pci_power_up(struct ath12k_base *ab);
void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend);
-int ath12k_pci_init(void);
-void ath12k_pci_exit(void);
+int ath12k_pci_register_driver(const enum ath12k_device_family device_id,
+ struct ath12k_pci_driver *driver);
+void ath12k_pci_unregister_driver(const enum ath12k_device_family device_id);
#endif /* ATH12K_PCI_H */
diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c
index f1ae9e5b5af7..5f3bd3b9a3e9 100644
--- a/drivers/net/wireless/ath/ath12k/peer.c
+++ b/drivers/net/wireless/ath/ath12k/peer.c
@@ -1,211 +1,28 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
#include "peer.h"
#include "debug.h"
+#include "debugfs.h"
-struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
-{
- struct ath12k_ml_peer *ml_peer;
-
- lockdep_assert_wiphy(ah->hw->wiphy);
-
- list_for_each_entry(ml_peer, &ah->ml_peers, list) {
- if (!ether_addr_equal(ml_peer->addr, addr))
- continue;
-
- return ml_peer;
- }
-
- return NULL;
-}
-
-struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
- const u8 *addr)
-{
- struct ath12k_peer *peer;
-
- lockdep_assert_held(&ab->base_lock);
-
- list_for_each_entry(peer, &ab->peers, list) {
- if (peer->vdev_id != vdev_id)
- continue;
- if (!ether_addr_equal(peer->addr, addr))
- continue;
-
- return peer;
- }
-
- return NULL;
-}
-
-static struct ath12k_peer *ath12k_peer_find_by_pdev_idx(struct ath12k_base *ab,
- u8 pdev_idx, const u8 *addr)
-{
- struct ath12k_peer *peer;
-
- lockdep_assert_held(&ab->base_lock);
-
- list_for_each_entry(peer, &ab->peers, list) {
- if (peer->pdev_idx != pdev_idx)
- continue;
- if (!ether_addr_equal(peer->addr, addr))
- continue;
-
- return peer;
- }
-
- return NULL;
-}
-
-struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
- const u8 *addr)
-{
- struct ath12k_peer *peer;
-
- lockdep_assert_held(&ab->base_lock);
-
- list_for_each_entry(peer, &ab->peers, list) {
- if (!ether_addr_equal(peer->addr, addr))
- continue;
-
- return peer;
- }
-
- return NULL;
-}
-
-static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab,
- int ml_peer_id)
-{
- struct ath12k_peer *peer;
-
- lockdep_assert_held(&ab->base_lock);
-
- list_for_each_entry(peer, &ab->peers, list)
- if (ml_peer_id == peer->ml_id)
- return peer;
-
- return NULL;
-}
-
-struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
- int peer_id)
-{
- struct ath12k_peer *peer;
-
- lockdep_assert_held(&ab->base_lock);
-
- if (peer_id == HAL_INVALID_PEERID)
- return NULL;
-
- if (peer_id & ATH12K_PEER_ML_ID_VALID)
- return ath12k_peer_find_by_ml_id(ab, peer_id);
-
- list_for_each_entry(peer, &ab->peers, list)
- if (peer_id == peer->peer_id)
- return peer;
-
- return NULL;
-}
-
-bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id)
-{
- struct ath12k_peer *peer;
-
- spin_lock_bh(&ab->base_lock);
-
- list_for_each_entry(peer, &ab->peers, list) {
- if (vdev_id == peer->vdev_id) {
- spin_unlock_bh(&ab->base_lock);
- return true;
- }
- }
- spin_unlock_bh(&ab->base_lock);
- return false;
-}
-
-struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab,
- int ast_hash)
-{
- struct ath12k_peer *peer;
-
- lockdep_assert_held(&ab->base_lock);
-
- list_for_each_entry(peer, &ab->peers, list)
- if (ast_hash == peer->ast_hash)
- return peer;
-
- return NULL;
-}
-
-void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
-{
- struct ath12k_peer *peer;
-
- spin_lock_bh(&ab->base_lock);
-
- peer = ath12k_peer_find_by_id(ab, peer_id);
- if (!peer) {
- ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
- peer_id);
- goto exit;
- }
-
- ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
- peer->vdev_id, peer->addr, peer_id);
-
- list_del(&peer->list);
- kfree(peer);
- wake_up(&ab->peer_mapping_wq);
-
-exit:
- spin_unlock_bh(&ab->base_lock);
-}
-
-void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
- u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
-{
- struct ath12k_peer *peer;
-
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find(ab, vdev_id, mac_addr);
- if (!peer) {
- peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
- if (!peer)
- goto exit;
-
- peer->vdev_id = vdev_id;
- peer->peer_id = peer_id;
- peer->ast_hash = ast_hash;
- peer->hw_peer_id = hw_peer_id;
- ether_addr_copy(peer->addr, mac_addr);
- list_add(&peer->list, &ab->peers);
- wake_up(&ab->peer_mapping_wq);
- }
-
- ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
- vdev_id, mac_addr, peer_id);
-
-exit:
- spin_unlock_bh(&ab->base_lock);
-}
-
-static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id,
- const u8 *addr, bool expect_mapped)
+static int ath12k_wait_for_dp_link_peer_common(struct ath12k_base *ab, int vdev_id,
+ const u8 *addr, bool expect_mapped)
{
int ret;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
ret = wait_event_timeout(ab->peer_mapping_wq, ({
bool mapped;
- spin_lock_bh(&ab->base_lock);
- mapped = !!ath12k_peer_find(ab, vdev_id, addr);
- spin_unlock_bh(&ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
+ mapped = !!ath12k_dp_link_peer_find_by_vdev_and_addr(dp,
+ vdev_id,
+ addr);
+ spin_unlock_bh(&dp->dp_lock);
(mapped == expect_mapped ||
test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags));
@@ -219,30 +36,30 @@ static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id,
void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
{
- struct ath12k_peer *peer, *tmp;
+ struct ath12k_dp_link_peer *peer, *tmp;
struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- spin_lock_bh(&ab->base_lock);
- list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ spin_lock_bh(&dp->dp_lock);
+ list_for_each_entry_safe(peer, tmp, &dp->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
ath12k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
- list_del(&peer->list);
- kfree(peer);
+ ath12k_dp_link_peer_free(peer);
ar->num_peers--;
}
- spin_unlock_bh(&ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
}
static int ath12k_wait_for_peer_deleted(struct ath12k *ar, int vdev_id, const u8 *addr)
{
- return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
+ return ath12k_wait_for_dp_link_peer_common(ar->ab, vdev_id, addr, false);
}
int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
@@ -293,6 +110,10 @@ int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+ ath12k_dp_link_peer_unassign(ath12k_ab_to_dp(ar->ab),
+ &(ath12k_ar_to_ah(ar)->dp_hw), vdev_id,
+ addr, ar->hw_link_id);
+
ret = ath12k_peer_delete_send(ar, vdev_id, addr);
if (ret)
return ret;
@@ -308,7 +129,7 @@ int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 *addr)
{
- return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
+ return ath12k_wait_for_dp_link_peer_common(ar->ab, vdev_id, addr, true);
}
int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
@@ -316,28 +137,34 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ath12k_wmi_peer_create_arg *arg)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_dp_link_vif *dp_link_vif;
struct ath12k_link_sta *arsta;
u8 link_id = arvif->link_id;
- struct ath12k_peer *peer;
+ struct ath12k_dp_link_peer *peer;
struct ath12k_sta *ahsta;
u16 ml_peer_id;
int ret;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ar->ab);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+ dp_link_vif = ath12k_dp_vif_to_dp_link_vif(&ahvif->dp_vif, link_id);
+
if (ar->num_peers > (ar->max_num_peers - 1)) {
ath12k_warn(ar->ab,
"failed to create peer due to insufficient peer entry resource in firmware\n");
return -ENOBUFS;
}
- spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, arg->peer_addr);
+ spin_lock_bh(&dp->dp_lock);
+ peer = ath12k_dp_link_peer_find_by_pdev_and_addr(dp, ar->pdev_idx,
+ arg->peer_addr);
if (peer) {
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
return -EINVAL;
}
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
ret = ath12k_wmi_send_peer_create_cmd(ar, arg);
if (ret) {
@@ -352,11 +179,12 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
if (ret)
return ret;
- spin_lock_bh(&ar->ab->base_lock);
+ spin_lock_bh(&dp->dp_lock);
- peer = ath12k_peer_find(ar->ab, arg->vdev_id, arg->peer_addr);
+ peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arg->vdev_id,
+ arg->peer_addr);
if (!peer) {
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
arg->peer_addr, arg->vdev_id);
@@ -382,13 +210,10 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
peer->sta = sta;
if (vif->type == NL80211_IFTYPE_STATION) {
- arvif->ast_hash = peer->ast_hash;
- arvif->ast_idx = peer->hw_peer_id;
+ dp_link_vif->ast_hash = peer->ast_hash;
+ dp_link_vif->ast_idx = peer->hw_peer_id;
}
- if (vif->type == NL80211_IFTYPE_AP)
- peer->ucast_ra_only = true;
-
if (sta) {
ahsta = ath12k_sta_to_ahsta(sta);
arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
@@ -412,17 +237,22 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
}
}
- peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
- peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
-
ar->num_peers++;
- spin_unlock_bh(&ar->ab->base_lock);
+ spin_unlock_bh(&dp->dp_lock);
- return 0;
+ if (arvif->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
+ ret = ath12k_dp_link_peer_assign(ath12k_ab_to_dp(ar->ab),
+ &(ath12k_ar_to_ah(ar)->dp_hw),
+ arvif->vdev_id, sta,
+ (u8 *)arg->peer_addr, link_id,
+ ar->hw_link_id);
+ }
+
+ return ret;
}
-static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
+u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
{
u16 ml_peer_id;
@@ -442,68 +272,6 @@ static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
return ml_peer_id;
}
-int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta)
-{
- struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k_ml_peer *ml_peer;
-
- lockdep_assert_wiphy(ah->hw->wiphy);
-
- if (!sta->mlo)
- return -EINVAL;
-
- ml_peer = ath12k_peer_ml_find(ah, sta->addr);
- if (ml_peer) {
- ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM",
- ml_peer->id, sta->addr);
- return -EEXIST;
- }
-
- ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC);
- if (!ml_peer)
- return -ENOMEM;
-
- ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);
-
- if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
- ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM",
- sta->addr);
- kfree(ml_peer);
- return -ENOMEM;
- }
-
- ether_addr_copy(ml_peer->addr, sta->addr);
- ml_peer->id = ahsta->ml_peer_id;
- list_add(&ml_peer->list, &ah->ml_peers);
-
- return 0;
-}
-
-int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta)
-{
- struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k_ml_peer *ml_peer;
-
- lockdep_assert_wiphy(ah->hw->wiphy);
-
- if (!sta->mlo)
- return -EINVAL;
-
- clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
- ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
-
- ml_peer = ath12k_peer_ml_find(ah, sta->addr);
- if (!ml_peer) {
- ath12k_hw_warn(ah, "ML peer for %pM not found", sta->addr);
- return -EINVAL;
- }
-
- list_del(&ml_peer->list);
- kfree(ml_peer);
-
- return 0;
-}
-
int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta)
{
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
@@ -536,6 +304,11 @@ int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_st
ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
+ ath12k_dp_link_peer_unassign(ath12k_ab_to_dp(ar->ab),
+ &(ath12k_ar_to_ah(ar)->dp_hw),
+ arvif->vdev_id, arsta->addr,
+ ar->hw_link_id);
+
ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr);
if (ret) {
ath12k_warn(ar->ab,
@@ -568,3 +341,119 @@ int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_st
return err_ret;
}
+
+static int ath12k_link_sta_rhash_insert(struct ath12k_base *ab,
+ struct ath12k_link_sta *arsta)
+{
+ struct ath12k_link_sta *tmp;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ tmp = rhashtable_lookup_get_insert_fast(ab->rhead_sta_addr, &arsta->rhash_addr,
+ ab->rhash_sta_addr_param);
+ if (!tmp)
+ return 0;
+ else if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ else
+ return -EEXIST;
+}
+
+static int ath12k_link_sta_rhash_remove(struct ath12k_base *ab,
+ struct ath12k_link_sta *arsta)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ ret = rhashtable_remove_fast(ab->rhead_sta_addr, &arsta->rhash_addr,
+ ab->rhash_sta_addr_param);
+ if (ret && ret != -ENOENT)
+ return ret;
+
+ return 0;
+}
+
+int ath12k_link_sta_rhash_add(struct ath12k_base *ab,
+ struct ath12k_link_sta *arsta)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ ret = ath12k_link_sta_rhash_insert(ab, arsta);
+ if (ret)
+ ath12k_warn(ab, "failed to add arsta %pM in rhash_addr ret %d\n",
+ arsta->addr, ret);
+
+ return ret;
+}
+
+void ath12k_link_sta_rhash_delete(struct ath12k_base *ab,
+ struct ath12k_link_sta *arsta)
+{
+ /*
+ * The return type of this function is void since there is nothing
+ * to be done in the failure case.
+ */
+ int ret;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ ret = ath12k_link_sta_rhash_remove(ab, arsta);
+ if (ret)
+ ath12k_warn(ab,
+ "failed to remove arsta %pM in rhash_addr ret %d\n",
+ arsta->addr, ret);
+}
+
+int ath12k_link_sta_rhash_tbl_init(struct ath12k_base *ab)
+{
+ struct rhashtable_params *param;
+ struct rhashtable *rhash_addr_tbl;
+ int ret;
+
+ rhash_addr_tbl = kzalloc(sizeof(*ab->rhead_sta_addr), GFP_KERNEL);
+ if (!rhash_addr_tbl)
+ return -ENOMEM;
+
+ param = &ab->rhash_sta_addr_param;
+
+ param->key_offset = offsetof(struct ath12k_link_sta, addr);
+ param->head_offset = offsetof(struct ath12k_link_sta, rhash_addr);
+ param->key_len = sizeof_field(struct ath12k_link_sta, addr);
+ param->automatic_shrinking = true;
+ param->nelem_hint = ab->num_radios * ath12k_core_get_max_peers_per_radio(ab);
+
+ ret = rhashtable_init(rhash_addr_tbl, param);
+ if (ret) {
+ ath12k_warn(ab, "failed to init peer addr rhash table %d\n",
+ ret);
+ goto err_free;
+ }
+
+ ab->rhead_sta_addr = rhash_addr_tbl;
+
+ return 0;
+
+err_free:
+ kfree(rhash_addr_tbl);
+
+ return ret;
+}
+
+void ath12k_link_sta_rhash_tbl_destroy(struct ath12k_base *ab)
+{
+ rhashtable_destroy(ab->rhead_sta_addr);
+ kfree(ab->rhead_sta_addr);
+ ab->rhead_sta_addr = NULL;
+}
+
+struct ath12k_link_sta *ath12k_link_sta_find_by_addr(struct ath12k_base *ab,
+ const u8 *addr)
+{
+ lockdep_assert_held(&ab->base_lock);
+
+ return rhashtable_lookup_fast(ab->rhead_sta_addr, addr,
+ ab->rhash_sta_addr_param);
+}
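
The lookup helper asserts ab->base_lock rather than taking it, so callers own the locking; a hypothetical caller sketch (not part of this patch):

static bool ath12k_link_sta_known(struct ath12k_base *ab, const u8 *addr)
{
	struct ath12k_link_sta *arsta;
	bool found;

	spin_lock_bh(&ab->base_lock);
	arsta = ath12k_link_sta_find_by_addr(ab, addr);
	found = arsta != NULL;
	spin_unlock_bh(&ab->base_lock);

	return found;
}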
diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
index 44afc0b7dd53..49d89796bc46 100644
--- a/drivers/net/wireless/ath/ath12k/peer.h
+++ b/drivers/net/wireless/ath/ath12k/peer.h
@@ -1,84 +1,14 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_PEER_H
#define ATH12K_PEER_H
-#include "dp_rx.h"
+#include "dp_peer.h"
-struct ppdu_user_delayba {
- u16 sw_peer_id;
- u32 info0;
- u16 ru_end;
- u16 ru_start;
- u32 info1;
- u32 rate_flags;
- u32 resp_rate_flags;
-};
-
-#define ATH12K_PEER_ML_ID_VALID BIT(13)
-
-struct ath12k_peer {
- struct list_head list;
- struct ieee80211_sta *sta;
- int vdev_id;
- u8 addr[ETH_ALEN];
- int peer_id;
- u16 ast_hash;
- u8 pdev_idx;
- u16 hw_peer_id;
-
- /* protected by ab->data_lock */
- struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
- struct ath12k_dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
-
- /* Info used in MMIC verification of
- * RX fragments
- */
- struct crypto_shash *tfm_mmic;
- u8 mcast_keyidx;
- u8 ucast_keyidx;
- u16 sec_type;
- u16 sec_type_grp;
- struct ppdu_user_delayba ppdu_stats_delayba;
- bool delayba_flag;
- bool is_authorized;
- bool mlo;
- /* protected by ab->data_lock */
- bool dp_setup_done;
-
- u16 ml_id;
-
- /* any other ML info common for all partners can be added
- * here and would be same for all partner peers.
- */
- u8 ml_addr[ETH_ALEN];
-
- /* To ensure only certain work related to dp is done once */
- bool primary_link;
-
- /* for reference to ath12k_link_sta */
- u8 link_id;
- bool ucast_ra_only;
-};
-
-struct ath12k_ml_peer {
- struct list_head list;
- u8 addr[ETH_ALEN];
- u16 id;
-};
-
-void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
-void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
- u8 *mac_addr, u16 ast_hash, u16 hw_peer_id);
-struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
- const u8 *addr);
-struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
- const u8 *addr);
-struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, int peer_id);
void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id);
int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr);
int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
@@ -86,38 +16,14 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ath12k_wmi_peer_create_arg *arg);
int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
const u8 *addr);
-bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id);
-struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash);
-int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta);
-int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta);
int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta);
struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah,
const u8 *addr);
-static inline
-struct ath12k_link_sta *ath12k_peer_get_link_sta(struct ath12k_base *ab,
- struct ath12k_peer *peer)
-{
- struct ath12k_sta *ahsta;
- struct ath12k_link_sta *arsta;
-
- if (!peer->sta)
- return NULL;
-
- ahsta = ath12k_sta_to_ahsta(peer->sta);
- if (peer->ml_id & ATH12K_PEER_ML_ID_VALID) {
- if (!(ahsta->links_map & BIT(peer->link_id))) {
- ath12k_warn(ab, "peer %pM id %d link_id %d can't found in STA link_map 0x%x\n",
- peer->addr, peer->peer_id, peer->link_id,
- ahsta->links_map);
- return NULL;
- }
- arsta = rcu_dereference(ahsta->link[peer->link_id]);
- if (!arsta)
- return NULL;
- } else {
- arsta = &ahsta->deflink;
- }
- return arsta;
-}
-
+int ath12k_link_sta_rhash_tbl_init(struct ath12k_base *ab);
+void ath12k_link_sta_rhash_tbl_destroy(struct ath12k_base *ab);
+void ath12k_link_sta_rhash_delete(struct ath12k_base *ab, struct ath12k_link_sta *arsta);
+int ath12k_link_sta_rhash_add(struct ath12k_base *ab, struct ath12k_link_sta *arsta);
+struct ath12k_link_sta *ath12k_link_sta_find_by_addr(struct ath12k_base *ab,
+ const u8 *addr);
+u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah);
#endif /* _PEER_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index b7c48b6706df..cfde4147c8fc 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -1623,6 +1623,47 @@ static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
},
};
+static const struct qmi_elem_info qmi_wlanfw_aux_uc_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_aux_uc_info_req_msg_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_aux_uc_info_req_msg_v01, size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_aux_uc_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_aux_uc_info_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
@@ -2609,6 +2650,7 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
case M3_DUMP_REGION_TYPE:
case PAGEABLE_MEM_REGION_TYPE:
case CALDB_MEM_REGION_TYPE:
+ case LPASS_SHARED_V01_REGION_TYPE:
ret = ath12k_qmi_alloc_chunk(ab, chunk);
if (ret)
goto err;
@@ -3236,6 +3278,131 @@ out:
return ret;
}
+static void ath12k_qmi_aux_uc_free(struct ath12k_base *ab)
+{
+ struct m3_mem_region *aux_uc_mem = &ab->qmi.aux_uc_mem;
+
+ if (!aux_uc_mem->vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, aux_uc_mem->total_size,
+ aux_uc_mem->vaddr, aux_uc_mem->paddr);
+ aux_uc_mem->vaddr = NULL;
+ aux_uc_mem->total_size = 0;
+ aux_uc_mem->size = 0;
+}
+
+static int ath12k_qmi_aux_uc_load(struct ath12k_base *ab)
+{
+ struct m3_mem_region *aux_uc_mem = &ab->qmi.aux_uc_mem;
+ const struct firmware *fw = NULL;
+ const void *aux_uc_data;
+ char path[100];
+ size_t aux_uc_len;
+ int ret;
+
+ if (ab->fw.aux_uc_data && ab->fw.aux_uc_len > 0) {
+ /* firmware-N.bin had an aux_uc firmware file so use that */
+ aux_uc_data = ab->fw.aux_uc_data;
+ aux_uc_len = ab->fw.aux_uc_len;
+ } else {
+ /*
+ * No aux_uc file in firmware-N.bin, so try to request the old
+ * separate aux_ucode.bin.
+ */
+ fw = ath12k_core_firmware_request(ab, ATH12K_AUX_UC_FILE);
+ if (IS_ERR(fw)) {
+ ret = PTR_ERR(fw);
+ ath12k_core_create_firmware_path(ab, ATH12K_AUX_UC_FILE,
+ path, sizeof(path));
+ ath12k_err(ab, "failed to load %s: %d\n", path, ret);
+ return ret;
+ }
+
+ aux_uc_data = fw->data;
+ aux_uc_len = fw->size;
+ }
+
+ /* In recovery/resume cases the AUX_UC buffer is not freed; try to reuse it */
+ if (aux_uc_mem->vaddr) {
+ if (aux_uc_mem->total_size >= aux_uc_len)
+ goto copy;
+
+ /* Old buffer is too small, free and reallocate */
+ ath12k_qmi_aux_uc_free(ab);
+ }
+
+ aux_uc_mem->vaddr = dma_alloc_coherent(ab->dev, aux_uc_len,
+ &aux_uc_mem->paddr, GFP_KERNEL);
+ if (!aux_uc_mem->vaddr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ aux_uc_mem->total_size = aux_uc_len;
+
+copy:
+ memcpy(aux_uc_mem->vaddr, aux_uc_data, aux_uc_len);
+ aux_uc_mem->size = aux_uc_len;
+
+ ret = 0;
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static noinline_for_stack
+int ath12k_qmi_wlanfw_aux_uc_info_send(struct ath12k_base *ab)
+{
+ struct m3_mem_region *aux_uc_mem = &ab->qmi.aux_uc_mem;
+ struct qmi_wlanfw_aux_uc_info_req_msg_v01 req = {};
+ struct qmi_wlanfw_aux_uc_info_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
+ int ret = 0;
+
+ ret = ath12k_qmi_aux_uc_load(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to load aux_uc firmware: %d", ret);
+ return ret;
+ }
+
+ req.addr = aux_uc_mem->paddr;
+ req.size = aux_uc_mem->size;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_aux_uc_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_AUX_UC_INFO_REQ_V01,
+ QMI_WLANFW_AUX_UC_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ qmi_wlanfw_aux_uc_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "qmi failed to send AUX_UC information request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed AUX_UC information request %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "qmi AUX_UC info request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ return ret;
+}
+
static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
u32 mode)
{
@@ -3600,6 +3767,7 @@ static noinline_for_stack
int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
+ const struct ath12k_hw_params *hw_params = ab->hw_params;
int ret;
ret = ath12k_qmi_request_target_cap(ab);
@@ -3620,7 +3788,7 @@ int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
return ret;
}
- if (ab->hw_params->download_calib) {
+ if (hw_params->download_calib) {
ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_CALIBRATION);
if (ret < 0)
ath12k_warn(ab, "qmi failed to load calibrated data :%d\n", ret);
@@ -3632,6 +3800,14 @@ int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
return ret;
}
+ if (hw_params->fw.download_aux_ucode) {
+ ret = ath12k_qmi_wlanfw_aux_uc_info_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send aux_uc info req: %d\n", ret);
+ return ret;
+ }
+ }
+
return ret;
}
@@ -3905,6 +4081,7 @@ void ath12k_qmi_deinit_service(struct ath12k_base *ab)
qmi_handle_release(&ab->qmi.handle);
cancel_work_sync(&ab->qmi.event_work);
destroy_workqueue(ab->qmi.event_wq);
+ ath12k_qmi_aux_uc_free(ab);
ath12k_qmi_m3_free(ab);
ath12k_qmi_free_target_mem_chunk(ab);
ab->qmi.ab = NULL;
@@ -3913,5 +4090,6 @@ void ath12k_qmi_deinit_service(struct ath12k_base *ab)
void ath12k_qmi_free_resource(struct ath12k_base *ab)
{
ath12k_qmi_free_target_mem_chunk(ab);
+ ath12k_qmi_aux_uc_free(ab);
ath12k_qmi_m3_free(ab);
}
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index 7a88268aa1e9..b5a4a01391cb 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -154,6 +154,7 @@ struct ath12k_qmi {
u8 num_radios;
struct target_info target;
struct m3_mem_region m3_mem;
+ struct m3_mem_region aux_uc_mem;
unsigned int service_ins_id;
struct dev_mem_info dev_mem[ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01];
};
@@ -178,6 +179,7 @@ enum ath12k_qmi_target_mem {
CALDB_MEM_REGION_TYPE = 0x4,
MLO_GLOBAL_MEM_REGION_TYPE = 0x8,
PAGEABLE_MEM_REGION_TYPE = 0x9,
+ LPASS_SHARED_V01_REGION_TYPE = 0xb,
};
enum qmi_wlanfw_host_build_type {
@@ -202,6 +204,7 @@ enum ath12k_qmi_cnss_feature {
CNSS_FEATURE_MIN_ENUM_VAL_V01 = INT_MIN,
CNSS_QDSS_CFG_MISS_V01 = 3,
CNSS_PCIE_PERST_NO_PULL_V01 = 4,
+ CNSS_AUX_UC_SUPPORT_V01 = 6,
CNSS_MAX_FEATURE_V01 = 64,
CNSS_FEATURE_MAX_ENUM_VAL_V01 = INT_MAX,
};
@@ -540,6 +543,19 @@ struct qmi_wlanfw_m3_info_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+#define QMI_WLANFW_AUX_UC_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+#define QMI_WLANFW_AUX_UC_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+#define QMI_WLANFW_AUX_UC_INFO_REQ_V01 0x005A
+
+struct qmi_wlanfw_aux_uc_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+struct qmi_wlanfw_aux_uc_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
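
For reference, the max-len values above follow from the standard QMI TLV wire encoding, where every element carries a 3-byte header (1-byte type, 2-byte little-endian length): the request is 3 + 8 (u64 addr) + 3 + 4 (u32 size) = 18 bytes, and the response is 3 + 4 (struct qmi_response_type_v01, two 16-bit fields) = 7 bytes.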
#define QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN 11
#define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN 803
diff --git a/drivers/net/wireless/ath/ath12k/testmode.c b/drivers/net/wireless/ath/ath12k/testmode.c
index fb6af7ccf71f..05a65970c862 100644
--- a/drivers/net/wireless/ath/ath12k/testmode.c
+++ b/drivers/net/wireless/ath/ath12k/testmode.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "testmode.h"
@@ -393,3 +393,4 @@ int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(ath12k_tm_cmd);
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/Makefile b/drivers/net/wireless/ath/ath12k/wifi7/Makefile
new file mode 100644
index 000000000000..45b561cdba4b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_ATH12K) += ath12k_wifi7.o
+ath12k_wifi7-y += core.o \
+ pci.o \
+ wmi.o \
+ mhi.o \
+ ce.o \
+ hw.o \
+ hal_tx.o \
+ hal_rx.o \
+ dp_rx.o \
+ dp_tx.o \
+ dp.o \
+ dp_mon.o \
+ hal.o \
+ hal_qcn9274.o \
+ hal_wcn7850.o \
+ hal_qcc2072.o
+
+ath12k_wifi7-$(CONFIG_ATH12K_AHB) += ahb.o
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/ahb.c b/drivers/net/wireless/ath/ath12k/wifi7/ahb.c
new file mode 100644
index 000000000000..a6c5f7689edd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/ahb.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include "../ahb.h"
+#include "ahb.h"
+#include "../debug.h"
+#include "../hif.h"
+#include "hw.h"
+#include "dp.h"
+#include "core.h"
+
+static const struct of_device_id ath12k_wifi7_ahb_of_match[] = {
+ { .compatible = "qcom,ipq5332-wifi",
+ .data = (void *)ATH12K_HW_IPQ5332_HW10,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath12k_wifi7_ahb_of_match);
+
+static int ath12k_wifi7_ahb_probe(struct platform_device *pdev)
+{
+ struct ath12k_ahb *ab_ahb;
+ enum ath12k_hw_rev hw_rev;
+ struct ath12k_base *ab;
+ int ret;
+
+ ab = platform_get_drvdata(pdev);
+ ab_ahb = ath12k_ab_to_ahb(ab);
+
+ hw_rev = (enum ath12k_hw_rev)(kernel_ulong_t)of_device_get_match_data(&pdev->dev);
+ switch (hw_rev) {
+ case ATH12K_HW_IPQ5332_HW10:
+ ab_ahb->userpd_id = ATH12K_IPQ5332_USERPD_ID;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ab->target_mem_mode = ATH12K_QMI_MEMORY_MODE_DEFAULT;
+ ab->hw_rev = hw_rev;
+
+ ret = ath12k_wifi7_hw_init(ab);
+ if (ret) {
+ ath12k_err(ab, "WiFi-7 hw_init for AHB failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct ath12k_ahb_driver ath12k_wifi7_ahb_driver = {
+ .name = "ath12k_wifi7_ahb",
+ .id_table = ath12k_wifi7_ahb_of_match,
+ .ops.probe = ath12k_wifi7_ahb_probe,
+ .ops.arch_init = ath12k_wifi7_arch_init,
+ .ops.arch_deinit = ath12k_wifi7_arch_deinit,
+};
+
+int ath12k_wifi7_ahb_init(void)
+{
+ return ath12k_ahb_register_driver(ATH12K_DEVICE_FAMILY_WIFI7,
+ &ath12k_wifi7_ahb_driver);
+}
+
+void ath12k_wifi7_ahb_exit(void)
+{
+ ath12k_ahb_unregister_driver(ATH12K_DEVICE_FAMILY_WIFI7);
+}
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/ahb.h b/drivers/net/wireless/ath/ath12k/wifi7/ahb.h
new file mode 100644
index 000000000000..5974c7cad69a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/ahb.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+#ifndef ATH12K_AHB_WIFI7_H
+#define ATH12K_AHB_WIFI7_H
+
+#ifdef CONFIG_ATH12K_AHB
+int ath12k_wifi7_ahb_init(void);
+void ath12k_wifi7_ahb_exit(void);
+#else
+static inline int ath12k_wifi7_ahb_init(void)
+{
+ return 0;
+}
+
+static inline void ath12k_wifi7_ahb_exit(void) {}
+#endif
+#endif /* ATH12K_AHB_WIFI7_H */
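
With CONFIG_ATH12K_AHB disabled, the stubs above make the AHB calls no-ops, so the family module can chain its bus registrations unconditionally. A hedged sketch of such an init path, assuming a PCI counterpart with the same init/exit shape (the actual module init is not part of this excerpt):

static int __init ath12k_wifi7_init(void)
{
	int ret;

	ret = ath12k_wifi7_pci_init();	/* assumed PCI counterpart */
	if (ret)
		return ret;

	ret = ath12k_wifi7_ahb_init();	/* no-op without CONFIG_ATH12K_AHB */
	if (ret)
		ath12k_wifi7_pci_exit();

	return ret;
}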
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/ce.c b/drivers/net/wireless/ath/ath12k/wifi7/ce.c
new file mode 100644
index 000000000000..952d6c39c333
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/ce.c
@@ -0,0 +1,973 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#include "../core.h"
+#include "../ce.h"
+#include "ce.h"
+#include "../dp_rx.h"
+
+/* Copy Engine (CE) configs for QCN9274 */
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath12k_wifi7_target_ce_config_wlan_qcn9274[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9, 10 and 11: Reserved for MHI */
+
+ /* CE12: Target CV prefetch */
+ {
+ .pipenum = __cpu_to_le32(12),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE13: Target CV prefetch */
+ {
+ .pipenum = __cpu_to_le32(13),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE14: WMI logging/CFR/Spectral/Radar */
+ {
+ .pipenum = __cpu_to_le32(14),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE15: Reserved */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ * Pipe direction:
+ * PIPEDIR_OUT = UL = host -> target
+ * PIPEDIR_IN = DL = target -> host
+ */
+const struct service_to_pipe
+ath12k_wifi7_target_service_to_ce_map_wlan_qcn9274[] = {
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(7),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_PKT_LOG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(5),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(14),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+const struct ce_attr ath12k_wifi7_host_ce_config_qcn9274[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE9: MHI */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE10: MHI */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE11: MHI */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE12: CV Prefetch */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE13: CV Prefetch */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE14: target->host dbg log */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE15: reserved for future use */
+ {
+ .flags = (CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+/* Copy Engine (CE) configs for WCN7850 */
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath12k_wifi7_target_ce_config_wlan_wcn7850[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE 9, 10, 11 are used by MHI driver */
+};
+
+const struct service_to_pipe
+ath12k_wifi7_target_service_to_ce_map_wlan_wcn7850[] = {
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+const struct ce_attr ath12k_wifi7_host_ce_config_wcn7850[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 64,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+/* Copy Engine (CE) configs for IPQ5332 */
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath12k_wifi7_target_ce_config_wlan_ipq5332[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI + HTC control */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host PKTLOG */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous HIF_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7: Reserved for CV Prefetch */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8: Reserved for target generic HIF memcpy */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9: WMI logging/CFR/Spectral/Radar */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE10: Unused TBD */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_NONE),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE11: Unused TBD */
+ {
+ .pipenum = __cpu_to_le32(11),
+ .pipedir = __cpu_to_le32(PIPEDIR_NONE),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+};
+
+const struct service_to_pipe
+ath12k_wifi7_target_service_to_ce_map_wlan_ipq5332[] = {
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_PKT_LOG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(5),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(9),
+ },
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+const struct ce_attr ath12k_wifi7_host_ce_config_ipq5332[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host PKTLOG */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: Target autonomous HIF_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: CV Prefetch */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE8: Target HIF memcpy (generic HIF memcpy) */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE9: WMI logging/CFR/Spectral/Radar */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ },
+
+ /* CE10: Unused */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE11: Unused */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/ce.h b/drivers/net/wireless/ath/ath12k/wifi7/ce.h
new file mode 100644
index 000000000000..369a14472913
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/ce.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_WIFI7_CE_H
+#define ATH12K_WIFI7_CE_H
+
+extern const struct ce_pipe_config ath12k_wifi7_target_ce_config_wlan_qcn9274[];
+extern const struct ce_pipe_config ath12k_wifi7_target_ce_config_wlan_wcn7850[];
+extern const struct ce_pipe_config ath12k_wifi7_target_ce_config_wlan_ipq5332[];
+
+extern const struct service_to_pipe ath12k_wifi7_target_service_to_ce_map_wlan_qcn9274[];
+extern const struct service_to_pipe ath12k_wifi7_target_service_to_ce_map_wlan_wcn7850[];
+extern const struct service_to_pipe ath12k_wifi7_target_service_to_ce_map_wlan_ipq5332[];
+
+extern const struct ce_attr ath12k_wifi7_host_ce_config_qcn9274[];
+extern const struct ce_attr ath12k_wifi7_host_ce_config_wcn7850[];
+extern const struct ce_attr ath12k_wifi7_host_ce_config_ipq5332[];
+
+#endif /* ATH12K_WIFI7_CE_H */
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/core.c b/drivers/net/wireless/ath/ath12k/wifi7/core.c
new file mode 100644
index 000000000000..a02c57acf137
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/core.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include "../ahb.h"
+#include "../pci.h"
+#include "pci.h"
+#include "ahb.h"
+#include "core.h"
+#include "dp.h"
+#include "../debug.h"
+
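+/* Bus registration results are recorded so that module exit only
+ * tears down the buses that actually registered successfully.
+ */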
+static int ahb_err, pci_err;
+
+int ath12k_wifi7_arch_init(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp;
+
+ dp = ath12k_wifi7_dp_device_alloc(ab);
+ if (!dp) {
+ ath12k_err(ab, "dp alloc failed\n");
+ return -EINVAL;
+ }
+
+ ab->dp = dp;
+
+ return 0;
+}
+
+void ath12k_wifi7_arch_deinit(struct ath12k_base *ab)
+{
+ ath12k_wifi7_dp_device_free(ab->dp);
+ ab->dp = NULL;
+}
+
+static int ath12k_wifi7_init(void)
+{
+ ahb_err = ath12k_wifi7_ahb_init();
+ if (ahb_err)
+ pr_warn("Failed to initialize ath12k Wi-Fi 7 AHB device: %d\n",
+ ahb_err);
+
+ pci_err = ath12k_wifi7_pci_init();
+ if (pci_err)
+ pr_warn("Failed to initialize ath12k Wi-Fi 7 PCI device: %d\n",
+ pci_err);
+
+ /* Succeed if at least one bus registered; if both failed,
+ * return the AHB error (the choice is arbitrary).
+ */
+ return ahb_err && pci_err ? ahb_err : 0;
+}
+
+static void ath12k_wifi7_exit(void)
+{
+ if (!pci_err)
+ ath12k_wifi7_pci_exit();
+
+ if (!ahb_err)
+ ath12k_wifi7_ahb_exit();
+}
+
+module_init(ath12k_wifi7_init);
+module_exit(ath12k_wifi7_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/core.h b/drivers/net/wireless/ath/ath12k/wifi7/core.h
new file mode 100644
index 000000000000..7e9689d2ddd7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/core.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+#ifndef ATH12K_CORE_WIFI7_H
+#define ATH12K_CORE_WIFI7_H
+
+struct ath12k_base;
+
+int ath12k_wifi7_arch_init(struct ath12k_base *ab);
+void ath12k_wifi7_arch_deinit(struct ath12k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/dp.c b/drivers/net/wireless/ath/ath12k/wifi7/dp.c
new file mode 100644
index 000000000000..2b194879ee80
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/dp.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+#include "../core.h"
+#include "../debug.h"
+#include "../dp_rx.h"
+#include "../dp_tx.h"
+#include "hal_desc.h"
+#include "../dp_mon.h"
+#include "dp_mon.h"
+#include "../dp_cmn.h"
+#include "dp_rx.h"
+#include "dp.h"
+#include "dp_tx.h"
+#include "hal.h"
+
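+/* NAPI poll core: service every DP ring whose mask covers this
+ * interrupt group, stopping early once the budget is exhausted.
+ */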
+static int ath12k_wifi7_dp_service_srng(struct ath12k_dp *dp,
+ struct ath12k_ext_irq_grp *irq_grp,
+ int budget)
+{
+ struct napi_struct *napi = &irq_grp->napi;
+ int grp_id = irq_grp->grp_id;
+ int work_done = 0;
+ int i = 0, j;
+ int tot_work_done = 0;
+ enum dp_monitor_mode monitor_mode;
+ u8 ring_mask;
+
+ if (dp->hw_params->ring_mask->tx[grp_id]) {
+ i = fls(dp->hw_params->ring_mask->tx[grp_id]) - 1;
+ ath12k_wifi7_dp_tx_completion_handler(dp, i);
+ }
+
+ if (dp->hw_params->ring_mask->rx_err[grp_id]) {
+ work_done = ath12k_wifi7_dp_rx_process_err(dp, napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (dp->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
+ work_done = ath12k_wifi7_dp_rx_process_wbm_err(dp, napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (dp->hw_params->ring_mask->rx[grp_id]) {
+ i = fls(dp->hw_params->ring_mask->rx[grp_id]) - 1;
+ work_done = ath12k_wifi7_dp_rx_process(dp, i, napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (dp->hw_params->ring_mask->rx_mon_status[grp_id]) {
+ ring_mask = dp->hw_params->ring_mask->rx_mon_status[grp_id];
+ for (i = 0; i < dp->ab->num_radios; i++) {
+ for (j = 0; j < dp->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * dp->hw_params->num_rxdma_per_pdev + j;
+
+ if (ring_mask & BIT(id)) {
+ work_done =
+ ath12k_wifi7_dp_mon_process_ring(dp, id, napi,
+ budget,
+ 0);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+ }
+ }
+ }
+
+ if (dp->hw_params->ring_mask->rx_mon_dest[grp_id]) {
+ monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
+ ring_mask = dp->hw_params->ring_mask->rx_mon_dest[grp_id];
+ for (i = 0; i < dp->ab->num_radios; i++) {
+ for (j = 0; j < dp->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * dp->hw_params->num_rxdma_per_pdev + j;
+
+ if (ring_mask & BIT(id)) {
+ work_done =
+ ath12k_wifi7_dp_mon_process_ring(dp, id, napi,
+ budget,
+ monitor_mode);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+ }
+ }
+ }
+
+ if (dp->hw_params->ring_mask->tx_mon_dest[grp_id]) {
+ monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
+ ring_mask = dp->hw_params->ring_mask->tx_mon_dest[grp_id];
+ for (i = 0; i < dp->ab->num_radios; i++) {
+ for (j = 0; j < dp->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * dp->hw_params->num_rxdma_per_pdev + j;
+
+ if (ring_mask & BIT(id)) {
+ work_done =
+ ath12k_wifi7_dp_mon_process_ring(dp, id,
+ napi, budget,
+ monitor_mode);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+ }
+ }
+ }
+
+ if (dp->hw_params->ring_mask->reo_status[grp_id])
+ ath12k_wifi7_dp_rx_process_reo_status(dp);
+
+ if (dp->hw_params->ring_mask->host2rxdma[grp_id]) {
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ LIST_HEAD(list);
+
+ ath12k_dp_rx_bufs_replenish(dp, rx_ring, &list, 0);
+ }
+
+ /* TODO: Implement handler for other interrupts */
+
+done:
+ return tot_work_done;
+}
+
+static struct ath12k_dp_arch_ops ath12k_wifi7_dp_arch_ops = {
+ .service_srng = ath12k_wifi7_dp_service_srng,
+ .tx_get_vdev_bank_config = ath12k_wifi7_dp_tx_get_vdev_bank_config,
+ .reo_cmd_send = ath12k_wifi7_dp_reo_cmd_send,
+ .setup_pn_check_reo_cmd = ath12k_wifi7_dp_setup_pn_check_reo_cmd,
+ .rx_peer_tid_delete = ath12k_wifi7_dp_rx_peer_tid_delete,
+ .reo_cache_flush = ath12k_wifi7_dp_reo_cache_flush,
+ .rx_link_desc_return = ath12k_wifi7_dp_rx_link_desc_return,
+ .rx_frags_cleanup = ath12k_wifi7_dp_rx_frags_cleanup,
+ .peer_rx_tid_reo_update = ath12k_wifi7_peer_rx_tid_reo_update,
+ .rx_assign_reoq = ath12k_wifi7_dp_rx_assign_reoq,
+ .peer_rx_tid_qref_setup = ath12k_wifi7_peer_rx_tid_qref_setup,
+ .peer_rx_tid_qref_reset = ath12k_wifi7_peer_rx_tid_qref_reset,
+ .rx_tid_delete_handler = ath12k_wifi7_dp_rx_tid_delete_handler,
+};
+
+/* TODO: remove export once this file is built with wifi7 ko */
+struct ath12k_dp *ath12k_wifi7_dp_device_alloc(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp;
+
+ /* TODO: align dp later if cache alignment becomes a bottleneck */
+ dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return NULL;
+
+ dp->ab = ab;
+ dp->dev = ab->dev;
+ dp->hw_params = ab->hw_params;
+ dp->hal = &ab->hal;
+
+ dp->ops = &ath12k_wifi7_dp_arch_ops;
+
+ return dp;
+}
+
+void ath12k_wifi7_dp_device_free(struct ath12k_dp *dp)
+{
+ kfree(dp);
+}
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/dp.h b/drivers/net/wireless/ath/ath12k/wifi7/dp.h
new file mode 100644
index 000000000000..a5f0941d34e2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/dp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_DP_WIFI7_H
+#define ATH12K_DP_WIFI7_H
+
+#include "../dp_cmn.h"
+#include "hw.h"
+
+struct ath12k_base;
+struct ath12k_dp;
+enum dp_monitor_mode;
+
+struct ath12k_dp *ath12k_wifi7_dp_device_alloc(struct ath12k_base *ab);
+void ath12k_wifi7_dp_device_free(struct ath12k_dp *dp);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c b/drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
new file mode 100644
index 000000000000..bd741532b7dc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
@@ -0,0 +1,3385 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "hal_desc.h"
+#include "../dp_mon.h"
+#include "dp_mon.h"
+#include "../debug.h"
+#include "hal_qcn9274.h"
+#include "dp_rx.h"
+#include "../dp_tx.h"
+#include "../peer.h"
+
+static void
+ath12k_wifi7_dp_mon_hal_aggr_tlv(struct hal_rx_mon_ppdu_info *ppdu_info,
+ u16 tlv_len, const void *tlv_data)
+{
+ if (tlv_len <= HAL_RX_MON_MAX_AGGR_SIZE - ppdu_info->tlv_aggr.cur_len) {
+ memcpy(ppdu_info->tlv_aggr.buf + ppdu_info->tlv_aggr.cur_len,
+ tlv_data, tlv_len);
+ ppdu_info->tlv_aggr.cur_len += tlv_len;
+ }
+}
+
+static void
+ath12k_wifi7_dp_mon_rx_memset_ppdu_info(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+}
+
+/* The hardware fills buffers in 128-byte aligned blocks, so the
+ * buffers must be reaped with 128-byte alignment as well.
+ */
+#define RXDMA_DATA_DMA_BLOCK_SIZE 128
+
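+/* Work out how many bytes of the current buffer belong to the MSDU:
+ * buffers flagged with MSDU_CONTINUATION contribute a full aligned
+ * block, while the final buffer carries the remainder and completes
+ * one MSDU.
+ */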
+static void
+ath12k_wifi7_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
+ bool *is_frag, u32 *total_len,
+ u32 *frag_len, u32 *msdu_cnt)
+{
+ if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
+ *is_frag = true;
+ *frag_len = (RX_MON_STATUS_BASE_BUF_SIZE -
+ sizeof(struct hal_rx_desc)) &
+ ~(RXDMA_DATA_DMA_BLOCK_SIZE - 1);
+ *total_len += *frag_len;
+ } else {
+ if (*is_frag)
+ *frag_len = info->msdu_len - *total_len;
+ else
+ *frag_len = info->msdu_len;
+
+ *msdu_cnt -= 1;
+ }
+}
+
+static void
+ath12k_wifi7_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user,
+ struct hal_rx_user_status *rx_user_status)
+{
+ rx_user_status->ul_ofdma_user_v0_word0 =
+ __le32_to_cpu(ppdu_end_user->usr_resp_ref);
+ rx_user_status->ul_ofdma_user_v0_word1 =
+ __le32_to_cpu(ppdu_end_user->usr_resp_ref_ext);
+}
+
+static void
+ath12k_wifi7_dp_mon_rx_populate_byte_count(const struct hal_rx_ppdu_end_user_stats *stats,
+ void *ppduinfo,
+ struct hal_rx_user_status *rx_user_status)
+{
+ rx_user_status->mpdu_ok_byte_count =
+ le32_get_bits(stats->info7,
+ HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT);
+ rx_user_status->mpdu_err_byte_count =
+ le32_get_bits(stats->info8,
+ HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT);
+}
+
+static void
+ath12k_wifi7_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats *rx_tlv,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct hal_rx_user_status *rx_user_status)
+{
+ rx_user_status->ast_index = ppdu_info->ast_index;
+ rx_user_status->tid = ppdu_info->tid;
+ rx_user_status->tcp_ack_msdu_count =
+ ppdu_info->tcp_ack_msdu_count;
+ rx_user_status->tcp_msdu_count =
+ ppdu_info->tcp_msdu_count;
+ rx_user_status->udp_msdu_count =
+ ppdu_info->udp_msdu_count;
+ rx_user_status->other_msdu_count =
+ ppdu_info->other_msdu_count;
+ rx_user_status->frame_control = ppdu_info->frame_control;
+ rx_user_status->frame_control_info_valid =
+ ppdu_info->frame_control_info_valid;
+ rx_user_status->data_sequence_control_info_valid =
+ ppdu_info->data_sequence_control_info_valid;
+ rx_user_status->first_data_seq_ctrl =
+ ppdu_info->first_data_seq_ctrl;
+ rx_user_status->preamble_type = ppdu_info->preamble_type;
+ rx_user_status->ht_flags = ppdu_info->ht_flags;
+ rx_user_status->vht_flags = ppdu_info->vht_flags;
+ rx_user_status->he_flags = ppdu_info->he_flags;
+ rx_user_status->rs_flags = ppdu_info->rs_flags;
+
+ rx_user_status->mpdu_cnt_fcs_ok =
+ ppdu_info->num_mpdu_fcs_ok;
+ rx_user_status->mpdu_cnt_fcs_err =
+ ppdu_info->num_mpdu_fcs_err;
+ memcpy(&rx_user_status->mpdu_fcs_ok_bitmap[0], &ppdu_info->mpdu_fcs_ok_bitmap[0],
+ HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
+ sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0]));
+
+ ath12k_wifi7_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
+}
+
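+/* Map the HAL EHT RU size encoding to the radiotap RU/MRU size
+ * enumeration; unknown encodings map to ATH12K_EHT_RU_INVALID.
+ */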
+static inline enum ath12k_eht_ru_size
+hal_rx_mon_hal_ru_size_to_ath12k_ru_size(u32 hal_ru_size)
+{
+ switch (hal_ru_size) {
+ case HAL_EHT_RU_26:
+ return ATH12K_EHT_RU_26;
+ case HAL_EHT_RU_52:
+ return ATH12K_EHT_RU_52;
+ case HAL_EHT_RU_78:
+ return ATH12K_EHT_RU_52_26;
+ case HAL_EHT_RU_106:
+ return ATH12K_EHT_RU_106;
+ case HAL_EHT_RU_132:
+ return ATH12K_EHT_RU_106_26;
+ case HAL_EHT_RU_242:
+ return ATH12K_EHT_RU_242;
+ case HAL_EHT_RU_484:
+ return ATH12K_EHT_RU_484;
+ case HAL_EHT_RU_726:
+ return ATH12K_EHT_RU_484_242;
+ case HAL_EHT_RU_996:
+ return ATH12K_EHT_RU_996;
+ case HAL_EHT_RU_996x2:
+ return ATH12K_EHT_RU_996x2;
+ case HAL_EHT_RU_996x3:
+ return ATH12K_EHT_RU_996x3;
+ case HAL_EHT_RU_996x4:
+ return ATH12K_EHT_RU_996x4;
+ case HAL_EHT_RU_NONE:
+ return ATH12K_EHT_RU_INVALID;
+ case HAL_EHT_RU_996_484:
+ return ATH12K_EHT_RU_996_484;
+ case HAL_EHT_RU_996x2_484:
+ return ATH12K_EHT_RU_996x2_484;
+ case HAL_EHT_RU_996x3_484:
+ return ATH12K_EHT_RU_996x3_484;
+ case HAL_EHT_RU_996_484_242:
+ return ATH12K_EHT_RU_996_484_242;
+ default:
+ return ATH12K_EHT_RU_INVALID;
+ }
+}
+
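+/* Map the radiotap RU/MRU size back to the RU width value reported
+ * in the UL OFDMA user status.
+ */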
+static inline u32
+hal_rx_ul_ofdma_ru_size_to_width(enum ath12k_eht_ru_size ru_size)
+{
+ switch (ru_size) {
+ case ATH12K_EHT_RU_26:
+ return RU_26;
+ case ATH12K_EHT_RU_52:
+ return RU_52;
+ case ATH12K_EHT_RU_52_26:
+ return RU_52_26;
+ case ATH12K_EHT_RU_106:
+ return RU_106;
+ case ATH12K_EHT_RU_106_26:
+ return RU_106_26;
+ case ATH12K_EHT_RU_242:
+ return RU_242;
+ case ATH12K_EHT_RU_484:
+ return RU_484;
+ case ATH12K_EHT_RU_484_242:
+ return RU_484_242;
+ case ATH12K_EHT_RU_996:
+ return RU_996;
+ case ATH12K_EHT_RU_996_484:
+ return RU_996_484;
+ case ATH12K_EHT_RU_996_484_242:
+ return RU_996_484_242;
+ case ATH12K_EHT_RU_996x2:
+ return RU_2X996;
+ case ATH12K_EHT_RU_996x2_484:
+ return RU_2X996_484;
+ case ATH12K_EHT_RU_996x3:
+ return RU_3X996;
+ case ATH12K_EHT_RU_996x3_484:
+ return RU_3X996_484;
+ case ATH12K_EHT_RU_996x4:
+ return RU_4X996;
+ default:
+ return RU_INVALID;
+ }
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_info,
+ u16 user_id,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_user_status *mon_rx_user_status = NULL;
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ enum ath12k_eht_ru_size rtap_ru_size = ATH12K_EHT_RU_INVALID;
+ u32 ru_width, reception_type, ru_index = HAL_EHT_RU_INVALID;
+ u32 ru_type_80_0, ru_start_index_80_0;
+ u32 ru_type_80_1, ru_start_index_80_1;
+ u32 ru_type_80_2, ru_start_index_80_2;
+ u32 ru_type_80_3, ru_start_index_80_3;
+ u32 ru_size = 0, num_80mhz_with_ru = 0;
+ u64 ru_index_320mhz = 0;
+ u32 ru_index_per80mhz;
+
+ reception_type = le32_get_bits(rx_usr_info->info0,
+ HAL_RX_USR_INFO0_RECEPTION_TYPE);
+
+ switch (reception_type) {
+ case HAL_RECEPTION_TYPE_SU:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ case HAL_RECEPTION_TYPE_DL_MU_MIMO:
+ case HAL_RECEPTION_TYPE_UL_MU_MIMO:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ case HAL_RECEPTION_TYPE_DL_MU_OFMA:
+ case HAL_RECEPTION_TYPE_UL_MU_OFDMA:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+ break;
+ case HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO:
+ case HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO;
+ }
+
+ ppdu_info->is_stbc = le32_get_bits(rx_usr_info->info0, HAL_RX_USR_INFO0_STBC);
+ ppdu_info->ldpc = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_LDPC);
+ ppdu_info->dcm = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_STA_DCM);
+ ppdu_info->bw = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_RX_BW);
+ ppdu_info->mcs = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_MCS);
+ ppdu_info->nss = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_NSS) + 1;
+
+ if (user_id < HAL_MAX_UL_MU_USERS) {
+ mon_rx_user_status = &ppdu_info->userstats[user_id];
+ mon_rx_user_status->mcs = ppdu_info->mcs;
+ mon_rx_user_status->nss = ppdu_info->nss;
+ }
+
+ if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO ||
+ ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
+ ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO))
+ return;
+
+ /* RU allocation present only for OFDMA reception */
+ ru_type_80_0 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_0);
+ ru_start_index_80_0 = le32_get_bits(rx_usr_info->info3,
+ HAL_RX_USR_INFO3_RU_START_IDX_80_0);
+ if (ru_type_80_0 != HAL_EHT_RU_NONE) {
+ ru_size += ru_type_80_0;
+ ru_index_per80mhz = ru_start_index_80_0;
+ ru_index = ru_index_per80mhz;
+ ru_index_320mhz |= HAL_RU_PER80(ru_type_80_0, 0, ru_index_per80mhz);
+ num_80mhz_with_ru++;
+ }
+
+ ru_type_80_1 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_1);
+ ru_start_index_80_1 = le32_get_bits(rx_usr_info->info3,
+ HAL_RX_USR_INFO3_RU_START_IDX_80_1);
+ if (ru_type_80_1 != HAL_EHT_RU_NONE) {
+ ru_size += ru_type_80_1;
+ ru_index_per80mhz = ru_start_index_80_1;
+ ru_index = ru_index_per80mhz;
+ ru_index_320mhz |= HAL_RU_PER80(ru_type_80_1, 1, ru_index_per80mhz);
+ num_80mhz_with_ru++;
+ }
+
+ ru_type_80_2 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_2);
+ ru_start_index_80_2 = le32_get_bits(rx_usr_info->info3,
+ HAL_RX_USR_INFO3_RU_START_IDX_80_2);
+ if (ru_type_80_2 != HAL_EHT_RU_NONE) {
+ ru_size += ru_type_80_2;
+ ru_index_per80mhz = ru_start_index_80_2;
+ ru_index = ru_index_per80mhz;
+ ru_index_320mhz |= HAL_RU_PER80(ru_type_80_2, 2, ru_index_per80mhz);
+ num_80mhz_with_ru++;
+ }
+
+ ru_type_80_3 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_3);
+ ru_start_index_80_3 = le32_get_bits(rx_usr_info->info3,
+ HAL_RX_USR_INFO3_RU_START_IDX_80_3);
+ if (ru_type_80_3 != HAL_EHT_RU_NONE) {
+ ru_size += ru_type_80_3;
+ ru_index_per80mhz = ru_start_index_80_3;
+ ru_index = ru_index_per80mhz;
+ ru_index_320mhz |= HAL_RU_PER80(ru_type_80_3, 3, ru_index_per80mhz);
+ num_80mhz_with_ru++;
+ }
+
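+ /* An allocation spanning more than one 80 MHz segment is a
+ * multi-RU (MRU) combination; derive the MRU index from the
+ * packed per-80MHz type/index pairs.
+ */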
+ if (num_80mhz_with_ru > 1) {
+ /* Calculate the MRU index */
+ switch (ru_index_320mhz) {
+ case HAL_EHT_RU_996_484_0:
+ case HAL_EHT_RU_996x2_484_0:
+ case HAL_EHT_RU_996x3_484_0:
+ ru_index = 0;
+ break;
+ case HAL_EHT_RU_996_484_1:
+ case HAL_EHT_RU_996x2_484_1:
+ case HAL_EHT_RU_996x3_484_1:
+ ru_index = 1;
+ break;
+ case HAL_EHT_RU_996_484_2:
+ case HAL_EHT_RU_996x2_484_2:
+ case HAL_EHT_RU_996x3_484_2:
+ ru_index = 2;
+ break;
+ case HAL_EHT_RU_996_484_3:
+ case HAL_EHT_RU_996x2_484_3:
+ case HAL_EHT_RU_996x3_484_3:
+ ru_index = 3;
+ break;
+ case HAL_EHT_RU_996_484_4:
+ case HAL_EHT_RU_996x2_484_4:
+ case HAL_EHT_RU_996x3_484_4:
+ ru_index = 4;
+ break;
+ case HAL_EHT_RU_996_484_5:
+ case HAL_EHT_RU_996x2_484_5:
+ case HAL_EHT_RU_996x3_484_5:
+ ru_index = 5;
+ break;
+ case HAL_EHT_RU_996_484_6:
+ case HAL_EHT_RU_996x2_484_6:
+ case HAL_EHT_RU_996x3_484_6:
+ ru_index = 6;
+ break;
+ case HAL_EHT_RU_996_484_7:
+ case HAL_EHT_RU_996x2_484_7:
+ case HAL_EHT_RU_996x3_484_7:
+ ru_index = 7;
+ break;
+ case HAL_EHT_RU_996x2_484_8:
+ ru_index = 8;
+ break;
+ case HAL_EHT_RU_996x2_484_9:
+ ru_index = 9;
+ break;
+ case HAL_EHT_RU_996x2_484_10:
+ ru_index = 10;
+ break;
+ case HAL_EHT_RU_996x2_484_11:
+ ru_index = 11;
+ break;
+ default:
+ ru_index = HAL_EHT_RU_INVALID;
+ break;
+ }
+
+ ru_size += 4;
+ }
+
+ rtap_ru_size = hal_rx_mon_hal_ru_size_to_ath12k_ru_size(ru_size);
+ if (rtap_ru_size != ATH12K_EHT_RU_INVALID) {
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_SIZE_OM;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[1]);
+ data |= u32_encode_bits(rtap_ru_size,
+ IEEE80211_RADIOTAP_EHT_DATA1_RU_SIZE);
+ eht->data[1] = cpu_to_le32(data);
+ }
+
+ if (ru_index != HAL_EHT_RU_INVALID) {
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_INDEX_OM;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[1]);
+ data |= u32_encode_bits(ru_index,
+ IEEE80211_RADIOTAP_EHT_DATA1_RU_INDEX);
+ eht->data[1] = cpu_to_le32(data);
+ }
+
+ if (mon_rx_user_status && ru_index != HAL_EHT_RU_INVALID &&
+ rtap_ru_size != ATH12K_EHT_RU_INVALID) {
+ mon_rx_user_status->ul_ofdma_ru_start_index = ru_index;
+ mon_rx_user_status->ul_ofdma_ru_size = rtap_ru_size;
+
+ ru_width = hal_rx_ul_ofdma_ru_size_to_width(rtap_ru_size);
+
+ mon_rx_user_status->ul_ofdma_ru_width = ru_width;
+ mon_rx_user_status->ofdma_info_valid = 1;
+ }
+}
+
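+/* Decode the L-SIG-B rate field into an 802.11b (CCK) rate and flag
+ * the PPDU as CCK.
+ */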
+static void
+ath12k_wifi7_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 info0 = __le32_to_cpu(lsigb->info0);
+ u8 rate;
+
+ rate = u32_get_bits(info0, HAL_RX_LSIG_B_INFO_INFO0_RATE);
+ switch (rate) {
+ case 1:
+ rate = HAL_RX_LEGACY_RATE_1_MBPS;
+ break;
+ case 2:
+ case 5:
+ rate = HAL_RX_LEGACY_RATE_2_MBPS;
+ break;
+ case 3:
+ case 6:
+ rate = HAL_RX_LEGACY_RATE_5_5_MBPS;
+ break;
+ case 4:
+ case 7:
+ rate = HAL_RX_LEGACY_RATE_11_MBPS;
+ break;
+ default:
+ rate = HAL_RX_LEGACY_RATE_INVALID;
+ }
+
+ ppdu_info->rate = rate;
+ ppdu_info->cck_flag = 1;
+}
+
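+/* Decode the L-SIG-A rate field into a legacy OFDM rate (6-54 Mbps). */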
+static void
+ath12k_wifi7_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 info0 = __le32_to_cpu(lsiga->info0);
+ u8 rate;
+
+ rate = u32_get_bits(info0, HAL_RX_LSIG_A_INFO_INFO0_RATE);
+ switch (rate) {
+ case 8:
+ rate = HAL_RX_LEGACY_RATE_48_MBPS;
+ break;
+ case 9:
+ rate = HAL_RX_LEGACY_RATE_24_MBPS;
+ break;
+ case 10:
+ rate = HAL_RX_LEGACY_RATE_12_MBPS;
+ break;
+ case 11:
+ rate = HAL_RX_LEGACY_RATE_6_MBPS;
+ break;
+ case 12:
+ rate = HAL_RX_LEGACY_RATE_54_MBPS;
+ break;
+ case 13:
+ rate = HAL_RX_LEGACY_RATE_36_MBPS;
+ break;
+ case 14:
+ rate = HAL_RX_LEGACY_RATE_18_MBPS;
+ break;
+ case 15:
+ rate = HAL_RX_LEGACY_RATE_9_MBPS;
+ break;
+ default:
+ rate = HAL_RX_LEGACY_RATE_INVALID;
+ }
+
+ ppdu_info->rate = rate;
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_cmn(const struct hal_mon_usig_cmn *cmn,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 common;
+
+ ppdu_info->u_sig_info.bw = le32_get_bits(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_BW);
+ ppdu_info->u_sig_info.ul_dl = le32_get_bits(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_UL_DL);
+
+ common = __le32_to_cpu(ppdu_info->u_sig_info.usig.common);
+ common |= IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN |
+ ATH12K_LE32_DEC_ENC(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_PHY_VERSION,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) |
+ u32_encode_bits(ppdu_info->u_sig_info.bw,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) |
+ u32_encode_bits(ppdu_info->u_sig_info.ul_dl,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL) |
+ ATH12K_LE32_DEC_ENC(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_BSS_COLOR,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR) |
+ ATH12K_LE32_DEC_ENC(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_TXOP,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+ ppdu_info->u_sig_info.usig.common = cpu_to_le32(common);
+
+ switch (ppdu_info->u_sig_info.bw) {
+ default:
+ fallthrough;
+ case HAL_EHT_BW_20:
+ ppdu_info->bw = HAL_RX_BW_20MHZ;
+ break;
+ case HAL_EHT_BW_40:
+ ppdu_info->bw = HAL_RX_BW_40MHZ;
+ break;
+ case HAL_EHT_BW_80:
+ ppdu_info->bw = HAL_RX_BW_80MHZ;
+ break;
+ case HAL_EHT_BW_160:
+ ppdu_info->bw = HAL_RX_BW_160MHZ;
+ break;
+ case HAL_EHT_BW_320_1:
+ case HAL_EHT_BW_320_2:
+ ppdu_info->bw = HAL_RX_BW_320MHZ;
+ break;
+ }
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_tb(const struct hal_mon_usig_tb *usig_tb,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig;
+ enum ieee80211_radiotap_eht_usig_tb spatial_reuse1, spatial_reuse2;
+ u32 common, value, mask;
+
+ spatial_reuse1 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1;
+ spatial_reuse2 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2;
+
+ common = __le32_to_cpu(usig->common);
+ value = __le32_to_cpu(usig->value);
+ mask = __le32_to_cpu(usig->mask);
+
+ ppdu_info->u_sig_info.ppdu_type_comp_mode =
+ le32_get_bits(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE);
+
+ common |= ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
+
+ value |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD |
+ u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE) |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE |
+ ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1,
+ spatial_reuse1) |
+ ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2,
+ spatial_reuse2) |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD |
+ ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_CRC,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC) |
+ ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_TAIL,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL);
+
+ mask |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE |
+ spatial_reuse1 | spatial_reuse2 |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL;
+
+ usig->common = cpu_to_le32(common);
+ usig->value = cpu_to_le32(value);
+ usig->mask = cpu_to_le32(mask);
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_mu(const struct hal_mon_usig_mu *usig_mu,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig;
+ enum ieee80211_radiotap_eht_usig_mu sig_symb, punc;
+ u32 common, value, mask;
+
+ sig_symb = IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS;
+ punc = IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO;
+
+ common = __le32_to_cpu(usig->common);
+ value = __le32_to_cpu(usig->value);
+ mask = __le32_to_cpu(usig->mask);
+
+ ppdu_info->u_sig_info.ppdu_type_comp_mode =
+ le32_get_bits(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE);
+ ppdu_info->u_sig_info.eht_sig_mcs =
+ le32_get_bits(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS);
+ ppdu_info->u_sig_info.num_eht_sig_sym =
+ le32_get_bits(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM);
+
+ common |= ATH12K_LE32_DEC_ENC(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
+
+ value |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD |
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE |
+ u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE) |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE |
+ ATH12K_LE32_DEC_ENC(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO,
+ punc) |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE |
+ u32_encode_bits(ppdu_info->u_sig_info.eht_sig_mcs,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS) |
+ u32_encode_bits(ppdu_info->u_sig_info.num_eht_sig_sym,
+ sig_symb) |
+ ATH12K_LE32_DEC_ENC(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_CRC,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC) |
+ ATH12K_LE32_DEC_ENC(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_TAIL,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL);
+
+ mask |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD |
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE |
+ punc |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS |
+ sig_symb |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL;
+
+ usig->common = cpu_to_le32(common);
+ usig->value = cpu_to_le32(value);
+ usig->mask = cpu_to_le32(mask);
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_hdr(const struct hal_mon_usig_hdr *usig,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u8 comp_mode;
+
+ ppdu_info->eht_usig = true;
+
+ ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_cmn(&usig->cmn, ppdu_info);
+
+ comp_mode = le32_get_bits(usig->non_cmn.mu.info0,
+ HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE);
+
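+ /* comp_mode 0 with the UL flag set denotes a trigger-based
+ * PPDU, which carries the TB variant of U-SIG; all other
+ * combinations carry the MU variant.
+ */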
+ if (comp_mode == 0 && ppdu_info->u_sig_info.ul_dl)
+ ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_tb(&usig->non_cmn.tb, ppdu_info);
+ else
+ ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_mu(&usig->non_cmn.mu, ppdu_info);
+}
+
+static void
+ath12k_wifi7_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 nsts, info0, info1;
+ u8 gi_setting;
+
+ info0 = __le32_to_cpu(vht_sig->info0);
+ info1 = __le32_to_cpu(vht_sig->info1);
+
+ ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
+ ppdu_info->mcs = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_MCS);
+ gi_setting = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING);
+ switch (gi_setting) {
+ case HAL_RX_VHT_SIG_A_NORMAL_GI:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case HAL_RX_VHT_SIG_A_SHORT_GI:
+ case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY:
+ ppdu_info->gi = HAL_RX_GI_0_4_US;
+ break;
+ }
+
+ ppdu_info->is_stbc = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_STBC);
+ nsts = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS);
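+ /* With STBC the NSTS field counts space-time streams, i.e.
+ * twice the spatial streams, so halve it to recover NSS.
+ */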
+ if (ppdu_info->is_stbc && nsts > 0)
+ nsts = ((nsts + 1) >> 1) - 1;
+
+ ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK) + 1;
+ ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW);
+ ppdu_info->beamformed = u32_get_bits(info1,
+ HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED);
+ ppdu_info->vht_flag_values5 = u32_get_bits(info0,
+ HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID);
+ ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
+ ppdu_info->nss);
+ ppdu_info->vht_flag_values2 = ppdu_info->bw;
+ ppdu_info->vht_flag_values4 =
+ u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
+}
+
+static void
+ath12k_wifi7_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 info0 = __le32_to_cpu(ht_sig->info0);
+ u32 info1 = __le32_to_cpu(ht_sig->info1);
+
+ ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_MCS);
+ ppdu_info->bw = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_BW);
+ ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_STBC);
+ ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING);
+ ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI);
+ ppdu_info->nss = (ppdu_info->mcs >> 3) + 1;
+}
+
+static void
+ath12k_wifi7_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *ofdma,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 info0, value;
+
+ info0 = __le32_to_cpu(ofdma->info0);
+
+ ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN;
+
+ /* HE-data2 */
+ ppdu_info->he_data2 |= HE_TXBF_KNOWN;
+
+ ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS);
+ value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM);
+ value = value << HE_DCM_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING);
+ ppdu_info->ldpc = value;
+ value = value << HE_CODING_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ /* HE-data4 */
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID);
+ value = value << HE_STA_ID_SHIFT;
+ ppdu_info->he_data4 |= value;
+
+ ppdu_info->nss =
+ u32_get_bits(info0,
+ HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS) + 1;
+ ppdu_info->beamformed = u32_get_bits(info0,
+ HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF);
+}
+
+static void
+ath12k_wifi7_dp_mon_parse_he_sig_b2_mu(const struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 info0, value;
+
+ info0 = __le32_to_cpu(he_sig_b2_mu->info0);
+
+ ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_CODING_KNOWN;
+
+ ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS);
+ value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING);
+ ppdu_info->ldpc = value;
+ value = value << HE_CODING_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID);
+ value = value << HE_STA_ID_SHIFT;
+ ppdu_info->he_data4 |= value;
+
+ ppdu_info->nss =
+ u32_get_bits(info0,
+ HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS) + 1;
+}
+
+static void
+ath12k_wifi7_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0);
+ u16 ru_tones;
+
+ ru_tones = u32_get_bits(info0,
+ HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION);
+ ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+ ppdu_info->he_RU[0] = ru_tones;
+}
+
+static void
+ath12k_wifi7_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 info0, info1, value;
+ u16 he_gi = 0, he_ltf = 0;
+
+ info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
+ info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
+
+ ppdu_info->he_mu_flags = 1;
+
+ ppdu_info->he_data1 = HE_MU_FORMAT_TYPE;
+ ppdu_info->he_data1 |=
+ HE_BSS_COLOR_KNOWN |
+ HE_DL_UL_KNOWN |
+ HE_LDPC_EXTRA_SYMBOL_KNOWN |
+ HE_STBC_KNOWN |
+ HE_DATA_BW_RU_KNOWN |
+ HE_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 =
+ HE_GI_KNOWN |
+ HE_LTF_SYMBOLS_KNOWN |
+ HE_PRE_FEC_PADDING_KNOWN |
+ HE_PE_DISAMBIGUITY_KNOWN |
+ HE_TXOP_KNOWN |
+ HE_MIDABLE_PERIODICITY_KNOWN;
+
+ /* data3 */
+ ppdu_info->he_data3 = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_BSS_COLOR);
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_UL_FLAG);
+ value = value << HE_DL_UL_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA);
+ value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC);
+ value = value << HE_STBC_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ /* data4 */
+ value = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_MU_DL_INFO0_SPATIAL_REUSE);
+ ppdu_info->he_data4 = value;
+
+ /* data5 */
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW);
+ ppdu_info->he_data5 = value;
+ ppdu_info->bw = value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_CP_LTF_SIZE);
+ switch (value) {
+ case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ break;
+ case 1:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 2:
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 3:
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ break;
+ }
+
+ ppdu_info->gi = he_gi;
+ value = he_gi << HE_GI_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ value = he_ltf << HE_LTF_SIZE_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB);
+ value = (value << HE_LTF_SYM_SHIFT);
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR);
+ value = value << HE_PRE_FEC_PAD_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM);
+ value = value << HE_PE_DISAMBIGUITY_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ /*data6*/
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION);
+ value = value << HE_DOPPLER_SHIFT;
+ ppdu_info->he_data6 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION);
+ value = value << HE_TXOP_SHIFT;
+ ppdu_info->he_data6 |= value;
+
+ /* HE-MU Flags */
+ /* HE-MU-flags1 */
+ ppdu_info->he_flags1 =
+ HE_SIG_B_MCS_KNOWN |
+ HE_SIG_B_DCM_KNOWN |
+ HE_SIG_B_COMPRESSION_FLAG_1_KNOWN |
+ HE_SIG_B_SYM_NUM_KNOWN |
+ HE_RU_0_KNOWN;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_MCS_OF_SIGB);
+ ppdu_info->he_flags1 |= value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DCM_OF_SIGB);
+ value = value << HE_DCM_FLAG_1_SHIFT;
+ ppdu_info->he_flags1 |= value;
+
+ /* HE-MU-flags2 */
+ ppdu_info->he_flags2 = HE_BW_KNOWN;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW);
+ ppdu_info->he_flags2 |= value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_COMP_MODE_SIGB);
+ value = value << HE_SIG_B_COMPRESSION_FLAG_2_SHIFT;
+ ppdu_info->he_flags2 |= value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_NUM_SIGB_SYMB);
+ value = value - 1;
+ value = value << HE_NUM_SIG_B_SYMBOLS_SHIFT;
+ ppdu_info->he_flags2 |= value;
+
+ ppdu_info->is_stbc = info1 &
+ HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC;
+}
+
+static void
+ath12k_wifi7_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 info0, info1, value;
+ u32 dcm;
+ u8 he_dcm = 0, he_stbc = 0;
+ u16 he_gi = 0, he_ltf = 0;
+
+ ppdu_info->he_flags = 1;
+
+ info0 = __le32_to_cpu(he_sig_a->info0);
+ info1 = __le32_to_cpu(he_sig_a->info1);
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND);
+ if (value == 0)
+ ppdu_info->he_data1 = HE_TRIG_FORMAT_TYPE;
+ else
+ ppdu_info->he_data1 = HE_SU_FORMAT_TYPE;
+
+ ppdu_info->he_data1 |=
+ HE_BSS_COLOR_KNOWN |
+ HE_BEAM_CHANGE_KNOWN |
+ HE_DL_UL_KNOWN |
+ HE_MCS_KNOWN |
+ HE_DCM_KNOWN |
+ HE_CODING_KNOWN |
+ HE_LDPC_EXTRA_SYMBOL_KNOWN |
+ HE_STBC_KNOWN |
+ HE_DATA_BW_RU_KNOWN |
+ HE_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 |=
+ HE_GI_KNOWN |
+ HE_TXBF_KNOWN |
+ HE_PE_DISAMBIGUITY_KNOWN |
+ HE_TXOP_KNOWN |
+ HE_LTF_SYMBOLS_KNOWN |
+ HE_PRE_FEC_PADDING_KNOWN |
+ HE_MIDABLE_PERIODICITY_KNOWN;
+
+ ppdu_info->he_data3 = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR);
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE);
+ value = value << HE_BEAM_CHANGE_SHIFT;
+ ppdu_info->he_data3 |= value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG);
+ value = value << HE_DL_UL_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS);
+ ppdu_info->mcs = value;
+ value = value << HE_TRANSMIT_MCS_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
+ he_dcm = value;
+ value = value << HE_DCM_SHIFT;
+ ppdu_info->he_data3 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING);
+ value = value << HE_CODING_SHIFT;
+ ppdu_info->he_data3 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA);
+ value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT;
+ ppdu_info->he_data3 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC);
+ he_stbc = value;
+ value = value << HE_STBC_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ /* data4 */
+ ppdu_info->he_data4 = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE);
+
+ /* data5 */
+ value = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW);
+ ppdu_info->he_data5 = value;
+ ppdu_info->bw = value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE);
+ switch (value) {
+ case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_1_X;
+ break;
+ case 1:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 2:
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 3:
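+ /* 4x LTF with 0.8 us GI applies only when both DCM and
+ * STBC are set; otherwise 4x LTF pairs with 3.2 us GI.
+ */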
+ if (he_dcm && he_stbc) {
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ } else {
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ }
+ break;
+ }
+ ppdu_info->gi = he_gi;
+ value = he_gi << HE_GI_SHIFT;
+ ppdu_info->he_data5 |= value;
+ value = he_ltf << HE_LTF_SIZE_SHIFT;
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
+ value = (value << HE_LTF_SYM_SHIFT);
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR);
+ value = value << HE_PRE_FEC_PAD_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF);
+ value = value << HE_TXBF_SHIFT;
+ ppdu_info->he_data5 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM);
+ value = value << HE_PE_DISAMBIGUITY_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ /* data6 */
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
+ value++;
+ ppdu_info->he_data6 = value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND);
+ value = value << HE_DOPPLER_SHIFT;
+ ppdu_info->he_data6 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION);
+ value = value << HE_TXOP_SHIFT;
+ ppdu_info->he_data6 |= value;
+
+ ppdu_info->mcs =
+ u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS);
+ ppdu_info->bw =
+ u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW);
+ ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING);
+ ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC);
+ ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF);
+ dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
+ ppdu_info->nss = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS) + 1;
+ ppdu_info->dcm = dcm;
+}
+
+static inline bool
+ath12k_wifi7_dp_mon_hal_rx_is_non_ofdma(const struct hal_rx_u_sig_info *usig_info)
+{
+ u32 ppdu_type_comp_mode = usig_info->ppdu_type_comp_mode;
+ u32 ul_dl = usig_info->ul_dl;
+
+ if ((ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 0) ||
+ (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_OFDMA && ul_dl == 0) ||
+ (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 1))
+ return true;
+
+ return false;
+}
+
+static inline bool
+ath12k_wifi7_dp_mon_hal_rx_is_ofdma(const struct hal_rx_u_sig_info *usig_info)
+{
+ if (usig_info->ppdu_type_comp_mode == 0 && usig_info->ul_dl == 0)
+ return true;
+
+ return false;
+}
+
+static inline bool
+ath12k_wifi7_dp_mon_hal_rx_is_frame_type_ndp(const struct hal_rx_u_sig_info *usig_info)
+{
+ if (usig_info->ppdu_type_comp_mode == 1 &&
+ usig_info->eht_sig_mcs == 0 &&
+ usig_info->num_eht_sig_sym == 0)
+ return true;
+
+ return false;
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_eht_sig_ndp(const struct hal_eht_sig_ndp_cmn_eb *eht_sig_ndp,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE |
+ IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF |
+ IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
+ IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S |
+ IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_S |
+ IEEE80211_RADIOTAP_EHT_KNOWN_CRC1 |
+ IEEE80211_RADIOTAP_EHT_KNOWN_TAIL1;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[0]);
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE,
+ IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+ /* GI and LTF size are indicated separately in the radiotap
+ * header and hence are parsed from another TLV.
+ */
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM,
+ IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC,
+ IEEE80211_RADIOTAP_EHT_DATA0_CRC1_O);
+
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_S);
+ eht->data[0] = cpu_to_le32(data);
+
+ data = __le32_to_cpu(eht->data[7]);
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS,
+ IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED,
+ IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
+ eht->data[7] = cpu_to_le32(data);
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_usig_overflow(const struct hal_eht_sig_usig_overflow *ovflow,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE |
+ IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF |
+ IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM |
+ IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM |
+ IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM |
+ IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_O;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[0]);
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE,
+ IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+
+ /* GI and LTF size are indicated separately in the radiotap
+ * header and hence are parsed from another TLV.
+ */
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM,
+ IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM,
+ IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);
+
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR,
+ IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);
+
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY,
+ IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);
+
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_O);
+ eht->data[0] = cpu_to_le32(data);
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_non_ofdma_users(const struct hal_eht_sig_non_ofdma_cmn_eb *eb,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[7]);
+ data |= ATH12K_LE32_DEC_ENC(eb->info0,
+ HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS,
+ IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);
+ eht->data[7] = cpu_to_le32(data);
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_eht_mumimo_user(const struct hal_eht_sig_mu_mimo *user,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info;
+ u32 user_idx;
+
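+ /* Drop EHT-SIG user fields beyond the radiotap user_info
+ * capacity.
+ */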
+ if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info))
+ return;
+
+ user_idx = eht_info->num_user_info++;
+
+ eht_info->user_info[user_idx] |=
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_KNOWN_M |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_M);
+
+ ppdu_info->mcs = le32_get_bits(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS);
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_eht_non_mumimo_user(const struct hal_eht_sig_non_mu_mimo *user,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info;
+ u32 user_idx;
+
+ if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info))
+ return;
+
+ user_idx = eht_info->num_user_info++;
+
+ eht_info->user_info[user_idx] |=
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);
+
+ ppdu_info->mcs = le32_get_bits(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS);
+
+ ppdu_info->nss = le32_get_bits(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS) + 1;
+}
+
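+/* In the EHT-SIG user field, a compression-mode SU reception with
+ * UL_DL set to 1 carries the MU-MIMO user field format.
+ */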
+static inline bool
+ath12k_wifi7_dp_mon_hal_rx_is_mu_mimo_user(const struct hal_rx_u_sig_info *usig_info)
+{
+ return usig_info->ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_SU &&
+ usig_info->ul_dl == 1;
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_eht_sig_non_ofdma(const void *tlv,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ const struct hal_eht_sig_non_ofdma_cmn_eb *eb = tlv;
+
+ ath12k_wifi7_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info);
+ ath12k_wifi7_dp_mon_hal_rx_parse_non_ofdma_users(eb, ppdu_info);
+
+ if (ath12k_wifi7_dp_mon_hal_rx_is_mu_mimo_user(&ppdu_info->u_sig_info))
+ ath12k_wifi7_dp_mon_hal_rx_parse_eht_mumimo_user(&eb->user_field.mu_mimo,
+ ppdu_info);
+ else
+ ath12k_wifi7_dp_mon_hal_rx_parse_eht_non_mumimo_user(&eb->user_field.n_mu_mimo,
+ ppdu_info);
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_ru_allocation(const struct hal_eht_sig_ofdma_cmn_eb *eb,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ const struct hal_eht_sig_ofdma_cmn_eb1 *ofdma_cmn_eb1 = &eb->eb1;
+ const struct hal_eht_sig_ofdma_cmn_eb2 *ofdma_cmn_eb2 = &eb->eb2;
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ enum ieee80211_radiotap_eht_data ru_123, ru_124, ru_125, ru_126;
+ enum ieee80211_radiotap_eht_data ru_121, ru_122, ru_112, ru_111;
+ u32 data;
+
+ ru_123 = IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3;
+ ru_124 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4;
+ ru_125 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5;
+ ru_126 = IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6;
+ ru_121 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1;
+ ru_122 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2;
+ ru_112 = IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2;
+ ru_111 = IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1;
+
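+ /* Wider bandwidths carry extra RU allocation subfields; each case
+ * below fills its own subfields and falls through to the narrower
+ * bandwidth cases.
+ */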
+ switch (ppdu_info->u_sig_info.bw) {
+ case HAL_EHT_BW_320_2:
+ case HAL_EHT_BW_320_1:
+ data = __le32_to_cpu(eht->data[4]);
+ /* CC1 2::3 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3,
+ ru_123);
+ eht->data[4] = cpu_to_le32(data);
+
+ data = __le32_to_cpu(eht->data[5]);
+ /* CC1 2::4 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4,
+ ru_124);
+
+ /* CC1 2::5 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5,
+ ru_125);
+ eht->data[5] = cpu_to_le32(data);
+
+ data = __le32_to_cpu(eht->data[6]);
+ /* CC1 2::6 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6,
+ ru_126);
+ eht->data[6] = cpu_to_le32(data);
+
+ fallthrough;
+ case HAL_EHT_BW_160:
+ data = __le32_to_cpu(eht->data[3]);
+ /* CC1 2::1 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1,
+ ru_121);
+ /* CC1 2::2 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2,
+ ru_122);
+ eht->data[3] = cpu_to_le32(data);
+
+ fallthrough;
+ case HAL_EHT_BW_80:
+ data = __le32_to_cpu(eht->data[2]);
+ /* CC1 1::2 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2,
+ ru_112);
+ eht->data[2] = cpu_to_le32(data);
+
+ fallthrough;
+ case HAL_EHT_BW_40:
+ fallthrough;
+ case HAL_EHT_BW_20:
+ data = __le32_to_cpu(eht->data[1]);
+ /* CC1 1::1 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1,
+ ru_111);
+ eht->data[1] = cpu_to_le32(data);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+ath12k_wifi7_dp_mon_hal_rx_parse_eht_sig_ofdma(const void *tlv,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ const struct hal_eht_sig_ofdma_cmn_eb *ofdma = tlv;
+
+ ath12k_wifi7_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info);
+ ath12k_wifi7_dp_mon_hal_rx_parse_ru_allocation(ofdma, ppdu_info);
+
+ ath12k_wifi7_dp_mon_hal_rx_parse_eht_non_mumimo_user(&ofdma->user_field.n_mu_mimo,
+ ppdu_info);
+}
+
+static void
+ath12k_wifi7_dp_mon_parse_eht_sig_hdr(struct hal_rx_mon_ppdu_info *ppdu_info,
+ const void *tlv_data)
+{
+ ppdu_info->is_eht = true;
+
+ if (ath12k_wifi7_dp_mon_hal_rx_is_frame_type_ndp(&ppdu_info->u_sig_info))
+ ath12k_wifi7_dp_mon_hal_rx_parse_eht_sig_ndp(tlv_data, ppdu_info);
+ else if (ath12k_wifi7_dp_mon_hal_rx_is_non_ofdma(&ppdu_info->u_sig_info))
+ ath12k_wifi7_dp_mon_hal_rx_parse_eht_sig_non_ofdma(tlv_data, ppdu_info);
+ else if (ath12k_wifi7_dp_mon_hal_rx_is_ofdma(&ppdu_info->u_sig_info))
+ ath12k_wifi7_dp_mon_hal_rx_parse_eht_sig_ofdma(tlv_data, ppdu_info);
+}
+
+static void ath12k_wifi7_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap)
+{
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ *errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+}
+
+static void
+ath12k_wifi7_parse_cmn_usr_info(const struct hal_phyrx_common_user_info *cmn_usr_info,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data, cp_setting, ltf_size;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_GI |
+ IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF;
+ eht->known = cpu_to_le32(known);
+
+ cp_setting = le32_get_bits(cmn_usr_info->info0,
+ HAL_RX_CMN_USR_INFO0_CP_SETTING);
+ ltf_size = le32_get_bits(cmn_usr_info->info0,
+ HAL_RX_CMN_USR_INFO0_LTF_SIZE);
+
+ data = __le32_to_cpu(eht->data[0]);
+ data |= u32_encode_bits(cp_setting, IEEE80211_RADIOTAP_EHT_DATA0_GI);
+ data |= u32_encode_bits(ltf_size, IEEE80211_RADIOTAP_EHT_DATA0_LTF);
+ eht->data[0] = cpu_to_le32(data);
+
+ if (!ppdu_info->ltf_size)
+ ppdu_info->ltf_size = ltf_size;
+ if (!ppdu_info->gi)
+ ppdu_info->gi = cp_setting;
+}
+
+static void
+ath12k_wifi7_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon,
+ const struct hal_rx_msdu_end *msdu_end)
+{
+ ath12k_wifi7_dp_mon_parse_rx_msdu_end_err(__le32_to_cpu(msdu_end->info2),
+ &pmon->err_bitmap);
+ pmon->decap_format = le32_get_bits(msdu_end->info1,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
+static enum hal_rx_mon_status
+ath12k_wifi7_dp_mon_rx_parse_status_tlv(struct ath12k_pdev_dp *dp_pdev,
+ struct ath12k_mon_data *pmon,
+ const struct hal_tlv_64_hdr *tlv)
+{
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ const void *tlv_data = tlv->value;
+ u32 info[7], userid;
+ u16 tlv_tag, tlv_len;
+
+ tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
+ tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
+ userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID);
+
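+ /* A tag change while TLV aggregation is in progress means the
+ * aggregated EHT_SIG TLV is complete; parse it and reset the
+ * aggregation state.
+ */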
+ if (ppdu_info->tlv_aggr.in_progress && ppdu_info->tlv_aggr.tlv_tag != tlv_tag) {
+ ath12k_wifi7_dp_mon_parse_eht_sig_hdr(ppdu_info,
+ ppdu_info->tlv_aggr.buf);
+
+ ppdu_info->tlv_aggr.in_progress = false;
+ ppdu_info->tlv_aggr.cur_len = 0;
+ }
+
+ switch (tlv_tag) {
+ case HAL_RX_PPDU_START: {
+ const struct hal_rx_ppdu_start *ppdu_start = tlv_data;
+
+ u64 ppdu_ts = ath12k_le32hilo_to_u64(ppdu_start->ppdu_start_ts_63_32,
+ ppdu_start->ppdu_start_ts_31_0);
+
+ info[0] = __le32_to_cpu(ppdu_start->info0);
+
+ ppdu_info->ppdu_id = u32_get_bits(info[0],
+ HAL_RX_PPDU_START_INFO0_PPDU_ID);
+
+ info[1] = __le32_to_cpu(ppdu_start->info1);
+ ppdu_info->chan_num = u32_get_bits(info[1],
+ HAL_RX_PPDU_START_INFO1_CHAN_NUM);
+ ppdu_info->freq = u32_get_bits(info[1],
+ HAL_RX_PPDU_START_INFO1_CHAN_FREQ);
+ ppdu_info->ppdu_ts = ppdu_ts;
+
+ if (ppdu_info->ppdu_id != ppdu_info->last_ppdu_id) {
+ ppdu_info->last_ppdu_id = ppdu_info->ppdu_id;
+ ppdu_info->num_users = 0;
+ memset(&ppdu_info->mpdu_fcs_ok_bitmap, 0,
+ HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
+ sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0]));
+ }
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS: {
+ const struct hal_rx_ppdu_end_user_stats *eu_stats = tlv_data;
+ u32 tid_bitmap;
+
+ info[0] = __le32_to_cpu(eu_stats->info0);
+ info[1] = __le32_to_cpu(eu_stats->info1);
+ info[2] = __le32_to_cpu(eu_stats->info2);
+ info[4] = __le32_to_cpu(eu_stats->info4);
+ info[5] = __le32_to_cpu(eu_stats->info5);
+ info[6] = __le32_to_cpu(eu_stats->info6);
+
+ ppdu_info->ast_index =
+ u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX);
+ ppdu_info->fc_valid =
+ u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID);
+ tid_bitmap = u32_get_bits(info[6],
+ HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP);
+ ppdu_info->tid = ffs(tid_bitmap) - 1;
+ ppdu_info->tcp_msdu_count =
+ u32_get_bits(info[4],
+ HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT);
+ ppdu_info->udp_msdu_count =
+ u32_get_bits(info[4],
+ HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT);
+ ppdu_info->other_msdu_count =
+ u32_get_bits(info[5],
+ HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT);
+ ppdu_info->tcp_ack_msdu_count =
+ u32_get_bits(info[5],
+ HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT);
+ ppdu_info->preamble_type =
+ u32_get_bits(info[1],
+ HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE);
+ ppdu_info->num_mpdu_fcs_ok =
+ u32_get_bits(info[1],
+ HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK);
+ ppdu_info->num_mpdu_fcs_err =
+ u32_get_bits(info[0],
+ HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR);
+ ppdu_info->peer_id =
+ u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID);
+
+ switch (ppdu_info->preamble_type) {
+ case HAL_RX_PREAMBLE_11N:
+ ppdu_info->ht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AC:
+ ppdu_info->vht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AX:
+ ppdu_info->he_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11BE:
+ ppdu_info->is_eht = true;
+ break;
+ default:
+ break;
+ }
+
+ if (userid < HAL_MAX_UL_MU_USERS) {
+ struct hal_rx_user_status *rxuser_stats =
+ &ppdu_info->userstats[userid];
+
+ if (ppdu_info->num_mpdu_fcs_ok > 1 ||
+ ppdu_info->num_mpdu_fcs_err > 1)
+ ppdu_info->userstats[userid].ampdu_present = true;
+
+ ppdu_info->num_users += 1;
+
+ ath12k_wifi7_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats);
+ ath12k_wifi7_dp_mon_rx_populate_mu_user_info(eu_stats, ppdu_info,
+ rxuser_stats);
+ }
+ ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]);
+ ppdu_info->mpdu_fcs_ok_bitmap[1] = __le32_to_cpu(eu_stats->rsvd1[1]);
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS_EXT: {
+ const struct hal_rx_ppdu_end_user_stats_ext *eu_stats = tlv_data;
+
+ ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1);
+ ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2);
+ ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3);
+ ppdu_info->mpdu_fcs_ok_bitmap[5] = __le32_to_cpu(eu_stats->info4);
+ ppdu_info->mpdu_fcs_ok_bitmap[6] = __le32_to_cpu(eu_stats->info5);
+ ppdu_info->mpdu_fcs_ok_bitmap[7] = __le32_to_cpu(eu_stats->info6);
+ break;
+ }
+ case HAL_PHYRX_HT_SIG:
+ ath12k_wifi7_dp_mon_parse_ht_sig(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_L_SIG_B:
+ ath12k_wifi7_dp_mon_parse_l_sig_b(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_L_SIG_A:
+ ath12k_wifi7_dp_mon_parse_l_sig_a(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_VHT_SIG_A:
+ ath12k_wifi7_dp_mon_parse_vht_sig_a(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_A_SU:
+ ath12k_wifi7_dp_mon_parse_he_sig_su(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_A_MU_DL:
+ ath12k_wifi7_dp_mon_parse_he_sig_mu(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_B1_MU:
+ ath12k_wifi7_dp_mon_parse_he_sig_b1_mu(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_B2_MU:
+ ath12k_wifi7_dp_mon_parse_he_sig_b2_mu(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_B2_OFDMA:
+ ath12k_wifi7_dp_mon_parse_he_sig_b2_ofdma(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_RSSI_LEGACY: {
+ const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data;
+
+ info[0] = __le32_to_cpu(rssi->info0);
+ info[2] = __le32_to_cpu(rssi->info2);
+
+ /* TODO: the combined RSSI is not accurate in the MU case; the
+ * RSSI for MU needs to be retrieved from the
+ * PHYRX_OTHER_RECEIVE_INFO TLV.
+ */
+ ppdu_info->rssi_comb =
+ u32_get_bits(info[2],
+ HAL_RX_RSSI_LEGACY_INFO_INFO2_RSSI_COMB_PPDU);
+
+ ppdu_info->bw = u32_get_bits(info[0],
+ HAL_RX_RSSI_LEGACY_INFO_INFO0_RX_BW);
+ break;
+ }
+ case HAL_PHYRX_COMMON_USER_INFO: {
+ ath12k_wifi7_parse_cmn_usr_info(tlv_data, ppdu_info);
+ break;
+ }
+ case HAL_RX_PPDU_START_USER_INFO:
+ ath12k_wifi7_dp_mon_hal_rx_parse_user_info(tlv_data, userid, ppdu_info);
+ break;
+
+ case HAL_RXPCU_PPDU_END_INFO: {
+ const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data;
+
+ info[0] = __le32_to_cpu(ppdu_rx_duration->info0);
+ ppdu_info->rx_duration =
+ u32_get_bits(info[0], HAL_RX_PPDU_END_DURATION);
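+ /* The 64-bit TSF timestamp is carried as two 32-bit words in
+ * rsvd0[]
+ */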
+ ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
+ ppdu_info->tsft = (ppdu_info->tsft << 32) |
+ __le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
+ break;
+ }
+ case HAL_RX_MPDU_START: {
+ const struct hal_rx_mpdu_start *mpdu_start = tlv_data;
+ u16 peer_id;
+
+ info[1] = __le32_to_cpu(mpdu_start->info1);
+ peer_id = u32_get_bits(info[1], HAL_RX_MPDU_START_INFO1_PEERID);
+ if (peer_id)
+ ppdu_info->peer_id = peer_id;
+
+ ppdu_info->mpdu_len += u32_get_bits(info[1],
+ HAL_RX_MPDU_START_INFO2_MPDU_LEN);
+ if (userid < HAL_MAX_UL_MU_USERS) {
+ info[0] = __le32_to_cpu(mpdu_start->info0);
+ ppdu_info->userid = userid;
+ ppdu_info->userstats[userid].ampdu_id =
+ u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID);
+ }
+
+ return HAL_RX_MON_STATUS_MPDU_START;
+ }
+ case HAL_RX_MSDU_START:
+ /* TODO: add msdu start parsing logic */
+ break;
+ case HAL_MON_BUF_ADDR:
+ return HAL_RX_MON_STATUS_BUF_ADDR;
+ case HAL_RX_MSDU_END:
+ ath12k_wifi7_dp_mon_parse_status_msdu_end(pmon, tlv_data);
+ return HAL_RX_MON_STATUS_MSDU_END;
+ case HAL_RX_MPDU_END:
+ return HAL_RX_MON_STATUS_MPDU_END;
+ case HAL_PHYRX_GENERIC_U_SIG:
+ ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_hdr(tlv_data, ppdu_info);
+ break;
+ case HAL_PHYRX_GENERIC_EHT_SIG:
+ /* Either aggregation is already in progress or the current
+ * TLV is one of the TLVs that must be aggregated; start a new
+ * aggregation if one is not in progress.
+ */
+ if (!ppdu_info->tlv_aggr.in_progress) {
+ ppdu_info->tlv_aggr.in_progress = true;
+ ppdu_info->tlv_aggr.tlv_tag = tlv_tag;
+ ppdu_info->tlv_aggr.cur_len = 0;
+ }
+
+ ppdu_info->is_eht = true;
+
+ ath12k_wifi7_dp_mon_hal_aggr_tlv(ppdu_info, tlv_len, tlv_data);
+ break;
+ case HAL_DUMMY:
+ return HAL_RX_MON_STATUS_BUF_DONE;
+ case HAL_RX_PPDU_END_STATUS_DONE:
+ case 0:
+ return HAL_RX_MON_STATUS_PPDU_DONE;
+ default:
+ break;
+ }
+
+ return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+}
+
+static int
+ath12k_wifi7_dp_mon_parse_rx_dest_tlv(struct ath12k_pdev_dp *dp_pdev,
+ struct ath12k_mon_data *pmon,
+ enum hal_rx_mon_status hal_status,
+ const void *tlv_data)
+{
+ switch (hal_status) {
+ case HAL_RX_MON_STATUS_MPDU_START:
+ if (WARN_ON_ONCE(pmon->mon_mpdu))
+ break;
+
+ pmon->mon_mpdu = kzalloc(sizeof(*pmon->mon_mpdu), GFP_ATOMIC);
+ if (!pmon->mon_mpdu)
+ return -ENOMEM;
+ break;
+ case HAL_RX_MON_STATUS_BUF_ADDR:
+ return ath12k_dp_mon_parse_status_buf(dp_pdev, pmon, tlv_data);
+ case HAL_RX_MON_STATUS_MPDU_END:
+ /* Queue the MPDU if it holds at least one MSDU; otherwise
+ * free the empty MPDU
+ */
+ if (pmon->mon_mpdu->tail) {
+ pmon->mon_mpdu->tail->next = NULL;
+ list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
+ } else {
+ kfree(pmon->mon_mpdu);
+ }
+ pmon->mon_mpdu = NULL;
+ break;
+ case HAL_RX_MON_STATUS_MSDU_END:
+ pmon->mon_mpdu->decap_format = pmon->decap_format;
+ pmon->mon_mpdu->err_bitmap = pmon->err_bitmap;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
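+/* Reuse the cached TX PPDU info of the requested type if it has not been
+ * used yet; otherwise free it and allocate a fresh one.
+ */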
+static struct dp_mon_tx_ppdu_info *
+ath12k_wifi7_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon,
+ unsigned int ppdu_id,
+ enum dp_mon_tx_ppdu_info_type type)
+{
+ struct dp_mon_tx_ppdu_info *tx_ppdu_info;
+
+ if (type == DP_MON_TX_PROT_PPDU_INFO) {
+ tx_ppdu_info = pmon->tx_prot_ppdu_info;
+
+ if (tx_ppdu_info && !tx_ppdu_info->is_used)
+ return tx_ppdu_info;
+ kfree(tx_ppdu_info);
+ } else {
+ tx_ppdu_info = pmon->tx_data_ppdu_info;
+
+ if (tx_ppdu_info && !tx_ppdu_info->is_used)
+ return tx_ppdu_info;
+ kfree(tx_ppdu_info);
+ }
+
+ /* allocate new tx_ppdu_info */
+ tx_ppdu_info = kzalloc(sizeof(*tx_ppdu_info), GFP_ATOMIC);
+ if (!tx_ppdu_info)
+ return NULL;
+
+ tx_ppdu_info->is_used = 0;
+ tx_ppdu_info->ppdu_id = ppdu_id;
+
+ if (type == DP_MON_TX_PROT_PPDU_INFO)
+ pmon->tx_prot_ppdu_info = tx_ppdu_info;
+ else
+ pmon->tx_data_ppdu_info = tx_ppdu_info;
+
+ return tx_ppdu_info;
+}
+
+static struct dp_mon_tx_ppdu_info *
+ath12k_wifi7_dp_mon_hal_tx_ppdu_info(struct ath12k_mon_data *pmon,
+ u16 tlv_tag)
+{
+ switch (tlv_tag) {
+ case HAL_TX_FES_SETUP:
+ case HAL_TX_FLUSH:
+ case HAL_PCU_PPDU_SETUP_INIT:
+ case HAL_TX_PEER_ENTRY:
+ case HAL_TX_QUEUE_EXTENSION:
+ case HAL_TX_MPDU_START:
+ case HAL_TX_MSDU_START:
+ case HAL_TX_DATA:
+ case HAL_MON_BUF_ADDR:
+ case HAL_TX_MPDU_END:
+ case HAL_TX_LAST_MPDU_FETCHED:
+ case HAL_TX_LAST_MPDU_END:
+ case HAL_COEX_TX_REQ:
+ case HAL_TX_RAW_OR_NATIVE_FRAME_SETUP:
+ case HAL_SCH_CRITICAL_TLV_REFERENCE:
+ case HAL_TX_FES_SETUP_COMPLETE:
+ case HAL_TQM_MPDU_GLOBAL_START:
+ case HAL_SCHEDULER_END:
+ case HAL_TX_FES_STATUS_USER_PPDU:
+ break;
+ case HAL_TX_FES_STATUS_PROT: {
+ if (!pmon->tx_prot_ppdu_info->is_used)
+ pmon->tx_prot_ppdu_info->is_used = true;
+
+ return pmon->tx_prot_ppdu_info;
+ }
+ }
+
+ if (!pmon->tx_data_ppdu_info->is_used)
+ pmon->tx_data_ppdu_info->is_used = true;
+
+ return pmon->tx_data_ppdu_info;
+}
+
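+/* The TX monitor status path does not carry the full frames; the helpers
+ * below synthesize minimal RTS/CTS/QoS-NULL/ACK frames from the status
+ * TLV fields so the exchange is still visible on the monitor interface.
+ */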
+#define MAX_MONITOR_HEADER 512
+#define MAX_DUMMY_FRM_BODY 128
+
+static struct
+sk_buff *ath12k_wifi7_dp_mon_tx_alloc_skb(void)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(MAX_MONITOR_HEADER + MAX_DUMMY_FRM_BODY);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MAX_MONITOR_HEADER);
+
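+ /* Align the frame start to a 4-byte boundary if needed */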
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ skb_pull(skb, PTR_ALIGN(skb->data, 4) - skb->data);
+
+ return skb;
+}
+
+static int
+ath12k_wifi7_dp_mon_tx_gen_cts2self_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct ieee80211_cts *cts;
+
+ skb = ath12k_wifi7_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ cts = (struct ieee80211_cts *)skb->data;
+ memset(cts, 0, MAX_DUMMY_FRM_BODY);
+ cts->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
+ cts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
+ memcpy(cts->ra, tx_ppdu_info->rx_status.addr1, sizeof(cts->ra));
+
+ skb_put(skb, sizeof(*cts));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_wifi7_dp_mon_tx_gen_rts_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct ieee80211_rts *rts;
+
+ skb = ath12k_wifi7_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ rts = (struct ieee80211_rts *)skb->data;
+ memset(rts, 0, MAX_DUMMY_FRM_BODY);
+ rts->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
+ rts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
+ memcpy(rts->ra, tx_ppdu_info->rx_status.addr1, sizeof(rts->ra));
+ memcpy(rts->ta, tx_ppdu_info->rx_status.addr2, sizeof(rts->ta));
+
+ skb_put(skb, sizeof(*rts));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_wifi7_dp_mon_tx_gen_3addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct ieee80211_qos_hdr *qhdr;
+
+ skb = ath12k_wifi7_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ qhdr = (struct ieee80211_qos_hdr *)skb->data;
+ memset(qhdr, 0, MAX_DUMMY_FRM_BODY);
+ qhdr->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
+ qhdr->duration_id = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
+ memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
+ memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN);
+ memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN);
+
+ skb_put(skb, sizeof(*qhdr));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_wifi7_dp_mon_tx_gen_4addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct dp_mon_qosframe_addr4 *qhdr;
+
+ skb = ath12k_wifi7_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ qhdr = (struct dp_mon_qosframe_addr4 *)skb->data;
+ memset(qhdr, 0, MAX_DUMMY_FRM_BODY);
+ qhdr->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
+ qhdr->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
+ memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
+ memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN);
+ memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN);
+ memcpy(qhdr->addr4, tx_ppdu_info->rx_status.addr4, ETH_ALEN);
+
+ skb_put(skb, sizeof(*qhdr));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_wifi7_dp_mon_tx_gen_ack_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct dp_mon_frame_min_one *fbmhdr;
+
+ skb = ath12k_wifi7_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ fbmhdr = (struct dp_mon_frame_min_one *)skb->data;
+ memset(fbmhdr, 0, MAX_DUMMY_FRM_BODY);
+ fbmhdr->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_CFACK);
+ memcpy(fbmhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
+
+ /* set duration zero for ack frame */
+ fbmhdr->duration = 0;
+
+ skb_put(skb, sizeof(*fbmhdr));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_wifi7_dp_mon_tx_gen_prot_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ int ret = 0;
+
+ switch (tx_ppdu_info->rx_status.medium_prot_type) {
+ case DP_MON_TX_MEDIUM_RTS_LEGACY:
+ case DP_MON_TX_MEDIUM_RTS_11AC_STATIC_BW:
+ case DP_MON_TX_MEDIUM_RTS_11AC_DYNAMIC_BW:
+ ret = ath12k_wifi7_dp_mon_tx_gen_rts_frame(tx_ppdu_info);
+ break;
+ case DP_MON_TX_MEDIUM_CTS2SELF:
+ ret = ath12k_wifi7_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info);
+ break;
+ case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_3ADDR:
+ ret = ath12k_wifi7_dp_mon_tx_gen_3addr_qos_null_frame(tx_ppdu_info);
+ break;
+ case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_4ADDR:
+ ret = ath12k_wifi7_dp_mon_tx_gen_4addr_qos_null_frame(tx_ppdu_info);
+ break;
+ }
+
+ return ret;
+}
+
+static enum dp_mon_tx_tlv_status
+ath12k_wifi7_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
+ struct ath12k_mon_data *pmon,
+ u16 tlv_tag, const void *tlv_data,
+ u32 userid)
+{
+ struct dp_mon_tx_ppdu_info *tx_ppdu_info;
+ enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
+ u32 info[7];
+
+ tx_ppdu_info = ath12k_wifi7_dp_mon_hal_tx_ppdu_info(pmon, tlv_tag);
+
+ switch (tlv_tag) {
+ case HAL_TX_FES_SETUP: {
+ const struct hal_tx_fes_setup *tx_fes_setup = tlv_data;
+
+ info[0] = __le32_to_cpu(tx_fes_setup->info0);
+ tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id);
+ tx_ppdu_info->num_users =
+ u32_get_bits(info[0], HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS);
+ status = DP_MON_TX_FES_SETUP;
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_END: {
+ const struct hal_tx_fes_status_end *tx_fes_status_end = tlv_data;
+ u32 tst_15_0, tst_31_16;
+
+ info[0] = __le32_to_cpu(tx_fes_status_end->info0);
+ tst_15_0 =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_15_0);
+ tst_31_16 =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_31_16);
+
+ tx_ppdu_info->rx_status.ppdu_ts = (tst_15_0 | (tst_31_16 << 16));
+ status = DP_MON_TX_FES_STATUS_END;
+ break;
+ }
+
+ case HAL_RX_RESPONSE_REQUIRED_INFO: {
+ const struct hal_rx_resp_req_info *rx_resp_req_info = tlv_data;
+ u32 addr_32;
+ u16 addr_16;
+
+ info[0] = __le32_to_cpu(rx_resp_req_info->info0);
+ info[1] = __le32_to_cpu(rx_resp_req_info->info1);
+ info[2] = __le32_to_cpu(rx_resp_req_info->info2);
+ info[3] = __le32_to_cpu(rx_resp_req_info->info3);
+ info[4] = __le32_to_cpu(rx_resp_req_info->info4);
+ info[5] = __le32_to_cpu(rx_resp_req_info->info5);
+
+ tx_ppdu_info->rx_status.ppdu_id =
+ u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_PPDU_ID);
+ tx_ppdu_info->rx_status.reception_type =
+ u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_RECEPTION_TYPE);
+ tx_ppdu_info->rx_status.rx_duration =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_DURATION);
+ tx_ppdu_info->rx_status.mcs =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_RATE_MCS);
+ tx_ppdu_info->rx_status.sgi =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_SGI);
+ tx_ppdu_info->rx_status.is_stbc =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_STBC);
+ tx_ppdu_info->rx_status.ldpc =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_LDPC);
+ tx_ppdu_info->rx_status.is_ampdu =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_IS_AMPDU);
+ tx_ppdu_info->rx_status.num_users =
+ u32_get_bits(info[2], HAL_RX_RESP_REQ_INFO2_NUM_USER);
+
+ addr_32 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO3_ADDR1_31_0);
+ addr_16 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO4_ADDR1_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
+
+ addr_16 = u32_get_bits(info[4], HAL_RX_RESP_REQ_INFO4_ADDR1_15_0);
+ addr_32 = u32_get_bits(info[5], HAL_RX_RESP_REQ_INFO5_ADDR1_47_16);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2);
+
+ if (tx_ppdu_info->rx_status.reception_type == 0)
+ ath12k_wifi7_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info);
+ status = DP_MON_RX_RESPONSE_REQUIRED_INFO;
+ break;
+ }
+
+ case HAL_PCU_PPDU_SETUP_INIT: {
+ const struct hal_tx_pcu_ppdu_setup_init *ppdu_setup = tlv_data;
+ u32 addr_32;
+ u16 addr_16;
+
+ info[0] = __le32_to_cpu(ppdu_setup->info0);
+ info[1] = __le32_to_cpu(ppdu_setup->info1);
+ info[2] = __le32_to_cpu(ppdu_setup->info2);
+ info[3] = __le32_to_cpu(ppdu_setup->info3);
+ info[4] = __le32_to_cpu(ppdu_setup->info4);
+ info[5] = __le32_to_cpu(ppdu_setup->info5);
+ info[6] = __le32_to_cpu(ppdu_setup->info6);
+
+ /* protection frame address 1 */
+ addr_32 = u32_get_bits(info[1],
+ HAL_TX_PPDU_SETUP_INFO1_PROT_FRAME_ADDR1_31_0);
+ addr_16 = u32_get_bits(info[2],
+ HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR1_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
+
+ /* protection frame address 2 */
+ addr_16 = u32_get_bits(info[2],
+ HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR2_15_0);
+ addr_32 = u32_get_bits(info[3],
+ HAL_TX_PPDU_SETUP_INFO3_PROT_FRAME_ADDR2_47_16);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2);
+
+ /* protection frame address 3 */
+ addr_32 = u32_get_bits(info[4],
+ HAL_TX_PPDU_SETUP_INFO4_PROT_FRAME_ADDR3_31_0);
+ addr_16 = u32_get_bits(info[5],
+ HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR3_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr3);
+
+ /* protection frame address 4 */
+ addr_16 = u32_get_bits(info[5],
+ HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR4_15_0);
+ addr_32 = u32_get_bits(info[6],
+ HAL_TX_PPDU_SETUP_INFO6_PROT_FRAME_ADDR4_47_16);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr4);
+
+ status = u32_get_bits(info[0],
+ HAL_TX_PPDU_SETUP_INFO0_MEDIUM_PROT_TYPE);
+ break;
+ }
+
+ case HAL_TX_QUEUE_EXTENSION: {
+ const struct hal_tx_queue_exten *tx_q_exten = tlv_data;
+
+ info[0] = __le32_to_cpu(tx_q_exten->info0);
+
+ tx_ppdu_info->rx_status.frame_control =
+ u32_get_bits(info[0],
+ HAL_TX_Q_EXT_INFO0_FRAME_CTRL);
+ tx_ppdu_info->rx_status.fc_valid = true;
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_START: {
+ const struct hal_tx_fes_status_start *tx_fes_start = tlv_data;
+
+ info[0] = __le32_to_cpu(tx_fes_start->info0);
+
+ tx_ppdu_info->rx_status.medium_prot_type =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STATUS_START_INFO0_MEDIUM_PROT_TYPE);
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_PROT: {
+ const struct hal_tx_fes_status_prot *tx_fes_status = tlv_data;
+ u32 start_timestamp;
+ u32 end_timestamp;
+
+ info[0] = __le32_to_cpu(tx_fes_status->info0);
+ info[1] = __le32_to_cpu(tx_fes_status->info1);
+
+ start_timestamp =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_15_0);
+ start_timestamp |=
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_31_16) << 16;
+ end_timestamp =
+ u32_get_bits(info[1],
+ HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_15_0);
+ end_timestamp |=
+ u32_get_bits(info[1],
+ HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_31_16) << 16;
+ tx_ppdu_info->rx_status.rx_duration = end_timestamp - start_timestamp;
+
+ ath12k_wifi7_dp_mon_tx_gen_prot_frame(tx_ppdu_info);
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_START_PPDU:
+ case HAL_TX_FES_STATUS_START_PROT: {
+ const struct hal_tx_fes_status_start_prot *tx_fes_stat_start = tlv_data;
+ u64 ppdu_ts;
+
+ info[0] = __le32_to_cpu(tx_fes_stat_start->info0);
+ info[1] = __le32_to_cpu(tx_fes_stat_start->info1);
+
+ tx_ppdu_info->rx_status.ppdu_ts =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_STRT_INFO0_PROT_TS_LOWER_32);
+ ppdu_ts = (u32_get_bits(info[1],
+ HAL_TX_FES_STAT_STRT_INFO1_PROT_TS_UPPER_32));
+ tx_ppdu_info->rx_status.ppdu_ts |= ppdu_ts << 32;
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_USER_PPDU: {
+ const struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu = tlv_data;
+
+ info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0);
+
+ tx_ppdu_info->rx_status.rx_duration =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_USR_PPDU_INFO0_DURATION);
+ break;
+ }
+
+ case HAL_MACTX_HE_SIG_A_SU:
+ ath12k_wifi7_dp_mon_parse_he_sig_su(tlv_data,
+ &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_A_MU_DL:
+ ath12k_wifi7_dp_mon_parse_he_sig_mu(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_B1_MU:
+ ath12k_wifi7_dp_mon_parse_he_sig_b1_mu(tlv_data,
+ &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_B2_MU:
+ ath12k_wifi7_dp_mon_parse_he_sig_b2_mu(tlv_data,
+ &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_B2_OFDMA:
+ ath12k_wifi7_dp_mon_parse_he_sig_b2_ofdma(tlv_data,
+ &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_VHT_SIG_A:
+ ath12k_wifi7_dp_mon_parse_vht_sig_a(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_L_SIG_A:
+ ath12k_wifi7_dp_mon_parse_l_sig_a(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_L_SIG_B:
+ ath12k_wifi7_dp_mon_parse_l_sig_b(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_RX_FRAME_BITMAP_ACK: {
+ const struct hal_rx_frame_bitmap_ack *fbm_ack = tlv_data;
+ u32 addr_32;
+ u16 addr_16;
+
+ info[0] = __le32_to_cpu(fbm_ack->info0);
+ info[1] = __le32_to_cpu(fbm_ack->info1);
+
+ addr_32 = u32_get_bits(info[0],
+ HAL_RX_FBM_ACK_INFO0_ADDR1_31_0);
+ addr_16 = u32_get_bits(info[1],
+ HAL_RX_FBM_ACK_INFO1_ADDR1_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
+
+ ath12k_wifi7_dp_mon_tx_gen_ack_frame(tx_ppdu_info);
+ break;
+ }
+
+ case HAL_MACTX_PHY_DESC: {
+ const struct hal_tx_phy_desc *tx_phy_desc = tlv_data;
+
+ info[0] = __le32_to_cpu(tx_phy_desc->info0);
+ info[1] = __le32_to_cpu(tx_phy_desc->info1);
+ info[2] = __le32_to_cpu(tx_phy_desc->info2);
+ info[3] = __le32_to_cpu(tx_phy_desc->info3);
+
+ tx_ppdu_info->rx_status.beamformed =
+ u32_get_bits(info[0],
+ HAL_TX_PHY_DESC_INFO0_BF_TYPE);
+ tx_ppdu_info->rx_status.preamble_type =
+ u32_get_bits(info[0],
+ HAL_TX_PHY_DESC_INFO0_PREAMBLE_11B);
+ tx_ppdu_info->rx_status.mcs =
+ u32_get_bits(info[1],
+ HAL_TX_PHY_DESC_INFO1_MCS);
+ tx_ppdu_info->rx_status.ltf_size =
+ u32_get_bits(info[3],
+ HAL_TX_PHY_DESC_INFO3_LTF_SIZE);
+ tx_ppdu_info->rx_status.nss =
+ u32_get_bits(info[2],
+ HAL_TX_PHY_DESC_INFO2_NSS);
+ tx_ppdu_info->rx_status.chan_num =
+ u32_get_bits(info[3],
+ HAL_TX_PHY_DESC_INFO3_ACTIVE_CHANNEL);
+ tx_ppdu_info->rx_status.bw =
+ u32_get_bits(info[0],
+ HAL_TX_PHY_DESC_INFO0_BANDWIDTH);
+ break;
+ }
+
+ case HAL_TX_MPDU_START: {
+ struct dp_mon_mpdu *mon_mpdu;
+
+ mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
+ if (!mon_mpdu)
+ return DP_MON_TX_STATUS_PPDU_NOT_DONE;
+ tx_ppdu_info->tx_mon_mpdu = mon_mpdu;
+ status = DP_MON_TX_MPDU_START;
+ break;
+ }
+
+ case HAL_TX_MPDU_END:
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+ break;
+ }
+
+ return status;
+}
+
+static enum dp_mon_tx_tlv_status
+ath12k_wifi7_dp_mon_tx_status_get_num_user(u16 tlv_tag,
+ struct hal_tlv_hdr *tx_tlv,
+ u8 *num_users)
+{
+ u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
+ u32 info0;
+
+ switch (tlv_tag) {
+ case HAL_TX_FES_SETUP: {
+ struct hal_tx_fes_setup *tx_fes_setup =
+ (struct hal_tx_fes_setup *)tx_tlv;
+
+ info0 = __le32_to_cpu(tx_fes_setup->info0);
+
+ *num_users = u32_get_bits(info0, HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS);
+ tlv_status = DP_MON_TX_FES_SETUP;
+ break;
+ }
+
+ case HAL_RX_RESPONSE_REQUIRED_INFO: {
+ /* TODO: need to update *num_users */
+ tlv_status = DP_MON_RX_RESPONSE_REQUIRED_INFO;
+ break;
+ }
+ }
+
+ return tlv_status;
+}
+
+static int
+ath12k_wifi7_dp_mon_rx_deliver(struct ath12k_pdev_dp *dp_pdev,
+ struct dp_mon_mpdu *mon_mpdu,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct napi_struct *napi)
+{
+ struct sk_buff *mon_skb, *skb_next, *header;
+ struct ieee80211_rx_status *rxs = &dp_pdev->rx_status;
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
+
+ mon_skb = ath12k_dp_mon_rx_merg_msdus(dp_pdev, mon_mpdu, ppduinfo, rxs);
+ if (!mon_skb)
+ goto mon_deliver_fail;
+
+ header = mon_skb;
+ rxs->flag = 0;
+
+ if (mon_mpdu->err_bitmap & HAL_RX_MPDU_ERR_FCS)
+ rxs->flag = RX_FLAG_FAILED_FCS_CRC;
+
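+ /* Walk the MSDU chain: every MSDU except the last is flagged
+ * RX_FLAG_AMSDU_MORE, and only the first (header) MSDU clears
+ * RX_FLAG_ALLOW_SAME_PN.
+ */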
+ do {
+ skb_next = mon_skb->next;
+ if (!skb_next)
+ rxs->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rxs->flag |= RX_FLAG_AMSDU_MORE;
+
+ if (mon_skb == header) {
+ header = NULL;
+ rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+ rxs->flag |= RX_FLAG_ONLY_MONITOR;
+
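+ /* RX_FLAG_ONLY_MONITOR was set just above, so decap is left as
+ * DP_RX_DECAP_TYPE_RAW here
+ */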
+ if (!(rxs->flag & RX_FLAG_ONLY_MONITOR))
+ decap = mon_mpdu->decap_format;
+
+ ath12k_dp_mon_update_radiotap(dp_pdev, ppduinfo, mon_skb, rxs);
+ ath12k_dp_mon_rx_deliver_msdu(dp_pdev, napi, mon_skb, ppduinfo,
+ rxs, decap);
+ mon_skb = skb_next;
+ } while (mon_skb);
+ rxs->flag = 0;
+
+ return 0;
+
+mon_deliver_fail:
+ mon_skb = mon_mpdu->head;
+ while (mon_skb) {
+ skb_next = mon_skb->next;
+ dev_kfree_skb_any(mon_skb);
+ mon_skb = skb_next;
+ }
+ return -EINVAL;
+}
+
+static void
+ath12k_wifi7_dp_mon_tx_process_ppdu_info(struct ath12k_pdev_dp *dp_pdev,
+ struct napi_struct *napi,
+ struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct dp_mon_mpdu *tmp, *mon_mpdu;
+
+ list_for_each_entry_safe(mon_mpdu, tmp,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+
+ if (mon_mpdu->head)
+ ath12k_wifi7_dp_mon_rx_deliver(dp_pdev, mon_mpdu,
+ &tx_ppdu_info->rx_status, napi);
+
+ kfree(mon_mpdu);
+ }
+}
+
+enum hal_rx_mon_status
+ath12k_wifi7_dp_mon_tx_parse_mon_status(struct ath12k_pdev_dp *dp_pdev,
+ struct ath12k_mon_data *pmon,
+ struct sk_buff *skb,
+ struct napi_struct *napi,
+ u32 ppdu_id)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_base *ab = dp->ab;
+ struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info, *tx_data_ppdu_info;
+ struct hal_tlv_hdr *tlv;
+ u8 *ptr = skb->data;
+ u16 tlv_tag;
+ u16 tlv_len;
+ u32 tlv_userid = 0;
+ u8 num_user = 0;
+ u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
+
+ tx_prot_ppdu_info =
+ ath12k_wifi7_dp_mon_tx_get_ppdu_info(pmon, ppdu_id,
+ DP_MON_TX_PROT_PPDU_INFO);
+ if (!tx_prot_ppdu_info)
+ return -ENOMEM;
+
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
+
+ tlv_status = ath12k_wifi7_dp_mon_tx_status_get_num_user(tlv_tag, tlv,
+ &num_user);
+ if (tlv_status == DP_MON_TX_STATUS_PPDU_NOT_DONE || !num_user)
+ return -EINVAL;
+
+ tx_data_ppdu_info =
+ ath12k_wifi7_dp_mon_tx_get_ppdu_info(pmon, ppdu_id,
+ DP_MON_TX_DATA_PPDU_INFO);
+ if (!tx_data_ppdu_info)
+ return -ENOMEM;
+
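+ /* Walk the status TLVs until the FES status end TLV is seen or
+ * the end of the buffer is reached
+ */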
+ do {
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
+ tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
+ tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
+
+ tlv_status = ath12k_wifi7_dp_mon_tx_parse_status_tlv(ab, pmon,
+ tlv_tag, ptr,
+ tlv_userid);
+ ptr += tlv_len;
+ ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+ if ((ptr - skb->data) >= DP_TX_MONITOR_BUF_SIZE)
+ break;
+ } while (tlv_status != DP_MON_TX_FES_STATUS_END);
+
+ ath12k_wifi7_dp_mon_tx_process_ppdu_info(dp_pdev, napi, tx_data_ppdu_info);
+ ath12k_wifi7_dp_mon_tx_process_ppdu_info(dp_pdev, napi, tx_prot_ppdu_info);
+
+ return tlv_status;
+}
+
+static void
+ath12k_wifi7_dp_mon_next_link_desc_get(struct ath12k_base *ab,
+ struct hal_rx_msdu_link *msdu_link,
+ dma_addr_t *paddr, u32 *sw_cookie, u8 *rbm,
+ struct ath12k_buffer_addr **pp_buf_addr_info)
+{
+ struct ath12k_buffer_addr *buf_addr_info;
+
+ buf_addr_info = &msdu_link->buf_addr_info;
+
+ ath12k_wifi7_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
+
+ *pp_buf_addr_info = buf_addr_info;
+}
+
+static u32
+ath12k_wifi7_dp_rx_mon_mpdu_pop(struct ath12k *ar, int mac_id,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu,
+ struct list_head *used_list,
+ u32 *npackets, u32 *ppdu_id)
+{
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_buffer_addr *p_buf_addr_info, *p_last_buf_addr_info;
+ u32 msdu_ppdu_id = 0, msdu_cnt = 0, total_len = 0, frag_len = 0;
+ u32 rx_buf_size, rx_pkt_offset, sw_cookie;
+ bool is_frag, is_first_msdu, drop_mpdu = false;
+ struct hal_reo_entrance_ring *ent_desc =
+ (struct hal_reo_entrance_ring *)ring_entry;
+ u32 rx_bufs_used = 0, i = 0, desc_bank = 0;
+ struct hal_rx_desc *rx_desc, *tail_rx_desc;
+ struct hal_rx_msdu_link *msdu_link_desc;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_buffer_addr buf_info;
+ struct hal_rx_msdu_list msdu_list;
+ struct ath12k_skb_rxcb *rxcb;
+ u16 num_msdus = 0;
+ dma_addr_t paddr;
+ u8 rbm;
+
+ ath12k_wifi7_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
+ &sw_cookie,
+ &p_last_buf_addr_info,
+ &rbm,
+ &msdu_cnt);
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ if (le32_get_bits(ent_desc->info1,
+ HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ u8 rxdma_err = le32_get_bits(ent_desc->info1,
+ HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+
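+ /* Walk the chain of MSDU link descriptors, popping each MSDU of
+ * the requested PPDU and returning every link descriptor to the
+ * idle list.
+ */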
+ do {
+ if (pmon->mon_last_linkdesc_paddr == paddr) {
+ pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
+ spin_unlock_bh(&pmon->mon_lock);
+ return rx_bufs_used;
+ }
+
+ desc_bank = u32_get_bits(sw_cookie, DP_LINK_DESC_BANK_MASK);
+ msdu_link_desc =
+ dp->link_desc_banks[desc_bank].vaddr +
+ (paddr - dp->link_desc_banks[desc_bank].paddr);
+
+ ath12k_wifi7_hal_rx_msdu_list_get(ar, msdu_link_desc, &msdu_list,
+ &num_msdus);
+ desc_info = ath12k_dp_get_rx_desc(ar->ab->dp,
+ msdu_list.sw_cookie[num_msdus - 1]);
+ tail_rx_desc = (struct hal_rx_desc *)(desc_info->skb)->data;
+
+ for (i = 0; i < num_msdus; i++) {
+ u32 l2_hdr_offset;
+
+ if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "i %d last_cookie %d is same\n",
+ i, pmon->mon_last_buf_cookie);
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dup_mon_buf_cnt++;
+ continue;
+ }
+
+ desc_info =
+ ath12k_dp_get_rx_desc(ar->ab->dp, msdu_list.sw_cookie[i]);
+ msdu = desc_info->skb;
+
+ if (!msdu) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "msdu_pop: invalid msdu (%d/%d)\n",
+ i + 1, num_msdus);
+ goto next_msdu;
+ }
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ if (rxcb->paddr != msdu_list.paddr[i]) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "i %d paddr %lx != %lx\n",
+ i, (unsigned long)rxcb->paddr,
+ (unsigned long)msdu_list.paddr[i]);
+ drop_mpdu = true;
+ continue;
+ }
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "i %d drop msdu %p *ppdu_id %x\n",
+ i, msdu, *ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, tail_rx_desc);
+ if (is_first_msdu) {
+ if (!ath12k_wifi7_dp_rxdesc_mpdu_valid(ar->ab,
+ rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ pmon->mon_last_linkdesc_paddr = paddr;
+ goto next_msdu;
+ }
+ msdu_ppdu_id =
+ ath12k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
+
+ if (ath12k_dp_mon_comp_ppduid(msdu_ppdu_id,
+ ppdu_id)) {
+ spin_unlock_bh(&pmon->mon_lock);
+ return rx_bufs_used;
+ }
+ pmon->mon_last_linkdesc_paddr = paddr;
+ is_first_msdu = false;
+ }
+ ath12k_wifi7_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+ if (ath12k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
+ dev_kfree_skb_any(msdu);
+ goto next_msdu;
+ }
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
+ rx_bufs_used++;
+ desc_info->skb = NULL;
+ list_add_tail(&desc_info->list, used_list);
+ }
+
+ ath12k_wifi7_hal_rx_buf_addr_info_set(&buf_info, paddr,
+ sw_cookie, rbm);
+
+ ath12k_wifi7_dp_mon_next_link_desc_get(ab,
+ msdu_link_desc, &paddr,
+ &sw_cookie, &rbm,
+ &p_buf_addr_info);
+
+ ath12k_dp_arch_rx_link_desc_return(ar->ab->dp, &buf_info,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (paddr && msdu_cnt);
+
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ if (msdu_cnt == 0)
+ *npackets = 1;
+
+ return rx_bufs_used;
+}
+
+/* The destination ring processing is considered stuck if the destination
+ * ring does not move while the status ring moves 16 PPDUs. As a workaround,
+ * the destination ring processing skips the stuck PPDU.
+ */
+#define MON_DEST_RING_STUCK_MAX_CNT 16
+
+static void
+ath12k_wifi7_dp_rx_mon_dest_process(struct ath12k *ar, int mac_id,
+ u32 quota, struct napi_struct *napi)
+{
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+ struct ath12k_pdev_mon_stats *rx_mon_stats;
+ u32 ppdu_id, rx_bufs_used = 0, ring_id;
+ u32 mpdu_rx_bufs_used, npackets = 0;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ void *ring_entry, *mon_dst_srng;
+ struct dp_mon_mpdu *tmp_mpdu;
+ LIST_HEAD(rx_desc_used_list);
+ struct hal_srng *srng;
+
+ ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
+ srng = &ab->hal.srng_list[ring_id];
+ mon_dst_srng = srng;
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, mon_dst_srng);
+
+ ppdu_id = pmon->mon_ppdu_info.ppdu_id;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ while ((ring_entry = ath12k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ struct sk_buff *head_msdu, *tail_msdu;
+
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ mpdu_rx_bufs_used = ath12k_wifi7_dp_rx_mon_mpdu_pop(ar, mac_id,
+ ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ &rx_desc_used_list,
+ &npackets,
+ &ppdu_id);
+
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (mpdu_rx_bufs_used) {
+ dp->mon_dest_ring_stuck_cnt = 0;
+ } else {
+ dp->mon_dest_ring_stuck_cnt++;
+ rx_mon_stats->dest_mon_not_reaped++;
+ }
+
+ if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
+ rx_mon_stats->dest_mon_stuck++;
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
+ pmon->mon_ppdu_info.ppdu_id, ppdu_id,
+ dp->mon_dest_ring_stuck_cnt,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ spin_lock_bh(&pmon->mon_lock);
+ pmon->mon_ppdu_info.ppdu_id = ppdu_id;
+ spin_unlock_bh(&pmon->mon_lock);
+ continue;
+ }
+
+ if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
+ spin_lock_bh(&pmon->mon_lock);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ spin_unlock_bh(&pmon->mon_lock);
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
+ ppdu_id, pmon->mon_ppdu_info.ppdu_id,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ break;
+ }
+
+ if (head_msdu && tail_msdu) {
+ tmp_mpdu = kzalloc(sizeof(*tmp_mpdu), GFP_ATOMIC);
+ if (!tmp_mpdu)
+ break;
+
+ tmp_mpdu->head = head_msdu;
+ tmp_mpdu->tail = tail_msdu;
+ tmp_mpdu->err_bitmap = pmon->err_bitmap;
+ tmp_mpdu->decap_format = pmon->decap_format;
+ ath12k_wifi7_dp_mon_rx_deliver(&ar->dp, tmp_mpdu,
+ &pmon->mon_ppdu_info, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ kfree(tmp_mpdu);
+ }
+
+ ring_entry = ath12k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ }
+ ath12k_hal_srng_access_end(ar->ab, mon_dst_srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (rx_bufs_used) {
+ rx_mon_stats->dest_ppdu_done++;
+ ath12k_dp_rx_bufs_replenish(ar->ab->dp,
+ &dp->rx_refill_buf_ring,
+ &rx_desc_used_list,
+ rx_bufs_used);
+ }
+}
+
+static enum dp_mon_status_buf_state
+ath12k_wifi7_dp_rx_mon_buf_done(struct ath12k_base *ab, struct hal_srng *srng,
+ struct dp_rxdma_mon_ring *rx_ring)
+{
+ struct ath12k_skb_rxcb *rxcb;
+ struct hal_tlv_64_hdr *tlv;
+ struct sk_buff *skb;
+ void *status_desc;
+ dma_addr_t paddr;
+ u32 cookie;
+ int buf_id;
+ u8 rbm;
+
+ status_desc = ath12k_hal_srng_src_next_peek(ab, srng);
+ if (!status_desc)
+ return DP_MON_STATUS_NO_DMA;
+
+ ath12k_wifi7_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
+
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb)
+ return DP_MON_STATUS_NO_DMA;
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_64_hdr *)skb->data;
+ if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) != HAL_RX_STATUS_BUFFER_DONE)
+ return DP_MON_STATUS_NO_DMA;
+
+ return DP_MON_STATUS_REPLINISH;
+}
+
+static enum hal_rx_mon_status
+ath12k_wifi7_dp_mon_parse_rx_dest(struct ath12k_pdev_dp *dp_pdev,
+ struct ath12k_mon_data *pmon,
+ struct sk_buff *skb)
+{
+ struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
+ struct hal_tlv_64_hdr *tlv;
+ struct ath12k_skb_rxcb *rxcb;
+ enum hal_rx_mon_status hal_status;
+ u16 tlv_tag, tlv_len;
+ u8 *ptr = skb->data;
+
+ do {
+ tlv = (struct hal_tlv_64_hdr *)ptr;
+ tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
+
+ /* The actual length of PPDU_END is the combined length of many PHY
+ * TLVs that follow. Skip the TLV header and the
+ * rx_rxpcu_classification_overview that follows the header to get to
+ * the next TLV.
+ */
+
+ if (tlv_tag == HAL_RX_PPDU_END)
+ tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
+ else
+ tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
+
+ hal_status = ath12k_wifi7_dp_mon_rx_parse_status_tlv(dp_pdev, pmon,
+ tlv);
+
+ if (ar->monitor_started && ar->ab->hw_params->rxdma1_enable &&
+ ath12k_wifi7_dp_mon_parse_rx_dest_tlv(dp_pdev, pmon, hal_status,
+ tlv->value))
+ return HAL_RX_MON_STATUS_PPDU_DONE;
+
+ ptr += sizeof(*tlv) + tlv_len;
+ ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);
+
+ if ((ptr - skb->data) > skb->len)
+ break;
+
+ } while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) ||
+ (hal_status == HAL_RX_MON_STATUS_BUF_ADDR) ||
+ (hal_status == HAL_RX_MON_STATUS_MPDU_START) ||
+ (hal_status == HAL_RX_MON_STATUS_MPDU_END) ||
+ (hal_status == HAL_RX_MON_STATUS_MSDU_END));
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (rxcb->is_end_of_ppdu)
+ hal_status = HAL_RX_MON_STATUS_PPDU_DONE;
+
+ return hal_status;
+}
+
+static enum hal_rx_mon_status
+ath12k_wifi7_dp_mon_rx_parse_mon_status(struct ath12k_pdev_dp *dp_pdev,
+ struct ath12k_mon_data *pmon,
+ struct sk_buff *skb,
+ struct napi_struct *napi)
+{
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ struct dp_mon_mpdu *tmp;
+ struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+ enum hal_rx_mon_status hal_status;
+
+ hal_status = ath12k_wifi7_dp_mon_parse_rx_dest(dp_pdev, pmon, skb);
+ if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE)
+ return hal_status;
+
+ list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+
+ if (mon_mpdu->head && mon_mpdu->tail)
+ ath12k_wifi7_dp_mon_rx_deliver(dp_pdev, mon_mpdu,
+ ppdu_info, napi);
+
+ kfree(mon_mpdu);
+ }
+
+ return hal_status;
+}
+
+static int
+ath12k_wifi7_dp_rx_reap_mon_status_ring(struct ath12k_base *ab, int mac_id,
+ int *budget, struct sk_buff_head *skb_list)
+{
+ const struct ath12k_hw_hal_params *hal_params;
+ int buf_id, srng_id, num_buffs_reaped = 0;
+ enum dp_mon_status_buf_state reap_status;
+ struct dp_rxdma_mon_ring *rx_ring;
+ struct ath12k_mon_data *pmon;
+ struct ath12k_skb_rxcb *rxcb;
+ struct hal_tlv_64_hdr *tlv;
+ void *rx_mon_status_desc;
+ struct hal_srng *srng;
+ struct ath12k_dp *dp;
+ struct sk_buff *skb;
+ struct ath12k *ar;
+ dma_addr_t paddr;
+ u32 cookie;
+ u8 rbm;
+
+ ar = ab->pdevs[ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id)].ar;
+ dp = ath12k_ab_to_dp(ab);
+ pmon = &ar->dp.mon_data;
+ srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+ rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
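+ /* Reap completed status buffers and replace each reaped entry
+ * with a freshly allocated buffer before advancing the ring.
+ */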
+ while (*budget) {
+ *budget -= 1;
+ rx_mon_status_desc = ath12k_hal_srng_src_peek(ab, srng);
+ if (!rx_mon_status_desc) {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ break;
+ }
+ ath12k_wifi7_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
+ &cookie, &rbm);
+ if (paddr) {
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb) {
+ ath12k_warn(ab, "rx monitor status with invalid buf_id %d\n",
+ buf_id);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_64_hdr *)skb->data;
+ if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) !=
+ HAL_RX_STATUS_BUFFER_DONE) {
+ pmon->buf_state = DP_MON_STATUS_NO_DMA;
+ ath12k_warn(ab,
+ "mon status DONE not set %llx, buf_id %d\n",
+ le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG),
+ buf_id);
+ /* The RxDMA status done bit might not be set even
+ * though the tail pointer has been moved by HW.
+ *
+ * If the done status is missing:
+ * 1. As per the MAC team's suggestion, when the
+ * HP + 1 entry is peeked and its DMA is not
+ * done but the HP + 2 entry's DMA done is
+ * set, skip the HP + 1 entry and start
+ * processing it in the next interrupt.
+ * 2. If the HP + 2 entry's DMA done is not set,
+ * poll on the HP + 1 entry until its DMA done
+ * is set, i.e. check the status of the same
+ * buffer again on the next
+ * dp_rx_mon_status_srng_process call.
+ */
+ reap_status = ath12k_wifi7_dp_rx_mon_buf_done(ab, srng,
+ rx_ring);
+ if (reap_status == DP_MON_STATUS_NO_DMA)
+ continue;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
+ }
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ if (ath12k_dp_pkt_set_pktlen(skb, RX_MON_STATUS_BUF_SIZE)) {
+ dev_kfree_skb_any(skb);
+ goto move_next;
+ }
+ __skb_queue_tail(skb_list, skb);
+ } else {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ }
+move_next:
+ skb = ath12k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id);
+ hal_params = ab->hal.hal_params;
+
+ if (!skb) {
+ ath12k_warn(ab, "failed to alloc buffer for status ring\n");
+ ath12k_wifi7_hal_rx_buf_addr_info_set(rx_mon_status_desc,
+ 0, 0,
+ hal_params->rx_buf_rbm);
+ num_buffs_reaped++;
+ break;
+ }
+ rxcb = ATH12K_SKB_RXCB(skb);
+
+ cookie = u32_encode_bits(mac_id, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
+ u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ ath12k_wifi7_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
+ cookie, hal_params->rx_buf_rbm);
+ ath12k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return num_buffs_reaped;
+}
+
+static int
+__ath12k_wifi7_dp_mon_process_ring(struct ath12k *ar, int mac_id,
+ struct napi_struct *napi, int *budget)
+{
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+ struct ath12k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff_head skb_list;
+ int num_buffs_reaped;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&skb_list);
+
+ num_buffs_reaped = ath12k_wifi7_dp_rx_reap_mon_status_ring(ar->ab, mac_id,
+ budget, &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+
+ hal_status = ath12k_wifi7_dp_mon_parse_rx_dest(&ar->dp, pmon, skb);
+
+ if (ar->monitor_started &&
+ pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+ hal_status == HAL_RX_MON_STATUS_PPDU_DONE) {
+ rx_mon_stats->status_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
+ ath12k_wifi7_dp_rx_mon_dest_process(ar, mac_id, *budget, napi);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+exit:
+ return num_buffs_reaped;
+}
+
+static int
+ath12k_wifi7_dp_mon_srng_process(struct ath12k_pdev_dp *pdev_dp, int *budget,
+ struct napi_struct *napi)
+{
+ struct ath12k_dp *dp = pdev_dp->dp;
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ struct hal_mon_dest_desc *mon_dst_desc;
+ struct sk_buff *skb;
+ struct ath12k_skb_rxcb *rxcb;
+ struct dp_srng *mon_dst_ring;
+ struct hal_srng *srng;
+ struct dp_rxdma_mon_ring *buf_ring;
+ struct ath12k_dp_link_peer *peer;
+ struct sk_buff_head skb_list;
+ u64 cookie;
+ int num_buffs_reaped = 0, srng_id, buf_id;
+ u32 hal_status, end_offset, info0, end_reason;
+ u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, pdev_dp->mac_id);
+
+ __skb_queue_head_init(&skb_list);
+ srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, pdev_idx);
+ mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
+ buf_ring = &dp->rxdma_mon_buf_ring;
+
+ srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
+ spin_lock_bh(&srng->lock);
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (likely(*budget)) {
+ mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
+ if (unlikely(!mon_dst_desc))
+ break;
+
+ /* In case of empty descriptor, the cookie in the ring descriptor
+ * is invalid. Therefore, this entry is skipped, and ring processing
+ * continues.
+ */
+ info0 = le32_to_cpu(mon_dst_desc->info0);
+ if (u32_get_bits(info0, HAL_MON_DEST_INFO0_EMPTY_DESC))
+ goto move_next;
+
+ cookie = le32_to_cpu(mon_dst_desc->cookie);
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ skb = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!skb)) {
+ ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
+ buf_id);
+ goto move_next;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ end_reason = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_REASON);
+
+		/* HAL_MON_FLUSH_DETECTED implies that an rx flush was received
+		 * at the end of the rx PPDU and HAL_MON_PPDU_TRUNCATED implies
+		 * that the PPDU got truncated due to a system level error. In
+		 * both cases, the buffer data can be discarded.
+		 */
+ if ((end_reason == HAL_MON_FLUSH_DETECTED) ||
+ (end_reason == HAL_MON_PPDU_TRUNCATED)) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Monitor dest descriptor end reason %d", end_reason);
+ dev_kfree_skb_any(skb);
+ goto move_next;
+ }
+
+		/* Decrement the budget when the ring descriptor carries
+		 * HAL_MON_END_OF_PPDU, so that one PPDU worth of data is
+		 * always reaped. This helps to efficiently utilize the NAPI
+		 * budget.
+		 */
+ if (end_reason == HAL_MON_END_OF_PPDU) {
+ *budget -= 1;
+ rxcb->is_end_of_ppdu = true;
+ }
+
+ end_offset = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_OFFSET);
+ if (likely(end_offset <= DP_RX_BUFFER_SIZE)) {
+ skb_put(skb, end_offset);
+ } else {
+ ath12k_warn(ab,
+ "invalid offset on mon stats destination %u\n",
+ end_offset);
+ skb_put(skb, DP_RX_BUFFER_SIZE);
+ }
+
+ __skb_queue_tail(&skb_list, skb);
+
+move_next:
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+ ath12k_hal_srng_dst_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ if (!num_buffs_reaped)
+ return 0;
+
+	/* In some cases, one PPDU worth of data can be spread across multiple
+	 * NAPI schedules. To avoid losing the already parsed ppdu_info
+	 * information, skip the memset of the ppdu_info structure and
+	 * continue processing it.
+	 */
+ if (!ppdu_info->ppdu_continuation)
+ ath12k_wifi7_dp_mon_rx_memset_ppdu_info(ppdu_info);
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ hal_status = ath12k_wifi7_dp_mon_rx_parse_mon_status(pdev_dp, pmon,
+ skb, napi);
+ if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ ppdu_info->ppdu_continuation = true;
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ if (ppdu_info->peer_id == HAL_INVALID_PEERID)
+ goto free_skb;
+
+ rcu_read_lock();
+ peer = ath12k_dp_link_peer_find_by_peerid(pdev_dp, ppdu_info->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "failed to find the peer with monitor peer_id %d\n",
+ ppdu_info->peer_id);
+ goto next_skb;
+ }
+
+ if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
+ ath12k_dp_mon_rx_update_peer_su_stats(peer, ppdu_info);
+ } else if ((ppdu_info->fc_valid) &&
+ (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
+ ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
+ ath12k_dp_mon_rx_update_peer_mu_stats(ab, ppdu_info);
+ }
+
+next_skb:
+ rcu_read_unlock();
+free_skb:
+ dev_kfree_skb_any(skb);
+ ath12k_wifi7_dp_mon_rx_memset_ppdu_info(ppdu_info);
+ }
+
+ return num_buffs_reaped;
+}
+
+int ath12k_wifi7_dp_mon_process_ring(struct ath12k_dp *dp, int mac_id,
+ struct napi_struct *napi, int budget,
+ enum dp_monitor_mode monitor_mode)
+{
+ u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(dp->hw_params, mac_id);
+ struct ath12k_pdev_dp *dp_pdev;
+ struct ath12k *ar;
+ int num_buffs_reaped = 0;
+
+ rcu_read_lock();
+
+ dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx);
+ if (!dp_pdev) {
+ rcu_read_unlock();
+ return 0;
+ }
+
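+	/* pdevs with rxdma1 enabled have a dedicated monitor destination
+	 * ring and are serviced via the mon srng path; otherwise the monitor
+	 * status ring is reaped and parsed directly.
+	 */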
+ if (dp->hw_params->rxdma1_enable) {
+ if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
+ num_buffs_reaped = ath12k_wifi7_dp_mon_srng_process(dp_pdev,
+ &budget,
+ napi);
+ } else {
+ ar = ath12k_pdev_dp_to_ar(dp_pdev);
+
+ if (ar->monitor_started)
+ num_buffs_reaped =
+ __ath12k_wifi7_dp_mon_process_ring(ar, mac_id, napi,
+ &budget);
+ }
+
+ rcu_read_unlock();
+
+ return num_buffs_reaped;
+}
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/dp_mon.h b/drivers/net/wireless/ath/ath12k/wifi7/dp_mon.h
new file mode 100644
index 000000000000..148d1e0b70fe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/dp_mon.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_DP_MON_WIFI7_H
+#define ATH12K_DP_MON_WIFI7_H
+
+#include "hw.h"
+
+enum dp_monitor_mode;
+
+int ath12k_wifi7_dp_mon_process_ring(struct ath12k_dp *dp, int mac_id,
+ struct napi_struct *napi, int budget,
+ enum dp_monitor_mode monitor_mode);
+enum hal_rx_mon_status
+ath12k_wifi7_dp_mon_tx_parse_mon_status(struct ath12k_pdev_dp *dp_pdev,
+ struct ath12k_mon_data *pmon,
+ struct sk_buff *skb,
+ struct napi_struct *napi,
+ u32 ppdu_id);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c b/drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
new file mode 100644
index 000000000000..7450938adf65
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c
@@ -0,0 +1,2246 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "dp_rx.h"
+#include "../dp_tx.h"
+#include "../peer.h"
+#include "hal_qcn9274.h"
+#include "hal_wcn7850.h"
+#include "hal_qcc2072.h"
+
+static u16 ath12k_wifi7_dp_rx_get_peer_id(struct ath12k_dp *dp,
+ enum ath12k_peer_metadata_version ver,
+ __le32 peer_metadata)
+{
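+	/* Each peer metadata version packs the peer id into a different bit
+	 * field; unknown versions are warned about and decoded using the v0
+	 * layout.
+	 */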
+ switch (ver) {
+ default:
+ ath12k_warn(dp->ab, "Unknown peer metadata version: %d", ver);
+ fallthrough;
+ case ATH12K_PEER_METADATA_V0:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V0_PEER_ID);
+ case ATH12K_PEER_METADATA_V1:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1_PEER_ID);
+ case ATH12K_PEER_METADATA_V1A:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1A_PEER_ID);
+ case ATH12K_PEER_METADATA_V1B:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1B_PEER_ID);
+ }
+}
+
+void ath12k_wifi7_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
+ dma_addr_t paddr)
+{
+ struct ath12k_reo_queue_ref *qref;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ bool ml_peer = false;
+
+ if (!ab->hw_params->reoq_lut_support)
+ return;
+
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
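+	/* The REO queue LUT holds IEEE80211_NUM_TIDS + 1 consecutive entries
+	 * per peer (one per data TID plus the non-QoS TID), hence the
+	 * peer_id * (IEEE80211_NUM_TIDS + 1) + tid indexing below.
+	 */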
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+
+ qref->info0 = u32_encode_bits(lower_32_bits(paddr),
+ BUFFER_ADDR_INFO0_ADDR);
+ qref->info1 = u32_encode_bits(upper_32_bits(paddr),
+ BUFFER_ADDR_INFO1_ADDR) |
+ u32_encode_bits(tid, DP_REO_QREF_NUM);
+
+ ath12k_hal_reo_shared_qaddr_cache_clear(ab);
+}
+
+void ath12k_wifi7_peer_rx_tid_qref_reset(struct ath12k_base *ab,
+ u16 peer_id, u16 tid)
+{
+ struct ath12k_reo_queue_ref *qref;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ bool ml_peer = false;
+
+ if (!ab->hw_params->reoq_lut_support)
+ return;
+
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+
+ qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
+ qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
+ u32_encode_bits(tid, DP_REO_QREF_NUM);
+}
+
+void ath12k_wifi7_dp_rx_peer_tid_delete(struct ath12k_base *ab,
+ struct ath12k_dp_link_peer *peer, u8 tid)
+{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+
+ if (!(peer->rx_tid_active_bitmask & (1 << tid)))
+ return;
+
+ ath12k_dp_mark_tid_as_inactive(dp, peer->peer_id, tid);
+ ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
+}
+
+int ath12k_wifi7_dp_rx_link_desc_return(struct ath12k_dp *dp,
+ struct ath12k_buffer_addr *buf_addr_info,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct hal_wbm_release_ring *desc;
+ struct hal_srng *srng;
+ int ret = 0;
+
+ srng = &dp->hal->srng_list[dp->wbm_desc_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
+ ath12k_wifi7_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action);
+
+exit:
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+int ath12k_wifi7_dp_reo_cmd_send(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd,
+ void (*cb)(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status))
+{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_dp_rx_reo_cmd *dp_cmd;
+ struct hal_srng *cmd_ring;
+ int cmd_num;
+
+ cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ cmd_num = ath12k_wifi7_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
+
+	/* cmd_num starts from 1; on failure, return the error code */
+ if (cmd_num < 0)
+ return cmd_num;
+
+	/* reo cmd ring descriptors have cmd_num starting from 1 */
+ if (cmd_num == 0)
+ return -EINVAL;
+
+ if (!cb)
+ return 0;
+
+ /* Can this be optimized so that we keep the pending command list only
+ * for tid delete command to free up the resource on the command status
+ * indication?
+ */
+ dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
+
+ if (!dp_cmd)
+ return -ENOMEM;
+
+ memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
+ dp_cmd->cmd_num = cmd_num;
+ dp_cmd->handler = cb;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return 0;
+}
+
+int ath12k_wifi7_peer_rx_tid_reo_update(struct ath12k_dp *dp,
+ struct ath12k_dp_link_peer *peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u32 ba_win_sz, u16 ssn,
+ bool update_ssn)
+{
+ struct ath12k_hal_reo_cmd cmd = {};
+ struct ath12k_base *ab = dp->ab;
+ int ret;
+ struct ath12k_dp_rx_tid_rxq rx_tid_rxq;
+
+ ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid,
+ (peer->rx_tid_active_bitmask & (1 << rx_tid->tid)));
+
+ cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
+ cmd.ba_window_size = ba_win_sz;
+
+ if (update_ssn) {
+ cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
+ cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
+ }
+
+ ret = ath12k_wifi7_dp_reo_cmd_send(ab, &rx_tid_rxq,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ NULL);
+ if (ret) {
+ ath12k_warn(ab, "failed to update rx tid queue, tid %d (%d)\n",
+ rx_tid_rxq.tid, ret);
+ return ret;
+ }
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ return 0;
+}
+
+int ath12k_wifi7_dp_reo_cache_flush(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid)
+{
+ struct ath12k_hal_reo_cmd cmd = {};
+ int ret;
+
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
+	/* HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS - all pending MPDUs
+	 * in the bitmap will be forwarded/flushed to the REO output rings
+	 */
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS |
+ HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS;
+
+ /* For all QoS TIDs (except NON_QOS), the driver allocates a maximum
+ * window size of 1024. In such cases, the driver can issue a single
+ * 1KB descriptor flush command instead of sending multiple 128-byte
+ * flush commands for each QoS TID, improving efficiency.
+ */
+
+ if (rx_tid->tid != HAL_DESC_REO_NON_QOS_TID)
+ cmd.flag |= HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC;
+
+ ret = ath12k_wifi7_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE,
+ &cmd, ath12k_dp_reo_cmd_free);
+ return ret;
+}
+
+int ath12k_wifi7_dp_rx_assign_reoq(struct ath12k_base *ab, struct ath12k_dp_peer *dp_peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u16 ssn, enum hal_pn_type pn_type)
+{
+ u32 ba_win_sz = rx_tid->ba_win_sz;
+ struct ath12k_reoq_buf *buf;
+ void *vaddr, *vaddr_aligned;
+ dma_addr_t paddr_aligned;
+ u8 tid = rx_tid->tid;
+ u32 hw_desc_sz;
+ int ret;
+
+ buf = &dp_peer->reoq_bufs[tid];
+ if (!buf->vaddr) {
+ /* TODO: Optimize the memory allocation for qos tid based on
+ * the actual BA window size in REO tid update path.
+ */
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ hw_desc_sz = ath12k_wifi7_hal_reo_qdesc_size(ba_win_sz, tid);
+ else
+ hw_desc_sz = ath12k_wifi7_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX,
+ tid);
+
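+		/* Over-allocate by HAL_LINK_DESC_ALIGN - 1 so the descriptor
+		 * can be aligned with PTR_ALIGN() while the original pointer
+		 * is retained for kfree().
+		 */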
+ vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
+ if (!vaddr)
+ return -ENOMEM;
+
+ vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+
+ ath12k_wifi7_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
+ ssn, pn_type);
+
+ paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
+ DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(ab->dev, paddr_aligned);
+ if (ret) {
+ kfree(vaddr);
+ return ret;
+ }
+
+ buf->vaddr = vaddr;
+ buf->paddr_aligned = paddr_aligned;
+ buf->size = hw_desc_sz;
+ }
+
+ rx_tid->qbuf = *buf;
+
+ return 0;
+}
+
+int ath12k_wifi7_dp_rx_tid_delete_handler(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid)
+{
+ struct ath12k_hal_reo_cmd cmd = {};
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
+	/* Flush cache failures have been observed; to avoid them, set the vld bit during delete */
+ cmd.upd1 |= HAL_REO_CMD_UPD1_VLD;
+
+ return ath12k_wifi7_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ ath12k_dp_rx_tid_del_func);
+}
+
+static void ath12k_wifi7_dp_rx_h_csum_offload(struct sk_buff *msdu,
+ struct hal_rx_desc_data *rx_info)
+{
+ msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+}
+
+static void ath12k_wifi7_dp_rx_h_mpdu(struct ath12k_pdev_dp *dp_pdev,
+ struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ struct hal_rx_desc_data *rx_info)
+{
+ struct ath12k_skb_rxcb *rxcb;
+ enum hal_encrypt_type enctype;
+ bool is_decrypted = false;
+ struct ieee80211_hdr *hdr;
+ struct ath12k_dp_peer *peer;
+ struct ieee80211_rx_status *rx_status = rx_info->rx_status;
+ u32 err_bitmap = rx_info->err_bitmap;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "dp_rx_h_mpdu called without rcu lock");
+
+ /* PN for multicast packets will be checked in mac80211 */
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->is_mcbc = rx_info->is_mcbc;
+
+ if (rxcb->is_mcbc)
+ rxcb->peer_id = rx_info->peer_id;
+
+ peer = ath12k_dp_peer_find_by_peerid(dp_pdev, rxcb->peer_id);
+ if (peer) {
+		/* Reset the mcbc bit: for an AP these packets are effectively
+		 * unicast, since a STA only sends unicast packets.
+		 */
+ rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only;
+
+ if (rxcb->is_mcbc)
+ enctype = peer->sec_type_grp;
+ else
+ enctype = peer->sec_type;
+ } else {
+ enctype = HAL_ENCRYPT_TYPE_OPEN;
+ }
+
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
+ is_decrypted = rx_info->is_decrypted;
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact */
+ rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (is_decrypted) {
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
+
+ if (rx_info->is_mcbc)
+ rx_status->flag |= RX_FLAG_MIC_STRIPPED |
+ RX_FLAG_ICV_STRIPPED;
+ else
+ rx_status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_PN_VALIDATED;
+ }
+
+ ath12k_wifi7_dp_rx_h_csum_offload(msdu, rx_info);
+ ath12k_dp_rx_h_undecap(dp_pdev, msdu, rx_desc,
+ enctype, is_decrypted, rx_info);
+
+ if (!is_decrypted || rx_info->is_mcbc)
+ return;
+
+ if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+}
+
+static int ath12k_wifi7_dp_rx_msdu_coalesce(struct ath12k_hal *hal,
+ struct sk_buff_head *msdu_list,
+ struct sk_buff *first, struct sk_buff *last,
+ u8 l3pad_bytes, int msdu_len,
+ struct hal_rx_desc_data *rx_info)
+{
+ struct sk_buff *skb;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
+ int buf_first_hdr_len, buf_first_len;
+ struct hal_rx_desc *ldesc;
+ int space_extra, rem_len, buf_len;
+ u32 hal_rx_desc_sz = hal->hal_desc_sz;
+ bool is_continuation;
+
+ /* As the msdu is spread across multiple rx buffers,
+ * find the offset to the start of msdu for computing
+ * the length of the msdu in the first buffer.
+ */
+ buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
+ buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
+
+ if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
+ skb_put(first, buf_first_hdr_len + msdu_len);
+ skb_pull(first, buf_first_hdr_len);
+ return 0;
+ }
+
+ ldesc = (struct hal_rx_desc *)last->data;
+ rxcb->is_first_msdu = rx_info->is_first_msdu;
+ rxcb->is_last_msdu = rx_info->is_last_msdu;
+
+ /* MSDU spans over multiple buffers because the length of the MSDU
+ * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
+ * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
+ */
+ skb_put(first, DP_RX_BUFFER_SIZE);
+ skb_pull(first, buf_first_hdr_len);
+
+	/* When an MSDU is spread over multiple buffers, the MSDU_END
+	 * TLVs are valid only in the last buffer. Copy those TLVs.
+	 */
+ ath12k_dp_rx_desc_end_tlv_copy(hal, rxcb->rx_desc, ldesc);
+
+ space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
+ if (space_extra > 0 &&
+ (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
+ /* Free up all buffers of the MSDU */
+ while ((skb = __skb_dequeue(msdu_list)) != NULL) {
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ }
+ return -ENOMEM;
+ }
+
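+	/* Copy the remaining fragments into the (possibly expanded) first
+	 * buffer. Continuation buffers carry a full payload; the final,
+	 * non-continuation buffer holds whatever length remains.
+	 */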
+ rem_len = msdu_len - buf_first_len;
+ while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
+ rxcb = ATH12K_SKB_RXCB(skb);
+ is_continuation = rxcb->is_continuation;
+ if (is_continuation)
+ buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
+ else
+ buf_len = rem_len;
+
+ if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
+ WARN_ON_ONCE(1);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ skb_put(skb, buf_len + hal_rx_desc_sz);
+ skb_pull(skb, hal_rx_desc_sz);
+ skb_copy_from_linear_data(skb, skb_put(first, buf_len),
+ buf_len);
+ dev_kfree_skb_any(skb);
+
+ rem_len -= buf_len;
+ if (!is_continuation)
+ break;
+ }
+
+ return 0;
+}
+
+static int ath12k_wifi7_dp_rx_process_msdu(struct ath12k_pdev_dp *dp_pdev,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list,
+ struct hal_rx_desc_data *rx_info)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct hal_rx_desc *rx_desc, *lrx_desc;
+ struct ath12k_skb_rxcb *rxcb;
+ struct sk_buff *last_buf;
+ struct ath12k_hal *hal = dp->hal;
+ u8 l3_pad_bytes;
+ u16 msdu_len;
+ int ret;
+ u32 hal_rx_desc_sz = hal->hal_desc_sz;
+
+ last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
+ if (!last_buf) {
+ ath12k_warn(dp->ab,
+ "No valid Rx buffer to access MSDU_END tlv\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ lrx_desc = (struct hal_rx_desc *)last_buf->data;
+
+ ath12k_dp_extract_rx_desc_data(hal, rx_info, rx_desc, lrx_desc);
+ if (!rx_info->msdu_done) {
+ ath12k_warn(dp->ab, "msdu_done bit in msdu_end is not set\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->rx_desc = rx_desc;
+ msdu_len = rx_info->msdu_len;
+ l3_pad_bytes = rx_info->l3_pad_bytes;
+
+ if (rxcb->is_frag) {
+ skb_pull(msdu, hal_rx_desc_sz);
+ } else if (!rxcb->is_continuation) {
+ if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+ ret = -EINVAL;
+ ath12k_warn(dp->ab, "invalid msdu len %u\n", msdu_len);
+ ath12k_dbg_dump(dp->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(*rx_desc));
+ goto free_out;
+ }
+ skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
+ } else {
+ ret = ath12k_wifi7_dp_rx_msdu_coalesce(hal, msdu_list,
+ msdu, last_buf,
+ l3_pad_bytes, msdu_len,
+ rx_info);
+ if (ret) {
+			ath12k_warn(dp->ab,
+				    "failed to coalesce msdu rx buffer %d\n", ret);
+ goto free_out;
+ }
+ }
+
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, rx_desc, msdu,
+ rx_info))) {
+ ret = -EINVAL;
+ goto free_out;
+ }
+
+ ath12k_dp_rx_h_ppdu(dp_pdev, rx_info);
+ ath12k_wifi7_dp_rx_h_mpdu(dp_pdev, msdu, rx_desc, rx_info);
+
+ rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+
+ return 0;
+
+free_out:
+ return ret;
+}
+
+static void
+ath12k_wifi7_dp_rx_process_received_packets(struct ath12k_dp *dp,
+ struct napi_struct *napi,
+ struct sk_buff_head *msdu_list,
+ int ring_id)
+{
+ struct ath12k_hw_group *ag = dp->ag;
+ struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
+ struct ieee80211_rx_status rx_status = {};
+ struct ath12k_skb_rxcb *rxcb;
+ struct sk_buff *msdu;
+ struct ath12k *ar;
+ struct ath12k_pdev_dp *dp_pdev;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ struct hal_rx_desc_data rx_info;
+ struct ath12k_dp *partner_dp;
+ u8 hw_link_id, pdev_idx;
+ int ret;
+
+ if (skb_queue_empty(msdu_list))
+ return;
+
+ rx_info.addr2_present = false;
+ rx_info.rx_status = &rx_status;
+
+ rcu_read_lock();
+
+ while ((msdu = __skb_dequeue(msdu_list))) {
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ hw_link_id = rxcb->hw_link_id;
+ partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp,
+ hw_links[hw_link_id].device_id);
+ pdev_idx = ath12k_hw_mac_id_to_pdev_id(partner_dp->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ partner_ab = partner_dp->ab;
+ ar = partner_ab->pdevs[pdev_idx].ar;
+ if (!rcu_dereference(partner_ab->pdevs_active[pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ dp_pdev = ath12k_dp_to_pdev_dp(partner_dp, pdev_idx);
+ if (!dp_pdev) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ret = ath12k_wifi7_dp_rx_process_msdu(dp_pdev, msdu, msdu_list, &rx_info);
+ if (ret) {
+ ath12k_dbg(dp->ab, ATH12K_DBG_DATA,
+ "Unable to process msdu %d", ret);
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ath12k_dp_rx_deliver_msdu(dp_pdev, napi, msdu, &rx_info);
+ }
+
+ rcu_read_unlock();
+}
+
+int ath12k_wifi7_dp_rx_process(struct ath12k_dp *dp, int ring_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath12k_hw_group *ag = dp->ag;
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
+ struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
+ struct ath12k_rx_desc_info *desc_info;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ struct hal_reo_dest_ring *desc;
+ struct ath12k_dp *partner_dp;
+ struct sk_buff_head msdu_list;
+ struct ath12k_skb_rxcb *rxcb;
+ int total_msdu_reaped = 0;
+ u8 hw_link_id, device_id;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ bool done = false;
+ u64 desc_va;
+
+ __skb_queue_head_init(&msdu_list);
+
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
+ srng = &hal->srng_list[dp->reo_dst_ring[ring_id].ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+try_again:
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct rx_mpdu_desc *mpdu_info;
+ struct rx_msdu_desc *msdu_info;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 cookie;
+
+ cookie = le32_get_bits(desc->buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+
+ hw_link_id = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+
+ desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
+ le32_to_cpu(desc->buf_va_lo));
+ desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+
+ device_id = hw_links[hw_link_id].device_id;
+ partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
+ if (unlikely(!partner_dp)) {
+ if (desc_info->skb) {
+ dev_kfree_skb_any(desc_info->skb);
+ desc_info->skb = NULL;
+ }
+
+ continue;
+ }
+
+ /* retry manual desc retrieval */
+ if (!desc_info) {
+ desc_info = ath12k_dp_get_rx_desc(partner_dp, cookie);
+ if (!desc_info) {
+ ath12k_warn(partner_dp->ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
+ cookie);
+ continue;
+ }
+ }
+
+ if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
+ ath12k_warn(ab, "Check HW CC implementation");
+
+ msdu = desc_info->skb;
+ desc_info->skb = NULL;
+
+ list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(partner_dp->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped[device_id]++;
+ dp->device_stats.reo_rx[ring_id][dp->device_id]++;
+
+ push_reason = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_PUSH_REASON);
+ if (push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ dev_kfree_skb_any(msdu);
+ dp->device_stats.hal_reo_error[ring_id]++;
+ continue;
+ }
+
+ msdu_info = &desc->rx_msdu_info;
+ mpdu_info = &desc->rx_mpdu_info;
+
+ rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) &
+ RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+ rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) &
+ RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+ rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
+ RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
+ rxcb->hw_link_id = hw_link_id;
+ rxcb->peer_id = ath12k_wifi7_dp_rx_get_peer_id(dp, dp->peer_metadata_ver,
+ mpdu_info->peer_meta_data);
+ rxcb->tid = le32_get_bits(mpdu_info->info0,
+ RX_MPDU_DESC_INFO0_TID);
+
+ __skb_queue_tail(&msdu_list, msdu);
+
+ if (!rxcb->is_continuation) {
+ total_msdu_reaped++;
+ done = true;
+ } else {
+ done = false;
+ }
+
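+		/* An MSDU counts against the budget only once its final,
+		 * non-continuation buffer has been reaped, so the loop never
+		 * stops with a partially collected MSDU.
+		 */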
+ if (total_msdu_reaped >= budget)
+ break;
+ }
+
+ /* Hw might have updated the head pointer after we cached it.
+ * In this case, even though there are entries in the ring we'll
+ * get rx_desc NULL. Give the read another try with updated cached
+ * head pointer so that we can reap complete MPDU in the current
+ * rx processing.
+ */
+ if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
+ ath12k_hal_srng_access_end(ab, srng);
+ goto try_again;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!total_msdu_reaped)
+ goto exit;
+
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
+ rx_ring = &partner_dp->rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(partner_dp, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
+
+ ath12k_wifi7_dp_rx_process_received_packets(dp, napi, &msdu_list,
+ ring_id);
+
+exit:
+ return total_msdu_reaped;
+}
+
+static bool
+ath12k_wifi7_dp_rx_h_defrag_validate_incr_pn(struct ath12k_pdev_dp *dp_pdev,
+ struct ath12k_dp_rx_tid *rx_tid,
+ enum hal_encrypt_type encrypt_type)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct sk_buff *first_frag, *skb;
+ u64 last_pn;
+ u64 cur_pn;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+
+ if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
+ return true;
+
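+	/* For CCMP/GCMP, the fragments of an MPDU carry consecutive PNs, so
+	 * each fragment's PN must be exactly one more than that of the
+	 * previous fragment.
+	 */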
+ last_pn = ath12k_dp_rx_h_get_pn(dp, first_frag);
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ if (skb == first_frag)
+ continue;
+
+ cur_pn = ath12k_dp_rx_h_get_pn(dp, skb);
+ if (cur_pn != last_pn + 1)
+ return false;
+ last_pn = cur_pn;
+ }
+ return true;
+}
+
+static int ath12k_wifi7_dp_rx_h_defrag_reo_reinject(struct ath12k_dp *dp,
+ struct ath12k_dp_rx_tid *rx_tid,
+ struct sk_buff *defrag_skb)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
+ struct hal_reo_entrance_ring *reo_ent_ring;
+ struct hal_reo_dest_ring *reo_dest_ring;
+ struct dp_link_desc_bank *link_desc_banks;
+ struct hal_rx_msdu_link *msdu_link;
+ struct hal_rx_msdu_details *msdu0;
+ struct hal_srng *srng;
+ dma_addr_t link_paddr, buf_paddr;
+ u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
+ u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi;
+ int ret;
+ struct ath12k_rx_desc_info *desc_info;
+ enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm;
+ u8 dst_ind;
+
+ hal_rx_desc_sz = hal->hal_desc_sz;
+ link_desc_banks = dp->link_desc_banks;
+ reo_dest_ring = rx_tid->dst_ring_desc;
+
+ ath12k_wifi7_hal_rx_reo_ent_paddr_get(&reo_dest_ring->buf_addr_info,
+ &link_paddr, &cookie);
+ desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
+
+ msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
+ (link_paddr - link_desc_banks[desc_bank].paddr));
+ msdu0 = &msdu_link->msdu_link[0];
+ msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
+ dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
+
+ memset(msdu0, 0, sizeof(*msdu0));
+
+ msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
+ u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
+ u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
+ u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
+ RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
+ u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
+ u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
+ msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
+ msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
+
+ /* change msdu len in hal rx desc */
+ ath12k_dp_rxdesc_set_msdu_len(hal, rx_desc, defrag_skb->len - hal_rx_desc_sz);
+
+ buf_paddr = dma_map_single(dp->dev, defrag_skb->data,
+ defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, buf_paddr))
+ return -ENOMEM;
+
+ spin_lock_bh(&dp->rx_desc_lock);
+ desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
+ struct ath12k_rx_desc_info,
+ list);
+ if (!desc_info) {
+ spin_unlock_bh(&dp->rx_desc_lock);
+ ath12k_warn(ab, "failed to find rx desc for reinject\n");
+ ret = -ENOMEM;
+ goto err_unmap_dma;
+ }
+
+ desc_info->skb = defrag_skb;
+ desc_info->in_use = true;
+
+ list_del(&desc_info->list);
+ spin_unlock_bh(&dp->rx_desc_lock);
+
+ ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
+
+ ath12k_wifi7_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
+ desc_info->cookie,
+ HAL_RX_BUF_RBM_SW3_BM);
+
+ /* Fill mpdu details into reo entrance ring */
+ srng = &hal->srng_list[dp->reo_reinject_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_ent_ring) {
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ ret = -ENOSPC;
+ goto err_free_desc;
+ }
+ memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
+
+ ath12k_wifi7_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
+ cookie, idle_link_rbm);
+
+ mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
+ u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
+ u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
+ u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
+ u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
+
+ reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
+ reo_ent_ring->rx_mpdu_info.peer_meta_data =
+ reo_dest_ring->rx_mpdu_info.peer_meta_data;
+
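+	/* With REOQ LUT support the queue is resolved from the peer
+	 * metadata, so only the metadata is programmed; otherwise the REO
+	 * queue's physical address is filled in directly.
+	 */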
+ if (dp->hw_params->reoq_lut_support) {
+ reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
+ queue_addr_hi = 0;
+ } else {
+ reo_ent_ring->queue_addr_lo =
+ cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned));
+ queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
+ }
+
+ reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
+ HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
+ le32_encode_bits(dst_ind,
+ HAL_REO_ENTR_RING_INFO0_DEST_IND);
+
+ reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
+ HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
+ dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+ reo_ent_ring->info2 =
+ cpu_to_le32(u32_get_bits(dest_ring_info0,
+ HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));
+
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+err_free_desc:
+ spin_lock_bh(&dp->rx_desc_lock);
+ desc_info->in_use = false;
+ desc_info->skb = NULL;
+ list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
+ spin_unlock_bh(&dp->rx_desc_lock);
+err_unmap_dma:
+ dma_unmap_single(dp->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_TO_DEVICE);
+ return ret;
+}
+
+static int ath12k_wifi7_dp_rx_h_verify_tkip_mic(struct ath12k_pdev_dp *dp_pdev,
+ struct ath12k_dp_peer *peer,
+ enum hal_encrypt_type enctype,
+ struct sk_buff *msdu,
+ struct hal_rx_desc_data *rx_info)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_hal *hal = dp->hal;
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
+ struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
+ struct ieee80211_key_conf *key_conf;
+ struct ieee80211_hdr *hdr;
+ u8 mic[IEEE80211_CCMP_MIC_LEN];
+ int head_len, tail_len, ret;
+ size_t data_len;
+ u32 hdr_len, hal_rx_desc_sz = hal->hal_desc_sz;
+ u8 *key, *data;
+ u8 key_idx;
+
+ if (enctype != HAL_ENCRYPT_TYPE_TKIP_MIC)
+ return 0;
+
+ rx_info->addr2_present = false;
+ rx_info->rx_status = rxs;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
+ tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
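+	/* head_len skips the rx descriptor, 802.11 header and TKIP IV;
+	 * tail_len excludes the MIC, ICV and FCS. The Michael MIC is then
+	 * recomputed over the remaining payload and compared against the
+	 * one carried in the frame.
+	 */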
+
+ if (!is_multicast_ether_addr(hdr->addr1))
+ key_idx = peer->ucast_keyidx;
+ else
+ key_idx = peer->mcast_keyidx;
+
+ key_conf = peer->keys[key_idx];
+
+ data = msdu->data + head_len;
+ data_len = msdu->len - head_len - tail_len;
+ key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
+
+ ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data,
+ data_len, mic);
+ if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
+ goto mic_fail;
+
+ return 0;
+
+mic_fail:
+ (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
+ (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
+
+ ath12k_dp_extract_rx_desc_data(hal, rx_info, rx_desc, rx_desc);
+
+ rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
+ skb_pull(msdu, hal_rx_desc_sz);
+
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, rx_desc, msdu,
+ rx_info)))
+ return -EINVAL;
+
+ ath12k_dp_rx_h_ppdu(dp_pdev, rx_info);
+ ath12k_dp_rx_h_undecap(dp_pdev, msdu, rx_desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, true, rx_info);
+ ieee80211_rx(ath12k_pdev_dp_to_hw(dp_pdev), msdu);
+ return -EINVAL;
+}
+
+static int ath12k_wifi7_dp_rx_h_defrag(struct ath12k_pdev_dp *dp_pdev,
+ struct ath12k_dp_peer *peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ struct sk_buff **defrag_skb,
+ enum hal_encrypt_type enctype,
+ struct hal_rx_desc_data *rx_info)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_base *ab = dp->ab;
+ struct sk_buff *skb, *first_frag, *last_frag;
+ struct ieee80211_hdr *hdr;
+ bool is_decrypted = false;
+ int msdu_len = 0;
+ int extra_space;
+ u32 flags, hal_rx_desc_sz = ab->hal.hal_desc_sz;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ last_frag = skb_peek_tail(&rx_tid->rx_frags);
+
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ flags = 0;
+ hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
+
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN)
+ is_decrypted = rx_info->is_decrypted;
+
+ if (is_decrypted) {
+ if (skb != first_frag)
+ flags |= RX_FLAG_IV_STRIPPED;
+ if (skb != last_frag)
+ flags |= RX_FLAG_ICV_STRIPPED |
+ RX_FLAG_MIC_STRIPPED;
+ }
+
+ /* RX fragments are always raw packets */
+ if (skb != last_frag)
+ skb_trim(skb, skb->len - FCS_LEN);
+ ath12k_dp_rx_h_undecap_frag(dp_pdev, skb, enctype, flags);
+
+ if (skb != first_frag)
+ skb_pull(skb, hal_rx_desc_sz +
+ ieee80211_hdrlen(hdr->frame_control));
+ msdu_len += skb->len;
+ }
+
+ extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
+ if (extra_space > 0 &&
+ (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
+ return -ENOMEM;
+
+ __skb_unlink(first_frag, &rx_tid->rx_frags);
+ while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
+ skb_put_data(first_frag, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ }
+
+ hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
+ ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
+
+ if (ath12k_wifi7_dp_rx_h_verify_tkip_mic(dp_pdev, peer, enctype, first_frag,
+ rx_info))
+ first_frag = NULL;
+
+ *defrag_skb = first_frag;
+ return 0;
+}
+
+void ath12k_wifi7_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
+ bool rel_link_desc)
+{
+ enum hal_wbm_rel_bm_act act = HAL_WBM_REL_BM_ACT_PUT_IN_IDLE;
+ struct ath12k_buffer_addr *buf_addr_info;
+ struct ath12k_dp *dp = rx_tid->dp;
+
+ lockdep_assert_held(&dp->dp_lock);
+
+ if (rx_tid->dst_ring_desc) {
+ if (rel_link_desc) {
+ buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info;
+ ath12k_wifi7_dp_rx_link_desc_return(dp, buf_addr_info, act);
+ }
+ kfree(rx_tid->dst_ring_desc);
+ rx_tid->dst_ring_desc = NULL;
+ }
+
+ rx_tid->cur_sn = 0;
+ rx_tid->last_frag_no = 0;
+ rx_tid->rx_frag_bitmap = 0;
+ __skb_queue_purge(&rx_tid->rx_frags);
+}
+
+static int ath12k_wifi7_dp_rx_frag_h_mpdu(struct ath12k_pdev_dp *dp_pdev,
+ struct sk_buff *msdu,
+ struct hal_reo_dest_ring *ring_desc,
+ struct hal_rx_desc_data *rx_info)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_hal *hal = dp->hal;
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_dp_peer *peer;
+ struct ath12k_dp_rx_tid *rx_tid;
+ struct sk_buff *defrag_skb = NULL;
+ u32 peer_id = rx_info->peer_id;
+ u16 seqno, frag_no;
+ u8 tid = rx_info->tid;
+ int ret = 0;
+ bool more_frags;
+ enum hal_encrypt_type enctype = rx_info->enctype;
+
+ frag_no = ath12k_dp_rx_h_frag_no(hal, msdu);
+ more_frags = ath12k_dp_rx_h_more_frags(hal, msdu);
+ seqno = rx_info->seq_no;
+
+ if (!rx_info->seq_ctl_valid || !rx_info->fc_valid ||
+ tid > IEEE80211_NUM_TIDS)
+ return -EINVAL;
+
+	/* An unfragmented packet arrived in the reo exception ring;
+	 * this shouldn't happen, as such packets typically come
+	 * from the reo2sw srngs.
+	 */
+ if (WARN_ON_ONCE(!frag_no && !more_frags))
+ return -EINVAL;
+
+ spin_lock_bh(&dp->dp_lock);
+ peer = ath12k_dp_peer_find_by_peerid(dp_pdev, peer_id);
+ if (!peer) {
+ ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
+ peer_id);
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ if (!peer->dp_setup_done) {
+ ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
+ peer->addr, peer_id);
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+
+ if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
+ skb_queue_empty(&rx_tid->rx_frags)) {
+ /* Flush stored fragments and start a new sequence */
+ ath12k_wifi7_dp_rx_frags_cleanup(rx_tid, true);
+ rx_tid->cur_sn = seqno;
+ }
+
+ if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
+ /* Fragment already present */
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap)))
+ __skb_queue_tail(&rx_tid->rx_frags, msdu);
+ else
+ ath12k_dp_rx_h_sort_frags(hal, &rx_tid->rx_frags, msdu);
+
+ rx_tid->rx_frag_bitmap |= BIT(frag_no);
+ if (!more_frags)
+ rx_tid->last_frag_no = frag_no;
+
+ if (frag_no == 0) {
+ rx_tid->dst_ring_desc = kmemdup(ring_desc,
+ sizeof(*rx_tid->dst_ring_desc),
+ GFP_ATOMIC);
+ if (!rx_tid->dst_ring_desc) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ } else {
+ ath12k_wifi7_dp_rx_link_desc_return(dp, &ring_desc->buf_addr_info,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
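+	/* Defragment only once the final fragment has arrived and the bitmap
+	 * covers every fragment from 0 to last_frag_no; otherwise (re)arm
+	 * the fragment timer and wait.
+	 */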
+ if (!rx_tid->last_frag_no ||
+ rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
+ mod_timer(&rx_tid->frag_timer, jiffies +
+ ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
+ goto out_unlock;
+ }
+
+ spin_unlock_bh(&dp->dp_lock);
+ timer_delete_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&dp->dp_lock);
+
+ peer = ath12k_dp_peer_find_by_peerid(dp_pdev, peer_id);
+ if (!peer)
+ goto err_frags_cleanup;
+
+ if (!ath12k_wifi7_dp_rx_h_defrag_validate_incr_pn(dp_pdev, rx_tid, enctype))
+ goto err_frags_cleanup;
+
+ if (ath12k_wifi7_dp_rx_h_defrag(dp_pdev, peer, rx_tid, &defrag_skb,
+ enctype, rx_info))
+ goto err_frags_cleanup;
+
+ if (!defrag_skb)
+ goto err_frags_cleanup;
+
+ if (ath12k_wifi7_dp_rx_h_defrag_reo_reinject(dp, rx_tid, defrag_skb))
+ goto err_frags_cleanup;
+
+ ath12k_wifi7_dp_rx_frags_cleanup(rx_tid, false);
+ goto out_unlock;
+
+err_frags_cleanup:
+ dev_kfree_skb_any(defrag_skb);
+ ath12k_wifi7_dp_rx_frags_cleanup(rx_tid, true);
+out_unlock:
+ spin_unlock_bh(&dp->dp_lock);
+ return ret;
+}
+
+static int
+ath12k_wifi7_dp_process_rx_err_buf(struct ath12k_pdev_dp *dp_pdev,
+ struct hal_reo_dest_ring *desc,
+ struct list_head *used_list,
+ bool drop, u32 cookie)
+{
+ struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_hal *hal = dp->hal;
+ struct sk_buff *msdu;
+ struct ath12k_skb_rxcb *rxcb;
+ struct hal_rx_desc_data rx_info;
+ struct hal_rx_desc *rx_desc;
+ u16 msdu_len;
+ u32 hal_rx_desc_sz = hal->hal_desc_sz;
+ struct ath12k_rx_desc_info *desc_info;
+ u64 desc_va;
+
+ desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
+ le32_to_cpu(desc->buf_va_lo));
+ desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+
+ /* retry manual desc retrieval */
+ if (!desc_info) {
+ desc_info = ath12k_dp_get_rx_desc(dp, cookie);
+ if (!desc_info) {
+ ath12k_warn(dp->ab,
+ "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
+ cookie);
+ return -EINVAL;
+ }
+ }
+
+ if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
+ ath12k_warn(dp->ab, "RX Exception, Check HW CC implementation");
+
+ msdu = desc_info->skb;
+ desc_info->skb = NULL;
+
+ list_add_tail(&desc_info->list, used_list);
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(dp->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return 0;
+ }
+
+ rcu_read_lock();
+ if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ ath12k_dp_extract_rx_desc_data(hal, &rx_info, rx_desc, rx_desc);
+
+ msdu_len = rx_info.msdu_len;
+ if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+		ath12k_warn(dp->ab, "invalid msdu len %u", msdu_len);
+ ath12k_dbg_dump(dp->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(*rx_desc));
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ skb_put(msdu, hal_rx_desc_sz + msdu_len);
+
+ if (ath12k_wifi7_dp_rx_frag_h_mpdu(dp_pdev, msdu, desc, &rx_info)) {
+ dev_kfree_skb_any(msdu);
+ ath12k_wifi7_dp_rx_link_desc_return(dp, &desc->buf_addr_info,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+exit:
+ rcu_read_unlock();
+ return 0;
+}
+
+static int ath12k_dp_h_msdu_buffer_type(struct ath12k_dp *dp,
+ struct list_head *list,
+ struct hal_reo_dest_ring *desc)
+{
+ struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_skb_rxcb *rxcb;
+ struct sk_buff *msdu;
+ u64 desc_va;
+
+ dp->device_stats.reo_excep_msdu_buf_type++;
+
+ desc_va = (u64)le32_to_cpu(desc->buf_va_hi) << 32 |
+ le32_to_cpu(desc->buf_va_lo);
+ desc_info = (struct ath12k_rx_desc_info *)(uintptr_t)desc_va;
+ if (!desc_info) {
+ u32 cookie;
+
+ cookie = le32_get_bits(desc->buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+ desc_info = ath12k_dp_get_rx_desc(dp, cookie);
+ if (!desc_info) {
+ ath12k_warn(dp->ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
+ cookie);
+ return -EINVAL;
+ }
+ }
+
+ if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) {
+ ath12k_warn(dp->ab, "rx exception, magic check failed with value: %u\n",
+ desc_info->magic);
+ return -EINVAL;
+ }
+
+ msdu = desc_info->skb;
+ desc_info->skb = NULL;
+ list_add_tail(&desc_info->list, list);
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(dp->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(msdu);
+
+ return 0;
+}
+
+int ath12k_wifi7_dp_rx_process_err(struct ath12k_dp *dp, struct napi_struct *napi,
+ int budget)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
+ struct ath12k_hw_group *ag = dp->ag;
+ struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
+ struct ath12k_dp *partner_dp;
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
+ struct dp_link_desc_bank *link_desc_banks;
+ enum hal_rx_buf_return_buf_manager rbm;
+ struct hal_rx_msdu_link *link_desc_va;
+ int tot_n_bufs_reaped, quota, ret, i;
+ struct hal_reo_dest_ring *reo_desc;
+ struct dp_rxdma_ring *rx_ring;
+ struct dp_srng *reo_except;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_pdev_dp *dp_pdev;
+ u8 hw_link_id, device_id;
+ u32 desc_bank, num_msdus;
+ struct hal_srng *srng;
+ dma_addr_t paddr;
+ bool is_frag;
+ bool drop;
+ int pdev_idx;
+ struct list_head *used_list;
+ enum hal_wbm_rel_bm_act act;
+
+ tot_n_bufs_reaped = 0;
+ quota = budget;
+
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
+ reo_except = &dp->reo_except_ring;
+
+ srng = &hal->srng_list[reo_except->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (budget &&
+ (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ drop = false;
+ dp->device_stats.err_ring_pkts++;
+
+ hw_link_id = le32_get_bits(reo_desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+ device_id = hw_links[hw_link_id].device_id;
+ partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
+
+		/* The case below handles data packets from un-associated
+		 * clients, since AST lookup is expected to fail for an
+		 * un-associated station's data packets.
+		 */
+ if (le32_get_bits(reo_desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE) ==
+ HAL_REO_DEST_RING_BUFFER_TYPE_MSDU) {
+ if (!ath12k_dp_h_msdu_buffer_type(partner_dp,
+ &rx_desc_used_list[device_id],
+ reo_desc)) {
+ num_buffs_reaped[device_id]++;
+ tot_n_bufs_reaped++;
+ }
+ goto next_desc;
+ }
+
+ ret = ath12k_wifi7_hal_desc_reo_parse_err(dp, reo_desc, &paddr,
+ &desc_bank);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse error reo desc %d\n",
+ ret);
+ continue;
+ }
+
+ pdev_idx = ath12k_hw_mac_id_to_pdev_id(partner_dp->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+
+ link_desc_banks = partner_dp->link_desc_banks;
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath12k_wifi7_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
+ msdu_cookies, &rbm);
+ if (rbm != partner_dp->idle_link_rbm &&
+ rbm != HAL_RX_BUF_RBM_SW3_BM &&
+ rbm != partner_dp->hal->hal_params->rx_buf_rbm) {
+ act = HAL_WBM_REL_BM_ACT_REL_MSDU;
+ dp->device_stats.invalid_rbm++;
+ ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
+ ath12k_wifi7_dp_rx_link_desc_return(partner_dp,
+ &reo_desc->buf_addr_info,
+ act);
+ continue;
+ }
+
+ is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
+ RX_MPDU_DESC_INFO0_FRAG_FLAG);
+
+		/* Below, process only rx fragments with one MSDU per link
+		 * desc, and drop MSDUs indicated due to error reasons.
+		 * Dynamic fragmentation is not supported for multi-link
+		 * clients, so drop the partner device's buffers.
+		 */
+ if (!is_frag || num_msdus > 1 ||
+ partner_dp->device_id != dp->device_id) {
+ drop = true;
+ act = HAL_WBM_REL_BM_ACT_PUT_IN_IDLE;
+
+ /* Return the link desc back to wbm idle list */
+ ath12k_wifi7_dp_rx_link_desc_return(partner_dp,
+ &reo_desc->buf_addr_info,
+ act);
+ }
+
+ rcu_read_lock();
+
+ dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx);
+ if (!dp_pdev) {
+ rcu_read_unlock();
+ continue;
+ }
+
+ for (i = 0; i < num_msdus; i++) {
+ used_list = &rx_desc_used_list[device_id];
+
+ if (!ath12k_wifi7_dp_process_rx_err_buf(dp_pdev, reo_desc,
+ used_list,
+ drop,
+ msdu_cookies[i])) {
+ num_buffs_reaped[device_id]++;
+ tot_n_bufs_reaped++;
+ }
+ }
+
+ rcu_read_unlock();
+
+next_desc:
+ if (tot_n_bufs_reaped >= quota) {
+ tot_n_bufs_reaped = quota;
+ goto exit;
+ }
+
+ budget = quota - tot_n_bufs_reaped;
+ }
+
+exit:
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
+ rx_ring = &partner_dp->rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(partner_dp, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
+
+ return tot_n_bufs_reaped;
+}
+
+static void
+ath12k_wifi7_dp_rx_null_q_desc_sg_drop(struct ath12k_dp *dp, int msdu_len,
+ struct sk_buff_head *msdu_list)
+{
+ struct sk_buff *skb, *tmp;
+ struct ath12k_skb_rxcb *rxcb;
+ int n_buffs;
+
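+	/* Each rx buffer carries DP_RX_BUFFER_SIZE - hal_desc_sz bytes of
+	 * MSDU payload, so this many trailing buffers belong to the dropped
+	 * scatter-gather MSDU.
+	 */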
+ n_buffs = DIV_ROUND_UP(msdu_len,
+ (DP_RX_BUFFER_SIZE - dp->ab->hal.hal_desc_sz));
+
+ skb_queue_walk_safe(msdu_list, skb, tmp) {
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
+ rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
+ if (!n_buffs)
+ break;
+ __skb_unlink(skb, msdu_list);
+ dev_kfree_skb_any(skb);
+ n_buffs--;
+ }
+ }
+}
+
+static int ath12k_wifi7_dp_rx_h_null_q_desc(struct ath12k_pdev_dp *dp_pdev,
+ struct sk_buff *msdu,
+ struct hal_rx_desc_data *rx_info,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_base *ab = dp->ab;
+ u16 msdu_len = rx_info->msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes = rx_info->l3_pad_bytes;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ u32 hal_rx_desc_sz = dp->ab->hal.hal_desc_sz;
+
+ if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
+		/* The first buffer will be freed by the caller, so deduct its length */
+ msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
+ ath12k_wifi7_dp_rx_null_q_desc_sg_drop(dp, msdu_len, msdu_list);
+ return -EINVAL;
+ }
+
+	/* Even after the above check cleans up the sg buffers in the msdu
+	 * list, any msdu received with the continuation flag must be dropped
+	 * as invalid. This protects against a stray error frame carrying the
+	 * continuation flag.
+	 */
+ if (rxcb->is_continuation)
+ return -EINVAL;
+
+ if (!rx_info->msdu_done) {
+ ath12k_warn(ab,
+ "msdu_done bit not set in null_q_des processing\n");
+ __skb_queue_purge(msdu_list);
+ return -EIO;
+ }
+
+	/* Handle NULL queue descriptor violations arising out of a missing
+	 * REO queue for a given peer or a given TID. This typically
+	 * happens if a packet is received on a QoS-enabled TID before the
+	 * ADDBA negotiation for that TID, which is when the TID queue is
+	 * set up. It may also happen for MC/BC frames if they are not
+	 * routed to the non-QoS TID queue, in the absence of any other
+	 * default TID queue. This error can show up in both the REO
+	 * destination and the WBM release ring.
+	 */
+
+ if (rxcb->is_frag) {
+ skb_pull(msdu, hal_rx_desc_sz);
+ } else {
+ if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
+ return -EINVAL;
+
+ skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+ }
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, desc, msdu, rx_info)))
+ return -EINVAL;
+
+ ath12k_dp_rx_h_ppdu(dp_pdev, rx_info);
+ ath12k_wifi7_dp_rx_h_mpdu(dp_pdev, msdu, desc, rx_info);
+
+ rxcb->tid = rx_info->tid;
+
+	/* Note that the caller still has access to the msdu and completes
+	 * rx with mac80211, so there is no need to clean up the amsdu_list.
+	 */
+
+ return 0;
+}
+
+static bool ath12k_wifi7_dp_rx_h_tkip_mic_err(struct ath12k_pdev_dp *dp_pdev,
+ struct sk_buff *msdu,
+ struct hal_rx_desc_data *rx_info)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_base *ab = dp->ab;
+ u16 msdu_len = rx_info->msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes = rx_info->l3_pad_bytes;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
+
+ rxcb->is_first_msdu = rx_info->is_first_msdu;
+ rxcb->is_last_msdu = rx_info->is_last_msdu;
+
+ if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "invalid msdu len in tkip mic err %u\n", msdu_len);
+ ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
+ sizeof(*desc));
+ return true;
+ }
+
+ skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, desc, msdu, rx_info)))
+ return true;
+
+ ath12k_dp_rx_h_ppdu(dp_pdev, rx_info);
+
+ rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED);
+
+ ath12k_dp_rx_h_undecap(dp_pdev, msdu, desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, false, rx_info);
+ return false;
+}
+
+static bool ath12k_wifi7_dp_rx_h_rxdma_err(struct ath12k_pdev_dp *dp_pdev,
+ struct sk_buff *msdu,
+ struct hal_rx_desc_data *rx_info)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ dp->device_stats.rxdma_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
+ case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
+ if (rx_info->err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
+ drop = ath12k_wifi7_dp_rx_h_tkip_mic_err(dp_pdev, msdu, rx_info);
+ break;
+ }
+ fallthrough;
+ default:
+ /* TODO: Review other rxdma error code to check if anything is
+ * worth reporting to mac80211
+ */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static bool ath12k_wifi7_dp_rx_h_reo_err(struct ath12k_pdev_dp *dp_pdev,
+ struct sk_buff *msdu,
+ struct hal_rx_desc_data *rx_info,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ dp->device_stats.reo_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
+ if (ath12k_wifi7_dp_rx_h_null_q_desc(dp_pdev, msdu, rx_info, msdu_list))
+ drop = true;
+ break;
+ case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
+		/* TODO: Do not drop PN-check-failed packets in the driver;
+		 * instead, drop them in mac80211 after incrementing the
+		 * replay counters.
+		 */
+ fallthrough;
+ default:
+		/* TODO: Review the other errors and report them to mac80211
+		 * as appropriate.
+		 */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath12k_wifi7_dp_rx_wbm_err(struct ath12k_pdev_dp *dp_pdev,
+ struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct ieee80211_rx_status rxs = {};
+ struct hal_rx_desc_data rx_info;
+ bool drop = true;
+
+ rx_info.addr2_present = false;
+ rx_info.rx_status = &rxs;
+
+ ath12k_dp_extract_rx_desc_data(dp->hal, &rx_info, rx_desc, rx_desc);
+
+ switch (rxcb->err_rel_src) {
+ case HAL_WBM_REL_SRC_MODULE_REO:
+ drop = ath12k_wifi7_dp_rx_h_reo_err(dp_pdev, msdu, &rx_info, msdu_list);
+ break;
+ case HAL_WBM_REL_SRC_MODULE_RXDMA:
+ drop = ath12k_wifi7_dp_rx_h_rxdma_err(dp_pdev, msdu, &rx_info);
+ break;
+ default:
+ /* msdu will get freed */
+ break;
+ }
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return;
+ }
+
+ rx_info.rx_status->flag |= RX_FLAG_SKIP_MONITOR;
+
+ ath12k_dp_rx_deliver_msdu(dp_pdev, napi, msdu, &rx_info);
+}
+
+void ath12k_wifi7_dp_setup_pn_check_reo_cmd(struct ath12k_hal_reo_cmd *cmd,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u32 cipher, enum set_key_cmd key_cmd)
+{
+ cmd->flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd->upd0 = HAL_REO_CMD_UPD0_PN |
+ HAL_REO_CMD_UPD0_PN_SIZE |
+ HAL_REO_CMD_UPD0_PN_VALID |
+ HAL_REO_CMD_UPD0_PN_CHECK |
+ HAL_REO_CMD_UPD0_SVLD;
+
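+	/* All of the ciphers handled below carry a 48-bit packet number on
+	 * the air (e.g. CCMP PN0..PN5, TKIP TSC0..TSC5), hence pn_size is
+	 * set to 48 when REO PN checking is enabled on SET_KEY.
+	 */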
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (key_cmd == SET_KEY) {
+ cmd->upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
+ cmd->pn_size = 48;
+ }
+ break;
+ default:
+ break;
+ }
+
+ cmd->addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd->addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
+}
+
+int ath12k_wifi7_dp_rx_process_wbm_err(struct ath12k_dp *dp,
+ struct napi_struct *napi, int budget)
+{
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
+ struct ath12k *ar;
+ struct ath12k_pdev_dp *dp_pdev;
+ struct ath12k_hw_group *ag = dp->ag;
+ struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
+ struct ath12k_dp *partner_dp;
+ struct dp_rxdma_ring *rx_ring;
+ struct hal_rx_wbm_rel_info err_info;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ struct sk_buff_head msdu_list, scatter_msdu_list;
+ struct ath12k_skb_rxcb *rxcb;
+ void *rx_desc;
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
+ int total_num_buffs_reaped = 0;
+ struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_device_dp_stats *device_stats = &dp->device_stats;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ u8 hw_link_id, device_id;
+ int ret, pdev_idx;
+ struct hal_rx_desc *msdu_data;
+
+ __skb_queue_head_init(&msdu_list);
+ __skb_queue_head_init(&scatter_msdu_list);
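+	/* msdu_list gathers complete MSDUs ready for error processing;
+	 * scatter_msdu_list temporarily parks the buffers of a multi-buffer
+	 * (continuation-bit) MSDU until its last fragment is reaped.
+	 */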
+
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
+ srng = &hal->srng_list[dp->rx_rel_ring.ring_id];
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (budget) {
+ rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!rx_desc)
+ break;
+
+ ret = ath12k_wifi7_hal_wbm_desc_parse_err(dp, rx_desc,
+ &err_info);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse rx error in wbm_rel ring desc %d\n",
+ ret);
+ continue;
+ }
+
+ desc_info = err_info.rx_desc;
+
+		/* fall back to manual desc retrieval if hw cookie
+		 * conversion is not done
+		 */
+ if (!desc_info) {
+ desc_info = ath12k_dp_get_rx_desc(dp, err_info.cookie);
+ if (!desc_info) {
+ ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
+ err_info.cookie);
+ continue;
+ }
+ }
+
+ if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
+ ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
+
+ msdu = desc_info->skb;
+ desc_info->skb = NULL;
+
+ device_id = desc_info->device_id;
+ partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
+ if (unlikely(!partner_dp)) {
+ dev_kfree_skb_any(msdu);
+
+			/* In case the continuation bit was set in the
+			 * previous record, clean up scatter_msdu_list.
+			 */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+ continue;
+ }
+
+ list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(partner_dp->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped[device_id]++;
+ total_num_buffs_reaped++;
+
+ if (!err_info.continuation)
+ budget--;
+
+ if (err_info.push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ msdu_data = (struct hal_rx_desc *)msdu->data;
+ rxcb->err_rel_src = err_info.err_rel_src;
+ rxcb->err_code = err_info.err_code;
+ rxcb->is_first_msdu = err_info.first_msdu;
+ rxcb->is_last_msdu = err_info.last_msdu;
+ rxcb->is_continuation = err_info.continuation;
+ rxcb->rx_desc = msdu_data;
+ rxcb->peer_id = ath12k_wifi7_dp_rx_get_peer_id(dp, dp->peer_metadata_ver,
+ err_info.peer_metadata);
+
+ if (err_info.continuation) {
+ __skb_queue_tail(&scatter_msdu_list, msdu);
+ continue;
+ }
+
+ hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_dp->hal,
+ msdu_data);
+ if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
+ dev_kfree_skb_any(msdu);
+
+			/* In case the continuation bit was set in the
+			 * previous record, clean up scatter_msdu_list.
+			 */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+ continue;
+ }
+
+ if (!skb_queue_empty(&scatter_msdu_list)) {
+ struct sk_buff *msdu;
+
+ skb_queue_walk(&scatter_msdu_list, msdu) {
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->hw_link_id = hw_link_id;
+ }
+
+ skb_queue_splice_tail_init(&scatter_msdu_list,
+ &msdu_list);
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->hw_link_id = hw_link_id;
+ __skb_queue_tail(&msdu_list, msdu);
+ }
+
+	/* In case the continuation bit was set in the last record,
+	 * clean up scatter_msdu_list.
+	 */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!total_num_buffs_reaped)
+ goto done;
+
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
+ rx_ring = &partner_dp->rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(dp, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
+
+ rcu_read_lock();
+ while ((msdu = __skb_dequeue(&msdu_list))) {
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ hw_link_id = rxcb->hw_link_id;
+
+ device_id = hw_links[hw_link_id].device_id;
+ partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
+ if (unlikely(!partner_dp)) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
+ hw_link_id, device_id);
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ pdev_idx = ath12k_hw_mac_id_to_pdev_id(partner_dp->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+
+ dp_pdev = ath12k_dp_to_pdev_dp(partner_dp, pdev_idx);
+ if (!dp_pdev) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+ ar = ath12k_pdev_dp_to_ar(dp_pdev);
+
+ if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) {
+ device_id = dp_pdev->dp->device_id;
+ device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++;
+ }
+
+ ath12k_wifi7_dp_rx_wbm_err(dp_pdev, napi, msdu, &msdu_list);
+ }
+ rcu_read_unlock();
+done:
+ return total_num_buffs_reaped;
+}
+
+int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
+ u32 ring_id;
+ int ret;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
+
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+
+ tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
+ tlv_filter.offset_valid = true;
+ tlv_filter.rx_packet_offset = hal_rx_desc_sz;
+
+ tlv_filter.rx_mpdu_start_offset =
+ ath12k_hal_rx_desc_get_mpdu_start_offset_qcn9274();
+ tlv_filter.rx_msdu_end_offset =
+ ath12k_hal_rx_desc_get_msdu_end_offset_qcn9274();
+
+ tlv_filter.rx_mpdu_start_wmask = ath12k_hal_rx_mpdu_start_wmask_get_qcn9274();
+ tlv_filter.rx_msdu_end_wmask = ath12k_hal_rx_msdu_end_wmask_get_qcn9274();
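+
+	/* The word masks tell the hw which 32-bit words of the mpdu_start
+	 * and msdu_end TLVs to write, shrinking the rx descriptor to the
+	 * compact layout this driver parses.
+	 */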
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
+ tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
+
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
+ HAL_RXDMA_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+
+ return ret;
+}
+
+int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
+ u32 ring_id;
+ int ret = 0;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
+ int i;
+
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+
+ tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
+ tlv_filter.offset_valid = true;
+ tlv_filter.rx_packet_offset = hal_rx_desc_sz;
+
+ tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
+
+ tlv_filter.rx_mpdu_start_offset =
+ ath12k_hal_rx_desc_get_mpdu_start_offset_wcn7850();
+ tlv_filter.rx_msdu_end_offset =
+ ath12k_hal_rx_desc_get_msdu_end_offset_wcn7850();
+
+ /* TODO: Selectively subscribe to required qwords within msdu_end
+ * and mpdu_start and setup the mask in below msg
+ * and modify the rx_desc struct
+ */
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
+ HAL_RXDMA_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ }
+
+ return ret;
+}
+
+int ath12k_dp_rxdma_ring_sel_config_qcc2072(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
+ u32 ring_id;
+ int ret = 0;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
+ int i;
+
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+
+ tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
+ tlv_filter.offset_valid = true;
+ tlv_filter.rx_packet_offset = hal_rx_desc_sz;
+
+ tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_qcc2072, pkt_hdr_tlv);
+
+ tlv_filter.rx_mpdu_start_offset =
+ ath12k_hal_rx_desc_get_mpdu_start_offset_qcc2072();
+ tlv_filter.rx_msdu_end_offset =
+ ath12k_hal_rx_desc_get_msdu_end_offset_qcc2072();
+
+ /*
+ * TODO: Selectively subscribe to required qwords within msdu_end
+ * and mpdu_start and setup the mask in below msg
+ * and modify the rx_desc struct
+ */
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
+ HAL_RXDMA_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ }
+
+ return ret;
+}
+
+void ath12k_wifi7_dp_rx_process_reo_status(struct ath12k_dp *dp)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
+ struct hal_srng *srng;
+ struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
+ bool found = false;
+ u16 tag;
+ struct hal_reo_status reo_status;
+ void *hdr, *desc;
+
+ srng = &hal->srng_list[dp->reo_status_ring.ring_id];
+
+ memset(&reo_status, 0, sizeof(reo_status));
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ tag = hal->ops->reo_status_dec_tlv_hdr(hdr, &desc);
+
+ switch (tag) {
+ case HAL_REO_GET_QUEUE_STATS_STATUS:
+ ath12k_wifi7_hal_reo_status_queue_stats(ab, desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_QUEUE_STATUS:
+ ath12k_wifi7_hal_reo_flush_queue_status(ab, desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_CACHE_STATUS:
+ ath12k_wifi7_hal_reo_flush_cache_status(ab, desc,
+ &reo_status);
+ break;
+ case HAL_REO_UNBLOCK_CACHE_STATUS:
+ ath12k_wifi7_hal_reo_unblk_cache_status(ab, desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
+ ath12k_wifi7_hal_reo_flush_timeout_list_status(ab, desc,
+ &reo_status);
+ break;
+ case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
+ ath12k_wifi7_hal_reo_desc_thresh_reached_status(ab, desc,
+ &reo_status);
+ break;
+ case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
+ ath12k_wifi7_hal_reo_update_rx_reo_queue_status(ab, desc,
+ &reo_status);
+ break;
+ default:
+ ath12k_warn(ab, "Unknown reo status type %d\n", tag);
+ continue;
+ }
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
+ found = true;
+ list_del(&cmd->list);
+ break;
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ if (found) {
+ cmd->handler(dp, (void *)&cmd->data,
+ reo_status.uniform_hdr.cmd_status);
+ kfree(cmd);
+ }
+
+ found = false;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+}
+
+bool
+ath12k_wifi7_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ u32 tlv_tag;
+
+ tlv_tag = ab->hal.ops->rx_desc_get_mpdu_start_tag(rx_desc);
+
+ return tlv_tag == HAL_RX_MPDU_START;
+}
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/dp_rx.h b/drivers/net/wireless/ath/ath12k/wifi7/dp_rx.h
new file mode 100644
index 000000000000..8aa79faf567f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/dp_rx.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+#ifndef ATH12K_DP_RX_WIFI7_H
+#define ATH12K_DP_RX_WIFI7_H
+
+#include "../core.h"
+#include "../dp_rx.h"
+#include "hal_rx_desc.h"
+
+struct ath12k_hal_reo_cmd;
+
+int ath12k_wifi7_dp_rx_process_wbm_err(struct ath12k_dp *dp,
+ struct napi_struct *napi, int budget);
+int ath12k_wifi7_dp_rx_process_err(struct ath12k_dp *dp, struct napi_struct *napi,
+ int budget);
+int ath12k_wifi7_dp_rx_process(struct ath12k_dp *dp, int mac_id,
+ struct napi_struct *napi,
+ int budget);
+void ath12k_wifi7_dp_rx_process_reo_status(struct ath12k_dp *dp);
+int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
+int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
+int ath12k_dp_rxdma_ring_sel_config_qcc2072(struct ath12k_base *ab);
+void ath12k_wifi7_dp_setup_pn_check_reo_cmd(struct ath12k_hal_reo_cmd *cmd,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u32 cipher, enum set_key_cmd key_cmd);
+int ath12k_wifi7_dp_rx_assign_reoq(struct ath12k_base *ab, struct ath12k_dp_peer *dp_peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u16 ssn, enum hal_pn_type pn_type);
+int ath12k_wifi7_dp_rx_link_desc_return(struct ath12k_dp *dp,
+ struct ath12k_buffer_addr *buf_addr_info,
+ enum hal_wbm_rel_bm_act action);
+void ath12k_wifi7_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
+ bool rel_link_desc);
+void ath12k_wifi7_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
+ dma_addr_t paddr);
+void ath12k_wifi7_dp_rx_peer_tid_delete(struct ath12k_base *ab,
+ struct ath12k_dp_link_peer *peer, u8 tid);
+int ath12k_wifi7_dp_reo_cmd_send(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd,
+ void (*cb)(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status));
+int ath12k_wifi7_dp_reo_cache_flush(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid);
+int ath12k_wifi7_peer_rx_tid_reo_update(struct ath12k_dp *dp,
+ struct ath12k_dp_link_peer *peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u32 ba_win_sz, u16 ssn,
+ bool update_ssn);
+void ath12k_wifi7_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid);
+bool
+ath12k_wifi7_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc);
+int ath12k_wifi7_dp_rx_tid_delete_handler(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/dp_tx.c b/drivers/net/wireless/ath/ath12k/wifi7/dp_tx.c
new file mode 100644
index 000000000000..629084aa36d8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/dp_tx.c
@@ -0,0 +1,978 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "../core.h"
+#include "../debug.h"
+#include "../dp_tx.h"
+#include "../peer.h"
+#include "dp_tx.h"
+#include "hal_desc.h"
+#include "hal.h"
+#include "hal_tx.h"
+
+static void
+ath12k_wifi7_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
+ struct hal_tx_msdu_ext_desc *tcl_ext_cmd,
+ struct hal_tx_info *ti)
+{
+ tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr,
+ HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO);
+ tcl_ext_cmd->info1 = le32_encode_bits(0x0,
+ HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI) |
+ le32_encode_bits(ti->data_len,
+ HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
+
+ tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
+ le32_encode_bits(ti->encap_type,
+ HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
+ le32_encode_bits(ti->encrypt_type,
+ HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
+}
+
+#define HTT_META_DATA_ALIGNMENT 0x8
+
+/* Preparing HTT Metadata when utilized with ext MSDU */
+static int ath12k_wifi7_dp_prepare_htt_metadata(struct sk_buff *skb)
+{
+ struct hal_tx_msdu_metadata *desc_ext;
+ u8 htt_desc_size;
+	/* Size rounded up to a multiple of 8 bytes */
+ u8 htt_desc_size_aligned;
+
+ htt_desc_size = sizeof(struct hal_tx_msdu_metadata);
+ htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);
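+	/* e.g. with the 8-byte HTT_META_DATA_ALIGNMENT, a hypothetical
+	 * 12-byte metadata descriptor would be padded to 16 bytes here
+	 * (illustrative sizes; the real size comes from
+	 * struct hal_tx_msdu_metadata).
+	 */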
+
+ desc_ext = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
+ if (!desc_ext)
+ return -ENOMEM;
+
+ desc_ext->info0 = le32_encode_bits(1, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG) |
+ le32_encode_bits(0, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE) |
+ le32_encode_bits(1,
+ HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL);
+
+ return 0;
+}
+
+/* TODO: Remove the export once this file is built with wifi7 ko */
+int ath12k_wifi7_dp_tx(struct ath12k_pdev_dp *dp_pdev, struct ath12k_link_vif *arvif,
+ struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
+ bool is_mcast)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_hal *hal = dp->hal;
+ struct ath12k_base *ab = dp->ab;
+ struct hal_tx_info ti = {};
+ struct ath12k_tx_desc_info *tx_desc;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ struct hal_tcl_data_cmd *hal_tcl_desc;
+ struct hal_tx_msdu_ext_desc *msg;
+ struct sk_buff *skb_ext_desc = NULL;
+ struct hal_srng *tcl_ring;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_dp_vif *dp_vif = &ahvif->dp_vif;
+ struct ath12k_dp_link_vif *dp_link_vif;
+ struct dp_tx_ring *tx_ring;
+ u8 pool_id;
+ u8 hal_ring_id;
+ int ret;
+ u8 ring_selector, ring_map = 0;
+ bool tcl_ring_retry;
+ bool msdu_ext_desc = false;
+ bool add_htt_metadata = false;
+ u32 iova_mask = dp->hw_params->iova_mask;
+ bool is_diff_encap = false;
+ bool is_null_frame = false;
+
+ if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+ !ieee80211_is_data(hdr->frame_control))
+ return -EOPNOTSUPP;
+
+ pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
+
+	/* Let the default ring selection be based on the current processor
+	 * number: one of the 3 tcl rings is selected based on
+	 * smp_processor_id(). If that ring is full/busy, we resort to the
+	 * other available rings. If all rings are full, we drop the packet.
+	 * TODO: Add throttling logic when all rings are full
+	 */
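+	/* For example, with max_tx_ring == 3 a packet processed on CPU 4
+	 * would initially map to TCL ring 4 % 3 == 1 (illustrative; the
+	 * exact selector comes from the hw_ops callback below).
+	 */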
+ ring_selector = dp->hw_params->hw_ops->get_ring_selector(skb);
+
+tcl_ring_sel:
+ tcl_ring_retry = false;
+ ti.ring_id = ring_selector % dp->hw_params->max_tx_ring;
+
+ ring_map |= BIT(ti.ring_id);
+ ti.rbm_id = hal->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;
+
+ tx_ring = &dp->tx_ring[ti.ring_id];
+
+ tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
+ if (!tx_desc)
+ return -ENOMEM;
+
+ dp_link_vif = ath12k_dp_vif_to_dp_link_vif(&ahvif->dp_vif, arvif->link_id);
+
+ ti.bank_id = dp_link_vif->bank_id;
+ ti.meta_data_flags = dp_link_vif->tcl_metadata;
+
+ if (dp_vif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
+ test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags)) {
+ if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
+ ti.encrypt_type =
+ ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
+
+ if (ieee80211_has_protected(hdr->frame_control))
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ } else {
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+
+ msdu_ext_desc = true;
+ }
+
+ if (gsn_valid) {
+ /* Reset and Initialize meta_data_flags with Global Sequence
+ * Number (GSN) info.
+ */
+ ti.meta_data_flags =
+ u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM,
+ HTT_TCL_META_DATA_TYPE) |
+ u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
+ }
+
+ ti.encap_type = ath12k_dp_tx_get_encap_type(ab, skb);
+ ti.addr_search_flags = dp_link_vif->hal_addr_search_flags;
+ ti.search_type = dp_link_vif->search_type;
+ ti.type = HAL_TCL_DESC_TYPE_BUFFER;
+ ti.pkt_offset = 0;
+ ti.lmac_id = dp_link_vif->lmac_id;
+
+ ti.vdev_id = dp_link_vif->vdev_id;
+ if (gsn_valid)
+ ti.vdev_id += HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID;
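+	/* The base offset shifts the vdev_id into the range the firmware
+	 * reserves for host-reinjected MLO multicast frames (as implied by
+	 * HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID).
+	 */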
+
+ ti.bss_ast_hash = dp_link_vif->ast_hash;
+ ti.bss_ast_idx = dp_link_vif->ast_idx;
+ ti.dscp_tid_tbl_idx = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
+ ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN) |
+ u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN) |
+ u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN) |
+ u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN) |
+ u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN);
+ }
+
+ ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE);
+
+ ti.tid = ath12k_dp_tx_get_tid(skb);
+
+ switch (ti.encap_type) {
+ case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
+ is_null_frame = ieee80211_is_nullfunc(hdr->frame_control);
+ if (ahvif->vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) {
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE) || is_null_frame)
+ is_diff_encap = true;
+
+			/* Firmware expects an msdu ext descriptor for
+			 * nwifi/raw packets received in ETH mode; without it,
+			 * tx failures were observed for multicast packets in
+			 * ETH mode.
+			 */
+ msdu_ext_desc = true;
+ } else {
+ ath12k_dp_tx_encap_nwifi(skb);
+ }
+ break;
+ case HAL_TCL_ENCAP_TYPE_RAW:
+ if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ret = -EINVAL;
+ goto fail_remove_tx_buf;
+ }
+ break;
+ case HAL_TCL_ENCAP_TYPE_ETHERNET:
+ /* no need to encap */
+ break;
+ case HAL_TCL_ENCAP_TYPE_802_3:
+ default:
+ /* TODO: Take care of other encap modes as well */
+ ret = -EINVAL;
+ atomic_inc(&dp->device_stats.tx_err.misc_fail);
+ goto fail_remove_tx_buf;
+ }
+
+ if (iova_mask &&
+ (unsigned long)skb->data & iova_mask) {
+ ret = ath12k_dp_tx_align_payload(dp, &skb);
+ if (ret) {
+ ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
+			/* don't bail out; give the original buffer a chance
+			 * even though it is unaligned.
+			 */
+ goto map;
+ }
+
+		/* hdr points to a stale location after realignment,
+		 * so refresh it for later use.
+		 */
+ hdr = (void *)skb->data;
+ }
+map:
+ ti.paddr = dma_map_single(dp->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, ti.paddr)) {
+ atomic_inc(&dp->device_stats.tx_err.misc_fail);
+ ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
+ ret = -ENOMEM;
+ goto fail_remove_tx_buf;
+ }
+
+ if ((!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags) &&
+ !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+ !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+ ieee80211_has_protected(hdr->frame_control)) ||
+ is_diff_encap) {
+		/* Firmware does not expect metadata for QoS null nwifi
+		 * packets received in ETH encap mode.
+		 */
+ if (is_null_frame && msdu_ext_desc)
+ goto skip_htt_meta;
+
+		/* Add metadata for sw-encrypted vlan group traffic and
+		 * EAPOL nwifi packets received in ETH encap mode.
+		 */
+ add_htt_metadata = true;
+ msdu_ext_desc = true;
+ ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
+skip_htt_meta:
+ ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
+ ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+
+ tx_desc->skb = skb;
+ tx_desc->mac_id = dp_link_vif->pdev_idx;
+ ti.desc_id = tx_desc->desc_id;
+ ti.data_len = skb->len;
+ skb_cb->paddr = ti.paddr;
+
+ if (msdu_ext_desc) {
+ skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc));
+ if (!skb_ext_desc) {
+ ret = -ENOMEM;
+ goto fail_unmap_dma;
+ }
+
+ skb_put(skb_ext_desc, sizeof(struct hal_tx_msdu_ext_desc));
+ memset(skb_ext_desc->data, 0, skb_ext_desc->len);
+
+ msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
+ ath12k_wifi7_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
+
+ if (add_htt_metadata) {
+ ret = ath12k_wifi7_dp_prepare_htt_metadata(skb_ext_desc);
+ if (ret < 0) {
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+ "Failed to add HTT meta data, dropping packet\n");
+ goto fail_free_ext_skb;
+ }
+ }
+
+ ti.paddr = dma_map_single(dp->dev, skb_ext_desc->data,
+ skb_ext_desc->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dp->dev, ti.paddr);
+ if (ret)
+ goto fail_free_ext_skb;
+
+ ti.data_len = skb_ext_desc->len;
+ ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
+
+ skb_cb->paddr_ext_desc = ti.paddr;
+ tx_desc->skb_ext_desc = skb_ext_desc;
+ }
+
+ hal_ring_id = tx_ring->tcl_data_ring.ring_id;
+ tcl_ring = &hal->srng_list[hal_ring_id];
+
+ spin_lock_bh(&tcl_ring->lock);
+
+ ath12k_hal_srng_access_begin(ab, tcl_ring);
+
+ hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
+ if (!hal_tcl_desc) {
+ /* NOTE: It is highly unlikely we'll be running out of tcl_ring
+ * desc because the desc is directly enqueued onto hw queue.
+ */
+ ath12k_hal_srng_access_end(ab, tcl_ring);
+ dp->device_stats.tx_err.desc_na[ti.ring_id]++;
+ spin_unlock_bh(&tcl_ring->lock);
+ ret = -ENOMEM;
+
+		/* Checking another ring for a free tcl descriptor now, after
+		 * a failure due to a full tcl ring, is better than checking
+		 * this ring earlier for each pkt tx.
+		 * Restart ring selection if some rings are not checked yet.
+		 */
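+		/* ring_map accumulates BIT(ring_id) for every ring tried, so
+		 * it equals BIT(max_tx_ring) - 1 once all rings have been
+		 * checked.
+		 */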
+ if (ring_map != (BIT(dp->hw_params->max_tx_ring) - 1) &&
+ dp->hw_params->tcl_ring_retry) {
+ tcl_ring_retry = true;
+ ring_selector++;
+ }
+
+ goto fail_unmap_dma_ext;
+ }
+
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_encap_type[ti.encap_type]++;
+ arvif->link_stats.tx_encrypt_type[ti.encrypt_type]++;
+ arvif->link_stats.tx_desc_type[ti.type]++;
+
+ if (is_mcast)
+ arvif->link_stats.tx_bcast_mcast++;
+ else
+ arvif->link_stats.tx_enqueued++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+
+ dp->device_stats.tx_enqueued[ti.ring_id]++;
+
+ ath12k_wifi7_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
+
+ ath12k_hal_srng_access_end(ab, tcl_ring);
+
+ spin_unlock_bh(&tcl_ring->lock);
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ",
+ skb->data, skb->len);
+
+ atomic_inc(&dp_pdev->num_tx_pending);
+
+ return 0;
+
+fail_unmap_dma_ext:
+ if (skb_cb->paddr_ext_desc)
+ dma_unmap_single(dp->dev, skb_cb->paddr_ext_desc,
+ skb_ext_desc->len,
+ DMA_TO_DEVICE);
+fail_free_ext_skb:
+ kfree_skb(skb_ext_desc);
+
+fail_unmap_dma:
+ dma_unmap_single(dp->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+
+fail_remove_tx_buf:
+ ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
+
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_dropped++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+
+ if (tcl_ring_retry)
+ goto tcl_ring_sel;
+
+ return ret;
+}
+
+static void
+ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_dp *dp,
+ struct ath12k_tx_desc_params *desc_params,
+ struct dp_tx_ring *tx_ring,
+ struct ath12k_dp_htt_wbm_tx_status *ts,
+ u16 peer_id)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct ieee80211_tx_info *info;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_skb_cb *skb_cb;
+ struct ieee80211_vif *vif;
+ struct ath12k_vif *ahvif;
+ struct sk_buff *msdu = desc_params->skb;
+ s32 noise_floor;
+ struct ieee80211_tx_status status = {};
+ struct ath12k_dp_link_peer *peer;
+ struct ath12k_pdev_dp *dp_pdev;
+ u8 pdev_id;
+
+ skb_cb = ATH12K_SKB_CB(msdu);
+ info = IEEE80211_SKB_CB(msdu);
+
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(dp->hw_params, desc_params->mac_id);
+
+ rcu_read_lock();
+ dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_id);
+ if (!dp_pdev) {
+ rcu_read_unlock();
+ return;
+ }
+
+ dp->device_stats.tx_completed[tx_ring->tcl_data_ring_id]++;
+
+ if (atomic_dec_and_test(&dp_pdev->num_tx_pending))
+ wake_up(&dp_pdev->tx_empty_waitq);
+
+ dma_unmap_single(dp->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ if (skb_cb->paddr_ext_desc) {
+ dma_unmap_single(dp->dev, skb_cb->paddr_ext_desc,
+ desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(desc_params->skb_ext_desc);
+ }
+
+ vif = skb_cb->vif;
+ if (vif) {
+ ahvif = ath12k_vif_to_ahvif(vif);
+ arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
+ if (arvif) {
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_completed++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+ }
+ }
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ if (ts->acked) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map)) {
+ struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
+
+ spin_lock_bh(&ar->data_lock);
+ noise_floor = ath12k_pdev_get_noise_floor(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ info->status.ack_signal += noise_floor;
+ }
+
+ info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ } else {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ }
+ }
+
+ peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "dp_tx: failed to find the peer with peer_id %d\n", peer_id);
+ ieee80211_free_txskb(ath12k_pdev_dp_to_hw(dp_pdev), msdu);
+ goto exit;
+ } else {
+ status.sta = peer->sta;
+ }
+
+ status.info = info;
+ status.skb = msdu;
+ ieee80211_tx_status_ext(ath12k_pdev_dp_to_hw(dp_pdev), &status);
+exit:
+ rcu_read_unlock();
+}
+
+static void
+ath12k_dp_tx_process_htt_tx_complete(struct ath12k_dp *dp, void *desc,
+ struct dp_tx_ring *tx_ring,
+ struct ath12k_tx_desc_params *desc_params)
+{
+ struct htt_tx_wbm_completion *status_desc;
+ struct ath12k_dp_htt_wbm_tx_status ts = {};
+ enum hal_wbm_htt_tx_comp_status wbm_status;
+ u16 peer_id;
+
+ status_desc = desc;
+
+ wbm_status = le32_get_bits(status_desc->info0,
+ HTT_TX_WBM_COMP_INFO0_STATUS);
+ dp->device_stats.fw_tx_status[wbm_status]++;
+
+ switch (wbm_status) {
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
+ ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
+ ts.ack_rssi = le32_get_bits(status_desc->info2,
+ HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
+
+ peer_id = le32_get_bits(((struct hal_wbm_completion_ring_tx *)desc)->
+ info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
+
+ ath12k_dp_tx_htt_tx_complete_buf(dp, desc_params, tx_ring, &ts, peer_id);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH:
+ ath12k_dp_tx_free_txbuf(dp, tx_ring, desc_params);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
+ /* This event is to be handled only when the driver decides to
+ * use WDS offload functionality.
+ */
+ break;
+ default:
+ ath12k_warn(dp->ab, "Unknown htt wbm tx status %d\n", wbm_status);
+ break;
+ }
+}
+
+static void ath12k_wifi7_dp_tx_update_txcompl(struct ath12k_pdev_dp *dp_pdev,
+ struct hal_tx_status *ts)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_dp_link_peer *peer;
+ struct ath12k_link_sta *arsta;
+ struct rate_info txrate = {};
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
+ u16 rate, ru_tones;
+ u8 rate_idx = 0;
+ int ret;
+
+ peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(dp->ab, ATH12K_DBG_DP_TX,
+ "failed to find the peer by id %u\n", ts->peer_id);
+ return;
+ }
+
+ spin_lock_bh(&dp->dp_lock);
+
+ sta = peer->sta;
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = &ahsta->deflink;
+
+ spin_unlock_bh(&dp->dp_lock);
+
+	/* Prefer the real NSS value from peer->last_txrate.nss; if it is
+	 * invalid, fall back to the NSS value established at assoc.
+	 */
+ if (peer->last_txrate.nss)
+ txrate.nss = peer->last_txrate.nss;
+ else
+ txrate.nss = arsta->peer_nss;
+
+ switch (ts->pkt_type) {
+ case HAL_TX_RATE_STATS_PKT_TYPE_11A:
+ case HAL_TX_RATE_STATS_PKT_TYPE_11B:
+ ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs,
+ ts->pkt_type,
+ &rate_idx,
+ &rate);
+ if (ret < 0) {
+ ath12k_warn(dp->ab, "Invalid tx legacy rate %d\n", ret);
+ return;
+ }
+
+ txrate.legacy = rate;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11N:
+ if (ts->mcs > ATH12K_HT_MCS_MAX) {
+ ath12k_warn(dp->ab, "Invalid HT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ if (txrate.nss != 0)
+ txrate.mcs = ts->mcs + 8 * (txrate.nss - 1);
+
+ txrate.flags = RATE_INFO_FLAGS_MCS;
+
+ if (ts->sgi)
+ txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11AC:
+ if (ts->mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(dp->ab, "Invalid VHT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+
+ if (ts->sgi)
+ txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11AX:
+ if (ts->mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(dp->ab, "Invalid HE mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi);
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11BE:
+ if (ts->mcs > ATH12K_EHT_MCS_MAX) {
+ ath12k_warn(dp->ab, "Invalid EHT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
+ txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi);
+ break;
+ default:
+ ath12k_warn(dp->ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
+ return;
+ }
+
+ txrate.bw = ath12k_mac_bw_to_mac80211_bw(ts->bw);
+
+ if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ txrate.bw = RATE_INFO_BW_HE_RU;
+ ru_tones = ath12k_mac_he_convert_tones_to_ru_tones(ts->tones);
+ txrate.he_ru_alloc =
+ ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+ }
+
+ if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) {
+ txrate.bw = RATE_INFO_BW_EHT_RU;
+ txrate.eht_ru_alloc =
+ ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(ts->tones);
+ }
+
+ spin_lock_bh(&dp->dp_lock);
+ peer->txrate = txrate;
+ spin_unlock_bh(&dp->dp_lock);
+}
+
+static void ath12k_wifi7_dp_tx_complete_msdu(struct ath12k_pdev_dp *dp_pdev,
+ struct ath12k_tx_desc_params *desc_params,
+ struct hal_tx_status *ts,
+ int ring)
+{
+ struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_base *ab = dp->ab;
+ struct ieee80211_tx_info *info;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_skb_cb *skb_cb;
+ struct ieee80211_vif *vif;
+ struct ath12k_vif *ahvif;
+ struct sk_buff *msdu = desc_params->skb;
+ s32 noise_floor;
+ struct ieee80211_tx_status status = {};
+ struct ieee80211_rate_status status_rate = {};
+ struct ath12k_dp_link_peer *peer;
+ struct rate_info rate;
+
+ if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
+ /* Must not happen */
+ return;
+ }
+
+ skb_cb = ATH12K_SKB_CB(msdu);
+ dp->device_stats.tx_completed[ring]++;
+
+ dma_unmap_single(dp->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ if (skb_cb->paddr_ext_desc) {
+ dma_unmap_single(dp->dev, skb_cb->paddr_ext_desc,
+ desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(desc_params->skb_ext_desc);
+ }
+
+ rcu_read_lock();
+
+ if (!rcu_dereference(ab->pdevs_active[dp_pdev->mac_id])) {
+ ieee80211_free_txskb(ath12k_pdev_dp_to_hw(dp_pdev), msdu);
+ goto exit;
+ }
+
+ if (!skb_cb->vif) {
+ ieee80211_free_txskb(ath12k_pdev_dp_to_hw(dp_pdev), msdu);
+ goto exit;
+ }
+
+ vif = skb_cb->vif;
+ if (vif) {
+ ahvif = ath12k_vif_to_ahvif(vif);
+ arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
+ if (arvif) {
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_completed++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+ }
+ }
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+	/* skip tx rate update from ieee80211_status */
+ info->status.rates[0].idx = -1;
+
+ switch (ts->status) {
+ case HAL_WBM_TQM_REL_REASON_FRAME_ACKED:
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map)) {
+ struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
+
+ spin_lock_bh(&ar->data_lock);
+ noise_floor = ath12k_pdev_get_noise_floor(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ info->status.ack_signal += noise_floor;
+ }
+
+ info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
+ break;
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ break;
+ }
+ fallthrough;
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
+ case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
+		/* The failure status is due to an internal firmware tx
+		 * failure; drop the frame and do not report its status to
+		 * the upper layer.
+		 */
+ ieee80211_free_txskb(ath12k_pdev_dp_to_hw(dp_pdev), msdu);
+ goto exit;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n",
+ ts->status);
+ break;
+ }
+
+	/* NOTE: Tx rate status reporting. The tx completion status does not
+	 * carry the information (for example, nss) needed to build the tx
+	 * rate, so we might end up reporting it out-of-band via HTT stats.
+	 */
+
+ ath12k_wifi7_dp_tx_update_txcompl(dp_pdev, ts);
+
+ peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_err(ab,
+ "dp_tx: failed to find the peer with peer_id %d\n",
+ ts->peer_id);
+ ieee80211_free_txskb(ath12k_pdev_dp_to_hw(dp_pdev), msdu);
+ goto exit;
+ }
+
+ status.sta = peer->sta;
+ status.info = info;
+ status.skb = msdu;
+ rate = peer->last_txrate;
+
+ status_rate.rate_idx = rate;
+ status_rate.try_count = 1;
+
+ status.rates = &status_rate;
+ status.n_rates = 1;
+ ieee80211_tx_status_ext(ath12k_pdev_dp_to_hw(dp_pdev), &status);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void
+ath12k_wifi7_dp_tx_status_parse(struct ath12k_dp *dp,
+ struct hal_wbm_completion_ring_tx *desc,
+ struct hal_tx_status *ts)
+{
+ u32 info0 = le32_to_cpu(desc->rate_stats.info0);
+
+ ts->buf_rel_source =
+ le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
+ if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
+ ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
+ return;
+
+ if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
+ return;
+
+ ts->status = le32_get_bits(desc->info0,
+ HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
+
+ ts->ppdu_id = le32_get_bits(desc->info1,
+ HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
+
+ ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
+
+ ts->ack_rssi = le32_get_bits(desc->info2,
+ HAL_WBM_COMPL_TX_INFO2_ACK_FRAME_RSSI);
+
+ if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) {
+ ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE);
+ ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS);
+ ts->sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI);
+ ts->bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW);
+ ts->tones = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_TONES_IN_RU);
+ ts->ofdma = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_OFDMA_TX);
+ }
+}
+
+void ath12k_wifi7_dp_tx_completion_handler(struct ath12k_dp *dp, int ring_id)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_pdev_dp *dp_pdev;
+ int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
+ struct hal_srng *status_ring = &dp->hal->srng_list[hal_ring_id];
+ struct ath12k_tx_desc_info *tx_desc = NULL;
+ struct hal_tx_status ts = {};
+ struct ath12k_tx_desc_params desc_params;
+ struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
+ struct hal_wbm_release_ring *desc;
+ u8 pdev_idx;
+ u64 desc_va;
+ enum hal_wbm_rel_src_module buf_rel_source;
+ enum hal_wbm_tqm_rel_reason rel_status;
+
+ spin_lock_bh(&status_ring->lock);
+
+ ath12k_hal_srng_access_begin(ab, status_ring);
+
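+	/* Stage 1: drain tx completions from the hw ring into the driver's
+	 * local tx_status buffer while holding the srng lock; they are
+	 * processed outside the lock in the loop further below.
+	 */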
+ while (ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head) !=
+ tx_ring->tx_status_tail) {
+ desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
+ if (!desc)
+ break;
+
+ memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
+ desc, sizeof(*desc));
+ tx_ring->tx_status_head =
+ ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head);
+ }
+
+ if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
+ (ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head) ==
+ tx_ring->tx_status_tail)) {
+ /* TODO: Process pending tx_status messages when kfifo_is_full() */
+ ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
+ }
+
+ ath12k_hal_srng_access_end(ab, status_ring);
+
+ spin_unlock_bh(&status_ring->lock);
+
+ while (ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_tail) !=
+ tx_ring->tx_status_head) {
+ struct hal_wbm_completion_ring_tx *tx_status;
+ u32 desc_id;
+
+ tx_ring->tx_status_tail =
+ ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_tail);
+ tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
+ ath12k_wifi7_dp_tx_status_parse(dp, tx_status, &ts);
+
+ if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) {
+ /* HW done cookie conversion */
+ desc_va = ((u64)le32_to_cpu(tx_status->buf_va_hi) << 32 |
+ le32_to_cpu(tx_status->buf_va_lo));
+ tx_desc = (struct ath12k_tx_desc_info *)((unsigned long)desc_va);
+ } else {
+ /* SW does cookie conversion to VA */
+ desc_id = le32_get_bits(tx_status->buf_va_hi,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+
+ tx_desc = ath12k_dp_get_tx_desc(dp, desc_id);
+ }
+ if (!tx_desc) {
+ ath12k_warn(ab, "unable to retrieve tx_desc!");
+ continue;
+ }
+
+ desc_params.mac_id = tx_desc->mac_id;
+ desc_params.skb = tx_desc->skb;
+ desc_params.skb_ext_desc = tx_desc->skb_ext_desc;
+
+ /* Find the HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE value */
+ buf_rel_source = le32_get_bits(tx_status->info0,
+ HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE);
+ dp->device_stats.tx_wbm_rel_source[buf_rel_source]++;
+
+ rel_status = le32_get_bits(tx_status->info0,
+ HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
+ dp->device_stats.tqm_rel_reason[rel_status]++;
+
+ /* Release descriptor as soon as extracting necessary info
+ * to reduce contention
+ */
+ ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
+ if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
+ ath12k_dp_tx_process_htt_tx_complete(dp, (void *)tx_status,
+ tx_ring, &desc_params);
+ continue;
+ }
+
+ pdev_idx = ath12k_hw_mac_id_to_pdev_id(dp->hw_params, desc_params.mac_id);
+
+ rcu_read_lock();
+
+ dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx);
+ if (!dp_pdev) {
+ rcu_read_unlock();
+ continue;
+ }
+
+ if (atomic_dec_and_test(&dp_pdev->num_tx_pending))
+ wake_up(&dp_pdev->tx_empty_waitq);
+
+ ath12k_wifi7_dp_tx_complete_msdu(dp_pdev, &desc_params, &ts,
+ tx_ring->tcl_data_ring_id);
+ rcu_read_unlock();
+ }
+}
+
+u32 ath12k_wifi7_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
+ struct ath12k_link_vif *arvif)
+{
+ u32 bank_config = 0;
+ u8 link_id = arvif->link_id;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_dp_vif *dp_vif = &ahvif->dp_vif;
+ struct ath12k_dp_link_vif *dp_link_vif;
+
+ dp_link_vif = ath12k_dp_vif_to_dp_link_vif(dp_vif, link_id);
+
+	/* Only valid for raw frames with HW crypto enabled.
+	 * With SW crypto, mac80211 sets the key per packet.
+	 */
+ if (dp_vif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
+ test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
+ bank_config |=
+ u32_encode_bits(ath12k_dp_tx_get_encrypt_type(dp_vif->key_cipher),
+ HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
+
+ bank_config |= u32_encode_bits(dp_vif->tx_encap_type,
+ HAL_TX_BANK_CONFIG_ENCAP_TYPE);
+ bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
+ u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
+ u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
+
+ /* only valid if idx_lookup_override is not set in tcl_data_cmd */
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+ bank_config |= u32_encode_bits(1, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
+ else
+ bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
+
+ bank_config |= u32_encode_bits(dp_link_vif->hal_addr_search_flags &
+ HAL_TX_ADDRX_EN,
+ HAL_TX_BANK_CONFIG_ADDRX_EN) |
+ u32_encode_bits(!!(dp_link_vif->hal_addr_search_flags &
+ HAL_TX_ADDRY_EN),
+ HAL_TX_BANK_CONFIG_ADDRY_EN);
+
+ bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(ahvif->vif) ? 3 : 0,
+ HAL_TX_BANK_CONFIG_MESH_EN) |
+ u32_encode_bits(dp_link_vif->vdev_id_check_en,
+ HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);
+
+ bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID);
+
+ return bank_config;
+}
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/dp_tx.h b/drivers/net/wireless/ath/ath12k/wifi7/dp_tx.h
new file mode 100644
index 000000000000..24cf7972d41b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/dp_tx.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_DP_TX_WIFI7_H
+#define ATH12K_DP_TX_WIFI7_H
+
+int ath12k_wifi7_dp_tx(struct ath12k_pdev_dp *dp_pdev, struct ath12k_link_vif *arvif,
+ struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
+ bool is_mcast);
+void ath12k_wifi7_dp_tx_completion_handler(struct ath12k_dp *dp, int ring_id);
+u32 ath12k_wifi7_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
+ struct ath12k_link_vif *arvif);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/hal.c b/drivers/net/wireless/ath/ath12k/wifi7/hal.c
new file mode 100644
index 000000000000..bd1753ca0db6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+#include "hw.h"
+#include "hal_desc.h"
+#include "../hal.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "../debug.h"
+#include "../hif.h"
+#include "hal_qcn9274.h"
+#include "hal_wcn7850.h"
+#include "hal_qcc2072.h"
+
+static const struct ath12k_hw_version_map ath12k_wifi7_hw_ver_map[] = {
+ [ATH12K_HW_QCN9274_HW10] = {
+ .hal_ops = &hal_qcn9274_ops,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274_compact),
+ .tcl_to_wbm_rbm_map = ath12k_hal_tcl_to_wbm_rbm_map_qcn9274,
+ .hal_params = &ath12k_hw_hal_params_qcn9274,
+ .hw_regs = &qcn9274_v1_regs,
+ },
+ [ATH12K_HW_QCN9274_HW20] = {
+ .hal_ops = &hal_qcn9274_ops,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274_compact),
+ .tcl_to_wbm_rbm_map = ath12k_hal_tcl_to_wbm_rbm_map_qcn9274,
+ .hal_params = &ath12k_hw_hal_params_qcn9274,
+ .hw_regs = &qcn9274_v2_regs,
+ },
+ [ATH12K_HW_WCN7850_HW20] = {
+ .hal_ops = &hal_wcn7850_ops,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn7850),
+ .tcl_to_wbm_rbm_map = ath12k_hal_tcl_to_wbm_rbm_map_wcn7850,
+ .hal_params = &ath12k_hw_hal_params_wcn7850,
+ .hw_regs = &wcn7850_regs,
+ },
+ [ATH12K_HW_IPQ5332_HW10] = {
+ .hal_ops = &hal_qcn9274_ops,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274_compact),
+ .tcl_to_wbm_rbm_map = ath12k_hal_tcl_to_wbm_rbm_map_qcn9274,
+ .hal_params = &ath12k_hw_hal_params_ipq5332,
+ .hw_regs = &ipq5332_regs,
+ },
+ [ATH12K_HW_QCC2072_HW10] = {
+ .hal_ops = &hal_qcc2072_ops,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcc2072),
+ .tcl_to_wbm_rbm_map = ath12k_hal_tcl_to_wbm_rbm_map_wcn7850,
+ .hal_params = &ath12k_hw_hal_params_wcn7850,
+ .hw_regs = &qcc2072_regs,
+ },
+};
+
+int ath12k_wifi7_hal_init(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+
+ memset(hal, 0, sizeof(*hal));
+
+ hal->ops = ath12k_wifi7_hw_ver_map[ab->hw_rev].hal_ops;
+ hal->hal_desc_sz = ath12k_wifi7_hw_ver_map[ab->hw_rev].hal_desc_sz;
+ hal->tcl_to_wbm_rbm_map = ath12k_wifi7_hw_ver_map[ab->hw_rev].tcl_to_wbm_rbm_map;
+ hal->regs = ath12k_wifi7_hw_ver_map[ab->hw_rev].hw_regs;
+ hal->hal_params = ath12k_wifi7_hw_ver_map[ab->hw_rev].hal_params;
+ hal->hal_wbm_release_ring_tx_size = sizeof(struct hal_wbm_release_ring_tx);
+
+ return 0;
+}
+
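+/* The helpers below compute each REO1 R0 register offset relative to the
+ * ring base (HAL_REO1_RING_BASE_LSB), so a single reg_base + offset write
+ * sequence works for any REO1 destination ring instance.
+ */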
+static unsigned int ath12k_wifi7_hal_reo1_ring_id_offset(struct ath12k_hal *hal)
+{
+ return HAL_REO1_RING_ID(hal) - HAL_REO1_RING_BASE_LSB(hal);
+}
+
+static unsigned int
+ath12k_wifi7_hal_reo1_ring_msi1_base_lsb_offset(struct ath12k_hal *hal)
+{
+ return HAL_REO1_RING_MSI1_BASE_LSB(hal) - HAL_REO1_RING_BASE_LSB(hal);
+}
+
+static unsigned int
+ath12k_wifi7_hal_reo1_ring_msi1_base_msb_offset(struct ath12k_hal *hal)
+{
+ return HAL_REO1_RING_MSI1_BASE_MSB(hal) - HAL_REO1_RING_BASE_LSB(hal);
+}
+
+static unsigned int ath12k_wifi7_hal_reo1_ring_msi1_data_offset(struct ath12k_hal *hal)
+{
+ return HAL_REO1_RING_MSI1_DATA(hal) - HAL_REO1_RING_BASE_LSB(hal);
+}
+
+static unsigned int ath12k_wifi7_hal_reo1_ring_base_msb_offset(struct ath12k_hal *hal)
+{
+ return HAL_REO1_RING_BASE_MSB(hal) - HAL_REO1_RING_BASE_LSB(hal);
+}
+
+static unsigned int
+ath12k_wifi7_hal_reo1_ring_producer_int_setup_offset(struct ath12k_hal *hal)
+{
+ return HAL_REO1_RING_PRODUCER_INT_SETUP(hal) - HAL_REO1_RING_BASE_LSB(hal);
+}
+
+static unsigned int ath12k_wifi7_hal_reo1_ring_hp_addr_lsb_offset(struct ath12k_hal *hal)
+{
+ return HAL_REO1_RING_HP_ADDR_LSB(hal) - HAL_REO1_RING_BASE_LSB(hal);
+}
+
+static unsigned int ath12k_wifi7_hal_reo1_ring_hp_addr_msb_offset(struct ath12k_hal *hal)
+{
+ return HAL_REO1_RING_HP_ADDR_MSB(hal) - HAL_REO1_RING_BASE_LSB(hal);
+}
+
+static unsigned int ath12k_wifi7_hal_reo1_ring_misc_offset(struct ath12k_hal *hal)
+{
+ return HAL_REO1_RING_MISC(hal) - HAL_REO1_RING_BASE_LSB(hal);
+}
+
+void ath12k_wifi7_hal_ce_dst_setup(struct ath12k_base *ab,
+ struct hal_srng *srng, int ring_num)
+{
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
+ u32 addr;
+ u32 val;
+
+ addr = HAL_CE_DST_RING_CTRL +
+ srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
+ ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
+
+ val = ath12k_hif_read32(ab, addr);
+ val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
+ val |= u32_encode_bits(srng->u.dst_ring.max_buffer_length,
+ HAL_CE_DST_R0_DEST_CTRL_MAX_LEN);
+ ath12k_hif_write32(ab, addr, val);
+}
+
+void ath12k_wifi7_hal_srng_dst_hw_init(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ u32 val;
+ u64 hp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath12k_hif_write32(ab, reg_base +
+ ath12k_wifi7_hal_reo1_ring_msi1_base_lsb_offset(hal),
+ srng->msi_addr);
+
+ val = u32_encode_bits(((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_REO1_RING_MSI1_BASE_MSB_ADDR) |
+ HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath12k_hif_write32(ab, reg_base +
+ ath12k_wifi7_hal_reo1_ring_msi1_base_msb_offset(hal),
+ val);
+
+ ath12k_hif_write32(ab,
+ reg_base +
+ ath12k_wifi7_hal_reo1_ring_msi1_data_offset(hal),
+ srng->msi_data);
+ }
+
+ ath12k_hif_write32(ab, reg_base, srng->ring_base_paddr);
+
+ val = u32_encode_bits(((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
+ u32_encode_bits((srng->entry_size * srng->num_entries),
+ HAL_REO1_RING_BASE_MSB_RING_SIZE);
+ ath12k_hif_write32(ab, reg_base + ath12k_wifi7_hal_reo1_ring_base_msb_offset(hal),
+ val);
+
+ val = u32_encode_bits(srng->ring_id, HAL_REO1_RING_ID_RING_ID) |
+ u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
+ ath12k_hif_write32(ab, reg_base + ath12k_wifi7_hal_reo1_ring_id_offset(hal), val);
+
+ /* interrupt setup */
+ val = u32_encode_bits((srng->intr_timer_thres_us >> 3),
+ HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD);
+
+ val |= u32_encode_bits((srng->intr_batch_cntr_thres_entries * srng->entry_size),
+ HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD);
+
+ ath12k_hif_write32(ab,
+ reg_base +
+ ath12k_wifi7_hal_reo1_ring_producer_int_setup_offset(hal),
+ val);
+
+ hp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath12k_hif_write32(ab, reg_base +
+ ath12k_wifi7_hal_reo1_ring_hp_addr_lsb_offset(hal),
+ hp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath12k_hif_write32(ab, reg_base +
+ ath12k_wifi7_hal_reo1_ring_hp_addr_msb_offset(hal),
+ hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath12k_hif_write32(ab, reg_base, 0);
+ ath12k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
+ *srng->u.dst_ring.hp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_REO1_RING_MISC_MSI_SWAP;
+ val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
+
+ ath12k_hif_write32(ab, reg_base + ath12k_wifi7_hal_reo1_ring_misc_offset(hal),
+ val);
+}
+
+void ath12k_wifi7_hal_srng_src_hw_init(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ u32 val;
+ u64 tp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath12k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(hal),
+ srng->msi_addr);
+
+ val = u32_encode_bits(((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_TCL1_RING_MSI1_BASE_MSB_ADDR) |
+ HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath12k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(hal),
+ val);
+
+ ath12k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_DATA_OFFSET(hal),
+ srng->msi_data);
+ }
+
+ ath12k_hif_write32(ab, reg_base, srng->ring_base_paddr);
+
+ val = u32_encode_bits(((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
+ u32_encode_bits((srng->entry_size * srng->num_entries),
+ HAL_TCL1_RING_BASE_MSB_RING_SIZE);
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(hal), val);
+
+ val = u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(hal), val);
+
+ val = u32_encode_bits(srng->intr_timer_thres_us,
+ HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD);
+
+ val |= u32_encode_bits((srng->intr_batch_cntr_thres_entries * srng->entry_size),
+ HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD);
+
+ ath12k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(hal),
+ val);
+
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ val |= u32_encode_bits(srng->u.src_ring.low_threshold,
+ HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD);
+ }
+ ath12k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(hal),
+ val);
+
+ if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+ tp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath12k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(hal),
+ tp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath12k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(hal),
+ tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+ }
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath12k_hif_write32(ab, reg_base, 0);
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
+ *srng->u.src_ring.tp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_TCL1_RING_MISC_MSI_SWAP;
+
+ /* Loop count is not used for SRC rings */
+ val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
+
+ val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
+
+ if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK)
+ val |= HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE;
+
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(hal), val);
+}
+
+void ath12k_wifi7_hal_set_umac_srng_ptr_addr(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+
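+ /* Without shadow register support the ring pointers are accessed
+ * directly in the memory-mapped R2 register space; with shadow
+ * support the pointer addresses were already set up when the shadow
+ * configuration was programmed.
+ */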
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ if (!ab->hw_params->supports_shadow_regs) {
+ srng->u.src_ring.hp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base);
+ } else {
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "hal reg_base 0x%x shadow 0x%lx\n",
+ reg_base,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem);
+ }
+ } else {
+ if (!ab->hw_params->supports_shadow_regs) {
+ srng->u.dst_ring.tp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base +
+ (HAL_REO1_RING_TP - HAL_REO1_RING_HP));
+ } else {
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "target_reg 0x%x shadow 0x%lx\n",
+ reg_base + HAL_REO1_RING_TP - HAL_REO1_RING_HP,
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem);
+ }
+ }
+}
+
+int ath12k_wifi7_hal_srng_get_ring_id(struct ath12k_hal *hal,
+ enum hal_ring_type type,
+ int ring_num, int mac_id)
+{
+ struct hal_srng_config *srng_config = &hal->srng_config[type];
+ int ring_id;
+
+ if (ring_num >= srng_config->max_rings) {
+ ath12k_warn(hal, "invalid ring number :%d\n", ring_num);
+ return -EINVAL;
+ }
+
+ ring_id = srng_config->start_ring_id + ring_num;
+ if (srng_config->mac_type == ATH12K_HAL_SRNG_PMAC)
+ ring_id += mac_id * HAL_SRNG_RINGS_PER_PMAC;
+
+ if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
+ return -EINVAL;
+
+ return ring_id;
+}
+
+static
+void ath12k_wifi7_hal_srng_update_hp_tp_addr(struct ath12k_base *ab,
+ int shadow_cfg_idx,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct hal_srng *srng;
+ struct ath12k_hal *hal = &ab->hal;
+ int ring_id;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ ring_id = ath12k_wifi7_hal_srng_get_ring_id(hal, ring_type, ring_num,
+ 0);
+ if (ring_id < 0)
+ return;
+
+ srng = &hal->srng_list[ring_id];
+
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+ else
+ srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+}
+
+u32 ath12k_wifi7_hal_ce_get_desc_size(enum hal_ce_desc type)
+{
+ switch (type) {
+ case HAL_CE_DESC_SRC:
+ return sizeof(struct hal_ce_srng_src_desc);
+ case HAL_CE_DESC_DST:
+ return sizeof(struct hal_ce_srng_dest_desc);
+ case HAL_CE_DESC_DST_STATUS:
+ return sizeof(struct hal_ce_srng_dst_status_desc);
+ }
+
+ return 0;
+}
+
+int ath12k_wifi7_hal_srng_update_shadow_config(struct ath12k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+ int shadow_cfg_idx = hal->num_shadow_reg_configured;
+ u32 target_reg;
+
+ if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS_MAX)
+ return -EINVAL;
+
+ hal->num_shadow_reg_configured++;
+
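+ /* Map this ring's HP register (TP for destination rings) to the
+ * next free shadow register slot so the host can update ring
+ * pointers through the fixed shadow window at HAL_SHADOW_REG(x).
+ */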
+ target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
+ target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
+ ring_num;
+
+ /* For destination ring, shadow the TP */
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ target_reg += HAL_OFFSET_FROM_HP_TO_TP;
+
+ hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
+
+ /* update the hp/tp address in the hal structure */
+ ath12k_wifi7_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
+ ring_num);
+
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
+ target_reg,
+ HAL_SHADOW_REG(shadow_cfg_idx),
+ shadow_cfg_idx,
+ ring_type, ring_num);
+
+ return 0;
+}
+
+void ath12k_wifi7_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc,
+ dma_addr_t paddr,
+ u32 len, u32 id, u8 byte_swap_data)
+{
+ desc->buffer_addr_low = cpu_to_le32(paddr & HAL_ADDR_LSB_REG_MASK);
+ desc->buffer_addr_info =
+ le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI) |
+ le32_encode_bits(byte_swap_data,
+ HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP) |
+ le32_encode_bits(0, HAL_CE_SRC_DESC_ADDR_INFO_GATHER) |
+ le32_encode_bits(len, HAL_CE_SRC_DESC_ADDR_INFO_LEN);
+ desc->meta_info = le32_encode_bits(id, HAL_CE_SRC_DESC_META_INFO_DATA);
+}
+
+void ath12k_wifi7_hal_ce_dst_set_desc(struct hal_ce_srng_dest_desc *desc,
+ dma_addr_t paddr)
+{
+ desc->buffer_addr_low = cpu_to_le32(paddr & HAL_ADDR_LSB_REG_MASK);
+ desc->buffer_addr_info =
+ le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI);
+}
+
+void ath12k_wifi7_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc,
+ u32 cookie, dma_addr_t paddr,
+ enum hal_rx_buf_return_buf_manager rbm)
+{
+ desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK),
+ BUFFER_ADDR_INFO0_ADDR);
+ desc->buf_addr_info.info1 =
+ le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ BUFFER_ADDR_INFO1_ADDR) |
+ le32_encode_bits(rbm, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
+ le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE);
+}
+
+u32 ath12k_wifi7_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc)
+{
+ u32 len;
+
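+ /* Clear the length after reading it so a stale status entry is not
+ * mistaken for a fresh completion when the descriptor is reused.
+ */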
+ len = le32_get_bits(READ_ONCE(desc->flags), HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
+ desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
+
+ return len;
+}
+
+void
+ath12k_wifi7_hal_setup_link_idle_list(struct ath12k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct ath12k_buffer_addr *link_addr;
+ int i;
+ u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
+ u32 val;
+
+ link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+
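+ /* Chain the scatter buffers: the link slot at the end of each
+ * buffer holds the DMA address of the buffer that follows it.
+ */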
+ for (i = 1; i < nsbufs; i++) {
+ link_addr->info0 = cpu_to_le32(sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK);
+
+ link_addr->info1 =
+ le32_encode_bits((u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT,
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
+ le32_encode_bits(BASE_ADDR_MATCH_TAG_VAL,
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG);
+
+ link_addr = (void *)sbuf[i].vaddr +
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+ }
+
+ val = u32_encode_bits(reg_scatter_buf_sz, HAL_WBM_SCATTER_BUFFER_SIZE) |
+ u32_encode_bits(0x1, HAL_WBM_LINK_DESC_IDLE_LIST_MODE);
+
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(hal),
+ val);
+
+ val = u32_encode_bits(reg_scatter_buf_sz * nsbufs,
+ HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(hal),
+ val);
+
+ val = u32_encode_bits(sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK,
+ BUFFER_ADDR_INFO0_ADDR);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_LSB(hal),
+ val);
+
+ val = u32_encode_bits(BASE_ADDR_MATCH_TAG_VAL,
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG) |
+ u32_encode_bits((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT,
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_MSB(hal),
+ val);
+
+ /* Setup head and tail pointers for the idle list */
+ val = u32_encode_bits(sbuf[nsbufs - 1].paddr, BUFFER_ADDR_INFO0_ADDR);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(hal),
+ val);
+
+ val = u32_encode_bits(((u64)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
+ u32_encode_bits((end_offset >> 2),
+ HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(hal),
+ val);
+
+ val = u32_encode_bits(sbuf[0].paddr, BUFFER_ADDR_INFO0_ADDR);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(hal),
+ val);
+
+ val = u32_encode_bits(sbuf[0].paddr, BUFFER_ADDR_INFO0_ADDR);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(hal),
+ val);
+
+ val = u32_encode_bits(((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
+ u32_encode_bits(0, HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(hal),
+ val);
+
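+ /* Each idle link descriptor is two 32-bit words, so the initial
+ * head pointer, expressed in words, is twice the descriptor count.
+ */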
+ val = 2 * tot_link_desc;
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(hal),
+ val);
+
+ /* Enable the SRNG */
+ val = u32_encode_bits(1, HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE) |
+ u32_encode_bits(1, HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_IDLE_LINK_RING_MISC_ADDR(hal),
+ val);
+}
+
+void ath12k_wifi7_hal_tx_configure_bank_register(struct ath12k_base *ab,
+ u32 bank_config,
+ u8 bank_id)
+{
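+ /* Bank configuration registers are laid out back to back, one
+ * 32-bit word per bank.
+ */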
+ ath12k_hif_write32(ab, HAL_TCL_SW_CONFIG_BANK_ADDR + 4 * bank_id,
+ bank_config);
+}
+
+void ath12k_wifi7_hal_reoq_lut_addr_read_enable(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+
+ u32 val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_ADDR(hal));
+
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR(hal),
+ val | HAL_REO_QDESC_ADDR_READ_LUT_ENABLE);
+}
+
+void ath12k_wifi7_hal_reoq_lut_set_max_peerid(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_MAX_PEERID(hal),
+ HAL_REO_QDESC_MAX_PEERID);
+}
+
+void ath12k_wifi7_hal_write_reoq_lut_addr(struct ath12k_base *ab,
+ dma_addr_t paddr)
+{
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_LUT_BASE0(&ab->hal), paddr);
+}
+
+void ath12k_wifi7_hal_write_ml_reoq_lut_addr(struct ath12k_base *ab,
+ dma_addr_t paddr)
+{
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_LUT_BASE1(&ab->hal), paddr);
+}
+
+void ath12k_wifi7_hal_cc_config(struct ath12k_base *ab)
+{
+ u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
+ u32 val = 0;
+ struct ath12k_hal *hal = &ab->hal;
+
+ if (ath12k_ftm_mode)
+ return;
+
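+ /* Point REO at the CMEM region holding the cookie conversion page
+ * tables (PPT/SPT) used to translate SW cookies into descriptor
+ * addresses in hardware.
+ */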
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(hal), cmem_base);
+
+ val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
+ HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
+ u32_encode_bits(ATH12K_CC_PPT_MSB,
+ HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
+ u32_encode_bits(ATH12K_CC_SPT_MSB,
+ HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
+ u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) |
+ u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) |
+ u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE);
+
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(hal), val);
+
+ /* Enable HW CC for WBM */
+ ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base);
+
+ val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
+ HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
+ u32_encode_bits(ATH12K_CC_PPT_MSB,
+ HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
+ u32_encode_bits(ATH12K_CC_SPT_MSB,
+ HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
+ u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN);
+
+ ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG1, val);
+
+ /* Enable conversion complete indication */
+ val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2);
+ val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) |
+ u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) |
+ u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN);
+
+ ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val);
+
+ /* Enable Cookie conversion for WBM2SW Rings */
+ val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG);
+ val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) |
+ hal->hal_params->wbm2sw_cc_enable;
+
+ ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val);
+}
+
+enum hal_rx_buf_return_buf_manager
+ath12k_wifi7_hal_get_idle_link_rbm(struct ath12k_hal *hal, u8 device_id)
+{
+ switch (device_id) {
+ case 0:
+ return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+ case 1:
+ return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST;
+ case 2:
+ return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST;
+ default:
+ ath12k_warn(hal,
+ "invalid device id %d, falling back to default rbm\n",
+ device_id);
+ WARN_ON(1);
+ return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/hal.h b/drivers/net/wireless/ath/ath12k/wifi7/hal.h
new file mode 100644
index 000000000000..9337225a5253
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal.h
@@ -0,0 +1,561 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_HAL_WIFI7_H
+#define ATH12K_HAL_WIFI7_H
+
+#include "../core.h"
+#include "../hal.h"
+#include "hal_desc.h"
+#include "hal_tx.h"
+#include "hal_rx.h"
+#include "hal_rx_desc.h"
+
+/* calculate the bar0-relative register address of shadow register x */
+#define HAL_SHADOW_BASE_ADDR 0x000008fc
+#define HAL_SHADOW_NUM_REGS 40
+#define HAL_HP_OFFSET_IN_REG_START 1
+#define HAL_OFFSET_FROM_HP_TO_TP 4
+
+#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
+#define HAL_REO_QDESC_MAX_PEERID 8191
+
+/* WCSS Relative address */
+#define HAL_SEQ_WCSS_CMEM_OFFSET 0x00100000
+#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
+#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
+#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal) \
+ ((hal)->regs->umac_ce0_src_reg_base)
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) \
+ ((hal)->regs->umac_ce0_dest_reg_base)
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(hal) \
+ ((hal)->regs->umac_ce1_src_reg_base)
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) \
+ ((hal)->regs->umac_ce1_dest_reg_base)
+#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
+
+#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000
+
+#define HAL_TCL_SW_CONFIG_BANK_ADDR 0x00a4408c
+
+/* SW2TCL(x) R0 ring configuration address */
+#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000020
+#define HAL_TCL1_RING_DSCP_TID_MAP 0x00000240
+
+#define HAL_TCL1_RING_BASE_LSB(hal) \
+ ((hal)->regs->tcl1_ring_base_lsb)
+#define HAL_TCL1_RING_BASE_MSB(hal) \
+ ((hal)->regs->tcl1_ring_base_msb)
+#define HAL_TCL1_RING_ID(hal) ((hal)->regs->tcl1_ring_id)
+#define HAL_TCL1_RING_MISC(hal) \
+ ((hal)->regs->tcl1_ring_misc)
+#define HAL_TCL1_RING_TP_ADDR_LSB(hal) \
+ ((hal)->regs->tcl1_ring_tp_addr_lsb)
+#define HAL_TCL1_RING_TP_ADDR_MSB(hal) \
+ ((hal)->regs->tcl1_ring_tp_addr_msb)
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(hal) \
+ ((hal)->regs->tcl1_ring_consumer_int_setup_ix0)
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(hal) \
+ ((hal)->regs->tcl1_ring_consumer_int_setup_ix1)
+#define HAL_TCL1_RING_MSI1_BASE_LSB(hal) \
+ ((hal)->regs->tcl1_ring_msi1_base_lsb)
+#define HAL_TCL1_RING_MSI1_BASE_MSB(hal) \
+ ((hal)->regs->tcl1_ring_msi1_base_msb)
+#define HAL_TCL1_RING_MSI1_DATA(hal) \
+ ((hal)->regs->tcl1_ring_msi1_data)
+#define HAL_TCL2_RING_BASE_LSB(hal) \
+ ((hal)->regs->tcl2_ring_base_lsb)
+#define HAL_TCL_RING_BASE_LSB(hal) \
+ ((hal)->regs->tcl_ring_base_lsb)
+
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(hal) ({ typeof(hal) _hal = (hal); \
+ (HAL_TCL1_RING_MSI1_BASE_LSB(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); })
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(hal) ({ typeof(hal) _hal = (hal); \
+ (HAL_TCL1_RING_MSI1_BASE_MSB(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); })
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET(hal) ({ typeof(hal) _hal = (hal); \
+ (HAL_TCL1_RING_MSI1_DATA(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); })
+#define HAL_TCL1_RING_BASE_MSB_OFFSET(hal) ({ typeof(hal) _hal = (hal); \
+ (HAL_TCL1_RING_BASE_MSB(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); })
+#define HAL_TCL1_RING_ID_OFFSET(hal) ({ typeof(hal) _hal = (hal); \
+ (HAL_TCL1_RING_ID(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); })
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(hal) ({ typeof(hal) _hal = (hal); \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); })
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(hal) ({ typeof(hal) _hal = (hal); \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); })
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(hal) ({ typeof(hal) _hal = (hal); \
+ (HAL_TCL1_RING_TP_ADDR_LSB(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); })
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(hal) ({ typeof(hal) _hal = (hal); \
+ (HAL_TCL1_RING_TP_ADDR_MSB(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); })
+#define HAL_TCL1_RING_MISC_OFFSET(hal) ({ typeof(hal) _hal = (hal); \
+ (HAL_TCL1_RING_MISC(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); })
+
+/* SW2TCL(x) R2 ring pointers (head/tail) address */
+#define HAL_TCL1_RING_HP 0x00002000
+#define HAL_TCL1_RING_TP 0x00002004
+#define HAL_TCL2_RING_HP 0x00002008
+#define HAL_TCL_RING_HP 0x00002028
+
+#define HAL_TCL1_RING_TP_OFFSET \
+ (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
+
+/* TCL STATUS ring address */
+#define HAL_TCL_STATUS_RING_BASE_LSB(hal) \
+ ((hal)->regs->tcl_status_ring_base_lsb)
+#define HAL_TCL_STATUS_RING_HP 0x00002048
+
+/* PPE2TCL1 Ring address */
+#define HAL_TCL_PPE2TCL1_RING_BASE_LSB 0x00000c48
+#define HAL_TCL_PPE2TCL1_RING_HP 0x00002038
+
+/* WBM PPE Release Ring address */
+#define HAL_WBM_PPE_RELEASE_RING_BASE_LSB(hal) \
+ ((hal)->regs->ppe_rel_ring_base)
+#define HAL_WBM_PPE_RELEASE_RING_HP 0x00003020
+
+/* REO2SW(x) R0 ring configuration address */
+#define HAL_REO1_GEN_ENABLE 0x00000000
+#define HAL_REO1_MISC_CTRL_ADDR(hal) \
+ ((hal)->regs->reo1_misc_ctrl_addr)
+#define HAL_REO1_DEST_RING_CTRL_IX_0 0x00000004
+#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
+#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
+#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
+#define HAL_REO1_QDESC_ADDR(hal) ((hal)->regs->reo1_qdesc_addr)
+#define HAL_REO1_QDESC_MAX_PEERID(hal) ((hal)->regs->reo1_qdesc_max_peerid)
+#define HAL_REO1_SW_COOKIE_CFG0(hal) ((hal)->regs->reo1_sw_cookie_cfg0)
+#define HAL_REO1_SW_COOKIE_CFG1(hal) ((hal)->regs->reo1_sw_cookie_cfg1)
+#define HAL_REO1_QDESC_LUT_BASE0(hal) ((hal)->regs->reo1_qdesc_lut_base0)
+#define HAL_REO1_QDESC_LUT_BASE1(hal) ((hal)->regs->reo1_qdesc_lut_base1)
+#define HAL_REO1_RING_BASE_LSB(hal) ((hal)->regs->reo1_ring_base_lsb)
+#define HAL_REO1_RING_BASE_MSB(hal) ((hal)->regs->reo1_ring_base_msb)
+#define HAL_REO1_RING_ID(hal) ((hal)->regs->reo1_ring_id)
+#define HAL_REO1_RING_MISC(hal) ((hal)->regs->reo1_ring_misc)
+#define HAL_REO1_RING_HP_ADDR_LSB(hal) ((hal)->regs->reo1_ring_hp_addr_lsb)
+#define HAL_REO1_RING_HP_ADDR_MSB(hal) ((hal)->regs->reo1_ring_hp_addr_msb)
+#define HAL_REO1_RING_PRODUCER_INT_SETUP(hal) \
+ ((hal)->regs->reo1_ring_producer_int_setup)
+#define HAL_REO1_RING_MSI1_BASE_LSB(hal) \
+ ((hal)->regs->reo1_ring_msi1_base_lsb)
+#define HAL_REO1_RING_MSI1_BASE_MSB(hal) \
+ ((hal)->regs->reo1_ring_msi1_base_msb)
+#define HAL_REO1_RING_MSI1_DATA(hal) ((hal)->regs->reo1_ring_msi1_data)
+#define HAL_REO2_RING_BASE_LSB(hal) ((hal)->regs->reo2_ring_base)
+#define HAL_REO1_AGING_THRESH_IX_0(hal) ((hal)->regs->reo1_aging_thres_ix0)
+#define HAL_REO1_AGING_THRESH_IX_1(hal) ((hal)->regs->reo1_aging_thres_ix1)
+#define HAL_REO1_AGING_THRESH_IX_2(hal) ((hal)->regs->reo1_aging_thres_ix2)
+#define HAL_REO1_AGING_THRESH_IX_3(hal) ((hal)->regs->reo1_aging_thres_ix3)
+
+/* REO2SW(x) R2 ring pointers (head/tail) address */
+#define HAL_REO1_RING_HP 0x00003048
+#define HAL_REO1_RING_TP 0x0000304c
+#define HAL_REO2_RING_HP 0x00003050
+
+#define HAL_REO1_RING_TP_OFFSET (HAL_REO1_RING_TP - HAL_REO1_RING_HP)
+
+/* REO2SW0 ring configuration address */
+#define HAL_REO_SW0_RING_BASE_LSB(hal) \
+ ((hal)->regs->reo2_sw0_ring_base)
+
+/* REO2SW0 R2 ring pointer (head/tail) address */
+#define HAL_REO_SW0_RING_HP 0x00003088
+
+/* REO CMD R0 address */
+#define HAL_REO_CMD_RING_BASE_LSB(hal) \
+ ((hal)->regs->reo_cmd_ring_base)
+
+/* REO CMD R2 address */
+#define HAL_REO_CMD_HP 0x00003020
+
+/* SW2REO R0 address */
+#define HAL_SW2REO_RING_BASE_LSB(hal) \
+ ((hal)->regs->sw2reo_ring_base)
+#define HAL_SW2REO1_RING_BASE_LSB(hal) \
+ ((hal)->regs->sw2reo1_ring_base)
+
+/* SW2REO R2 address */
+#define HAL_SW2REO_RING_HP 0x00003028
+#define HAL_SW2REO1_RING_HP 0x00003030
+
+/* CE ring R0 address */
+#define HAL_CE_SRC_RING_BASE_LSB 0x00000000
+#define HAL_CE_DST_RING_BASE_LSB 0x00000000
+#define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058
+#define HAL_CE_DST_RING_CTRL 0x000000b0
+
+/* CE ring R2 address */
+#define HAL_CE_DST_RING_HP 0x00000400
+#define HAL_CE_DST_STATUS_RING_HP 0x00000408
+
+/* REO status address */
+#define HAL_REO_STATUS_RING_BASE_LSB(hal) \
+ ((hal)->regs->reo_status_ring_base)
+#define HAL_REO_STATUS_HP 0x000030a8
+
+/* WBM Idle R0 address */
+#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(hal) \
+ ((hal)->regs->wbm_idle_ring_base_lsb)
+#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(hal) \
+ ((hal)->regs->wbm_idle_ring_misc_addr)
+#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(hal) \
+ ((hal)->regs->wbm_r0_idle_list_cntl_addr)
+#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(hal) \
+ ((hal)->regs->wbm_r0_idle_list_size_addr)
+#define HAL_WBM_SCATTERED_RING_BASE_LSB(hal) \
+ ((hal)->regs->wbm_scattered_ring_base_lsb)
+#define HAL_WBM_SCATTERED_RING_BASE_MSB(hal) \
+ ((hal)->regs->wbm_scattered_ring_base_msb)
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(hal) \
+ ((hal)->regs->wbm_scattered_desc_head_info_ix0)
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(hal) \
+ ((hal)->regs->wbm_scattered_desc_head_info_ix1)
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(hal) \
+ ((hal)->regs->wbm_scattered_desc_tail_info_ix0)
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(hal) \
+ ((hal)->regs->wbm_scattered_desc_tail_info_ix1)
+#define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(hal) \
+ ((hal)->regs->wbm_scattered_desc_ptr_hp_addr)
+
+/* WBM Idle R2 address */
+#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b8
+
+/* SW2WBM R0 release address */
+#define HAL_WBM_SW_RELEASE_RING_BASE_LSB(hal) \
+ ((hal)->regs->wbm_sw_release_ring_base_lsb)
+#define HAL_WBM_SW1_RELEASE_RING_BASE_LSB(hal) \
+ ((hal)->regs->wbm_sw1_release_ring_base_lsb)
+
+/* SW2WBM R2 release address */
+#define HAL_WBM_SW_RELEASE_RING_HP 0x00003010
+#define HAL_WBM_SW1_RELEASE_RING_HP 0x00003018
+
+/* WBM2SW R0 release address */
+#define HAL_WBM0_RELEASE_RING_BASE_LSB(hal) \
+ ((hal)->regs->wbm0_release_ring_base_lsb)
+
+#define HAL_WBM1_RELEASE_RING_BASE_LSB(hal) \
+ ((hal)->regs->wbm1_release_ring_base_lsb)
+
+/* WBM2SW R2 release address */
+#define HAL_WBM0_RELEASE_RING_HP 0x000030c8
+#define HAL_WBM1_RELEASE_RING_HP 0x000030d0
+
+/* WBM cookie config address and mask */
+#define HAL_WBM_SW_COOKIE_CFG0 0x00000040
+#define HAL_WBM_SW_COOKIE_CFG1 0x00000044
+#define HAL_WBM_SW_COOKIE_CFG2 0x00000090
+#define HAL_WBM_SW_COOKIE_CONVERT_CFG 0x00000094
+
+#define HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB GENMASK(12, 8)
+#define HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB GENMASK(17, 13)
+#define HAL_WBM_SW_COOKIE_CFG_ALIGN BIT(18)
+#define HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN BIT(0)
+#define HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN BIT(1)
+#define HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN BIT(3)
+
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN BIT(1)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN BIT(2)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN BIT(3)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN BIT(4)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN BIT(5)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN BIT(8)
+
+/* TCL ring field mask and offset */
+#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE BIT(0)
+#define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1)
+#define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(23)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
+
+/* REO ring field mask and offset */
+#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
+#define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_REO1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_REO1_MISC_CTL_FRAG_DST_RING GENMASK(20, 17)
+#define HAL_REO1_MISC_CTL_BAR_DST_RING GENMASK(24, 21)
+#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
+#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
+#define HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB GENMASK(12, 8)
+#define HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB GENMASK(17, 13)
+#define HAL_REO1_SW_COOKIE_CFG_ALIGN BIT(18)
+#define HAL_REO1_SW_COOKIE_CFG_ENABLE BIT(19)
+#define HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE BIT(20)
+#define HAL_REO_QDESC_ADDR_READ_LUT_ENABLE BIT(7)
+#define HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY BIT(6)
+
+/* CE ring bit field mask and shift */
+#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
+
+#define HAL_ADDR_LSB_REG_MASK 0xffffffff
+
+#define HAL_ADDR_MSB_REG_SHIFT 32
+
+/* WBM ring bit field mask and shift */
+#define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1)
+#define HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2)
+#define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8)
+
+#define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8)
+#define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8)
+
+#define HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE BIT(0)
+
+#define BASE_ADDR_MATCH_TAG_VAL 0x5
+
+#define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
+#define HAL_RXDMA_RING_MAX_SIZE_BE 0x000fffff
+#define HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
+
+#define HAL_WBM2SW_REL_ERR_RING_NUM 3
+/* Add any other errors here and return them in
+ * ath12k_hal_rx_desc_get_err().
+ */
+
+#define HAL_IPQ5332_CE_WFSS_REG_BASE 0x740000
+#define HAL_IPQ5332_CE_SIZE 0x100000
+
+#define HAL_RX_MAX_BA_WINDOW 256
+
+#define HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC (100 * 1000)
+#define HAL_DEFAULT_VO_REO_TIMEOUT_USEC (40 * 1000)
+
+#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
+
+#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0)
+#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1)
+#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2)
+#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
+#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4)
+#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5)
+#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
+#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
+#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
+#define HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC BIT(9)
+
+/* Must match the HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
+#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_CMD_UPD0_VLD BIT(9)
+#define HAL_REO_CMD_UPD0_ALDC BIT(10)
+#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_CMD_UPD0_AC BIT(13)
+#define HAL_REO_CMD_UPD0_BAR BIT(14)
+#define HAL_REO_CMD_UPD0_RETRY BIT(15)
+#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16)
+#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17)
+#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19)
+#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20)
+#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21)
+#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23)
+#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_CMD_UPD0_SVLD BIT(25)
+#define HAL_REO_CMD_UPD0_SSN BIT(26)
+#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27)
+#define HAL_REO_CMD_UPD0_PN_ERR BIT(28)
+#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
+#define HAL_REO_CMD_UPD0_PN BIT(30)
+
+/* Must match the HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
+#define HAL_REO_CMD_UPD1_VLD BIT(16)
+#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
+#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21)
+#define HAL_REO_CMD_UPD1_BAR BIT(23)
+#define HAL_REO_CMD_UPD1_RETRY BIT(24)
+#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26)
+#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27)
+#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28)
+#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29)
+#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
+
+/* Must match the HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
+#define HAL_REO_CMD_UPD2_SVLD BIT(10)
+#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
+#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_CMD_UPD2_PN_ERR BIT(24)
+
+struct hal_reo_status_queue_stats {
+ u16 ssn;
+ u16 curr_idx;
+ u32 pn[4];
+ u32 last_rx_queue_ts;
+ u32 last_rx_dequeue_ts;
+ u32 rx_bitmap[8]; /* Bitmap from 0-255 */
+ u32 curr_mpdu_cnt;
+ u32 curr_msdu_cnt;
+ u16 fwd_due_to_bar_cnt;
+ u16 dup_cnt;
+ u32 frames_in_order_cnt;
+ u32 num_mpdu_processed_cnt;
+ u32 num_msdu_processed_cnt;
+ u32 total_num_processed_byte_cnt;
+ u32 late_rx_mpdu_cnt;
+ u32 reorder_hole_cnt;
+ u8 timeout_cnt;
+ u8 bar_rx_cnt;
+ u8 num_window_2k_jump_cnt;
+};
+
+struct hal_reo_status_flush_queue {
+ bool err_detected;
+};
+
+enum hal_reo_status_flush_cache_err_code {
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND,
+};
+
+struct hal_reo_status_flush_cache {
+ bool err_detected;
+ enum hal_reo_status_flush_cache_err_code err_code;
+ bool cache_controller_flush_status_hit;
+ u8 cache_controller_flush_status_desc_type;
+ u8 cache_controller_flush_status_client_id;
+ u8 cache_controller_flush_status_err;
+ u8 cache_controller_flush_status_cnt;
+};
+
+enum hal_reo_status_unblock_cache_type {
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE,
+ HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE,
+};
+
+struct hal_reo_status_unblock_cache {
+ bool err_detected;
+ enum hal_reo_status_unblock_cache_type unblock_type;
+};
+
+struct hal_reo_status_flush_timeout_list {
+ bool err_detected;
+ bool list_empty;
+ u16 release_desc_cnt;
+ u16 fwd_buf_cnt;
+};
+
+enum hal_reo_threshold_idx {
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER0,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER1,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER2,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM,
+};
+
+struct hal_reo_status_desc_thresh_reached {
+ enum hal_reo_threshold_idx threshold_idx;
+ u32 link_desc_counter0;
+ u32 link_desc_counter1;
+ u32 link_desc_counter2;
+ u32 link_desc_counter_sum;
+};
+
+struct hal_reo_status {
+ struct hal_reo_status_header uniform_hdr;
+ u8 loop_cnt;
+ union {
+ struct hal_reo_status_queue_stats queue_stats;
+ struct hal_reo_status_flush_queue flush_queue;
+ struct hal_reo_status_flush_cache flush_cache;
+ struct hal_reo_status_unblock_cache unblock_cache;
+ struct hal_reo_status_flush_timeout_list timeout_list;
+ struct hal_reo_status_desc_thresh_reached desc_thresh_reached;
+ } u;
+};
+
+int ath12k_wifi7_hal_init(struct ath12k_base *ab);
+void ath12k_wifi7_hal_ce_dst_setup(struct ath12k_base *ab,
+ struct hal_srng *srng, int ring_num);
+void ath12k_wifi7_hal_srng_dst_hw_init(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void ath12k_wifi7_hal_srng_src_hw_init(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void ath12k_wifi7_hal_set_umac_srng_ptr_addr(struct ath12k_base *ab,
+ struct hal_srng *srng);
+int ath12k_wifi7_hal_srng_update_shadow_config(struct ath12k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num);
+int ath12k_wifi7_hal_srng_get_ring_id(struct ath12k_hal *hal,
+ enum hal_ring_type type,
+ int ring_num, int mac_id);
+u32 ath12k_wifi7_hal_ce_get_desc_size(enum hal_ce_desc type);
+void ath12k_wifi7_hal_cc_config(struct ath12k_base *ab);
+enum hal_rx_buf_return_buf_manager
+ath12k_wifi7_hal_get_idle_link_rbm(struct ath12k_hal *hal, u8 device_id);
+void ath12k_wifi7_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc,
+ dma_addr_t paddr,
+ u32 len, u32 id, u8 byte_swap_data);
+void ath12k_wifi7_hal_ce_dst_set_desc(struct hal_ce_srng_dest_desc *desc,
+ dma_addr_t paddr);
+void
+ath12k_wifi7_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc,
+ u32 cookie, dma_addr_t paddr,
+ enum hal_rx_buf_return_buf_manager rbm);
+u32
+ath12k_wifi7_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc);
+void
+ath12k_wifi7_hal_setup_link_idle_list(struct ath12k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset);
+void ath12k_wifi7_hal_reoq_lut_addr_read_enable(struct ath12k_base *ab);
+void ath12k_wifi7_hal_reoq_lut_set_max_peerid(struct ath12k_base *ab);
+void ath12k_wifi7_hal_write_reoq_lut_addr(struct ath12k_base *ab,
+ dma_addr_t paddr);
+void ath12k_wifi7_hal_write_ml_reoq_lut_addr(struct ath12k_base *ab,
+ dma_addr_t paddr);
+u32 ath12k_wifi7_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/wifi7/hal_desc.h
index 13ddac4a9412..e1ab47b44433 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_desc.h
@@ -1,92 +1,13 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
-#include "core.h"
+#include "../core.h"
#ifndef ATH12K_HAL_DESC_H
#define ATH12K_HAL_DESC_H
-#define BUFFER_ADDR_INFO0_ADDR GENMASK(31, 0)
-
-#define BUFFER_ADDR_INFO1_ADDR GENMASK(7, 0)
-#define BUFFER_ADDR_INFO1_RET_BUF_MGR GENMASK(11, 8)
-#define BUFFER_ADDR_INFO1_SW_COOKIE GENMASK(31, 12)
-
-struct ath12k_buffer_addr {
- __le32 info0;
- __le32 info1;
-} __packed;
-
-/* ath12k_buffer_addr
- *
- * buffer_addr_31_0
- * Address (lower 32 bits) of the MSDU buffer or MSDU_EXTENSION
- * descriptor or Link descriptor
- *
- * buffer_addr_39_32
- * Address (upper 8 bits) of the MSDU buffer or MSDU_EXTENSION
- * descriptor or Link descriptor
- *
- * return_buffer_manager (RBM)
- * Consumer: WBM
- * Producer: SW/FW
- * Indicates to which buffer manager the buffer or MSDU_EXTENSION
- * descriptor or link descriptor that is being pointed to shall be
- * returned after the frame has been processed. It is used by WBM
- * for routing purposes.
- *
- * Values are defined in enum %HAL_RX_BUF_RBM_
- *
- * sw_buffer_cookie
- * Cookie field exclusively used by SW. HW ignores the contents,
- * accept that it passes the programmed value on to other
- * descriptors together with the physical address.
- *
- * Field can be used by SW to for example associate the buffers
- * physical address with the virtual address.
- *
- * NOTE1:
- * The three most significant bits can have a special meaning
- * in case this struct is embedded in a TX_MPDU_DETAILS STRUCT,
- * and field transmit_bw_restriction is set
- *
- * In case of NON punctured transmission:
- * Sw_buffer_cookie[19:17] = 3'b000: 20 MHz TX only
- * Sw_buffer_cookie[19:17] = 3'b001: 40 MHz TX only
- * Sw_buffer_cookie[19:17] = 3'b010: 80 MHz TX only
- * Sw_buffer_cookie[19:17] = 3'b011: 160 MHz TX only
- * Sw_buffer_cookie[19:17] = 3'b101: 240 MHz TX only
- * Sw_buffer_cookie[19:17] = 3'b100: 320 MHz TX only
- * Sw_buffer_cookie[19:18] = 2'b11: reserved
- *
- * In case of punctured transmission:
- * Sw_buffer_cookie[19:16] = 4'b0000: pattern 0 only
- * Sw_buffer_cookie[19:16] = 4'b0001: pattern 1 only
- * Sw_buffer_cookie[19:16] = 4'b0010: pattern 2 only
- * Sw_buffer_cookie[19:16] = 4'b0011: pattern 3 only
- * Sw_buffer_cookie[19:16] = 4'b0100: pattern 4 only
- * Sw_buffer_cookie[19:16] = 4'b0101: pattern 5 only
- * Sw_buffer_cookie[19:16] = 4'b0110: pattern 6 only
- * Sw_buffer_cookie[19:16] = 4'b0111: pattern 7 only
- * Sw_buffer_cookie[19:16] = 4'b1000: pattern 8 only
- * Sw_buffer_cookie[19:16] = 4'b1001: pattern 9 only
- * Sw_buffer_cookie[19:16] = 4'b1010: pattern 10 only
- * Sw_buffer_cookie[19:16] = 4'b1011: pattern 11 only
- * Sw_buffer_cookie[19:18] = 2'b11: reserved
- *
- * Note: a punctured transmission is indicated by the presence
- * of TLV TX_PUNCTURE_SETUP embedded in the scheduler TLV
- *
- * Sw_buffer_cookie[20:17]: Tid: The TID field in the QoS control
- * field
- *
- * Sw_buffer_cookie[16]: Mpdu_qos_control_valid: This field
- * indicates MPDUs with a QoS control field.
- *
- */
-
enum hal_tlv_tag {
HAL_MACTX_CBF_START = 0 /* 0x0 */,
HAL_PHYRX_DATA = 1 /* 0x1 */,
@@ -566,27 +487,6 @@ enum hal_tlv_tag {
HAL_TLV_BASE = 511 /* 0x1ff */,
};
-#define HAL_TLV_HDR_TAG GENMASK(9, 1)
-#define HAL_TLV_HDR_LEN GENMASK(25, 10)
-#define HAL_TLV_USR_ID GENMASK(31, 26)
-
-#define HAL_TLV_ALIGN 4
-
-struct hal_tlv_hdr {
- __le32 tl;
- u8 value[];
-} __packed;
-
-#define HAL_TLV_64_HDR_TAG GENMASK(9, 1)
-#define HAL_TLV_64_HDR_LEN GENMASK(21, 10)
-#define HAL_TLV_64_USR_ID GENMASK(31, 26)
-#define HAL_TLV_64_ALIGN 8
-
-struct hal_tlv_64_hdr {
- __le64 tl;
- u8 value[];
-} __packed;
-
#define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0)
#define RX_MPDU_DESC_INFO0_FRAG_FLAG BIT(8)
#define RX_MPDU_DESC_INFO0_MPDU_RETRY BIT(9)
@@ -820,35 +720,6 @@ struct rx_msdu_ext_desc {
* Set to the link ID of the PMAC that received the frame
*/
-enum hal_reo_dest_ring_buffer_type {
- HAL_REO_DEST_RING_BUFFER_TYPE_MSDU,
- HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC,
-};
-
-enum hal_reo_dest_ring_push_reason {
- HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED,
- HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION,
-};
-
-enum hal_reo_dest_ring_error_code {
- HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO,
- HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID,
- HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA,
- HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE,
- HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE,
- HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP,
- HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP,
- HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR,
- HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR,
- HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION,
- HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN,
- HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED,
- HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET,
- HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET,
- HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED,
- HAL_REO_DEST_RING_ERROR_CODE_MAX,
-};
-
#define HAL_REO_DEST_RING_INFO0_BUFFER_TYPE BIT(0)
#define HAL_REO_DEST_RING_INFO0_PUSH_REASON GENMASK(2, 1)
#define HAL_REO_DEST_RING_INFO0_ERROR_CODE GENMASK(7, 3)
@@ -986,35 +857,6 @@ struct hal_reo_to_ppe_ring {
* More Segments followed
*/
-enum hal_reo_entr_rxdma_push_reason {
- HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_ERR_DETECTED,
- HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_ROUTING_INSTRUCTION,
- HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_RX_FLUSH,
-};
-
-enum hal_reo_entr_rxdma_ecode {
- HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_MULTICAST_ECHO_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_MISMATCH_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_GRPCAST_AMSDU_WDS_ERR,
- HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
-};
-
enum hal_rx_reo_dest_ring {
HAL_RX_REO_DEST_RING_TCL,
HAL_RX_REO_DEST_RING_SW1,
@@ -1207,6 +1049,13 @@ struct hal_reo_get_queue_stats {
* Hole_count
*/
+struct hal_reo_get_queue_stats_qcc2072 {
+ struct hal_reo_cmd_hdr cmd;
+ __le32 queue_addr_lo;
+ __le32 info0;
+ __le32 rsvd0[6];
+} __packed;
+
#define HAL_REO_FLUSH_QUEUE_INFO0_DESC_ADDR_HI GENMASK(7, 0)
#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_DESC_ADDR BIT(8)
#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_RESRC_IDX GENMASK(10, 9)
@@ -1269,46 +1118,6 @@ struct hal_reo_flush_cache {
#define HAL_TCL_DATA_CMD_INFO5_RING_ID GENMASK(27, 20)
#define HAL_TCL_DATA_CMD_INFO5_LOOPING_COUNT GENMASK(31, 28)
-enum hal_encrypt_type {
- HAL_ENCRYPT_TYPE_WEP_40,
- HAL_ENCRYPT_TYPE_WEP_104,
- HAL_ENCRYPT_TYPE_TKIP_NO_MIC,
- HAL_ENCRYPT_TYPE_WEP_128,
- HAL_ENCRYPT_TYPE_TKIP_MIC,
- HAL_ENCRYPT_TYPE_WAPI,
- HAL_ENCRYPT_TYPE_CCMP_128,
- HAL_ENCRYPT_TYPE_OPEN,
- HAL_ENCRYPT_TYPE_CCMP_256,
- HAL_ENCRYPT_TYPE_GCMP_128,
- HAL_ENCRYPT_TYPE_AES_GCMP_256,
- HAL_ENCRYPT_TYPE_WAPI_GCM_SM4,
-};
-
-enum hal_tcl_encap_type {
- HAL_TCL_ENCAP_TYPE_RAW,
- HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
- HAL_TCL_ENCAP_TYPE_ETHERNET,
- HAL_TCL_ENCAP_TYPE_802_3 = 3,
- HAL_TCL_ENCAP_TYPE_MAX
-};
-
-enum hal_tcl_desc_type {
- HAL_TCL_DESC_TYPE_BUFFER,
- HAL_TCL_DESC_TYPE_EXT_DESC,
- HAL_TCL_DESC_TYPE_MAX,
-};
-
-enum hal_wbm_htt_tx_comp_status {
- HAL_WBM_REL_HTT_TX_COMP_STATUS_OK,
- HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP,
- HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL,
- HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
- HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
- HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
- HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH,
- HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX,
-};
-
struct hal_tcl_data_cmd {
struct ath12k_buffer_addr buf_addr_info;
__le32 info0;
@@ -1765,107 +1574,11 @@ struct hal_ce_srng_dst_status_desc {
#define HAL_TX_RATE_STATS_INFO0_OFDMA_TX BIT(16)
#define HAL_TX_RATE_STATS_INFO0_TONES_IN_RU GENMASK(28, 17)
-enum hal_tx_rate_stats_bw {
- HAL_TX_RATE_STATS_BW_20,
- HAL_TX_RATE_STATS_BW_40,
- HAL_TX_RATE_STATS_BW_80,
- HAL_TX_RATE_STATS_BW_160,
-};
-
-enum hal_tx_rate_stats_pkt_type {
- HAL_TX_RATE_STATS_PKT_TYPE_11A,
- HAL_TX_RATE_STATS_PKT_TYPE_11B,
- HAL_TX_RATE_STATS_PKT_TYPE_11N,
- HAL_TX_RATE_STATS_PKT_TYPE_11AC,
- HAL_TX_RATE_STATS_PKT_TYPE_11AX,
- HAL_TX_RATE_STATS_PKT_TYPE_11BA,
- HAL_TX_RATE_STATS_PKT_TYPE_11BE,
-};
-
-enum hal_tx_rate_stats_sgi {
- HAL_TX_RATE_STATS_SGI_08US,
- HAL_TX_RATE_STATS_SGI_04US,
- HAL_TX_RATE_STATS_SGI_16US,
- HAL_TX_RATE_STATS_SGI_32US,
-};
-
struct hal_tx_rate_stats {
__le32 info0;
__le32 tsf;
} __packed;
-struct hal_wbm_link_desc {
- struct ath12k_buffer_addr buf_addr_info;
-} __packed;
-
-/* hal_wbm_link_desc
- *
- * Producer: WBM
- * Consumer: WBM
- *
- * buf_addr_info
- * Details of the physical address of a buffer or MSDU
- * link descriptor.
- */
-
-enum hal_wbm_rel_src_module {
- HAL_WBM_REL_SRC_MODULE_TQM,
- HAL_WBM_REL_SRC_MODULE_RXDMA,
- HAL_WBM_REL_SRC_MODULE_REO,
- HAL_WBM_REL_SRC_MODULE_FW,
- HAL_WBM_REL_SRC_MODULE_SW,
- HAL_WBM_REL_SRC_MODULE_MAX,
-};
-
-enum hal_wbm_rel_desc_type {
- HAL_WBM_REL_DESC_TYPE_REL_MSDU,
- HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
- HAL_WBM_REL_DESC_TYPE_MPDU_LINK,
- HAL_WBM_REL_DESC_TYPE_MSDU_EXT,
- HAL_WBM_REL_DESC_TYPE_QUEUE_EXT,
-};
-
-/* hal_wbm_rel_desc_type
- *
- * msdu_buffer
- * The address points to an MSDU buffer
- *
- * msdu_link_descriptor
- * The address points to an Tx MSDU link descriptor
- *
- * mpdu_link_descriptor
- * The address points to an MPDU link descriptor
- *
- * msdu_ext_descriptor
- * The address points to an MSDU extension descriptor
- *
- * queue_ext_descriptor
- * The address points to an TQM queue extension descriptor. WBM should
- * treat this is the same way as a link descriptor.
- */
-
-enum hal_wbm_rel_bm_act {
- HAL_WBM_REL_BM_ACT_PUT_IN_IDLE,
- HAL_WBM_REL_BM_ACT_REL_MSDU,
-};
-
-/* hal_wbm_rel_bm_act
- *
- * put_in_idle_list
- * Put the buffer or descriptor back in the idle list. In case of MSDU or
- * MDPU link descriptor, BM does not need to check to release any
- * individual MSDU buffers.
- *
- * release_msdu_list
- * This BM action can only be used in combination with desc_type being
- * msdu_link_descriptor. Field first_msdu_index points out which MSDU
- * pointer in the MSDU link descriptor is the first of an MPDU that is
- * released. BM shall release all the MSDU buffers linked to this first
- * MSDU buffer pointer. All related MSDU buffer pointer entries shall be
- * set to value 0, which represents the 'NULL' pointer. When all MSDU
- * buffer pointers in the MSDU link descriptor are 'NULL', the MSDU link
- * descriptor itself shall also be released.
- */
#define HAL_WBM_COMPL_RX_INFO0_REL_SRC_MODULE GENMASK(2, 0)
#define HAL_WBM_COMPL_RX_INFO0_BM_ACTION GENMASK(5, 3)
#define HAL_WBM_COMPL_RX_INFO0_DESC_TYPE GENMASK(8, 6)
@@ -2007,7 +1720,6 @@ struct hal_wbm_release_ring_cc_rx {
#define HAL_WBM_RELEASE_INFO3_CONTINUATION BIT(2)
#define HAL_WBM_RELEASE_INFO5_LOOPING_COUNT GENMASK(31, 28)
-#define HAL_ENCRYPT_TYPE_MAX 12
struct hal_wbm_release_ring {
struct ath12k_buffer_addr buf_addr_info;
@@ -2331,7 +2043,6 @@ enum hal_desc_buf_type {
#define HAL_DESC_REO_OWNED 4
#define HAL_DESC_REO_QUEUE_DESC 8
#define HAL_DESC_REO_QUEUE_EXT_DESC 9
-#define HAL_DESC_REO_NON_QOS_TID 16
#define HAL_DESC_HDR_INFO0_OWNER GENMASK(3, 0)
#define HAL_DESC_HDR_INFO0_BUF_TYPE GENMASK(7, 4)
@@ -2728,6 +2439,11 @@ struct hal_reo_get_queue_stats_status {
* entries into this Ring has looped around the ring.
*/
+struct hal_reo_get_queue_stats_status_qcc2072 {
+ __le32 tlv32_padding;
+ struct hal_reo_get_queue_stats_status status;
+} __packed;
+
#define HAL_REO_STATUS_LOOP_CNT GENMASK(31, 28)
#define HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED BIT(0)
@@ -2957,25 +2673,6 @@ struct hal_tcl_entrance_from_ppe_ring {
__le32 info0;
} __packed;
-struct hal_mon_buf_ring {
- __le32 paddr_lo;
- __le32 paddr_hi;
- __le64 cookie;
-};
-
-/* hal_mon_buf_ring
- * Producer : SW
- * Consumer : Monitor
- *
- * paddr_lo
- * Lower 32-bit physical address of the buffer pointer from the source ring.
- * paddr_hi
- * bit range 7-0 : upper 8 bit of the physical address.
- * bit range 31-8 : reserved.
- * cookie
- * Consumer: RxMon/TxMon 64 bit cookie of the buffers.
- */
-
#define HAL_MON_DEST_COOKIE_BUF_ID GENMASK(17, 0)
#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(11, 0)
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/hal_qcc2072.c b/drivers/net/wireless/ath/ath12k/wifi7/hal_qcc2072.c
new file mode 100644
index 000000000000..1eefb931a853
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_qcc2072.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "hal_qcc2072.h"
+#include "hal_wcn7850.h"
+
+const struct ath12k_hw_regs qcc2072_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .tcl1_ring_id = 0x00000920,
+ .tcl1_ring_misc = 0x00000928,
+ .tcl1_ring_tp_addr_lsb = 0x00000934,
+ .tcl1_ring_tp_addr_msb = 0x00000938,
+ .tcl1_ring_consumer_int_setup_ix0 = 0x00000948,
+ .tcl1_ring_consumer_int_setup_ix1 = 0x0000094c,
+ .tcl1_ring_msi1_base_lsb = 0x00000960,
+ .tcl1_ring_msi1_base_msb = 0x00000964,
+ .tcl1_ring_msi1_data = 0x00000968,
+ .tcl_ring_base_lsb = 0x00000b70,
+ .tcl1_ring_base_lsb = 0x00000918,
+ .tcl1_ring_base_msb = 0x0000091c,
+ .tcl2_ring_base_lsb = 0x00000990,
+
+ /* TCL STATUS ring address */
+ .tcl_status_ring_base_lsb = 0x00000d50,
+
+ .wbm_idle_ring_base_lsb = 0x00000d3c,
+ .wbm_idle_ring_misc_addr = 0x00000d4c,
+ .wbm_r0_idle_list_cntl_addr = 0x00000240,
+ .wbm_r0_idle_list_size_addr = 0x00000244,
+ .wbm_scattered_ring_base_lsb = 0x00000250,
+ .wbm_scattered_ring_base_msb = 0x00000254,
+ .wbm_scattered_desc_head_info_ix0 = 0x00000260,
+ .wbm_scattered_desc_head_info_ix1 = 0x00000264,
+ .wbm_scattered_desc_tail_info_ix0 = 0x00000270,
+ .wbm_scattered_desc_tail_info_ix1 = 0x00000274,
+ .wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
+
+ .wbm_sw_release_ring_base_lsb = 0x0000037c,
+ .wbm_sw1_release_ring_base_lsb = ATH12K_HW_REG_UNDEFINED,
+ .wbm0_release_ring_base_lsb = 0x00000e08,
+ .wbm1_release_ring_base_lsb = 0x00000e80,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0cc58,
+
+ /* PPE release ring address */
+ .ppe_rel_ring_base = 0x0000046c,
+
+ /* REO DEST ring address */
+ .reo2_ring_base = 0x00000578,
+ .reo1_misc_ctrl_addr = 0x00000ba0,
+ .reo1_sw_cookie_cfg0 = 0x0000006c,
+ .reo1_sw_cookie_cfg1 = 0x00000070,
+ .reo1_qdesc_lut_base0 = ATH12K_HW_REG_UNDEFINED,
+ .reo1_qdesc_lut_base1 = ATH12K_HW_REG_UNDEFINED,
+
+ .reo1_ring_base_lsb = 0x00000500,
+ .reo1_ring_base_msb = 0x00000504,
+ .reo1_ring_id = 0x00000508,
+ .reo1_ring_misc = 0x00000510,
+ .reo1_ring_hp_addr_lsb = 0x00000514,
+ .reo1_ring_hp_addr_msb = 0x00000518,
+ .reo1_ring_producer_int_setup = 0x00000524,
+ .reo1_ring_msi1_base_lsb = 0x00000548,
+ .reo1_ring_msi1_base_msb = 0x0000054c,
+ .reo1_ring_msi1_data = 0x00000550,
+ .reo1_aging_thres_ix0 = 0x00000b2c,
+ .reo1_aging_thres_ix1 = 0x00000b30,
+ .reo1_aging_thres_ix2 = 0x00000b34,
+ .reo1_aging_thres_ix3 = 0x00000b38,
+
+ /* REO Exception ring address */
+ .reo2_sw0_ring_base = 0x000008c0,
+
+ /* REO Reinject ring address */
+ .sw2reo_ring_base = 0x00000320,
+ .sw2reo1_ring_base = 0x00000398,
+
+ /* REO cmd ring address */
+ .reo_cmd_ring_base = 0x000002a8,
+
+ /* REO status ring address */
+ .reo_status_ring_base = 0x00000aa0,
+
+ /* CE base address */
+ .umac_ce0_src_reg_base = 0x01b80000,
+ .umac_ce0_dest_reg_base = 0x01b81000,
+ .umac_ce1_src_reg_base = 0x01b82000,
+ .umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e65304,
+
+ .qrtr_node_id = 0x1e03300,
+};
+
+static void ath12k_hal_rx_desc_set_msdu_len_qcc2072(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.qcc2072.msdu_end.info10);
+
+ info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH;
+ info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH);
+
+ desc->u.qcc2072.msdu_end.info10 = __cpu_to_le32(info);
+}
+
+static void ath12k_hal_rx_desc_get_dot11_hdr_qcc2072(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ hdr->frame_control = desc->u.qcc2072.mpdu_start.frame_ctrl;
+ hdr->duration_id = desc->u.qcc2072.mpdu_start.duration;
+ ether_addr_copy(hdr->addr1, desc->u.qcc2072.mpdu_start.addr1);
+ ether_addr_copy(hdr->addr2, desc->u.qcc2072.mpdu_start.addr2);
+ ether_addr_copy(hdr->addr3, desc->u.qcc2072.mpdu_start.addr3);
+
+ if (__le32_to_cpu(desc->u.qcc2072.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR4_VALID)
+ ether_addr_copy(hdr->addr4, desc->u.qcc2072.mpdu_start.addr4);
+
+ hdr->seq_ctrl = desc->u.qcc2072.mpdu_start.seq_ctrl;
+}
+
+static void ath12k_hal_rx_desc_get_crypto_hdr_qcc2072(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ unsigned int key_id;
+
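+ /* Rebuild the security header's IV/PN bytes from the PN snapshot in
+ * the MPDU start TLV; open, WEP and WAPI frames are left untouched.
+ */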
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcc2072.mpdu_start.pn[0]);
+ crypto_hdr[1] = 0;
+ crypto_hdr[2] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcc2072.mpdu_start.pn[0]);
+ break;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcc2072.mpdu_start.pn[0]);
+ crypto_hdr[1] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcc2072.mpdu_start.pn[0]);
+ crypto_hdr[2] = 0;
+ break;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ return;
+ }
+
+ key_id = u32_get_bits(__le32_to_cpu(desc->u.qcc2072.mpdu_start.info5),
+ RX_MPDU_START_INFO5_KEY_ID);
+ crypto_hdr[3] = 0x20 | (key_id << 6);
+ crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcc2072.mpdu_start.pn[0]);
+ crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcc2072.mpdu_start.pn[0]);
+ crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcc2072.mpdu_start.pn[1]);
+ crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcc2072.mpdu_start.pn[1]);
+}
+
+static void ath12k_hal_rx_desc_copy_end_tlv_qcc2072(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy(&fdesc->u.qcc2072.msdu_end, &ldesc->u.qcc2072.msdu_end,
+ sizeof(struct rx_msdu_end_qcn9274));
+}
+
+static u8 ath12k_hal_rx_desc_get_msdu_src_link_qcc2072(struct hal_rx_desc *desc)
+{
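+ /* No MSDU source link information is available in this descriptor
+ * layout; report link 0 unconditionally.
+ */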
+ return 0;
+}
+
+static u8 ath12k_hal_rx_desc_get_l3_pad_bytes_qcc2072(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcc2072.msdu_end.info5,
+ RX_MSDU_END_INFO5_L3_HDR_PADDING);
+}
+
+static u32 ath12k_hal_rx_desc_get_mpdu_start_tag_qcc2072(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcc2072.mpdu_start_tag,
+ HAL_TLV_HDR_TAG);
+}
+
+static u32 ath12k_hal_rx_desc_get_mpdu_ppdu_id_qcc2072(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcc2072.mpdu_start.phy_ppdu_id);
+}
+
+static u8 *ath12k_hal_rx_desc_get_msdu_payload_qcc2072(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcc2072.msdu_payload[0];
+}
+
+static bool ath12k_hal_rx_desc_get_first_msdu_qcc2072(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcc2072.msdu_end.info5,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+}
+
+static bool ath12k_hal_rx_desc_get_last_msdu_qcc2072(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcc2072.msdu_end.info5,
+ RX_MSDU_END_INFO5_LAST_MSDU);
+}
+
+static bool ath12k_hal_rx_desc_encrypt_valid_qcc2072(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcc2072.mpdu_start.info4,
+ RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
+}
+
+static u32 ath12k_hal_rx_desc_get_encrypt_type_qcc2072(struct hal_rx_desc *desc)
+{
+ if (!ath12k_hal_rx_desc_encrypt_valid_qcc2072(desc))
+ return HAL_ENCRYPT_TYPE_OPEN;
+
+ return le32_get_bits(desc->u.qcc2072.mpdu_start.info2,
+ RX_MPDU_START_INFO2_ENC_TYPE);
+}
+
+static u8 ath12k_hal_rx_desc_get_decap_type_qcc2072(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcc2072.msdu_end.info11,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
+static u8 ath12k_hal_rx_desc_get_mesh_ctl_qcc2072(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcc2072.msdu_end.info11,
+ RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
+}
+
+static bool ath12k_hal_rx_desc_get_mpdu_seq_ctl_vld_qcc2072(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcc2072.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
+}
+
+static bool ath12k_hal_rx_desc_get_mpdu_fc_valid_qcc2072(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcc2072.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
+}
+
+static u16 ath12k_hal_rx_desc_get_mpdu_start_seq_no_qcc2072(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcc2072.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
+}
+
+static u16 ath12k_hal_rx_desc_get_msdu_len_qcc2072(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcc2072.msdu_end.info10,
+ RX_MSDU_END_INFO10_MSDU_LENGTH);
+}
+
+static u8 ath12k_hal_rx_desc_get_msdu_sgi_qcc2072(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcc2072.msdu_end.info12,
+ RX_MSDU_END_INFO12_SGI);
+}
+
+static u8 ath12k_hal_rx_desc_get_msdu_rate_mcs_qcc2072(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcc2072.msdu_end.info12,
+ RX_MSDU_END_INFO12_RATE_MCS);
+}
+
+static u8 ath12k_hal_rx_desc_get_msdu_rx_bw_qcc2072(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcc2072.msdu_end.info12,
+ RX_MSDU_END_INFO12_RECV_BW);
+}
+
+static u32 ath12k_hal_rx_desc_get_msdu_freq_qcc2072(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcc2072.msdu_end.phy_meta_data);
+}
+
+static u8 ath12k_hal_rx_desc_get_msdu_pkt_type_qcc2072(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcc2072.msdu_end.info12,
+ RX_MSDU_END_INFO12_PKT_TYPE);
+}
+
+static u8 ath12k_hal_rx_desc_get_msdu_nss_qcc2072(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcc2072.msdu_end.info12,
+ RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
+}
+
+static u8 ath12k_hal_rx_desc_get_mpdu_tid_qcc2072(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcc2072.mpdu_start.info2,
+ RX_MPDU_START_INFO2_TID);
+}
+
+static u16 ath12k_hal_rx_desc_get_mpdu_peer_id_qcc2072(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcc2072.mpdu_start.sw_peer_id);
+}
+
+static bool ath12k_hal_rx_desc_mac_addr2_valid_qcc2072(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcc2072.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
+}
+
+static u8 *ath12k_hal_rx_desc_mpdu_start_addr2_qcc2072(struct hal_rx_desc *desc)
+{
+ return desc->u.qcc2072.mpdu_start.addr2;
+}
+
+static bool ath12k_hal_rx_desc_is_da_mcbc_qcc2072(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcc2072.msdu_end.info13) &
+ RX_MSDU_END_INFO13_MCAST_BCAST;
+}
+
+static bool ath12k_hal_rx_h_msdu_done_qcc2072(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcc2072.msdu_end.info14,
+ RX_MSDU_END_INFO14_MSDU_DONE);
+}
+
+static bool ath12k_hal_rx_h_l4_cksum_fail_qcc2072(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcc2072.msdu_end.info13,
+ RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+}
+
+static bool ath12k_hal_rx_h_ip_cksum_fail_qcc2072(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcc2072.msdu_end.info13,
+ RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+}
+
+static bool ath12k_hal_rx_h_is_decrypted_qcc2072(struct hal_rx_desc *desc)
+{
+ return (le32_get_bits(desc->u.qcc2072.msdu_end.info14,
+ RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
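+/* Collapse the per-MSDU error flags in msdu_end info13 into the
+ * driver's generic HAL_RX_MPDU_ERR_* bitmap.
+ */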
+static u32 ath12k_hal_rx_h_mpdu_err_qcc2072(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->u.qcc2072.msdu_end.info13);
+ u32 errmap = 0;
+
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
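+/* Gather everything the rx path needs in one pass: the first/last
+ * flags, L3 padding, MSDU length and msdu_done bit are read from the
+ * last descriptor (ldesc), the remaining fields from the first
+ * (rx_desc).
+ */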
+static void ath12k_hal_extract_rx_desc_data_qcc2072(struct hal_rx_desc_data *rx_desc_data,
+ struct hal_rx_desc *rx_desc,
+ struct hal_rx_desc *ldesc)
+{
+ rx_desc_data->is_first_msdu = ath12k_hal_rx_desc_get_first_msdu_qcc2072(ldesc);
+ rx_desc_data->is_last_msdu = ath12k_hal_rx_desc_get_last_msdu_qcc2072(ldesc);
+ rx_desc_data->l3_pad_bytes = ath12k_hal_rx_desc_get_l3_pad_bytes_qcc2072(ldesc);
+ rx_desc_data->enctype = ath12k_hal_rx_desc_get_encrypt_type_qcc2072(rx_desc);
+ rx_desc_data->decap_type = ath12k_hal_rx_desc_get_decap_type_qcc2072(rx_desc);
+ rx_desc_data->mesh_ctrl_present =
+ ath12k_hal_rx_desc_get_mesh_ctl_qcc2072(rx_desc);
+ rx_desc_data->seq_ctl_valid =
+ ath12k_hal_rx_desc_get_mpdu_seq_ctl_vld_qcc2072(rx_desc);
+ rx_desc_data->fc_valid = ath12k_hal_rx_desc_get_mpdu_fc_valid_qcc2072(rx_desc);
+ rx_desc_data->seq_no = ath12k_hal_rx_desc_get_mpdu_start_seq_no_qcc2072(rx_desc);
+ rx_desc_data->msdu_len = ath12k_hal_rx_desc_get_msdu_len_qcc2072(ldesc);
+ rx_desc_data->sgi = ath12k_hal_rx_desc_get_msdu_sgi_qcc2072(rx_desc);
+ rx_desc_data->rate_mcs = ath12k_hal_rx_desc_get_msdu_rate_mcs_qcc2072(rx_desc);
+ rx_desc_data->bw = ath12k_hal_rx_desc_get_msdu_rx_bw_qcc2072(rx_desc);
+ rx_desc_data->phy_meta_data = ath12k_hal_rx_desc_get_msdu_freq_qcc2072(rx_desc);
+ rx_desc_data->pkt_type = ath12k_hal_rx_desc_get_msdu_pkt_type_qcc2072(rx_desc);
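+ /* MIMO_SS_BITMAP has one bit per spatial stream; popcount yields NSS. */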
+ rx_desc_data->nss = hweight8(ath12k_hal_rx_desc_get_msdu_nss_qcc2072(rx_desc));
+ rx_desc_data->tid = ath12k_hal_rx_desc_get_mpdu_tid_qcc2072(rx_desc);
+ rx_desc_data->peer_id = ath12k_hal_rx_desc_get_mpdu_peer_id_qcc2072(rx_desc);
+ rx_desc_data->addr2_present = ath12k_hal_rx_desc_mac_addr2_valid_qcc2072(rx_desc);
+ rx_desc_data->addr2 = ath12k_hal_rx_desc_mpdu_start_addr2_qcc2072(rx_desc);
+ rx_desc_data->is_mcbc = ath12k_hal_rx_desc_is_da_mcbc_qcc2072(rx_desc);
+ rx_desc_data->msdu_done = ath12k_hal_rx_h_msdu_done_qcc2072(ldesc);
+ rx_desc_data->l4_csum_fail = ath12k_hal_rx_h_l4_cksum_fail_qcc2072(rx_desc);
+ rx_desc_data->ip_csum_fail = ath12k_hal_rx_h_ip_cksum_fail_qcc2072(rx_desc);
+ rx_desc_data->is_decrypted = ath12k_hal_rx_h_is_decrypted_qcc2072(rx_desc);
+ rx_desc_data->err_bitmap = ath12k_hal_rx_h_mpdu_err_qcc2072(rx_desc);
+}
+
+static int ath12k_hal_srng_create_config_qcc2072(struct ath12k_hal *hal)
+{
+ struct hal_srng_config *s;
+ int ret;
+
+ ret = ath12k_hal_srng_create_config_wcn7850(hal);
+ if (ret)
+ return ret;
+
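+ /* Inherit the wcn7850 ring layout and resize only the REO CMD and
+ * STATUS entries for the qcc2072 32-bit TLV descriptor formats.
+ */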
+ s = &hal->srng_config[HAL_REO_CMD];
+ s->entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats_qcc2072)) >> 2;
+
+ s = &hal->srng_config[HAL_REO_STATUS];
+ s->entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats_status_qcc2072)) >> 2;
+
+ return 0;
+}
+
+static u16 ath12k_hal_reo_status_dec_tlv_hdr_qcc2072(void *tlv, void **desc)
+{
+ struct hal_reo_get_queue_stats_status_qcc2072 *status_tlv;
+ u16 tag;
+
+ tag = ath12k_hal_decode_tlv32_hdr(tlv, (void **)&status_tlv);
+ /* The actual REO status descriptor starts after tlv32_padding;
+ * see struct hal_reo_get_queue_stats_status_qcc2072.
+ */
+ *desc = &status_tlv->status;
+
+ return tag;
+}
+
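+/* qcc2072 shares the common wifi7 ring/CE helpers but, unlike
+ * qcn9274, encodes and decodes REO command/status TLVs with 32-bit
+ * headers.
+ */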
+const struct hal_ops hal_qcc2072_ops = {
+ .create_srng_config = ath12k_hal_srng_create_config_qcc2072,
+ .rx_desc_set_msdu_len = ath12k_hal_rx_desc_set_msdu_len_qcc2072,
+ .rx_desc_get_dot11_hdr = ath12k_hal_rx_desc_get_dot11_hdr_qcc2072,
+ .rx_desc_get_crypto_header = ath12k_hal_rx_desc_get_crypto_hdr_qcc2072,
+ .rx_desc_copy_end_tlv = ath12k_hal_rx_desc_copy_end_tlv_qcc2072,
+ .rx_desc_get_msdu_src_link_id = ath12k_hal_rx_desc_get_msdu_src_link_qcc2072,
+ .extract_rx_desc_data = ath12k_hal_extract_rx_desc_data_qcc2072,
+ .rx_desc_get_l3_pad_bytes = ath12k_hal_rx_desc_get_l3_pad_bytes_qcc2072,
+ .rx_desc_get_mpdu_start_tag = ath12k_hal_rx_desc_get_mpdu_start_tag_qcc2072,
+ .rx_desc_get_mpdu_ppdu_id = ath12k_hal_rx_desc_get_mpdu_ppdu_id_qcc2072,
+ .rx_desc_get_msdu_payload = ath12k_hal_rx_desc_get_msdu_payload_qcc2072,
+ .ce_dst_setup = ath12k_wifi7_hal_ce_dst_setup,
+ .srng_src_hw_init = ath12k_wifi7_hal_srng_src_hw_init,
+ .srng_dst_hw_init = ath12k_wifi7_hal_srng_dst_hw_init,
+ .set_umac_srng_ptr_addr = ath12k_wifi7_hal_set_umac_srng_ptr_addr,
+ .srng_update_shadow_config = ath12k_wifi7_hal_srng_update_shadow_config,
+ .srng_get_ring_id = ath12k_wifi7_hal_srng_get_ring_id,
+ .ce_get_desc_size = ath12k_wifi7_hal_ce_get_desc_size,
+ .ce_src_set_desc = ath12k_wifi7_hal_ce_src_set_desc,
+ .ce_dst_set_desc = ath12k_wifi7_hal_ce_dst_set_desc,
+ .ce_dst_status_get_length = ath12k_wifi7_hal_ce_dst_status_get_length,
+ .set_link_desc_addr = ath12k_wifi7_hal_set_link_desc_addr,
+ .tx_set_dscp_tid_map = ath12k_wifi7_hal_tx_set_dscp_tid_map,
+ .tx_configure_bank_register =
+ ath12k_wifi7_hal_tx_configure_bank_register,
+ .reoq_lut_addr_read_enable = ath12k_wifi7_hal_reoq_lut_addr_read_enable,
+ .reoq_lut_set_max_peerid = ath12k_wifi7_hal_reoq_lut_set_max_peerid,
+ .write_reoq_lut_addr = ath12k_wifi7_hal_write_reoq_lut_addr,
+ .write_ml_reoq_lut_addr = ath12k_wifi7_hal_write_ml_reoq_lut_addr,
+ .setup_link_idle_list = ath12k_wifi7_hal_setup_link_idle_list,
+ .reo_init_cmd_ring = ath12k_wifi7_hal_reo_init_cmd_ring_tlv32,
+ .reo_hw_setup = ath12k_wifi7_hal_reo_hw_setup,
+ .rx_buf_addr_info_set = ath12k_wifi7_hal_rx_buf_addr_info_set,
+ .rx_buf_addr_info_get = ath12k_wifi7_hal_rx_buf_addr_info_get,
+ .cc_config = ath12k_wifi7_hal_cc_config,
+ .get_idle_link_rbm = ath12k_wifi7_hal_get_idle_link_rbm,
+ .rx_msdu_list_get = ath12k_wifi7_hal_rx_msdu_list_get,
+ .rx_reo_ent_buf_paddr_get = ath12k_wifi7_hal_rx_reo_ent_buf_paddr_get,
+ .reo_cmd_enc_tlv_hdr = ath12k_hal_encode_tlv32_hdr,
+ .reo_status_dec_tlv_hdr = ath12k_hal_reo_status_dec_tlv_hdr_qcc2072,
+};
+
+u32 ath12k_hal_rx_desc_get_mpdu_start_offset_qcc2072(void)
+{
+ return offsetof(struct hal_rx_desc_qcc2072, mpdu_start_tag);
+}
+
+u32 ath12k_hal_rx_desc_get_msdu_end_offset_qcc2072(void)
+{
+ return offsetof(struct hal_rx_desc_qcc2072, msdu_end_tag);
+}
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/hal_qcc2072.h b/drivers/net/wireless/ath/ath12k/wifi7/hal_qcc2072.h
new file mode 100644
index 000000000000..6de943df7786
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_qcc2072.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_HAL_QCC2072_H
+#define ATH12K_HAL_QCC2072_H
+
+#include "../hal.h"
+#include "hal.h"
+
+extern const struct ath12k_hw_regs qcc2072_regs;
+extern const struct hal_ops hal_qcc2072_ops;
+
+u32 ath12k_hal_rx_desc_get_mpdu_start_offset_qcc2072(void);
+u32 ath12k_hal_rx_desc_get_msdu_end_offset_qcc2072(void);
+
+#endif /* ATH12K_HAL_QCC2072_H */
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/hal_qcn9274.c b/drivers/net/wireless/ath/ath12k/wifi7/hal_qcn9274.c
new file mode 100644
index 000000000000..41c918eb1767
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_qcn9274.c
@@ -0,0 +1,1038 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+#include "hal_desc.h"
+#include "hal_qcn9274.h"
+#include "hw.h"
+#include "hal.h"
+#include "hal_tx.h"
+
+static const struct hal_srng_config hw_srng_config_template[] = {
+ /* TODO: max_rings can be populated by querying HW capabilities */
+ [HAL_REO_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
+ .max_rings = 8,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_EXCEPTION] = {
+ /* Designate REO2SW0 as the exception ring; any of the REO2SW
+ * rings can be used as the exception ring.
+ */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_REINJECT] = {
+ .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
+ .max_rings = 4,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_CMD] = {
+ .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_64_hdr) +
+ sizeof(struct hal_reo_get_queue_stats)) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_64_hdr) +
+ sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TCL_DATA] = {
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
+ .max_rings = 6,
+ .entry_size = sizeof(struct hal_tcl_data_cmd) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TCL_CMD] = {
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_tcl_gse_cmd) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TCL_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_status_ring)) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_CE_SRC] = {
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
+ .max_rings = 16,
+ .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_CE_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
+ .max_rings = 16,
+ .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_CE_DST_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
+ .max_rings = 16,
+ .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_WBM_IDLE_LINK] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_SW2WBM_RELEASE] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_SW0_RELEASE,
+ .max_rings = 2,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_WBM2SW_RELEASE] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ .max_rings = 8,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_RXDMA_BUF] = {
+ .start_ring_id = HAL_SRNG_SW2RXDMA_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_DMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ .max_rings = 0,
+ .entry_size = 0,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_MONITOR_BUF] = {
+ .start_ring_id = HAL_SRNG_SW2RXMON_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_MONITOR_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_MONITOR_DESC] = { 0, },
+ [HAL_RXDMA_DIR_BUF] = {
+ .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+ .max_rings = 2,
+ .entry_size = 8 >> 2, /* TODO: Define the struct */
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_PPE2TCL] = {
+ .start_ring_id = HAL_SRNG_RING_ID_PPE2TCL1,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_tcl_entrance_from_ppe_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_PPE_RELEASE] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_PPE_RELEASE,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TX_MONITOR_BUF] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_MONITOR_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_mon_dest_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_TX_MONITOR_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_mon_dest_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ }
+};
+
+const struct ath12k_hw_regs qcn9274_v1_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .tcl1_ring_id = 0x00000908,
+ .tcl1_ring_misc = 0x00000910,
+ .tcl1_ring_tp_addr_lsb = 0x0000091c,
+ .tcl1_ring_tp_addr_msb = 0x00000920,
+ .tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
+ .tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
+ .tcl1_ring_msi1_base_lsb = 0x00000948,
+ .tcl1_ring_msi1_base_msb = 0x0000094c,
+ .tcl1_ring_msi1_data = 0x00000950,
+ .tcl_ring_base_lsb = 0x00000b58,
+ .tcl1_ring_base_lsb = 0x00000900,
+ .tcl1_ring_base_msb = 0x00000904,
+ .tcl2_ring_base_lsb = 0x00000978,
+
+ /* TCL STATUS ring address */
+ .tcl_status_ring_base_lsb = 0x00000d38,
+
+ .wbm_idle_ring_base_lsb = 0x00000d0c,
+ .wbm_idle_ring_misc_addr = 0x00000d1c,
+ .wbm_r0_idle_list_cntl_addr = 0x00000210,
+ .wbm_r0_idle_list_size_addr = 0x00000214,
+ .wbm_scattered_ring_base_lsb = 0x00000220,
+ .wbm_scattered_ring_base_msb = 0x00000224,
+ .wbm_scattered_desc_head_info_ix0 = 0x00000230,
+ .wbm_scattered_desc_head_info_ix1 = 0x00000234,
+ .wbm_scattered_desc_tail_info_ix0 = 0x00000240,
+ .wbm_scattered_desc_tail_info_ix1 = 0x00000244,
+ .wbm_scattered_desc_ptr_hp_addr = 0x0000024c,
+
+ .wbm_sw_release_ring_base_lsb = 0x0000034c,
+ .wbm_sw1_release_ring_base_lsb = 0x000003c4,
+ .wbm0_release_ring_base_lsb = 0x00000dd8,
+ .wbm1_release_ring_base_lsb = 0x00000e50,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
+
+ /* PPE release ring address */
+ .ppe_rel_ring_base = 0x0000043c,
+
+ /* REO DEST ring address */
+ .reo2_ring_base = 0x0000055c,
+ .reo1_misc_ctrl_addr = 0x00000b7c,
+ .reo1_sw_cookie_cfg0 = 0x00000050,
+ .reo1_sw_cookie_cfg1 = 0x00000054,
+ .reo1_qdesc_lut_base0 = 0x00000058,
+ .reo1_qdesc_lut_base1 = 0x0000005c,
+ .reo1_ring_base_lsb = 0x000004e4,
+ .reo1_ring_base_msb = 0x000004e8,
+ .reo1_ring_id = 0x000004ec,
+ .reo1_ring_misc = 0x000004f4,
+ .reo1_ring_hp_addr_lsb = 0x000004f8,
+ .reo1_ring_hp_addr_msb = 0x000004fc,
+ .reo1_ring_producer_int_setup = 0x00000508,
+ .reo1_ring_msi1_base_lsb = 0x0000052C,
+ .reo1_ring_msi1_base_msb = 0x00000530,
+ .reo1_ring_msi1_data = 0x00000534,
+ .reo1_aging_thres_ix0 = 0x00000b08,
+ .reo1_aging_thres_ix1 = 0x00000b0c,
+ .reo1_aging_thres_ix2 = 0x00000b10,
+ .reo1_aging_thres_ix3 = 0x00000b14,
+
+ /* REO Exception ring address */
+ .reo2_sw0_ring_base = 0x000008a4,
+
+ /* REO Reinject ring address */
+ .sw2reo_ring_base = 0x00000304,
+ .sw2reo1_ring_base = 0x0000037c,
+
+ /* REO cmd ring address */
+ .reo_cmd_ring_base = 0x0000028c,
+
+ /* REO status ring address */
+ .reo_status_ring_base = 0x00000a84,
+
+ /* CE base address */
+ .umac_ce0_src_reg_base = 0x01b80000,
+ .umac_ce0_dest_reg_base = 0x01b81000,
+ .umac_ce1_src_reg_base = 0x01b82000,
+ .umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e38338,
+
+ .qrtr_node_id = 0x1e03164,
+};
+
+const struct ath12k_hw_regs qcn9274_v2_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .tcl1_ring_id = 0x00000908,
+ .tcl1_ring_misc = 0x00000910,
+ .tcl1_ring_tp_addr_lsb = 0x0000091c,
+ .tcl1_ring_tp_addr_msb = 0x00000920,
+ .tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
+ .tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
+ .tcl1_ring_msi1_base_lsb = 0x00000948,
+ .tcl1_ring_msi1_base_msb = 0x0000094c,
+ .tcl1_ring_msi1_data = 0x00000950,
+ .tcl_ring_base_lsb = 0x00000b58,
+ .tcl1_ring_base_lsb = 0x00000900,
+ .tcl1_ring_base_msb = 0x00000904,
+ .tcl2_ring_base_lsb = 0x00000978,
+
+ /* TCL STATUS ring address */
+ .tcl_status_ring_base_lsb = 0x00000d38,
+
+ /* WBM idle link ring address */
+ .wbm_idle_ring_base_lsb = 0x00000d3c,
+ .wbm_idle_ring_misc_addr = 0x00000d4c,
+ .wbm_r0_idle_list_cntl_addr = 0x00000240,
+ .wbm_r0_idle_list_size_addr = 0x00000244,
+ .wbm_scattered_ring_base_lsb = 0x00000250,
+ .wbm_scattered_ring_base_msb = 0x00000254,
+ .wbm_scattered_desc_head_info_ix0 = 0x00000260,
+ .wbm_scattered_desc_head_info_ix1 = 0x00000264,
+ .wbm_scattered_desc_tail_info_ix0 = 0x00000270,
+ .wbm_scattered_desc_tail_info_ix1 = 0x00000274,
+ .wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
+
+ /* SW2WBM release ring address */
+ .wbm_sw_release_ring_base_lsb = 0x0000037c,
+ .wbm_sw1_release_ring_base_lsb = 0x000003f4,
+
+ /* WBM2SW release ring address */
+ .wbm0_release_ring_base_lsb = 0x00000e08,
+ .wbm1_release_ring_base_lsb = 0x00000e80,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
+
+ /* PPE release ring address */
+ .ppe_rel_ring_base = 0x0000046c,
+
+ /* REO DEST ring address */
+ .reo2_ring_base = 0x00000578,
+ .reo1_misc_ctrl_addr = 0x00000b9c,
+ .reo1_sw_cookie_cfg0 = 0x0000006c,
+ .reo1_sw_cookie_cfg1 = 0x00000070,
+ .reo1_qdesc_lut_base0 = 0x00000074,
+ .reo1_qdesc_lut_base1 = 0x00000078,
+ .reo1_qdesc_addr = 0x0000007c,
+ .reo1_qdesc_max_peerid = 0x00000088,
+ .reo1_ring_base_lsb = 0x00000500,
+ .reo1_ring_base_msb = 0x00000504,
+ .reo1_ring_id = 0x00000508,
+ .reo1_ring_misc = 0x00000510,
+ .reo1_ring_hp_addr_lsb = 0x00000514,
+ .reo1_ring_hp_addr_msb = 0x00000518,
+ .reo1_ring_producer_int_setup = 0x00000524,
+ .reo1_ring_msi1_base_lsb = 0x00000548,
+ .reo1_ring_msi1_base_msb = 0x0000054C,
+ .reo1_ring_msi1_data = 0x00000550,
+ .reo1_aging_thres_ix0 = 0x00000B28,
+ .reo1_aging_thres_ix1 = 0x00000B2C,
+ .reo1_aging_thres_ix2 = 0x00000B30,
+ .reo1_aging_thres_ix3 = 0x00000B34,
+
+ /* REO Exception ring address */
+ .reo2_sw0_ring_base = 0x000008c0,
+
+ /* REO Reinject ring address */
+ .sw2reo_ring_base = 0x00000320,
+ .sw2reo1_ring_base = 0x00000398,
+
+ /* REO cmd ring address */
+ .reo_cmd_ring_base = 0x000002A8,
+
+ /* REO status ring address */
+ .reo_status_ring_base = 0x00000aa0,
+
+ /* CE base address */
+ .umac_ce0_src_reg_base = 0x01b80000,
+ .umac_ce0_dest_reg_base = 0x01b81000,
+ .umac_ce1_src_reg_base = 0x01b82000,
+ .umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e38338,
+
+ .qrtr_node_id = 0x1e03164,
+};
+
+const struct ath12k_hw_regs ipq5332_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .tcl1_ring_id = 0x00000918,
+ .tcl1_ring_misc = 0x00000920,
+ .tcl1_ring_tp_addr_lsb = 0x0000092c,
+ .tcl1_ring_tp_addr_msb = 0x00000930,
+ .tcl1_ring_consumer_int_setup_ix0 = 0x00000940,
+ .tcl1_ring_consumer_int_setup_ix1 = 0x00000944,
+ .tcl1_ring_msi1_base_lsb = 0x00000958,
+ .tcl1_ring_msi1_base_msb = 0x0000095c,
+ .tcl1_ring_base_lsb = 0x00000910,
+ .tcl1_ring_base_msb = 0x00000914,
+ .tcl1_ring_msi1_data = 0x00000960,
+ .tcl2_ring_base_lsb = 0x00000988,
+ .tcl_ring_base_lsb = 0x00000b68,
+
+ /* TCL STATUS ring address */
+ .tcl_status_ring_base_lsb = 0x00000d48,
+
+ /* REO DEST ring address */
+ .reo2_ring_base = 0x00000578,
+ .reo1_misc_ctrl_addr = 0x00000b9c,
+ .reo1_sw_cookie_cfg0 = 0x0000006c,
+ .reo1_sw_cookie_cfg1 = 0x00000070,
+ .reo1_qdesc_lut_base0 = 0x00000074,
+ .reo1_qdesc_lut_base1 = 0x00000078,
+ .reo1_ring_base_lsb = 0x00000500,
+ .reo1_ring_base_msb = 0x00000504,
+ .reo1_ring_id = 0x00000508,
+ .reo1_ring_misc = 0x00000510,
+ .reo1_ring_hp_addr_lsb = 0x00000514,
+ .reo1_ring_hp_addr_msb = 0x00000518,
+ .reo1_ring_producer_int_setup = 0x00000524,
+ .reo1_ring_msi1_base_lsb = 0x00000548,
+ .reo1_ring_msi1_base_msb = 0x0000054C,
+ .reo1_ring_msi1_data = 0x00000550,
+ .reo1_aging_thres_ix0 = 0x00000B28,
+ .reo1_aging_thres_ix1 = 0x00000B2C,
+ .reo1_aging_thres_ix2 = 0x00000B30,
+ .reo1_aging_thres_ix3 = 0x00000B34,
+
+ /* REO Exception ring address */
+ .reo2_sw0_ring_base = 0x000008c0,
+
+ /* REO Reinject ring address */
+ .sw2reo_ring_base = 0x00000320,
+ .sw2reo1_ring_base = 0x00000398,
+
+ /* REO cmd ring address */
+ .reo_cmd_ring_base = 0x000002A8,
+
+ /* REO status ring address */
+ .reo_status_ring_base = 0x00000aa0,
+
+ /* WBM idle link ring address */
+ .wbm_idle_ring_base_lsb = 0x00000d3c,
+ .wbm_idle_ring_misc_addr = 0x00000d4c,
+ .wbm_r0_idle_list_cntl_addr = 0x00000240,
+ .wbm_r0_idle_list_size_addr = 0x00000244,
+ .wbm_scattered_ring_base_lsb = 0x00000250,
+ .wbm_scattered_ring_base_msb = 0x00000254,
+ .wbm_scattered_desc_head_info_ix0 = 0x00000260,
+ .wbm_scattered_desc_head_info_ix1 = 0x00000264,
+ .wbm_scattered_desc_tail_info_ix0 = 0x00000270,
+ .wbm_scattered_desc_tail_info_ix1 = 0x00000274,
+ .wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
+
+ /* SW2WBM release ring address */
+ .wbm_sw_release_ring_base_lsb = 0x0000037c,
+
+ /* WBM2SW release ring address */
+ .wbm0_release_ring_base_lsb = 0x00000e08,
+ .wbm1_release_ring_base_lsb = 0x00000e80,
+
+ /* PPE release ring address */
+ .ppe_rel_ring_base = 0x0000046c,
+
+ /* CE address */
+ .umac_ce0_src_reg_base = 0x00740000 -
+ HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .umac_ce0_dest_reg_base = 0x00741000 -
+ HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .umac_ce1_src_reg_base = 0x00742000 -
+ HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .umac_ce1_dest_reg_base = 0x00743000 -
+ HAL_IPQ5332_CE_WFSS_REG_BASE,
+};
+
+static inline
+bool ath12k_hal_rx_desc_get_first_msdu_qcn9274(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+}
+
+static inline
+bool ath12k_hal_rx_desc_get_last_msdu_qcn9274(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_LAST_MSDU);
+}
+
+u8 ath12k_hal_rx_desc_get_l3_pad_bytes_qcn9274(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_L3_HDR_PADDING);
+}
+
+static inline
+bool ath12k_hal_rx_desc_encrypt_valid_qcn9274(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
+}
+
+static inline
+u32 ath12k_hal_rx_desc_get_encrypt_type_qcn9274(struct hal_rx_desc *desc)
+{
+ if (!ath12k_hal_rx_desc_encrypt_valid_qcn9274(desc))
+ return HAL_ENCRYPT_TYPE_OPEN;
+
+ return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info2,
+ RX_MPDU_START_INFO2_ENC_TYPE);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_decap_type_qcn9274(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_mesh_ctl_qcn9274(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11,
+ RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
+}
+
+static inline
+bool ath12k_hal_rx_desc_get_mpdu_seq_ctl_vld_qcn9274(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
+}
+
+static inline
+bool ath12k_hal_rx_desc_get_mpdu_fc_valid_qcn9274(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
+}
+
+static inline
+u16 ath12k_hal_rx_desc_get_mpdu_start_seq_no_qcn9274(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
+}
+
+static inline
+u16 ath12k_hal_rx_desc_get_msdu_len_qcn9274(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info10,
+ RX_MSDU_END_INFO10_MSDU_LENGTH);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_msdu_sgi_qcn9274(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_SGI);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_msdu_rate_mcs_qcn9274(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_RATE_MCS);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_msdu_rx_bw_qcn9274(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_RECV_BW);
+}
+
+static inline
+u32 ath12k_hal_rx_desc_get_msdu_freq_qcn9274(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.phy_meta_data);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_msdu_pkt_type_qcn9274(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_PKT_TYPE);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_msdu_nss_qcn9274(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_mpdu_tid_qcn9274(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_TID);
+}
+
+static inline
+u16 ath12k_hal_rx_desc_get_mpdu_peer_id_qcn9274(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.sw_peer_id);
+}
+
+void ath12k_hal_rx_desc_copy_end_tlv_qcn9274(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ fdesc->u.qcn9274_compact.msdu_end = ldesc->u.qcn9274_compact.msdu_end;
+}
+
+u32 ath12k_hal_rx_desc_get_mpdu_ppdu_id_qcn9274(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.phy_ppdu_id);
+}
+
+void ath12k_hal_rx_desc_set_msdu_len_qcn9274(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info10);
+
+ info = u32_replace_bits(info, len, RX_MSDU_END_INFO10_MSDU_LENGTH);
+ desc->u.qcn9274_compact.msdu_end.info10 = __cpu_to_le32(info);
+}
+
+u8 *ath12k_hal_rx_desc_get_msdu_payload_qcn9274(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9274_compact.msdu_payload[0];
+}
+
+u32 ath12k_hal_rx_desc_get_mpdu_start_offset_qcn9274(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274_compact, mpdu_start);
+}
+
+u32 ath12k_hal_rx_desc_get_msdu_end_offset_qcn9274(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274_compact, msdu_end);
+}
+
+static inline
+bool ath12k_hal_rx_desc_mac_addr2_valid_qcn9274(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
+}
+
+static inline
+u8 *ath12k_hal_rx_desc_mpdu_start_addr2_qcn9274(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9274_compact.mpdu_start.addr2;
+}
+
+static inline
+bool ath12k_hal_rx_desc_is_da_mcbc_qcn9274(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.msdu_end.info5) &
+ RX_MSDU_END_INFO5_DA_IS_MCBC;
+}
+
+static inline
+bool ath12k_hal_rx_h_msdu_done_qcn9274(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
+ RX_MSDU_END_INFO14_MSDU_DONE);
+}
+
+static inline
+bool ath12k_hal_rx_h_l4_cksum_fail_qcn9274(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
+ RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+}
+
+static inline
+bool ath12k_hal_rx_h_ip_cksum_fail_qcn9274(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
+ RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+}
+
+static inline
+bool ath12k_hal_rx_h_is_decrypted_qcn9274(struct hal_rx_desc *desc)
+{
+ return (le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
+ RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+u32 ath12k_hal_get_rx_desc_size_qcn9274(void)
+{
+ return sizeof(struct hal_rx_desc_qcn9274_compact);
+}
+
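+/* On qcn9274 the MLO source link ID is carried in the 64-bit
+ * msdu_end TLV tag word itself.
+ */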
+u8 ath12k_hal_rx_desc_get_msdu_src_link_qcn9274(struct hal_rx_desc *desc)
+{
+ return le64_get_bits(desc->u.qcn9274_compact.msdu_end.msdu_end_tag,
+ RX_MSDU_END_64_TLV_SRC_LINK_ID);
+}
+
+u16 ath12k_hal_rx_mpdu_start_wmask_get_qcn9274(void)
+{
+ return QCN9274_MPDU_START_WMASK;
+}
+
+u32 ath12k_hal_rx_msdu_end_wmask_get_qcn9274(void)
+{
+ return QCN9274_MSDU_END_WMASK;
+}
+
+static u32 ath12k_hal_rx_h_mpdu_err_qcn9274(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info13);
+ u32 errmap = 0;
+
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+void ath12k_hal_rx_desc_get_crypto_hdr_qcn9274(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ unsigned int key_id;
+
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[1] = 0;
+ crypto_hdr[2] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ break;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[1] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[2] = 0;
+ break;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ return;
+ }
+ key_id = le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info5,
+ RX_MPDU_START_INFO5_KEY_ID);
+ crypto_hdr[3] = 0x20 | (key_id << 6);
+ crypto_hdr[4] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[5] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[6] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[1]);
+ crypto_hdr[7] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]);
+}
+
+void ath12k_hal_rx_desc_get_dot11_hdr_qcn9274(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ hdr->frame_control = desc->u.qcn9274_compact.mpdu_start.frame_ctrl;
+ hdr->duration_id = desc->u.qcn9274_compact.mpdu_start.duration;
+ ether_addr_copy(hdr->addr1, desc->u.qcn9274_compact.mpdu_start.addr1);
+ ether_addr_copy(hdr->addr2, desc->u.qcn9274_compact.mpdu_start.addr2);
+ ether_addr_copy(hdr->addr3, desc->u.qcn9274_compact.mpdu_start.addr3);
+ if (__le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
+ ether_addr_copy(hdr->addr4, desc->u.qcn9274_compact.mpdu_start.addr4);
+ }
+ hdr->seq_ctrl = desc->u.qcn9274_compact.mpdu_start.seq_ctrl;
+}
+
+void ath12k_hal_extract_rx_desc_data_qcn9274(struct hal_rx_desc_data *rx_desc_data,
+ struct hal_rx_desc *rx_desc,
+ struct hal_rx_desc *ldesc)
+{
+ rx_desc_data->is_first_msdu = ath12k_hal_rx_desc_get_first_msdu_qcn9274(ldesc);
+ rx_desc_data->is_last_msdu = ath12k_hal_rx_desc_get_last_msdu_qcn9274(ldesc);
+ rx_desc_data->l3_pad_bytes = ath12k_hal_rx_desc_get_l3_pad_bytes_qcn9274(ldesc);
+ rx_desc_data->enctype = ath12k_hal_rx_desc_get_encrypt_type_qcn9274(rx_desc);
+ rx_desc_data->decap_type = ath12k_hal_rx_desc_get_decap_type_qcn9274(rx_desc);
+ rx_desc_data->mesh_ctrl_present =
+ ath12k_hal_rx_desc_get_mesh_ctl_qcn9274(rx_desc);
+ rx_desc_data->seq_ctl_valid =
+ ath12k_hal_rx_desc_get_mpdu_seq_ctl_vld_qcn9274(rx_desc);
+ rx_desc_data->fc_valid = ath12k_hal_rx_desc_get_mpdu_fc_valid_qcn9274(rx_desc);
+ rx_desc_data->seq_no = ath12k_hal_rx_desc_get_mpdu_start_seq_no_qcn9274(rx_desc);
+ rx_desc_data->msdu_len = ath12k_hal_rx_desc_get_msdu_len_qcn9274(ldesc);
+ rx_desc_data->sgi = ath12k_hal_rx_desc_get_msdu_sgi_qcn9274(rx_desc);
+ rx_desc_data->rate_mcs = ath12k_hal_rx_desc_get_msdu_rate_mcs_qcn9274(rx_desc);
+ rx_desc_data->bw = ath12k_hal_rx_desc_get_msdu_rx_bw_qcn9274(rx_desc);
+ rx_desc_data->phy_meta_data = ath12k_hal_rx_desc_get_msdu_freq_qcn9274(rx_desc);
+ rx_desc_data->pkt_type = ath12k_hal_rx_desc_get_msdu_pkt_type_qcn9274(rx_desc);
+ rx_desc_data->nss = hweight8(ath12k_hal_rx_desc_get_msdu_nss_qcn9274(rx_desc));
+ rx_desc_data->tid = ath12k_hal_rx_desc_get_mpdu_tid_qcn9274(rx_desc);
+ rx_desc_data->peer_id = ath12k_hal_rx_desc_get_mpdu_peer_id_qcn9274(rx_desc);
+ rx_desc_data->addr2_present = ath12k_hal_rx_desc_mac_addr2_valid_qcn9274(rx_desc);
+ rx_desc_data->addr2 = ath12k_hal_rx_desc_mpdu_start_addr2_qcn9274(rx_desc);
+ rx_desc_data->is_mcbc = ath12k_hal_rx_desc_is_da_mcbc_qcn9274(rx_desc);
+ rx_desc_data->msdu_done = ath12k_hal_rx_h_msdu_done_qcn9274(ldesc);
+ rx_desc_data->l4_csum_fail = ath12k_hal_rx_h_l4_cksum_fail_qcn9274(rx_desc);
+ rx_desc_data->ip_csum_fail = ath12k_hal_rx_h_ip_cksum_fail_qcn9274(rx_desc);
+ rx_desc_data->is_decrypted = ath12k_hal_rx_h_is_decrypted_qcn9274(rx_desc);
+ rx_desc_data->err_bitmap = ath12k_hal_rx_h_mpdu_err_qcn9274(rx_desc);
+}
+
+const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+ .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
+};
+
+const struct ath12k_hw_hal_params ath12k_hw_hal_params_ipq5332 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+ .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
+};
+
+static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_hal *hal)
+{
+ struct hal_srng_config *s;
+
+ hal->srng_config = kmemdup(hw_srng_config_template,
+ sizeof(hw_srng_config_template),
+ GFP_KERNEL);
+ if (!hal->srng_config)
+ return -ENOMEM;
+
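+ /* Start from the shared template, then patch in the chip-specific
+ * register offsets; reg_size[] is the stride between consecutive
+ * rings of the same type.
+ */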
+ s = &hal->srng_config[HAL_REO_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP;
+ s->reg_size[0] = HAL_REO2_RING_BASE_LSB(hal) - HAL_REO1_RING_BASE_LSB(hal);
+ s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_EXCEPTION];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_REINJECT];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
+ s->reg_size[0] = HAL_SW2REO1_RING_BASE_LSB(hal) - HAL_SW2REO_RING_BASE_LSB(hal);
+ s->reg_size[1] = HAL_SW2REO1_RING_HP - HAL_SW2REO_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
+
+ s = &hal->srng_config[HAL_REO_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
+
+ s = &hal->srng_config[HAL_TCL_DATA];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(hal) - HAL_TCL1_RING_BASE_LSB(hal);
+ s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+
+ s = &hal->srng_config[HAL_CE_SRC];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(hal) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(hal) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal);
+
+ s = &hal->srng_config[HAL_CE_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal);
+
+ s = &hal->srng_config[HAL_CE_DST_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) +
+ HAL_CE_DST_STATUS_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + HAL_CE_DST_STATUS_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal);
+
+ s = &hal->srng_config[HAL_WBM_IDLE_LINK];
+ s->reg_start[0] =
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
+
+ s = &hal->srng_config[HAL_SW2WBM_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SW_RELEASE_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP;
+ s->reg_size[0] = HAL_WBM_SW1_RELEASE_RING_BASE_LSB(hal) -
+ HAL_WBM_SW_RELEASE_RING_BASE_LSB(hal);
+ s->reg_size[1] = HAL_WBM_SW1_RELEASE_RING_HP - HAL_WBM_SW_RELEASE_RING_HP;
+
+ s = &hal->srng_config[HAL_WBM2SW_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
+ s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(hal) -
+ HAL_WBM0_RELEASE_RING_BASE_LSB(hal);
+ s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
+
+ /* Some LMAC rings are not accessed from the host:
+ * RXDMA_BUF, RXDMA_DST, RXDMA_MONITOR_BUF, RXDMA_MONITOR_STATUS,
+ * RXDMA_MONITOR_DST, RXDMA_MONITOR_DESC, RXDMA_DIR_BUF_SRC,
+ * RXDMA_RX_MONITOR_BUF, TX_MONITOR_BUF, TX_MONITOR_DST, SW2RXDMA
+ */
+ s = &hal->srng_config[HAL_PPE2TCL];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_PPE_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_PPE_RELEASE_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_PPE_RELEASE_RING_HP;
+
+ return 0;
+}
+
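+/* Map each TCL data ring to a WBM2SW completion ring. Ring 3 is
+ * skipped: SW3_BM is reserved for rx buffer release (see rx_buf_rbm
+ * in ath12k_hw_hal_params_qcn9274 above).
+ */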
+const struct ath12k_hal_tcl_to_wbm_rbm_map
+ath12k_hal_tcl_to_wbm_rbm_map_qcn9274[DP_TCL_NUM_RING_MAX] = {
+ {
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .wbm_ring_num = 1,
+ .rbm_id = HAL_RX_BUF_RBM_SW1_BM,
+ },
+ {
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+ {
+ .wbm_ring_num = 4,
+ .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+ },
+};
+
+const struct hal_ops hal_qcn9274_ops = {
+ .create_srng_config = ath12k_hal_srng_create_config_qcn9274,
+ .rx_desc_set_msdu_len = ath12k_hal_rx_desc_set_msdu_len_qcn9274,
+ .rx_desc_get_dot11_hdr = ath12k_hal_rx_desc_get_dot11_hdr_qcn9274,
+ .rx_desc_get_crypto_header = ath12k_hal_rx_desc_get_crypto_hdr_qcn9274,
+ .rx_desc_copy_end_tlv = ath12k_hal_rx_desc_copy_end_tlv_qcn9274,
+ .rx_desc_get_msdu_src_link_id = ath12k_hal_rx_desc_get_msdu_src_link_qcn9274,
+ .extract_rx_desc_data = ath12k_hal_extract_rx_desc_data_qcn9274,
+ .rx_desc_get_l3_pad_bytes = ath12k_hal_rx_desc_get_l3_pad_bytes_qcn9274,
+ .rx_desc_get_mpdu_ppdu_id = ath12k_hal_rx_desc_get_mpdu_ppdu_id_qcn9274,
+ .rx_desc_get_msdu_payload = ath12k_hal_rx_desc_get_msdu_payload_qcn9274,
+ .ce_dst_setup = ath12k_wifi7_hal_ce_dst_setup,
+ .srng_src_hw_init = ath12k_wifi7_hal_srng_src_hw_init,
+ .srng_dst_hw_init = ath12k_wifi7_hal_srng_dst_hw_init,
+ .set_umac_srng_ptr_addr = ath12k_wifi7_hal_set_umac_srng_ptr_addr,
+ .srng_update_shadow_config = ath12k_wifi7_hal_srng_update_shadow_config,
+ .srng_get_ring_id = ath12k_wifi7_hal_srng_get_ring_id,
+ .ce_get_desc_size = ath12k_wifi7_hal_ce_get_desc_size,
+ .ce_src_set_desc = ath12k_wifi7_hal_ce_src_set_desc,
+ .ce_dst_set_desc = ath12k_wifi7_hal_ce_dst_set_desc,
+ .ce_dst_status_get_length = ath12k_wifi7_hal_ce_dst_status_get_length,
+ .set_link_desc_addr = ath12k_wifi7_hal_set_link_desc_addr,
+ .tx_set_dscp_tid_map = ath12k_wifi7_hal_tx_set_dscp_tid_map,
+ .tx_configure_bank_register =
+ ath12k_wifi7_hal_tx_configure_bank_register,
+ .reoq_lut_addr_read_enable = ath12k_wifi7_hal_reoq_lut_addr_read_enable,
+ .reoq_lut_set_max_peerid = ath12k_wifi7_hal_reoq_lut_set_max_peerid,
+ .write_reoq_lut_addr = ath12k_wifi7_hal_write_reoq_lut_addr,
+ .write_ml_reoq_lut_addr = ath12k_wifi7_hal_write_ml_reoq_lut_addr,
+ .setup_link_idle_list = ath12k_wifi7_hal_setup_link_idle_list,
+ .reo_init_cmd_ring = ath12k_wifi7_hal_reo_init_cmd_ring_tlv64,
+ .reo_hw_setup = ath12k_wifi7_hal_reo_hw_setup,
+ .reo_shared_qaddr_cache_clear = ath12k_wifi7_hal_reo_shared_qaddr_cache_clear,
+ .rx_buf_addr_info_set = ath12k_wifi7_hal_rx_buf_addr_info_set,
+ .rx_buf_addr_info_get = ath12k_wifi7_hal_rx_buf_addr_info_get,
+ .cc_config = ath12k_wifi7_hal_cc_config,
+ .get_idle_link_rbm = ath12k_wifi7_hal_get_idle_link_rbm,
+ .rx_msdu_list_get = ath12k_wifi7_hal_rx_msdu_list_get,
+ .rx_reo_ent_buf_paddr_get = ath12k_wifi7_hal_rx_reo_ent_buf_paddr_get,
+ .reo_cmd_enc_tlv_hdr = ath12k_hal_encode_tlv64_hdr,
+ .reo_status_dec_tlv_hdr = ath12k_hal_decode_tlv64_hdr,
+};
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/hal_qcn9274.h b/drivers/net/wireless/ath/ath12k/wifi7/hal_qcn9274.h
new file mode 100644
index 000000000000..08c0a0469474
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_qcn9274.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_HAL_QCN9274_H
+#define ATH12K_HAL_QCN9274_H
+
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
+#include "../hal.h"
+#include "hal_rx.h"
+#include "hal.h"
+
+extern const struct hal_ops hal_qcn9274_ops;
+extern const struct ath12k_hw_regs qcn9274_v1_regs;
+extern const struct ath12k_hw_regs qcn9274_v2_regs;
+extern const struct ath12k_hw_regs ipq5332_regs;
+extern const struct ath12k_hal_tcl_to_wbm_rbm_map
+ath12k_hal_tcl_to_wbm_rbm_map_qcn9274[DP_TCL_NUM_RING_MAX];
+extern const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274;
+extern const struct ath12k_hw_hal_params ath12k_hw_hal_params_ipq5332;
+
+u8 ath12k_hal_rx_desc_get_l3_pad_bytes_qcn9274(struct hal_rx_desc *desc);
+void ath12k_hal_rx_desc_copy_end_tlv_qcn9274(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc);
+u32 ath12k_hal_rx_desc_get_mpdu_ppdu_id_qcn9274(struct hal_rx_desc *desc);
+void ath12k_hal_rx_desc_set_msdu_len_qcn9274(struct hal_rx_desc *desc, u16 len);
+u8 *ath12k_hal_rx_desc_get_msdu_payload_qcn9274(struct hal_rx_desc *desc);
+u32 ath12k_hal_rx_desc_get_mpdu_start_offset_qcn9274(void);
+u32 ath12k_hal_rx_desc_get_msdu_end_offset_qcn9274(void);
+u32 ath12k_hal_get_rx_desc_size_qcn9274(void);
+u8 ath12k_hal_rx_desc_get_msdu_src_link_qcn9274(struct hal_rx_desc *desc);
+u16 ath12k_hal_rx_mpdu_start_wmask_get_qcn9274(void);
+u32 ath12k_hal_rx_msdu_end_wmask_get_qcn9274(void);
+void ath12k_hal_rx_desc_get_crypto_hdr_qcn9274(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype);
+void ath12k_hal_rx_desc_get_dot11_hdr_qcn9274(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr);
+void ath12k_hal_extract_rx_desc_data_qcn9274(struct hal_rx_desc_data *rx_desc_data,
+ struct hal_rx_desc *rx_desc,
+ struct hal_rx_desc *ldesc);
+
+#endif /* ATH12K_HAL_QCN9274_H */
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/wifi7/hal_rx.c
index c4443ca05cd6..49c693289709 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_rx.c
@@ -4,15 +4,17 @@
* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
-#include "debug.h"
-#include "hal.h"
+#include "../debug.h"
+#include "../hal.h"
+#include "../hif.h"
#include "hal_tx.h"
#include "hal_rx.h"
#include "hal_desc.h"
-#include "hif.h"
+#include "hal.h"
-static void ath12k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
- u8 owner, u8 buffer_type, u32 magic)
+static
+void ath12k_wifi7_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
+ u8 owner, u8 buffer_type, u32 magic)
{
hdr->info0 = le32_encode_bits(owner, HAL_DESC_HDR_INFO0_OWNER) |
le32_encode_bits(buffer_type, HAL_DESC_HDR_INFO0_BUF_TYPE);
@@ -21,15 +23,13 @@ static void ath12k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
hdr->info0 |= le32_encode_bits(magic, HAL_DESC_HDR_INFO0_DBG_RESERVED);
}
-static int ath12k_hal_reo_cmd_queue_stats(struct hal_tlv_64_hdr *tlv,
- struct ath12k_hal_reo_cmd *cmd)
+static int ath12k_wifi7_hal_reo_cmd_queue_stats(struct ath12k_hal *hal, void *tlv,
+ struct ath12k_hal_reo_cmd *cmd)
{
struct hal_reo_get_queue_stats *desc;
- tlv->tl = le64_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
- le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
-
- desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ desc = hal->ops->reo_cmd_enc_tlv_hdr(tlv, HAL_REO_GET_QUEUE_STATS,
+ sizeof(*desc));
memset_startat(desc, 0, queue_addr_lo);
desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
@@ -45,9 +45,8 @@ static int ath12k_hal_reo_cmd_queue_stats(struct hal_tlv_64_hdr *tlv,
return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
}
-static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
- struct hal_tlv_64_hdr *tlv,
- struct ath12k_hal_reo_cmd *cmd)
+static int ath12k_wifi7_hal_reo_cmd_flush_cache(struct ath12k_hal *hal, void *tlv,
+ struct ath12k_hal_reo_cmd *cmd)
{
struct hal_reo_flush_cache *desc;
u8 avail_slot = ffz(hal->avail_blk_resource);
@@ -59,10 +58,8 @@ static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
hal->current_blk_index = avail_slot;
}
- tlv->tl = le64_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
- le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
-
- desc = (struct hal_reo_flush_cache *)tlv->value;
+ desc = hal->ops->reo_cmd_enc_tlv_hdr(tlv, HAL_REO_FLUSH_CACHE,
+ sizeof(*desc));
memset_startat(desc, 0, cache_addr_lo);
desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
@@ -95,15 +92,14 @@ static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
}
-static int ath12k_hal_reo_cmd_update_rx_queue(struct hal_tlv_64_hdr *tlv,
- struct ath12k_hal_reo_cmd *cmd)
+static int
+ath12k_wifi7_hal_reo_cmd_update_rx_queue(struct ath12k_hal *hal, void *tlv,
+ struct ath12k_hal_reo_cmd *cmd)
{
struct hal_reo_update_rx_queue *desc;
- tlv->tl = le64_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
- le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
-
- desc = (struct hal_reo_update_rx_queue *)tlv->value;
+ desc = hal->ops->reo_cmd_enc_tlv_hdr(tlv, HAL_REO_UPDATE_RX_REO_QUEUE,
+ sizeof(*desc));
memset_startat(desc, 0, queue_addr_lo);
desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
@@ -220,11 +216,12 @@ static int ath12k_hal_reo_cmd_update_rx_queue(struct hal_tlv_64_hdr *tlv,
return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
}
-int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
- enum hal_reo_cmd_type type,
- struct ath12k_hal_reo_cmd *cmd)
+int ath12k_wifi7_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd)
{
- struct hal_tlv_64_hdr *reo_desc;
+ struct ath12k_hal *hal = &ab->hal;
+ void *reo_desc;
int ret;
spin_lock_bh(&srng->lock);
@@ -238,13 +235,13 @@ int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
switch (type) {
case HAL_REO_CMD_GET_QUEUE_STATS:
- ret = ath12k_hal_reo_cmd_queue_stats(reo_desc, cmd);
+ ret = ath12k_wifi7_hal_reo_cmd_queue_stats(hal, reo_desc, cmd);
break;
case HAL_REO_CMD_FLUSH_CACHE:
- ret = ath12k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd);
+ ret = ath12k_wifi7_hal_reo_cmd_flush_cache(hal, reo_desc, cmd);
break;
case HAL_REO_CMD_UPDATE_RX_QUEUE:
- ret = ath12k_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
+ ret = ath12k_wifi7_hal_reo_cmd_update_rx_queue(hal, reo_desc, cmd);
break;
case HAL_REO_CMD_FLUSH_QUEUE:
case HAL_REO_CMD_UNBLOCK_CACHE:
@@ -265,8 +262,9 @@ out:
return ret;
}
-void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
- dma_addr_t paddr, u32 cookie, u8 manager)
+void ath12k_wifi7_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
+ dma_addr_t paddr, u32 cookie,
+ u8 manager)
{
u32 paddr_lo, paddr_hi;
@@ -278,9 +276,9 @@ void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
le32_encode_bits(manager, BUFFER_ADDR_INFO1_RET_BUF_MGR);
}
-void ath12k_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo,
- dma_addr_t *paddr,
- u32 *cookie, u8 *rbm)
+void ath12k_wifi7_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo,
+ dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm)
{
*paddr = (((u64)le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_ADDR)) << 32) |
le32_get_bits(binfo->info0, BUFFER_ADDR_INFO0_ADDR);
@@ -288,9 +286,10 @@ void ath12k_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo,
*rbm = le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_RET_BUF_MGR);
}
-void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_msdus,
- u32 *msdu_cookies,
- enum hal_rx_buf_return_buf_manager *rbm)
+void
+ath12k_wifi7_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link,
+ u32 *num_msdus, u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm)
{
struct hal_rx_msdu_details *msdu;
u32 val;
@@ -317,10 +316,11 @@ void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_ms
}
}
-int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab,
- struct hal_reo_dest_ring *desc,
- dma_addr_t *paddr, u32 *desc_bank)
+int ath12k_wifi7_hal_desc_reo_parse_err(struct ath12k_dp *dp,
+ struct hal_reo_dest_ring *desc,
+ dma_addr_t *paddr, u32 *desc_bank)
{
+ struct ath12k_base *ab = dp->ab;
enum hal_reo_dest_ring_push_reason push_reason;
enum hal_reo_dest_ring_error_code err_code;
u32 cookie;
@@ -329,7 +329,7 @@ int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
err_code = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_ERROR_CODE);
- ab->device_stats.reo_error[err_code]++;
+ dp->device_stats.reo_error[err_code]++;
if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
@@ -338,14 +338,15 @@ int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab,
return -EINVAL;
}
- ath12k_hal_rx_reo_ent_paddr_get(ab, &desc->buf_addr_info, paddr, &cookie);
+ ath12k_wifi7_hal_rx_reo_ent_paddr_get(&desc->buf_addr_info, paddr,
+ &cookie);
*desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
return 0;
}
-int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
- struct hal_rx_wbm_rel_info *rel_info)
+int ath12k_wifi7_hal_wbm_desc_parse_err(struct ath12k_dp *dp, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info)
{
struct hal_wbm_release_ring *wbm_desc = desc;
struct hal_wbm_release_ring_cc_rx *wbm_cc_desc = desc;
@@ -378,7 +379,7 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
val = le32_get_bits(wbm_desc->buf_addr_info.info1,
BUFFER_ADDR_INFO1_RET_BUF_MGR);
if (val != HAL_RX_BUF_RBM_SW3_BM) {
- ab->device_stats.invalid_rbm++;
+ dp->device_stats.invalid_rbm++;
return -EINVAL;
}
@@ -390,7 +391,7 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
val = le32_get_bits(wbm_cc_desc->info0,
HAL_WBM_RELEASE_RX_CC_INFO0_RBM);
if (val != HAL_RX_BUF_RBM_SW3_BM) {
- ab->device_stats.invalid_rbm++;
+ dp->device_stats.invalid_rbm++;
return -EINVAL;
}
@@ -429,12 +430,13 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE);
}
+ rel_info->peer_metadata = wbm_desc->info2;
+
return 0;
}
-void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
- struct ath12k_buffer_addr *buff_addr,
- dma_addr_t *paddr, u32 *cookie)
+void ath12k_wifi7_hal_rx_reo_ent_paddr_get(struct ath12k_buffer_addr *buff_addr,
+ dma_addr_t *paddr, u32 *cookie)
{
*paddr = ((u64)(le32_get_bits(buff_addr->info1,
BUFFER_ADDR_INFO1_ADDR)) << 32) |
@@ -443,10 +445,10 @@ void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
*cookie = le32_get_bits(buff_addr->info1, BUFFER_ADDR_INFO1_SW_COOKIE);
}
-void ath12k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
- u32 *sw_cookie,
- struct ath12k_buffer_addr **pp_buf_addr,
- u8 *rbm, u32 *msdu_cnt)
+void ath12k_wifi7_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
+ u32 *sw_cookie,
+ struct ath12k_buffer_addr **pp_buf_addr,
+ u8 *rbm, u32 *msdu_cnt)
{
struct hal_reo_entrance_ring *reo_ent_ring =
(struct hal_reo_entrance_ring *)rx_desc;
@@ -474,11 +476,14 @@ void ath12k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
*pp_buf_addr = (void *)buf_addr_info;
}
-void ath12k_hal_rx_msdu_list_get(struct ath12k *ar,
- struct hal_rx_msdu_link *link_desc,
- struct hal_rx_msdu_list *msdu_list,
- u16 *num_msdus)
+void ath12k_wifi7_hal_rx_msdu_list_get(struct ath12k *ar,
+ void *link_desc_opaque,
+ void *msdu_list_opaque, u16 *num_msdus)
{
+ struct hal_rx_msdu_link *link_desc =
+ (struct hal_rx_msdu_link *)link_desc_opaque;
+ struct hal_rx_msdu_list *msdu_list =
+ (struct hal_rx_msdu_list *)msdu_list_opaque;
struct hal_rx_msdu_details *msdu_details = NULL;
struct rx_msdu_desc *msdu_desc_info = NULL;
u32 last = 0, first = 0;
@@ -523,10 +528,11 @@ void ath12k_hal_rx_msdu_list_get(struct ath12k *ar,
*num_msdus = i;
}
-void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
- struct hal_wbm_release_ring *desc,
- struct ath12k_buffer_addr *buf_addr_info,
- enum hal_wbm_rel_bm_act action)
+void
+ath12k_wifi7_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
+ struct hal_wbm_release_ring *desc,
+ struct ath12k_buffer_addr *buf_addr_info,
+ enum hal_wbm_rel_bm_act action)
{
desc->buf_addr_info = *buf_addr_info;
desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW,
@@ -536,12 +542,10 @@ void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
HAL_WBM_RELEASE_INFO0_DESC_TYPE);
}
-void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status)
+void ath12k_wifi7_hal_reo_status_queue_stats(struct ath12k_base *ab,
+ struct hal_reo_get_queue_stats_status *desc,
+ struct hal_reo_status *status)
{
- struct hal_reo_get_queue_stats_status *desc =
- (struct hal_reo_get_queue_stats_status *)tlv->value;
-
status->uniform_hdr.cmd_num =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
@@ -599,12 +603,10 @@ void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab, struct hal_tlv_64
HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT));
}
-void ath12k_hal_reo_flush_queue_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status)
+void ath12k_wifi7_hal_reo_flush_queue_status(struct ath12k_base *ab,
+ struct hal_reo_flush_queue_status *desc,
+ struct hal_reo_status *status)
{
- struct hal_reo_flush_queue_status *desc =
- (struct hal_reo_flush_queue_status *)tlv->value;
-
status->uniform_hdr.cmd_num =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
@@ -616,12 +618,12 @@ void ath12k_hal_reo_flush_queue_status(struct ath12k_base *ab, struct hal_tlv_64
HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED);
}
-void ath12k_hal_reo_flush_cache_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status)
+void
+ath12k_wifi7_hal_reo_flush_cache_status(struct ath12k_base *ab,
+ struct hal_reo_flush_cache_status *desc,
+ struct hal_reo_status *status)
{
struct ath12k_hal *hal = &ab->hal;
- struct hal_reo_flush_cache_status *desc =
- (struct hal_reo_flush_cache_status *)tlv->value;
status->uniform_hdr.cmd_num =
le32_get_bits(desc->hdr.info0,
@@ -657,12 +659,11 @@ void ath12k_hal_reo_flush_cache_status(struct ath12k_base *ab, struct hal_tlv_64
HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT);
}
-void ath12k_hal_reo_unblk_cache_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status)
+void ath12k_wifi7_hal_reo_unblk_cache_status(struct ath12k_base *ab,
+ struct hal_reo_unblock_cache_status *desc,
+ struct hal_reo_status *status)
{
struct ath12k_hal *hal = &ab->hal;
- struct hal_reo_unblock_cache_status *desc =
- (struct hal_reo_unblock_cache_status *)tlv->value;
status->uniform_hdr.cmd_num =
le32_get_bits(desc->hdr.info0,
@@ -684,13 +685,11 @@ void ath12k_hal_reo_unblk_cache_status(struct ath12k_base *ab, struct hal_tlv_64
hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
}
-void ath12k_hal_reo_flush_timeout_list_status(struct ath12k_base *ab,
- struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status)
+void
+ath12k_wifi7_hal_reo_flush_timeout_list_status(struct ath12k_base *ab,
+ struct hal_reo_flush_timeout_list_status *desc,
+ struct hal_reo_status *status)
{
- struct hal_reo_flush_timeout_list_status *desc =
- (struct hal_reo_flush_timeout_list_status *)tlv->value;
-
status->uniform_hdr.cmd_num =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
@@ -713,13 +712,11 @@ void ath12k_hal_reo_flush_timeout_list_status(struct ath12k_base *ab,
HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT);
}
-void ath12k_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab,
- struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status)
+void
+ath12k_wifi7_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab,
+ struct hal_reo_desc_thresh_reached_status *desc,
+ struct hal_reo_status *status)
{
- struct hal_reo_desc_thresh_reached_status *desc =
- (struct hal_reo_desc_thresh_reached_status *)tlv->value;
-
status->uniform_hdr.cmd_num =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
@@ -748,13 +745,10 @@ void ath12k_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab,
HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM);
}
-void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
- struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status)
+void ath12k_wifi7_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
+ struct hal_reo_status_hdr *desc,
+ struct hal_reo_status *status)
{
- struct hal_reo_status_hdr *desc =
- (struct hal_reo_status_hdr *)tlv->value;
-
status->uniform_hdr.cmd_num =
le32_get_bits(desc->info0,
HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
@@ -763,7 +757,7 @@ void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
}
-u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
+u32 ath12k_wifi7_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
{
u32 num_ext_desc, num_1k_desc = 0;
@@ -789,15 +783,15 @@ u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
(num_1k_desc * sizeof(struct hal_rx_reo_queue_1k));
}
-void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
- int tid, u32 ba_window_size,
- u32 start_seq, enum hal_pn_type type)
+void ath12k_wifi7_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
+ int tid, u32 ba_window_size,
+ u32 start_seq, enum hal_pn_type type)
{
struct hal_rx_reo_queue_ext *ext_desc;
- ath12k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
- HAL_DESC_REO_QUEUE_DESC,
- REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
+ ath12k_wifi7_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
qdesc->rx_queue_num = le32_encode_bits(tid, HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER);
@@ -855,21 +849,24 @@ void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
* queue descriptor in Rx peer entry as part of dp_rx_tid_update.
*/
memset(ext_desc, 0, 3 * sizeof(*ext_desc));
- ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
- HAL_DESC_REO_QUEUE_EXT_DESC,
- REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
+ ath12k_wifi7_hal_reo_set_desc_hdr(&ext_desc->desc_hdr,
+ HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
ext_desc++;
- ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
- HAL_DESC_REO_QUEUE_EXT_DESC,
- REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
+ ath12k_wifi7_hal_reo_set_desc_hdr(&ext_desc->desc_hdr,
+ HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
ext_desc++;
- ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
- HAL_DESC_REO_QUEUE_EXT_DESC,
- REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
+ ath12k_wifi7_hal_reo_set_desc_hdr(&ext_desc->desc_hdr,
+ HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
}
-void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab,
- struct hal_srng *srng)
+void ath12k_wifi7_hal_reo_init_cmd_ring_tlv64(struct ath12k_base *ab,
+ struct hal_srng *srng)
{
struct hal_srng_params params;
struct hal_tlv_64_hdr *tlv;
@@ -893,8 +890,35 @@ void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab,
}
}
-void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
+void ath12k_wifi7_hal_reo_init_cmd_ring_tlv32(struct ath12k_base *ab,
+ struct hal_srng *srng)
{
+ struct hal_reo_get_queue_stats *desc;
+ struct hal_srng_params params;
+ struct hal_tlv_hdr *tlv;
+ int i, cmd_num = 1;
+ int entry_size;
+ u8 *entry;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath12k_hal_srng_get_entrysize(ab, HAL_REO_CMD);
+ ath12k_hal_srng_get_params(ab, srng, &params);
+ entry = (u8 *)params.ring_base_vaddr;
+
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_hdr *)entry;
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ desc->cmd.info0 = le32_encode_bits(cmd_num++,
+ HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
+ entry += entry_size;
+ }
+}
+
+void ath12k_wifi7_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
+{
+ struct ath12k_hal *hal = &ab->hal;
+
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
u32 val;
@@ -904,7 +928,7 @@ void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
u32_encode_bits(1, HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE);
ath12k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
- val = ath12k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTRL_ADDR(ab));
+ val = ath12k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTRL_ADDR(hal));
val &= ~(HAL_REO1_MISC_CTL_FRAG_DST_RING |
HAL_REO1_MISC_CTL_BAR_DST_RING);
@@ -912,15 +936,15 @@ void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
HAL_REO1_MISC_CTL_FRAG_DST_RING);
val |= u32_encode_bits(HAL_SRNG_RING_ID_REO2SW0,
HAL_REO1_MISC_CTL_BAR_DST_RING);
- ath12k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTRL_ADDR(ab), val);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTRL_ADDR(hal), val);
- ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(hal),
HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);
- ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(hal),
HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);
- ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(hal),
HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);
- ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(hal),
HAL_DEFAULT_VO_REO_TIMEOUT_USEC);
ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
@@ -929,19 +953,21 @@ void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
ring_hash_map);
}
-void ath12k_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab)
+void ath12k_wifi7_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab)
{
u32 val;
+ struct ath12k_hal *hal = &ab->hal;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
- lockdep_assert_held(&ab->base_lock);
+ lockdep_assert_held(&dp->dp_lock);
val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
- HAL_REO1_QDESC_ADDR(ab));
+ HAL_REO1_QDESC_ADDR(hal));
val |= u32_encode_bits(1, HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY);
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
- HAL_REO1_QDESC_ADDR(ab), val);
+ HAL_REO1_QDESC_ADDR(hal), val);
val &= ~HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY;
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
- HAL_REO1_QDESC_ADDR(ab), val);
+ HAL_REO1_QDESC_ADDR(hal), val);
}
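The hunks above replace the open-coded 64-bit TLV header writes with a
per-chip hal->ops->reo_cmd_enc_tlv_hdr() hook, so one REO command path can
serve chips with 32-bit and 64-bit TLV headers. A minimal sketch of what the
two encoder variants could look like, assuming the op returns a pointer to
the TLV payload; only the hook call and the TLV structs appear in this
patch, and the function names below are illustrative:

static void *ath12k_wifi7_hal_reo_cmd_enc_tlv_hdr_64(void *tlv_buf,
						     u32 tag, u32 len)
{
	struct hal_tlv_64_hdr *tlv = tlv_buf;

	/* 64-bit TLV header: tag and length packed into a single __le64 */
	tlv->tl = le64_encode_bits(tag, HAL_TLV_HDR_TAG) |
		  le64_encode_bits(len, HAL_TLV_HDR_LEN);
	return tlv->value;
}

static void *ath12k_wifi7_hal_reo_cmd_enc_tlv_hdr_32(void *tlv_buf,
						     u32 tag, u32 len)
{
	struct hal_tlv_hdr *tlv = tlv_buf;

	/* 32-bit TLV header: same fields in a narrower word */
	tlv->tl = le32_encode_bits(tag, HAL_TLV_HDR_TAG) |
		  le32_encode_bits(len, HAL_TLV_HDR_LEN);
	return tlv->value;
}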
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/wifi7/hal_rx.h
index d1ad7747b82c..ac2a8ac03288 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_rx.h
@@ -1,12 +1,16 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_HAL_RX_H
#define ATH12K_HAL_RX_H
+#include "hal_desc.h"
+
+struct hal_reo_status;
+
struct hal_rx_wbm_rel_info {
u32 cookie;
enum hal_wbm_rel_src_module err_rel_src;
@@ -17,11 +21,9 @@ struct hal_rx_wbm_rel_info {
bool continuation;
void *rx_desc;
bool hw_cc_done;
+ __le32 peer_metadata;
};
-#define HAL_INVALID_PEERID 0x3fff
-#define VHT_SIG_SU_NSS_MASK 0x7
-
#define HAL_RX_MPDU_INFO_PN_GET_BYTE1(__val) \
le32_get_bits((__val), GENMASK(7, 0))
@@ -39,69 +41,10 @@ struct hal_rx_mon_status_tlv_hdr {
u8 value[];
};
-enum hal_rx_su_mu_coding {
- HAL_RX_SU_MU_CODING_BCC,
- HAL_RX_SU_MU_CODING_LDPC,
- HAL_RX_SU_MU_CODING_MAX,
-};
-
-enum hal_rx_gi {
- HAL_RX_GI_0_8_US,
- HAL_RX_GI_0_4_US,
- HAL_RX_GI_1_6_US,
- HAL_RX_GI_3_2_US,
- HAL_RX_GI_MAX,
-};
-
-enum hal_rx_bw {
- HAL_RX_BW_20MHZ,
- HAL_RX_BW_40MHZ,
- HAL_RX_BW_80MHZ,
- HAL_RX_BW_160MHZ,
- HAL_RX_BW_320MHZ,
- HAL_RX_BW_MAX,
-};
-
-enum hal_rx_preamble {
- HAL_RX_PREAMBLE_11A,
- HAL_RX_PREAMBLE_11B,
- HAL_RX_PREAMBLE_11N,
- HAL_RX_PREAMBLE_11AC,
- HAL_RX_PREAMBLE_11AX,
- HAL_RX_PREAMBLE_11BA,
- HAL_RX_PREAMBLE_11BE,
- HAL_RX_PREAMBLE_MAX,
-};
-
-enum hal_rx_reception_type {
- HAL_RX_RECEPTION_TYPE_SU,
- HAL_RX_RECEPTION_TYPE_MU_MIMO,
- HAL_RX_RECEPTION_TYPE_MU_OFDMA,
- HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO,
- HAL_RX_RECEPTION_TYPE_MAX,
-};
-
-enum hal_rx_legacy_rate {
- HAL_RX_LEGACY_RATE_1_MBPS,
- HAL_RX_LEGACY_RATE_2_MBPS,
- HAL_RX_LEGACY_RATE_5_5_MBPS,
- HAL_RX_LEGACY_RATE_6_MBPS,
- HAL_RX_LEGACY_RATE_9_MBPS,
- HAL_RX_LEGACY_RATE_11_MBPS,
- HAL_RX_LEGACY_RATE_12_MBPS,
- HAL_RX_LEGACY_RATE_18_MBPS,
- HAL_RX_LEGACY_RATE_24_MBPS,
- HAL_RX_LEGACY_RATE_36_MBPS,
- HAL_RX_LEGACY_RATE_48_MBPS,
- HAL_RX_LEGACY_RATE_54_MBPS,
- HAL_RX_LEGACY_RATE_INVALID,
-};
-
#define HAL_TLV_STATUS_PPDU_NOT_DONE 0
#define HAL_TLV_STATUS_PPDU_DONE 1
#define HAL_TLV_STATUS_BUF_DONE 2
#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3
-#define HAL_RX_FCS_LEN 4
enum hal_rx_mon_status {
HAL_RX_MON_STATUS_PPDU_NOT_DONE,
@@ -113,167 +56,6 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_MSDU_END,
};
-#define HAL_RX_MAX_MPDU 1024
-#define HAL_RX_NUM_WORDS_PER_PPDU_BITMAP (HAL_RX_MAX_MPDU >> 5)
-
-struct hal_rx_user_status {
- u32 mcs:4,
- nss:3,
- ofdma_info_valid:1,
- ul_ofdma_ru_start_index:7,
- ul_ofdma_ru_width:7,
- ul_ofdma_ru_size:8;
- u32 ul_ofdma_user_v0_word0;
- u32 ul_ofdma_user_v0_word1;
- u32 ast_index;
- u32 tid;
- u16 tcp_msdu_count;
- u16 tcp_ack_msdu_count;
- u16 udp_msdu_count;
- u16 other_msdu_count;
- u16 frame_control;
- u8 frame_control_info_valid;
- u8 data_sequence_control_info_valid;
- u16 first_data_seq_ctrl;
- u32 preamble_type;
- u16 ht_flags;
- u16 vht_flags;
- u16 he_flags;
- u8 rs_flags;
- u8 ldpc;
- u32 mpdu_cnt_fcs_ok;
- u32 mpdu_cnt_fcs_err;
- u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
- u32 mpdu_ok_byte_count;
- u32 mpdu_err_byte_count;
- bool ampdu_present;
- u16 ampdu_id;
-};
-
-#define HAL_MAX_UL_MU_USERS 37
-
-struct hal_rx_u_sig_info {
- bool ul_dl;
- u8 bw;
- u8 ppdu_type_comp_mode;
- u8 eht_sig_mcs;
- u8 num_eht_sig_sym;
- struct ieee80211_radiotap_eht_usig usig;
-};
-
-#define HAL_RX_MON_MAX_AGGR_SIZE 128
-
-struct hal_rx_tlv_aggr_info {
- bool in_progress;
- u16 cur_len;
- u16 tlv_tag;
- u8 buf[HAL_RX_MON_MAX_AGGR_SIZE];
-};
-
-struct hal_rx_radiotap_eht {
- __le32 known;
- __le32 data[9];
-};
-
-#define EHT_MAX_USER_INFO 4
-
-struct hal_rx_eht_info {
- u8 num_user_info;
- struct hal_rx_radiotap_eht eht;
- u32 user_info[EHT_MAX_USER_INFO];
-};
-
-struct hal_rx_mon_ppdu_info {
- u32 ppdu_id;
- u32 last_ppdu_id;
- u64 ppdu_ts;
- u32 num_mpdu_fcs_ok;
- u32 num_mpdu_fcs_err;
- u32 preamble_type;
- u32 mpdu_len;
- u16 chan_num;
- u16 freq;
- u16 tcp_msdu_count;
- u16 tcp_ack_msdu_count;
- u16 udp_msdu_count;
- u16 other_msdu_count;
- u16 peer_id;
- u8 rate;
- u8 mcs;
- u8 nss;
- u8 bw;
- u8 vht_flag_values1;
- u8 vht_flag_values2;
- u8 vht_flag_values3[4];
- u8 vht_flag_values4;
- u8 vht_flag_values5;
- u16 vht_flag_values6;
- u8 is_stbc;
- u8 gi;
- u8 sgi;
- u8 ldpc;
- u8 beamformed;
- u8 rssi_comb;
- u16 tid;
- u8 fc_valid;
- u16 ht_flags;
- u16 vht_flags;
- u16 he_flags;
- u16 he_mu_flags;
- u8 dcm;
- u8 ru_alloc;
- u8 reception_type;
- u64 tsft;
- u64 rx_duration;
- u16 frame_control;
- u32 ast_index;
- u8 rs_fcs_err;
- u8 rs_flags;
- u8 cck_flag;
- u8 ofdm_flag;
- u8 ulofdma_flag;
- u8 frame_control_info_valid;
- u16 he_per_user_1;
- u16 he_per_user_2;
- u8 he_per_user_position;
- u8 he_per_user_known;
- u16 he_flags1;
- u16 he_flags2;
- u8 he_RU[4];
- u16 he_data1;
- u16 he_data2;
- u16 he_data3;
- u16 he_data4;
- u16 he_data5;
- u16 he_data6;
- u32 ppdu_len;
- u32 prev_ppdu_id;
- u32 device_id;
- u16 first_data_seq_ctrl;
- u8 monitor_direct_used;
- u8 data_sequence_control_info_valid;
- u8 ltf_size;
- u8 rxpcu_filter_pass;
- s8 rssi_chain[8][8];
- u32 num_users;
- u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u8 addr4[ETH_ALEN];
- struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS];
- u8 userid;
- bool first_msdu_in_mpdu;
- bool is_ampdu;
- u8 medium_prot_type;
- bool ppdu_continuation;
- bool eht_usig;
- struct hal_rx_u_sig_info u_sig_info;
- bool is_eht;
- struct hal_rx_eht_info eht_info;
- struct hal_rx_tlv_aggr_info tlv_aggr;
-};
-
#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
#define HAL_RX_PPDU_START_INFO1_CHAN_NUM GENMASK(15, 0)
#define HAL_RX_PPDU_START_INFO1_CHAN_FREQ GENMASK(31, 16)
@@ -531,11 +313,6 @@ struct hal_rx_rxpcu_classification_overview {
u32 rsvd0;
} __packed;
-struct hal_rx_msdu_desc_info {
- u32 msdu_flags;
- u16 msdu_len; /* 14 bits for length */
-};
-
#define HAL_RX_NUM_MSDU_DESC 6
struct hal_rx_msdu_list {
struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
@@ -588,15 +365,6 @@ struct hal_rx_resp_req_info {
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF
-#define HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VALID BIT(30)
-#define HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VER BIT(31)
-#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_NSS GENMASK(2, 0)
-#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_MCS GENMASK(6, 3)
-#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_LDPC BIT(7)
-#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_DCM BIT(8)
-#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_START GENMASK(15, 9)
-#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE GENMASK(18, 16)
-
/* HE Radiotap data1 Mask */
#define HE_SU_FORMAT_TYPE 0x0000
#define HE_EXT_SU_FORMAT_TYPE 0x0001
@@ -1044,128 +812,64 @@ enum hal_mon_reception_type {
#define HAL_RU_PER80(ru_per80, num_80mhz, ru_idx_per80mhz) \
(HAL_RU(ru_per80, num_80mhz, ru_idx_per80mhz))
-#define RU_INVALID 0
-#define RU_26 1
-#define RU_52 2
-#define RU_106 4
-#define RU_242 9
-#define RU_484 18
-#define RU_996 37
-#define RU_2X996 74
-#define RU_3X996 111
-#define RU_4X996 148
-#define RU_52_26 (RU_52 + RU_26)
-#define RU_106_26 (RU_106 + RU_26)
-#define RU_484_242 (RU_484 + RU_242)
-#define RU_996_484 (RU_996 + RU_484)
-#define RU_996_484_242 (RU_996 + RU_484_242)
-#define RU_2X996_484 (RU_2X996 + RU_484)
-#define RU_3X996_484 (RU_3X996 + RU_484)
-
-enum ath12k_eht_ru_size {
- ATH12K_EHT_RU_26,
- ATH12K_EHT_RU_52,
- ATH12K_EHT_RU_106,
- ATH12K_EHT_RU_242,
- ATH12K_EHT_RU_484,
- ATH12K_EHT_RU_996,
- ATH12K_EHT_RU_996x2,
- ATH12K_EHT_RU_996x4,
- ATH12K_EHT_RU_52_26,
- ATH12K_EHT_RU_106_26,
- ATH12K_EHT_RU_484_242,
- ATH12K_EHT_RU_996_484,
- ATH12K_EHT_RU_996_484_242,
- ATH12K_EHT_RU_996x2_484,
- ATH12K_EHT_RU_996x3,
- ATH12K_EHT_RU_996x3_484,
-
- /* Keep last */
- ATH12K_EHT_RU_INVALID,
-};
-
-#define HAL_RX_RU_ALLOC_TYPE_MAX ATH12K_EHT_RU_INVALID
-
-static inline
-enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
-{
- enum nl80211_he_ru_alloc ret;
-
- switch (ru_tones) {
- case RU_52:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
- break;
- case RU_106:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
- break;
- case RU_242:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
- break;
- case RU_484:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
- break;
- case RU_996:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
- break;
- case RU_2X996:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
- break;
- case RU_26:
- fallthrough;
- default:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
- break;
- }
- return ret;
-}
-
-void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab,
- struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status);
-void ath12k_hal_reo_flush_queue_status(struct ath12k_base *ab,
- struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status);
-void ath12k_hal_reo_flush_cache_status(struct ath12k_base *ab,
- struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status);
-void ath12k_hal_reo_unblk_cache_status(struct ath12k_base *ab,
- struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status);
-void ath12k_hal_reo_flush_timeout_list_status(struct ath12k_base *ab,
- struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status);
-void ath12k_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab,
- struct hal_tlv_64_hdr *tlv,
- struct hal_reo_status *status);
-void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
- struct hal_tlv_64_hdr *tlv,
+void ath12k_wifi7_hal_reo_status_queue_stats(struct ath12k_base *ab,
+ struct hal_reo_get_queue_stats_status *desc,
+ struct hal_reo_status *status);
+void ath12k_wifi7_hal_reo_flush_queue_status(struct ath12k_base *ab,
+ struct hal_reo_flush_queue_status *desc,
+ struct hal_reo_status *status);
+void ath12k_wifi7_hal_reo_flush_cache_status(struct ath12k_base *ab,
+ struct hal_reo_flush_cache_status *desc,
+ struct hal_reo_status *status);
+void ath12k_wifi7_hal_reo_unblk_cache_status(struct ath12k_base *ab,
+ struct hal_reo_unblock_cache_status *desc,
+ struct hal_reo_status *status);
+void
+ath12k_wifi7_hal_reo_flush_timeout_list_status(struct ath12k_base *ab,
+ struct hal_reo_flush_timeout_list_status *desc,
struct hal_reo_status *status);
-void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_msdus,
- u32 *msdu_cookies,
- enum hal_rx_buf_return_buf_manager *rbm);
-void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
- struct hal_wbm_release_ring *desc,
- struct ath12k_buffer_addr *buf_addr_info,
- enum hal_wbm_rel_bm_act action);
-void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
- dma_addr_t paddr, u32 cookie, u8 manager);
-void ath12k_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo,
- dma_addr_t *paddr,
- u32 *cookie, u8 *rbm);
-int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab,
- struct hal_reo_dest_ring *desc,
- dma_addr_t *paddr, u32 *desc_bank);
-int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
- struct hal_rx_wbm_rel_info *rel_info);
-void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
- struct ath12k_buffer_addr *buff_addr,
- dma_addr_t *paddr, u32 *cookie);
-void ath12k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, u32 *sw_cookie,
- struct ath12k_buffer_addr **pp_buf_addr,
- u8 *rbm, u32 *msdu_cnt);
-void ath12k_hal_rx_msdu_list_get(struct ath12k *ar,
- struct hal_rx_msdu_link *link_desc,
- struct hal_rx_msdu_list *msdu_list,
- u16 *num_msdus);
+void
+ath12k_wifi7_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab,
+ struct hal_reo_desc_thresh_reached_status *desc,
+ struct hal_reo_status *status);
+void ath12k_wifi7_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
+ struct hal_reo_status_hdr *desc,
+ struct hal_reo_status *status);
+void ath12k_wifi7_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm);
+void ath12k_wifi7_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
+ struct hal_wbm_release_ring *desc,
+ struct ath12k_buffer_addr *buf_addr_info,
+ enum hal_wbm_rel_bm_act action);
+void ath12k_wifi7_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
+ dma_addr_t paddr, u32 cookie, u8 manager);
+void ath12k_wifi7_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo,
+ dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm);
+int ath12k_wifi7_hal_desc_reo_parse_err(struct ath12k_dp *dp,
+ struct hal_reo_dest_ring *desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+int ath12k_wifi7_hal_wbm_desc_parse_err(struct ath12k_dp *dp, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info);
+void ath12k_wifi7_hal_rx_reo_ent_paddr_get(struct ath12k_buffer_addr *buff_addr,
+ dma_addr_t *paddr, u32 *cookie);
+void ath12k_wifi7_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
+ u32 *sw_cookie,
+ struct ath12k_buffer_addr **pp_buf_addr,
+ u8 *rbm, u32 *msdu_cnt);
+void ath12k_wifi7_hal_rx_msdu_list_get(struct ath12k *ar,
+ void *link_desc_opaque,
+ void *msdu_list_opaque,
+ u16 *num_msdus);
+void ath12k_wifi7_hal_reo_init_cmd_ring_tlv64(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void ath12k_wifi7_hal_reo_init_cmd_ring_tlv32(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void ath12k_wifi7_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab);
+void ath12k_wifi7_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map);
+void ath12k_wifi7_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
+ int tid, u32 ba_window_size,
+ u32 start_seq, enum hal_pn_type type);
#endif
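For the buffer-address helpers declared above, the DMA address is split
across the two info words of struct ath12k_buffer_addr, with the SW cookie
and return-buffer manager packed alongside the high address bits. A hedged
round-trip sketch using only the helpers from this header (paddr and cookie
stand in for caller-supplied values):

	struct ath12k_buffer_addr binfo = {};
	dma_addr_t paddr_out;
	u32 cookie_out;
	u8 rbm_out;

	ath12k_wifi7_hal_rx_buf_addr_info_set(&binfo, paddr, cookie,
					      HAL_RX_BUF_RBM_SW3_BM);
	ath12k_wifi7_hal_rx_buf_addr_info_get(&binfo, &paddr_out,
					      &cookie_out, &rbm_out);
	/* Expect: paddr_out == paddr, cookie_out == cookie,
	 * rbm_out == HAL_RX_BUF_RBM_SW3_BM
	 */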
diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/wifi7/hal_rx_desc.h
index 6c600473b402..0d19a9cbb68c 100644
--- a/drivers/net/wireless/ath/ath12k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_rx_desc.h
@@ -1,18 +1,11 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_RX_DESC_H
#define ATH12K_RX_DESC_H
-enum rx_desc_decap_type {
- RX_DESC_DECAP_TYPE_RAW,
- RX_DESC_DECAP_TYPE_NATIVE_WIFI,
- RX_DESC_DECAP_TYPE_ETHERNET2_DIX,
- RX_DESC_DECAP_TYPE_8023,
-};
-
enum rx_desc_decrypt_status_code {
RX_DESC_DECRYPT_STATUS_CODE_OK,
RX_DESC_DECRYPT_STATUS_CODE_UNPROTECTED_FRAME,
@@ -631,40 +624,6 @@ struct rx_mpdu_start_qcn9274_compact {
*
*/
-enum rx_msdu_start_pkt_type {
- RX_MSDU_START_PKT_TYPE_11A,
- RX_MSDU_START_PKT_TYPE_11B,
- RX_MSDU_START_PKT_TYPE_11N,
- RX_MSDU_START_PKT_TYPE_11AC,
- RX_MSDU_START_PKT_TYPE_11AX,
- RX_MSDU_START_PKT_TYPE_11BA,
- RX_MSDU_START_PKT_TYPE_11BE,
-};
-
-enum rx_msdu_start_sgi {
- RX_MSDU_START_SGI_0_8_US,
- RX_MSDU_START_SGI_0_4_US,
- RX_MSDU_START_SGI_1_6_US,
- RX_MSDU_START_SGI_3_2_US,
-};
-
-enum rx_msdu_start_recv_bw {
- RX_MSDU_START_RECV_BW_20MHZ,
- RX_MSDU_START_RECV_BW_40MHZ,
- RX_MSDU_START_RECV_BW_80MHZ,
- RX_MSDU_START_RECV_BW_160MHZ,
-};
-
-enum rx_msdu_start_reception_type {
- RX_MSDU_START_RECEPTION_TYPE_SU,
- RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO,
- RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA,
- RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
- RX_MSDU_START_RECEPTION_TYPE_UL_MU_MIMO,
- RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA,
- RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
-};
-
#define RX_MSDU_END_64_TLV_SRC_LINK_ID GENMASK(24, 22)
#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
@@ -1495,12 +1454,6 @@ struct rx_msdu_end_qcn9274_compact {
*
*/
-struct hal_rx_desc_qcn9274 {
- struct rx_msdu_end_qcn9274 msdu_end;
- struct rx_mpdu_start_qcn9274 mpdu_start;
- u8 msdu_payload[];
-} __packed;
-
struct hal_rx_desc_qcn9274_compact {
struct rx_msdu_end_qcn9274_compact msdu_end;
struct rx_mpdu_start_qcn9274_compact mpdu_start;
@@ -1528,17 +1481,28 @@ struct hal_rx_desc_wcn7850 {
u8 msdu_payload[];
};
+struct rx_pkt_hdr_tlv_qcc2072 {
+ __le32 tag;
+ __le64 phy_ppdu_id;
+ u8 rx_pkt_hdr[HAL_RX_BE_PKT_HDR_TLV_LEN];
+};
+
+struct hal_rx_desc_qcc2072 {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end_qcn9274 msdu_end;
+ u8 rx_padding0[RX_BE_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start_qcn9274 mpdu_start;
+ struct rx_pkt_hdr_tlv_qcc2072 pkt_hdr_tlv;
+ u8 msdu_payload[];
+};
+
struct hal_rx_desc {
union {
- struct hal_rx_desc_qcn9274 qcn9274;
struct hal_rx_desc_qcn9274_compact qcn9274_compact;
struct hal_rx_desc_wcn7850 wcn7850;
+ struct hal_rx_desc_qcc2072 qcc2072;
} u;
} __packed;
-#define MAX_USER_POS 8
-#define MAX_MU_GROUP_ID 64
-#define MAX_MU_GROUP_SHOW 16
-#define MAX_MU_GROUP_LENGTH (6 * MAX_MU_GROUP_SHOW)
-
#endif /* ATH12K_RX_DESC_H */
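With hal_rx_desc_qcn9274 dropped and hal_rx_desc_qcc2072 added, the
hal_rx_desc union now carries three per-chip layouts, and callers are
expected to derive buffer geometry through per-chip helpers rather than
sizeof(struct hal_rx_desc). A hedged sketch for the wcn7850 variant, using
helpers defined in the new hal_wcn7850.c further below:

	u32 desc_sz = ath12k_hal_get_rx_desc_size_wcn7850();
	u32 mpdu_start_off = ath12k_hal_rx_desc_get_mpdu_start_offset_wcn7850();
	u32 msdu_end_off = ath12k_hal_rx_desc_get_msdu_end_offset_wcn7850();

	/* desc_sz == sizeof(struct hal_rx_desc_wcn7850); the offsets locate
	 * the mpdu_start_tag and msdu_end_tag TLV tags inside that layout.
	 */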
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.c b/drivers/net/wireless/ath/ath12k/wifi7/hal_tx.c
index 869e07e406fe..02d3cadf03fe 100644
--- a/drivers/net/wireless/ath/ath12k/hal_tx.c
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_tx.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
-#include "hal_desc.h"
-#include "hal.h"
+#include "../hal.h"
#include "hal_tx.h"
-#include "hif.h"
+#include "../hif.h"
+#include "hal.h"
#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
@@ -29,9 +29,9 @@ static inline u8 dscp2tid(u8 dscp)
return dscp >> 3;
}
-void ath12k_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
- struct hal_tcl_data_cmd *tcl_cmd,
- struct hal_tx_info *ti)
+void ath12k_wifi7_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd,
+ struct hal_tx_info *ti)
{
tcl_cmd->buf_addr_info.info0 =
le32_encode_bits(ti->paddr, BUFFER_ADDR_INFO0_ADDR);
@@ -66,7 +66,7 @@ void ath12k_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
tcl_cmd->info5 = 0;
}
-void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id)
+void ath12k_wifi7_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id)
{
u32 ctrl_reg_val;
u32 addr;
@@ -136,10 +136,3 @@ void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id)
HAL_TCL1_RING_CMN_CTRL_REG,
ctrl_reg_val);
}
-
-void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab, u32 bank_config,
- u8 bank_id)
-{
- ath12k_hif_write32(ab, HAL_TCL_SW_CONFIG_BANK_ADDR + 4 * bank_id,
- bank_config);
-}
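The dscp2tid() helper kept in this file maps the 64 DSCP codepoints onto
8 TIDs by discarding the three low bits, so eight consecutive codepoints
share a TID. A hedged illustration of filling one default map table:

	u8 map[DSCP_TID_MAP_TBL_ENTRY_SIZE];	/* 64 entries, per the define above */
	int dscp;

	for (dscp = 0; dscp < DSCP_TID_MAP_TBL_ENTRY_SIZE; dscp++)
		map[dscp] = dscp >> 3;	/* DSCP 0..7 -> TID 0, 8..15 -> TID 1, ... */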
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/wifi7/hal_tx.h
index eb065a79f6c6..9d2b1552c2f5 100644
--- a/drivers/net/wireless/ath/ath12k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_tx.h
@@ -1,21 +1,14 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc.
- * All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_HAL_TX_H
#define ATH12K_HAL_TX_H
+#include "../mac.h"
#include "hal_desc.h"
-#include "core.h"
-
-#define HAL_TX_ADDRX_EN 1
-#define HAL_TX_ADDRY_EN 2
-
-#define HAL_TX_ADDR_SEARCH_DEFAULT 0
-#define HAL_TX_ADDR_SEARCH_INDEX 1
/* TODO: check all these data can be managed with struct ath12k_tx_desc_info for perf */
struct hal_tx_info {
@@ -188,13 +181,14 @@ struct hal_tx_fes_status_end {
/* STA mode will have MCAST_PKT_CTRL instead of DSCP_TID_MAP bitfield */
#define HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID GENMASK(22, 17)
-void ath12k_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
- struct hal_tcl_data_cmd *tcl_cmd,
- struct hal_tx_info *ti);
-void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id);
-int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
- enum hal_reo_cmd_type type,
- struct ath12k_hal_reo_cmd *cmd);
-void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab, u32 bank_config,
- u8 bank_id);
+void ath12k_wifi7_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id);
+void ath12k_wifi7_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd,
+ struct hal_tx_info *ti);
+int ath12k_wifi7_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd);
+void ath12k_wifi7_hal_tx_configure_bank_register(struct ath12k_base *ab,
+ u32 bank_config,
+ u8 bank_id);
#endif
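In the srng config template that opens the new hal_wcn7850.c below, every
entry_size is expressed in 32-bit words, which is why the descriptor byte
sizes are shifted right by two. A one-line hedged equivalent (the helper
name is illustrative, not from the patch):

	static inline u32 hal_entry_size_words(size_t bytes)
	{
		return bytes >> 2;	/* bytes -> 32-bit words, for .entry_size */
	}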
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/hal_wcn7850.c b/drivers/net/wireless/ath/ath12k/wifi7/hal_wcn7850.c
new file mode 100644
index 000000000000..e64e512cac7d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_wcn7850.c
@@ -0,0 +1,809 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "hal_desc.h"
+#include "hal_wcn7850.h"
+#include "hw.h"
+#include "hal.h"
+#include "hal_tx.h"
+
+static const struct hal_srng_config hw_srng_config_template[] = {
+ /* TODO: max_rings can be populated by querying HW capabilities */
+ [HAL_REO_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
+ .max_rings = 8,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_EXCEPTION] = {
+ /* Designating REO2SW0 ring as exception ring.
+ * Any of the REO2SW rings can be used as the exception ring.
+ */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_REINJECT] = {
+ .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
+ .max_rings = 4,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_CMD] = {
+ .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_64_hdr) +
+ sizeof(struct hal_reo_get_queue_stats)) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_64_hdr) +
+ sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TCL_DATA] = {
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
+ .max_rings = 6,
+ .entry_size = sizeof(struct hal_tcl_data_cmd) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TCL_CMD] = {
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_tcl_gse_cmd) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TCL_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_status_ring)) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_CE_SRC] = {
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
+ .max_rings = 16,
+ .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_CE_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
+ .max_rings = 16,
+ .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_CE_DST_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
+ .max_rings = 16,
+ .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_WBM_IDLE_LINK] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_SW2WBM_RELEASE] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_SW0_RELEASE,
+ .max_rings = 2,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_WBM2SW_RELEASE] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ .max_rings = 8,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_RXDMA_BUF] = {
+ .start_ring_id = HAL_SRNG_SW2RXDMA_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_DMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ .max_rings = 0,
+ .entry_size = 0,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_MONITOR_BUF] = {},
+ [HAL_RXDMA_MONITOR_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_MONITOR_DESC] = {},
+ [HAL_RXDMA_DIR_BUF] = {
+ .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+ .max_rings = 2,
+ .entry_size = 8 >> 2, /* TODO: Define the struct */
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_PPE2TCL] = {},
+ [HAL_PPE_RELEASE] = {},
+ [HAL_TX_MONITOR_BUF] = {},
+ [HAL_RXDMA_MONITOR_DST] = {},
+ [HAL_TX_MONITOR_DST] = {}
+};
+
+const struct ath12k_hw_regs wcn7850_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .tcl1_ring_id = 0x00000908,
+ .tcl1_ring_misc = 0x00000910,
+ .tcl1_ring_tp_addr_lsb = 0x0000091c,
+ .tcl1_ring_tp_addr_msb = 0x00000920,
+ .tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
+ .tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
+ .tcl1_ring_msi1_base_lsb = 0x00000948,
+ .tcl1_ring_msi1_base_msb = 0x0000094c,
+ .tcl1_ring_msi1_data = 0x00000950,
+ .tcl_ring_base_lsb = 0x00000b58,
+ .tcl1_ring_base_lsb = 0x00000900,
+ .tcl1_ring_base_msb = 0x00000904,
+ .tcl2_ring_base_lsb = 0x00000978,
+
+ /* TCL STATUS ring address */
+ .tcl_status_ring_base_lsb = 0x00000d38,
+
+ .wbm_idle_ring_base_lsb = 0x00000d3c,
+ .wbm_idle_ring_misc_addr = 0x00000d4c,
+ .wbm_r0_idle_list_cntl_addr = 0x00000240,
+ .wbm_r0_idle_list_size_addr = 0x00000244,
+ .wbm_scattered_ring_base_lsb = 0x00000250,
+ .wbm_scattered_ring_base_msb = 0x00000254,
+ .wbm_scattered_desc_head_info_ix0 = 0x00000260,
+ .wbm_scattered_desc_head_info_ix1 = 0x00000264,
+ .wbm_scattered_desc_tail_info_ix0 = 0x00000270,
+ .wbm_scattered_desc_tail_info_ix1 = 0x00000274,
+ .wbm_scattered_desc_ptr_hp_addr = 0x00000027c,
+
+ .wbm_sw_release_ring_base_lsb = 0x0000037c,
+ .wbm_sw1_release_ring_base_lsb = 0x00000284,
+ .wbm0_release_ring_base_lsb = 0x00000e08,
+ .wbm1_release_ring_base_lsb = 0x00000e80,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
+
+ /* PPE release ring address */
+ .ppe_rel_ring_base = 0x0000043c,
+
+ /* REO DEST ring address */
+ .reo2_ring_base = 0x0000055c,
+ .reo1_misc_ctrl_addr = 0x00000b7c,
+ .reo1_sw_cookie_cfg0 = 0x00000050,
+ .reo1_sw_cookie_cfg1 = 0x00000054,
+ .reo1_qdesc_lut_base0 = 0x00000058,
+ .reo1_qdesc_lut_base1 = 0x0000005c,
+ .reo1_ring_base_lsb = 0x000004e4,
+ .reo1_ring_base_msb = 0x000004e8,
+ .reo1_ring_id = 0x000004ec,
+ .reo1_ring_misc = 0x000004f4,
+ .reo1_ring_hp_addr_lsb = 0x000004f8,
+ .reo1_ring_hp_addr_msb = 0x000004fc,
+ .reo1_ring_producer_int_setup = 0x00000508,
+ .reo1_ring_msi1_base_lsb = 0x0000052C,
+ .reo1_ring_msi1_base_msb = 0x00000530,
+ .reo1_ring_msi1_data = 0x00000534,
+ .reo1_aging_thres_ix0 = 0x00000b08,
+ .reo1_aging_thres_ix1 = 0x00000b0c,
+ .reo1_aging_thres_ix2 = 0x00000b10,
+ .reo1_aging_thres_ix3 = 0x00000b14,
+
+ /* REO Exception ring address */
+ .reo2_sw0_ring_base = 0x000008a4,
+
+ /* REO Reinject ring address */
+ .sw2reo_ring_base = 0x00000304,
+ .sw2reo1_ring_base = 0x0000037c,
+
+ /* REO cmd ring address */
+ .reo_cmd_ring_base = 0x0000028c,
+
+ /* REO status ring address */
+ .reo_status_ring_base = 0x00000a84,
+
+ /* CE base address */
+ .umac_ce0_src_reg_base = 0x01b80000,
+ .umac_ce0_dest_reg_base = 0x01b81000,
+ .umac_ce1_src_reg_base = 0x01b82000,
+ .umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e40304,
+
+ .qrtr_node_id = 0x1e03164,
+};
+
+static inline
+bool ath12k_hal_rx_desc_get_first_msdu_wcn7850(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+}
+
+static inline
+bool ath12k_hal_rx_desc_get_last_msdu_wcn7850(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5,
+ RX_MSDU_END_INFO5_LAST_MSDU);
+}
+
+u8 ath12k_hal_rx_desc_get_l3_pad_bytes_wcn7850(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.wcn7850.msdu_end.info5,
+ RX_MSDU_END_INFO5_L3_HDR_PADDING);
+}
+
+static inline
+bool ath12k_hal_rx_desc_encrypt_valid_wcn7850(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
+ RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
+}
+
+static inline
+u32 ath12k_hal_rx_desc_get_encrypt_type_wcn7850(struct hal_rx_desc *desc)
+{
+ if (!ath12k_hal_rx_desc_encrypt_valid_wcn7850(desc))
+ return HAL_ENCRYPT_TYPE_OPEN;
+
+ return le32_get_bits(desc->u.wcn7850.mpdu_start.info2,
+ RX_MPDU_START_INFO2_ENC_TYPE);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_decap_type_wcn7850(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info11,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_mesh_ctl_wcn7850(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info11,
+ RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
+}
+
+static inline
+bool ath12k_hal_rx_desc_get_mpdu_seq_ctl_vld_wcn7850(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
+}
+
+static inline
+bool ath12k_hal_rx_desc_get_mpdu_fc_valid_wcn7850(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
+}
+
+static inline
+u16 ath12k_hal_rx_desc_get_mpdu_start_seq_no_wcn7850(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
+}
+
+static inline
+u16 ath12k_hal_rx_desc_get_msdu_len_wcn7850(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info10,
+ RX_MSDU_END_INFO10_MSDU_LENGTH);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_msdu_sgi_wcn7850(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_SGI);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_msdu_rate_mcs_wcn7850(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_RATE_MCS);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_msdu_rx_bw_wcn7850(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_RECV_BW);
+}
+
+static inline
+u32 ath12k_hal_rx_desc_get_msdu_freq_wcn7850(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn7850.msdu_end.phy_meta_data);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_msdu_pkt_type_wcn7850(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_PKT_TYPE);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_msdu_nss_wcn7850(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
+}
+
+static inline
+u8 ath12k_hal_rx_desc_get_mpdu_tid_wcn7850(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.mpdu_start.info2,
+ RX_MPDU_START_INFO2_TID);
+}
+
+static inline
+u16 ath12k_hal_rx_desc_get_mpdu_peer_id_wcn7850(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn7850.mpdu_start.sw_peer_id);
+}
+
+void ath12k_hal_rx_desc_copy_end_tlv_wcn7850(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy(&fdesc->u.wcn7850.msdu_end, &ldesc->u.wcn7850.msdu_end,
+ sizeof(struct rx_msdu_end_qcn9274));
+}
+
+u32 ath12k_hal_rx_desc_get_mpdu_start_tag_wcn7850(struct hal_rx_desc *desc)
+{
+ return le64_get_bits(desc->u.wcn7850.mpdu_start_tag,
+ HAL_TLV_HDR_TAG);
+}
+
+u32 ath12k_hal_rx_desc_get_mpdu_ppdu_id_wcn7850(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn7850.mpdu_start.phy_ppdu_id);
+}
+
+void ath12k_hal_rx_desc_set_msdu_len_wcn7850(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info10);
+
+ info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH;
+ info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH);
+
+ desc->u.wcn7850.msdu_end.info10 = __cpu_to_le32(info);
+}
+
+u8 *ath12k_hal_rx_desc_get_msdu_payload_wcn7850(struct hal_rx_desc *desc)
+{
+ return &desc->u.wcn7850.msdu_payload[0];
+}
+
+u32 ath12k_hal_rx_desc_get_mpdu_start_offset_wcn7850(void)
+{
+ return offsetof(struct hal_rx_desc_wcn7850, mpdu_start_tag);
+}
+
+u32 ath12k_hal_rx_desc_get_msdu_end_offset_wcn7850(void)
+{
+ return offsetof(struct hal_rx_desc_wcn7850, msdu_end_tag);
+}
+
+static inline
+bool ath12k_hal_rx_desc_mac_addr2_valid_wcn7850(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
+}
+
+static inline
+u8 *ath12k_hal_rx_desc_mpdu_start_addr2_wcn7850(struct hal_rx_desc *desc)
+{
+ return desc->u.wcn7850.mpdu_start.addr2;
+}
+
+static inline
+bool ath12k_hal_rx_desc_is_da_mcbc_wcn7850(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn7850.msdu_end.info13) &
+ RX_MSDU_END_INFO13_MCAST_BCAST;
+}
+
+static inline
+bool ath12k_hal_rx_h_msdu_done_wcn7850(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.msdu_end.info14,
+ RX_MSDU_END_INFO14_MSDU_DONE);
+}
+
+static inline
+bool ath12k_hal_rx_h_l4_cksum_fail_wcn7850(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13,
+ RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+}
+
+static inline
+bool ath12k_hal_rx_h_ip_cksum_fail_wcn7850(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13,
+ RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+}
+
+static inline
+bool ath12k_hal_rx_h_is_decrypted_wcn7850(struct hal_rx_desc *desc)
+{
+ return (le32_get_bits(desc->u.wcn7850.msdu_end.info14,
+ RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+u32 ath12k_hal_get_rx_desc_size_wcn7850(void)
+{
+ return sizeof(struct hal_rx_desc_wcn7850);
+}
+
+u8 ath12k_hal_rx_desc_get_msdu_src_link_wcn7850(struct hal_rx_desc *desc)
+{
+ return 0;
+}
+
+static u32 ath12k_hal_rx_h_mpdu_err_wcn7850(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info13);
+ u32 errmap = 0;
+
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+void ath12k_hal_rx_desc_get_crypto_hdr_wcn7850(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ unsigned int key_id;
+
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[1] = 0;
+ crypto_hdr[2] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]);
+ break;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[1] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[2] = 0;
+ break;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ return;
+ }
+ key_id = u32_get_bits(__le32_to_cpu(desc->u.wcn7850.mpdu_start.info5),
+ RX_MPDU_START_INFO5_KEY_ID);
+ crypto_hdr[3] = 0x20 | (key_id << 6);
+ crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[1]);
+ crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]);
+}
+
+void ath12k_hal_rx_desc_get_dot11_hdr_wcn7850(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ hdr->frame_control = desc->u.wcn7850.mpdu_start.frame_ctrl;
+ hdr->duration_id = desc->u.wcn7850.mpdu_start.duration;
+ ether_addr_copy(hdr->addr1, desc->u.wcn7850.mpdu_start.addr1);
+ ether_addr_copy(hdr->addr2, desc->u.wcn7850.mpdu_start.addr2);
+ ether_addr_copy(hdr->addr3, desc->u.wcn7850.mpdu_start.addr3);
+ if (__le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
+ ether_addr_copy(hdr->addr4, desc->u.wcn7850.mpdu_start.addr4);
+ }
+ hdr->seq_ctrl = desc->u.wcn7850.mpdu_start.seq_ctrl;
+}
+
+void ath12k_hal_extract_rx_desc_data_wcn7850(struct hal_rx_desc_data *rx_desc_data,
+ struct hal_rx_desc *rx_desc,
+ struct hal_rx_desc *ldesc)
+{
+ rx_desc_data->is_first_msdu = ath12k_hal_rx_desc_get_first_msdu_wcn7850(ldesc);
+ rx_desc_data->is_last_msdu = ath12k_hal_rx_desc_get_last_msdu_wcn7850(ldesc);
+ rx_desc_data->l3_pad_bytes = ath12k_hal_rx_desc_get_l3_pad_bytes_wcn7850(ldesc);
+ rx_desc_data->enctype = ath12k_hal_rx_desc_get_encrypt_type_wcn7850(rx_desc);
+ rx_desc_data->decap_type = ath12k_hal_rx_desc_get_decap_type_wcn7850(rx_desc);
+ rx_desc_data->mesh_ctrl_present =
+ ath12k_hal_rx_desc_get_mesh_ctl_wcn7850(rx_desc);
+ rx_desc_data->seq_ctl_valid =
+ ath12k_hal_rx_desc_get_mpdu_seq_ctl_vld_wcn7850(rx_desc);
+ rx_desc_data->fc_valid = ath12k_hal_rx_desc_get_mpdu_fc_valid_wcn7850(rx_desc);
+ rx_desc_data->seq_no = ath12k_hal_rx_desc_get_mpdu_start_seq_no_wcn7850(rx_desc);
+ rx_desc_data->msdu_len = ath12k_hal_rx_desc_get_msdu_len_wcn7850(ldesc);
+ rx_desc_data->sgi = ath12k_hal_rx_desc_get_msdu_sgi_wcn7850(rx_desc);
+ rx_desc_data->rate_mcs = ath12k_hal_rx_desc_get_msdu_rate_mcs_wcn7850(rx_desc);
+ rx_desc_data->bw = ath12k_hal_rx_desc_get_msdu_rx_bw_wcn7850(rx_desc);
+ rx_desc_data->phy_meta_data = ath12k_hal_rx_desc_get_msdu_freq_wcn7850(rx_desc);
+ rx_desc_data->pkt_type = ath12k_hal_rx_desc_get_msdu_pkt_type_wcn7850(rx_desc);
+ rx_desc_data->nss = hweight8(ath12k_hal_rx_desc_get_msdu_nss_wcn7850(rx_desc));
+ rx_desc_data->tid = ath12k_hal_rx_desc_get_mpdu_tid_wcn7850(rx_desc);
+ rx_desc_data->peer_id = ath12k_hal_rx_desc_get_mpdu_peer_id_wcn7850(rx_desc);
+ rx_desc_data->addr2_present = ath12k_hal_rx_desc_mac_addr2_valid_wcn7850(rx_desc);
+ rx_desc_data->addr2 = ath12k_hal_rx_desc_mpdu_start_addr2_wcn7850(rx_desc);
+ rx_desc_data->is_mcbc = ath12k_hal_rx_desc_is_da_mcbc_wcn7850(rx_desc);
+ rx_desc_data->msdu_done = ath12k_hal_rx_h_msdu_done_wcn7850(ldesc);
+ rx_desc_data->l4_csum_fail = ath12k_hal_rx_h_l4_cksum_fail_wcn7850(rx_desc);
+ rx_desc_data->ip_csum_fail = ath12k_hal_rx_h_ip_cksum_fail_wcn7850(rx_desc);
+ rx_desc_data->is_decrypted = ath12k_hal_rx_h_is_decrypted_wcn7850(rx_desc);
+ rx_desc_data->err_bitmap = ath12k_hal_rx_h_mpdu_err_wcn7850(rx_desc);
+}
+
+int ath12k_hal_srng_create_config_wcn7850(struct ath12k_hal *hal)
+{
+ struct hal_srng_config *s;
+
+ hal->srng_config = kmemdup(hw_srng_config_template,
+ sizeof(hw_srng_config_template),
+ GFP_KERNEL);
+ if (!hal->srng_config)
+ return -ENOMEM;
+
+ s = &hal->srng_config[HAL_REO_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP;
+ s->reg_size[0] = HAL_REO2_RING_BASE_LSB(hal) - HAL_REO1_RING_BASE_LSB(hal);
+ s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_EXCEPTION];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_REINJECT];
+ s->max_rings = 1;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
+
+ s = &hal->srng_config[HAL_REO_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
+
+ s = &hal->srng_config[HAL_TCL_DATA];
+ s->max_rings = 5;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(hal) - HAL_TCL1_RING_BASE_LSB(hal);
+ s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+
+ s = &hal->srng_config[HAL_CE_SRC];
+ s->max_rings = 12;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(hal) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(hal) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal);
+
+ s = &hal->srng_config[HAL_CE_DST];
+ s->max_rings = 12;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal);
+
+ s = &hal->srng_config[HAL_CE_DST_STATUS];
+ s->max_rings = 12;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) +
+ HAL_CE_DST_STATUS_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + HAL_CE_DST_STATUS_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal);
+
+ s = &hal->srng_config[HAL_WBM_IDLE_LINK];
+ s->reg_start[0] =
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
+
+ s = &hal->srng_config[HAL_SW2WBM_RELEASE];
+ s->max_rings = 1;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SW_RELEASE_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP;
+
+ s = &hal->srng_config[HAL_WBM2SW_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(hal);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
+ s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(hal) -
+ HAL_WBM0_RELEASE_RING_BASE_LSB(hal);
+ s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
+
+ s = &hal->srng_config[HAL_RXDMA_BUF];
+ s->max_rings = 2;
+ s->mac_type = ATH12K_HAL_SRNG_PMAC;
+
+ s = &hal->srng_config[HAL_RXDMA_DST];
+ s->max_rings = 1;
+ s->entry_size = sizeof(struct hal_reo_entrance_ring) >> 2;
+
+ /* below rings are not used */
+ s = &hal->srng_config[HAL_RXDMA_DIR_BUF];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_PPE2TCL];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_PPE_RELEASE];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_TX_MONITOR_BUF];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_TX_MONITOR_DST];
+ s->max_rings = 0;
+
+ return 0;
+}
+
+const struct ath12k_hal_tcl_to_wbm_rbm_map
+ath12k_hal_tcl_to_wbm_rbm_map_wcn7850[DP_TCL_NUM_RING_MAX] = {
+ {
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+ {
+ .wbm_ring_num = 4,
+ .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+ },
+};
+
+const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
+};
+
+const struct hal_ops hal_wcn7850_ops = {
+ .create_srng_config = ath12k_hal_srng_create_config_wcn7850,
+ .rx_desc_set_msdu_len = ath12k_hal_rx_desc_set_msdu_len_wcn7850,
+ .rx_desc_get_dot11_hdr = ath12k_hal_rx_desc_get_dot11_hdr_wcn7850,
+ .rx_desc_get_crypto_header = ath12k_hal_rx_desc_get_crypto_hdr_wcn7850,
+ .rx_desc_copy_end_tlv = ath12k_hal_rx_desc_copy_end_tlv_wcn7850,
+ .rx_desc_get_msdu_src_link_id = ath12k_hal_rx_desc_get_msdu_src_link_wcn7850,
+ .extract_rx_desc_data = ath12k_hal_extract_rx_desc_data_wcn7850,
+ .rx_desc_get_l3_pad_bytes = ath12k_hal_rx_desc_get_l3_pad_bytes_wcn7850,
+ .rx_desc_get_mpdu_start_tag = ath12k_hal_rx_desc_get_mpdu_start_tag_wcn7850,
+ .rx_desc_get_mpdu_ppdu_id = ath12k_hal_rx_desc_get_mpdu_ppdu_id_wcn7850,
+ .rx_desc_get_msdu_payload = ath12k_hal_rx_desc_get_msdu_payload_wcn7850,
+ .ce_dst_setup = ath12k_wifi7_hal_ce_dst_setup,
+ .srng_src_hw_init = ath12k_wifi7_hal_srng_src_hw_init,
+ .srng_dst_hw_init = ath12k_wifi7_hal_srng_dst_hw_init,
+ .set_umac_srng_ptr_addr = ath12k_wifi7_hal_set_umac_srng_ptr_addr,
+ .srng_update_shadow_config = ath12k_wifi7_hal_srng_update_shadow_config,
+ .srng_get_ring_id = ath12k_wifi7_hal_srng_get_ring_id,
+ .ce_get_desc_size = ath12k_wifi7_hal_ce_get_desc_size,
+ .ce_src_set_desc = ath12k_wifi7_hal_ce_src_set_desc,
+ .ce_dst_set_desc = ath12k_wifi7_hal_ce_dst_set_desc,
+ .ce_dst_status_get_length = ath12k_wifi7_hal_ce_dst_status_get_length,
+ .set_link_desc_addr = ath12k_wifi7_hal_set_link_desc_addr,
+ .tx_set_dscp_tid_map = ath12k_wifi7_hal_tx_set_dscp_tid_map,
+ .tx_configure_bank_register =
+ ath12k_wifi7_hal_tx_configure_bank_register,
+ .reoq_lut_addr_read_enable = ath12k_wifi7_hal_reoq_lut_addr_read_enable,
+ .reoq_lut_set_max_peerid = ath12k_wifi7_hal_reoq_lut_set_max_peerid,
+ .write_reoq_lut_addr = ath12k_wifi7_hal_write_reoq_lut_addr,
+ .write_ml_reoq_lut_addr = ath12k_wifi7_hal_write_ml_reoq_lut_addr,
+ .setup_link_idle_list = ath12k_wifi7_hal_setup_link_idle_list,
+ .reo_init_cmd_ring = ath12k_wifi7_hal_reo_init_cmd_ring_tlv64,
+ .reo_shared_qaddr_cache_clear = ath12k_wifi7_hal_reo_shared_qaddr_cache_clear,
+ .reo_hw_setup = ath12k_wifi7_hal_reo_hw_setup,
+ .rx_buf_addr_info_set = ath12k_wifi7_hal_rx_buf_addr_info_set,
+ .rx_buf_addr_info_get = ath12k_wifi7_hal_rx_buf_addr_info_get,
+ .cc_config = ath12k_wifi7_hal_cc_config,
+ .get_idle_link_rbm = ath12k_wifi7_hal_get_idle_link_rbm,
+ .rx_msdu_list_get = ath12k_wifi7_hal_rx_msdu_list_get,
+ .rx_reo_ent_buf_paddr_get = ath12k_wifi7_hal_rx_reo_ent_buf_paddr_get,
+ .reo_cmd_enc_tlv_hdr = ath12k_hal_encode_tlv64_hdr,
+ .reo_status_dec_tlv_hdr = ath12k_hal_decode_tlv64_hdr,
+};
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/hal_wcn7850.h b/drivers/net/wireless/ath/ath12k/wifi7/hal_wcn7850.h
new file mode 100644
index 000000000000..a56ca9fd3de4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_wcn7850.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_HAL_WCN7850_H
+#define ATH12K_HAL_WCN7850_H
+
+#include "../hal.h"
+#include "hal_rx.h"
+#include "hal.h"
+
+extern const struct hal_ops hal_wcn7850_ops;
+extern const struct ath12k_hw_regs wcn7850_regs;
+extern const struct ath12k_hal_tcl_to_wbm_rbm_map
+ath12k_hal_tcl_to_wbm_rbm_map_wcn7850[DP_TCL_NUM_RING_MAX];
+extern const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850;
+
+u8 ath12k_hal_rx_desc_get_l3_pad_bytes_wcn7850(struct hal_rx_desc *desc);
+void ath12k_hal_rx_desc_copy_end_tlv_wcn7850(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc);
+u32 ath12k_hal_rx_desc_get_mpdu_start_tag_wcn7850(struct hal_rx_desc *desc);
+u32 ath12k_hal_rx_desc_get_mpdu_ppdu_id_wcn7850(struct hal_rx_desc *desc);
+void ath12k_hal_rx_desc_set_msdu_len_wcn7850(struct hal_rx_desc *desc, u16 len);
+u8 *ath12k_hal_rx_desc_get_msdu_payload_wcn7850(struct hal_rx_desc *desc);
+u32 ath12k_hal_rx_desc_get_mpdu_start_offset_wcn7850(void);
+u32 ath12k_hal_rx_desc_get_msdu_end_offset_wcn7850(void);
+u32 ath12k_hal_get_rx_desc_size_wcn7850(void);
+u8 ath12k_hal_rx_desc_get_msdu_src_link_wcn7850(struct hal_rx_desc *desc);
+void ath12k_hal_rx_desc_get_crypto_hdr_wcn7850(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype);
+void ath12k_hal_rx_desc_get_dot11_hdr_wcn7850(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr);
+void ath12k_hal_extract_rx_desc_data_wcn7850(struct hal_rx_desc_data *rx_desc_data,
+ struct hal_rx_desc *rx_desc,
+ struct hal_rx_desc *ldesc);
+int ath12k_hal_srng_create_config_wcn7850(struct ath12k_hal *hal);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/hw.c b/drivers/net/wireless/ath/ath12k/wifi7/hw.c
new file mode 100644
index 000000000000..df045ddf42da
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hw.c
@@ -0,0 +1,1049 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#include "../debug.h"
+#include "../core.h"
+#include "../ce.h"
+#include "ce.h"
+#include "../hw.h"
+#include "hw.h"
+#include "../mhi.h"
+#include "mhi.h"
+#include "dp_rx.h"
+#include "../peer.h"
+#include "wmi.h"
+#include "../wow.h"
+#include "../debugfs.h"
+#include "../debugfs_sta.h"
+#include "../testmode.h"
+#include "hal.h"
+#include "dp_tx.h"
+
+static const guid_t wcn7850_uuid = GUID_INIT(0xf634f534, 0x6147, 0x11ec,
+ 0x90, 0xd6, 0x02, 0x42,
+ 0xac, 0x12, 0x00, 0x03);
+
+static u8 ath12k_wifi7_hw_qcn9274_mac_from_pdev_id(int pdev_idx)
+{
+ return pdev_idx;
+}
+
+static int
+ath12k_wifi7_hw_mac_id_to_pdev_id_qcn9274(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
+
+static int
+ath12k_wifi7_hw_mac_id_to_srng_id_qcn9274(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static u8 ath12k_wifi7_hw_get_ring_selector_qcn9274(struct sk_buff *skb)
+{
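+	/* Spread TX across TCL rings based on the submitting CPU */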
+ return smp_processor_id();
+}
+
+static bool ath12k_wifi7_dp_srng_is_comp_ring_qcn9274(int ring_num)
+{
+ if (ring_num < 3 || ring_num == 4)
+ return true;
+
+ return false;
+}
+
+static bool
+ath12k_wifi7_is_frame_link_agnostic_qcn9274(struct ath12k_link_vif *arvif,
+ struct ieee80211_mgmt *mgmt)
+{
+ return ieee80211_is_action(mgmt->frame_control);
+}
+
+static int
+ath12k_wifi7_hw_mac_id_to_pdev_id_wcn7850(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static int
+ath12k_wifi7_hw_mac_id_to_srng_id_wcn7850(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
+
+static u8 ath12k_wifi7_hw_get_ring_selector_wcn7850(struct sk_buff *skb)
+{
+ return skb_get_queue_mapping(skb);
+}
+
+static bool ath12k_wifi7_dp_srng_is_comp_ring_wcn7850(int ring_num)
+{
+ if (ring_num == 0 || ring_num == 2 || ring_num == 4)
+ return true;
+
+ return false;
+}
+
+static bool ath12k_is_addba_resp_action_code(struct ieee80211_mgmt *mgmt)
+{
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+
+ if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
+ return false;
+
+ if (mgmt->u.action.u.addba_resp.action_code != WLAN_ACTION_ADDBA_RESP)
+ return false;
+
+ return true;
+}
+
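+/*
+ * A frame is "link agnostic" when it may be transmitted on any active
+ * link of an MLD instead of being tied to one specific link.
+ */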
+static bool
+ath12k_wifi7_is_frame_link_agnostic_wcn7850(struct ath12k_link_vif *arvif,
+ struct ieee80211_mgmt *mgmt)
+{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ath12k_hw *ah = ath12k_ar_to_ah(arvif->ar);
+ struct ath12k_base *ab = arvif->ar->ab;
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_dp_peer *peer;
+ __le16 fc = mgmt->frame_control;
+
+ spin_lock_bh(&dp->dp_lock);
+ if (!ath12k_dp_link_peer_find_by_addr(dp, mgmt->da)) {
+ spin_lock_bh(&ah->dp_hw.peer_lock);
+ peer = ath12k_dp_peer_find_by_addr(&ah->dp_hw, mgmt->da);
+ if (!peer || (peer && !peer->is_mlo)) {
+ spin_unlock_bh(&ah->dp_hw.peer_lock);
+ spin_unlock_bh(&dp->dp_lock);
+ return false;
+ }
+ spin_unlock_bh(&ah->dp_hw.peer_lock);
+ }
+ spin_unlock_bh(&dp->dp_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return arvif->is_up &&
+ (vif->valid_links == vif->active_links) &&
+ !ieee80211_is_probe_req(fc) &&
+ !ieee80211_is_auth(fc) &&
+ !ieee80211_is_deauth(fc) &&
+ !ath12k_is_addba_resp_action_code(mgmt);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ return !(ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
+ ieee80211_is_assoc_resp(fc) || ieee80211_is_reassoc_resp(fc) ||
+ ath12k_is_addba_resp_action_code(mgmt));
+
+ return false;
+}
+
+static const struct ath12k_hw_ops qcn9274_ops = {
+ .get_hw_mac_from_pdev_id = ath12k_wifi7_hw_qcn9274_mac_from_pdev_id,
+ .mac_id_to_pdev_id = ath12k_wifi7_hw_mac_id_to_pdev_id_qcn9274,
+ .mac_id_to_srng_id = ath12k_wifi7_hw_mac_id_to_srng_id_qcn9274,
+ .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_qcn9274,
+ .get_ring_selector = ath12k_wifi7_hw_get_ring_selector_qcn9274,
+ .dp_srng_is_tx_comp_ring = ath12k_wifi7_dp_srng_is_comp_ring_qcn9274,
+ .is_frame_link_agnostic = ath12k_wifi7_is_frame_link_agnostic_qcn9274,
+};
+
+static const struct ath12k_hw_ops wcn7850_ops = {
+ .get_hw_mac_from_pdev_id = ath12k_wifi7_hw_qcn9274_mac_from_pdev_id,
+ .mac_id_to_pdev_id = ath12k_wifi7_hw_mac_id_to_pdev_id_wcn7850,
+ .mac_id_to_srng_id = ath12k_wifi7_hw_mac_id_to_srng_id_wcn7850,
+ .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_wcn7850,
+ .get_ring_selector = ath12k_wifi7_hw_get_ring_selector_wcn7850,
+ .dp_srng_is_tx_comp_ring = ath12k_wifi7_dp_srng_is_comp_ring_wcn7850,
+ .is_frame_link_agnostic = ath12k_wifi7_is_frame_link_agnostic_wcn7850,
+};
+
+static const struct ath12k_hw_ops qcc2072_ops = {
+ .get_hw_mac_from_pdev_id = ath12k_wifi7_hw_qcn9274_mac_from_pdev_id,
+ .mac_id_to_pdev_id = ath12k_wifi7_hw_mac_id_to_pdev_id_wcn7850,
+ .mac_id_to_srng_id = ath12k_wifi7_hw_mac_id_to_srng_id_wcn7850,
+ .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_qcc2072,
+ .get_ring_selector = ath12k_wifi7_hw_get_ring_selector_wcn7850,
+ .dp_srng_is_tx_comp_ring = ath12k_wifi7_dp_srng_is_comp_ring_wcn7850,
+ .is_frame_link_agnostic = ath12k_wifi7_is_frame_link_agnostic_wcn7850,
+};
+
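+/*
+ * Ring interrupt masks: bit N of a mask selects ring instance N within
+ * the corresponding group of the ath12k_hw_ring_mask tables below.
+ */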
+#define ATH12K_TX_RING_MASK_0 0x1
+#define ATH12K_TX_RING_MASK_1 0x2
+#define ATH12K_TX_RING_MASK_2 0x4
+#define ATH12K_TX_RING_MASK_3 0x8
+#define ATH12K_TX_RING_MASK_4 0x10
+
+#define ATH12K_RX_RING_MASK_0 0x1
+#define ATH12K_RX_RING_MASK_1 0x2
+#define ATH12K_RX_RING_MASK_2 0x4
+#define ATH12K_RX_RING_MASK_3 0x8
+
+#define ATH12K_RX_ERR_RING_MASK_0 0x1
+
+#define ATH12K_RX_WBM_REL_RING_MASK_0 0x1
+
+#define ATH12K_REO_STATUS_RING_MASK_0 0x1
+
+#define ATH12K_HOST2RXDMA_RING_MASK_0 0x1
+
+#define ATH12K_RX_MON_RING_MASK_0 0x1
+#define ATH12K_RX_MON_RING_MASK_1 0x2
+#define ATH12K_RX_MON_RING_MASK_2 0x4
+
+#define ATH12K_TX_MON_RING_MASK_0 0x1
+#define ATH12K_TX_MON_RING_MASK_1 0x2
+
+#define ATH12K_RX_MON_STATUS_RING_MASK_0 0x1
+#define ATH12K_RX_MON_STATUS_RING_MASK_1 0x2
+#define ATH12K_RX_MON_STATUS_RING_MASK_2 0x4
+
+static const struct ath12k_hw_ring_mask ath12k_wifi7_hw_ring_mask_qcn9274 = {
+ .tx = {
+ ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_1,
+ ATH12K_TX_RING_MASK_2,
+ ATH12K_TX_RING_MASK_3,
+ },
+ .rx_mon_dest = {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ ATH12K_RX_MON_RING_MASK_0,
+ ATH12K_RX_MON_RING_MASK_1,
+ ATH12K_RX_MON_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0,
+ ATH12K_RX_RING_MASK_0,
+ ATH12K_RX_RING_MASK_1,
+ ATH12K_RX_RING_MASK_2,
+ ATH12K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, 0, 0,
+ ATH12K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, 0, 0,
+ ATH12K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, 0, 0,
+ ATH12K_REO_STATUS_RING_MASK_0,
+ },
+ .host2rxdma = {
+ 0, 0, 0,
+ ATH12K_HOST2RXDMA_RING_MASK_0,
+ },
+ .tx_mon_dest = {
+ 0, 0, 0,
+ },
+};
+
+static const struct ath12k_hw_ring_mask ath12k_wifi7_hw_ring_mask_ipq5332 = {
+ .tx = {
+ ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_1,
+ ATH12K_TX_RING_MASK_2,
+ ATH12K_TX_RING_MASK_3,
+ },
+ .rx_mon_dest = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ ATH12K_RX_MON_RING_MASK_0,
+ },
+ .rx = {
+ 0, 0, 0, 0,
+ ATH12K_RX_RING_MASK_0,
+ ATH12K_RX_RING_MASK_1,
+ ATH12K_RX_RING_MASK_2,
+ ATH12K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, 0, 0,
+ ATH12K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, 0, 0,
+ ATH12K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, 0, 0,
+ ATH12K_REO_STATUS_RING_MASK_0,
+ },
+ .host2rxdma = {
+ 0, 0, 0,
+ ATH12K_HOST2RXDMA_RING_MASK_0,
+ },
+ .tx_mon_dest = {
+ ATH12K_TX_MON_RING_MASK_0,
+ ATH12K_TX_MON_RING_MASK_1,
+ },
+};
+
+static const struct ath12k_hw_ring_mask ath12k_wifi7_hw_ring_mask_wcn7850 = {
+ .tx = {
+ ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_1,
+ ATH12K_TX_RING_MASK_2,
+ },
+ .rx_mon_dest = {
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0,
+ ATH12K_RX_MON_STATUS_RING_MASK_0,
+ ATH12K_RX_MON_STATUS_RING_MASK_1,
+ ATH12K_RX_MON_STATUS_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0,
+ ATH12K_RX_RING_MASK_0,
+ ATH12K_RX_RING_MASK_1,
+ ATH12K_RX_RING_MASK_2,
+ ATH12K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ ATH12K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ ATH12K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ ATH12K_REO_STATUS_RING_MASK_0,
+ },
+ .host2rxdma = {
+ },
+ .tx_mon_dest = {
+ },
+};
+
+static const struct ce_ie_addr ath12k_wifi7_ce_ie_addr_ipq5332 = {
+ .ie1_reg_addr = CE_HOST_IE_ADDRESS - HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .ie2_reg_addr = CE_HOST_IE_2_ADDRESS - HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .ie3_reg_addr = CE_HOST_IE_3_ADDRESS - HAL_IPQ5332_CE_WFSS_REG_BASE,
+};
+
+static const struct ce_remap ath12k_wifi7_ce_remap_ipq5332 = {
+ .base = HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .size = HAL_IPQ5332_CE_SIZE,
+ .cmem_offset = HAL_SEQ_WCSS_CMEM_OFFSET,
+};
+
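+/* Per-chip hardware parameters, matched against ab->hw_rev at init time */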
+static const struct ath12k_hw_params ath12k_wifi7_hw_params[] = {
+ {
+ .name = "qcn9274 hw1.0",
+ .hw_rev = ATH12K_HW_QCN9274_HW10,
+ .fw = {
+ .dir = "QCN9274/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_driver,
+ .download_aux_ucode = false,
+ },
+ .max_radios = 1,
+ .single_pdev_only = false,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
+ .internal_sleep_clock = false,
+
+ .hw_ops = &qcn9274_ops,
+ .ring_mask = &ath12k_wifi7_hw_ring_mask_qcn9274,
+
+ .host_ce_config = ath12k_wifi7_host_ce_config_qcn9274,
+ .ce_count = 16,
+ .target_ce_config = ath12k_wifi7_target_ce_config_wlan_qcn9274,
+ .target_ce_count = 12,
+ .svc_to_ce_map =
+ ath12k_wifi7_target_service_to_ce_map_wlan_qcn9274,
+ .svc_to_ce_map_len = 18,
+
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 1,
+ .num_rxdma_dst_ring = 0,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_AP_VLAN),
+ .supports_monitor = false,
+
+ .idle_ps = false,
+ .download_calib = true,
+ .supports_suspend = false,
+ .tcl_ring_retry = true,
+ .reoq_lut_support = true,
+ .supports_shadow_regs = false,
+
+ .num_tcl_banks = 48,
+ .max_tx_ring = 4,
+
+ .mhi_config = &ath12k_wifi7_mhi_config_qcn9274,
+
+ .wmi_init = ath12k_wifi7_wmi_init_qcn9274,
+
+ .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01),
+
+ .rfkill_pin = 0,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 0,
+
+ .rddm_size = 0x600000,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
+
+ .supports_sta_ps = false,
+
+ .acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = true,
+
+ .iova_mask = 0,
+
+ .supports_aspm = false,
+
+ .ce_ie_addr = NULL,
+ .ce_remap = NULL,
+ .bdf_addr_offset = 0,
+
+ .current_cc_support = false,
+
+ .dp_primary_link_only = true,
+ },
+ {
+ .name = "wcn7850 hw2.0",
+ .hw_rev = ATH12K_HW_WCN7850_HW20,
+
+ .fw = {
+ .dir = "WCN7850/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 256 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_driver,
+ .download_aux_ucode = false,
+ },
+
+ .max_radios = 1,
+ .single_pdev_only = true,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850,
+ .internal_sleep_clock = true,
+
+ .hw_ops = &wcn7850_ops,
+ .ring_mask = &ath12k_wifi7_hw_ring_mask_wcn7850,
+
+ .host_ce_config = ath12k_wifi7_host_ce_config_wcn7850,
+ .ce_count = 9,
+ .target_ce_config = ath12k_wifi7_target_ce_config_wlan_wcn7850,
+ .target_ce_count = 9,
+ .svc_to_ce_map =
+ ath12k_wifi7_target_service_to_ce_map_wlan_wcn7850,
+ .svc_to_ce_map_len = 14,
+
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 2,
+ .num_rxdma_dst_ring = 1,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ .supports_monitor = true,
+
+ .idle_ps = true,
+ .download_calib = false,
+ .supports_suspend = true,
+ .tcl_ring_retry = false,
+ .reoq_lut_support = false,
+ .supports_shadow_regs = true,
+
+ .num_tcl_banks = 7,
+ .max_tx_ring = 3,
+
+ .mhi_config = &ath12k_wifi7_mhi_config_wcn7850,
+
+ .wmi_init = ath12k_wifi7_wmi_init_wcn7850,
+
+ .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01) |
+ BIT(CNSS_PCIE_PERST_NO_PULL_V01),
+
+ .rfkill_pin = 48,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 1,
+
+ .rddm_size = 0x780000,
+
+ .def_num_link = 2,
+ .max_mlo_peer = 32,
+
+ .otp_board_id_register = 0,
+
+ .supports_sta_ps = true,
+
+ .acpi_guid = &wcn7850_uuid,
+ .supports_dynamic_smps_6ghz = false,
+
+ .iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1,
+
+ .supports_aspm = true,
+
+ .ce_ie_addr = NULL,
+ .ce_remap = NULL,
+ .bdf_addr_offset = 0,
+
+ .current_cc_support = true,
+
+ .dp_primary_link_only = false,
+ },
+ {
+ .name = "qcn9274 hw2.0",
+ .hw_rev = ATH12K_HW_QCN9274_HW20,
+ .fw = {
+ .dir = "QCN9274/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_driver,
+ .download_aux_ucode = false,
+ },
+ .max_radios = 2,
+ .single_pdev_only = false,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
+ .internal_sleep_clock = false,
+
+ .hw_ops = &qcn9274_ops,
+ .ring_mask = &ath12k_wifi7_hw_ring_mask_qcn9274,
+
+ .host_ce_config = ath12k_wifi7_host_ce_config_qcn9274,
+ .ce_count = 16,
+ .target_ce_config = ath12k_wifi7_target_ce_config_wlan_qcn9274,
+ .target_ce_count = 12,
+ .svc_to_ce_map =
+ ath12k_wifi7_target_service_to_ce_map_wlan_qcn9274,
+ .svc_to_ce_map_len = 18,
+
+ .rxdma1_enable = true,
+ .num_rxdma_per_pdev = 1,
+ .num_rxdma_dst_ring = 0,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_AP_VLAN),
+ .supports_monitor = true,
+
+ .idle_ps = false,
+ .download_calib = true,
+ .supports_suspend = false,
+ .tcl_ring_retry = true,
+ .reoq_lut_support = true,
+ .supports_shadow_regs = false,
+
+ .num_tcl_banks = 48,
+ .max_tx_ring = 4,
+
+ .mhi_config = &ath12k_wifi7_mhi_config_qcn9274,
+
+ .wmi_init = ath12k_wifi7_wmi_init_qcn9274,
+
+ .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01),
+
+ .rfkill_pin = 0,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 0,
+
+ .rddm_size = 0x600000,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
+
+ .supports_sta_ps = false,
+
+ .acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = true,
+
+ .iova_mask = 0,
+
+ .supports_aspm = false,
+
+ .ce_ie_addr = NULL,
+ .ce_remap = NULL,
+ .bdf_addr_offset = 0,
+
+ .current_cc_support = false,
+
+ .dp_primary_link_only = true,
+ },
+ {
+ .name = "ipq5332 hw1.0",
+ .hw_rev = ATH12K_HW_IPQ5332_HW10,
+ .fw = {
+ .dir = "IPQ5332/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_remoteproc,
+ .download_aux_ucode = false,
+ },
+ .max_radios = 1,
+ .single_pdev_only = false,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ5332,
+ .internal_sleep_clock = false,
+
+ .hw_ops = &qcn9274_ops,
+ .ring_mask = &ath12k_wifi7_hw_ring_mask_ipq5332,
+
+ .host_ce_config = ath12k_wifi7_host_ce_config_ipq5332,
+ .ce_count = 12,
+ .target_ce_config = ath12k_wifi7_target_ce_config_wlan_ipq5332,
+ .target_ce_count = 12,
+ .svc_to_ce_map =
+ ath12k_wifi7_target_service_to_ce_map_wlan_ipq5332,
+ .svc_to_ce_map_len = 18,
+
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 1,
+ .num_rxdma_dst_ring = 0,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = false,
+
+ .idle_ps = false,
+ .download_calib = true,
+ .supports_suspend = false,
+ .tcl_ring_retry = true,
+ .reoq_lut_support = false,
+ .supports_shadow_regs = false,
+
+ .num_tcl_banks = 48,
+ .max_tx_ring = 4,
+
+		.wmi_init = ath12k_wifi7_wmi_init_qcn9274,
+
+ .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01),
+
+ .rfkill_pin = 0,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 0,
+
+ .rddm_size = 0,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = 0,
+
+ .supports_sta_ps = false,
+
+ .acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = false,
+ .iova_mask = 0,
+ .supports_aspm = false,
+
+ .ce_ie_addr = &ath12k_wifi7_ce_ie_addr_ipq5332,
+ .ce_remap = &ath12k_wifi7_ce_remap_ipq5332,
+ .bdf_addr_offset = 0xC00000,
+
+ .dp_primary_link_only = true,
+ },
+ {
+ .name = "qcc2072 hw1.0",
+ .hw_rev = ATH12K_HW_QCC2072_HW10,
+
+ .fw = {
+ .dir = "QCC2072/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 256 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_driver,
+ .download_aux_ucode = true,
+ },
+
+ .max_radios = 1,
+ .single_pdev_only = true,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850,
+ .internal_sleep_clock = true,
+
+ .hw_ops = &qcc2072_ops,
+ .ring_mask = &ath12k_wifi7_hw_ring_mask_wcn7850,
+
+ .host_ce_config = ath12k_wifi7_host_ce_config_wcn7850,
+ .ce_count = 9,
+ .target_ce_config = ath12k_wifi7_target_ce_config_wlan_wcn7850,
+ .target_ce_count = 9,
+ .svc_to_ce_map =
+ ath12k_wifi7_target_service_to_ce_map_wlan_wcn7850,
+ .svc_to_ce_map_len = 14,
+
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 2,
+ .num_rxdma_dst_ring = 1,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ .supports_monitor = true,
+
+ .idle_ps = true,
+ .download_calib = false,
+ .supports_suspend = true,
+ .tcl_ring_retry = false,
+ .reoq_lut_support = false,
+ .supports_shadow_regs = true,
+
+ .num_tcl_banks = 7,
+ .max_tx_ring = 3,
+
+ .mhi_config = &ath12k_wifi7_mhi_config_wcn7850,
+
+ .wmi_init = ath12k_wifi7_wmi_init_wcn7850,
+
+ .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01) |
+ BIT(CNSS_PCIE_PERST_NO_PULL_V01) |
+ BIT(CNSS_AUX_UC_SUPPORT_V01),
+
+ .rfkill_pin = 0,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 0,
+
+ .rddm_size = 0x780000,
+
+ .def_num_link = 2,
+ .max_mlo_peer = 32,
+
+ .otp_board_id_register = 0,
+
+ .supports_sta_ps = true,
+
+ .acpi_guid = &wcn7850_uuid,
+ .supports_dynamic_smps_6ghz = false,
+
+ .iova_mask = 0,
+
+ .supports_aspm = true,
+
+ .ce_ie_addr = NULL,
+ .ce_remap = NULL,
+ .bdf_addr_offset = 0,
+
+ .current_cc_support = true,
+
+ .dp_primary_link_only = false,
+ },
+};
+
+/* Note: called under rcu_read_lock() */
+static void ath12k_wifi7_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif = &ahvif->deflink;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_sta *sta = control->sta;
+ struct ath12k_link_vif *tmp_arvif;
+ u32 info_flags = info->flags;
+ struct sk_buff *msdu_copied;
+ struct ath12k *ar, *tmp_ar;
+ struct ath12k_pdev_dp *dp_pdev, *tmp_dp_pdev;
+ struct ath12k_dp_link_peer *peer;
+ unsigned long links_map;
+ bool is_mcast = false;
+ bool is_dvlan = false;
+ struct ethhdr *eth;
+ bool is_prb_rsp;
+ u16 mcbc_gsn;
+ u8 link_id;
+ int ret;
+ struct ath12k_dp *tmp_dp;
+
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ skb_cb->vif = vif;
+
+ if (key) {
+ skb_cb->cipher = key->cipher;
+ skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
+ }
+
+	/* Handle only the MLO case; use deflink for the non-MLO case */
+ if (ieee80211_vif_is_mld(vif)) {
+ link_id = ath12k_mac_get_tx_link(sta, vif, link_id, skb, info_flags);
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ } else {
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ link_id = ATH12K_FIRST_SCAN_LINK;
+ else
+ link_id = 0;
+ }
+
+ arvif = rcu_dereference(ahvif->link[link_id]);
+ if (!arvif || !arvif->ar) {
+ ath12k_warn(ahvif->ah, "failed to find arvif link id %u for frame transmission",
+ link_id);
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ ar = arvif->ar;
+ skb_cb->link_id = link_id;
+	/*
+	 * skb_cb is currently shared between DP and management TX
+	 * processing, so set this in the common mac op tx function.
+	 */
+ skb_cb->ar = ar;
+ is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ eth = (struct ethhdr *)skb->data;
+ is_mcast = is_multicast_ether_addr(eth->h_dest);
+
+ skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
+ } else if (ieee80211_is_mgmt(hdr->frame_control)) {
+ if (sta && sta->mlo)
+ skb_cb->flags |= ATH12K_SKB_MLO_STA;
+
+ ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to queue management frame %d\n",
+ ret);
+ ieee80211_free_txskb(hw, skb);
+ }
+ return;
+ }
+
+ if (!(info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
+ is_mcast = is_multicast_ether_addr(hdr->addr1);
+
+	/* This case applies only to P2P_GO */
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
+ ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
+
+ dp_pdev = ath12k_dp_to_pdev_dp(ar->ab->dp, ar->pdev_idx);
+ if (!dp_pdev) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ /* Checking if it is a DVLAN frame */
+ if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+ !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+ !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+ ieee80211_has_protected(hdr->frame_control))
+ is_dvlan = true;
+
+ if (!vif->valid_links || !is_mcast || is_dvlan ||
+ (skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) ||
+ test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) {
+ ret = ath12k_wifi7_dp_tx(dp_pdev, arvif, skb, false, 0, is_mcast);
+ if (unlikely(ret)) {
+ ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
+ ieee80211_free_txskb(ar->ah->hw, skb);
+ return;
+ }
+ } else {
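+		/*
+		 * MLO multicast: send a copy of the frame on each active
+		 * link, all tagged with the same group sequence number
+		 * (mcbc_gsn).
+		 */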
+ mcbc_gsn = atomic_inc_return(&ahvif->dp_vif.mcbc_gsn) & 0xfff;
+
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ tmp_arvif = rcu_dereference(ahvif->link[link_id]);
+ if (!tmp_arvif || !tmp_arvif->is_up)
+ continue;
+
+ tmp_ar = tmp_arvif->ar;
+ tmp_dp_pdev = ath12k_dp_to_pdev_dp(tmp_ar->ab->dp,
+ tmp_ar->pdev_idx);
+ if (!tmp_dp_pdev)
+ continue;
+ msdu_copied = skb_copy(skb, GFP_ATOMIC);
+ if (!msdu_copied) {
+ ath12k_err(ar->ab,
+ "skb copy failure link_id 0x%X vdevid 0x%X\n",
+ link_id, tmp_arvif->vdev_id);
+ continue;
+ }
+
+ ath12k_mlo_mcast_update_tx_link_address(vif, link_id,
+ msdu_copied,
+ info_flags);
+
+ skb_cb = ATH12K_SKB_CB(msdu_copied);
+ skb_cb->link_id = link_id;
+ skb_cb->vif = vif;
+ skb_cb->ar = tmp_ar;
+
+ /* For open mode, skip peer find logic */
+ if (unlikely(!ahvif->dp_vif.key_cipher))
+ goto skip_peer_find;
+
+ tmp_dp = ath12k_ab_to_dp(tmp_ar->ab);
+ spin_lock_bh(&tmp_dp->dp_lock);
+ peer = ath12k_dp_link_peer_find_by_addr(tmp_dp,
+ tmp_arvif->bssid);
+ if (!peer || !peer->dp_peer) {
+ spin_unlock_bh(&tmp_dp->dp_lock);
+ ath12k_warn(tmp_ar->ab,
+ "failed to find peer for vdev_id 0x%X addr %pM link_map 0x%X\n",
+ tmp_arvif->vdev_id, tmp_arvif->bssid,
+ ahvif->links_map);
+ dev_kfree_skb_any(msdu_copied);
+ continue;
+ }
+
+ key = peer->dp_peer->keys[peer->dp_peer->mcast_keyidx];
+ if (key) {
+ skb_cb->cipher = key->cipher;
+ skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
+
+ hdr = (struct ieee80211_hdr *)msdu_copied->data;
+ if (!ieee80211_has_protected(hdr->frame_control))
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+ spin_unlock_bh(&tmp_dp->dp_lock);
+
+skip_peer_find:
+ ret = ath12k_wifi7_dp_tx(tmp_dp_pdev, tmp_arvif,
+ msdu_copied, true, mcbc_gsn, is_mcast);
+ if (unlikely(ret)) {
+ if (ret == -ENOMEM) {
+				/* Drops are expected during a heavy multicast
+				 * frame flood. Log at debug level to avoid
+				 * flooding the console.
+				 */
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "failed to transmit frame %d\n",
+ ret);
+ } else {
+ ath12k_warn(ar->ab,
+ "failed to transmit frame %d\n",
+ ret);
+ }
+
+ dev_kfree_skb_any(msdu_copied);
+ }
+ }
+ ieee80211_free_txskb(ar->ah->hw, skb);
+ }
+}
+
+static const struct ieee80211_ops ath12k_ops_wifi7 = {
+ .tx = ath12k_wifi7_mac_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
+ .start = ath12k_mac_op_start,
+ .stop = ath12k_mac_op_stop,
+ .reconfig_complete = ath12k_mac_op_reconfig_complete,
+ .add_interface = ath12k_mac_op_add_interface,
+ .remove_interface = ath12k_mac_op_remove_interface,
+ .update_vif_offload = ath12k_mac_op_update_vif_offload,
+ .config = ath12k_mac_op_config,
+ .link_info_changed = ath12k_mac_op_link_info_changed,
+ .vif_cfg_changed = ath12k_mac_op_vif_cfg_changed,
+ .change_vif_links = ath12k_mac_op_change_vif_links,
+ .configure_filter = ath12k_mac_op_configure_filter,
+ .hw_scan = ath12k_mac_op_hw_scan,
+ .cancel_hw_scan = ath12k_mac_op_cancel_hw_scan,
+ .set_key = ath12k_mac_op_set_key,
+ .set_rekey_data = ath12k_mac_op_set_rekey_data,
+ .sta_state = ath12k_mac_op_sta_state,
+ .sta_set_txpwr = ath12k_mac_op_sta_set_txpwr,
+ .link_sta_rc_update = ath12k_mac_op_link_sta_rc_update,
+ .conf_tx = ath12k_mac_op_conf_tx,
+ .set_antenna = ath12k_mac_op_set_antenna,
+ .get_antenna = ath12k_mac_op_get_antenna,
+ .ampdu_action = ath12k_mac_op_ampdu_action,
+ .add_chanctx = ath12k_mac_op_add_chanctx,
+ .remove_chanctx = ath12k_mac_op_remove_chanctx,
+ .change_chanctx = ath12k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath12k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath12k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath12k_mac_op_switch_vif_chanctx,
+ .get_txpower = ath12k_mac_op_get_txpower,
+ .set_rts_threshold = ath12k_mac_op_set_rts_threshold,
+ .set_frag_threshold = ath12k_mac_op_set_frag_threshold,
+ .set_bitrate_mask = ath12k_mac_op_set_bitrate_mask,
+ .get_survey = ath12k_mac_op_get_survey,
+ .flush = ath12k_mac_op_flush,
+ .sta_statistics = ath12k_mac_op_sta_statistics,
+ .link_sta_statistics = ath12k_mac_op_link_sta_statistics,
+ .remain_on_channel = ath12k_mac_op_remain_on_channel,
+ .cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
+ .change_sta_links = ath12k_mac_op_change_sta_links,
+ .can_activate_links = ath12k_mac_op_can_activate_links,
+#ifdef CONFIG_PM
+ .suspend = ath12k_wow_op_suspend,
+ .resume = ath12k_wow_op_resume,
+ .set_wakeup = ath12k_wow_op_set_wakeup,
+#endif
+#ifdef CONFIG_ATH12K_DEBUGFS
+ .vif_add_debugfs = ath12k_debugfs_op_vif_add,
+#endif
+ CFG80211_TESTMODE_CMD(ath12k_tm_cmd)
+#ifdef CONFIG_ATH12K_DEBUGFS
+ .link_sta_add_debugfs = ath12k_debugfs_link_sta_op_add,
+#endif
+};
+
+int ath12k_wifi7_hw_init(struct ath12k_base *ab)
+{
+ const struct ath12k_hw_params *hw_params = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath12k_wifi7_hw_params); i++) {
+ hw_params = &ath12k_wifi7_hw_params[i];
+
+ if (hw_params->hw_rev == ab->hw_rev)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath12k_wifi7_hw_params)) {
+ ath12k_err(ab, "Unsupported Wi-Fi 7 hardware version: 0x%x\n",
+ ab->hw_rev);
+ return -EINVAL;
+ }
+
+ ab->hw_params = hw_params;
+ ab->ath12k_ops = &ath12k_ops_wifi7;
+
+ ath12k_wifi7_hal_init(ab);
+
+ ath12k_info(ab, "Wi-Fi 7 Hardware name: %s\n", ab->hw_params->name);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/hw.h b/drivers/net/wireless/ath/ath12k/wifi7/hw.h
new file mode 100644
index 000000000000..643b6fdfdb66
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hw.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_WIFI7_HW_H
+#define ATH12K_WIFI7_HW_H
+
+struct ath12k_base;
+int ath12k_wifi7_hw_init(struct ath12k_base *ab);
+
+#endif /* ATH12K_WIFI7_HW_H */
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/mhi.c b/drivers/net/wireless/ath/ath12k/wifi7/mhi.c
new file mode 100644
index 000000000000..b8d972659314
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/mhi.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "../mhi.h"
+#include "mhi.h"
+
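+/*
+ * MHI channels 20/21 form the IPCR channel pair used for the QMI
+ * control path to and from the device.
+ */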
+static const struct mhi_channel_config ath12k_wifi7_mhi_channels_qcn9274[] = {
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ },
+};
+
+static struct mhi_event_config ath12k_wifi7_mhi_events_qcn9274[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .data_type = MHI_ER_CTRL,
+ .mode = MHI_DB_BRST_DISABLE,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+		if (!peer || !peer->is_mlo) {
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+const struct mhi_controller_config ath12k_wifi7_mhi_config_qcn9274 = {
+ .max_channels = 30,
+ .timeout_ms = 10000,
+ .use_bounce_buf = false,
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(ath12k_wifi7_mhi_channels_qcn9274),
+ .ch_cfg = ath12k_wifi7_mhi_channels_qcn9274,
+ .num_events = ARRAY_SIZE(ath12k_wifi7_mhi_events_qcn9274),
+ .event_cfg = ath12k_wifi7_mhi_events_qcn9274,
+};
+
+static const struct mhi_channel_config ath12k_wifi7_mhi_channels_wcn7850[] = {
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ },
+};
+
+static struct mhi_event_config ath12k_wifi7_mhi_events_wcn7850[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 1,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+const struct mhi_controller_config ath12k_wifi7_mhi_config_wcn7850 = {
+ .max_channels = 128,
+ .timeout_ms = 2000,
+ .use_bounce_buf = false,
+ .buf_len = 8192,
+ .num_channels = ARRAY_SIZE(ath12k_wifi7_mhi_channels_wcn7850),
+ .ch_cfg = ath12k_wifi7_mhi_channels_wcn7850,
+ .num_events = ARRAY_SIZE(ath12k_wifi7_mhi_events_wcn7850),
+ .event_cfg = ath12k_wifi7_mhi_events_wcn7850,
+};
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/mhi.h b/drivers/net/wireless/ath/ath12k/wifi7/mhi.h
new file mode 100644
index 000000000000..2e2dd3503d83
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/mhi.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _ATH12K_WIFI7_MHI_H
+#define _ATH12K_WIFI7_MHI_H
+extern const struct mhi_controller_config ath12k_wifi7_mhi_config_qcn9274;
+extern const struct mhi_controller_config ath12k_wifi7_mhi_config_wcn7850;
+#endif /* _ATH12K_WIFI7_MHI_H */
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/pci.c b/drivers/net/wireless/ath/ath12k/wifi7/pci.c
new file mode 100644
index 000000000000..6c96b52dec13
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/pci.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/pci.h>
+
+#include "../pci.h"
+#include "pci.h"
+#include "../core.h"
+#include "../hif.h"
+#include "../mhi.h"
+#include "hw.h"
+#include "../hal.h"
+#include "dp.h"
+#include "core.h"
+#include "hal.h"
+
+#define QCN9274_DEVICE_ID 0x1109
+#define WCN7850_DEVICE_ID 0x1107
+#define QCC2072_DEVICE_ID 0x1112
+
+#define ATH12K_PCI_W7_SOC_HW_VERSION_1 1
+#define ATH12K_PCI_W7_SOC_HW_VERSION_2 2
+
+#define TCSR_SOC_HW_VERSION 0x1B00000
+#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
+#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 4)
+
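+/* Window register used for indirect access to the target register space */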
+#define WINDOW_REG_ADDRESS 0x310c
+#define WINDOW_REG_ADDRESS_QCC2072 0x3278
+
+static const struct pci_device_id ath12k_wifi7_pci_id_table[] = {
+ { PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
+ { PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
+ { PCI_VDEVICE(QCOM, QCC2072_DEVICE_ID) },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, ath12k_wifi7_pci_id_table);
+
+/* TODO: revisit IRQ mapping for new SRNGs */
+static const struct ath12k_msi_config ath12k_wifi7_msi_config[] = {
+ {
+ .total_vectors = 16,
+ .total_users = 3,
+ .users = (struct ath12k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 5, .base_vector = 3 },
+ { .name = "DP", .num_vectors = 8, .base_vector = 8 },
+ },
+ },
+};
+
+static const struct ath12k_pci_ops ath12k_wifi7_pci_ops_qcn9274 = {
+ .wakeup = NULL,
+ .release = NULL,
+};
+
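+/*
+ * WCN7850 needs an explicit MHI wakeup before host register accesses;
+ * QCN9274 does not, hence the NULL wakeup/release ops above.
+ */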
+static int ath12k_wifi7_pci_bus_wake_up(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+}
+
+static void ath12k_wifi7_pci_bus_release(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+}
+
+static const struct ath12k_pci_ops ath12k_wifi7_pci_ops_wcn7850 = {
+ .wakeup = ath12k_wifi7_pci_bus_wake_up,
+ .release = ath12k_wifi7_pci_bus_release,
+};
+
+static
+void ath12k_wifi7_pci_read_hw_version(struct ath12k_base *ab,
+ u32 *major, u32 *minor)
+{
+ u32 soc_hw_version;
+
+ soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION);
+ *major = u32_get_bits(soc_hw_version, TCSR_SOC_HW_VERSION_MAJOR_MASK);
+ *minor = u32_get_bits(soc_hw_version, TCSR_SOC_HW_VERSION_MINOR_MASK);
+}
+
+static int ath12k_wifi7_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_dev)
+{
+ u32 soc_hw_version_major, soc_hw_version_minor;
+ struct ath12k_pci *ab_pci;
+ struct ath12k_base *ab;
+ int ret;
+
+ ab = pci_get_drvdata(pdev);
+ if (!ab)
+ return -EINVAL;
+
+ ab_pci = ath12k_pci_priv(ab);
+ if (!ab_pci)
+ return -EINVAL;
+
+ switch (pci_dev->device) {
+ case QCN9274_DEVICE_ID:
+ ab_pci->msi_config = &ath12k_wifi7_msi_config[0];
+ ab->static_window_map = true;
+ ab_pci->pci_ops = &ath12k_wifi7_pci_ops_qcn9274;
+		/*
+		 * Initialize the window register address before reading the
+		 * hardware version, as the read depends on it.
+		 */
+ ab_pci->window_reg_addr = WINDOW_REG_ADDRESS;
+ ath12k_wifi7_pci_read_hw_version(ab, &soc_hw_version_major,
+ &soc_hw_version_minor);
+ ab->target_mem_mode = ath12k_core_get_memory_mode(ab);
+ switch (soc_hw_version_major) {
+ case ATH12K_PCI_W7_SOC_HW_VERSION_2:
+ ab->hw_rev = ATH12K_HW_QCN9274_HW20;
+ break;
+ case ATH12K_PCI_W7_SOC_HW_VERSION_1:
+ ab->hw_rev = ATH12K_HW_QCN9274_HW10;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "Unknown hardware version found for QCN9274: 0x%x\n",
+ soc_hw_version_major);
+ return -EOPNOTSUPP;
+ }
+ break;
+ case WCN7850_DEVICE_ID:
+ ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD;
+ ab_pci->msi_config = &ath12k_wifi7_msi_config[0];
+ ab->static_window_map = false;
+ ab_pci->pci_ops = &ath12k_wifi7_pci_ops_wcn7850;
+		/*
+		 * Initialize the window register address before reading the
+		 * hardware version, as the read depends on it.
+		 */
+ ab_pci->window_reg_addr = WINDOW_REG_ADDRESS;
+ ath12k_wifi7_pci_read_hw_version(ab, &soc_hw_version_major,
+ &soc_hw_version_minor);
+ ab->target_mem_mode = ATH12K_QMI_MEMORY_MODE_DEFAULT;
+ switch (soc_hw_version_major) {
+ case ATH12K_PCI_W7_SOC_HW_VERSION_2:
+ ab->hw_rev = ATH12K_HW_WCN7850_HW20;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "Unknown hardware version found for WCN7850: 0x%x\n",
+ soc_hw_version_major);
+ return -EOPNOTSUPP;
+ }
+ break;
+ case QCC2072_DEVICE_ID:
+ ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD;
+ ab_pci->msi_config = &ath12k_wifi7_msi_config[0];
+ ab->static_window_map = false;
+ ab_pci->pci_ops = &ath12k_wifi7_pci_ops_wcn7850;
+ ab_pci->window_reg_addr = WINDOW_REG_ADDRESS_QCC2072;
+ ab->target_mem_mode = ATH12K_QMI_MEMORY_MODE_DEFAULT;
+		/* only one hardware version exists so far */
+ ab->hw_rev = ATH12K_HW_QCC2072_HW10;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown Wi-Fi 7 PCI device found: 0x%x\n",
+ pci_dev->device);
+ return -EOPNOTSUPP;
+ }
+
+ ret = ath12k_wifi7_hw_init(ab);
+ if (ret) {
+		dev_err(&pdev->dev, "Wi-Fi 7 hw_init for PCI failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct ath12k_pci_reg_base ath12k_wifi7_reg_base = {
+ .umac_base = HAL_SEQ_WCSS_UMAC_OFFSET,
+ .ce_reg_base = HAL_CE_WFSS_CE_REG_BASE,
+};
+
+static struct ath12k_pci_driver ath12k_wifi7_pci_driver = {
+ .name = "ath12k_wifi7_pci",
+ .id_table = ath12k_wifi7_pci_id_table,
+ .ops.probe = ath12k_wifi7_pci_probe,
+ .reg_base = &ath12k_wifi7_reg_base,
+ .ops.arch_init = ath12k_wifi7_arch_init,
+ .ops.arch_deinit = ath12k_wifi7_arch_deinit,
+};
+
+int ath12k_wifi7_pci_init(void)
+{
+ int ret;
+
+ ret = ath12k_pci_register_driver(ATH12K_DEVICE_FAMILY_WIFI7,
+ &ath12k_wifi7_pci_driver);
+ if (ret) {
+ pr_err("Failed to register ath12k Wi-Fi 7 driver: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath12k_wifi7_pci_exit(void)
+{
+ ath12k_pci_unregister_driver(ATH12K_DEVICE_FAMILY_WIFI7);
+}
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/pci.h b/drivers/net/wireless/ath/ath12k/wifi7/pci.h
new file mode 100644
index 000000000000..662a8bab0ce7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/pci.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+#ifndef ATH12K_PCI_WIFI7_H
+#define ATH12K_PCI_WIFI7_H
+
+int ath12k_wifi7_pci_init(void);
+void ath12k_wifi7_pci_exit(void);
+
+#endif /* ATH12K_PCI_WIFI7_H */
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/wmi.c b/drivers/net/wireless/ath/ath12k/wifi7/wmi.c
new file mode 100644
index 000000000000..ed538d20d324
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/wmi.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "../core.h"
+#include "wmi.h"
+
+void ath12k_wifi7_wmi_init_qcn9274(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config)
+{
+ config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
+ config->num_peers = ab->num_radios *
+ ath12k_core_get_max_peers_per_radio(ab);
+ config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
+ config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+
+ if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
+ config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
+ else
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
+ config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
+ config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
+ config->dma_burst_size = TARGET_DMA_BURST_SIZE;
+ config->rx_skip_defrag_timeout_dup_detection_check =
+ TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
+ config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
+ config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+	/* Indicates the host supports peer map v3 and unmap v2 */
+ config->peer_map_unmap_version = 0x32;
+ config->twt_ap_pdev_count = ab->num_radios;
+ config->twt_ap_sta_count = 1000;
+ config->ema_max_vap_cnt = ab->num_radios;
+ config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
+ config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
+ config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
+}
+
+void ath12k_wifi7_wmi_init_wcn7850(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config)
+{
+ config->num_vdevs = 4;
+ config->num_peers = 16;
+ config->num_tids = 32;
+
+ config->num_offload_peers = 3;
+ config->num_offload_reorder_buffs = 3;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = 0;
+ config->num_mcast_table_elems = 0;
+ config->mcast2ucast_mode = 0;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = 0;
+ config->dma_burst_size = 0;
+ config->rx_skip_defrag_timeout_dup_detection_check = 0;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = 2;
+ config->num_msdu_desc = 0x400;
+ config->beacon_tx_offload_max_vdev = 2;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+
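+	/* Advertise peer map/unmap v1 support */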
+ config->peer_map_unmap_version = 0x1;
+ config->use_pdev_id = 1;
+ config->max_frag_entries = 0xa;
+ config->num_tdls_vdevs = 0x1;
+ config->num_tdls_conn_table_entries = 8;
+ config->num_multicast_filter_entries = 0x20;
+ config->num_wow_filters = 0x16;
+ config->num_keep_alive_pattern = 0;
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
+ config->peer_metadata_ver = ATH12K_PEER_METADATA_V1A;
+ else
+ config->peer_metadata_ver = ab->wmi_ab.dp_peer_meta_data_ver;
+}
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/wmi.h b/drivers/net/wireless/ath/ath12k/wifi7/wmi.h
new file mode 100644
index 000000000000..ae74e176fa2d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wifi7/wmi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH12K_WMI_WIFI7_H
+#define ATH12K_WMI_WIFI7_H
+
+void ath12k_wifi7_wmi_init_qcn9274(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config);
+void ath12k_wifi7_wmi_init_wcn7850(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 3ce5fcb0e460..7617fc3a2479 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -126,6 +126,14 @@ struct wmi_tlv_mgmt_rx_parse {
bool frame_buf_done;
};
+struct wmi_pdev_set_obss_bitmap_arg {
+ u32 tlv_tag;
+ u32 pdev_id;
+ u32 cmd_id;
+ const u32 *bitmap;
+ const char *label;
+};
+
static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
@@ -206,103 +214,6 @@ static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}
-void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
- struct ath12k_wmi_resource_config_arg *config)
-{
- config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
- config->num_peers = ab->num_radios *
- ath12k_core_get_max_peers_per_radio(ab);
- config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
- config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
- config->num_peer_keys = TARGET_NUM_PEER_KEYS;
- config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
- config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
- config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
- config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
- config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
- config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
- config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
-
- if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
- config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
- else
- config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
-
- config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
- config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
- config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
- config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
- config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
- config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
- config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
- config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
- config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
- config->dma_burst_size = TARGET_DMA_BURST_SIZE;
- config->rx_skip_defrag_timeout_dup_detection_check =
- TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
- config->vow_config = TARGET_VOW_CONFIG;
- config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
- config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
- config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
- config->rx_batchmode = TARGET_RX_BATCHMODE;
- /* Indicates host supports peer map v3 and unmap v2 support */
- config->peer_map_unmap_version = 0x32;
- config->twt_ap_pdev_count = ab->num_radios;
- config->twt_ap_sta_count = 1000;
- config->ema_max_vap_cnt = ab->num_radios;
- config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
- config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
-
- if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
- config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
-}
-
-void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
- struct ath12k_wmi_resource_config_arg *config)
-{
- config->num_vdevs = 4;
- config->num_peers = 16;
- config->num_tids = 32;
-
- config->num_offload_peers = 3;
- config->num_offload_reorder_buffs = 3;
- config->num_peer_keys = TARGET_NUM_PEER_KEYS;
- config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
- config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
- config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
- config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
- config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
- config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
- config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
- config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
- config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
- config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
- config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
- config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
- config->num_mcast_groups = 0;
- config->num_mcast_table_elems = 0;
- config->mcast2ucast_mode = 0;
- config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
- config->num_wds_entries = 0;
- config->dma_burst_size = 0;
- config->rx_skip_defrag_timeout_dup_detection_check = 0;
- config->vow_config = TARGET_VOW_CONFIG;
- config->gtk_offload_max_vdev = 2;
- config->num_msdu_desc = 0x400;
- config->beacon_tx_offload_max_vdev = 2;
- config->rx_batchmode = TARGET_RX_BATCHMODE;
-
- config->peer_map_unmap_version = 0x1;
- config->use_pdev_id = 1;
- config->max_frag_entries = 0xa;
- config->num_tdls_vdevs = 0x1;
- config->num_tdls_conn_table_entries = 8;
- config->beacon_tx_offload_max_vdev = 0x2;
- config->num_multicast_filter_entries = 0x20;
- config->num_wow_filters = 0x16;
- config->num_keep_alive_pattern = 0;
-}
-
#define PRIMAP(_hw_mode_) \
[_hw_mode_] = _hw_mode_##_PRI
@@ -496,6 +407,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
struct ath12k_band_cap *cap_band;
struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
struct ath12k_fw_pdev *fw_pdev;
+ u32 supported_bands;
u32 phy_map;
u32 hw_idx, phy_idx = 0;
int i;
@@ -519,14 +431,19 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
return -EINVAL;
mac_caps = wmi_mac_phy_caps + phy_idx;
+ supported_bands = le32_to_cpu(mac_caps->supported_bands);
+
+ if (!(supported_bands & WMI_HOST_WLAN_2GHZ_CAP) &&
+ !(supported_bands & WMI_HOST_WLAN_5GHZ_CAP))
+ return -EINVAL;
pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
- pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
+ pdev_cap->supported_bands |= supported_bands;
pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
- fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
+ fw_pdev->supported_bands = supported_bands;
fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
ab->fw_pdev_count++;
@@ -535,10 +452,12 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
* band to band for a single radio, need to see how this should be
* handled.
*/
- if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
+ if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
- } else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
@@ -548,8 +467,6 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
WMI_NSS_RATIO_EN_DIS_GET(mac_caps->nss_ratio);
pdev_cap->nss_ratio_info =
WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio);
- } else {
- return -EINVAL;
}
/* tx/rx chainmask reported from fw depends on the actual hw chains used,
@@ -565,7 +482,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
pdev_cap->rx_chain_mask_shift =
find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
- if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
+ if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
@@ -585,7 +502,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
}
- if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
+ if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
cap_band->max_bw_supported =
@@ -2897,7 +2814,8 @@ int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
sizeof(*chan_info);
- num_send_chans = min(arg->nallchans, max_chan_limit);
+ num_send_chans = min3(arg->nallchans, max_chan_limit,
+ ATH12K_WMI_MAX_NUM_CHAN_PER_CMD);
arg->nallchans -= num_send_chans;
len += sizeof(*chan_info) * num_send_chans;
@@ -3650,6 +3568,140 @@ ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
return ret;
}
+u32 ath12k_wmi_build_obss_pd(const struct ath12k_wmi_obss_pd_arg *arg)
+{
+ u32 param_val = 0;
+
+ param_val |= u32_encode_bits((u8)arg->srg_th, GENMASK(15, 8));
+ param_val |= u32_encode_bits((u8)arg->non_srg_th, GENMASK(7, 0));
+
+ if (arg->srp_support)
+ param_val |= ATH12K_OBSS_PD_THRESHOLD_IN_DBM;
+
+ if (arg->srg_enabled && arg->srp_support)
+ param_val |= ATH12K_OBSS_PD_SRG_EN;
+
+ if (arg->non_srg_enabled)
+ param_val |= ATH12K_OBSS_PD_NON_SRG_EN;
+
+ return param_val;
+}
+
+static int ath12k_wmi_pdev_set_obss_bitmap(struct ath12k *ar,
+ const struct wmi_pdev_set_obss_bitmap_arg *arg)
+{
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ const int len = sizeof(*cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(arg->tlv_tag, len);
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
+ memcpy(cmd->bitmap, arg->bitmap, sizeof(cmd->bitmap));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi set pdev %u %s %08x %08x\n",
+ arg->pdev_id, arg->label, arg->bitmap[0], arg->bitmap[1]);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, arg->cmd_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send %s: %d\n", arg->label, ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_pdev_set_srg_bss_color_bitmap(struct ath12k *ar,
+ u32 pdev_id, const u32 *bitmap)
+{
+ struct wmi_pdev_set_obss_bitmap_arg arg = {
+ .tlv_tag = WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD,
+ .pdev_id = pdev_id,
+ .cmd_id = WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID,
+ .bitmap = bitmap,
+ .label = "SRG bss color bitmap",
+ };
+
+ return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg);
+}
+
+int ath12k_wmi_pdev_set_srg_partial_bssid_bitmap(struct ath12k *ar,
+ u32 pdev_id, const u32 *bitmap)
+{
+ struct wmi_pdev_set_obss_bitmap_arg arg = {
+ .tlv_tag = WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD,
+ .pdev_id = pdev_id,
+ .cmd_id = WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID,
+ .bitmap = bitmap,
+ .label = "SRG partial bssid bitmap",
+ };
+
+ return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg);
+}
+
+int ath12k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath12k *ar,
+ u32 pdev_id, const u32 *bitmap)
+{
+ struct wmi_pdev_set_obss_bitmap_arg arg = {
+ .tlv_tag = WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
+ .pdev_id = pdev_id,
+ .cmd_id = WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+ .bitmap = bitmap,
+ .label = "SRG obss color enable bitmap",
+ };
+
+ return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg);
+}
+
+int ath12k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath12k *ar,
+ u32 pdev_id, const u32 *bitmap)
+{
+ struct wmi_pdev_set_obss_bitmap_arg arg = {
+ .tlv_tag = WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+ .pdev_id = pdev_id,
+ .cmd_id = WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+ .bitmap = bitmap,
+ .label = "SRG obss bssid enable bitmap",
+ };
+
+ return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg);
+}
+
+int ath12k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath12k *ar,
+ u32 pdev_id, const u32 *bitmap)
+{
+ struct wmi_pdev_set_obss_bitmap_arg arg = {
+ .tlv_tag = WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
+ .pdev_id = pdev_id,
+ .cmd_id = WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+ .bitmap = bitmap,
+ .label = "non SRG obss color enable bitmap",
+ };
+
+ return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg);
+}
+
+int ath12k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath12k *ar,
+ u32 pdev_id, const u32 *bitmap)
+{
+ struct wmi_pdev_set_obss_bitmap_arg arg = {
+ .tlv_tag = WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+ .pdev_id = pdev_id,
+ .cmd_id = WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+ .bitmap = bitmap,
+ .label = "non SRG obss bssid enable bitmap",
+ };
+
+ return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg);
+}
+
int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
u8 bss_color, u32 period,
bool enable)
@@ -4217,6 +4269,7 @@ int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
int ath12k_wmi_cmd_init(struct ath12k_base *ab)
{
+ struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
struct ath12k_wmi_init_cmd_arg arg = {};
@@ -4237,7 +4290,7 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab)
arg.num_band_to_mac = ab->num_radios;
ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
- ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
+ dp->peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
}
@@ -4545,7 +4598,7 @@ static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
pref = soc->wmi_ab.preferred_hw_mode;
- if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
+ if (ath12k_hw_mode_pri_map[mode] <= ath12k_hw_mode_pri_map[pref]) {
svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
soc->wmi_ab.preferred_hw_mode = mode;
}
@@ -5014,19 +5067,10 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
const struct ath12k_wmi_caps_ext_params *caps,
struct ath12k_pdev *pdev)
{
- struct ath12k_band_cap *cap_band;
- u32 bands, support_320mhz;
+ u32 bands;
int i;
if (ab->hw_params->single_pdev_only) {
- if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
- support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
- IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
- cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
- cap_band->eht_cap_phy_info[0] |= support_320mhz;
- return 0;
- }
-
for (i = 0; i < ab->fw_pdev_count; i++) {
struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
@@ -5079,14 +5123,22 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
void *data)
{
const struct ath12k_wmi_caps_ext_params *caps = ptr;
+ struct ath12k_band_cap *cap_band;
+ u32 support_320mhz;
int i = 0, ret;
if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
return -EPROTO;
if (ab->hw_params->single_pdev_only) {
- if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
- caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE)
+ if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
+ support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+ cap_band = &ab->pdevs[0].cap.band[NL80211_BAND_6GHZ];
+ cap_band->eht_cap_phy_info[0] |= support_320mhz;
+ }
+
+ if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id))
return 0;
} else {
for (i = 0; i < ab->num_radios; i++) {
@@ -5567,6 +5619,10 @@ static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
ret);
return ret;
}
+
+ ab->wmi_ab.dp_peer_meta_data_ver =
+ u32_get_bits(parse->arg.target_cap_flags,
+ WMI_TARGET_CAP_FLAGS_RX_PEER_METADATA_VERSION);
break;
case WMI_TAG_ARRAY_STRUCT:
@@ -7372,8 +7428,8 @@ static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff
struct wmi_peer_sta_kickout_arg arg = {};
struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
- struct ath12k_peer *peer;
- unsigned int link_id;
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
struct ath12k *ar;
if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
@@ -7385,42 +7441,24 @@ static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff
spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
+ arsta = ath12k_link_sta_find_by_addr(ab, arg.mac_addr);
- if (!peer) {
- ath12k_warn(ab, "peer not found %pM\n",
+ if (!arsta) {
+ ath12k_warn(ab, "arsta not found %pM\n",
arg.mac_addr);
goto exit;
}
- arvif = ath12k_mac_get_arvif_by_vdev_id(ab, peer->vdev_id);
+ arvif = arsta->arvif;
if (!arvif) {
- ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
- peer->vdev_id);
+ ath12k_warn(ab, "invalid arvif in peer sta kickout ev for STA %pM",
+ arg.mac_addr);
goto exit;
}
ar = arvif->ar;
-
- if (peer->mlo) {
- sta = ieee80211_find_sta_by_link_addrs(ath12k_ar_to_hw(ar),
- arg.mac_addr,
- NULL, &link_id);
- if (peer->link_id != link_id) {
- ath12k_warn(ab,
- "Spurious quick kickout for MLO STA %pM with invalid link_id, peer: %d, sta: %d\n",
- arg.mac_addr, peer->link_id, link_id);
- goto exit;
- }
- } else {
- sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
- arg.mac_addr, NULL);
- }
- if (!sta) {
- ath12k_warn(ab, "Spurious quick kickout for %sSTA %pM\n",
- peer->mlo ? "MLO " : "", arg.mac_addr);
- goto exit;
- }
+ ahsta = arsta->ahsta;
+ sta = ath12k_ahsta_to_sta(ahsta);
ath12k_dbg(ab, ATH12K_DBG_WMI,
"peer sta kickout event %pM reason: %d rssi: %d\n",
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index f99fced1610e..0bf0a7941cd3 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -9,6 +9,7 @@
#include <net/mac80211.h>
#include "htc.h"
+#include "cmn_defs.h"
/* Naming conventions for structures:
*
@@ -373,6 +374,12 @@ enum wmi_tlv_cmd_id {
WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID = 0x403b,
+ WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID,
+ WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID = 0x4044,
WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID = 0x4045,
WMI_PDEV_SET_BIOS_INTERFACE_CMDID = 0x404A,
@@ -1075,6 +1082,9 @@ enum wmi_tlv_pdev_param {
WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+ WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD = 0xbc,
+ WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC = 0xbe,
+ WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT = 0xc6,
};
enum wmi_tlv_vdev_param {
@@ -1986,6 +1996,12 @@ enum wmi_tlv_tag {
WMI_TAG_SERVICE_READY_EXT2_EVENT = 0x334,
WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344,
WMI_TAG_MAC_PHY_CAPABILITIES_EXT = 0x36F,
+ WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD = 0x37b,
+ WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD,
+ WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD = 0x381,
+ WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+ WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
+ WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
WMI_TAG_TPC_STATS_GET_CMD = 0x38B,
@@ -2243,6 +2259,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
WMI_TLV_SERVICE_EXT2_MSG = 220,
WMI_TLV_SERVICE_BEACON_PROTECTION_SUPPORT = 244,
+ WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253,
WMI_MAX_EXT_SERVICE = 256,
@@ -2782,6 +2799,8 @@ enum wmi_channel_width {
#define WMI_EHT_MCS_NSS_10_11 GENMASK(11, 8)
#define WMI_EHT_MCS_NSS_12_13 GENMASK(15, 12)
+#define WMI_TARGET_CAP_FLAGS_RX_PEER_METADATA_VERSION GENMASK(1, 0)
+
struct wmi_service_ready_ext2_event {
__le32 reg_db_version;
__le32 hw_min_max_tx_power_2ghz;
@@ -4922,6 +4941,12 @@ struct wmi_obss_spatial_reuse_params_cmd {
__le32 vdev_id;
} __packed;
+struct wmi_pdev_obss_pd_bitmap_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 bitmap[2];
+} __packed;
+
#define ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS 200
#define ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE 0
#define ATH12K_OBSS_COLOR_COLLISION_DETECTION 1
@@ -5151,8 +5176,6 @@ struct wmi_probe_tmpl_cmd {
__le32 buf_len;
} __packed;
-#define MAX_RADIOS 2
-
#define WMI_MLO_CMD_TIMEOUT_HZ (5 * HZ)
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
@@ -5231,6 +5254,8 @@ struct ath12k_wmi_base {
struct ath12k_svc_ext_info svc_ext_info;
u32 sbs_lower_band_end_freq;
struct ath12k_hw_mode_info hw_mode_info;
+
+ u8 dp_peer_meta_data_ver;
};
struct wmi_pdev_set_bios_interface_cmd {
@@ -6323,10 +6348,21 @@ struct ath12k_wmi_rssi_dbm_conv_info_arg {
s8 min_nf_dbm;
};
-void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
- struct ath12k_wmi_resource_config_arg *config);
-void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
- struct ath12k_wmi_resource_config_arg *config);
+/* each WMI command can hold at most 58 channel entries */
+#define ATH12K_WMI_MAX_NUM_CHAN_PER_CMD 58
+
+#define ATH12K_OBSS_PD_THRESHOLD_IN_DBM BIT(29)
+#define ATH12K_OBSS_PD_SRG_EN BIT(30)
+#define ATH12K_OBSS_PD_NON_SRG_EN BIT(31)
+
+struct ath12k_wmi_obss_pd_arg {
+ bool srp_support;
+ bool srg_enabled;
+ bool non_srg_enabled;
+ s8 srg_th;
+ s8 non_srg_th;
+};
+
int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
u32 cmd_id);
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len);
@@ -6430,6 +6466,19 @@ int ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id);
int ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id);
int ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
struct ieee80211_he_obss_pd *he_obss_pd);
+u32 ath12k_wmi_build_obss_pd(const struct ath12k_wmi_obss_pd_arg *arg);
+int ath12k_wmi_pdev_set_srg_bss_color_bitmap(struct ath12k *ar, u32 pdev_id,
+ const u32 *bitmap);
+int ath12k_wmi_pdev_set_srg_partial_bssid_bitmap(struct ath12k *ar, u32 pdev_id,
+ const u32 *bitmap);
+int ath12k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath12k *ar, u32 pdev_id,
+ const u32 *bitmap);
+int ath12k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath12k *ar, u32 pdev_id,
+ const u32 *bitmap);
+int ath12k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath12k *ar, u32 pdev_id,
+ const u32 *bitmap);
+int ath12k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath12k *ar, u32 pdev_id,
+ const u32 *bitmap);
int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
u8 bss_color, u32 period,
bool enable);
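Note: the min3() change in ath12k_wmi_send_scan_chan_list_cmd() clamps each command to the smallest of the remaining channel count, the message-size limit, and the new 58-entry ATH12K_WMI_MAX_NUM_CHAN_PER_CMD cap. A standalone sketch of the resulting chunking loop, with made-up counts:

/* Illustration of the per-command channel chunking; the values are
 * hypothetical, only the 58-entry cap mirrors the new define.
 */
#include <stdio.h>

#define MAX_CHAN_PER_CMD 58

static int min3i(int a, int b, int c)
{
	int m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	int nallchans = 190;		/* hypothetical total channel count */
	int max_by_msg_len = 64;	/* hypothetical per-message space */
	int cmd = 0;

	while (nallchans > 0) {
		int send = min3i(nallchans, max_by_msg_len, MAX_CHAN_PER_CMD);

		nallchans -= send;
		printf("cmd %d: %d channels (%d remaining)\n",
		       ++cmd, send, nallchans);
	}
	return 0;
}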
diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c
index e8481626f194..bb08e1740582 100644
--- a/drivers/net/wireless/ath/ath12k/wow.c
+++ b/drivers/net/wireless/ath/ath12k/wow.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/delay.h>
@@ -135,6 +135,9 @@ static int ath12k_wow_cleanup(struct ath12k *ar)
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif != &arvif->ahvif->deflink)
+ continue;
+
ret = ath12k_wow_vif_cleanup(arvif);
if (ret) {
ath12k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
@@ -479,8 +482,12 @@ static int ath12k_wow_set_wakeups(struct ath12k *ar,
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif != &arvif->ahvif->deflink)
+ continue;
+
if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
continue;
+
ret = ath12k_wow_vif_set_wakeups(arvif, wowlan);
if (ret) {
ath12k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
@@ -538,6 +545,9 @@ static int ath12k_wow_nlo_cleanup(struct ath12k *ar)
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif != &arvif->ahvif->deflink)
+ continue;
+
if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
continue;
@@ -745,6 +755,9 @@ static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable)
list_for_each_entry(arvif, &ar->arvifs, list) {
ahvif = arvif->ahvif;
+ if (arvif != &ahvif->deflink)
+ continue;
+
if (ahvif->vdev_type != WMI_VDEV_TYPE_STA)
continue;
@@ -776,6 +789,9 @@ static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable)
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif != &arvif->ahvif->deflink)
+ continue;
+
if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA ||
!arvif->is_up ||
!arvif->rekey_data.enable_offload)
@@ -919,6 +935,7 @@ cleanup:
exit:
return ret ? 1 : 0;
}
+EXPORT_SYMBOL(ath12k_wow_op_suspend);
void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
@@ -929,6 +946,7 @@ void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
device_set_wakeup_enable(ar->ab->dev, enabled);
}
+EXPORT_SYMBOL(ath12k_wow_op_set_wakeup);
int ath12k_wow_op_resume(struct ieee80211_hw *hw)
{
@@ -1001,6 +1019,7 @@ exit:
return ret;
}
+EXPORT_SYMBOL(ath12k_wow_op_resume);
int ath12k_wow_init(struct ath12k *ar)
{
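Note: the WoW hunks above all add the same guard: offload setup and cleanup run only for a vif's default link, skipping other list entries. A toy userspace model of that filter, with simplified stand-ins for the ath12k structures:

/* Toy model of the deflink filter repeated through the WoW paths;
 * struct vif/link_vif are simplified stand-ins for
 * ath12k_vif/ath12k_link_vif.
 */
#include <stdio.h>

struct vif;

struct link_vif {
	struct vif *ahvif;
	int vdev_id;
};

struct vif {
	struct link_vif deflink;	/* the default (first) link */
};

static void handle_link(struct link_vif *arvif)
{
	printf("configuring wakeups on vdev %d\n", arvif->vdev_id);
}

int main(void)
{
	struct vif v0, v1;
	struct link_vif extra = { .ahvif = &v0, .vdev_id = 7 };
	struct link_vif *list[3];

	v0.deflink = (struct link_vif){ .ahvif = &v0, .vdev_id = 0 };
	v1.deflink = (struct link_vif){ .ahvif = &v1, .vdev_id = 1 };
	list[0] = &v0.deflink;
	list[1] = &extra;		/* non-default link: skipped */
	list[2] = &v1.deflink;

	for (int i = 0; i < 3; i++) {
		struct link_vif *arvif = list[i];

		if (arvif != &arvif->ahvif->deflink)
			continue;	/* only the default link gets WoW config */
		handle_link(arvif);
	}
	return 0;
}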
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 0a3f916a1ef3..fd323ae84c95 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -80,11 +80,9 @@ struct ath5k_dbg_info {
* @ATH5K_DEBUG_CALIBRATE: periodic calibration
* @ATH5K_DEBUG_TXPOWER: transmit power setting
* @ATH5K_DEBUG_LED: led management
- * @ATH5K_DEBUG_DUMP_RX: print received skb content
- * @ATH5K_DEBUG_DUMP_TX: print transmit skb content
* @ATH5K_DEBUG_DUMPBANDS: dump bands
* @ATH5K_DEBUG_DMA: debug dma start/stop
- * @ATH5K_DEBUG_TRACE: trace function calls
+ * @ATH5K_DEBUG_ANI: debug Adaptive Noise Immunity
* @ATH5K_DEBUG_DESC: descriptor setup
* @ATH5K_DEBUG_ANY: show at any debug level
*
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 0c47be06c153..47d570a5ca6a 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -47,7 +47,7 @@ config ATH9K_PCI
config ATH9K_AHB
bool "Atheros ath9k AHB bus support"
- depends on ATH9K
+ depends on ATH9K && OF
default n
help
This option enables the AHB bus support in ath9k.
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.h b/drivers/net/wireless/ath/ath9k/common-debug.h
index 2938b5b96b07..97948af97682 100644
--- a/drivers/net/wireless/ath/ath9k/common-debug.h
+++ b/drivers/net/wireless/ath/ath9k/common-debug.h
@@ -19,14 +19,14 @@
/**
* struct ath_rx_stats - RX Statistics
* @rx_pkts_all: No. of total frames received, including ones that
- may have had errors.
+ * may have had errors.
* @rx_bytes_all: No. of total bytes received, including ones that
- may have had errors.
+ * may have had errors.
* @crc_err: No. of frames with incorrect CRC value
* @decrypt_crc_err: No. of frames whose CRC check failed after
- decryption process completed
+ * decryption process completed
* @phy_err: No. of frames whose reception failed because the PHY
- encountered an error
+ * encountered an error
* @mic_err: No. of frames with incorrect TKIP MIC verification failure
* @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
* @post_delim_crc_err: Post-Frame delimiter CRC error detections
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index ffcf2276eb92..f55b3afb3777 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -403,15 +403,3 @@ void ath9k_cmn_init_crypto(struct ath_hw *ah)
ath_hw_keyreset(common, (u16) i);
}
EXPORT_SYMBOL(ath9k_cmn_init_crypto);
-
-static int __init ath9k_cmn_init(void)
-{
- return 0;
-}
-module_init(ath9k_cmn_init);
-
-static void __exit ath9k_cmn_exit(void)
-{
- return;
-}
-module_exit(ath9k_cmn_exit);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index cb3e75969875..804e2a0a0c20 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -142,11 +142,12 @@ struct ath_interrupt_stats {
/**
* struct ath_tx_stats - Statistics about TX
* @tx_pkts_all: No. of total frames transmitted, including ones that
- may have had errors.
+ * may have had errors.
* @tx_bytes_all: No. of total bytes transmitted, including ones that
- may have had errors.
+ * may have had errors.
* @queued: Total MPDUs (non-aggr) queued
* @completed: Total MPDUs (non-aggr) completed
+ * @xretries: Total MPDUs with xretries
* @a_aggr: Total no. of aggregates queued
* @a_queued_hw: Total AMPDUs queued to hardware
* @a_completed: Total AMPDUs completed
@@ -154,14 +155,14 @@ struct ath_interrupt_stats {
* @a_xretries: No. of AMPDUs dropped due to xretries
* @txerr_filtered: No. of frames with TXERR_FILT flag set.
* @fifo_underrun: FIFO underrun occurrences
- Valid only for:
- - non-aggregate condition.
- - first packet of aggregate.
+ * Valid only for:
+ * - non-aggregate condition.
+ * - first packet of aggregate.
* @xtxop: No. of frames filtered because of TXOP limit
* @timer_exp: Transmit timer expiry
* @desc_cfg_err: Descriptor configuration errors
- * @data_urn: TX data underrun errors
- * @delim_urn: TX delimiter underrun errors
+ * @data_underrun: TX data underrun errors
+ * @delim_underrun: TX delimiter underrun errors
* @puttxbuf: Number of times hardware was given txbuf to write.
* @txstart: Number of times hardware was told to start tx.
* @txprocdesc: Number of times tx descriptor was processed
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 9bd1286d2857..31e107c81e2d 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -58,7 +58,7 @@ union wil_tx_desc;
*/
#define WIL_MAX_VIFS 4
-/**
+/*
* extract bits [@b0:@b1] (inclusive) from the value @x
* it should be @b0 <= @b1, or result is incorrect
*/
@@ -433,7 +433,7 @@ extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
* @cid: CID value
* @tid: TID value
*
- * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
+ * Returns: @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
*/
static inline u8 mk_cidxtid(u8 cid, u8 tid)
{
@@ -444,8 +444,7 @@ static inline u8 mk_cidxtid(u8 cid, u8 tid)
* parse_cidxtid - parse @cidxtid field
* @cid: store CID value here
* @tid: store TID value here
- *
- * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
+ * @cidxtid: field encoded as bits 0..3 - CID; 4..7 - TID
*/
static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
{
@@ -500,7 +499,7 @@ enum { /* for wil_ctx.mapped_as */
wil_mapped_as_page = 2,
};
-/**
+/*
* struct wil_ctx - software context for ring descriptor
*/
struct wil_ctx {
@@ -514,7 +513,7 @@ struct wil_desc_ring_rx_swtail { /* relevant for enhanced DMA only */
dma_addr_t pa;
};
-/**
+/*
* A general ring structure, used for RX and TX.
* In legacy DMA it represents the vring,
 * In enhanced DMA it represents the descriptor ring (vrings are handled by FW)
@@ -531,7 +530,7 @@ struct wil_ring {
bool is_rx;
};
-/**
+/*
* Additional data for Rx ring.
* Used for enhanced DMA RX chaining.
*/
@@ -543,7 +542,7 @@ struct wil_ring_rx_data {
u16 buff_size;
};
-/**
+/*
* Status ring structure, used for enhanced DMA completions for RX and TX.
*/
struct wil_status_ring {
@@ -586,8 +585,8 @@ struct wil_net_stats {
u32 ft_roams; /* relevant in STA mode */
};
-/**
- * struct tx_rx_ops - different TX/RX ops for legacy and enhanced
+/*
+ * struct wil_txrx_ops - different TX/RX ops for legacy and enhanced
* DMA flow
*/
struct wil_txrx_ops {
@@ -627,7 +626,7 @@ struct wil_txrx_ops {
irqreturn_t (*irq_rx)(int irq, void *cookie);
};
-/**
+/*
* Additional data for Tx ring
*/
struct wil_ring_tx_data {
@@ -658,7 +657,7 @@ enum { /* for wil6210_priv.status */
struct pci_dev;
/**
- * struct tid_ampdu_rx - TID aggregation information (Rx).
+ * struct wil_tid_ampdu_rx - TID aggregation information (Rx).
*
* @reorder_buf: buffer to reorder incoming aggregated MPDUs
* @last_rx: jiffies of last rx activity
@@ -728,7 +727,7 @@ enum wil_rekey_state {
WIL_REKEY_WAIT_M4_SENT = 2,
};
-/**
+/*
* struct wil_sta_info - data for peer
*
* Peer identified by its CID (connection ID)
@@ -741,7 +740,7 @@ struct wil_sta_info {
u8 mid;
enum wil_sta_status status;
struct wil_net_stats stats;
- /**
+ /*
* 20 latency bins. 1st bin counts packets with latency
* of 0..tx_latency_res, last bin counts packets with latency
* of 19*tx_latency_res and above.
@@ -882,7 +881,7 @@ struct wil6210_vif {
struct work_struct enable_tx_key_worker;
};
-/**
+/*
* RX buffer allocated for enhanced DMA RX descriptors
*/
struct wil_rx_buff {
@@ -891,7 +890,7 @@ struct wil_rx_buff {
int id;
};
-/**
+/*
* During Rx completion processing, the driver extracts a buffer ID which
* is used as an index to the rx_buff_mgmt.buff_arr array and then the SKB
* is given to the network stack and the buffer is moved from the 'active'
@@ -1147,7 +1146,7 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
wil_w(wil, reg, wil_r(wil, reg) & ~val);
}
-/**
+/*
* wil_cid_valid - check cid is valid
*/
static inline bool wil_cid_valid(struct wil6210_priv *wil, int cid)
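Note: mk_cidxtid()/parse_cidxtid() above pack a connection ID into bits 0..3 and a TID into bits 4..7 of one byte. A self-contained copy of that round-trip, outside the kernel:

/* Standalone version of the cidxtid packing documented above:
 * bits 0..3 carry the CID, bits 4..7 the TID.
 */
#include <assert.h>
#include <stdint.h>

static uint8_t mk_cidxtid(uint8_t cid, uint8_t tid)
{
	return (cid & 0xf) | ((tid & 0xf) << 4);
}

static void parse_cidxtid(uint8_t cidxtid, uint8_t *cid, uint8_t *tid)
{
	*cid = cidxtid & 0xf;
	*tid = (cidxtid >> 4) & 0xf;
}

int main(void)
{
	uint8_t cid, tid;

	parse_cidxtid(mk_cidxtid(5, 3), &cid, &tid);
	assert(cid == 5 && tid == 3);	/* round-trips losslessly */
	return 0;
}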
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index bb96b87b2a6e..61f7e620cab3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -932,7 +932,7 @@ static struct wireless_dev *brcmf_mon_add_vif(struct wiphy *wiphy,
ndev->type = ARPHRD_IEEE80211_RADIOTAP;
ndev->ieee80211_ptr = &vif->wdev;
ndev->needs_free_netdev = true;
- ndev->priv_destructor = brcmf_cfg80211_free_netdev;
+ ndev->priv_destructor = brcmf_cfg80211_free_vif;
SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
ifp = netdev_priv(ndev);
@@ -6082,7 +6082,7 @@ void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
kfree(vif);
}
-void brcmf_cfg80211_free_netdev(struct net_device *ndev)
+void brcmf_cfg80211_free_vif(struct net_device *ndev)
{
struct brcmf_cfg80211_vif *vif;
struct brcmf_if *ifp;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 273c80f2d483..6ceb30142905 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -182,7 +182,7 @@ struct brcmf_cfg80211_profile {
* @BRCMF_VIF_STATUS_CONNECTED: connected/joined successfully.
* @BRCMF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress.
* @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
- * @BRCMF_VIF_STATUS_EAP_SUCCUSS: EAPOL handshake successful.
+ * @BRCMF_VIF_STATUS_EAP_SUCCESS: EAPOL handshake successful.
* @BRCMF_VIF_STATUS_ASSOC_SUCCESS: successful SET_SSID received.
*/
enum brcmf_vif_status {
@@ -201,10 +201,12 @@ enum brcmf_vif_status {
* @probe_req_ie: IE info for probe request.
* @probe_res_ie: IE info for probe response.
* @beacon_ie: IE info for beacon frame.
+ * @assoc_req_ie: IE info for association request frame.
* @assoc_res_ie: IE info for association response frame.
* @probe_req_ie_len: IE info length for probe request.
* @probe_res_ie_len: IE info length for probe response.
* @beacon_ie_len: IE info length for beacon frame.
+ * @assoc_req_ie_len: IE info length for association request frame.
* @assoc_res_ie_len: IE info length for association response frame.
*/
struct vif_saved_ie {
@@ -227,12 +229,14 @@ struct vif_saved_ie {
* @wdev: wireless device.
* @profile: profile information.
* @sme_state: SME state using enum brcmf_vif_status bits.
+ * @saved_ie: saved IE info for a vif.
* @list: linked list.
* @mgmt_tx: completion for management frame transmit.
* @mgmt_tx_status: status of last management frame sent to firmware.
* @mgmt_tx_id:
* @mgmt_rx_reg: registered rx mgmt frame types.
* @mbss: Multiple BSS type, set if not first AP (not relevant for P2P).
+ * @is_11d: beacon contains country IE, enable regulatory 802.11d support
* @cqm_rssi_low: Lower RSSI limit for CQM monitoring
* @cqm_rssi_high: Upper RSSI limit for CQM monitoring
* @cqm_rssi_last: Last RSSI reading for CQM monitoring
@@ -489,7 +493,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
bool brcmf_is_apmode_operating(struct wiphy *wiphy);
void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
-void brcmf_cfg80211_free_netdev(struct net_device *ndev);
+void brcmf_cfg80211_free_vif(struct net_device *ndev);
int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags);
int brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 862a0336a0b5..616885d6db3f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -674,7 +674,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool locked)
netif_carrier_off(ndev);
- ndev->priv_destructor = brcmf_cfg80211_free_netdev;
+ ndev->priv_destructor = brcmf_cfg80211_free_vif;
brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
return 0;
@@ -692,7 +692,7 @@ void brcmf_net_detach(struct net_device *ndev, bool locked)
else
unregister_netdev(ndev);
} else {
- brcmf_cfg80211_free_netdev(ndev);
+ brcmf_cfg80211_free_vif(ndev);
free_netdev(ndev);
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index b4bba67a45ec..5258681218ea 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -4790,7 +4790,7 @@ void wlc_phy_init_lcnphy(struct brcms_phy *pi)
wlc_lcnphy_calib_modes(pi, PHY_PERICAL_PHYINIT);
}
-static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
+static void wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
{
s8 txpwr = 0;
int i;
@@ -4879,8 +4879,6 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
sprom->ant_available_bg);
}
pi_lcn->lcnphy_cck_dig_filt_type = -1;
-
- return true;
}
void wlc_2064_vco_cal(struct brcms_phy *pi)
@@ -4992,10 +4990,7 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft;
pi->pi_fptr.detach = wlc_phy_detach_lcnphy;
- if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) {
- kfree(pi->u.pi_lcnphy);
- return false;
- }
+ wlc_phy_txpwr_srom_read_lcnphy(pi);
if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
if (pi_lcn->lcnphy_tempsense_option == 3) {
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 104748fcdc33..54991f31c52c 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3224,7 +3224,9 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr,
D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n",
type, params.channel, buf);
+ mutex_lock(&il->mutex);
il3945_get_measurement(il, &params, type);
+ mutex_unlock(&il->mutex);
return count;
}
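Note: the fix above wraps the measurement request in the sysfs store handler with the device mutex so it cannot race other configuration paths. A minimal pthread model of the pattern; all names are illustrative:

/* Minimal pthread model of serializing a store handler against other
 * device operations, as the patch does with mutex_lock(&il->mutex).
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t il_mutex = PTHREAD_MUTEX_INITIALIZER;
static int channel;			/* device state the lock protects */

static void get_measurement(int chan)
{
	channel = chan;			/* touches shared device state */
	printf("measuring on channel %d\n", channel);
}

static void store_measurement(int chan)
{
	pthread_mutex_lock(&il_mutex);
	get_measurement(chan);
	pthread_mutex_unlock(&il_mutex);
}

int main(void)
{
	store_measurement(6);
	return 0;
}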
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index 1826c37c090c..ecc6c8d2a4c5 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -448,11 +448,6 @@ il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
return;
}
- if (!il_sta) {
- D_RATE("leave: No STA il data to update!\n");
- return;
- }
-
/* Treat uninitialized rate scaling data same as non-existing. */
if (!rs_sta->il) {
D_RATE("leave: STA il data uninitialized!\n");
@@ -627,7 +622,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
D_RATE("enter\n");
/* Treat uninitialized rate scaling data same as non-existing. */
- if (rs_sta && !rs_sta->il) {
+ if (!rs_sta->il) {
D_RATE("Rate scaling information not initialized yet.\n");
il_sta = NULL;
}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 3588dec75ebd..57fa866efd9f 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -4606,7 +4606,9 @@ il4965_store_tx_power(struct device *d, struct device_attribute *attr,
if (ret)
IL_INFO("%s is not in decimal form.\n", buf);
else {
+ mutex_lock(&il->mutex);
ret = il_set_tx_power(il, val, false);
+ mutex_unlock(&il->mutex);
if (ret)
IL_ERR("failed setting tx power (0x%08x).\n", ret);
else
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index d25445bd1e5c..77db8c75e6e2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -19,12 +19,6 @@
#define IWL_BZ_SMEM_OFFSET 0x400000
#define IWL_BZ_SMEM_LEN 0xD0000
-#define IWL_BZ_A_FM_B_FW_PRE "iwlwifi-bz-a0-fm-b0"
-#define IWL_BZ_A_FM_C_FW_PRE "iwlwifi-bz-a0-fm-c0"
-#define IWL_BZ_A_FM4_B_FW_PRE "iwlwifi-bz-a0-fm4-b0"
-#define IWL_GL_B_FM_B_FW_PRE "iwlwifi-gl-b0-fm-b0"
-#define IWL_GL_C_FM_C_FW_PRE "iwlwifi-gl-c0-fm-c0"
-
static const struct iwl_family_base_params iwl_bz_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
@@ -100,9 +94,3 @@ const struct iwl_mac_cfg iwl_gl_mac_cfg = {
.xtal_latency = 12000,
.low_latency_xtal = true,
};
-
-IWL_CORE_FW(IWL_BZ_A_FM_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
-IWL_CORE_FW(IWL_BZ_A_FM_C_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
-IWL_CORE_FW(IWL_BZ_A_FM4_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
-IWL_CORE_FW(IWL_GL_B_FM_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
-IWL_CORE_FW(IWL_GL_C_FM_C_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c
index fd82050e33a3..ad2536f53084 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c
@@ -5,6 +5,12 @@
*/
#include "iwl-config.h"
+#define IWL_BZ_A_FM_B_FW_PRE "iwlwifi-bz-a0-fm-b0"
+#define IWL_BZ_A_FM_C_FW_PRE "iwlwifi-bz-a0-fm-c0"
+#define IWL_BZ_A_FM4_B_FW_PRE "iwlwifi-bz-a0-fm4-b0"
+#define IWL_GL_B_FM_B_FW_PRE "iwlwifi-gl-b0-fm-b0"
+#define IWL_GL_C_FM_C_FW_PRE "iwlwifi-gl-c0-fm-c0"
+
/* NVM versions */
#define IWL_FM_NVM_VERSION 0x0a1d
@@ -50,3 +56,9 @@ const char iwl_be201_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
const char iwl_be200_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
const char iwl_be202_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
const char iwl_be401_name[] = "Intel(R) Wi-Fi 7 BE401 320MHz";
+
+IWL_CORE_FW(IWL_BZ_A_FM_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_BZ_A_FM_C_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_BZ_A_FM4_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_GL_B_FM_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_GL_C_FM_C_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c
index 408b9850bd10..2c29054ce7b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c
@@ -13,5 +13,4 @@ const char iwl_killer_bn1850i_name[] =
const char iwl_bn201_name[] = "Intel(R) Wi-Fi 8 BN201";
const char iwl_bn203_name[] = "Intel(R) Wi-Fi 8 BN203";
-const char iwl_be221_name[] = "Intel(R) Wi-Fi 7 BE221";
const char iwl_be223_name[] = "Intel(R) Wi-Fi 7 BE223";
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 52edc19d8cdd..de9aef0d924c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -28,6 +28,8 @@ static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = {
[DSM_FUNC_ENERGY_DETECTION_THRESHOLD] = sizeof(u32),
[DSM_FUNC_RFI_CONFIG] = sizeof(u32),
[DSM_FUNC_ENABLE_11BE] = sizeof(u32),
+ [DSM_FUNC_ENABLE_UNII_9] = sizeof(u32),
+ [DSM_FUNC_ENABLE_11BN] = sizeof(u32),
};
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
@@ -156,61 +158,104 @@ out:
}
/*
- * This function receives a DSM function number, calculates its expected size
- * according to Intel BIOS spec, and fills in the value in a 32-bit field.
+ * This function loads all the DSM functions: it checks each one's size and
+ * populates the cache, storing the values in 32-bit fields.
* In case the expected size is smaller than 32-bit, padding will be added.
*/
-int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
- enum iwl_dsm_funcs func, u32 *value)
+static int iwl_acpi_load_dsm_values(struct iwl_fw_runtime *fwrt)
{
- size_t expected_size;
- u64 tmp;
+ u64 query_func_val;
int ret;
BUILD_BUG_ON(ARRAY_SIZE(acpi_dsm_size) != DSM_FUNC_NUM_FUNCS);
- if (WARN_ON(func >= ARRAY_SIZE(acpi_dsm_size) || !func))
- return -EINVAL;
+ ret = iwl_acpi_get_dsm_integer(fwrt->dev, ACPI_DSM_REV,
+ DSM_FUNC_QUERY,
+ &iwl_guid, &query_func_val,
+ acpi_dsm_size[DSM_FUNC_QUERY]);
- expected_size = acpi_dsm_size[func];
+ if (ret) {
+ IWL_DEBUG_RADIO(fwrt, "ACPI QUERY FUNC not valid: %d\n", ret);
+ return ret;
+ }
- /* Currently all ACPI DSMs are either 8-bit or 32-bit */
- if (expected_size != sizeof(u8) && expected_size != sizeof(u32))
- return -EOPNOTSUPP;
+ fwrt->dsm_revision = ACPI_DSM_REV;
+ fwrt->dsm_source = BIOS_SOURCE_ACPI;
- if (!fwrt->acpi_dsm_funcs_valid) {
- ret = iwl_acpi_get_dsm_integer(fwrt->dev, ACPI_DSM_REV,
- DSM_FUNC_QUERY,
- &iwl_guid, &tmp,
- acpi_dsm_size[DSM_FUNC_QUERY]);
- if (ret) {
- /* always indicate BIT(0) to avoid re-reading */
- fwrt->acpi_dsm_funcs_valid = BIT(0);
- return ret;
+ IWL_DEBUG_RADIO(fwrt, "ACPI DSM validity bitmap 0x%x\n",
+ (u32)query_func_val);
+
+ /* DSM_FUNC_QUERY is 0, start from 1 */
+ for (int func = 1; func < ARRAY_SIZE(fwrt->dsm_values); func++) {
+ size_t expected_size = acpi_dsm_size[func];
+ u64 tmp;
+
+ if (!(query_func_val & BIT(func))) {
+ IWL_DEBUG_RADIO(fwrt,
+ "ACPI DSM %d not indicated as valid\n",
+ func);
+ continue;
}
- IWL_DEBUG_RADIO(fwrt, "ACPI DSM validity bitmap 0x%x\n",
- (u32)tmp);
- /* always indicate BIT(0) to avoid re-reading */
- fwrt->acpi_dsm_funcs_valid = tmp | BIT(0);
+ /* This is an invalid function (5 for example) */
+ if (!expected_size)
+ continue;
+
+ /* Currently all ACPI DSMs are either 8-bit or 32-bit */
+ if (expected_size != sizeof(u8) && expected_size != sizeof(u32))
+ continue;
+
+ ret = iwl_acpi_get_dsm_integer(fwrt->dev, ACPI_DSM_REV, func,
+ &iwl_guid, &tmp, expected_size);
+ if (ret)
+ continue;
+
+ if ((expected_size == sizeof(u8) && tmp != (u8)tmp) ||
+ (expected_size == sizeof(u32) && tmp != (u32)tmp))
+ IWL_DEBUG_RADIO(fwrt,
+ "DSM value overflows the expected size, truncating\n");
+ fwrt->dsm_values[func] = (u32)tmp;
+ fwrt->dsm_funcs_valid |= BIT(func);
+ }
+
+ return 0;
+}
+
+/*
+ * This function receives a DSM function number, calculates its expected size
+ * according to Intel BIOS spec, and fills in the value in a 32-bit field.
+ * In case the expected size is smaller than 32-bit, padding will be added.
+ */
+int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
+{
+ if (!fwrt->dsm_funcs_valid) {
+ int ret = iwl_acpi_load_dsm_values(fwrt);
+
+ /*
+ * Always set the valid bit for DSM_FUNC_QUERY so that even if
+ * DSM_FUNC_QUERY returns 0 (no DSM function is valid), we will
+ * still consider the cache as valid.
+ */
+ fwrt->dsm_funcs_valid |= BIT(DSM_FUNC_QUERY);
+
+ if (ret)
+ return ret;
}
- if (!(fwrt->acpi_dsm_funcs_valid & BIT(func))) {
+ BUILD_BUG_ON(ARRAY_SIZE(fwrt->dsm_values) != DSM_FUNC_NUM_FUNCS);
+ BUILD_BUG_ON(BITS_PER_TYPE(fwrt->dsm_funcs_valid) < DSM_FUNC_NUM_FUNCS);
+
+ if (WARN_ON(func >= ARRAY_SIZE(fwrt->dsm_values) || !func))
+ return -EINVAL;
+
+ if (!(fwrt->dsm_funcs_valid & BIT(func))) {
IWL_DEBUG_RADIO(fwrt, "ACPI DSM %d not indicated as valid\n",
func);
return -ENODATA;
}
- ret = iwl_acpi_get_dsm_integer(fwrt->dev, ACPI_DSM_REV, func,
- &iwl_guid, &tmp, expected_size);
- if (ret)
- return ret;
-
- if ((expected_size == sizeof(u8) && tmp != (u8)tmp) ||
- (expected_size == sizeof(u32) && tmp != (u32)tmp))
- IWL_DEBUG_RADIO(fwrt,
- "DSM value overflows the expected size, truncating\n");
- *value = (u32)tmp;
+ *value = fwrt->dsm_values[func];
return 0;
}
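Note: the refactor above replaces per-call ACPI evaluation with a one-shot load: DSM_FUNC_QUERY returns a validity bitmap, each advertised function is read once into dsm_values[], and later lookups only consult the cache. A userspace model of that scheme, with a fake BIOS table standing in for the real _DSM evaluation:

/* Userspace model of the DSM caching scheme: one bulk load fills a
 * value cache and a validity bitmap, later lookups never re-query.
 * fake_bios[] is entirely made up; index 0 plays DSM_FUNC_QUERY.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_FUNCS 8

/* bit i of entry 0 set => function i reported valid by QUERY */
static const uint32_t fake_bios[NUM_FUNCS] = { 0x46, 0, 1, 0, 7, 0, 0, 2 };

static uint32_t cache[NUM_FUNCS];
static uint32_t valid_bitmap;

static void load_dsm_values(void)
{
	uint32_t query = fake_bios[0];	/* DSM_FUNC_QUERY result */

	for (int func = 1; func < NUM_FUNCS; func++) {
		if (!(query & (1u << func)))
			continue;	/* BIOS says this func is absent */
		cache[func] = fake_bios[func];
		valid_bitmap |= 1u << func;
	}
	valid_bitmap |= 1u << 0;	/* mark the cache as populated */
}

static int get_dsm(int func, uint32_t *value)
{
	if (!valid_bitmap)
		load_dsm_values();	/* first call fills the cache */
	if (!(valid_bitmap & (1u << func)))
		return -ENODATA;
	*value = cache[func];
	return 0;
}

int main(void)
{
	uint32_t v;

	if (!get_dsm(2, &v))
		printf("func 2 = %u (cached)\n", (unsigned)v);
	if (get_dsm(5, &v) == -ENODATA)
		printf("func 5 not provided by BIOS\n");
	return 0;
}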
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index f76cea6e9ec8..c7a833f8041a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -66,6 +66,18 @@ enum iwl_mac_conf_subcmd_ids {
*/
TWT_OPERATION_CMD = 0x10,
/**
+ * @NAN_CFG_CMD: &struct iwl_nan_config_cmd
+ */
+ NAN_CFG_CMD = 0x12,
+ /**
+ * @NAN_DW_END_NOTIF: &struct iwl_nan_dw_end_notif
+ */
+ NAN_DW_END_NOTIF = 0xf4,
+ /**
+ * @NAN_JOINED_CLUSTER_NOTIF: &struct iwl_nan_cluster_notif
+ */
+ NAN_JOINED_CLUSTER_NOTIF = 0xf5,
+ /**
* @MISSED_BEACONS_NOTIF: &struct iwl_missed_beacons_notif
*/
MISSED_BEACONS_NOTIF = 0xF6,
@@ -492,22 +504,36 @@ enum iwl_link_modify_bandwidth {
};
/**
+ * enum iwl_npca_flags - NPCA flags
+ * @IWL_NPCA_FLAG_MAC_HDR_BASED: MAC header based NPCA operation
+ * permitted in the BSS (MOPLEN)
+ */
+enum iwl_npca_flags {
+ IWL_NPCA_FLAG_MAC_HDR_BASED = BIT(0),
+}; /* NPCA_FLAG_E */
+
+/**
* struct iwl_npca_params - NPCA parameters (non-primary channel access)
*
+ * @dis_subch_bmap: disabled subchannel bitmap for NPCA
* @switch_delay: after switch, delay TX according to destination AP
* @switch_back_delay: switch back to control channel before OBSS frame end
+ * @initial_qsrc: Indicates the value that is used to initialize the
+ * EDCAF QSRC[AC] variables
* @min_dur_threshold: minimum PPDU time to switch to the non-primary
- * NPCA channel
- * @flags: NPCA flags - bit 0: puncturing allowed, bit 1: new TX allowed
+ * NPCA channel (usec)
+ * @flags: NPCA flags, see &enum iwl_npca_flags
* @reserved: reserved for alignment purposes
*/
struct iwl_npca_params {
+ __le16 dis_subch_bmap;
u8 switch_delay;
u8 switch_back_delay;
- __le16 min_dur_threshold;
- __le16 flags;
- __le16 reserved;
-} __packed; /* NPCA_PARAM_API_S_VER_1 */
+ u8 initial_qsrc;
+ u8 min_dur_threshold;
+ u8 flags;
+ u8 reserved;
+} __packed; /* NPCA_PARAM_API_S_VER_2 */
/**
* struct iwl_link_config_cmd - command structure to configure the LINK context
@@ -618,7 +644,8 @@ struct iwl_link_config_cmd {
struct iwl_npca_params npca_params; /* since _VER_7 */
struct iwl_ac_qos prio_edca_params; /* since _VER_7 */
__le32 reserved3[4];
-} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3, _VER_4, _VER_5, _VER_6, _VER_7 */
+} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3, _VER_4,
+ * _VER_5, _VER_6, _VER_7, _VER_8 */
/* Currently FW supports link ids in the range 0-3 and can have
* at most two active links for each vif.
@@ -990,4 +1017,122 @@ struct iwl_twt_operation_cmd {
u8 ul_tid_bitmap;
} __packed; /* TWT_OPERATION_API_S_VER_1 */
+enum iwl_nan_band {
+ IWL_NAN_BAND_5GHZ = 0,
+ IWL_NAN_BAND_2GHZ = 1,
+ IWL_NUM_NAN_BANDS,
+};
+
+/**
+ * struct iwl_nan_band_config - NAN band configuration
+ *
+ * @rssi_close: RSSI threshold for close proximity in dBm
+ * @rssi_middle: RSSI threshold for middle proximity in dBm
+ * @dw_interval: Discovery Window (DW) interval for synchronization beacons and
+ * SDFs. Valid values of DW interval are: 1, 2, 3, 4 and 5 corresponding to
+ * 1, 2, 4, 8, and 16 DWs.
+ * @reserved: reserved
+ */
+struct iwl_nan_band_config {
+ u8 rssi_close;
+ u8 rssi_middle;
+ u8 dw_interval;
+ u8 reserved;
+}; /* NAN_BAND_SPECIFIC_CONFIG_API_S_VER_1 */
+
+/**
+ * enum iwl_nan_flags - flags for NAN configuration
+ *
+ * @IWL_NAN_FLAG_DW_END_NOTIF_ENABLED: indicates that the host wants to receive
+ * notifications when a DW ends.
+ */
+enum iwl_nan_flags {
+ IWL_NAN_FLAG_DW_END_NOTIF_ENABLED = BIT(0),
+};
+
+/**
+ * struct iwl_nan_config_cmd - NAN configuration command
+ *
+ * @action: action to perform, see &enum iwl_ctxt_action
+ * @nmi_addr: NAN Management Interface (NMI) address
+ * @reserved_for_nmi_addr: reserved
+ * @discovery_beacon_interval: discovery beacon interval in TUs
+ * @cluster_id: lower last two bytes of the cluster ID, in case the local
+ * device starts a cluster
+ * @sta_id: station ID of the NAN station
+ * @hb_channel: channel for 5 GHz if the device supports operation on 5 GHz.
+ * Valid values are 44 and 149, which correspond to the 5 GHz channel, and
+ * 0 which means that NAN operation on the 5 GHz band is disabled.
+ * @master_pref: master preference
+ * @dwell_time: dwell time on the discovery channel during scan (milliseconds).
+ * If set to 0, the dwell time is determined by the firmware.
+ * @scan_period: scan period in seconds. If set to 0, the scan period is
+ * determined by the firmware.
+ * @flags: flags for NAN configuration, see &enum iwl_nan_flags
+ * @band_config: band configuration for NAN, one for each band
+ * @nan_attr_len: length of the NAN attributes to be added to the beacon (bytes)
+ * @nan_vendor_elems_len: length of the NAN vendor elements to be added to the
+ * beacon (bytes)
+ * @beacon_data: variable length data that contains the NAN attributes
+ * (&nan_attr_len) followed by the NAN vendor elements
+ * (&nan_vendor_elems_len).
+ */
+struct iwl_nan_config_cmd {
+ __le32 action;
+ u8 nmi_addr[6];
+ __le16 reserved_for_nmi_addr;
+ __le32 discovery_beacon_interval;
+
+ u8 cluster_id[2];
+ u8 sta_id;
+ u8 hb_channel;
+
+ u8 master_pref;
+ u8 dwell_time;
+ u8 scan_period;
+ u8 flags;
+
+ struct iwl_nan_band_config band_config[IWL_NUM_NAN_BANDS];
+
+ __le32 nan_attr_len;
+ __le32 nan_vendor_elems_len;
+ u8 beacon_data[];
+} __packed; /* NAN_CONFIG_CMD_API_S_VER_1 */
+
+/**
+ * enum iwl_nan_cluster_notif_flags - flags for the cluster notification
+ *
+ * @IWL_NAN_CLUSTER_NOTIF_FLAG_NEW_CLUSTER: indicates that the device has
+ * started a new cluster. If not set, the device has joined an existing
+ * cluster.
+ */
+enum iwl_nan_cluster_notif_flags {
+ IWL_NAN_CLUSTER_NOTIF_FLAG_NEW_CLUSTER = BIT(0),
+}; /* NAN_JOINED_CLUSTER_FLAG_E_VER_1 */
+
+/**
+ * struct iwl_nan_cluster_notif - event sent when the device starts or joins a
+ * NAN cluster.
+ *
+ * @cluster_id: the last two bytes of the cluster ID
+ * @flags: combination of &enum iwl_nan_cluster_notif_flags
+ * @reserved: reserved
+ */
+struct iwl_nan_cluster_notif {
+ u8 cluster_id[2];
+ u8 flags;
+ u8 reserved;
+}; /* NAN_JOINED_CLUSTER_NTF_API_S_VER_1 */
+
+/**
+ * struct iwl_nan_dw_end_notif - sent to notify the host the end of a DW.
+ *
+ * @band: band on which the DW ended. See &enum iwl_nan_band.
+ * @reserved: reserved
+ */
+struct iwl_nan_dw_end_notif {
+ u8 band;
+ u8 reserved[3];
+} __packed; /* NAN_DW_END_NTF_API_S_VER_1 */
+
#endif /* __iwl_fw_api_mac_cfg_h__ */
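Note: iwl_nan_config_cmd above ends in a flexible array: beacon_data[] carries nan_attr_len bytes of NAN attributes followed by nan_vendor_elems_len bytes of vendor elements, so the allocation is sizeof(cmd) plus both lengths. A simplified sizing sketch (fields trimmed to the ones that matter here):

/* Sizing sketch for the variable-length NAN config command; the
 * struct below keeps only the length fields and the trailer.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct nan_config_cmd {
	uint32_t action;
	uint32_t nan_attr_len;
	uint32_t nan_vendor_elems_len;
	uint8_t beacon_data[];		/* attrs, then vendor elems */
};

int main(void)
{
	const uint8_t attrs[] = { 0x01, 0x02, 0x03 };
	const uint8_t vendor[] = { 0xdd, 0x05 };
	size_t len = sizeof(struct nan_config_cmd) +
		     sizeof(attrs) + sizeof(vendor);
	struct nan_config_cmd *cmd = calloc(1, len);

	if (!cmd)
		return 1;
	cmd->nan_attr_len = sizeof(attrs);
	cmd->nan_vendor_elems_len = sizeof(vendor);
	memcpy(cmd->beacon_data, attrs, sizeof(attrs));
	memcpy(cmd->beacon_data + sizeof(attrs), vendor, sizeof(vendor));
	printf("command length = %zu bytes\n", len);
	free(cmd);
	return 0;
}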
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 4644fc1aa1ec..bd6bf931866f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -503,18 +503,26 @@ enum bios_source {
};
/**
- * struct bios_value_u32 - BIOS configuration.
+ * struct iwl_bios_config_hdr - BIOS configuration header
* @table_source: see &enum bios_source
* @table_revision: table revision.
* @reserved: reserved
- * @value: value in bios.
*/
-struct bios_value_u32 {
+struct iwl_bios_config_hdr {
u8 table_source;
u8 table_revision;
u8 reserved[2];
+} __packed; /* BIOS_CONFIG_HDR_API_S_VER_1 */
+
+/**
+ * struct bios_value_u32 - BIOS configuration.
+ * @hdr: bios config header
+ * @value: value in bios.
+ */
+struct bios_value_u32 {
+ struct iwl_bios_config_hdr hdr;
__le32 value;
-} __packed; /* BIOS_TABLE_SOURCE_U32_S_VER_1 */
+} __packed; /* BIOS_CONFIG_DATA_U32_API_S_VER_1 */
/**
* struct iwl_tas_config_cmd - configures the TAS.
@@ -650,6 +658,10 @@ struct iwl_lari_config_change_cmd_v8 {
* bit0: enable 11be in China(CB/CN).
* bit1: enable 11be in South Korea.
* bit 2 - 31: reserved.
+ * @oem_11bn_allow_bitmap: Bitmap of 11bn allowed MCCs. The firmware expects to
+ * get the data from the BIOS.
+ * @oem_unii9_enable: UNII-9 enablement as read from the BIOS
+ * @bios_hdr: bios config header
*/
struct iwl_lari_config_change_cmd {
__le32 config_bitmap;
@@ -661,8 +673,16 @@ struct iwl_lari_config_change_cmd {
__le32 edt_bitmap;
__le32 oem_320mhz_allow_bitmap;
__le32 oem_11be_allow_bitmap;
+ /* since version 13 */
+ __le32 oem_11bn_allow_bitmap;
+ /* since version 13 */
+ __le32 oem_unii9_enable;
+ /* since version 13 */
+ struct iwl_bios_config_hdr bios_hdr;
} __packed;
-/* LARI_CHANGE_CONF_CMD_S_VER_12 */
+/* LARI_CHANGE_CONF_CMD_S_VER_12
+ * LARI_CHANGE_CONF_CMD_S_VER_13
+ */
/* Activate UNII-1 (5.2GHz) for World Wide */
#define ACTIVATE_5G2_IN_WW_MASK BIT(4)
@@ -682,11 +702,11 @@ struct iwl_pnvm_init_complete_ntfy {
/**
* struct iwl_mcc_allowed_ap_type_cmd - struct for MCC_ALLOWED_AP_TYPE_CMD
- * @offset_map: mapping a mcc to UHB AP type support (UATS) allowed
+ * @mcc_to_ap_type_map: mapping an MCC to 6 GHz AP type support (UATS)
* @reserved: reserved
*/
struct iwl_mcc_allowed_ap_type_cmd {
- u8 offset_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE];
+ u8 mcc_to_ap_type_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE];
__le16 reserved;
} __packed; /* MCC_ALLOWED_AP_TYPE_CMD_API_S_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 535864e22626..0cd8a12e0f7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -766,7 +766,7 @@ enum iwl_6ghz_ap_type {
* AP_TX_POWER_CONSTRAINTS_CMD
* Used for VLP/LPI/AFC Access Point power constraints for 6GHz channels
* @link_id: linkId
- * @ap_type: see &enum iwl_ap_type
+ * @ap_type: see &enum iwl_6ghz_ap_type
* @eirp_pwr: 8-bit 2s complement signed integer in the range
* -64 dBm to 63 dBm with a 0.5 dB step
* default &DEFAULT_TPE_TX_POWER (no maximum limit)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 9c464e7aba10..ae6be3ed32f8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -73,6 +73,7 @@ enum iwl_tlc_mng_cfg_chains {
* @IWL_TLC_MNG_MODE_VHT: enable VHT
* @IWL_TLC_MNG_MODE_HE: enable HE
* @IWL_TLC_MNG_MODE_EHT: enable EHT
+ * @IWL_TLC_MNG_MODE_UHR: enable UHR
*/
enum iwl_tlc_mng_cfg_mode {
IWL_TLC_MNG_MODE_CCK = 0,
@@ -82,6 +83,7 @@ enum iwl_tlc_mng_cfg_mode {
IWL_TLC_MNG_MODE_VHT,
IWL_TLC_MNG_MODE_HE,
IWL_TLC_MNG_MODE_EHT,
+ IWL_TLC_MNG_MODE_UHR,
};
/**
@@ -205,7 +207,7 @@ struct iwl_tlc_config_cmd_v4 {
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */
/**
- * struct iwl_tlc_config_cmd - TLC configuration
+ * struct iwl_tlc_config_cmd_v5 - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
@@ -221,7 +223,7 @@ struct iwl_tlc_config_cmd_v4 {
* @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
* set zero for no limit.
*/
-struct iwl_tlc_config_cmd {
+struct iwl_tlc_config_cmd_v5 {
u8 sta_id;
u8 reserved1[3];
u8 max_ch_width;
@@ -236,6 +238,38 @@ struct iwl_tlc_config_cmd {
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_5 */
/**
+ * struct iwl_tlc_config_cmd - TLC configuration
+ * @sta_mask: station mask (in NAN we can have multiple logical stations of
+ * the same peer (with the same TLC configuration)).
+ * @phy_id: the phy id to used for this TLC configuration
+ * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
+ * @mode: &enum iwl_tlc_mng_cfg_mode
+ * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * use BIT(&enum iwl_tlc_mng_cfg_cw)
+ * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
+ *	pair (0 - 80 MHz width and below, 1 - 160 MHz, 2 - 320 MHz).
+ * @max_mpdu_len: max MPDU length, in bytes
+ * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
+ * set zero for no limit.
+ */
+struct iwl_tlc_config_cmd {
+ __le32 sta_mask;
+ __le32 phy_id;
+ u8 max_ch_width;
+ u8 mode;
+ u8 chains;
+ u8 sgi_ch_width_supp;
+ __le16 flags;
+ __le16 non_ht_rates;
+ __le32 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V4];
+ __le16 max_mpdu_len;
+ __le16 max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_6 */
+
+/**
* enum iwl_tlc_update_flags - updated fields
* @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update
 * @IWL_TLC_NOTIF_FLAG_AMSDU: A-MSDU parameters update
@@ -706,10 +740,11 @@ enum {
#define RATE_MCS_HE_SU_4_LTF 3
#define RATE_MCS_HE_SU_4_LTF_08_GI 4
-/* Bit 24-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
+/* Bit 24-23: HE type. (0) SU, (1) HE SU_EXT/UHR ELR, (2) MU, (3) trigger based */
#define RATE_MCS_HE_TYPE_POS 23
#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_UHR_ELR (1 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
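
For reference, a minimal sketch of filling the new v6 TLC command defined above. This is illustrative only: sta_id, phy_id and the chosen width/mode values are placeholders, and the enum member names follow the usual iwl_tlc_mng naming (not shown in this hunk):

	struct iwl_tlc_config_cmd cmd = {
		/* v6 keys the configuration on a station mask instead of
		 * a single sta_id, so one command can cover several
		 * logical stations of the same NAN peer
		 */
		.sta_mask = cpu_to_le32(BIT(sta_id)),
		.phy_id = cpu_to_le32(phy_id),
		.max_ch_width = IWL_TLC_MNG_CH_WIDTH_160MHZ,
		.mode = IWL_TLC_MNG_MODE_EHT,
		.chains = IWL_TLC_MNG_CHAIN_A_MSK | IWL_TLC_MNG_CHAIN_B_MSK,
	};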
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 3ed7e0807b90..ac6c1ef2cbcd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -1062,13 +1062,37 @@ struct iwl_vht_sigs {
#define OFDM_RX_FRAME_VHT_NUM_OF_DATA_SYM 0x000007ff
#define OFDM_RX_FRAME_VHT_NUM_OF_DATA_SYM_VALID 0x80000000
__le32 a0;
- __le32 a1, a2;
+#define OFDM_RX_FRAME_VHT_BANDWIDTH 0x00000003
+#define OFDM_RX_FRAME_VHT_STBC 0x00000008
+#define OFDM_RX_FRAME_VHT_GRP_ID 0x000003f0
+#define OFDM_RX_FRAME_VHT_STS_USER0 0x00001c00
+#define OFDM_RX_FRAME_VHT_MU_STS_USER1 0x0000e000
+#define OFDM_RX_FRAME_VHT_MU_STS_USER2 0x00070000
+#define OFDM_RX_FRAME_VHT_MU_STS_USER3 0x00380000
+#define OFDM_RX_FRAME_VHT_PARTIAL_AID_OR_MU_STS 0x003fe000
+#define OFDM_RX_FRAME_VHT_MU_MIMO_USER_POSITION 0x03000000
+#define OFDM_RX_FRAME_VHT_NO_STREAMS 0x04000000
+#define OFDM_RX_FRAME_VHT_STS 0x38000000
+ __le32 a1;
+#define OFDM_RX_FRAME_VHT_SHORT_GI 0x00000001
+#define OFDM_RX_FRAME_VHT_SHORT_GI_AMBIG 0x00000002
+#define OFDM_RX_FRAME_VHT_CODING 0x00000004
+#define OFDM_RX_FRAME_VHT_CODING_EXTRA_SYM 0x00000008
+#define OFDM_RX_FRAME_VHT_MCS_OR_MU_CODING 0x000000f0
+#define OFDM_RX_FRAME_VHT_BF_OR_MU_RESERVED 0x00000100
+#define OFDM_RX_FRAME_VHT_CRC 0x0003fc00
+#define OFDM_RX_FRAME_VHT_CRC_OK_BIT 0x00040000
+#define OFDM_RX_FRAME_VHT_CUR_USER_CODING 0x00080000
+#define OFDM_RX_FRAME_VHT_CUR_USER_STS 0x00700000
+ __le32 a2;
};
struct iwl_he_sigs {
#define OFDM_RX_FRAME_HE_BEAM_CHANGE 0x00000001
#define OFDM_RX_FRAME_HE_UL_FLAG 0x00000002
+/* SU/ER-SU: MCS, MU: SIG-B MCS */
#define OFDM_RX_FRAME_HE_MCS 0x0000003c
+/* SU/ER-SU: DCM, MU: SIG-B DCM */
#define OFDM_RX_FRAME_HE_DCM 0x00000040
#define OFDM_RX_FRAME_HE_BSS_COLOR 0x00001f80
#define OFDM_RX_FRAME_HE_SPATIAL_REUSE 0x0001e000
@@ -1247,19 +1271,82 @@ struct iwl_eht_tb_sigs {
};
struct iwl_uhr_sigs {
- __le32 usig_a1, usig_a1_uhr, usig_a2_uhr, b1, b2;
+ /* same as EHT above */
+ __le32 usig_a1;
+#define OFDM_RX_FRAME_UHR_BSS_COLOR2 0x0000003f
+ __le32 usig_a1_uhr;
+#define OFDM_RX_FRAME_UHR_PPDU_TYPE 0x00000003
+#define OFDM_RX_FRAME_UHR_COBF_CSR_DISABLE 0x00000004
+#define OFDM_RX_FRAME_UHR_PUNC_CHANNEL 0x000000f8
+#define OFDM_RX_FRAME_UHR_USIG2_VALIDATE_B8 0x00000100
+#define OFDM_RX_FRAME_UHR_SIG_MCS 0x00000600
+#define OFDM_RX_FRAME_UHR_SIG_SYM_NUM 0x0000f800
+#define OFDM_RX_FRAME_UHR_TRIG_SPATIAL_REUSE_1 0x000f0000
+#define OFDM_RX_FRAME_UHR_TRIG_SPATIAL_REUSE_2 0x00f00000
+#define OFDM_RX_FRAME_UHR_TRIG_USIG2_DISREGARD 0x1f000000
+ __le32 usig_a2_uhr;
+#define OFDM_RX_FRAME_UHR_SPATIAL_REUSE 0x0000000f
+#define OFDM_RX_FRAME_UHR_GI_LTF_TYPE 0x00000030
+#define OFDM_RX_FRAME_UHR_NUM_OF_LTF_SYM 0x000001c0
+#define OFDM_RX_FRAME_UHR_CODING_EXTRA_SYM 0x00000200
+#define OFDM_RX_FRAME_UHR_PE_A_FACTOR 0x00000c00
+#define OFDM_RX_FRAME_UHR_PE_DISAMBIGUITY 0x00001000
+#define OFDM_RX_FRAME_UHR_IM_DISABLE 0x00002000
+#define OFDM_RX_FRAME_UHR_USIG_OVF_DISREGARD 0x0000c000
+#define OFDM_RX_FRAME_UHR_NUM_OF_USERS 0x00070000
+#define OFDM_RX_FRAME_UHR_NSTS 0x00f00000
+#define OFDM_RX_FRAME_UHR_BF 0x01000000
+#define OFDM_RX_FRAME_UHR_USIG_OVF_NDP_DISREGARD 0x06000000
+#define OFDM_RX_FRAME_UHR_COMM_CC1_CRC_OK 0x08000000
+#define OFDM_RX_FRAME_UHR_COMM_CC2_CRC_OK 0x10000000
+#define OFDM_RX_FRAME_UHR_NON_VALID_RU_ALLOC 0x20000000
+ __le32 b1;
+#define OFDM_RX_FRAME_UHR_MCS 0x000001f0
+#define OFDM_RX_FRAME_UHR_CODING 0x00000200
+#define OFDM_RX_FRAME_UHR_SPATIAL_CONFIG 0x00003c00
+#define OFDM_RX_FRAME_UHR_STA_RU 0x003fc000
+#define OFDM_RX_FRAME_UHR_STA_RU_PS160 0x00400000
+#define OFDM_RX_FRAME_UHR_UEQM 0x00800000
+#define OFDM_RX_FRAME_UHR_2XLDPC 0x01000000
+#define OFDM_RX_FRAME_UHR_UEQM_PATTERN 0x06000000
+#define OFDM_RX_FRAME_UHR_IS_MU_MIMO_USER_FIELD 0x08000000
+#define OFDM_RX_FRAME_UHR_USER_FIELD_CRC_OK 0x40000000
+ __le32 b2;
+#define OFDM_RX_UHR_NUM_OF_DATA_SYM 0x000007ff
+#define OFDM_RX_UHR_PE_DURATION 0x00003800
__le32 sig2;
+ /* same as EHT above: OFDM_RX_FRAME_EHT_RU_ALLOC_* */
__le32 cmn[6];
+#define OFDM_RX_FRAME_UHR_USER_FIELD_ID 0x000007ff
__le32 user_id;
};
struct iwl_uhr_tb_sigs {
- __le32 usig_a1, usig_a2_uhr, tb_rx0, tb_rx1;
+ /* same as UHR above */
+ __le32 usig_a1, usig_a2_uhr;
+ /* same as HE above */
+ __le32 tb_rx0, tb_rx1;
};
struct iwl_uhr_elr_sigs {
+ /* same as UHR above */
__le32 usig_a1, usig_a2_uhr;
- __le32 uhr_sig_elr1, uhr_sig_elr2;
+#define OFDM_RX_VECTOR_UHR_ELR_VER_ID 0x00000007
+#define OFDM_RX_VECTOR_UHR_ELR_UPLINK_FLAG 0x00000008
+#define OFDM_RX_VECTOR_UHR_ELR_MCS 0x00000010
+#define OFDM_RX_VECTOR_UHR_ELR_CODING 0x00000020
+#define OFDM_RX_VECTOR_UHR_ELR_LENGTH_IN_SYM 0x00007fc0
+#define OFDM_RX_VECTOR_UHR_ELR_CODING_EXTRA_SYM 0x00008000
+#define OFDM_RX_VECTOR_UHR_ELR_SIG1_CRC_OK 0x00010000
+#define OFDM_RX_VECTOR_UHR_ELR_STA_ID 0x0ffe0000
+#define OFDM_RX_VECTOR_UHR_ELR_DISREGARD 0x70000000
+ __le32 uhr_sig_elr1;
+#define OFDM_RX_VECTOR_UHR_ELR_MARK_BSS_COLOR 0x0000003f
+#define OFDM_RX_VECTOR_UHR_ELR_SIG_ID_INDX 0x00000e00
+#define OFDM_RX_VECTOR_UHR_ELR_STA_RU 0x000ff000
+#define OFDM_RX_VECTOR_UHR_ELR_STA_RU_PS160 0x00100000
+#define OFDM_RX_VECTOR_UHR_ELR_SIG2_CRC_OK 0x00200000
+ __le32 uhr_sig_elr2;
};
union iwl_sigs {
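
These field masks pair naturally with le32_get_bits() from <linux/bitfield.h>, which masks and shifts in one step. A small sketch using the UHR masks defined above (the helper names are illustrative, not driver functions):

	#include <linux/bitfield.h>

	/* Extract the per-user MCS from the first user-field word */
	static u32 iwl_uhr_mcs(const struct iwl_uhr_sigs *sigs)
	{
		return le32_get_bits(sigs->b1, OFDM_RX_FRAME_UHR_MCS);
	}

	/* Extract the NSTS field from USIG-A2 */
	static u32 iwl_uhr_nsts(const struct iwl_uhr_sigs *sigs)
	{
		return le32_get_bits(sigs->usig_a2_uhr, OFDM_RX_FRAME_UHR_NSTS);
	}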
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index b9e0b69c6680..378788de1d74 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -447,6 +447,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* during assert handling even if the dump isn't split
* @IWL_UCODE_TLV_CAPA_FW_ACCEPTS_RAW_DSM_TABLE: Firmware has capability of
* handling raw DSM table data.
+ * @IWL_UCODE_TLV_CAPA_NAN_SYNC_SUPPORT: Supports NAN synchronization
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -550,6 +551,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_RESET_DURING_ASSERT = (__force iwl_ucode_tlv_capa_t)(4 * 32 + 0),
IWL_UCODE_TLV_CAPA_FW_ACCEPTS_RAW_DSM_TABLE = (__force iwl_ucode_tlv_capa_t)(4 * 32 + 1),
+ IWL_UCODE_TLV_CAPA_NAN_SYNC_SUPPORT = (__force iwl_ucode_tlv_capa_t)(4 * 32 + 2),
NUM_IWL_UCODE_TLV_CAPA
/*
* This construction make both sparse (which cannot increment the previous
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index d2ad169ae880..958e71a3c958 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -376,8 +376,10 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = cmd->v7.gain[0];
*cmd_size = sizeof(cmd->v7);
- cmd->v7.ppag_config_info.table_source = fwrt->ppag_bios_source;
- cmd->v7.ppag_config_info.table_revision = fwrt->ppag_bios_rev;
+ cmd->v7.ppag_config_info.hdr.table_source =
+ fwrt->ppag_bios_source;
+ cmd->v7.ppag_config_info.hdr.table_revision =
+ fwrt->ppag_bios_rev;
cmd->v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
} else {
IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
@@ -488,206 +490,6 @@ bool iwl_add_mcc_to_tas_block_list(u16 *list, u8 *size, u16 mcc)
}
IWL_EXPORT_SYMBOL(iwl_add_mcc_to_tas_block_list);
-__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
-{
- int ret;
- u32 val;
- __le32 config_bitmap = 0;
-
- switch (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id)) {
- case IWL_CFG_RF_TYPE_HR1:
- case IWL_CFG_RF_TYPE_HR2:
- case IWL_CFG_RF_TYPE_JF1:
- case IWL_CFG_RF_TYPE_JF2:
- ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_INDONESIA_5G2,
- &val);
-
- if (!ret && val == DSM_VALUE_INDONESIA_ENABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
- break;
- default:
- break;
- }
-
- ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val);
- if (!ret) {
- if (val == DSM_VALUE_SRD_PASSIVE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
- else if (val == DSM_VALUE_SRD_DISABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
- }
-
- if (fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
- ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_REGULATORY_CONFIG,
- &val);
- /*
- * China 2022 enable if the BIOS object does not exist or
- * if it is enabled in BIOS.
- */
- if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
- }
-
- return config_bitmap;
-}
-IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap);
-
-static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver)
-{
- size_t cmd_size;
-
- switch (cmd_ver) {
- case 12:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd);
- break;
- case 8:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v8);
- break;
- case 6:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
- break;
- default:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
- break;
- }
- return cmd_size;
-}
-
-int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
- struct iwl_lari_config_change_cmd *cmd,
- size_t *cmd_size)
-{
- int ret;
- u32 value;
- bool has_raw_dsm_capa = fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_FW_ACCEPTS_RAW_DSM_TABLE);
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
- WIDE_ID(REGULATORY_AND_NVM_GROUP,
- LARI_CONFIG_CHANGE), 1);
-
- if (WARN_ONCE(cmd_ver > 12,
- "Don't add newer versions to this function\n"))
- return -EINVAL;
-
- memset(cmd, 0, sizeof(*cmd));
- *cmd_size = iwl_get_lari_config_cmd_size(cmd_ver);
-
- cmd->config_bitmap = iwl_get_lari_config_bitmap(fwrt);
-
- ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
- if (!ret) {
- if (!has_raw_dsm_capa)
- value &= DSM_11AX_ALLOW_BITMAP;
- cmd->oem_11ax_allow_bitmap = cpu_to_le32(value);
- }
-
- ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
- if (!ret) {
- if (!has_raw_dsm_capa)
- value &= DSM_UNII4_ALLOW_BITMAP;
-
- /* Since version 12, bits 4 and 5 are supported
- * regardless of this capability, By pass this masking
- * if firmware has capability of accepting raw DSM table.
- */
- if (!has_raw_dsm_capa && cmd_ver < 12 &&
- !fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA))
- value &= ~(DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK |
- DSM_VALUE_UNII4_CANADA_EN_MSK);
-
- cmd->oem_unii4_allow_bitmap = cpu_to_le32(value);
- }
-
- ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
- if (!ret) {
- if (!has_raw_dsm_capa)
- value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V12;
-
- if (!has_raw_dsm_capa && cmd_ver < 8)
- value &= ~ACTIVATE_5G2_IN_WW_MASK;
-
- /* Since version 12, bits 5 and 6 are supported
- * regardless of this capability, By pass this masking
- * if firmware has capability of accepting raw DSM table.
- */
- if (!has_raw_dsm_capa && cmd_ver < 12 &&
- !fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA))
- value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V8;
-
- cmd->chan_state_active_bitmap = cpu_to_le32(value);
- }
-
- ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
- if (!ret)
- cmd->oem_uhb_allow_bitmap = cpu_to_le32(value);
-
- ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value);
- if (!ret) {
- if (!has_raw_dsm_capa)
- value &= DSM_FORCE_DISABLE_CHANNELS_ALLOWED_BITMAP;
- cmd->force_disable_channels_bitmap = cpu_to_le32(value);
- }
-
- ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
- &value);
- if (!ret) {
- if (!has_raw_dsm_capa)
- value &= DSM_EDT_ALLOWED_BITMAP;
- cmd->edt_bitmap = cpu_to_le32(value);
- }
-
- ret = iwl_bios_get_wbem(fwrt, &value);
- if (!ret)
- cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value);
-
- ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value);
- if (!ret)
- cmd->oem_11be_allow_bitmap = cpu_to_le32(value);
-
- if (cmd->config_bitmap ||
- cmd->oem_uhb_allow_bitmap ||
- cmd->oem_11ax_allow_bitmap ||
- cmd->oem_unii4_allow_bitmap ||
- cmd->chan_state_active_bitmap ||
- cmd->force_disable_channels_bitmap ||
- cmd->edt_bitmap ||
- cmd->oem_320mhz_allow_bitmap ||
- cmd->oem_11be_allow_bitmap) {
- IWL_DEBUG_RADIO(fwrt,
- "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
- le32_to_cpu(cmd->config_bitmap),
- le32_to_cpu(cmd->oem_11ax_allow_bitmap));
- IWL_DEBUG_RADIO(fwrt,
- "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
- le32_to_cpu(cmd->oem_unii4_allow_bitmap),
- le32_to_cpu(cmd->chan_state_active_bitmap),
- cmd_ver);
- IWL_DEBUG_RADIO(fwrt,
- "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
- le32_to_cpu(cmd->oem_uhb_allow_bitmap),
- le32_to_cpu(cmd->force_disable_channels_bitmap));
- IWL_DEBUG_RADIO(fwrt,
- "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
- le32_to_cpu(cmd->edt_bitmap),
- le32_to_cpu(cmd->oem_320mhz_allow_bitmap));
- IWL_DEBUG_RADIO(fwrt,
- "sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
- le32_to_cpu(cmd->oem_11be_allow_bitmap));
- } else {
- return 1;
- }
-
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_fill_lari_config);
-
int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index 735482e7adf5..1489031687b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -125,7 +125,9 @@ enum iwl_dsm_funcs {
DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10,
DSM_FUNC_RFI_CONFIG = 11,
DSM_FUNC_ENABLE_11BE = 12,
- DSM_FUNC_NUM_FUNCS = 13,
+ DSM_FUNC_ENABLE_11BN = 13,
+ DSM_FUNC_ENABLE_UNII_9 = 14,
+ DSM_FUNC_NUM_FUNCS,
};
enum iwl_dsm_values_srd {
@@ -218,11 +220,6 @@ int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk);
int iwl_bios_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
-__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
-int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
- struct iwl_lari_config_change_cmd *cmd,
- size_t *cmd_size);
-
int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 57570ff15622..ff186fb2e0da 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -116,10 +116,14 @@ struct iwl_txf_iter_data {
* @phy_filters: specific phy filters as read from WPFC BIOS table
* @ppag_bios_rev: PPAG BIOS revision
* @ppag_bios_source: see &enum bios_source
- * @acpi_dsm_funcs_valid: bitmap indicating which DSM values are valid,
+ * @dsm_funcs_valid: bitmap indicating which DSM values are valid,
* zero (default initialization) means it hasn't been read yet,
* and BIT(0) is set when it has since function 0 also has this
- * bitmap and is always supported
+ * bitmap and is always supported.
+ * If the bit is set for a specific function, then the corresponding
+ * entry in &dsm_values is valid.
+ * @dsm_values: cache of the DSM values. The validity of each entry is
+ * determined by &dsm_funcs_valid.
* @geo_enabled: WGDS table is present
* @geo_num_profiles: number of geo profiles
* @geo_rev: geo profiles table revision
@@ -137,6 +141,8 @@ struct iwl_txf_iter_data {
* @timestamp.seq: timestamp marking sequence
* @timestamp.delay: timestamp marking worker delay
* @tpc_enabled: TPC enabled
+ * @dsm_source: the source of the DSM table, one of &enum bios_source (UEFI, ACPI, or NONE)
+ * @dsm_revision: the revision of the DSM table
*/
struct iwl_fw_runtime {
struct iwl_trans *trans;
@@ -211,9 +217,12 @@ struct iwl_fw_runtime {
bool uats_valid;
u8 uefi_tables_lock_status;
struct iwl_phy_specific_cfg phy_filters;
+ enum bios_source dsm_source;
+ u8 dsm_revision;
-#ifdef CONFIG_ACPI
- u32 acpi_dsm_funcs_valid;
+#if defined(CONFIG_ACPI) || defined(CONFIG_EFI)
+ u32 dsm_funcs_valid;
+ u32 dsm_values[DSM_FUNC_NUM_FUNCS];
#endif
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index 90fd69b4860c..344ddde85b18 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -6,6 +6,7 @@
*/
#include "iwl-drv.h"
#include "runtime.h"
+#include "dbg.h"
#include "fw/api/commands.h"
static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
@@ -17,7 +18,9 @@ static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
u8 api_ver = iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP,
SHARED_MEM_CFG_CMD, 0);
- if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
+ /* Note: notification has 3 entries, but we only expect 2 */
+ if (IWL_FW_CHECK(fwrt, lmac_num > ARRAY_SIZE(fwrt->smem_cfg.lmac),
+ "FW advertises %d LMACs\n", lmac_num))
return;
fwrt->smem_cfg.num_lmacs = lmac_num;
@@ -26,7 +29,8 @@ static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
if (api_ver >= 4 &&
- !WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) < sizeof(*mem_cfg))) {
+ !IWL_FW_CHECK(fwrt, iwl_rx_packet_payload_len(pkt) < sizeof(*mem_cfg),
+ "bad shared mem notification size\n")) {
fwrt->smem_cfg.rxfifo2_control_size =
le32_to_cpu(mem_cfg->rxfifo2_control_size);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 4ae4d215e633..a7ba86e06c09 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -402,8 +402,9 @@ static int iwl_uefi_uats_parse(struct uefi_cnv_wlan_uats_data *uats_data,
if (uats_data->revision != 1)
return -EINVAL;
- memcpy(fwrt->uats_table.offset_map, uats_data->offset_map,
- sizeof(fwrt->uats_table.offset_map));
+ memcpy(fwrt->uats_table.mcc_to_ap_type_map,
+ uats_data->mcc_to_ap_type_map,
+ sizeof(fwrt->uats_table.mcc_to_ap_type_map));
fwrt->uats_valid = true;
@@ -721,17 +722,12 @@ out:
return ret;
}
-int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
- u32 *value)
+static int iwl_uefi_load_dsm_values(struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_var_general_cfg *data;
int ret = -EINVAL;
- BUILD_BUG_ON(ARRAY_SIZE(data->functions) < DSM_FUNC_NUM_FUNCS);
-
- /* Not supported function index */
- if (func >= DSM_FUNC_NUM_FUNCS || func == 5)
- return -EOPNOTSUPP;
+ BUILD_BUG_ON(ARRAY_SIZE(data->functions) < ARRAY_SIZE(fwrt->dsm_values));
data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_DSM_NAME,
"DSM", sizeof(*data), NULL);
@@ -743,24 +739,66 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
data->revision);
goto out;
}
+ fwrt->dsm_revision = data->revision;
+ fwrt->dsm_source = BIOS_SOURCE_UEFI;
- if (!(data->functions[DSM_FUNC_QUERY] & BIT(func))) {
- IWL_DEBUG_RADIO(fwrt, "DSM func %d not in 0x%x\n",
- func, data->functions[DSM_FUNC_QUERY]);
- goto out;
- }
+ fwrt->dsm_funcs_valid = data->functions[DSM_FUNC_QUERY];
- *value = data->functions[func];
+ /*
+ * Make sure we don't load the DSM values twice. Set this only after we
+ * validated the DSM table so that if the table in UEFI is not valid,
+	 * we will fall back to ACPI.
+ */
+ fwrt->dsm_funcs_valid |= BIT(DSM_FUNC_QUERY);
- IWL_DEBUG_RADIO(fwrt,
- "UEFI: DSM func=%d: value=%d\n", func, *value);
+ for (int func = 1; func < ARRAY_SIZE(fwrt->dsm_values); func++) {
+ if (!(fwrt->dsm_funcs_valid & BIT(func))) {
+ IWL_DEBUG_RADIO(fwrt, "DSM func %d not in 0x%x\n",
+ func, fwrt->dsm_funcs_valid);
+ continue;
+ }
+ fwrt->dsm_values[func] = data->functions[func];
+
+ IWL_DEBUG_RADIO(fwrt,
+ "UEFI: DSM func=%d: value=%d\n", func,
+ fwrt->dsm_values[func]);
+ }
ret = 0;
+
out:
kfree(data);
return ret;
}
+int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value)
+{
+ /* Not supported function index */
+ if (func >= DSM_FUNC_NUM_FUNCS || func == 5)
+ return -EOPNOTSUPP;
+
+ if (!fwrt->dsm_funcs_valid) {
+ int ret = iwl_uefi_load_dsm_values(fwrt);
+
+ if (ret)
+ return ret;
+ }
+
+ if (!(fwrt->dsm_funcs_valid & BIT(func))) {
+ IWL_DEBUG_RADIO(fwrt, "DSM func %d not in 0x%x\n",
+ func, fwrt->dsm_funcs_valid);
+ return -EINVAL;
+ }
+
+ *value = fwrt->dsm_values[func];
+
+ IWL_DEBUG_RADIO(fwrt,
+ "UEFI: DSM func=%d: value=%d\n", func, *value);
+
+ return 0;
+}
+
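
With the table cached in fwrt->dsm_values, repeated queries are served from memory; only the first call pays for the UEFI variable read. Illustrative usage (use_value() is a placeholder and the function index is just an example):

	u32 enable_11be;

	/* First call loads and validates the UEFI DSM table; later
	 * calls hit the cache.
	 */
	if (!iwl_uefi_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &enable_11be))
		use_value(enable_11be);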
int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_var_puncturing_data *data;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index 5a4c557e47c7..349ac1505ad7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -60,7 +60,7 @@ struct uefi_cnv_wlan_sgom_data {
struct uefi_cnv_wlan_uats_data {
u8 revision;
- u8 offset_map[IWL_UATS_MAP_SIZE - 1];
+ u8 mcc_to_ap_type_map[IWL_UATS_MAP_SIZE - 1];
} __packed;
struct uefi_cnv_common_step_data {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 076810ee5d34..45cf2bc68e41 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -687,7 +687,6 @@ extern const char iwl_killer_bn1850w2_name[];
extern const char iwl_killer_bn1850i_name[];
extern const char iwl_bn201_name[];
extern const char iwl_bn203_name[];
-extern const char iwl_be221_name[];
extern const char iwl_be223_name[];
extern const char iwl_ax221_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/Makefile b/drivers/net/wireless/intel/iwlwifi/mld/Makefile
index c966e573f430..5740c0510b61 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mld/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
iwlmld-y += mld.o notif.o mac80211.o fw.o power.o iface.o link.o rx.o mcc.o session-protect.o phy.o
iwlmld-y += scan.o sta.o tx.o coex.o tlc.o agg.o key.o regulatory.o ap.o thermal.o roc.o stats.o
-iwlmld-y += low_latency.o mlo.o ptp.o time_sync.o ftm-initiator.o
+iwlmld-y += low_latency.o mlo.o ptp.o time_sync.o ftm-initiator.o nan.o
iwlmld-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
iwlmld-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmld-$(CONFIG_PM_SLEEP) += d3.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
index dd85be94433c..6595542e95cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -996,8 +996,6 @@ static void iwl_mld_mlo_rekey(struct iwl_mld *mld,
struct iwl_mld_wowlan_status *wowlan_status,
struct ieee80211_vif *vif)
{
- struct iwl_mld_old_mlo_keys *old_keys __free(kfree) = NULL;
-
IWL_DEBUG_WOWLAN(mld, "Num of MLO Keys: %d\n", wowlan_status->num_mlo_keys);
if (!wowlan_status->num_mlo_keys)
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
index b9c9cd3f44e4..5c2a2033b3fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
@@ -244,7 +244,7 @@ static size_t iwl_mld_dump_tas_resp(struct iwl_dhc_tas_status_resp *resp,
}
pos += scnprintf(buf + pos, count - pos, "TAS Report\n");
- switch (resp->tas_config_info.table_source) {
+ switch (resp->tas_config_info.hdr.table_source) {
case BIOS_SOURCE_NONE:
pos += scnprintf(buf + pos, count - pos,
"BIOS SOURCE NONE ");
@@ -260,13 +260,13 @@ static size_t iwl_mld_dump_tas_resp(struct iwl_dhc_tas_status_resp *resp,
default:
pos += scnprintf(buf + pos, count - pos,
"BIOS SOURCE UNKNOWN (%d) ",
- resp->tas_config_info.table_source);
+ resp->tas_config_info.hdr.table_source);
break;
}
pos += scnprintf(buf + pos, count - pos,
"revision is: %d data is: 0x%08x\n",
- resp->tas_config_info.table_revision,
+ resp->tas_config_info.hdr.table_revision,
resp->tas_config_info.value);
pos += scnprintf(buf + pos, count - pos, "Current MCC: 0x%x\n",
le16_to_cpu(resp->curr_mcc));
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
index f15d1f5d1bf5..743e44ff19cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
@@ -337,6 +337,10 @@ int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
lockdep_assert_wiphy(mld->wiphy);
+ /* NAN interface type is not known to FW */
+ if (vif->type == NL80211_IFTYPE_NAN)
+ return 0;
+
if (action == FW_CTXT_ACTION_REMOVE)
return iwl_mld_rm_mac_from_fw(mld, vif);
@@ -385,21 +389,16 @@ static void iwl_mld_mlo_scan_start_wk(struct wiphy *wiphy,
IWL_MLD_ALLOC_FN(vif, vif)
/* Constructor function for struct iwl_mld_vif */
-static int
+static void
iwl_mld_init_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- int ret;
lockdep_assert_wiphy(mld->wiphy);
mld_vif->mld = mld;
mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
- ret = iwl_mld_allocate_vif_fw_id(mld, &mld_vif->fw_id, vif);
- if (ret)
- return ret;
-
if (!mld->fw_status.in_hw_restart) {
wiphy_work_init(&mld_vif->emlsr.unblock_tpt_wk,
iwl_mld_emlsr_unblock_tpt_wk);
@@ -413,8 +412,6 @@ iwl_mld_init_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
iwl_mld_mlo_scan_start_wk);
}
iwl_mld_init_internal_sta(&mld_vif->aux_sta);
-
- return 0;
}
int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
@@ -424,7 +421,13 @@ int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
lockdep_assert_wiphy(mld->wiphy);
- ret = iwl_mld_init_vif(mld, vif);
+ iwl_mld_init_vif(mld, vif);
+
+ /* NAN interface type is not known to FW */
+ if (vif->type == NL80211_IFTYPE_NAN)
+ return 0;
+
+ ret = iwl_mld_allocate_vif_fw_id(mld, &mld_vif->fw_id, vif);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
index a3573d20f214..62fca166afd1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
@@ -32,6 +32,7 @@ enum iwl_mld_cca_40mhz_wa_status {
* link is preventing EMLSR. This is a temporary blocking that is set when
* there is an indication that a non-BSS interface is to be added.
* @IWL_MLD_EMLSR_BLOCKED_TPT: throughput is too low to make EMLSR worthwhile
+ * @IWL_MLD_EMLSR_BLOCKED_NAN: NAN is preventing EMLSR.
*/
enum iwl_mld_emlsr_blocked {
IWL_MLD_EMLSR_BLOCKED_PREVENTION = 0x1,
@@ -40,6 +41,7 @@ enum iwl_mld_emlsr_blocked {
IWL_MLD_EMLSR_BLOCKED_NON_BSS = 0x8,
IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS = 0x10,
IWL_MLD_EMLSR_BLOCKED_TPT = 0x20,
+ IWL_MLD_EMLSR_BLOCKED_NAN = 0x40,
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
index cd0dce8de856..3414b04a6953 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -50,7 +50,7 @@
{ \
.max = 1, \
.types = BIT(NL80211_IFTYPE_P2P_DEVICE), \
- }
+ },
static const struct ieee80211_iface_limit iwl_mld_limits[] = {
IWL_MLD_LIMITS(0)
@@ -60,6 +60,22 @@ static const struct ieee80211_iface_limit iwl_mld_limits_ap[] = {
IWL_MLD_LIMITS(BIT(NL80211_IFTYPE_AP))
};
+static const struct ieee80211_iface_limit iwl_mld_limits_nan[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_NAN),
+ },
+	/* Excluded from the combination that permits two channels */
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+};
+
static const struct ieee80211_iface_combination
iwl_mld_iface_combinations[] = {
{
@@ -74,6 +90,19 @@ iwl_mld_iface_combinations[] = {
.limits = iwl_mld_limits_ap,
.n_limits = ARRAY_SIZE(iwl_mld_limits_ap),
},
+	/* NAN combinations follow; these exclude P2P */
+ {
+ .num_different_channels = 2,
+ .max_interfaces = 3,
+ .limits = iwl_mld_limits_nan,
+ .n_limits = ARRAY_SIZE(iwl_mld_limits_nan) - 1,
+ },
+ {
+ .num_different_channels = 1,
+ .max_interfaces = 4,
+ .limits = iwl_mld_limits_nan,
+ .n_limits = ARRAY_SIZE(iwl_mld_limits_nan),
+ }
};
static const u8 ext_capa_base[IWL_MLD_STA_EXT_CAPA_SIZE] = {
@@ -305,8 +334,38 @@ static void iwl_mac_hw_set_wiphy(struct iwl_mld *mld)
wiphy->hw_timestamp_max_peers = 1;
- wiphy->iface_combinations = iwl_mld_iface_combinations;
- wiphy->n_iface_combinations = ARRAY_SIZE(iwl_mld_iface_combinations);
+ if (iwl_mld_nan_supported(mld)) {
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_NAN);
+ hw->wiphy->iface_combinations = iwl_mld_iface_combinations;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwl_mld_iface_combinations);
+
+ hw->wiphy->nan_supported_bands = BIT(NL80211_BAND_2GHZ);
+ if (mld->nvm_data->bands[NL80211_BAND_5GHZ].n_channels)
+ hw->wiphy->nan_supported_bands |=
+ BIT(NL80211_BAND_5GHZ);
+
+ hw->wiphy->nan_capa.flags = WIPHY_NAN_FLAGS_CONFIGURABLE_SYNC |
+ WIPHY_NAN_FLAGS_USERSPACE_DE;
+
+ hw->wiphy->nan_capa.op_mode = NAN_OP_MODE_PHY_MODE_MASK |
+ NAN_OP_MODE_80P80MHZ |
+ NAN_OP_MODE_160MHZ;
+
+		/* Support two antennas for both Tx and Rx */
+ hw->wiphy->nan_capa.n_antennas = 0x22;
+
+		/* Maximum channel switch time is 4 ms */
+ hw->wiphy->nan_capa.max_channel_switch_time = 4;
+ hw->wiphy->nan_capa.dev_capabilities =
+ NAN_DEV_CAPA_EXT_KEY_ID_SUPPORTED |
+ NAN_DEV_CAPA_NDPE_SUPPORTED;
+ } else {
+ wiphy->iface_combinations = iwl_mld_iface_combinations;
+ /* Do not include NAN combinations */
+ wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwl_mld_iface_combinations) - 2;
+ }
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_CONCURRENT);
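
One plausible reading of the 0x22 value assigned to nan_capa.n_antennas above is one nibble per direction (Tx in the high nibble, Rx in the low one). If that holds, a hypothetical helper for building the value would look like:

	/* Hypothetical: pack per-direction antenna counts, assuming the
	 * nibble layout implied by the 0x22 literal above.
	 */
	static inline u8 nan_pack_antennas(u8 tx, u8 rx)
	{
		return (tx << 4) | rx;
	}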
@@ -318,6 +377,8 @@ static void iwl_mac_hw_set_wiphy(struct iwl_mld *mld)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
if (fw_has_capa(ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT))
@@ -616,10 +677,11 @@ int iwl_mld_mac80211_add_interface(struct ieee80211_hw *hw,
* Add the default link, but not if this is an MLD vif as that implies
* the HW is restarting and it will be configured by change_vif_links.
*/
- if (!ieee80211_vif_is_mld(vif))
+ if (vif->type != NL80211_IFTYPE_NAN && !ieee80211_vif_is_mld(vif)) {
ret = iwl_mld_add_link(mld, &vif->bss_conf);
- if (ret)
- goto err;
+ if (ret)
+ goto err;
+ }
if (vif->type == NL80211_IFTYPE_STATION) {
vif->driver_flags |= IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;
@@ -647,6 +709,9 @@ int iwl_mld_mac80211_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mld->p2p_device_vif = vif;
+ if (vif->type == NL80211_IFTYPE_NAN)
+ mld->nan_device_vif = vif;
+
return 0;
err:
@@ -674,7 +739,10 @@ void iwl_mld_mac80211_remove_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mld->p2p_device_vif = NULL;
- iwl_mld_remove_link(mld, &vif->bss_conf);
+ if (vif->type == NL80211_IFTYPE_NAN)
+ mld->nan_device_vif = NULL;
+ else
+ iwl_mld_remove_link(mld, &vif->bss_conf);
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove(iwl_mld_vif_from_mac80211(vif)->dbgfs_slink);
@@ -984,7 +1052,9 @@ int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
{
struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
- unsigned int n_active = iwl_mld_count_active_links(mld, vif);
+ struct iwl_mld_link *temp_mld_link;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ u16 final_active_links = 0;
int ret;
lockdep_assert_wiphy(mld->wiphy);
@@ -992,10 +1062,7 @@ int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
if (WARN_ON(!mld_link))
return -EINVAL;
- /* if the assigned one was not counted yet, count it now */
if (!rcu_access_pointer(mld_link->chan_ctx)) {
- n_active++;
-
/* Track addition of non-BSS link */
if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
ret = iwl_mld_emlsr_check_non_bss_block(mld, 1);
@@ -1016,17 +1083,25 @@ int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
rcu_assign_pointer(mld_link->chan_ctx, ctx);
- if (n_active > 1) {
- struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ /* We cannot rely on vif->active_links at this stage as it contains
+ * both the removed links and the newly added links.
+ * Therefore, we create our own bitmap of the final active links,
+ * which does not include the removed links.
+ */
+ for_each_mld_vif_valid_link(mld_vif, temp_mld_link) {
+ if (rcu_access_pointer(temp_mld_link->chan_ctx))
+ final_active_links |= BIT(link_id);
+ }
+ if (hweight16(final_active_links) > 1) {
/* Indicate to mac80211 that EML is enabled */
vif->driver_flags |= IEEE80211_VIF_EML_ACTIVE;
mld_vif->emlsr.last_entry_ts = jiffies;
- if (vif->active_links & BIT(mld_vif->emlsr.selected_links))
+ if (final_active_links == mld_vif->emlsr.selected_links)
mld_vif->emlsr.primary = mld_vif->emlsr.selected_primary;
else
- mld_vif->emlsr.primary = __ffs(vif->active_links);
+ mld_vif->emlsr.primary = __ffs(final_active_links);
iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP,
NULL);
@@ -1506,6 +1581,9 @@ iwl_mld_mac80211_conf_tx(struct ieee80211_hw *hw,
lockdep_assert_wiphy(mld->wiphy);
+ if (vif->type == NL80211_IFTYPE_NAN)
+ return 0;
+
link = iwl_mld_link_dereference_check(mld_vif, link_id);
if (!link)
return -EINVAL;
@@ -1706,6 +1784,9 @@ static int iwl_mld_move_sta_state_up(struct iwl_mld *mld,
/* Ensure any block due to a non-BSS link is synced */
iwl_mld_emlsr_check_non_bss_block(mld, 0);
+ /* Ensure NAN block is synced */
+ iwl_mld_emlsr_check_nan_block(mld, vif);
+
/* Block EMLSR until a certain throughput it reached */
if (!mld->fw_status.in_hw_restart &&
IWL_MLD_ENTER_EMLSR_TPT_THRESH > 0)
@@ -2701,4 +2782,7 @@ const struct ieee80211_ops iwl_mld_hw_ops = {
.set_hw_timestamp = iwl_mld_set_hw_timestamp,
.start_pmsr = iwl_mld_start_pmsr,
.can_neg_ttlm = iwl_mld_can_neg_ttlm,
+ .start_nan = iwl_mld_start_nan,
+ .stop_nan = iwl_mld_stop_nan,
+ .nan_change_conf = iwl_mld_nan_change_config,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
index 8a4c96385640..495e9d8f3af6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -234,6 +234,9 @@ static const struct iwl_hcmd_names iwl_mld_mac_conf_names[] = {
HCMD_NAME(AUX_STA_CMD),
HCMD_NAME(STA_REMOVE_CMD),
HCMD_NAME(ROC_CMD),
+ HCMD_NAME(NAN_CFG_CMD),
+ HCMD_NAME(NAN_DW_END_NOTIF),
+ HCMD_NAME(NAN_JOINED_CLUSTER_NOTIF),
HCMD_NAME(MISSED_BEACONS_NOTIF),
HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF),
HCMD_NAME(ROC_NOTIF),
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
index 22efe8e10f53..66c7a7d31409 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
@@ -35,6 +35,7 @@
#include "ptp.h"
#include "time_sync.h"
#include "ftm-initiator.h"
+#include "nan.h"
/**
* DOC: Introduction
@@ -199,6 +200,7 @@
* @ptp_data: data of the PTP clock
* @time_sync: time sync data.
* @ftm_initiator: FTM initiator data
+ * @nan_device_vif: points to the NAN device vif, if one exists
*/
struct iwl_mld {
/* Add here fields that need clean up on restart */
@@ -228,6 +230,7 @@ struct iwl_mld {
#endif /* CONFIG_PM_SLEEP */
struct ieee80211_vif *p2p_device_vif;
bool bt_is_active;
+ struct ieee80211_vif *nan_device_vif;
);
struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX];
/* And here fields that survive a fw restart */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
index c6b151f26921..f842f5183223 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
@@ -12,7 +12,8 @@
HOW(ROC) \
HOW(NON_BSS) \
HOW(TMP_NON_BSS) \
- HOW(TPT)
+ HOW(TPT) \
+ HOW(NAN)
static const char *
iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
@@ -451,29 +452,49 @@ static void iwl_mld_count_non_bss_links(void *_data, u8 *mac,
struct iwl_mld_update_emlsr_block_data {
bool block;
+ enum iwl_mld_emlsr_blocked reason;
int result;
};
static void
-iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+iwl_mld_vif_iter_update_emlsr_block(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
struct iwl_mld_update_emlsr_block_data *data = _data;
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
int ret;
+ if (!iwl_mld_vif_has_emlsr_cap(vif))
+ return;
+
if (data->block) {
ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
- IWL_MLD_EMLSR_BLOCKED_NON_BSS,
+ data->reason,
iwl_mld_get_primary_link(vif));
if (ret)
data->result = ret;
} else {
iwl_mld_unblock_emlsr(mld_vif->mld, vif,
- IWL_MLD_EMLSR_BLOCKED_NON_BSS);
+ data->reason);
}
}
+int iwl_mld_update_emlsr_block(struct iwl_mld *mld, bool block,
+ enum iwl_mld_emlsr_blocked reason)
+{
+ struct iwl_mld_update_emlsr_block_data block_data = {
+ .block = block,
+ .reason = reason,
+ };
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_update_emlsr_block,
+ &block_data);
+
+ return block_data.result;
+}
+
int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
int pending_link_changes)
{
@@ -481,7 +502,6 @@ int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
* block EMLSR on the bss vif. Upon deactivation, check if this link
* was the last non-station link active, and if so unblock the bss vif
*/
- struct iwl_mld_update_emlsr_block_data block_data = {};
int count = pending_link_changes;
/* No need to count if we are activating a non-BSS link */
@@ -495,14 +515,8 @@ int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
* We could skip updating it if the block change did not change (and
* pending_link_changes is non-zero).
*/
- block_data.block = !!count;
-
- ieee80211_iterate_active_interfaces_mtx(mld->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mld_vif_iter_update_emlsr_non_bss_block,
- &block_data);
-
- return block_data.result;
+ return iwl_mld_update_emlsr_block(mld, !!count,
+ IWL_MLD_EMLSR_BLOCKED_NON_BSS);
}
#define EMLSR_SEC_LINK_MIN_PERC 10
@@ -844,9 +858,9 @@ iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif,
if (c_low->chan->center_freq > c_high->chan->center_freq)
swap(c_low, c_high);
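+	/* Band edges must be computed from the chandef center frequency
+	 * (center_freq1), not from the control channel's center
+	 */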
- c_low_upper_edge = c_low->chan->center_freq +
+ c_low_upper_edge = c_low->center_freq1 +
cfg80211_chandef_get_width(c_low) / 2;
- c_high_lower_edge = c_high->chan->center_freq -
+ c_high_lower_edge = c_high->center_freq1 -
cfg80211_chandef_get_width(c_high) / 2;
if (a->chandef->chan->band == NL80211_BAND_5GHZ &&
@@ -1197,3 +1211,16 @@ void iwl_mld_stop_ignoring_tpt_updates(struct iwl_mld *mld)
iwl_mld_ignore_tpt_iter,
&start);
}
+
+int iwl_mld_emlsr_check_nan_block(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ if (mld->nan_device_vif &&
+ ieee80211_vif_nan_started(mld->nan_device_vif))
+ return iwl_mld_block_emlsr_sync(mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_NAN,
+ iwl_mld_get_primary_link(vif));
+
+ iwl_mld_unblock_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_NAN);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
index d936589fe39d..ccc3a7afa095 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
@@ -150,6 +150,11 @@ void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw,
*/
void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif);
+int iwl_mld_emlsr_check_nan_block(struct iwl_mld *mld, struct ieee80211_vif *vif);
+
+int iwl_mld_update_emlsr_block(struct iwl_mld *mld, bool block,
+ enum iwl_mld_emlsr_blocked reason);
+
struct iwl_mld_link_sel_data {
u8 link_id;
const struct cfg80211_chan_def *chandef;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/nan.c b/drivers/net/wireless/intel/iwlwifi/mld/nan.c
new file mode 100644
index 000000000000..2dbd3d58b0c6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/nan.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "iface.h"
+#include "mlo.h"
+#include "fw/api/mac-cfg.h"
+
+#define IWL_NAN_DISCOVERY_BEACON_INTERVAL_TU	512
+#define IWL_NAN_RSSI_CLOSE 55
+#define IWL_NAN_RSSI_MIDDLE 70
+
+bool iwl_mld_nan_supported(struct iwl_mld *mld)
+{
+ return fw_has_capa(&mld->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_NAN_SYNC_SUPPORT);
+}
+
+static int iwl_mld_nan_send_config_cmd(struct iwl_mld *mld,
+ struct iwl_nan_config_cmd *cmd,
+ u8 *beacon_data, size_t beacon_data_len)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(MAC_CONF_GROUP, NAN_CFG_CMD),
+ };
+
+ hcmd.len[0] = sizeof(*cmd);
+ hcmd.data[0] = cmd;
+
+ if (beacon_data_len) {
+ hcmd.len[1] = beacon_data_len;
+ hcmd.data[1] = beacon_data;
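+		/* DUP: the command layer copies this buffer, so the caller may free it */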
+ hcmd.dataflags[1] = IWL_HCMD_DFL_DUP;
+ }
+
+ return iwl_mld_send_cmd(mld, &hcmd);
+}
+
+static int iwl_mld_nan_config(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf,
+ enum iwl_ctxt_action action)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_nan_config_cmd cmd = {
+ .action = cpu_to_le32(action),
+ };
+ u8 *data __free(kfree) = NULL;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ether_addr_copy(cmd.nmi_addr, vif->addr);
+ cmd.master_pref = conf->master_pref;
+
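+	/* only the last two octets of the 50:6F:9A:01:xx:yy cluster ID vary */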
+ if (conf->cluster_id)
+ memcpy(cmd.cluster_id, conf->cluster_id + 4,
+ sizeof(cmd.cluster_id));
+
+ cmd.scan_period = conf->scan_period < 255 ? conf->scan_period : 255;
+ cmd.dwell_time =
+ conf->scan_dwell_time < 255 ? conf->scan_dwell_time : 255;
+
+ if (conf->discovery_beacon_interval)
+ cmd.discovery_beacon_interval =
+ cpu_to_le32(conf->discovery_beacon_interval);
+ else
+ cmd.discovery_beacon_interval =
+			cpu_to_le32(IWL_NAN_DISCOVERY_BEACON_INTERVAL_TU);
+
+ if (conf->enable_dw_notification)
+ cmd.flags = IWL_NAN_FLAG_DW_END_NOTIF_ENABLED;
+
+ /* 2 GHz band must be supported */
+ cmd.band_config[IWL_NAN_BAND_2GHZ].rssi_close =
+ abs(conf->band_cfgs[NL80211_BAND_2GHZ].rssi_close);
+ cmd.band_config[IWL_NAN_BAND_2GHZ].rssi_middle =
+ abs(conf->band_cfgs[NL80211_BAND_2GHZ].rssi_middle);
+ cmd.band_config[IWL_NAN_BAND_2GHZ].dw_interval =
+ conf->band_cfgs[NL80211_BAND_2GHZ].awake_dw_interval;
+
+	/* Operation on the 5 GHz band is optional; configure it if
+	 * supported. Note that conf->bands might be zero, so we need to check
+ * the channel pointer, not the band mask.
+ */
+ if (conf->band_cfgs[NL80211_BAND_5GHZ].chan) {
+ cmd.hb_channel =
+ conf->band_cfgs[NL80211_BAND_5GHZ].chan->hw_value;
+
+ cmd.band_config[IWL_NAN_BAND_5GHZ].rssi_close =
+ abs(conf->band_cfgs[NL80211_BAND_5GHZ].rssi_close);
+ cmd.band_config[IWL_NAN_BAND_5GHZ].rssi_middle =
+ abs(conf->band_cfgs[NL80211_BAND_5GHZ].rssi_middle);
+ cmd.band_config[IWL_NAN_BAND_5GHZ].dw_interval =
+ conf->band_cfgs[NL80211_BAND_5GHZ].awake_dw_interval;
+ }
+
+ if (conf->extra_nan_attrs_len || conf->vendor_elems_len) {
+ data = kmalloc(conf->extra_nan_attrs_len +
+ conf->vendor_elems_len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ cmd.nan_attr_len = cpu_to_le32(conf->extra_nan_attrs_len);
+ cmd.nan_vendor_elems_len = cpu_to_le32(conf->vendor_elems_len);
+
+ if (conf->extra_nan_attrs_len)
+ memcpy(data, conf->extra_nan_attrs,
+ conf->extra_nan_attrs_len);
+
+ if (conf->vendor_elems_len)
+ memcpy(data + conf->extra_nan_attrs_len,
+ conf->vendor_elems,
+ conf->vendor_elems_len);
+ }
+
+ cmd.sta_id = mld_vif->aux_sta.sta_id;
+ return iwl_mld_nan_send_config_cmd(mld, &cmd, data,
+ conf->extra_nan_attrs_len +
+ conf->vendor_elems_len);
+}
+
+int iwl_mld_start_nan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_int_sta *aux_sta = &mld_vif->aux_sta;
+ int ret;
+
+ IWL_DEBUG_MAC80211(mld, "NAN: start: bands=0x%x\n", conf->bands);
+
+ ret = iwl_mld_update_emlsr_block(mld, true, IWL_MLD_EMLSR_BLOCKED_NAN);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_add_aux_sta(mld, aux_sta);
+ if (ret)
+ goto unblock_emlsr;
+
+ ret = iwl_mld_nan_config(mld, vif, conf, FW_CTXT_ACTION_ADD);
+ if (ret) {
+ IWL_ERR(mld, "Failed to start NAN. ret=%d\n", ret);
+ goto remove_aux;
+ }
+ return 0;
+
+remove_aux:
+ iwl_mld_remove_aux_sta(mld, vif);
+unblock_emlsr:
+ iwl_mld_update_emlsr_block(mld, false, IWL_MLD_EMLSR_BLOCKED_NAN);
+
+ return ret;
+}
+
+int iwl_mld_nan_change_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf,
+ u32 changes)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ IWL_DEBUG_MAC80211(mld, "NAN: change: changes=0x%x, bands=0x%x\n",
+ changes, conf->bands);
+
+	/* Note that we do not use 'changes', as the FW always expects the
+	 * complete configuration and mac80211 always provides it.
+ */
+ return iwl_mld_nan_config(mld, vif, conf, FW_CTXT_ACTION_MODIFY);
+}
+
+int iwl_mld_stop_nan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_nan_config_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ };
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, NAN_CFG_CMD),
+ &cmd);
+ if (ret)
+ IWL_ERR(mld, "NAN: Failed to stop NAN. ret=%d\n", ret);
+
+	/* assume that the higher layer guarantees that no additional frames
+	 * are queued before calling this callback
+ */
+ iwl_mld_flush_link_sta_txqs(mld, mld_vif->aux_sta.sta_id);
+ iwl_mld_remove_aux_sta(mld, vif);
+
+ /* cancel based on object type being NAN, as the NAN objects do
+ * not have a unique identifier associated with them
+ */
+ iwl_mld_cancel_notifications_of_object(mld,
+ IWL_MLD_OBJECT_TYPE_NAN,
+ 0);
+
+ iwl_mld_update_emlsr_block(mld, false, IWL_MLD_EMLSR_BLOCKED_NAN);
+
+ return 0;
+}
+
+void iwl_mld_handle_nan_cluster_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_nan_cluster_notif *notif = (void *)pkt->data;
+ struct wireless_dev *wdev = mld->nan_device_vif ?
+ ieee80211_vif_to_wdev(mld->nan_device_vif) : NULL;
+ bool new_cluster = !!(notif->flags &
+ IWL_NAN_CLUSTER_NOTIF_FLAG_NEW_CLUSTER);
+ u8 cluster_id[ETH_ALEN] = {
+ 0x50, 0x6f, 0x9a, 0x01,
+ notif->cluster_id[0], notif->cluster_id[1]
+ };
+
+ IWL_DEBUG_INFO(mld,
+ "NAN: cluster event: cluster_id=%pM, flags=0x%x\n",
+ cluster_id, notif->flags);
+
+ if (IWL_FW_CHECK(mld, !wdev, "NAN: cluster event without wdev\n"))
+ return;
+
+ if (IWL_FW_CHECK(mld, !ieee80211_vif_nan_started(mld->nan_device_vif),
+ "NAN: cluster event without NAN started\n"))
+ return;
+
+ cfg80211_nan_cluster_joined(wdev, cluster_id, new_cluster, GFP_KERNEL);
+}
+
+bool iwl_mld_cancel_nan_cluster_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt,
+ u32 obj_id)
+{
+ return true;
+}
+
+bool iwl_mld_cancel_nan_dw_end_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt,
+ u32 obj_id)
+{
+ return true;
+}
+
+void iwl_mld_handle_nan_dw_end_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_nan_dw_end_notif *notif = (void *)pkt->data;
+ struct iwl_mld_vif *mld_vif = mld->nan_device_vif ?
+ iwl_mld_vif_from_mac80211(mld->nan_device_vif) :
+ NULL;
+ struct wireless_dev *wdev;
+ struct ieee80211_channel *chan;
+
+ IWL_INFO(mld, "NAN: DW end: band=%u\n", notif->band);
+
+ if (IWL_FW_CHECK(mld, !mld_vif, "NAN: DW end without mld_vif\n"))
+ return;
+
+ if (IWL_FW_CHECK(mld, !ieee80211_vif_nan_started(mld->nan_device_vif),
+ "NAN: DW end without NAN started\n"))
+ return;
+
+ if (WARN_ON(mld_vif->aux_sta.sta_id == IWL_INVALID_STA))
+ return;
+
+ IWL_DEBUG_INFO(mld, "NAN: flush queues for aux sta=%u\n",
+ mld_vif->aux_sta.sta_id);
+
+ iwl_mld_flush_link_sta_txqs(mld, mld_vif->aux_sta.sta_id);
+
+	/* TODO: currently the notification specifies only the band on which
+	 * the DW ended. Change that to the actual channel on which the next
+	 * DW will start.
+ */
+ switch (notif->band) {
+ case IWL_NAN_BAND_2GHZ:
+ chan = ieee80211_get_channel(mld->wiphy, 2437);
+ break;
+ case IWL_NAN_BAND_5GHZ:
+ /* TODO: use the actual channel */
+ chan = ieee80211_get_channel(mld->wiphy, 5745);
+ break;
+ default:
+ IWL_FW_CHECK(mld, false,
+ "NAN: Invalid band %u in DW end notif\n",
+ notif->band);
+ return;
+ }
+
+ wdev = ieee80211_vif_to_wdev(mld->nan_device_vif);
+ cfg80211_next_nan_dw_notif(wdev, chan, GFP_KERNEL);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/nan.h b/drivers/net/wireless/intel/iwlwifi/mld/nan.h
new file mode 100644
index 000000000000..c9c83d1012f0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/nan.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#ifndef __iwl_mld_nan_h__
+#define __iwl_mld_nan_h__
+
+#include <net/cfg80211.h>
+#include <linux/etherdevice.h>
+
+bool iwl_mld_nan_supported(struct iwl_mld *mld);
+int iwl_mld_start_nan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf);
+int iwl_mld_nan_change_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf,
+ u32 changes);
+int iwl_mld_stop_nan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void iwl_mld_handle_nan_cluster_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+void iwl_mld_handle_nan_dw_end_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+bool iwl_mld_cancel_nan_cluster_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt,
+ u32 obj_id);
+bool iwl_mld_cancel_nan_dw_end_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt,
+				     u32 obj_id);
+
+#endif /* __iwl_mld_nan_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.c b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
index 4cf3920b005f..35356b244c0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/notif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
@@ -111,6 +111,9 @@ static bool iwl_mld_cancel_##name##_notif(struct iwl_mld *mld, \
#define RX_HANDLER_OF_FTM_REQ(_grp, _cmd, _name) \
RX_HANDLER_OF_OBJ(_grp, _cmd, _name, FTM_REQ)
+#define RX_HANDLER_OF_NAN(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, NAN)
+
static void iwl_mld_handle_mfuart_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt)
{
@@ -344,6 +347,8 @@ CMD_VERSIONS(time_sync_confirm_notif,
CMD_VER_ENTRY(1, iwl_time_msmt_cfm_notify))
CMD_VERSIONS(ftm_resp_notif, CMD_VER_ENTRY(10, iwl_tof_range_rsp_ntfy))
CMD_VERSIONS(beacon_filter_notif, CMD_VER_ENTRY(2, iwl_beacon_filter_notif))
+CMD_VERSIONS(nan_cluster_notif, CMD_VER_ENTRY(1, iwl_nan_cluster_notif))
+CMD_VERSIONS(nan_dw_end_notif, CMD_VER_ENTRY(1, iwl_nan_dw_end_notif))
DEFINE_SIMPLE_CANCELLATION(session_prot, iwl_session_prot_notif, mac_link_id)
DEFINE_SIMPLE_CANCELLATION(tlc, iwl_tlc_update_notif, sta_id)
@@ -459,6 +464,10 @@ const struct iwl_rx_handler iwl_mld_rx_handlers[] = {
beacon_filter_notif)
RX_HANDLER_OF_FTM_REQ(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
ftm_resp_notif)
+ RX_HANDLER_OF_NAN(MAC_CONF_GROUP, NAN_JOINED_CLUSTER_NOTIF,
+ nan_cluster_notif)
+ RX_HANDLER_OF_NAN(MAC_CONF_GROUP, NAN_DW_END_NOTIF,
+ nan_dw_end_notif)
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers);
@@ -531,6 +540,8 @@ static void iwl_mld_rx_notif(struct iwl_mld *mld,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_rx_packet *pkt)
{
+ union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
+
for (int i = 0; i < ARRAY_SIZE(iwl_mld_rx_handlers); i++) {
const struct iwl_rx_handler *rx_h = &iwl_mld_rx_handlers[i];
struct iwl_async_handler_entry *entry;
@@ -571,6 +582,8 @@ static void iwl_mld_rx_notif(struct iwl_mld *mld,
}
iwl_notification_wait_notify(&mld->notif_wait, pkt);
+ iwl_dbg_tlv_time_point(&mld->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, &tp_data);
}
void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.h b/drivers/net/wireless/intel/iwlwifi/mld/notif.h
index adcdd9dec192..373c1a90d98e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/notif.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.h
@@ -25,6 +25,7 @@ enum iwl_mld_object_type {
IWL_MLD_OBJECT_TYPE_ROC,
IWL_MLD_OBJECT_TYPE_SCAN,
IWL_MLD_OBJECT_TYPE_FTM_REQ,
+ IWL_MLD_OBJECT_TYPE_NAN,
};
void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/power.c b/drivers/net/wireless/intel/iwlwifi/mld/power.c
index f664b277adf7..c3318e84f4a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/power.c
@@ -328,6 +328,33 @@ iwl_mld_tpe_sta_cmd_data(struct iwl_txpower_constraints_cmd *cmd,
link->tpe.max_reg_client[0].power[i]);
}
+static int
+iwl_mld_set_ap_power_type(struct iwl_txpower_constraints_cmd *cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ if (vif->type == NL80211_IFTYPE_AP) {
+ cmd->ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
+ return 0;
+ }
+
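+	/* map the power type explicitly rather than relying on the enum
+	 * values lining up (the removed code used link->power_type - 1)
+	 */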
+ switch (link->power_type) {
+ case IEEE80211_REG_LPI_AP:
+ cmd->ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_LPI);
+ break;
+ case IEEE80211_REG_SP_AP:
+ cmd->ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_SP);
+ break;
+ case IEEE80211_REG_VLP_AP:
+ cmd->ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
void
iwl_mld_send_ap_tx_power_constraint_cmd(struct iwl_mld *mld,
struct ieee80211_vif *vif,
@@ -349,15 +376,13 @@ iwl_mld_send_ap_tx_power_constraint_cmd(struct iwl_mld *mld,
memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr));
memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr));
- if (vif->type == NL80211_IFTYPE_AP) {
- cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
- } else if (link->power_type == IEEE80211_REG_UNSET_AP) {
+ if (iwl_mld_set_ap_power_type(&cmd, vif, link))
return;
- } else {
- cmd.ap_type = cpu_to_le16(link->power_type - 1);
+
+ if (vif->type != NL80211_IFTYPE_AP)
iwl_mld_tpe_sta_cmd_data(&cmd, link);
- }
+ IWL_DEBUG_POWER(mld, "AP power type: %d\n", le16_to_cpu(cmd.ap_type));
ret = iwl_mld_send_cmd_pdu(mld,
WIDE_ID(PHY_OPS_GROUP,
AP_TX_POWER_CONSTRAINTS_CMD),
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
index 40571125b3ab..6ab5a3410353 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
@@ -165,8 +165,8 @@ static int iwl_mld_ppag_send_cmd(struct iwl_mld *mld)
{
struct iwl_fw_runtime *fwrt = &mld->fwrt;
union iwl_ppag_table_cmd cmd = {
- .v7.ppag_config_info.table_source = fwrt->ppag_bios_source,
- .v7.ppag_config_info.table_revision = fwrt->ppag_bios_rev,
+ .v7.ppag_config_info.hdr.table_source = fwrt->ppag_bios_source,
+ .v7.ppag_config_info.hdr.table_revision = fwrt->ppag_bios_rev,
.v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags),
};
int ret;
@@ -206,11 +206,27 @@ int iwl_mld_init_ppag(struct iwl_mld *mld)
return iwl_mld_ppag_send_cmd(mld);
}
+static __le32 iwl_mld_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+{
+ int ret;
+ u32 val;
+
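+ /* Map the BIOS "disable SRD" policy onto the ETSI config bits */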
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val);
+ if (!ret) {
+ if (val == DSM_VALUE_SRD_PASSIVE)
+ return cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+ else if (val == DSM_VALUE_SRD_DISABLE)
+ return cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+ }
+
+ return 0;
+}
+
void iwl_mld_configure_lari(struct iwl_mld *mld)
{
struct iwl_fw_runtime *fwrt = &mld->fwrt;
struct iwl_lari_config_change_cmd cmd = {
- .config_bitmap = iwl_get_lari_config_bitmap(fwrt),
+ .config_bitmap = iwl_mld_get_lari_config_bitmap(fwrt),
};
bool has_raw_dsm_capa = fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_FW_ACCEPTS_RAW_DSM_TABLE);
@@ -265,6 +281,14 @@ void iwl_mld_configure_lari(struct iwl_mld *mld)
if (!ret)
cmd.oem_11be_allow_bitmap = cpu_to_le32(value);
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BN, &value);
+ if (!ret)
+ cmd.oem_11bn_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII_9, &value);
+ if (!ret)
+ cmd.oem_unii9_enable = cpu_to_le32(value);
+
if (!cmd.config_bitmap &&
!cmd.oem_uhb_allow_bitmap &&
!cmd.oem_11ax_allow_bitmap &&
@@ -273,9 +297,14 @@ void iwl_mld_configure_lari(struct iwl_mld *mld)
!cmd.force_disable_channels_bitmap &&
!cmd.edt_bitmap &&
!cmd.oem_320mhz_allow_bitmap &&
- !cmd.oem_11be_allow_bitmap)
+ !cmd.oem_11be_allow_bitmap &&
+ !cmd.oem_11bn_allow_bitmap &&
+ !cmd.oem_unii9_enable)
return;
+ cmd.bios_hdr.table_source = fwrt->dsm_source;
+ cmd.bios_hdr.table_revision = fwrt->dsm_revision;
+
IWL_DEBUG_RADIO(mld,
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
le32_to_cpu(cmd.config_bitmap),
@@ -295,9 +324,28 @@ void iwl_mld_configure_lari(struct iwl_mld *mld)
IWL_DEBUG_RADIO(mld,
"sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
le32_to_cpu(cmd.oem_11be_allow_bitmap));
-
- ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(REGULATORY_AND_NVM_GROUP,
- LARI_CONFIG_CHANGE), &cmd);
+ IWL_DEBUG_RADIO(mld,
+ "sending LARI_CONFIG_CHANGE, oem_11bn_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd.oem_11bn_allow_bitmap));
+ IWL_DEBUG_RADIO(mld,
+ "sending LARI_CONFIG_CHANGE, oem_unii9_enable=0x%x\n",
+ le32_to_cpu(cmd.oem_unii9_enable));
+
+ if (iwl_fw_lookup_cmd_ver(mld->fw,
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE), 12) == 12) {
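+ /* v12 of the command ends right before oem_11bn_allow_bitmap */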
+ int cmd_size = offsetof(typeof(cmd), oem_11bn_allow_bitmap);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE),
+ &cmd, cmd_size);
+ } else {
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE),
+ &cmd);
+ }
if (ret)
IWL_DEBUG_RADIO(mld,
"Failed to send LARI_CONFIG_CHANGE (%d)\n",
@@ -373,8 +421,8 @@ void iwl_mld_init_tas(struct iwl_mld *mld)
for (u8 i = 0; i < data.block_list_size; i++)
cmd.block_list_array[i] =
cpu_to_le16(data.block_list_array[i]);
- cmd.tas_config_info.table_source = data.table_source;
- cmd.tas_config_info.table_revision = data.table_revision;
+ cmd.tas_config_info.hdr.table_source = data.table_source;
+ cmd.tas_config_info.hdr.table_revision = data.table_revision;
cmd.tas_config_info.value = cpu_to_le32(data.tas_selection);
ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
index 6a76e3fcb581..214dcfde2fb4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
@@ -208,6 +208,134 @@ static void iwl_mld_fill_signal(struct iwl_mld *mld, int link_id,
}
static void
+iwl_mld_decode_vht_phy_data(struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_radiotap_vht *vht,
+ struct ieee80211_rx_status *rx_status)
+{
+ bool stbc;
+
+ vht->known = cpu_to_le16(IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
+ IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID |
+ IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
+ IEEE80211_RADIOTAP_VHT_KNOWN_GI |
+ IEEE80211_RADIOTAP_VHT_KNOWN_SGI_NSYM_DIS |
+ IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
+ IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED);
+
+ switch (le32_get_bits(phy_data->ntfy->sigs.vht.a1,
+ OFDM_RX_FRAME_VHT_BANDWIDTH)) {
+ case 0:
+ vht->bandwidth = IEEE80211_RADIOTAP_VHT_BW_20;
+ break;
+ case 1:
+ vht->bandwidth = IEEE80211_RADIOTAP_VHT_BW_40;
+ break;
+ case 2:
+ vht->bandwidth = IEEE80211_RADIOTAP_VHT_BW_80;
+ break;
+ case 3:
+ vht->bandwidth = IEEE80211_RADIOTAP_VHT_BW_160;
+ break;
+ }
+
+ vht->group_id = le32_get_bits(phy_data->ntfy->sigs.vht.a1,
+ OFDM_RX_FRAME_VHT_GRP_ID);
+
+ stbc = le32_get_bits(phy_data->ntfy->sigs.vht.a1,
+ OFDM_RX_FRAME_VHT_STBC);
+ if (stbc)
+ vht->flags |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
+
+ if (le32_get_bits(phy_data->ntfy->sigs.vht.a2,
+ OFDM_RX_FRAME_VHT_SHORT_GI))
+ vht->flags |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
+
+ if (le32_get_bits(phy_data->ntfy->sigs.vht.a2,
+ OFDM_RX_FRAME_VHT_SHORT_GI_AMBIG))
+ vht->flags |= IEEE80211_RADIOTAP_VHT_FLAG_SGI_NSYM_M10_9;
+
+ if (le32_get_bits(phy_data->ntfy->sigs.vht.a2,
+ OFDM_RX_FRAME_VHT_CODING_EXTRA_SYM))
+ vht->flags |= IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM;
+
+ if (vht->group_id != 0 && vht->group_id != 63) {
+ /* MU frame */
+ int user = le32_get_bits(phy_data->ntfy->sigs.vht.a1,
+ OFDM_RX_FRAME_VHT_MU_MIMO_USER_POSITION);
+ int nsts;
+
+ /* Always beamformed */
+ vht->flags |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
+
+ /* No MCS information in the a1/a2 data for MU frames */
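+ /* mcs_nss packs NSS in the low nibble and MCS in the high one;
+ * 0xf0 leaves the MCS out of range until the real value is known.
+ * With STBC the STS count is twice the NSS, hence the halving.
+ */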
+ nsts = le32_get_bits(phy_data->ntfy->sigs.vht.a1,
+ OFDM_RX_FRAME_VHT_STS_USER0);
+ vht->mcs_nss[0] = (stbc ? nsts / 2 : nsts) | 0xf0;
+
+ nsts = le32_get_bits(phy_data->ntfy->sigs.vht.a1,
+ OFDM_RX_FRAME_VHT_MU_STS_USER1);
+ vht->mcs_nss[1] = (stbc ? nsts / 2 : nsts) | 0xf0;
+
+ nsts = le32_get_bits(phy_data->ntfy->sigs.vht.a1,
+ OFDM_RX_FRAME_VHT_MU_STS_USER2);
+ vht->mcs_nss[2] = (stbc ? nsts / 2 : nsts) | 0xf0;
+
+ nsts = le32_get_bits(phy_data->ntfy->sigs.vht.a1,
+ OFDM_RX_FRAME_VHT_MU_STS_USER3);
+ vht->mcs_nss[3] = (stbc ? nsts / 2 : nsts) | 0xf0;
+
+ /* Report current user MCS from rate_n_flags via rx_status */
+ vht->mcs_nss[user] &= 0x0f;
+ vht->mcs_nss[user] |= rx_status->rate_idx << 4;
+
+ /* Report LDPC for current user */
+ if (rx_status->enc_flags & RX_ENC_FLAG_LDPC)
+ vht->coding = 0x1 << user;
+ } else {
+ int nsts;
+
+ /* SU frame */
+ vht->known |= cpu_to_le16(IEEE80211_RADIOTAP_VHT_KNOWN_PARTIAL_AID);
+
+ if (le32_get_bits(phy_data->ntfy->sigs.vht.a2,
+ OFDM_RX_FRAME_VHT_BF_OR_MU_RESERVED))
+ vht->flags |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
+
+ vht->partial_aid =
+ cpu_to_le16(le32_get_bits(phy_data->ntfy->sigs.vht.a1,
+ OFDM_RX_FRAME_VHT_PARTIAL_AID_OR_MU_STS));
+
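+ /* The SIG-A STS field encodes NSTS minus one, hence the +1 */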
+ nsts = le32_get_bits(phy_data->ntfy->sigs.vht.a1,
+ OFDM_RX_FRAME_VHT_STS) + 1;
+ vht->mcs_nss[0] =
+ (stbc ? nsts / 2 : nsts) |
+ le32_get_bits(phy_data->ntfy->sigs.vht.a2,
+ OFDM_RX_FRAME_VHT_MCS_OR_MU_CODING) << 4;
+ vht->mcs_nss[1] = 0;
+ vht->mcs_nss[2] = 0;
+ vht->mcs_nss[3] = 0;
+
+ if (rx_status->enc_flags & RX_ENC_FLAG_LDPC)
+ vht->coding = 0x1;
+ }
+}
+
+static void iwl_mld_rx_vht(struct sk_buff *skb,
+ struct iwl_mld_rx_phy_data *phy_data)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_vht *vht;
+
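+ /* Without the PHY data notification there is nothing to report */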
+ if (likely(!phy_data->ntfy))
+ return;
+
+ vht = skb_put_zero(skb, sizeof(*vht));
+ rx_status->flag |= RX_FLAG_RADIOTAP_VHT;
+
+ iwl_mld_decode_vht_phy_data(phy_data, vht, rx_status);
+}
+
+static void
iwl_mld_he_set_ru_alloc(struct ieee80211_rx_status *rx_status,
struct ieee80211_radiotap_he *he,
u8 ru_with_p80)
@@ -268,11 +396,11 @@ iwl_mld_decode_he_mu(struct iwl_mld_rx_phy_data *phy_data,
{
u32 rate_n_flags = phy_data->rate_n_flags;
- he_mu->flags1 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.b,
- OFDM_RX_FRAME_HE_SIGB_DCM,
+ he_mu->flags1 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a1,
+ OFDM_RX_FRAME_HE_DCM,
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
- he_mu->flags1 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.b,
- OFDM_RX_FRAME_HE_SIGB_MCS,
+ he_mu->flags1 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a1,
+ OFDM_RX_FRAME_HE_MCS,
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
he_mu->flags2 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a1,
OFDM_RX_FRAME_HE_PRMBL_PUNC_TYPE,
@@ -280,7 +408,7 @@ iwl_mld_decode_he_mu(struct iwl_mld_rx_phy_data *phy_data,
he_mu->flags2 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a2,
OFDM_RX_FRAME_HE_MU_NUM_OF_SIGB_SYM_OR_USER_NUM,
IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
- he_mu->flags2 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.b,
+ he_mu->flags2 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a2,
OFDM_RX_FRAME_HE_MU_SIGB_COMP,
IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
@@ -1377,6 +1505,10 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, int link_id,
iwl_mld_set_rx_rate(mld, phy_data, rx_status);
+ /* must be before HE data (radiotap field order) */
+ if (format == RATE_MCS_MOD_TYPE_VHT)
+ iwl_mld_rx_vht(skb, phy_data);
+
/* must be before L-SIG data (radiotap field order) */
if (format == RATE_MCS_MOD_TYPE_HE)
iwl_mld_rx_he(skb, phy_data);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.c b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
index fd1022ddc912..16f48087a888 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
@@ -1063,14 +1063,15 @@ static int
iwl_mld_scan_cmd_set_6ghz_chan_params(struct iwl_mld *mld,
struct iwl_mld_scan_params *params,
struct ieee80211_vif *vif,
- struct iwl_scan_req_params_v17 *scan_p,
- enum iwl_mld_scan_status scan_status)
+ struct iwl_scan_req_params_v17 *scan_p)
{
struct iwl_scan_channel_params_v7 *chan_p = &scan_p->channel_params;
struct iwl_scan_probe_params_v4 *probe_p = &scan_p->probe_params;
- chan_p->flags = iwl_mld_scan_get_cmd_gen_flags(mld, params, vif,
- scan_status);
+ /* Explicitly clear the flags since most of them are not
+ * relevant for a 6 GHz scan.
+ */
+ chan_p->flags = 0;
chan_p->count = iwl_mld_scan_cfg_channels_6g(mld, params,
params->n_channels,
probe_p, chan_p,
@@ -1106,8 +1107,7 @@ iwl_mld_scan_cmd_set_chan_params(struct iwl_mld *mld,
if (params->scan_6ghz)
return iwl_mld_scan_cmd_set_6ghz_chan_params(mld, params,
- vif, scan_p,
- scan_status);
+ vif, scan_p);
/* relevant only for 2.4 GHz/5 GHz scan */
cp->flags = iwl_mld_scan_cmd_set_chan_flags(mld, params, vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.c b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
index 61ecc33116cf..6056a306f7cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
@@ -1163,7 +1163,8 @@ void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE &&
- vif->type != NL80211_IFTYPE_STATION))
+ vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_NAN))
return;
iwl_mld_remove_internal_sta(mld, &mld_vif->aux_sta, false,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
index 0e172281b0c8..62a54c37a98c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
@@ -8,6 +8,7 @@
#include "tlc.h"
#include "hcmd.h"
#include "sta.h"
+#include "phy.h"
#include "fw/api/rs.h"
#include "fw/api/context.h"
@@ -447,11 +448,48 @@ iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
}
}
-static void iwl_mld_convert_tlc_cmd_to_v4(struct iwl_tlc_config_cmd *cmd,
- struct iwl_tlc_config_cmd_v4 *cmd_v4)
+static int iwl_mld_convert_tlc_cmd_to_v5(struct iwl_tlc_config_cmd *cmd,
+ struct iwl_tlc_config_cmd_v5 *cmd_v5)
{
+ if (WARN_ON_ONCE(hweight32(le32_to_cpu(cmd->sta_mask)) != 1))
+ return -EINVAL;
+
+ /* Convert sta_mask to sta_id */
+ cmd_v5->sta_id = __ffs(le32_to_cpu(cmd->sta_mask));
+
+ /* Copy all the rest */
+ cmd_v5->max_ch_width = cmd->max_ch_width;
+ cmd_v5->mode = cmd->mode;
+ cmd_v5->chains = cmd->chains;
+ cmd_v5->sgi_ch_width_supp = cmd->sgi_ch_width_supp;
+ cmd_v5->flags = cmd->flags;
+ cmd_v5->non_ht_rates = cmd->non_ht_rates;
+
+ BUILD_BUG_ON(sizeof(cmd_v5->ht_rates) != sizeof(cmd->ht_rates));
+ memcpy(cmd_v5->ht_rates, cmd->ht_rates, sizeof(cmd->ht_rates));
+
+ cmd_v5->max_mpdu_len = cmd->max_mpdu_len;
+ cmd_v5->max_tx_op = cmd->max_tx_op;
+
+ return 0;
+}
+
+static int iwl_mld_convert_tlc_cmd_to_v4(struct iwl_tlc_config_cmd *cmd,
+ struct iwl_tlc_config_cmd_v4 *cmd_v4)
+{
+ if (WARN_ON_ONCE(hweight32(le32_to_cpu(cmd->sta_mask)) != 1))
+ return -EINVAL;
+
+ /* Convert sta_mask to sta_id */
+ cmd_v4->sta_id = __ffs(le32_to_cpu(cmd->sta_mask));
+
/* Copy everything until ht_rates */
- memcpy(cmd_v4, cmd, offsetof(struct iwl_tlc_config_cmd, ht_rates));
+ cmd_v4->max_ch_width = cmd->max_ch_width;
+ cmd_v4->mode = cmd->mode;
+ cmd_v4->chains = cmd->chains;
+ cmd_v4->sgi_ch_width_supp = cmd->sgi_ch_width_supp;
+ cmd_v4->flags = cmd->flags;
+ cmd_v4->non_ht_rates = cmd->non_ht_rates;
/* Convert ht_rates from __le32 to __le16 */
BUILD_BUG_ON(ARRAY_SIZE(cmd_v4->ht_rates) != ARRAY_SIZE(cmd->ht_rates));
@@ -465,14 +503,17 @@ static void iwl_mld_convert_tlc_cmd_to_v4(struct iwl_tlc_config_cmd *cmd,
/* Copy the rest */
cmd_v4->max_mpdu_len = cmd->max_mpdu_len;
cmd_v4->max_tx_op = cmd->max_tx_op;
+
+ return 0;
}
static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
- enum nl80211_band band)
+ struct ieee80211_bss_conf *link)
{
struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ enum nl80211_band band = link->chanreq.oper.chan->band;
struct ieee80211_supported_band *sband = mld->hw->wiphy->bands[band];
const struct ieee80211_sta_he_cap *own_he_cap =
ieee80211_get_he_iftype_cap_vif(sband, vif);
@@ -492,25 +533,44 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD);
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0);
- struct iwl_tlc_config_cmd_v4 cmd_v4;
+ struct ieee80211_chanctx_conf *chan_ctx;
+ struct iwl_tlc_config_cmd_v5 cmd_v5 = {};
+ struct iwl_tlc_config_cmd_v4 cmd_v4 = {};
void *cmd_ptr;
u8 cmd_size;
+ u32 phy_id;
int ret;
if (fw_sta_id < 0)
return;
- cmd.sta_id = fw_sta_id;
+ cmd.sta_mask = cpu_to_le32(BIT(fw_sta_id));
+
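+ /* Look up the FW PHY id from the link's channel context */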
+ chan_ctx = rcu_dereference_wiphy(mld->wiphy, link->chanctx_conf);
+ if (WARN_ON(!chan_ctx))
+ return;
+
+ phy_id = iwl_mld_phy_from_mac80211(chan_ctx)->fw_id;
+ cmd.phy_id = cpu_to_le32(phy_id);
iwl_mld_fill_supp_rates(mld, vif, link_sta, sband,
own_he_cap, own_eht_cap,
&cmd);
- if (cmd_ver == 5) {
+ if (cmd_ver == 6) {
cmd_ptr = &cmd;
cmd_size = sizeof(cmd);
+ } else if (cmd_ver == 5) {
+ /* TODO: remove support once FW moves to version 6 */
+ ret = iwl_mld_convert_tlc_cmd_to_v5(&cmd, &cmd_v5);
+ if (ret)
+ return;
+ cmd_ptr = &cmd_v5;
+ cmd_size = sizeof(cmd_v5);
} else if (cmd_ver == 4) {
- iwl_mld_convert_tlc_cmd_to_v4(&cmd, &cmd_v4);
+ ret = iwl_mld_convert_tlc_cmd_to_v4(&cmd, &cmd_v4);
+ if (ret)
+ return;
cmd_ptr = &cmd_v4;
cmd_size = sizeof(cmd_v4);
} else {
@@ -520,8 +580,9 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
}
IWL_DEBUG_RATE(mld,
- "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
- cmd.sta_id, cmd.max_ch_width, cmd.mode);
+ "TLC CONFIG CMD, sta_mask=0x%x, max_ch_width=%d, mode=%d, phy_id=%d\n",
+ le32_to_cpu(cmd.sta_mask), cmd.max_ch_width, cmd.mode,
+ le32_to_cpu(cmd.phy_id));
/* Send async since this can be called within a RCU-read section */
ret = iwl_mld_send_cmd_with_flags_pdu(mld, cmd_id, CMD_ASYNC, cmd_ptr,
@@ -561,7 +622,6 @@ void iwl_mld_config_tlc_link(struct iwl_mld *mld,
struct ieee80211_link_sta *link_sta)
{
struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
- enum nl80211_band band;
if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
return;
@@ -575,8 +635,7 @@ void iwl_mld_config_tlc_link(struct iwl_mld *mld,
ieee80211_sta_recalc_aggregates(link_sta->sta);
}
- band = link_conf->chanreq.oper.chan->band;
- iwl_mld_send_tlc_cmd(mld, vif, link_sta, band);
+ iwl_mld_send_tlc_cmd(mld, vif, link_sta, link_conf);
}
void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tx.c b/drivers/net/wireless/intel/iwlwifi/mld/tx.c
index 3b4b575aadaa..546d09a38dab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tx.c
@@ -345,6 +345,11 @@ u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
iwl_mld_get_basic_rates_and_band(mld, vif, info, &basic_rates, &band);
+ if (band >= NUM_NL80211_BANDS) {
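+ /* NAN vifs have no BSS config, hence no band/basic rates */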
+ WARN_ON(vif->type != NL80211_IFTYPE_NAN);
+ return IWL_FIRST_OFDM_RATE;
+ }
+
sband = mld->hw->wiphy->bands[band];
for_each_set_bit(i, &basic_rates, BITS_PER_LONG) {
u16 hw = sband->bitrates[i].hw_value;
@@ -667,6 +672,12 @@ iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq,
WARN_ON(!ieee80211_is_mgmt(fc));
return mld_vif->aux_sta.queue_id;
+ case NL80211_IFTYPE_NAN:
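+ /* Like P2P device, NAN mgmt frames use the aux station queue */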
+ mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
+
+ WARN_ON(!ieee80211_is_mgmt(fc));
+
+ return mld_vif->aux_sta.queue_id;
default:
WARN_ONCE(1, "Unsupported vif type\n");
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index af1a45845999..6c225861db61 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -726,8 +726,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_time_quota_data *quota;
u32 status;
- if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm) ||
- ieee80211_vif_is_mld(vif)))
+ if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
return -EINVAL;
/* add back the PHY */
@@ -1248,7 +1247,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = NULL;
struct iwl_mvm_vif *mvmvif = NULL;
struct ieee80211_sta *ap_sta = NULL;
- struct iwl_mvm_vif_link_info *mvm_link;
struct iwl_d3_manager_config d3_cfg_cmd = {
/*
* Program the minimum sleep time to 10 seconds, as many
@@ -1280,13 +1278,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
- mvm_link = mvmvif->link[iwl_mvm_get_primary_link(vif)];
- if (WARN_ON_ONCE(!mvm_link)) {
- ret = -EINVAL;
- goto out_noreset;
- }
-
- if (mvm_link->ap_sta_id == IWL_INVALID_STA) {
+ if (mvmvif->deflink.ap_sta_id == IWL_INVALID_STA) {
/* if we're not associated, this must be netdetect */
if (!wowlan->nd_config) {
ret = 1;
@@ -1304,10 +1296,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
.offloading_tid = 0,
};
- wowlan_config_cmd.sta_id = mvm_link->ap_sta_id;
+ wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id;
ap_sta = rcu_dereference_protected(
- mvm->fw_id_to_mac_id[mvm_link->ap_sta_id],
+ mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(ap_sta)) {
ret = -EINVAL;
@@ -1324,7 +1316,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out_noreset;
ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
- vif, mvmvif, mvm_link, ap_sta);
+ vif, mvmvif, &mvmvif->deflink,
+ ap_sta);
if (ret)
goto out;
@@ -1819,10 +1812,6 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
struct iwl_mvm_d3_gtk_iter_data *data = _data;
struct iwl_wowlan_status_data *status = data->status;
s8 keyidx;
- int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
-
- if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
- return;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -1876,7 +1865,6 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
{
int i, j;
struct ieee80211_key_conf *key;
- int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
for (i = 0; i < ARRAY_SIZE(status->gtk); i++) {
if (!status->gtk[i].len)
@@ -1888,8 +1876,7 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
key = ieee80211_gtk_rekey_add(vif, status->gtk[i].id,
status->gtk[i].key,
- sizeof(status->gtk[i].key),
- link_id);
+ sizeof(status->gtk[i].key), -1);
if (IS_ERR(key)) {
/* FW may send also the old keys */
if (PTR_ERR(key) == -EALREADY)
@@ -1918,14 +1905,13 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_key_conf *key_config;
struct ieee80211_key_seq seq;
- int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
s8 keyidx = key_data->id;
if (!key_data->len)
return true;
key_config = ieee80211_gtk_rekey_add(vif, keyidx, key_data->key,
- sizeof(key_data->key), link_id);
+ sizeof(key_data->key), -1);
if (IS_ERR(key_config)) {
/* FW may send also the old keys */
return PTR_ERR(key_config) == -EALREADY;
@@ -1935,13 +1921,9 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
ieee80211_set_key_rx_seq(key_config, 0, &seq);
if (keyidx == 4 || keyidx == 5) {
- struct iwl_mvm_vif_link_info *mvm_link;
-
- link_id = link_id < 0 ? 0 : link_id;
- mvm_link = mvmvif->link[link_id];
- if (mvm_link->igtk)
- mvm_link->igtk->hw_key_idx = STA_KEY_IDX_INVALID;
- mvm_link->igtk = key_config;
+ if (mvmvif->deflink.igtk)
+ mvmvif->deflink.igtk->hw_key_idx = STA_KEY_IDX_INVALID;
+ mvmvif->deflink.igtk = key_config;
}
if (vif->type == NL80211_IFTYPE_STATION && (keyidx == 6 || keyidx == 7))
@@ -2396,23 +2378,19 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
bool keep = false;
struct iwl_mvm_sta *mvm_ap_sta;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
- struct iwl_mvm_vif_link_info *mvm_link = mvmvif->link[link_id];
int wowlan_info_ver = iwl_fw_lookup_notif_ver(mvm->fw,
PROT_OFFLOAD_GROUP,
WOWLAN_INFO_NOTIFICATION,
IWL_FW_CMD_VER_UNKNOWN);
- if (WARN_ON(!mvm_link))
- goto out_unlock;
-
if (!status)
goto out_unlock;
IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n",
status->wakeup_reasons);
- mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, mvm_link->ap_sta_id);
+ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm,
+ mvmvif->deflink.ap_sta_id);
if (!mvm_ap_sta)
goto out_unlock;
@@ -2756,9 +2734,6 @@ iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
u8 sta_id = mvm->net_detect ? IWL_INVALID_STA :
mvmvif->deflink.ap_sta_id;
- /* bug - FW with MLO has status notification */
- WARN_ON(ieee80211_vif_is_mld(vif));
-
d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index edae13755ee6..43cf94c9a36b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1135,8 +1135,9 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
for (u16 i = 0; i < data.block_list_size; i++)
cmd_v5.block_list_array[i] =
cpu_to_le16(data.block_list_array[i]);
- cmd_v5.tas_config_info.table_source = data.table_source;
- cmd_v5.tas_config_info.table_revision = data.table_revision;
+ cmd_v5.tas_config_info.hdr.table_source = data.table_source;
+ cmd_v5.tas_config_info.hdr.table_revision =
+ data.table_revision;
cmd_v5.tas_config_info.value = cpu_to_le32(data.tas_selection);
} else if (fw_ver == 4) {
cmd_size = sizeof(cmd_v2_v4.common) + sizeof(cmd_v2_v4.v4);
@@ -1165,13 +1166,208 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
+static __le32 iwl_mvm_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+{
+ int ret;
+ u32 val;
+ __le32 config_bitmap = 0;
+
+ switch (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_HR1:
+ case IWL_CFG_RF_TYPE_HR2:
+ case IWL_CFG_RF_TYPE_JF1:
+ case IWL_CFG_RF_TYPE_JF2:
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_INDONESIA_5G2,
+ &val);
+
+ if (!ret && val == DSM_VALUE_INDONESIA_ENABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+ break;
+ default:
+ break;
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val);
+ if (!ret) {
+ if (val == DSM_VALUE_SRD_PASSIVE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+ else if (val == DSM_VALUE_SRD_DISABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+ }
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_REGULATORY_CONFIG,
+ &val);
+ /*
+ * Enable China 2022 regulatory support if the BIOS object
+ * does not exist or if it is enabled in the BIOS.
+ */
+ if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
+ }
+
+ return config_bitmap;
+}
+
+static size_t iwl_mvm_get_lari_config_cmd_size(u8 cmd_ver)
+{
+ size_t cmd_size;
+
+ switch (cmd_ver) {
+ case 12:
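+ /* v12 ends right before the 11bn allow bitmap */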
+ cmd_size = offsetof(struct iwl_lari_config_change_cmd,
+ oem_11bn_allow_bitmap);
+ break;
+ case 8:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v8);
+ break;
+ case 6:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
+ break;
+ default:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
+ break;
+ }
+ return cmd_size;
+}
+
+static int iwl_mvm_fill_lari_config(struct iwl_fw_runtime *fwrt,
+ struct iwl_lari_config_change_cmd *cmd,
+ size_t *cmd_size)
+{
+ int ret;
+ u32 value;
+ bool has_raw_dsm_capa = fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_FW_ACCEPTS_RAW_DSM_TABLE);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE), 1);
+
+ memset(cmd, 0, sizeof(*cmd));
+ *cmd_size = iwl_mvm_get_lari_config_cmd_size(cmd_ver);
+
+ cmd->config_bitmap = iwl_mvm_get_lari_config_bitmap(fwrt);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
+ if (!ret) {
+ if (!has_raw_dsm_capa)
+ value &= DSM_11AX_ALLOW_BITMAP;
+ cmd->oem_11ax_allow_bitmap = cpu_to_le32(value);
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
+ if (!ret) {
+ if (!has_raw_dsm_capa)
+ value &= DSM_UNII4_ALLOW_BITMAP;
+
+ /* Since version 12, bits 4 and 5 are supported
+ * regardless of this capability. Bypass this masking
+ * if the firmware is capable of accepting a raw DSM table.
+ */
+ if (!has_raw_dsm_capa && cmd_ver < 12 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA))
+ value &= ~(DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK |
+ DSM_VALUE_UNII4_CANADA_EN_MSK);
+
+ cmd->oem_unii4_allow_bitmap = cpu_to_le32(value);
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
+ if (!ret) {
+ if (!has_raw_dsm_capa)
+ value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V12;
+
+ if (!has_raw_dsm_capa && cmd_ver < 8)
+ value &= ~ACTIVATE_5G2_IN_WW_MASK;
+
+ /* Since version 12, bits 5 and 6 are supported
+ * regardless of this capability. Bypass this masking
+ * if the firmware is capable of accepting a raw DSM table.
+ */
+ if (!has_raw_dsm_capa && cmd_ver < 12 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA))
+ value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V8;
+
+ cmd->chan_state_active_bitmap = cpu_to_le32(value);
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
+ if (!ret)
+ cmd->oem_uhb_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value);
+ if (!ret) {
+ if (!has_raw_dsm_capa)
+ value &= DSM_FORCE_DISABLE_CHANNELS_ALLOWED_BITMAP;
+ cmd->force_disable_channels_bitmap = cpu_to_le32(value);
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
+ &value);
+ if (!ret) {
+ if (!has_raw_dsm_capa)
+ value &= DSM_EDT_ALLOWED_BITMAP;
+ cmd->edt_bitmap = cpu_to_le32(value);
+ }
+
+ ret = iwl_bios_get_wbem(fwrt, &value);
+ if (!ret)
+ cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value);
+ if (!ret)
+ cmd->oem_11be_allow_bitmap = cpu_to_le32(value);
+
+ if (cmd->config_bitmap ||
+ cmd->oem_uhb_allow_bitmap ||
+ cmd->oem_11ax_allow_bitmap ||
+ cmd->oem_unii4_allow_bitmap ||
+ cmd->chan_state_active_bitmap ||
+ cmd->force_disable_channels_bitmap ||
+ cmd->edt_bitmap ||
+ cmd->oem_320mhz_allow_bitmap ||
+ cmd->oem_11be_allow_bitmap) {
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd->config_bitmap),
+ le32_to_cpu(cmd->oem_11ax_allow_bitmap));
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
+ le32_to_cpu(cmd->oem_unii4_allow_bitmap),
+ le32_to_cpu(cmd->chan_state_active_bitmap),
+ cmd_ver);
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
+ le32_to_cpu(cmd->oem_uhb_allow_bitmap),
+ le32_to_cpu(cmd->force_disable_channels_bitmap));
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd->edt_bitmap),
+ le32_to_cpu(cmd->oem_320mhz_allow_bitmap));
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd->oem_11be_allow_bitmap));
+ } else {
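+ /* The BIOS provided no configuration, nothing to send */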
+ return 1;
+ }
+
+ return 0;
+}
+
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
struct iwl_lari_config_change_cmd cmd;
size_t cmd_size;
int ret;
- ret = iwl_fill_lari_config(&mvm->fwrt, &cmd, &cmd_size);
+ ret = iwl_mvm_fill_lari_config(&mvm->fwrt, &cmd, &cmd_size);
if (!ret) {
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index 738facceb240..b5d252ece2d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -285,28 +285,6 @@ int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return ret;
}
-u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- /* relevant data is written with both locks held, so read with either */
- lockdep_assert(lockdep_is_held(&mvmvif->mvm->mutex) ||
- lockdep_is_held(&mvmvif->mvm->hw->wiphy->mtx));
-
- if (!ieee80211_vif_is_mld(vif))
- return 0;
-
- /* In AP mode, there is no primary link */
- if (vif->type == NL80211_IFTYPE_AP)
- return __ffs(vif->active_links);
-
- if (mvmvif->esr_active &&
- !WARN_ON(!(BIT(mvmvif->primary_link) & vif->active_links)))
- return mvmvif->primary_link;
-
- return __ffs(vif->active_links);
-}
-
void iwl_mvm_init_link(struct iwl_mvm_vif_link_info *link)
{
link->bcast_sta.sta_id = IWL_INVALID_STA;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 867807abde66..0e5820c13523 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -873,7 +873,6 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct ieee80211_vif *vif)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_supported_band *sband;
unsigned long basic = vif->bss_conf.basic_rates;
u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT;
@@ -883,16 +882,6 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
u8 rate;
u32 i;
- if (link_id == IEEE80211_LINK_UNSPECIFIED && ieee80211_vif_is_mld(vif)) {
- for (i = 0; i < ARRAY_SIZE(mvmvif->link); i++) {
- if (!mvmvif->link[i])
- continue;
- /* shouldn't do this when >1 link is active */
- WARN_ON_ONCE(link_id != IEEE80211_LINK_UNSPECIFIED);
- link_id = i;
- }
- }
-
if (link_id < IEEE80211_LINK_UNSPECIFIED) {
struct ieee80211_bss_conf *link_conf;
@@ -1761,6 +1750,20 @@ void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ /*
+ * len_low should be 2 + n * 13, where n is the number of NoA
+ * descriptors and 13 is the size of one descriptor. We can have
+ * either one or two descriptors.
+ */
+ if (IWL_FW_CHECK(mvm, notif->noa_active &&
+ notif->noa_attr.len_low != 2 +
+ sizeof(struct ieee80211_p2p_noa_desc) &&
+ notif->noa_attr.len_low != 2 +
+ sizeof(struct ieee80211_p2p_noa_desc) * 2,
+ "Invalid noa_attr.len_low (%d)\n",
+ notif->noa_attr.len_low))
+ return;
+
new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
if (!new_data)
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 44029ceb8f77..169c87588938 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1109,7 +1109,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
mvmvif->ba_enabled = false;
mvmvif->ap_sta = NULL;
- mvmvif->esr_active = false;
vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
for_each_mvm_vif_valid_link(mvmvif, link_id) {
@@ -1129,39 +1128,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
}
-static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
-{
- struct iwl_mvm *mvm = data;
- struct iwl_mvm_sta *mvm_sta;
- struct ieee80211_vif *vif;
- int link_id;
-
- mvm_sta = iwl_mvm_sta_from_mac80211(sta);
- vif = mvm_sta->vif;
-
- if (!sta->valid_links)
- return;
-
- for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
- struct iwl_mvm_link_sta *mvm_link_sta;
-
- mvm_link_sta =
- rcu_dereference_check(mvm_sta->link[link_id],
- lockdep_is_held(&mvm->mutex));
- if (mvm_link_sta && !(vif->active_links & BIT(link_id))) {
- /*
- * We have a link STA but the link is inactive in
- * mac80211. This will happen if we failed to
- * deactivate the link but mac80211 roll back the
- * deactivation of the link.
- * Delete the stale data to avoid issues later on.
- */
- iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
- link_id);
- }
- }
-}
-
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
iwl_mvm_stop_device(mvm);
@@ -1184,10 +1150,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
*/
ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
- /* cleanup stations as links may be gone after restart */
- ieee80211_iterate_stations_atomic(mvm->hw,
- iwl_mvm_cleanup_sta_iterator, mvm);
-
mvm->p2p_device_vif = NULL;
iwl_mvm_reset_phy_ctxts(mvm);
@@ -2639,7 +2601,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
}
void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- u32 duration_override, unsigned int link_id)
+ u32 duration_override)
{
u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
@@ -2659,8 +2621,7 @@ void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
iwl_mvm_schedule_session_protection(mvm, vif, 900,
- min_duration, false,
- link_id);
+ min_duration, false);
else
iwl_mvm_protect_session(mvm, vif, duration,
min_duration, 500, false);
@@ -2860,7 +2821,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* time could be small without us having heard
* a beacon yet.
*/
- iwl_mvm_protect_assoc(mvm, vif, 0, 0);
+ iwl_mvm_protect_assoc(mvm, vif, 0);
}
iwl_mvm_sf_update(mvm, vif, false);
@@ -3921,12 +3882,6 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
mvmvif->authorized = 1;
- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
- mvmvif->link_selection_res = vif->active_links;
- mvmvif->link_selection_primary =
- vif->active_links ? __ffs(vif->active_links) : 0;
- }
-
callbacks->mac_ctxt_changed(mvm, vif, false);
iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
}
@@ -3972,7 +3927,6 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
* time.
*/
mvmvif->authorized = 0;
- mvmvif->link_selection_res = 0;
/* disable beacon filtering */
iwl_mvm_disable_beacon_filter(mvm, vif);
@@ -4197,7 +4151,7 @@ void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
return;
guard(mvm)(mvm);
- iwl_mvm_protect_assoc(mvm, vif, info->duration, info->link_id);
+ iwl_mvm_protect_assoc(mvm, vif, info->duration);
}
void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw,
@@ -5568,8 +5522,7 @@ static int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
if (!vif->cfg.assoc || !vif->bss_conf.dtim_period)
return -EBUSY;
- if (chsw->delay > IWL_MAX_CSA_BLOCK_TX &&
- hweight16(vif->valid_links) <= 1)
+ if (chsw->delay > IWL_MAX_CSA_BLOCK_TX)
schedule_delayed_work(&mvmvif->csa_work, 0);
if (chsw->block_tx) {
@@ -5733,15 +5686,8 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return;
}
- if (!drop && hweight16(vif->active_links) <= 1) {
- int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
- struct ieee80211_bss_conf *link_conf;
-
- link_conf = wiphy_dereference(hw->wiphy,
- vif->link_conf[link_id]);
- if (WARN_ON(!link_conf))
- return;
- if (link_conf->csa_active && mvmvif->csa_blocks_tx)
+ if (!drop) {
+ if (vif->bss_conf.csa_active && mvmvif->csa_blocks_tx)
drop = true;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
index ef0be44207e1..9bb253dcf4a7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2024 Intel Corporation
+ * Copyright (C) 2022 - 2025 Intel Corporation
*/
#include <linux/kernel.h>
#include <net/mac80211.h>
@@ -43,11 +43,11 @@ static u32 iwl_mvm_get_sec_sta_mask(struct iwl_mvm *mvm,
* group keys have no sta pointer), so we don't have a STA now.
* Since this happens for group keys only, just use the link_info as
* the group keys are per link; make sure that is the case by checking
- * we do have a link_id or are not doing MLO.
+ * we do have a link_id.
* Of course the same can be done during add as well, but we must do
* it during remove, since we don't have the mvmvif->ap_sta pointer.
*/
- if (!sta && (keyconf->link_id >= 0 || !ieee80211_vif_is_mld(vif)))
+ if (!sta && keyconf->link_id >= 0)
return BIT(link_info->ap_sta_id);
/* STA should be non-NULL now, but iwl_mvm_sta_fw_id_mask() checks */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
index 2d116a41913c..bf54b90a7c51 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
@@ -56,23 +56,6 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
if (iwlwifi_mod_params.disable_11ax)
return;
- /* If we have MLO enabled, then the firmware needs to enable
- * address translation for the station(s) we add. That depends
- * on having EHT enabled in firmware, which in turn depends on
- * mac80211 in the code below.
- * However, mac80211 doesn't enable HE/EHT until it has parsed
- * the association response successfully, so just skip all that
- * and enable both when we have MLO.
- */
- if (ieee80211_vif_is_mld(vif)) {
- iwl_mvm_mld_set_he_support(mvm, vif, cmd, cmd_ver);
- if (cmd_ver == 2)
- cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
- else
- cmd->wifi_gen.eht_support = 1;
- return;
- }
-
rcu_read_lock();
for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) {
link_conf = rcu_dereference(vif->link_conf[link_id]);
@@ -116,7 +99,6 @@ static int iwl_mvm_mld_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
u32 action, bool force_assoc_off)
{
struct iwl_mac_config_cmd cmd = {};
- u16 esr_transition_timeout;
WARN_ON(vif->type != NL80211_IFTYPE_STATION);
@@ -154,17 +136,6 @@ static int iwl_mvm_mld_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
}
cmd.client.assoc_id = cpu_to_le16(vif->cfg.aid);
- if (ieee80211_vif_is_mld(vif)) {
- esr_transition_timeout =
- u16_get_bits(vif->cfg.eml_cap,
- IEEE80211_EML_CAP_TRANSITION_TIMEOUT);
-
- cmd.client.esr_transition_timeout =
- min_t(u16, IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU,
- esr_transition_timeout);
- cmd.client.medium_sync_delay =
- cpu_to_le16(vif->cfg.eml_med_sync_delay);
- }
if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p)
cmd.filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 380b6f8a53fd..075ff09e93cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -60,19 +60,12 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
- /* We want link[0] to point to the default link, unless we have MLO and
- * in this case this will be modified later by .change_vif_links()
- * If we are in the restart flow with an MLD connection, we will wait
- * to .change_vif_links() to setup the links.
- */
- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
- !ieee80211_vif_is_mld(vif)) {
- mvmvif->link[0] = &mvmvif->deflink;
+ /* We want link[0] to point to the default link. */
+ mvmvif->link[0] = &mvmvif->deflink;
- ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
- if (ret)
- goto out_free_bf;
- }
+ ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+ if (ret)
+ goto out_free_bf;
/* Save a pointer to p2p device vif, so it can later be used to
* update the p2p device MAC when a GO is started/stopped
@@ -181,58 +174,6 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
}
}
-static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif)
-{
- unsigned int n_active = 0;
- int i;
-
- for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
- if (mvmvif->link[i] && mvmvif->link[i]->phy_ctxt)
- n_active++;
- }
-
- return n_active;
-}
-
-static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int link_id, ret = 0;
-
- mvmvif->esr_active = true;
-
- /* Indicate to mac80211 that EML is enabled */
- vif->driver_flags |= IEEE80211_VIF_EML_ACTIVE;
-
- iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW,
- IEEE80211_SMPS_OFF);
-
- for_each_mvm_vif_valid_link(mvmvif, link_id) {
- struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id];
-
- if (!link->phy_ctxt)
- continue;
-
- ret = iwl_mvm_phy_send_rlc(mvm, link->phy_ctxt, 2, 2);
- if (ret)
- break;
-
- link->phy_ctxt->rlc_disabled = true;
- }
-
- if (vif->active_links == mvmvif->link_selection_res &&
- !WARN_ON(!(vif->active_links & BIT(mvmvif->link_selection_primary))))
- mvmvif->primary_link = mvmvif->link_selection_primary;
- else
- mvmvif->primary_link = __ffs(vif->active_links);
-
- iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP,
- NULL);
-
- return ret;
-}
-
static int
__iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -243,17 +184,12 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
unsigned int link_id = link_conf->link_id;
int ret;
if (WARN_ON_ONCE(!mvmvif->link[link_id]))
return -EINVAL;
- /* if the assigned one was not counted yet, count it now */
- if (!mvmvif->link[link_id]->phy_ctxt)
- n_active++;
-
/* mac parameters such as HE support can change at this stage
* For sta, need first to configure correct state from drv_sta_state
* and only after that update mac config.
@@ -268,15 +204,6 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
mvmvif->link[link_id]->phy_ctxt = phy_ctxt;
- if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
- mvmvif->link[link_id]->listen_lmac = true;
- ret = iwl_mvm_esr_mode_active(mvm, vif);
- if (ret) {
- IWL_ERR(mvm, "failed to activate ESR mode (%d)\n", ret);
- goto out;
- }
- }
-
if (switching_chanctx) {
/* reactivate if we turned this off during channel switch */
if (vif->type == NL80211_IFTYPE_AP)
@@ -341,55 +268,6 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
return __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
}
-static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct ieee80211_bss_conf *link_conf;
- int link_id, ret = 0;
-
- mvmvif->esr_active = false;
-
- vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
-
- iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW,
- IEEE80211_SMPS_AUTOMATIC);
-
- for_each_vif_active_link(vif, link_conf, link_id) {
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct iwl_mvm_phy_ctxt *phy_ctxt;
- u8 static_chains, dynamic_chains;
-
- mvmvif->link[link_id]->listen_lmac = false;
-
- rcu_read_lock();
-
- chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
- phy_ctxt = mvmvif->link[link_id]->phy_ctxt;
-
- if (!chanctx_conf || !phy_ctxt) {
- rcu_read_unlock();
- continue;
- }
-
- phy_ctxt->rlc_disabled = false;
- static_chains = chanctx_conf->rx_chains_static;
- dynamic_chains = chanctx_conf->rx_chains_dynamic;
-
- rcu_read_unlock();
-
- ret = iwl_mvm_phy_send_rlc(mvm, phy_ctxt, static_chains,
- dynamic_chains);
- if (ret)
- break;
- }
-
- iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_DOWN,
- NULL);
-
- return ret;
-}
-
static void
__iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -399,7 +277,6 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
unsigned int link_id = link_conf->link_id;
/* shouldn't happen, but verify link_id is valid before accessing */
@@ -421,14 +298,6 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
iwl_mvm_link_changed(mvm, vif, link_conf,
LINK_CONTEXT_MODIFY_ACTIVE, false);
- if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
- int ret = iwl_mvm_esr_mode_inactive(mvm, vif);
-
- if (ret)
- IWL_ERR(mvm, "failed to deactivate ESR mode (%d)\n",
- ret);
- }
-
if (vif->type == NL80211_IFTYPE_MONITOR)
iwl_mvm_mld_rm_snif_sta(mvm, vif);
@@ -448,9 +317,8 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
__iwl_mvm_mld_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false);
- /* in the non-MLD case, remove/re-add the link to clean up FW state */
- if (!ieee80211_vif_is_mld(vif) && !mvmvif->ap_sta &&
- !WARN_ON_ONCE(vif->cfg.assoc)) {
+ /* Remove/re-add the link to clean up FW state */
+ if (!mvmvif->ap_sta && !WARN_ON_ONCE(vif->cfg.assoc)) {
iwl_mvm_remove_link(mvm, vif, link_conf);
iwl_mvm_add_link(mvm, vif, link_conf);
}
@@ -785,12 +653,6 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
protect) {
- /* We are in assoc so only one link is active-
- * The association link
- */
- unsigned int link_id =
- ffs(vif->active_links) - 1;
-
/* If we're not restarting and still haven't
* heard a beacon (dtim period unknown) then
* make sure we still have enough minimum time
@@ -800,7 +662,7 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
* time could be small without us having heard
* a beacon yet.
*/
- iwl_mvm_protect_assoc(mvm, vif, 0, link_id);
+ iwl_mvm_protect_assoc(mvm, vif, 0);
}
iwl_mvm_sf_update(mvm, vif, false);
@@ -1096,14 +958,6 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
if (new_links == 0) {
mvmvif->link[0] = &mvmvif->deflink;
err = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
- if (err == 0)
- mvmvif->primary_link = 0;
- } else if (!(new_links & BIT(mvmvif->primary_link))) {
- /*
- * Ensure we always have a valid primary_link, the real
- * decision happens later when PHY is activated.
- */
- mvmvif->primary_link = __ffs(new_links);
}
out_err:
@@ -1128,44 +982,17 @@ iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw,
return iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links);
}
-bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- const struct wiphy_iftype_ext_capab *ext_capa;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc ||
- hweight16(ieee80211_vif_usable_links(vif)) == 1)
- return false;
-
- if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP))
- return false;
-
- ext_capa = cfg80211_get_iftype_ext_capa(mvm->hw->wiphy,
- ieee80211_vif_type_p2p(vif));
- return (ext_capa &&
- (ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP));
-}
-
static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u16 desired_links)
{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int n_links = hweight16(desired_links);
if (n_links <= 1)
return true;
- guard(mvm)(mvm);
-
- /* Check if HW supports the wanted number of links */
- if (n_links > iwl_mvm_max_active_links(mvm, vif))
- return false;
-
- /* If it is an eSR device, check that we can enter eSR */
- return iwl_mvm_is_esr_supported(mvm->fwrt.trans) &&
- iwl_mvm_vif_has_esr_cap(mvm, vif);
+ WARN_ON(1);
+ return false;
}
static enum ieee80211_neg_ttlm_res
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index d9a2801636cf..1100d763ceb6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -9,40 +9,14 @@
u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int filter_link_id)
{
- struct ieee80211_link_sta *link_sta;
struct iwl_mvm_sta *mvmsta;
- struct ieee80211_vif *vif;
- unsigned int link_id;
- u32 result = 0;
if (!sta)
return 0;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- vif = mvmsta->vif;
-
- /* it's easy when the STA is not an MLD */
- if (!sta->valid_links)
- return BIT(mvmsta->deflink.sta_id);
-
- /* but if it is an MLD, get the mask of all the FW STAs it has ... */
- for_each_sta_active_link(vif, sta, link_sta, link_id) {
- struct iwl_mvm_link_sta *mvm_link_sta;
-
- /* unless we have a specific link in mind */
- if (filter_link_id >= 0 && link_id != filter_link_id)
- continue;
-
- mvm_link_sta =
- rcu_dereference_check(mvmsta->link[link_id],
- lockdep_is_held(&mvm->mutex));
- if (!mvm_link_sta)
- continue;
-
- result |= BIT(mvm_link_sta->sta_id);
- }
- return result;
+ return BIT(mvmsta->deflink.sta_id);
}
static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 301d590fe0bd..db5f9804b529 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -120,7 +120,6 @@ struct iwl_mvm_time_event_data {
* if the te is in the time event list or not (when id == TE_MAX)
*/
u32 id;
- s8 link_id;
};
/* Power management */
@@ -380,14 +379,7 @@ struct iwl_mvm_vif_link_info {
* @bcn_prot: beacon protection data (keys; FIXME: needs to be per link)
* @deflink: default link data for use in non-MLO
* @link: link data for each link in MLO
- * @esr_active: indicates eSR mode is active
* @pm_enabled: indicates powersave is enabled
- * @link_selection_res: bitmap of active links as it was decided in the last
- * link selection. Valid only for a MLO vif after assoc. 0 if there wasn't
- * any link selection yet.
- * @link_selection_primary: primary link selected by link selection
- * @primary_link: primary link in eSR. Valid only for an associated MLD vif,
- * and in eSR mode. Valid only for a STA.
* @roc_activity: currently running ROC activity for this vif (or
* ROC_NUM_ACTIVITIES if no activity is running).
* @session_prot_connection_loss: the connection was lost due to session
@@ -434,7 +426,6 @@ struct iwl_mvm_vif {
bool ap_ibss_active;
bool pm_enabled;
bool monitor_active;
- bool esr_active;
bool session_prot_connection_loss;
u8 low_latency: 6;
@@ -515,10 +506,6 @@ struct iwl_mvm_vif {
u16 max_tx_op;
- u16 link_selection_res;
- u8 link_selection_primary;
- u8 primary_link;
-
struct iwl_mvm_vif_link_info deflink;
struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS];
};
@@ -1619,40 +1606,6 @@ static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_CTDP_SUPPORT);
}
-static inline bool iwl_mvm_is_esr_supported(struct iwl_trans *trans)
-{
- if (CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id))
- return false;
-
- switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
- case IWL_CFG_RF_TYPE_FM:
- /* Step A doesn't support eSR */
- return CSR_HW_RFID_STEP(trans->info.hw_rf_id);
- case IWL_CFG_RF_TYPE_WH:
- case IWL_CFG_RF_TYPE_PE:
- return true;
- default:
- return false;
- }
-}
-
-static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_trans *trans = mvm->fwrt.trans;
-
- if (vif->type == NL80211_IFTYPE_AP)
- return mvm->fw->ucode_capa.num_beacons;
-
- /* Check if HW supports eSR or STR */
- if (iwl_mvm_is_esr_supported(trans) ||
- (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
- CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id)))
- return IWL_FW_MAX_ACTIVE_LINKS_NUM;
-
- return 1;
-}
-
extern const u8 iwl_mvm_ac_to_tx_fifo[];
extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
extern const u8 iwl_mvm_ac_to_bz_tx_fifo[];
@@ -2008,15 +1961,6 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
-u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif);
-
-struct iwl_mvm_link_sel_data {
- u8 link_id;
- const struct cfg80211_chan_def *chandef;
- s32 signal;
- u16 grade;
-};
-
#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
extern const struct iwl_hcmd_arr iwl_mvm_groups[];
extern const unsigned int iwl_mvm_groups_size;
@@ -2064,7 +2008,7 @@ int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
/*Session Protection */
void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- u32 duration_override, unsigned int link_id);
+ u32 duration_override);
/* Quota management */
static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm)
@@ -2884,8 +2828,6 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
int duration, enum iwl_roc_activity activity);
-/* EMLSR */
-bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void
iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index d35c63a673b6..7f0b4f5daa21 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -2254,17 +2254,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
IWL_RX_MPDU_STATUS_STA_ID);
if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) {
- struct ieee80211_link_sta *link_sta;
-
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (IS_ERR(sta))
sta = NULL;
- link_sta = rcu_dereference(mvm->fw_id_to_link_sta[sta_id]);
-
- if (sta && sta->valid_links && link_sta) {
- rx_status->link_valid = 1;
- rx_status->link_id = link_sta->link_id;
- }
}
} else if (!is_multicast_ether_addr(hdr->addr2)) {
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index b588f1dcf20d..9c51953d255d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2568,16 +2568,16 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
bitmap_ssid,
version);
return 0;
- } else {
- pb->preq = params->preq;
}
- cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
- cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
- cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
+ pb->preq = params->preq;
iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
+ /* Explicitly clear the flags since most of them are not
+ * relevant for a 6 GHz scan.
+ */
+ cp->flags = 0;
cp->count = iwl_mvm_umac_scan_cfg_channels_v7_6g(mvm, params,
params->n_channels,
pb, cp, vif->type,
@@ -3023,12 +3023,8 @@ static int _iwl_mvm_single_scan_start(struct iwl_mvm *mvm,
params.iter_notif = true;
params.tsf_report_link_id = req->tsf_report_link_id;
- if (params.tsf_report_link_id < 0) {
- if (vif->active_links)
- params.tsf_report_link_id = __ffs(vif->active_links);
- else
- params.tsf_report_link_id = 0;
- }
+ if (params.tsf_report_link_id < 0)
+ params.tsf_report_link_id = 0;
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 36379b738de1..4945ebf19f6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2022-2024 Intel Corporation
+ * Copyright (C) 2018-2020, 2022-2025 Intel Corporation
*/
#include <linux/etherdevice.h>
#include "mvm.h"
@@ -155,7 +155,7 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
iwl_mvm_schedule_session_protection(mvm, vif, duration,
- duration, true, link_id);
+ duration, true);
else
iwl_mvm_protect_session(mvm, vif, duration,
duration, 100, true);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 0b12ee8ad618..2b52a4f3bff9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -42,7 +42,6 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
te_data->uid = 0;
te_data->id = TE_MAX;
te_data->vif = NULL;
- te_data->link_id = -1;
}
static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
@@ -721,8 +720,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
/* Determine whether mac or link id should be used, and validate the link id */
static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- s8 link_id)
+ struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
@@ -732,22 +730,18 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
if (ver < 2)
return mvmvif->id;
- if (WARN(link_id < 0 || !mvmvif->link[link_id],
- "Invalid link ID for session protection: %u\n", link_id))
- return -EINVAL;
-
- if (WARN(!mvmvif->link[link_id]->active,
- "Session Protection on an inactive link: %u\n", link_id))
+ if (WARN(!mvmvif->deflink.active,
+ "Session Protection on an inactive link\n"))
return -EINVAL;
- return mvmvif->link[link_id]->fw_link_id;
+ return mvmvif->deflink.fw_link_id;
}
static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 id, s8 link_id)
+ u32 id)
{
- int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
+ int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif);
struct iwl_session_prot_cmd cmd = {
.id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
@@ -791,7 +785,6 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif = te_data->vif;
struct iwl_mvm_vif *mvmvif;
enum nl80211_iftype iftype;
- s8 link_id;
bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
@@ -811,7 +804,6 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
/* Save time event uid before clearing its data */
*uid = te_data->uid;
id = te_data->id;
- link_id = te_data->link_id;
/*
* The clear_data function handles time events that were already removed
@@ -837,8 +829,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
*/
if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
/* Session protection is still ongoing. Cancel it */
- iwl_mvm_cancel_session_protection(mvm, vif, id,
- link_id);
+ iwl_mvm_cancel_session_protection(mvm, vif, id);
if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
iwl_mvm_roc_finished(mvm);
}
@@ -1007,7 +998,6 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
/* End TE, notify mac80211 */
mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
- mvmvif->time_event_data.link_id = -1;
/* set the bit so the ROC cleanup will actually clean up */
set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
iwl_mvm_roc_finished(mvm);
@@ -1132,7 +1122,7 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_session_prot_cmd cmd = {
.id_and_color =
- cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif, 0)),
+ cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif)),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
};
@@ -1143,8 +1133,6 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
* protection's configuration.
*/
- mvmvif->time_event_data.link_id = 0;
-
switch (type) {
case IEEE80211_ROC_TYPE_NORMAL:
mvmvif->time_event_data.id =
@@ -1290,8 +1278,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
}
iwl_mvm_cancel_session_protection(mvm, vif,
- te_data->id,
- te_data->link_id);
+ te_data->id);
} else {
iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
&mvmvif->hs_time_event_data);
@@ -1423,14 +1410,13 @@ static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 duration, u32 min_duration,
- bool wait_for_notif,
- unsigned int link_id)
+ bool wait_for_notif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
- int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, (s8)link_id);
+ int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif);
struct iwl_session_prot_cmd cmd = {
.id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
@@ -1444,7 +1430,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->time_event_lock);
- if (te_data->running && te_data->link_id == link_id &&
+ if (te_data->running &&
time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
jiffies_to_msecs(te_data->end_jiffies - jiffies));
@@ -1461,7 +1447,6 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
te_data->id = le32_to_cpu(cmd.conf_id);
te_data->duration = le32_to_cpu(cmd.duration_tu);
te_data->vif = vif;
- te_data->link_id = link_id;
spin_unlock_bh(&mvm->time_event_lock);
IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
index 1ef8768756db..3f8628cbd480 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -210,13 +210,11 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
* @duration: the requested duration of the protection
* @min_duration: the minimum duration of the protection
* @wait_for_notif: if true, will block until the start of the protection
- * @link_id: The link to schedule a session protection for
*/
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 duration, u32 min_duration,
- bool wait_for_notif,
- unsigned int link_id);
+ bool wait_for_notif);
/**
* iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index bb97837baeda..bca13417e82c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -817,28 +817,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
NL80211_IFTYPE_P2P_DEVICE ||
info.control.vif->type == NL80211_IFTYPE_AP ||
info.control.vif->type == NL80211_IFTYPE_ADHOC) {
- u32 link_id = u32_get_bits(info.control.flags,
- IEEE80211_TX_CTRL_MLO_LINK);
- struct iwl_mvm_vif_link_info *link;
-
- if (link_id == IEEE80211_LINK_UNSPECIFIED) {
- if (info.control.vif->active_links)
- link_id = ffs(info.control.vif->active_links) - 1;
- else
- link_id = 0;
- }
-
- link = mvmvif->link[link_id];
- if (WARN_ON(!link))
- return -1;
if (!ieee80211_is_data(hdr->frame_control))
- sta_id = link->bcast_sta.sta_id;
+ sta_id = mvmvif->deflink.bcast_sta.sta_id;
else
- sta_id = link->mcast_sta.sta_id;
+ sta_id = mvmvif->deflink.mcast_sta.sta_id;
- queue = iwl_mvm_get_ctrl_vif_queue(mvm, link, &info,
- skb);
+ queue = iwl_mvm_get_ctrl_vif_queue(mvm,
+ &mvmvif->deflink,
+ &info, skb);
} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
queue = mvm->snif_queue;
sta_id = mvm->snif_sta.sta_id;
@@ -895,33 +882,9 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
*/
val = mvmsta->max_amsdu_len;
- if (hweight16(sta->valid_links) <= 1) {
- if (sta->valid_links) {
- struct ieee80211_bss_conf *link_conf;
- unsigned int link = ffs(sta->valid_links) - 1;
+ band = mvmsta->vif->bss_conf.chanreq.oper.chan->band;
- rcu_read_lock();
- link_conf = rcu_dereference(mvmsta->vif->link_conf[link]);
- if (WARN_ON(!link_conf))
- band = NL80211_BAND_2GHZ;
- else
- band = link_conf->chanreq.oper.chan->band;
- rcu_read_unlock();
- } else {
- band = mvmsta->vif->bss_conf.chanreq.oper.chan->band;
- }
-
- lmac = iwl_mvm_get_lmac_id(mvm, band);
- } else if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) {
- /* for real MLO restrict to both LMACs if they exist */
- lmac = IWL_LMAC_5G_INDEX;
- val = min_t(unsigned int, val,
- mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
- lmac = IWL_LMAC_24G_INDEX;
- } else {
- lmac = IWL_LMAC_24G_INDEX;
- }
+ lmac = iwl_mvm_get_lmac_id(mvm, band);
return min_t(unsigned int, val,
mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 1a6c1f8706e1..4a33a032c2a7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -308,10 +308,6 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
smps_mode = IEEE80211_SMPS_DYNAMIC;
}
- /* SMPS is disabled in eSR */
- if (mvmvif->esr_active)
- smps_mode = IEEE80211_SMPS_OFF;
-
ieee80211_request_smps(vif, link_id, smps_mode);
}
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index 2ec3655f1a9c..57a62108cbc3 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -143,8 +143,10 @@ static int p54_beacon_update(struct p54_common *priv,
if (!beacon)
return -ENOMEM;
ret = p54_beacon_format_ie_tim(beacon);
- if (ret)
+ if (ret) {
+ dev_kfree_skb_any(beacon);
return ret;
+ }
/*
* During operation, the firmware takes care of beaconing.
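
The ownership rule behind this one-line fix, as a userspace sketch with malloc/free standing in for the skb API: when the callee fails it has not consumed the buffer, so the caller must release it before bailing out.

#include <errno.h>
#include <stdlib.h>

static int format_ie_tim(unsigned char *beacon)
{
	(void)beacon;
	return -EINVAL;			/* pretend formatting failed */
}

static int beacon_update(void)
{
	unsigned char *beacon = malloc(512);
	int ret;

	if (!beacon)
		return -ENOMEM;

	ret = format_ie_tim(beacon);
	if (ret) {
		free(beacon);		/* the dev_kfree_skb_any() the fix adds */
		return ret;
	}

	free(beacon);			/* normal-path hand-off elided */
	return 0;
}

int main(void)
{
	return beacon_update() == -EINVAL ? 0 : 1;
}
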
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index b3c4040257a6..924ab93b7b67 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -426,6 +426,8 @@ static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, uint16_t nb
goto tx_ret;
}
+ usb_kill_urb(cardp->tx_urb);
+
usb_fill_bulk_urb(cardp->tx_urb, cardp->udev,
usb_sndbulkpipe(cardp->udev,
cardp->ep_out),
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index be23a29e7de0..a66d18e380fc 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -3147,10 +3147,14 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
SET_NETDEV_DEV(dev, adapter->dev);
- priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC%s",
+ ret = dev_alloc_name(dev, name);
+ if (ret)
+ goto err_alloc_name;
+
+ priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC-%s",
WQ_HIGHPRI |
WQ_MEM_RECLAIM |
- WQ_UNBOUND, 0, name);
+ WQ_UNBOUND, 0, dev->name);
if (!priv->dfs_cac_workqueue) {
mwifiex_dbg(adapter, ERROR, "cannot alloc DFS CAC queue\n");
ret = -ENOMEM;
@@ -3159,9 +3163,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
INIT_DELAYED_WORK(&priv->dfs_cac_work, mwifiex_dfs_cac_work_queue);
- priv->dfs_chan_sw_workqueue = alloc_workqueue("MWIFIEX_DFS_CHSW%s",
+ priv->dfs_chan_sw_workqueue = alloc_workqueue("MWIFIEX_DFS_CHSW-%s",
WQ_HIGHPRI | WQ_UNBOUND |
- WQ_MEM_RECLAIM, 0, name);
+ WQ_MEM_RECLAIM, 0, dev->name);
if (!priv->dfs_chan_sw_workqueue) {
mwifiex_dbg(adapter, ERROR, "cannot alloc DFS channel sw queue\n");
ret = -ENOMEM;
@@ -3198,6 +3202,7 @@ err_alloc_chsw:
destroy_workqueue(priv->dfs_cac_workqueue);
priv->dfs_cac_workqueue = NULL;
err_alloc_cac:
+err_alloc_name:
free_netdev(dev);
priv->netdev = NULL;
err_sta_init:
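
Why naming the workqueues before dev_alloc_name() was wrong, in a standalone sketch: cfg80211 may hand the driver a template such as "mlan%d", and only dev_alloc_name() turns it into a concrete interface name, so any string built from the raw template bakes the unresolved specifier in.

#include <stdio.h>

int main(void)
{
	const char *template = "mlan%d";	/* what cfg80211 passes in */
	char devname[16], wqname[32];

	/* dev_alloc_name() resolves the "%d" to a free unit number */
	snprintf(devname, sizeof(devname), template, 0);

	snprintf(wqname, sizeof(wqname), "MWIFIEX_DFS_CAC-%s", devname);
	puts(wqname);				/* MWIFIEX_DFS_CAC-mlan0 */
	return 0;
}
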
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index c06ad064f37c..f9a527f6a175 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -7826,6 +7826,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
goto err_set_intfdata;
hw->vif_data_size = sizeof(struct rtl8xxxu_vif);
+ hw->sta_data_size = sizeof(struct rtl8xxxu_sta_info);
hw->wiphy->max_scan_ssids = 1;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
index 4641999f3fe9..e88d92d3ae7a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
@@ -519,16 +519,3 @@ MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
-
-static int __init rtl_btcoexist_module_init(void)
-{
- return 0;
-}
-
-static void __exit rtl_btcoexist_module_exit(void)
-{
- return;
-}
-
-module_init(rtl_btcoexist_module_init);
-module_exit(rtl_btcoexist_module_exit);
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 0bc4afa4fda3..fd967006b3e1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -206,7 +206,7 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
}
/*
- *If a country IE has been recieved check its rule for this
+ *If a country IE has been received check its rule for this
*channel first before enabling active scan. The passive scan
*would have been enforced by the initial processing of our
*custom regulatory domain.
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index fa0ed39cb199..c4f9758b4e96 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -730,10 +730,10 @@ void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel)
}
EXPORT_SYMBOL(rtw_set_rx_freq_band);
-void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period)
+void rtw_set_dtim_period(struct rtw_dev *rtwdev, u8 dtim_period)
{
rtw_write32_set(rtwdev, REG_TCR, BIT_TCR_UPDATE_TIMIE);
- rtw_write8(rtwdev, REG_DTIM_COUNTER_ROOT, dtim_period - 1);
+ rtw_write8(rtwdev, REG_DTIM_COUNTER_ROOT, dtim_period ? dtim_period - 1 : 0);
}
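
A standalone sketch (plain C, reg_write() standing in for rtw_write8()) of the wraparound the new ?: guard prevents: with the parameter now u8, dtim_period == 0 would otherwise underflow to 0xff in the counter register.

#include <stdint.h>
#include <stdio.h>

static void reg_write(uint8_t val)
{
	printf("REG_DTIM_COUNTER_ROOT <- 0x%02x\n", val);
}

static void set_dtim_period(uint8_t dtim_period)
{
	/* unguarded, 0 - 1 would truncate to 0xff on the write */
	reg_write(dtim_period ? dtim_period - 1 : 0);
}

int main(void)
{
	set_dtim_period(0);	/* 0x00, not 0xff */
	set_dtim_period(3);	/* 0x02 */
	return 0;
}
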
void rtw_update_channel(struct rtw_dev *rtwdev, u8 center_channel,
@@ -1483,6 +1483,8 @@ void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
+
+ rtw_phy_dig_set_max_coverage(rtwdev);
}
void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
@@ -1494,6 +1496,7 @@ void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
if (!rtwvif)
return;
+ rtw_phy_dig_reset(rtwdev);
clear_bit(RTW_FLAG_SCANNING, rtwdev->flags);
clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
@@ -1658,14 +1661,41 @@ static u16 rtw_get_max_scan_ie_len(struct rtw_dev *rtwdev)
return len;
}
+static struct ieee80211_supported_band *
+rtw_sband_dup(struct rtw_dev *rtwdev,
+ const struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_supported_band *dup;
+
+ dup = devm_kmemdup(rtwdev->dev, sband, sizeof(*sband), GFP_KERNEL);
+ if (!dup)
+ return NULL;
+
+ dup->channels = devm_kmemdup_array(rtwdev->dev, sband->channels,
+ sband->n_channels,
+ sizeof(*sband->channels),
+ GFP_KERNEL);
+ if (!dup->channels)
+ return NULL;
+
+ dup->bitrates = devm_kmemdup_array(rtwdev->dev, sband->bitrates,
+ sband->n_bitrates,
+ sizeof(*sband->bitrates),
+ GFP_KERNEL);
+ if (!dup->bitrates)
+ return NULL;
+
+ return dup;
+}
+
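
The shape of rtw_sband_dup() in a standalone sketch, with malloc in place of devm_kmemdup()/devm_kmemdup_array(): copy the struct shallowly, then re-point each array member at its own copy. In the driver the devm_ variants tie every allocation to the device's lifetime, which is what lets the later hunks delete rtw_unset_supported_band() and its kfree() calls.

#include <stdlib.h>
#include <string.h>

struct band {
	int *channels;
	int n_channels;
};

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

static struct band *band_dup(const struct band *src)
{
	struct band *dup = memdup(src, sizeof(*src));

	if (!dup)
		return NULL;

	/* the shallow copy still aliases src->channels; deep-copy it */
	dup->channels = memdup(src->channels,
			       src->n_channels * sizeof(*src->channels));
	if (!dup->channels) {
		free(dup);	/* devm would do this cleanup implicitly */
		return NULL;
	}
	return dup;
}

int main(void)
{
	int chans[2] = { 1, 6 };
	struct band src = { .channels = chans, .n_channels = 2 };
	struct band *dup = band_dup(&src);

	if (dup) {
		free(dup->channels);
		free(dup);
	}
	return 0;
}
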
static void rtw_set_supported_band(struct ieee80211_hw *hw,
const struct rtw_chip_info *chip)
{
- struct rtw_dev *rtwdev = hw->priv;
struct ieee80211_supported_band *sband;
+ struct rtw_dev *rtwdev = hw->priv;
if (chip->band & RTW_BAND_2G) {
- sband = kmemdup(&rtw_band_2ghz, sizeof(*sband), GFP_KERNEL);
+ sband = rtw_sband_dup(rtwdev, &rtw_band_2ghz);
if (!sband)
goto err_out;
if (chip->ht_supported)
@@ -1674,7 +1704,7 @@ static void rtw_set_supported_band(struct ieee80211_hw *hw,
}
if (chip->band & RTW_BAND_5G) {
- sband = kmemdup(&rtw_band_5ghz, sizeof(*sband), GFP_KERNEL);
+ sband = rtw_sband_dup(rtwdev, &rtw_band_5ghz);
if (!sband)
goto err_out;
if (chip->ht_supported)
@@ -1690,13 +1720,6 @@ err_out:
rtw_err(rtwdev, "failed to set supported band\n");
}
-static void rtw_unset_supported_band(struct ieee80211_hw *hw,
- const struct rtw_chip_info *chip)
-{
- kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
- kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
-}
-
static void rtw_vif_smps_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -2320,10 +2343,7 @@ EXPORT_SYMBOL(rtw_register_hw);
void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
- const struct rtw_chip_info *chip = rtwdev->chip;
-
ieee80211_unregister_hw(hw);
- rtw_unset_supported_band(hw, chip);
rtw_debugfs_deinit(rtwdev);
rtw_led_deinit(rtwdev);
}
@@ -2444,10 +2464,10 @@ void rtw_core_enable_beacon(struct rtw_dev *rtwdev, bool enable)
if (enable) {
rtw_write32_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
- rtw_write32_clr(rtwdev, REG_TXPAUSE, BIT_HIGH_QUEUE);
+ rtw_write8_clr(rtwdev, REG_TXPAUSE, BIT_HIGH_QUEUE);
} else {
rtw_write32_clr(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
- rtw_write32_set(rtwdev, REG_TXPAUSE, BIT_HIGH_QUEUE);
+ rtw_write8_set(rtwdev, REG_TXPAUSE, BIT_HIGH_QUEUE);
}
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 43ed6d6b4291..1ab70214ce36 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -2226,7 +2226,7 @@ enum nl80211_band rtw_hw_to_nl80211_band(enum rtw_supported_band hw_band)
}
void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel);
-void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period);
+void rtw_set_dtim_period(struct rtw_dev *rtwdev, u8 dtim_period);
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 55be0d8e0c28..e2ac5c6fd500 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -370,6 +370,26 @@ static void rtw_phy_statistics(struct rtw_dev *rtwdev)
#define DIG_CVRG_MIN 0x1c
#define DIG_RSSI_GAIN_OFFSET 15
+void rtw_phy_dig_set_max_coverage(struct rtw_dev *rtwdev)
+{
+ /* Lower values result in greater coverage. */
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "Setting IGI=%#x for max coverage\n",
+ DIG_CVRG_MIN);
+
+ rtw_phy_dig_write(rtwdev, DIG_CVRG_MIN);
+}
+
+void rtw_phy_dig_reset(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 last_igi;
+
+ last_igi = dm_info->igi_history[0];
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "Resetting IGI=%#x\n", last_igi);
+
+ rtw_phy_dig_write(rtwdev, last_igi);
+}
+
static bool
rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
{
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index c9e6b869661d..8449936497bb 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -146,6 +146,8 @@ static inline int rtw_check_supported_rfe(struct rtw_dev *rtwdev)
}
void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi);
+void rtw_phy_dig_reset(struct rtw_dev *rtwdev);
+void rtw_phy_dig_set_max_coverage(struct rtw_dev *rtwdev);
struct rtw_power_params {
u8 pwr_base;
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723cs.c b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
index 1f98d35a8dd1..2018c9d76dd1 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
@@ -23,9 +23,9 @@ static struct sdio_driver rtw_8723cs_driver = {
.id_table = rtw_8723cs_id_table,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
+ .shutdown = rtw_sdio_shutdown,
.drv = {
.pm = &rtw_sdio_pm_ops,
- .shutdown = rtw_sdio_shutdown
}};
module_sdio_driver(rtw_8723cs_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723ds.c b/drivers/net/wireless/realtek/rtw88/rtw8723ds.c
index 206b77e5b98e..e38c90b769a2 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723ds.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723ds.c
@@ -28,10 +28,10 @@ static struct sdio_driver rtw_8723ds_driver = {
.name = KBUILD_MODNAME,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
+ .shutdown = rtw_sdio_shutdown,
.id_table = rtw_8723ds_id_table,
.drv = {
.pm = &rtw_sdio_pm_ops,
- .shutdown = rtw_sdio_shutdown,
}
};
module_sdio_driver(rtw_8723ds_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cs.c b/drivers/net/wireless/realtek/rtw88/rtw8821cs.c
index 6d94162213c6..58e0ef219cdc 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821cs.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821cs.c
@@ -23,10 +23,10 @@ static struct sdio_driver rtw_8821cs_driver = {
.name = KBUILD_MODNAME,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
+ .shutdown = rtw_sdio_shutdown,
.id_table = rtw_8821cs_id_table,
.drv = {
.pm = &rtw_sdio_pm_ops,
- .shutdown = rtw_sdio_shutdown,
}
};
module_sdio_driver(rtw_8821cs_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
index 7a0fffc359e2..8cd09d66655d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
@@ -37,6 +37,8 @@ static const struct usb_device_id rtw_8821cu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
{ USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xd811, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0105, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Mercusys */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8821cu_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 89b6485b229a..4d88cc2f4148 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -1005,7 +1005,8 @@ static int rtw8822b_set_antenna(struct rtw_dev *rtwdev,
hal->antenna_tx = antenna_tx;
hal->antenna_rx = antenna_rx;
- rtw8822b_config_trx_mode(rtwdev, antenna_tx, antenna_rx, false);
+ if (test_bit(RTW_FLAG_POWERON, rtwdev->flags))
+ rtw8822b_config_trx_mode(rtwdev, antenna_tx, antenna_rx, false);
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bs.c b/drivers/net/wireless/realtek/rtw88/rtw8822bs.c
index 744781dcb419..2de9b11540c5 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822bs.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822bs.c
@@ -23,10 +23,10 @@ static struct sdio_driver rtw_8822bs_driver = {
.name = KBUILD_MODNAME,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
+ .shutdown = rtw_sdio_shutdown,
.id_table = rtw_8822bs_id_table,
.drv = {
.pm = &rtw_sdio_pm_ops,
- .shutdown = rtw_sdio_shutdown,
}
};
module_sdio_driver(rtw_8822bs_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822cs.c b/drivers/net/wireless/realtek/rtw88/rtw8822cs.c
index 322281e07eb8..b00ef4173962 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822cs.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822cs.c
@@ -23,10 +23,10 @@ static struct sdio_driver rtw_8822cs_driver = {
.name = KBUILD_MODNAME,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
+ .shutdown = rtw_sdio_shutdown,
.id_table = rtw_8822cs_id_table,
.drv = {
.pm = &rtw_sdio_pm_ops,
- .shutdown = rtw_sdio_shutdown,
}
};
module_sdio_driver(rtw_8822cs_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index e35de52d8eb4..138e9e348c6c 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -1414,9 +1414,8 @@ void rtw_sdio_remove(struct sdio_func *sdio_func)
}
EXPORT_SYMBOL(rtw_sdio_remove);
-void rtw_sdio_shutdown(struct device *dev)
+void rtw_sdio_shutdown(struct sdio_func *sdio_func)
{
- struct sdio_func *sdio_func = dev_to_sdio_func(dev);
const struct rtw_chip_info *chip;
struct ieee80211_hw *hw;
struct rtw_dev *rtwdev;
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.h b/drivers/net/wireless/realtek/rtw88/sdio.h
index 3c659ed180f0..457e8b02380e 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.h
+++ b/drivers/net/wireless/realtek/rtw88/sdio.h
@@ -166,7 +166,7 @@ extern const struct dev_pm_ops rtw_sdio_pm_ops;
int rtw_sdio_probe(struct sdio_func *sdio_func,
const struct sdio_device_id *id);
void rtw_sdio_remove(struct sdio_func *sdio_func);
-void rtw_sdio_shutdown(struct device *dev);
+void rtw_sdio_shutdown(struct sdio_func *sdio_func);
static inline bool rtw_sdio_is_sdio30_supported(struct rtw_dev *rtwdev)
{
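
The signature change in miniature (standalone sketch; struct layouts are illustrative, and the matching bus-side plumbing is presumably the sdio_bus.c change in this series): the bus core does the container_of() once, so drivers get the typed sdio_func pointer directly instead of downcasting a bare struct device.

#include <stddef.h>

struct device { int id; };
struct sdio_func { struct device dev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* before: a generic device_driver callback, every driver downcasts */
static void shutdown_old(struct device *dev)
{
	struct sdio_func *func = container_of(dev, struct sdio_func, dev);
	(void)func;
}

/* after: the sdio core resolves the type and calls the driver with it */
static void shutdown_new(struct sdio_func *func)
{
	(void)func;
}

int main(void)
{
	struct sdio_func f = { .dev = { .id = 1 } };

	shutdown_old(&f.dev);
	shutdown_new(&f);
	return 0;
}
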
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index 3b5126ffc81a..db60e142268d 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -965,7 +965,8 @@ static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
struct sk_buff *rx_skb;
int i;
- rtwusb->rxwq = alloc_workqueue("rtw88_usb: rx wq", WQ_BH, 0);
+ rtwusb->rxwq = alloc_workqueue("rtw88_usb: rx wq", WQ_BH | WQ_PERCPU,
+ 0);
if (!rtwusb->rxwq) {
rtw_err(rtwdev, "failed to create RX work queue\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 9370cbda945c..9f63d67777fa 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -1140,3 +1140,137 @@ void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
le32_encode_bits(mld_bssid[5], DCTLINFO_V2_W12_MLD_BSSID_5);
h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);
}
+
+void rtw89_cam_fill_dctl_sec_cam_info_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link,
+ struct rtw89_h2c_dctlinfo_ud_v3 *h2c)
+{
+ struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link);
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif_link->rtwvif);
+ struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ struct rtw89_addr_cam_entry *addr_cam =
+ rtw89_get_addr_cam_of(rtwvif_link, rtwsta_link);
+ bool is_mld = sta ? sta->mlo : ieee80211_vif_is_mld(vif);
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ u8 *ptk_tx_iv = rtw_wow->key_info.ptk_tx_iv;
+ u8 *mld_sma, *mld_tma, *mld_bssid;
+
+ h2c->c0 = le32_encode_bits(rtwsta_link ? rtwsta_link->mac_id :
+ rtwvif_link->mac_id,
+ DCTLINFO_V3_C0_MACID) |
+ le32_encode_bits(1, DCTLINFO_V3_C0_OP);
+
+ h2c->w2 = le32_encode_bits(is_mld, DCTLINFO_V3_W2_IS_MLD);
+ h2c->m2 = cpu_to_le32(DCTLINFO_V3_W2_IS_MLD);
+
+ h2c->w4 = le32_encode_bits(addr_cam->sec_ent_keyid[0],
+ DCTLINFO_V3_W4_SEC_ENT0_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[1],
+ DCTLINFO_V3_W4_SEC_ENT1_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[2],
+ DCTLINFO_V3_W4_SEC_ENT2_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[3],
+ DCTLINFO_V3_W4_SEC_ENT3_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[4],
+ DCTLINFO_V3_W4_SEC_ENT4_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[5],
+ DCTLINFO_V3_W4_SEC_ENT5_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[6],
+ DCTLINFO_V3_W4_SEC_ENT6_KEYID);
+ h2c->m4 = cpu_to_le32(DCTLINFO_V3_W4_SEC_ENT0_KEYID |
+ DCTLINFO_V3_W4_SEC_ENT1_KEYID |
+ DCTLINFO_V3_W4_SEC_ENT2_KEYID |
+ DCTLINFO_V3_W4_SEC_ENT3_KEYID |
+ DCTLINFO_V3_W4_SEC_ENT4_KEYID |
+ DCTLINFO_V3_W4_SEC_ENT5_KEYID |
+ DCTLINFO_V3_W4_SEC_ENT6_KEYID);
+
+ h2c->w5 = le32_encode_bits(addr_cam->sec_cam_map[0],
+ DCTLINFO_V3_W5_SEC_ENT_VALID_V1);
+ h2c->m5 = cpu_to_le32(DCTLINFO_V3_W5_SEC_ENT_VALID_V1);
+
+ h2c->w6 = le32_encode_bits(addr_cam->sec_ent[0],
+ DCTLINFO_V3_W6_SEC_ENT0_V2) |
+ le32_encode_bits(addr_cam->sec_ent[1],
+ DCTLINFO_V3_W6_SEC_ENT1_V2) |
+ le32_encode_bits(addr_cam->sec_ent[2],
+ DCTLINFO_V3_W6_SEC_ENT2_V2);
+ h2c->m6 = cpu_to_le32(DCTLINFO_V3_W6_SEC_ENT0_V2 |
+ DCTLINFO_V3_W6_SEC_ENT1_V2 |
+ DCTLINFO_V3_W6_SEC_ENT2_V2);
+
+ h2c->w7 = le32_encode_bits(addr_cam->sec_ent[3],
+ DCTLINFO_V3_W7_SEC_ENT3_V2) |
+ le32_encode_bits(addr_cam->sec_ent[4],
+ DCTLINFO_V3_W7_SEC_ENT4_V2) |
+ le32_encode_bits(addr_cam->sec_ent[5],
+ DCTLINFO_V3_W7_SEC_ENT5_V2);
+ h2c->m7 = cpu_to_le32(DCTLINFO_V3_W7_SEC_ENT3_V2 |
+ DCTLINFO_V3_W7_SEC_ENT4_V2 |
+ DCTLINFO_V3_W7_SEC_ENT5_V2);
+
+ h2c->w8 = le32_encode_bits(addr_cam->sec_ent[6],
+ DCTLINFO_V3_W8_SEC_ENT6_V2);
+ h2c->m8 = cpu_to_le32(DCTLINFO_V3_W8_SEC_ENT6_V2);
+
+ if (rtw_wow->ptk_alg) {
+ h2c->w0 = le32_encode_bits(ptk_tx_iv[0] | ptk_tx_iv[1] << 8,
+ DCTLINFO_V3_W0_AES_IV_L);
+ h2c->m0 = cpu_to_le32(DCTLINFO_V3_W0_AES_IV_L);
+
+ h2c->w1 = le32_encode_bits(ptk_tx_iv[4] |
+ ptk_tx_iv[5] << 8 |
+ ptk_tx_iv[6] << 16 |
+ ptk_tx_iv[7] << 24,
+ DCTLINFO_V3_W1_AES_IV_H);
+ h2c->m1 = cpu_to_le32(DCTLINFO_V3_W1_AES_IV_H);
+
+ h2c->w4 |= le32_encode_bits(rtw_wow->ptk_keyidx,
+ DCTLINFO_V3_W4_SEC_KEY_ID);
+ h2c->m4 |= cpu_to_le32(DCTLINFO_V3_W4_SEC_KEY_ID);
+ }
+
+ if (!is_mld)
+ return;
+
+ if (rtwvif_link->net_type == RTW89_NET_TYPE_INFRA) {
+ mld_sma = rtwvif->mac_addr;
+ mld_tma = vif->cfg.ap_addr;
+ mld_bssid = vif->cfg.ap_addr;
+ } else if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE && sta) {
+ mld_sma = rtwvif->mac_addr;
+ mld_tma = sta->addr;
+ mld_bssid = rtwvif->mac_addr;
+ } else {
+ return;
+ }
+
+ h2c->w9 = le32_encode_bits(mld_sma[0], DCTLINFO_V3_W9_MLD_SMA_0_V2) |
+ le32_encode_bits(mld_sma[1], DCTLINFO_V3_W9_MLD_SMA_1_V2) |
+ le32_encode_bits(mld_sma[2], DCTLINFO_V3_W9_MLD_SMA_2_V2) |
+ le32_encode_bits(mld_sma[3], DCTLINFO_V3_W9_MLD_SMA_3_V2);
+ h2c->m9 = cpu_to_le32(DCTLINFO_V3_W9_ALL);
+
+ h2c->w10 = le32_encode_bits(mld_sma[4], DCTLINFO_V3_W10_MLD_SMA_4_V2) |
+ le32_encode_bits(mld_sma[5], DCTLINFO_V3_W10_MLD_SMA_5_V2) |
+ le32_encode_bits(mld_tma[0], DCTLINFO_V3_W10_MLD_TMA_0_V2) |
+ le32_encode_bits(mld_tma[1], DCTLINFO_V3_W10_MLD_TMA_1_V2);
+ h2c->m10 = cpu_to_le32(DCTLINFO_V3_W10_ALL);
+
+ h2c->w11 = le32_encode_bits(mld_tma[2], DCTLINFO_V3_W11_MLD_TMA_2_V2) |
+ le32_encode_bits(mld_tma[3], DCTLINFO_V3_W11_MLD_TMA_3_V2) |
+ le32_encode_bits(mld_tma[4], DCTLINFO_V3_W11_MLD_TMA_4_V2) |
+ le32_encode_bits(mld_tma[5], DCTLINFO_V3_W11_MLD_TMA_5_V2);
+ h2c->m11 = cpu_to_le32(DCTLINFO_V3_W11_ALL);
+
+ h2c->w12 = le32_encode_bits(mld_bssid[0], DCTLINFO_V3_W12_MLD_TA_BSSID_0_V2) |
+ le32_encode_bits(mld_bssid[1], DCTLINFO_V3_W12_MLD_TA_BSSID_1_V2) |
+ le32_encode_bits(mld_bssid[2], DCTLINFO_V3_W12_MLD_TA_BSSID_2_V2) |
+ le32_encode_bits(mld_bssid[3], DCTLINFO_V3_W12_MLD_TA_BSSID_3_V2);
+ h2c->m12 = cpu_to_le32(DCTLINFO_V3_W12_ALL);
+
+ h2c->w13 = le32_encode_bits(mld_bssid[4], DCTLINFO_V3_W13_MLD_TA_BSSID_4_V2) |
+ le32_encode_bits(mld_bssid[5], DCTLINFO_V3_W13_MLD_TA_BSSID_5_V2);
+ h2c->m13 = cpu_to_le32(DCTLINFO_V3_W13_ALL);
+}
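
A self-contained model of the value/mask convention this H2C uses (field names borrowed from the patch; encode_bits() is a local stand-in for le32_encode_bits()): each wN dword carries field values placed by mask, and the paired mN dword tells firmware which bits of wN to actually apply.

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))

#define W4_SEC_ENT0_KEYID	GENMASK32(19, 18)
#define W4_SEC_ENT1_KEYID	GENMASK32(21, 20)

static uint32_t encode_bits(uint32_t val, uint32_t mask)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	/* w4 holds the key ids, m4 marks exactly those fields as valid */
	uint32_t w4 = encode_bits(2, W4_SEC_ENT0_KEYID) |
		      encode_bits(1, W4_SEC_ENT1_KEYID);
	uint32_t m4 = W4_SEC_ENT0_KEYID | W4_SEC_ENT1_KEYID;

	printf("w4=0x%08x m4=0x%08x\n", (unsigned)w4, (unsigned)m4);
	return 0;
}
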
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index c46b6f91bbdb..22868f262243 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -302,6 +302,131 @@ struct rtw89_h2c_dctlinfo_ud_v2 {
#define DCTLINFO_V2_W12_MLD_BSSID_5 GENMASK(15, 8)
#define DCTLINFO_V2_W12_ALL GENMASK(15, 0)
+struct rtw89_h2c_dctlinfo_ud_v3 {
+ __le32 c0;
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 m0;
+ __le32 m1;
+ __le32 m2;
+ __le32 m3;
+ __le32 m4;
+ __le32 m5;
+ __le32 m6;
+ __le32 m7;
+ __le32 m8;
+ __le32 m9;
+ __le32 m10;
+ __le32 m11;
+ __le32 m12;
+ __le32 m13;
+ __le32 m14;
+ __le32 m15;
+} __packed;
+
+#define DCTLINFO_V3_C0_MACID GENMASK(15, 0)
+#define DCTLINFO_V3_C0_OP BIT(16)
+
+#define DCTLINFO_V3_W0_QOS_FIELD_H GENMASK(7, 0)
+#define DCTLINFO_V3_W0_HW_EXSEQ_MACID GENMASK(14, 8)
+#define DCTLINFO_V3_W0_QOS_DATA BIT(15)
+#define DCTLINFO_V3_W0_AES_IV_L GENMASK(31, 16)
+#define DCTLINFO_V3_W0_ALL GENMASK(31, 0)
+#define DCTLINFO_V3_W1_AES_IV_H GENMASK(31, 0)
+#define DCTLINFO_V3_W1_ALL GENMASK(31, 0)
+#define DCTLINFO_V3_W2_SEQ0 GENMASK(11, 0)
+#define DCTLINFO_V3_W2_SEQ1 GENMASK(23, 12)
+#define DCTLINFO_V3_W2_AMSDU_MAX_LEN GENMASK(26, 24)
+#define DCTLINFO_V3_W2_STA_AMSDU_EN BIT(27)
+#define DCTLINFO_V3_W2_CHKSUM_OFLD_EN BIT(28)
+#define DCTLINFO_V3_W2_WITH_LLC BIT(29)
+#define DCTLINFO_V3_W2_NAT25_EN BIT(30)
+#define DCTLINFO_V3_W2_IS_MLD BIT(31)
+#define DCTLINFO_V3_W2_ALL GENMASK(31, 0)
+#define DCTLINFO_V3_W3_SEQ2 GENMASK(11, 0)
+#define DCTLINFO_V3_W3_SEQ3 GENMASK(23, 12)
+#define DCTLINFO_V3_W3_TGT_IND GENMASK(27, 24)
+#define DCTLINFO_V3_W3_TGT_IND_EN BIT(28)
+#define DCTLINFO_V3_W3_HTC_LB GENMASK(31, 29)
+#define DCTLINFO_V3_W3_ALL GENMASK(31, 0)
+#define DCTLINFO_V3_W4_VLAN_TAG_SEL GENMASK(7, 5)
+#define DCTLINFO_V3_W4_HTC_ORDER BIT(8)
+#define DCTLINFO_V3_W4_SEC_KEY_ID GENMASK(10, 9)
+#define DCTLINFO_V3_W4_VLAN_RX_DYNAMIC_PCP_EN BIT(11)
+#define DCTLINFO_V3_W4_VLAN_RX_PKT_DROP BIT(12)
+#define DCTLINFO_V3_W4_VLAN_RX_VALID BIT(13)
+#define DCTLINFO_V3_W4_VLAN_TX_VALID BIT(14)
+#define DCTLINFO_V3_W4_WAPI BIT(15)
+#define DCTLINFO_V3_W4_SEC_ENT_MODE GENMASK(17, 16)
+#define DCTLINFO_V3_W4_SEC_ENT0_KEYID GENMASK(19, 18)
+#define DCTLINFO_V3_W4_SEC_ENT1_KEYID GENMASK(21, 20)
+#define DCTLINFO_V3_W4_SEC_ENT2_KEYID GENMASK(23, 22)
+#define DCTLINFO_V3_W4_SEC_ENT3_KEYID GENMASK(25, 24)
+#define DCTLINFO_V3_W4_SEC_ENT4_KEYID GENMASK(27, 26)
+#define DCTLINFO_V3_W4_SEC_ENT5_KEYID GENMASK(29, 28)
+#define DCTLINFO_V3_W4_SEC_ENT6_KEYID GENMASK(31, 30)
+#define DCTLINFO_V3_W4_ALL GENMASK(31, 5)
+#define DCTLINFO_V3_W5_SEC_ENT7_KEYID GENMASK(1, 0)
+#define DCTLINFO_V3_W5_SEC_ENT8_KEYID GENMASK(3, 2)
+#define DCTLINFO_V3_W5_SEC_ENT_VALID_V1 GENMASK(23, 8)
+#define DCTLINFO_V3_W5_ALL (GENMASK(23, 8) | GENMASK(3, 0))
+#define DCTLINFO_V3_W6_SEC_ENT0_V2 GENMASK(8, 0)
+#define DCTLINFO_V3_W6_SEC_ENT1_V2 GENMASK(18, 10)
+#define DCTLINFO_V3_W6_SEC_ENT2_V2 GENMASK(28, 20)
+#define DCTLINFO_V3_W6_ALL GENMASK(28, 0)
+#define DCTLINFO_V3_W7_SEC_ENT3_V2 GENMASK(8, 0)
+#define DCTLINFO_V3_W7_SEC_ENT4_V2 GENMASK(18, 10)
+#define DCTLINFO_V3_W7_SEC_ENT5_V2 GENMASK(28, 20)
+#define DCTLINFO_V3_W7_ALL GENMASK(28, 0)
+#define DCTLINFO_V3_W8_SEC_ENT6_V2 GENMASK(8, 0)
+#define DCTLINFO_V3_W8_SEC_ENT7_V1 GENMASK(18, 10)
+#define DCTLINFO_V3_W8_SEC_ENT8_V1 GENMASK(28, 20)
+#define DCTLINFO_V3_W8_ALL GENMASK(28, 0)
+#define DCTLINFO_V3_W9_MLD_SMA_0_V2 GENMASK(7, 0)
+#define DCTLINFO_V3_W9_MLD_SMA_1_V2 GENMASK(15, 8)
+#define DCTLINFO_V3_W9_MLD_SMA_2_V2 GENMASK(23, 16)
+#define DCTLINFO_V3_W9_MLD_SMA_3_V2 GENMASK(31, 24)
+#define DCTLINFO_V3_W9_MLD_SMA_L_V2 GENMASK(31, 0)
+#define DCTLINFO_V3_W9_ALL GENMASK(31, 0)
+#define DCTLINFO_V3_W10_MLD_SMA_4_V2 GENMASK(7, 0)
+#define DCTLINFO_V3_W10_MLD_SMA_5_V2 GENMASK(15, 8)
+#define DCTLINFO_V3_W10_MLD_SMA_H_V2 GENMASK(15, 0)
+#define DCTLINFO_V3_W10_MLD_TMA_0_V2 GENMASK(23, 16)
+#define DCTLINFO_V3_W10_MLD_TMA_1_V2 GENMASK(31, 24)
+#define DCTLINFO_V3_W10_MLD_TMA_L_V2 GENMASK(31, 16)
+#define DCTLINFO_V3_W10_ALL GENMASK(31, 0)
+#define DCTLINFO_V3_W11_MLD_TMA_2_V2 GENMASK(7, 0)
+#define DCTLINFO_V3_W11_MLD_TMA_3_V2 GENMASK(15, 8)
+#define DCTLINFO_V3_W11_MLD_TMA_4_V2 GENMASK(23, 16)
+#define DCTLINFO_V3_W11_MLD_TMA_5_V2 GENMASK(31, 24)
+#define DCTLINFO_V3_W11_MLD_TMA_H_V2 GENMASK(31, 0)
+#define DCTLINFO_V3_W11_ALL GENMASK(31, 0)
+#define DCTLINFO_V3_W12_MLD_TA_BSSID_0_V2 GENMASK(7, 0)
+#define DCTLINFO_V3_W12_MLD_TA_BSSID_1_V2 GENMASK(15, 8)
+#define DCTLINFO_V3_W12_MLD_TA_BSSID_2_V2 GENMASK(23, 16)
+#define DCTLINFO_V3_W12_MLD_TA_BSSID_3_V2 GENMASK(31, 24)
+#define DCTLINFO_V3_W12_MLD_TA_BSSID_L_V2 GENMASK(31, 0)
+#define DCTLINFO_V3_W12_ALL GENMASK(31, 0)
+#define DCTLINFO_V3_W13_MLD_TA_BSSID_4_V2 GENMASK(7, 0)
+#define DCTLINFO_V3_W13_MLD_TA_BSSID_5_V2 GENMASK(15, 8)
+#define DCTLINFO_V3_W13_MLD_TA_BSSID_H_V2 GENMASK(15, 0)
+#define DCTLINFO_V3_W13_HW_EXSEQ_MACID_V1 GENMASK(24, 16)
+#define DCTLINFO_V3_W13_ALL GENMASK(24, 0)
+
int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif);
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif);
int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
@@ -328,6 +453,10 @@ void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
struct rtw89_h2c_dctlinfo_ud_v2 *h2c);
+void rtw89_cam_fill_dctl_sec_cam_info_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link,
+ struct rtw89_h2c_dctlinfo_ud_v3 *h2c);
int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index 86f1b39a967f..9b2f6f0a00fd 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -60,6 +60,28 @@ static enum rtw89_subband rtw89_get_subband_type(enum rtw89_band band,
}
}
+static enum rtw89_tx_comp_band rtw89_get_tx_comp_band(enum rtw89_band band,
+ u8 center_chan)
+{
+ switch (band) {
+ default:
+ case RTW89_BAND_2G:
+ return RTW89_TX_COMP_BAND_2GHZ;
+ case RTW89_BAND_5G:
+ if (center_chan < 149)
+ return RTW89_TX_COMP_BAND_5GHZ_L;
+ else
+ return RTW89_TX_COMP_BAND_5GHZ_H;
+ case RTW89_BAND_6G:
+ if (center_chan < 65)
+ return RTW89_TX_COMP_BAND_5GHZ_H;
+ else if (center_chan < 193)
+ return RTW89_TX_COMP_BAND_6GHZ_M;
+ else
+ return RTW89_TX_COMP_BAND_6GHZ_UH;
+ }
+}
+
static enum rtw89_sc_offset rtw89_get_primary_chan_idx(enum rtw89_bandwidth bw,
u32 center_freq,
u32 primary_freq)
@@ -123,6 +145,7 @@ void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
chan->freq = center_freq;
chan->subband_type = rtw89_get_subband_type(band, center_chan);
+ chan->tx_comp_band = rtw89_get_tx_comp_band(band, center_chan);
chan->pri_ch_idx = rtw89_get_primary_chan_idx(bandwidth, center_freq,
primary_freq);
chan->pri_sb_idx = rtw89_get_primary_sb_idx(center_chan, primary_chan,
@@ -295,6 +318,8 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
mgnt->chanctx_tbl[i][j] = RTW89_CHANCTX_IDLE;
}
+ hal->entity_force_hw = RTW89_PHY_NUM;
+
rtw89_config_default_chandef(rtwdev);
}
@@ -347,8 +372,8 @@ static void rtw89_normalize_link_chanctx(struct rtw89_dev *rtwdev,
if (unlikely(!rtwvif_link->chanctx_assigned))
return;
- cur = rtw89_vif_get_link_inst(rtwvif, 0);
- if (!cur || !cur->chanctx_assigned)
+ cur = rtw89_get_designated_link(rtwvif);
+ if (unlikely(!cur) || !cur->chanctx_assigned)
return;
if (cur == rtwvif_link)
@@ -417,12 +442,43 @@ dflt:
}
EXPORT_SYMBOL(__rtw89_mgnt_chan_get);
+bool rtw89_entity_check_hw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_2_PLUS_0_1RF:
+ return phy_idx == RTW89_PHY_0;
+ case MLO_0_PLUS_2_1RF:
+ return phy_idx == RTW89_PHY_1;
+ default:
+ return false;
+ }
+}
+
+void rtw89_entity_force_hw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtwdev->hal.entity_force_hw = phy_idx;
+
+ if (phy_idx != RTW89_PHY_NUM)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "%s: %d\n", __func__, phy_idx);
+ else
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "%s: (none)\n", __func__);
+}
+
static enum rtw89_mlo_dbcc_mode
rtw89_entity_sel_mlo_dbcc_mode(struct rtw89_dev *rtwdev, u8 active_hws)
{
if (rtwdev->chip->chip_gen != RTW89_CHIP_BE)
return MLO_DBCC_NOT_SUPPORT;
+ switch (rtwdev->hal.entity_force_hw) {
+ case RTW89_PHY_0:
+ return MLO_2_PLUS_0_1RF;
+ case RTW89_PHY_1:
+ return MLO_0_PLUS_2_1RF;
+ default:
+ break;
+ }
+
switch (active_hws) {
case BIT(0):
return MLO_2_PLUS_0_1RF;
@@ -466,8 +522,8 @@ static void rtw89_entity_recalc_mgnt_roles(struct rtw89_dev *rtwdev)
}
/* To be consistent with legacy behavior, expect the first active role
- * which uses RTW89_CHANCTX_0 to put at position 0, and make its first
- * link instance take RTW89_CHANCTX_0. (normalizing)
+ * which uses RTW89_CHANCTX_0 to put at position 0 and its designated
+ * link take RTW89_CHANCTX_0. (normalizing)
*/
list_for_each_entry(role, &mgnt->active_list, mgnt_entry) {
for (i = 0; i < role->links_inst_valid_num; i++) {
@@ -2608,17 +2664,20 @@ bool rtw89_mcc_detect_go_bcn(struct rtw89_dev *rtwdev,
static void rtw89_mcc_detect_connection(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *role)
{
+ struct rtw89_vif_link *rtwvif_link = role->rtwvif_link;
struct ieee80211_vif *vif;
bool start_detect;
int ret;
ret = rtw89_core_send_nullfunc(rtwdev, role->rtwvif_link, true, false,
RTW89_MCC_PROBE_TIMEOUT);
- if (ret)
+ if (ret &&
+ READ_ONCE(rtwvif_link->sync_bcn_tsf) == rtwvif_link->last_sync_bcn_tsf)
role->probe_count++;
else
role->probe_count = 0;
+ rtwvif_link->last_sync_bcn_tsf = READ_ONCE(rtwvif_link->sync_bcn_tsf);
if (role->probe_count < RTW89_MCC_PROBE_MAX_TRIES)
return;
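
The gating this hunk adds, in a standalone sketch: a failed nullfunc only counts toward declaring the peer gone if its beacon TSF has not advanced since the previous check, so a peer that is still beaconing never accumulates probe failures.

#include <stdint.h>
#include <stdbool.h>

struct probe_state {
	uint64_t last_sync_bcn_tsf;
	unsigned int probe_count;
};

static void account_probe(struct probe_state *st, bool tx_failed,
			  uint64_t sync_bcn_tsf)
{
	if (tx_failed && sync_bcn_tsf == st->last_sync_bcn_tsf)
		st->probe_count++;
	else
		st->probe_count = 0;

	st->last_sync_bcn_tsf = sync_bcn_tsf;
}

int main(void)
{
	struct probe_state st = { 0 };

	account_probe(&st, true, 100);	/* TSF moved: count stays 0 */
	account_probe(&st, true, 100);	/* TSF stalled: count -> 1 */
	return st.probe_count == 1 ? 0 : 1;
}
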
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 5b22764d5329..c797cda2e763 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -166,6 +166,8 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
const struct cfg80211_chan_def *chandef);
void rtw89_entity_init(struct rtw89_dev *rtwdev);
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev);
+bool rtw89_entity_check_hw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw89_entity_force_hw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
void rtw89_chanctx_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev);
void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 0824940c91ae..6e77522bcd8f 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -470,6 +470,32 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
__rtw89_core_set_chip_txpwr(rtwdev, chan, RTW89_PHY_1);
}
+void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ bool mon = !!rtwdev->pure_monitor_mode_vif;
+ bool prehdl_link = false;
+
+ if (chip->chip_gen != RTW89_CHIP_AX &&
+ !RTW89_CHK_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY, &rtwdev->fw) &&
+ !mon && !rtw89_entity_check_hw(rtwdev, rtwvif_link->phy_idx))
+ prehdl_link = true;
+
+ if (prehdl_link) {
+ rtw89_entity_force_hw(rtwdev, rtwvif_link->phy_idx);
+ rtw89_set_channel(rtwdev);
+ }
+
+ if (chip->ops->rfk_channel)
+ chip->ops->rfk_channel(rtwdev, rtwvif_link);
+
+ if (prehdl_link) {
+ rtw89_entity_force_hw(rtwdev, RTW89_PHY_NUM);
+ rtw89_set_channel(rtwdev);
+ }
+}
+
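
The control flow of the new helper, reduced to a standalone sketch: when the pre-handling condition holds, pin the entity to the link's PHY and reprogram the channel, run the chip's RFK hook, then lift the override (RTW89_PHY_NUM meaning "none") and reprogram once more.

#include <stdio.h>

enum phy_idx { PHY_0, PHY_1, PHY_NUM };	/* PHY_NUM == no override */

static enum phy_idx force_hw = PHY_NUM;

static void set_channel(void)
{
	printf("set_channel(force=%d)\n", force_hw);
}

static void rfk_channel(enum phy_idx phy, int prehdl_link)
{
	if (prehdl_link) {
		force_hw = phy;
		set_channel();
	}

	printf("rfk on PHY %d\n", phy);	/* chip->ops->rfk_channel() */

	if (prehdl_link) {
		force_hw = PHY_NUM;
		set_channel();
	}
}

int main(void)
{
	rfk_channel(PHY_1, 1);
	return 0;
}
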
static void rtw89_chip_rfk_channel_for_pure_mon_vif(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
@@ -548,7 +574,7 @@ rtw89_core_get_tx_type(struct rtw89_dev *rtwdev,
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 fc = hdr->frame_control;
- if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
+ if (ieee80211_is_mgmt(fc) || ieee80211_is_any_nullfunc(fc))
return RTW89_CORE_TX_TYPE_MGMT;
return RTW89_CORE_TX_TYPE_DATA;
@@ -833,6 +859,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
desc_info->qsel = qsel;
desc_info->ch_dma = ch_dma;
+ desc_info->sw_mld = true;
desc_info->port = desc_info->hiq ? rtwvif_link->port : 0;
desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
desc_info->hw_ssn_sel = RTW89_MGMT_HW_SSN_SEL;
@@ -1051,6 +1078,7 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
desc_info->ch_dma = ch_dma;
desc_info->tid_indicate = tid_indicate;
desc_info->qsel = qsel;
+ desc_info->sw_mld = false;
desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
desc_info->port = desc_info->hiq ? rtwvif_link->port : 0;
desc_info->er_cap = rtwsta_link ? rtwsta_link->er_cap : false;
@@ -1207,7 +1235,7 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
if (addr_cam->valid && desc_info->mlo)
upd_wlan_hdr = true;
- if (rtw89_is_tx_rpt_skb(rtwdev, tx_req->skb))
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS || tx_req->with_wait)
rtw89_tx_rpt_init(rtwdev, tx_req);
is_bmc = (is_broadcast_ether_addr(hdr->addr1) ||
@@ -1326,7 +1354,7 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
- struct sk_buff *skb, int *qsel, bool sw_mld,
+ struct sk_buff *skb, int *qsel,
struct rtw89_tx_wait_info *wait)
{
struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link);
@@ -1341,14 +1369,15 @@ static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
tx_req.sta = sta;
tx_req.rtwvif_link = rtwvif_link;
tx_req.rtwsta_link = rtwsta_link;
- tx_req.desc_info.sw_mld = sw_mld;
- rcu_assign_pointer(skb_data->wait, wait);
+ tx_req.with_wait = !!wait;
rtw89_traffic_stats_accu(rtwdev, rtwvif, skb, true, true);
rtw89_wow_parse_akm(rtwdev, skb);
rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
rtw89_core_tx_wake(rtwdev, &tx_req);
+ rcu_assign_pointer(skb_data->wait, wait);
+
ret = rtw89_hci_tx_write(rtwdev, &tx_req);
if (ret) {
rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
@@ -1385,8 +1414,7 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
}
}
- return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false,
- NULL);
+ return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, NULL);
}
static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
@@ -1631,6 +1659,17 @@ static __le32 rtw89_build_txwd_body2_v2(struct rtw89_tx_desc_info *desc_info)
return cpu_to_le32(dword);
}
+static __le32 rtw89_build_txwd_body2_v3(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(BE_TXD_BODY2_TID_IND_V1, desc_info->tid_indicate) |
+ FIELD_PREP(BE_TXD_BODY2_QSEL_V1, desc_info->qsel) |
+ FIELD_PREP(BE_TXD_BODY2_TXPKTSIZE, desc_info->pkt_size) |
+ FIELD_PREP(BE_TXD_BODY2_AGG_EN, desc_info->agg_en) |
+ FIELD_PREP(BE_TXD_BODY2_MACID_V1, desc_info->mac_id);
+
+ return cpu_to_le32(dword);
+}
+
static __le32 rtw89_build_txwd_body3_v2(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq) |
@@ -1640,6 +1679,16 @@ static __le32 rtw89_build_txwd_body3_v2(struct rtw89_tx_desc_info *desc_info)
return cpu_to_le32(dword);
}
+static __le32 rtw89_build_txwd_body3_v3(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq) |
+ FIELD_PREP(BE_TXD_BODY3_MLO_FLAG, desc_info->mlo) |
+ FIELD_PREP(BE_TXD_BODY3_IS_MLD_SW_EN, desc_info->sw_mld) |
+ FIELD_PREP(BE_TXD_BODY3_BK_V1, desc_info->bk);
+
+ return cpu_to_le32(dword);
+}
+
static __le32 rtw89_build_txwd_body4_v2(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(BE_TXD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) |
@@ -1711,6 +1760,15 @@ static __le32 rtw89_build_txwd_info2_v2(struct rtw89_tx_desc_info *desc_info)
return cpu_to_le32(dword);
}
+static __le32 rtw89_build_txwd_info2_v3(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(BE_TXD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) |
+ FIELD_PREP(BE_TXD_INFO2_FORCE_KEY_EN_V1, desc_info->sec_en) |
+ FIELD_PREP(BE_TXD_INFO2_SEC_CAM_IDX_V1, desc_info->sec_cam_idx);
+
+ return cpu_to_le32(dword);
+}
+
static __le32 rtw89_build_txwd_info4_v2(struct rtw89_tx_desc_info *desc_info)
{
bool rts_en = !desc_info->is_bmc;
@@ -1749,6 +1807,35 @@ void rtw89_core_fill_txdesc_v2(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_core_fill_txdesc_v2);
+void rtw89_core_fill_txdesc_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc)
+{
+ struct rtw89_txwd_body_v2 *txwd_body = txdesc;
+ struct rtw89_txwd_info_v2 *txwd_info;
+
+ txwd_body->dword0 = rtw89_build_txwd_body0_v2(desc_info);
+ txwd_body->dword1 = rtw89_build_txwd_body1_v2(desc_info);
+ txwd_body->dword2 = rtw89_build_txwd_body2_v3(desc_info);
+ txwd_body->dword3 = rtw89_build_txwd_body3_v3(desc_info);
+ if (desc_info->sec_en) {
+ txwd_body->dword4 = rtw89_build_txwd_body4_v2(desc_info);
+ txwd_body->dword5 = rtw89_build_txwd_body5_v2(desc_info);
+ }
+ txwd_body->dword6 = rtw89_build_txwd_body6_v2(desc_info);
+ txwd_body->dword7 = rtw89_build_txwd_body7_v2(desc_info);
+
+ if (!desc_info->en_wd_info)
+ return;
+
+ txwd_info = (struct rtw89_txwd_info_v2 *)(txwd_body + 1);
+ txwd_info->dword0 = rtw89_build_txwd_info0_v2(desc_info);
+ txwd_info->dword1 = rtw89_build_txwd_info1_v2(desc_info);
+ txwd_info->dword2 = rtw89_build_txwd_info2_v3(desc_info);
+ txwd_info->dword4 = rtw89_build_txwd_info4_v2(desc_info);
+}
+EXPORT_SYMBOL(rtw89_core_fill_txdesc_v3);
+
static __le32 rtw89_build_txwd_fwcmd0_v1(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(AX_RXD_RPKT_LEN_MASK, desc_info->pkt_size) |
@@ -2785,7 +2872,7 @@ static void rtw89_core_bcn_track_assoc(struct rtw89_dev *rtwdev,
rcu_read_lock();
bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
- beacon_int = bss_conf->beacon_int;
+ beacon_int = bss_conf->beacon_int ?: 100;
dtim = bss_conf->dtim_period;
rcu_read_unlock();
@@ -2815,9 +2902,7 @@ static void rtw89_core_bcn_track_reset(struct rtw89_dev *rtwdev)
memset(&rtwdev->bcn_track, 0, sizeof(rtwdev->bcn_track));
}
-static void rtw89_vif_rx_bcn_stat(struct rtw89_dev *rtwdev,
- struct ieee80211_bss_conf *bss_conf,
- struct sk_buff *skb)
+static void rtw89_vif_rx_bcn_stat(struct rtw89_dev *rtwdev, struct sk_buff *skb)
{
#define RTW89_APPEND_TSF_2GHZ 384
#define RTW89_APPEND_TSF_5GHZ 52
@@ -2826,7 +2911,7 @@ static void rtw89_vif_rx_bcn_stat(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
- u32 bcn_intvl_us = ieee80211_tu_to_usec(bss_conf->beacon_int);
+ u32 bcn_intvl_us = ieee80211_tu_to_usec(bcn_track->beacon_int);
u64 tsf = le64_to_cpu(mgmt->u.beacon.timestamp);
u8 wp, num = bcn_stat->num;
u16 append;
@@ -2834,6 +2919,10 @@ static void rtw89_vif_rx_bcn_stat(struct rtw89_dev *rtwdev,
if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
return;
+ /* Skip if not yet associated */
+ if (!bcn_intvl_us)
+ return;
+
switch (rx_status->band) {
default:
case NL80211_BAND_2GHZ:
@@ -2921,7 +3010,7 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
pkt_stat->beacon_rate = desc_info->data_rate;
pkt_stat->beacon_len = skb->len;
- rtw89_vif_rx_bcn_stat(rtwdev, bss_conf, skb);
+ rtw89_vif_rx_bcn_stat(rtwdev, skb);
}
if (!ether_addr_equal(bss_conf->addr, hdr->addr1))
@@ -3408,6 +3497,79 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_core_query_rxdesc_v2);
+void rtw89_core_query_rxdesc_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ u8 *data, u32 data_offset)
+{
+ struct rtw89_rxdesc_phy_rpt_v2 *rxd_rpt;
+ struct rtw89_rxdesc_short_v3 *rxd_s;
+ struct rtw89_rxdesc_long_v3 *rxd_l;
+ u16 shift_len, drv_info_len, phy_rtp_len, hdr_cnv_len;
+
+ rxd_s = (struct rtw89_rxdesc_short_v3 *)(data + data_offset);
+
+ desc_info->pkt_size = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_LEN_MASK);
+ desc_info->drv_info_size = le32_get_bits(rxd_s->dword0, BE_RXD_DRV_INFO_SZ_MASK);
+ desc_info->phy_rpt_size = le32_get_bits(rxd_s->dword0, BE_RXD_PHY_RPT_SZ_MASK);
+ desc_info->hdr_cnv_size = le32_get_bits(rxd_s->dword0, BE_RXD_HDR_CNV_SZ_MASK);
+ desc_info->shift = le32_get_bits(rxd_s->dword0, BE_RXD_SHIFT_MASK);
+ desc_info->long_rxdesc = le32_get_bits(rxd_s->dword0, BE_RXD_LONG_RXD);
+ desc_info->pkt_type = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_TYPE_MASK);
+ desc_info->bb_sel = le32_get_bits(rxd_s->dword0, BE_RXD_BB_SEL);
+ if (desc_info->pkt_type == RTW89_CORE_RX_TYPE_PPDU_STAT)
+ desc_info->mac_info_valid = true;
+
+ desc_info->frame_type = le32_get_bits(rxd_s->dword2, BE_RXD_TYPE_MASK);
+ desc_info->mac_id = le32_get_bits(rxd_s->dword2, BE_RXD_MAC_ID_V1);
+ desc_info->addr_cam_valid = le32_get_bits(rxd_s->dword2, BE_RXD_ADDR_CAM_VLD);
+
+ desc_info->icv_err = le32_get_bits(rxd_s->dword3, BE_RXD_ICV_ERR);
+ desc_info->crc32_err = le32_get_bits(rxd_s->dword3, BE_RXD_CRC32_ERR);
+ desc_info->hw_dec = le32_get_bits(rxd_s->dword3, BE_RXD_HW_DEC);
+ desc_info->sw_dec = le32_get_bits(rxd_s->dword3, BE_RXD_SW_DEC);
+ desc_info->addr1_match = le32_get_bits(rxd_s->dword3, BE_RXD_A1_MATCH);
+
+ desc_info->bw = le32_get_bits(rxd_s->dword4, BE_RXD_BW_MASK);
+ desc_info->data_rate = le32_get_bits(rxd_s->dword4, BE_RXD_RX_DATARATE_MASK);
+ desc_info->gi_ltf = le32_get_bits(rxd_s->dword4, BE_RXD_RX_GI_LTF_MASK);
+ desc_info->ppdu_cnt = le32_get_bits(rxd_s->dword4, BE_RXD_PPDU_CNT_MASK);
+ desc_info->ppdu_type = le32_get_bits(rxd_s->dword4, BE_RXD_PPDU_TYPE_MASK);
+
+ desc_info->free_run_cnt = le32_to_cpu(rxd_s->dword5);
+
+ shift_len = desc_info->shift << 1; /* 2-byte unit */
+ drv_info_len = desc_info->drv_info_size << 3; /* 8-byte unit */
+ phy_rtp_len = desc_info->phy_rpt_size << 3; /* 8-byte unit */
+ hdr_cnv_len = desc_info->hdr_cnv_size << 4; /* 16-byte unit */
+ desc_info->offset = data_offset + shift_len + drv_info_len +
+ phy_rtp_len + hdr_cnv_len;
+
+ if (desc_info->long_rxdesc)
+ desc_info->rxd_len = sizeof(struct rtw89_rxdesc_long_v3);
+ else
+ desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short_v3);
+ desc_info->ready = true;
+
+ if (phy_rtp_len == sizeof(*rxd_rpt)) {
+ rxd_rpt = (struct rtw89_rxdesc_phy_rpt_v2 *)(data + data_offset +
+ desc_info->rxd_len);
+ desc_info->rssi = le32_get_bits(rxd_rpt->dword0, BE_RXD_PHY_RSSI);
+ }
+
+ if (!desc_info->long_rxdesc)
+ return;
+
+ rxd_l = (struct rtw89_rxdesc_long_v3 *)(data + data_offset);
+
+ desc_info->sr_en = le32_get_bits(rxd_l->dword6, BE_RXD_SR_EN);
+ desc_info->user_id = le32_get_bits(rxd_l->dword6, BE_RXD_USER_ID_MASK);
+ desc_info->addr_cam_id = le32_get_bits(rxd_l->dword6, BE_RXD_ADDR_CAM_V1);
+ desc_info->sec_cam_id = le32_get_bits(rxd_l->dword6, BE_RXD_SEC_CAM_IDX_V1);
+
+ desc_info->rx_pl_id = le32_get_bits(rxd_l->dword7, BE_RXD_RX_PL_ID_MASK);
+}
+EXPORT_SYMBOL(rtw89_core_query_rxdesc_v3);
+
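
The unit arithmetic in the new parser, as a standalone sketch: the descriptor stores its variable-length regions in hardware quanta (shift in 2-byte, drv_info and phy_rpt in 8-byte, hdr_cnv in 16-byte units), and the payload offset is their scaled sum.

#include <stdint.h>
#include <stdio.h>

struct rxd_units {
	unsigned int shift;	/* 2-byte units */
	unsigned int drv_info;	/* 8-byte units */
	unsigned int phy_rpt;	/* 8-byte units */
	unsigned int hdr_cnv;	/* 16-byte units */
};

static uint32_t payload_offset(uint32_t data_offset, const struct rxd_units *u)
{
	return data_offset + (u->shift << 1) + (u->drv_info << 3) +
	       (u->phy_rpt << 3) + (u->hdr_cnv << 4);
}

int main(void)
{
	struct rxd_units u = { .shift = 1, .drv_info = 2, .phy_rpt = 1 };

	/* 2 + 16 + 8 = 26 bytes past the descriptor start */
	printf("offset = %u\n", (unsigned)payload_offset(0, &u));
	return 0;
}
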
struct rtw89_core_iter_rx_status {
struct rtw89_dev *rtwdev;
struct ieee80211_rx_status *rx_status;
@@ -4091,8 +4253,7 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
goto out;
}
- ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true,
- wait);
+ ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, wait);
if (ret) {
rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret);
dev_kfree_skb_any(skb);
@@ -5085,7 +5246,7 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
}
vht_cap->vht_supported = true;
- vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ vht_cap->cap = chip->max_vht_mpdu_cap |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_HTC_VHT |
@@ -5213,7 +5374,7 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
- le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ le16_encode_bits(chip->max_vht_mpdu_cap,
IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
iftype_data->he_6ghz_capa.capa = capa;
}
@@ -5234,7 +5395,7 @@ static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
u8 val, val_mcs13;
int sts = 8;
- if (chip->chip_gen == RTW89_CHIP_AX)
+ if (chip->chip_gen == RTW89_CHIP_AX || hal->no_eht)
return;
if (hal->no_mcs_12_13)
@@ -5251,7 +5412,7 @@ static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
eht_cap->has_eht = true;
eht_cap_elem->mac_cap_info[0] =
- u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991,
+ u8_encode_bits(chip->max_eht_mpdu_cap,
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
eht_cap_elem->mac_cap_info[1] = 0;
@@ -5657,6 +5818,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_phy_dm_init(rtwdev);
+ rtw89_mac_set_edcca_mode_bands(rtwdev, true);
rtw89_mac_cfg_ppdu_status_bands(rtwdev, true);
rtw89_mac_cfg_phy_rpt_bands(rtwdev, true);
rtw89_mac_update_rts_threshold(rtwdev);
@@ -5923,6 +6085,9 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
struct rtw89_btc *btc = &rtwdev->btc;
u8 band;
+ bitmap_or(rtwdev->quirks, rtwdev->quirks, &rtwdev->chip->default_quirks,
+ NUM_OF_RTW89_QUIRKS);
+
INIT_LIST_HEAD(&rtwdev->ba_list);
INIT_LIST_HEAD(&rtwdev->forbid_ba_list);
INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
@@ -6080,7 +6245,9 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_hal *hal = &rtwdev->hal;
int ret;
+ u8 val2;
u8 val;
u8 cv;
@@ -6092,14 +6259,28 @@ static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
cv = CHIP_CBV;
}
- rtwdev->hal.cv = cv;
+ hal->cv = cv;
- if (rtw89_is_rtl885xb(rtwdev)) {
+ if (rtw89_is_rtl885xb(rtwdev) || chip->chip_gen >= RTW89_CHIP_BE) {
ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CV, &val);
if (ret)
return;
- rtwdev->hal.acv = u8_get_bits(val, XTAL_SI_ACV_MASK);
+ hal->acv = u8_get_bits(val, XTAL_SI_ACV_MASK);
+ }
+
+ if (chip->chip_gen >= RTW89_CHIP_BE) {
+ hal->cid =
+ rtw89_read32_mask(rtwdev, R_BE_SYS_CHIPINFO, B_BE_HW_ID_MASK);
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CHIP_ID_L, &val);
+ if (ret)
+ return;
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CHIP_ID_H, &val2);
+ if (ret)
+ return;
+
+ hal->aid = val | val2 << 8;
}
}
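+
+/*
+ * Sketch of the AID assembly above, with hypothetical XTAL_SI values:
+ * if XTAL_SI_CHIP_ID_L reads back 0x48 and XTAL_SI_CHIP_ID_H reads back
+ * 0x13, then hal->aid = 0x48 | (0x13 << 8) = 0x1348, matching
+ * RTL8922D_AID1348 in enum rtw89_core_chip_aid.
+ */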
@@ -6198,7 +6379,8 @@ int rtw89_core_mlsr_switch(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
goto wake_queue;
}
- rtw89_chip_rfk_channel(rtwdev, target);
+ if (RTW89_CHK_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY, &rtwdev->fw))
+ rtw89_chip_rfk_channel(rtwdev, target);
rtwvif->mlo_mode = RTW89_MLO_MODE_MLSR;
@@ -6309,6 +6491,8 @@ void rtw89_core_rfkill_poll(struct rtw89_dev *rtwdev, bool force)
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ struct rtw89_hal *hal = &rtwdev->hal;
int ret;
rtw89_read_chip_ver(rtwdev);
@@ -6348,6 +6532,9 @@ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
rtw89_core_setup_rfe_parms(rtwdev);
rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
+ rtw89_info(rtwdev, "chip info CID: %x, CV: %x, AID: %x, ACV: %x, RFE: %d\n",
+ hal->cid, hal->cv, hal->aid, hal->acv, efuse->rfe_type);
+
out:
rtw89_mac_pwr_off(rtwdev);
@@ -6398,8 +6585,8 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->extra_tx_headroom = tx_headroom;
hw->queues = IEEE80211_NUM_ACS;
- hw->max_rx_aggregation_subframes = RTW89_MAX_RX_AGG_NUM;
- hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM;
+ hw->max_rx_aggregation_subframes = chip->max_rx_agg_num;
+ hw->max_tx_aggregation_subframes = chip->max_tx_agg_num;
hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index a9cb47ea0b93..4778957d6b2d 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -23,8 +23,10 @@ struct rtw89_efuse_block_cfg;
struct rtw89_h2c_rf_tssi;
struct rtw89_fw_txpwr_track_cfg;
struct rtw89_phy_rfk_log_fmt;
+struct rtw89_phy_calc_efuse_gain;
struct rtw89_debugfs;
struct rtw89_regd_data;
+struct rtw89_wow_cam_info;
extern const struct ieee80211_ops rtw89_ops;
@@ -113,6 +115,16 @@ enum rtw89_subband {
RTW89_SUBBAND_2GHZ_5GHZ_NR = RTW89_CH_5G_BAND_4 + 1,
};
+enum rtw89_tx_comp_band {
+ RTW89_TX_COMP_BAND_2GHZ,
+ RTW89_TX_COMP_BAND_5GHZ_L,
+ RTW89_TX_COMP_BAND_5GHZ_H,
+ RTW89_TX_COMP_BAND_6GHZ_M,
+ RTW89_TX_COMP_BAND_6GHZ_UH,
+
+ RTW89_TX_COMP_BAND_NR,
+};
+
enum rtw89_gain_offset {
RTW89_GAIN_OFFSET_2G_CCK,
RTW89_GAIN_OFFSET_2G_OFDM,
@@ -158,6 +170,17 @@ enum rtw89_core_chip_id {
RTL8922D,
};
+enum rtw89_core_chip_cid {
+ RTL8922D_CID7025 = 0x74,
+ RTL8922D_CID7090 = 0x79,
+};
+
+enum rtw89_core_chip_aid {
+ RTL8922D_AID1348 = 0x1348,
+ RTL8922D_AID7060 = 0x7060,
+ RTL8922D_AID7102 = 0x7102,
+};
+
enum rtw89_chip_gen {
RTW89_CHIP_AX,
RTW89_CHIP_BE,
@@ -978,6 +1001,7 @@ struct rtw89_chan {
*/
u32 freq;
enum rtw89_subband subband_type;
+ enum rtw89_tx_comp_band tx_comp_band;
enum rtw89_sc_offset pri_ch_idx;
u8 pri_sb_idx;
};
@@ -1125,6 +1149,15 @@ struct rtw89_rxdesc_short_v2 {
__le32 dword5;
} __packed;
+struct rtw89_rxdesc_short_v3 {
+ __le32 dword0;
+ __le32 dword1;
+ __le32 dword2;
+ __le32 dword3;
+ __le32 dword4;
+ __le32 dword5;
+} __packed;
+
struct rtw89_rxdesc_long {
__le32 dword0;
__le32 dword1;
@@ -1149,6 +1182,19 @@ struct rtw89_rxdesc_long_v2 {
__le32 dword9;
} __packed;
+struct rtw89_rxdesc_long_v3 {
+ __le32 dword0;
+ __le32 dword1;
+ __le32 dword2;
+ __le32 dword3;
+ __le32 dword4;
+ __le32 dword5;
+ __le32 dword6;
+ __le32 dword7;
+ __le32 dword8;
+ __le32 dword9;
+} __packed;
+
struct rtw89_rxdesc_phy_rpt_v2 {
__le32 dword0;
__le32 dword1;
@@ -1211,6 +1257,8 @@ struct rtw89_core_tx_request {
struct rtw89_vif_link *rtwvif_link;
struct rtw89_sta_link *rtwsta_link;
struct rtw89_tx_desc_info desc_info;
+
+ bool with_wait;
};
struct rtw89_txq {
@@ -3404,9 +3452,6 @@ struct rtw89_ra_info {
#define RTW89_PPDU_MAC_RX_CNT_SIZE 96
#define RTW89_PPDU_MAC_RX_CNT_SIZE_V1 128
-#define RTW89_MAX_RX_AGG_NUM 64
-#define RTW89_MAX_TX_AGG_NUM 128
-
struct rtw89_ampdu_params {
u16 agg_num;
bool amsdu;
@@ -3789,6 +3834,11 @@ struct rtw89_chip_ops {
s8 pw_ofst, enum rtw89_mac_idx mac_idx);
void (*digital_pwr_comp)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
+ void (*calc_rx_gain_normal)(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx,
+ struct rtw89_phy_calc_efuse_gain *calc);
int (*pwr_on_func)(struct rtw89_dev *rtwdev);
int (*pwr_off_func)(struct rtw89_dev *rtwdev);
void (*query_rxdesc)(struct rtw89_dev *rtwdev,
@@ -3833,6 +3883,8 @@ struct rtw89_chip_ops {
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
bool valid, struct ieee80211_ampdu_params *params);
+ int (*h2c_wow_cam_update)(struct rtw89_dev *rtwdev,
+ struct rtw89_wow_cam_info *cam_info);
void (*btc_set_rfe)(struct rtw89_dev *rtwdev);
void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
@@ -3965,6 +4017,11 @@ struct rtw89_hfc_prec_cfg {
u8 h2c_full_cond;
u8 wp_ch07_full_cond;
u8 wp_ch811_full_cond;
+ /* for WiFi 7 chips after 8922D */
+ u16 ch011_full_page;
+ u16 h2c_full_page;
+ u16 wp_ch07_full_page;
+ u16 wp_ch811_full_page;
};
struct rtw89_hfc_param {
@@ -3989,13 +4046,14 @@ struct rtw89_dle_size {
u16 pge_size;
u16 lnk_pge_num;
u16 unlnk_pge_num;
- /* for WiFi 7 chips below */
+ /* for WiFi 7 chips below (suffix v1) */
u32 srt_ofst;
};
struct rtw89_wde_quota {
u16 hif;
u16 wcpu;
+ /* unused dcpu isn't listed */
u16 pkt_in;
u16 cpu_io;
};
@@ -4013,8 +4071,10 @@ struct rtw89_ple_quota {
u16 wd_rel;
u16 cpu_io;
u16 tx_rpt;
- /* for WiFi 7 chips below */
+ /* for WiFi 7 chips below (suffix v1) */
u16 h2d;
+ /* for WiFi 7 chips after 8922D (suffix v2) */
+ u16 snrpt;
};
struct rtw89_rsvd_quota {
@@ -4035,6 +4095,17 @@ struct rtw89_dle_rsvd_size {
u32 size;
};
+struct rtw89_dle_input {
+ u32 tx_ampdu_num_b0;
+ u32 tx_ampdu_num_b1;
+ u32 tx_amsdu_size; /* unit: KB */
+ u32 h2c_max_size;
+ u32 rx_amsdu_size; /* unit: KB */
+ u32 c2h_max_size;
+ u32 mpdu_info_tbl_b0;
+ u32 mpdu_info_tbl_b1;
+};
+
struct rtw89_dle_mem {
enum rtw89_qta_mode mode;
const struct rtw89_dle_size *wde_size;
@@ -4047,6 +4118,8 @@ struct rtw89_dle_mem {
const struct rtw89_rsvd_quota *rsvd_qt;
const struct rtw89_dle_rsvd_size *rsvd0_size;
const struct rtw89_dle_rsvd_size *rsvd1_size;
+ /* for WiFi 7 chips after 8922D */
+ const struct rtw89_dle_input *dle_input;
};
struct rtw89_reg_def {
@@ -4325,6 +4398,13 @@ struct rtw89_rfkill_regs {
struct rtw89_reg3_def mode;
};
+struct rtw89_sb_regs {
+ struct {
+ u32 cfg;
+ u32 get;
+ } n[2];
+};
+
struct rtw89_dig_regs {
u32 seg0_pd_reg;
u32 pd_lower_bound_mask;
@@ -4424,6 +4504,10 @@ struct rtw89_chip_info {
bool small_fifo_size;
u32 dle_scc_rsvd_size;
u16 max_amsdu_limit;
+ u16 max_vht_mpdu_cap;
+ u16 max_eht_mpdu_cap;
+ u16 max_tx_agg_num;
+ u16 max_rx_agg_num;
bool dis_2g_40m_ul_ofdma;
u32 rsvd_ple_ofst;
const struct rtw89_hfc_param_ini *hfc_param_ini[RTW89_HCI_TYPE_NUM];
@@ -4538,10 +4622,12 @@ struct rtw89_chip_info {
u32 bss_clr_map_reg;
const struct rtw89_rfkill_regs *rfkill_init;
struct rtw89_reg_def rfkill_get;
+ struct rtw89_sb_regs btc_sb;
u32 dma_ch_mask;
const struct rtw89_edcca_regs *edcca_regs;
const struct wiphy_wowlan_support *wowlan_stub;
const struct rtw89_xtal_info *xtal_info;
+ unsigned long default_quirks; /* bitmap of rtw89_quirks */
};
struct rtw89_chip_variant {
@@ -4572,6 +4658,7 @@ enum rtw89_hcifc_mode {
struct rtw89_dle_info {
const struct rtw89_rsvd_quota *rsvd_qt;
+ const struct rtw89_dle_input *dle_input;
enum rtw89_qta_mode qta_mode;
u16 ple_pg_size;
u16 ple_free_pg;
@@ -4664,8 +4751,17 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_MACID_PAUSE_SLEEP,
RTW89_FW_FEATURE_SCAN_OFFLOAD_BE_V0,
RTW89_FW_FEATURE_WOW_REASON_V1,
- RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V0,
- RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V1,
+ RTW89_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY,
+ RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V0,
+ RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V1,
+ RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V2,
+ RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V3,
+ ),
+ RTW89_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY_MCC,
+ RTW89_FW_FEATURE_RFK_PRE_NOTIFY_MCC_V0,
+ RTW89_FW_FEATURE_RFK_PRE_NOTIFY_MCC_V1,
+ RTW89_FW_FEATURE_RFK_PRE_NOTIFY_MCC_V2,
+ ),
RTW89_FW_FEATURE_RFK_RXDCK_V0,
RTW89_FW_FEATURE_RFK_IQK_V0,
RTW89_FW_FEATURE_NO_WOW_CPU_IO_RX,
@@ -4680,6 +4776,11 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_LPS_DACK_BY_C2H_REG,
RTW89_FW_FEATURE_BEACON_TRACKING,
RTW89_FW_FEATURE_ADDR_CAM_V0,
+ RTW89_FW_FEATURE_SER_L1_BY_EVENT,
+ RTW89_FW_FEATURE_SIM_SER_L0L1_BY_HALT_H2C,
+ RTW89_FW_FEATURE_LPS_ML_INFO_V1,
+
+ NUM_OF_RTW89_FW_FEATURES,
};
struct rtw89_fw_suit {
@@ -4741,6 +4842,7 @@ struct rtw89_fw_elm_info {
const struct rtw89_regd_data *regd;
const struct rtw89_fw_element_hdr *afe;
const struct rtw89_fw_element_hdr *diag_mac;
+ const struct rtw89_fw_element_hdr *tx_comp;
};
enum rtw89_fw_mss_dev_type {
@@ -4771,20 +4873,28 @@ struct rtw89_fw_info {
struct rtw89_fw_suit bbmcu0;
struct rtw89_fw_suit bbmcu1;
struct rtw89_fw_log log;
- u32 feature_map;
struct rtw89_fw_elm_info elm_info;
struct rtw89_fw_secure sec;
+
+ DECLARE_BITMAP(feature_map, NUM_OF_RTW89_FW_FEATURES);
};
#define RTW89_CHK_FW_FEATURE(_feat, _fw) \
- (!!((_fw)->feature_map & BIT(RTW89_FW_FEATURE_ ## _feat)))
+ test_bit(RTW89_FW_FEATURE_ ## _feat, (_fw)->feature_map)
#define RTW89_CHK_FW_FEATURE_GROUP(_grp, _fw) \
- (!!((_fw)->feature_map & GENMASK(RTW89_FW_FEATURE_ ## _grp ## _MAX, \
- RTW89_FW_FEATURE_ ## _grp ## _MIN)))
+({ \
+ unsigned int bit = find_next_bit((_fw)->feature_map, \
+ NUM_OF_RTW89_FW_FEATURES, \
+ RTW89_FW_FEATURE_ ## _grp ## _MIN); \
+ bit <= RTW89_FW_FEATURE_ ## _grp ## _MAX; \
+})
#define RTW89_SET_FW_FEATURE(_fw_feature, _fw) \
- ((_fw)->feature_map |= BIT(_fw_feature))
+ set_bit(_fw_feature, (_fw)->feature_map)
+
+#define RTW89_CLR_FW_FEATURE(_fw_feature, _fw) \
+ clear_bit(_fw_feature, (_fw)->feature_map)
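+
+/*
+ * Usage sketch for the bitmap-based macros above (hypothetical call
+ * site): find_next_bit() returns the first set bit at or after the
+ * group's _MIN enumerator, so the group check passes only while that
+ * bit still lies at or before _MAX.
+ *
+ *   RTW89_SET_FW_FEATURE(RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V2, fw);
+ *   RTW89_CHK_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY, fw);  -> true
+ *   RTW89_CLR_FW_FEATURE(RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V2, fw);
+ *   RTW89_CHK_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY, fw);  -> false,
+ *                                 provided no other group bit is set
+ */
+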
struct rtw89_cam_info {
DECLARE_BITMAP(addr_cam_map, RTW89_MAX_ADDR_CAM_NUM);
@@ -5026,7 +5136,9 @@ enum rtw89_dm_type {
struct rtw89_hal {
u32 rx_fltr;
u8 cv;
+ u8 cid; /* enum rtw89_core_chip_cid */
u8 acv;
+ u16 aid; /* enum rtw89_core_chip_aid */
u32 antenna_tx;
u32 antenna_rx;
u8 tx_nss;
@@ -5037,6 +5149,7 @@ struct rtw89_hal {
bool support_cckpd;
bool support_igi;
bool no_mcs_12_13;
+ bool no_eht;
atomic_t roc_chanctx_idx;
u8 roc_link_index;
@@ -5051,6 +5164,8 @@ struct rtw89_hal {
enum rtw89_entity_mode entity_mode;
struct rtw89_entity_mgnt entity_mgnt;
+ enum rtw89_phy_idx entity_force_hw;
+
u32 disabled_dm_bitmap; /* bitmap of enum rtw89_dm_type */
u8 thermal_prot_th;
@@ -5065,6 +5180,8 @@ enum rtw89_flags {
RTW89_FLAG_DMAC_FUNC,
RTW89_FLAG_CMAC0_FUNC,
RTW89_FLAG_CMAC1_FUNC,
+ RTW89_FLAG_CMAC0_PWR,
+ RTW89_FLAG_CMAC1_PWR,
RTW89_FLAG_FW_RDY,
RTW89_FLAG_RUNNING,
RTW89_FLAG_PROBE_DONE,
@@ -5095,13 +5212,15 @@ enum rtw89_quirks {
};
enum rtw89_custid {
- RTW89_CUSTID_NONE,
- RTW89_CUSTID_ACER,
- RTW89_CUSTID_AMD,
- RTW89_CUSTID_ASUS,
- RTW89_CUSTID_DELL,
- RTW89_CUSTID_HP,
- RTW89_CUSTID_LENOVO,
+ RTW89_CUSTID_NONE = 0,
+ RTW89_CUSTID_HP = 1,
+ RTW89_CUSTID_ASUS = 2,
+ RTW89_CUSTID_ACER = 3,
+ RTW89_CUSTID_LENOVO = 4,
+ RTW89_CUSTID_NEC = 5,
+ RTW89_CUSTID_AMD = 6,
+ RTW89_CUSTID_FUJITSU = 7,
+ RTW89_CUSTID_DELL = 8,
};
enum rtw89_pkt_drop_sel {
@@ -5216,6 +5335,7 @@ struct rtw89_rfk_mcc_info_data {
u8 ch[RTW89_RFK_CHS_NR];
u8 band[RTW89_RFK_CHS_NR];
u8 bw[RTW89_RFK_CHS_NR];
+ u32 rf18[RTW89_RFK_CHS_NR];
u8 table_idx;
};
@@ -5585,7 +5705,7 @@ struct rtw89_env_monitor_info {
u16 ifs_clm_cckfa;
u16 ifs_clm_cckcca_excl_fa;
u16 ifs_clm_total_ifs;
- u8 ifs_clm_his[RTW89_IFS_CLM_NUM];
+ u16 ifs_clm_his[RTW89_IFS_CLM_NUM];
u16 ifs_clm_avg[RTW89_IFS_CLM_NUM];
u16 ifs_clm_cca[RTW89_IFS_CLM_NUM];
u8 ifs_clm_tx_ratio;
@@ -5786,6 +5906,12 @@ struct rtw89_phy_efuse_gain {
s8 comp[RF_PATH_MAX][RTW89_SUBBAND_NR]; /* S(8, 0) */
};
+struct rtw89_phy_calc_efuse_gain {
+ s8 cck_mean_gain_bias;
+ s8 cck_rpl_ofst;
+ s8 rssi_ofst;
+};
+
#define RTW89_MAX_PATTERN_NUM 18
#define RTW89_MAX_PATTERN_MASK_SIZE 4
#define RTW89_MAX_PATTERN_SIZE 128
@@ -5793,7 +5919,7 @@ struct rtw89_phy_efuse_gain {
struct rtw89_wow_cam_info {
bool r_w;
u8 idx;
- u32 mask[RTW89_MAX_PATTERN_MASK_SIZE];
+ __le32 mask[RTW89_MAX_PATTERN_MASK_SIZE];
u16 crc;
bool negative_pattern_match;
bool skip_mac_hdr;
@@ -7100,15 +7226,6 @@ static inline void rtw89_chip_rfk_init_late(struct rtw89_dev *rtwdev)
chip->ops->rfk_init_late(rtwdev);
}
-static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
-{
- const struct rtw89_chip_info *chip = rtwdev->chip;
-
- if (chip->ops->rfk_channel)
- chip->ops->rfk_channel(rtwdev, rtwvif_link);
-}
-
static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan)
@@ -7249,6 +7366,19 @@ static inline void rtw89_chip_digital_pwr_comp(struct rtw89_dev *rtwdev,
chip->ops->digital_pwr_comp(rtwdev, phy_idx);
}
+static inline
+void rtw89_chip_calc_rx_gain_normal(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx,
+ struct rtw89_phy_calc_efuse_gain *calc)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->calc_rx_gain_normal)
+ chip->ops->calc_rx_gain_normal(rtwdev, chan, path, phy_idx, calc);
+}
+
static inline void rtw89_load_txpwr_table(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl)
{
@@ -7558,6 +7688,9 @@ void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev,
void rtw89_core_fill_txdesc_v2(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
+void rtw89_core_fill_txdesc_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc);
void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
@@ -7576,6 +7709,9 @@ void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev,
void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
u8 *data, u32 data_offset);
+void rtw89_core_query_rxdesc_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ u8 *data, u32 data_offset);
void rtw89_core_napi_start(struct rtw89_dev *rtwdev);
void rtw89_core_napi_stop(struct rtw89_dev *rtwdev);
int rtw89_core_napi_init(struct rtw89_dev *rtwdev);
@@ -7622,6 +7758,8 @@ struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
unsigned int link_id);
void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id);
void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
+void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link);
const struct rtw89_6ghz_span *
rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq);
void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 1264c2f82600..d46691fa09bc 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -79,6 +79,7 @@ struct rtw89_debugfs {
struct rtw89_debugfs_priv send_h2c;
struct rtw89_debugfs_priv early_h2c;
struct rtw89_debugfs_priv fw_crash;
+ struct rtw89_debugfs_priv ser_counters;
struct rtw89_debugfs_priv btc_info;
struct rtw89_debugfs_priv btc_manual;
struct rtw89_debugfs_priv fw_log_manual;
@@ -825,10 +826,6 @@ static ssize_t __print_txpwr_map(struct rtw89_dev *rtwdev, char *buf, size_t buf
s8 *bufp, tmp;
int ret;
- bufp = vzalloc(map->addr_to - map->addr_from + 4);
- if (!bufp)
- return -ENOMEM;
-
if (path_num == 1)
max_valid_addr = map->addr_to_1ss;
else
@@ -837,6 +834,10 @@ static ssize_t __print_txpwr_map(struct rtw89_dev *rtwdev, char *buf, size_t buf
if (max_valid_addr == 0)
return -EOPNOTSUPP;
+ bufp = vzalloc(map->addr_to - map->addr_from + 4);
+ if (!bufp)
+ return -ENOMEM;
+
for (addr = map->addr_from; addr <= max_valid_addr; addr += 4) {
ret = rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, addr, &val);
if (ret)
@@ -3537,13 +3538,49 @@ out:
return count;
}
-static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev)
+static int rtw89_dbg_trigger_l1_error_by_halt_h2c_ax(struct rtw89_dev *rtwdev)
+{
+ if (!test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
+ return -EBUSY;
+
+ return rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RESET_FORCE);
+}
+
+static int rtw89_dbg_trigger_l1_error_by_halt_h2c_be(struct rtw89_dev *rtwdev)
+{
+ if (!test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
+ return -EBUSY;
+
+ rtw89_write32_set(rtwdev, R_BE_FW_TRIGGER_IDCT_ISR,
+ B_BE_DMAC_FW_TRIG_IDCT | B_BE_DMAC_FW_ERR_IDCT_IMR);
+
+ return 0;
+}
+
+static int rtw89_dbg_trigger_l1_error_by_halt_h2c(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ switch (chip->chip_gen) {
+ case RTW89_CHIP_AX:
+ return rtw89_dbg_trigger_l1_error_by_halt_h2c_ax(rtwdev);
+ case RTW89_CHIP_BE:
+ return rtw89_dbg_trigger_l1_error_by_halt_h2c_be(rtwdev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int rtw89_dbg_trigger_l1_error(struct rtw89_dev *rtwdev)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_cpuio_ctrl ctrl_para = {0};
u16 pkt_id;
int ret;
+ if (RTW89_CHK_FW_FEATURE(SIM_SER_L0L1_BY_HALT_H2C, &rtwdev->fw))
+ return rtw89_dbg_trigger_l1_error_by_halt_h2c(rtwdev);
+
rtw89_leave_ps_mode(rtwdev);
ret = mac->dle_buf_req(rtwdev, 0x20, true, &pkt_id);
@@ -3564,7 +3601,7 @@ static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev)
return 0;
}
-static int rtw89_dbg_trigger_mac_error_ax(struct rtw89_dev *rtwdev)
+static int rtw89_dbg_trigger_l0_error_ax(struct rtw89_dev *rtwdev)
{
u16 val16;
u8 val8;
@@ -3586,34 +3623,67 @@ static int rtw89_dbg_trigger_mac_error_ax(struct rtw89_dev *rtwdev)
return 0;
}
-static int rtw89_dbg_trigger_mac_error_be(struct rtw89_dev *rtwdev)
+static int rtw89_dbg_trigger_l0_error_be(struct rtw89_dev *rtwdev)
{
+ u8 val8;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
if (ret)
return ret;
+ val8 = rtw89_read8(rtwdev, R_BE_CMAC_FUNC_EN);
+ rtw89_write8(rtwdev, R_BE_CMAC_FUNC_EN, val8 & ~B_BE_TMAC_EN);
+ mdelay(1);
+ rtw89_write8(rtwdev, R_BE_CMAC_FUNC_EN, val8);
+
+ return 0;
+}
+
+static int rtw89_dbg_trigger_l0_error_by_halt_h2c_ax(struct rtw89_dev *rtwdev)
+{
+ if (!test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
+ return -EBUSY;
+
+ return rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L0_RESET_FORCE);
+}
+
+static int rtw89_dbg_trigger_l0_error_by_halt_h2c_be(struct rtw89_dev *rtwdev)
+{
+ if (!test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
+ return -EBUSY;
+
rtw89_write32_set(rtwdev, R_BE_CMAC_FW_TRIGGER_IDCT_ISR,
B_BE_CMAC_FW_TRIG_IDCT | B_BE_CMAC_FW_ERR_IDCT_IMR);
return 0;
}
-static int rtw89_dbg_trigger_mac_error(struct rtw89_dev *rtwdev)
+static int rtw89_dbg_trigger_l0_error(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
-
- rtw89_leave_ps_mode(rtwdev);
+ int (*sim_l0_by_halt_h2c)(struct rtw89_dev *rtwdev);
+ int (*sim_l0)(struct rtw89_dev *rtwdev);
switch (chip->chip_gen) {
case RTW89_CHIP_AX:
- return rtw89_dbg_trigger_mac_error_ax(rtwdev);
+ sim_l0_by_halt_h2c = rtw89_dbg_trigger_l0_error_by_halt_h2c_ax;
+ sim_l0 = rtw89_dbg_trigger_l0_error_ax;
+ break;
case RTW89_CHIP_BE:
- return rtw89_dbg_trigger_mac_error_be(rtwdev);
+ sim_l0_by_halt_h2c = rtw89_dbg_trigger_l0_error_by_halt_h2c_be;
+ sim_l0 = rtw89_dbg_trigger_l0_error_be;
+ break;
default:
return -EOPNOTSUPP;
}
+
+ if (RTW89_CHK_FW_FEATURE(SIM_SER_L0L1_BY_HALT_H2C, &rtwdev->fw))
+ return sim_l0_by_halt_h2c(rtwdev);
+
+ rtw89_leave_ps_mode(rtwdev);
+
+ return sim_l0(rtwdev);
}
static ssize_t
@@ -3630,8 +3700,8 @@ rtw89_debug_priv_fw_crash_get(struct rtw89_dev *rtwdev,
enum rtw89_dbg_crash_simulation_type {
RTW89_DBG_SIM_CPU_EXCEPTION = 1,
- RTW89_DBG_SIM_CTRL_ERROR = 2,
- RTW89_DBG_SIM_MAC_ERROR = 3,
+ RTW89_DBG_SIM_L1_ERROR = 2,
+ RTW89_DBG_SIM_L0_ERROR = 3,
};
static ssize_t
@@ -3656,11 +3726,11 @@ rtw89_debug_priv_fw_crash_set(struct rtw89_dev *rtwdev,
return -EOPNOTSUPP;
sim = rtw89_fw_h2c_trigger_cpu_exception;
break;
- case RTW89_DBG_SIM_CTRL_ERROR:
- sim = rtw89_dbg_trigger_ctrl_error;
+ case RTW89_DBG_SIM_L1_ERROR:
+ sim = rtw89_dbg_trigger_l1_error;
break;
- case RTW89_DBG_SIM_MAC_ERROR:
- sim = rtw89_dbg_trigger_mac_error;
+ case RTW89_DBG_SIM_L0_ERROR:
+ sim = rtw89_dbg_trigger_l0_error;
/* Driver SER flow won't get involved; only FW will. */
announce = false;
@@ -3680,6 +3750,60 @@ rtw89_debug_priv_fw_crash_set(struct rtw89_dev *rtwdev,
return count;
}
+struct rtw89_dbg_ser_counters {
+ unsigned int l0;
+ unsigned int l1;
+ unsigned int l0_to_l1;
+};
+
+static void rtw89_dbg_get_ser_counters_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_dbg_ser_counters *cnt)
+{
+ const u32 val = rtw89_read32(rtwdev, R_AX_SER_DBG_INFO);
+
+ cnt->l0 = u32_get_bits(val, B_AX_SER_L0_COUNTER_MASK);
+ cnt->l1 = u32_get_bits(val, B_AX_SER_L1_COUNTER_MASK);
+ cnt->l0_to_l1 = u32_get_bits(val, B_AX_L0_TO_L1_EVENT_MASK);
+}
+
+static void rtw89_dbg_get_ser_counters_be(struct rtw89_dev *rtwdev,
+ struct rtw89_dbg_ser_counters *cnt)
+{
+ const u32 val = rtw89_read32(rtwdev, R_BE_SER_DBG_INFO);
+
+ cnt->l0 = u32_get_bits(val, B_BE_SER_L0_COUNTER_MASK);
+ cnt->l1 = u32_get_bits(val, B_BE_SER_L1_COUNTER_MASK);
+ cnt->l0_to_l1 = u32_get_bits(val, B_BE_SER_L0_PROMOTE_L1_EVENT_MASK);
+}
+
+static ssize_t rtw89_debug_priv_ser_counters_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_dbg_ser_counters cnt = {};
+ char *p = buf, *end = buf + bufsz;
+
+ rtw89_leave_ps_mode(rtwdev);
+
+ switch (chip->chip_gen) {
+ case RTW89_CHIP_AX:
+ rtw89_dbg_get_ser_counters_ax(rtwdev, &cnt);
+ break;
+ case RTW89_CHIP_BE:
+ rtw89_dbg_get_ser_counters_be(rtwdev, &cnt);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+	p += scnprintf(p, end - p, "SER L0 Count: %u\n", cnt.l0);
+	p += scnprintf(p, end - p, "SER L1 Count: %u\n", cnt.l1);
+	p += scnprintf(p, end - p, "SER L0 promote event: %u\n", cnt.l0_to_l1);
+
+ return p - buf;
+}
+
static ssize_t rtw89_debug_priv_btc_info_get(struct rtw89_dev *rtwdev,
struct rtw89_debugfs_priv *debugfs_priv,
char *buf, size_t bufsz)
@@ -4767,6 +4891,7 @@ static const struct rtw89_debugfs rtw89_debugfs_templ = {
.send_h2c = rtw89_debug_priv_set(send_h2c),
.early_h2c = rtw89_debug_priv_set_and_get(early_h2c, RWLOCK),
.fw_crash = rtw89_debug_priv_set_and_get(fw_crash, WLOCK),
+ .ser_counters = rtw89_debug_priv_get(ser_counters, RLOCK),
.btc_info = rtw89_debug_priv_get(btc_info, RSIZE_12K),
.btc_manual = rtw89_debug_priv_set(btc_manual),
.fw_log_manual = rtw89_debug_priv_set(fw_log_manual, WLOCK),
@@ -4814,6 +4939,7 @@ void rtw89_debugfs_add_sec1(struct rtw89_dev *rtwdev, struct dentry *debugfs_top
rtw89_debugfs_add_w(send_h2c);
rtw89_debugfs_add_rw(early_h2c);
rtw89_debugfs_add_rw(fw_crash);
+ rtw89_debugfs_add_r(ser_counters);
rtw89_debugfs_add_r(btc_info);
rtw89_debugfs_add_w(btc_manual);
rtw89_debugfs_add_w(fw_log_manual);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index a364e7adb079..7cdceb24f52d 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -31,6 +31,7 @@ enum rtw89_debug_mask {
RTW89_DBG_CHAN = BIT(20),
RTW89_DBG_ACPI = BIT(21),
RTW89_DBG_EDCCA = BIT(22),
+ RTW89_DBG_PS = BIT(23),
RTW89_DBG_UNEXP = BIT(31),
};
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.c b/drivers/net/wireless/realtek/rtw89/efuse.c
index 6c6c763510af..a2757a88d55d 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse.c
+++ b/drivers/net/wireless/realtek/rtw89/efuse.c
@@ -7,10 +7,6 @@
#include "mac.h"
#include "reg.h"
-#define EF_FV_OFSET 0x5ea
-#define EF_CV_MASK GENMASK(7, 4)
-#define EF_CV_INV 15
-
#define EFUSE_B1_MSSDEVTYPE_MASK GENMASK(3, 0)
#define EFUSE_B1_MSSCUSTIDX0_MASK GENMASK(7, 4)
#define EFUSE_B2_MSSKEYNUM_MASK GENMASK(3, 0)
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.h b/drivers/net/wireless/realtek/rtw89/efuse.h
index a96fc1044791..a14a9dfed8e8 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse.h
+++ b/drivers/net/wireless/realtek/rtw89/efuse.h
@@ -11,6 +11,11 @@
#define RTW89_EFUSE_BLOCK_SIZE_MASK GENMASK(15, 0)
#define RTW89_EFUSE_MAX_BLOCK_SIZE 0x10000
+#define EF_FV_OFSET 0x5EA
+#define EF_FV_OFSET_BE_V1 0x17CA
+#define EF_CV_MASK GENMASK(7, 4)
+#define EF_CV_INV 15
+
struct rtw89_efuse_block_cfg {
u32 offset;
u32 size;
@@ -26,5 +31,6 @@ int rtw89_read_efuse_ver(struct rtw89_dev *rtwdev, u8 *efv);
int rtw89_efuse_recognize_mss_info_v1(struct rtw89_dev *rtwdev, u8 b1, u8 b2);
int rtw89_efuse_read_fw_secure_ax(struct rtw89_dev *rtwdev);
int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev);
+int rtw89_efuse_read_ecv_be(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/efuse_be.c b/drivers/net/wireless/realtek/rtw89/efuse_be.c
index 64768923b0f0..70c1b8be662e 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse_be.c
+++ b/drivers/net/wireless/realtek/rtw89/efuse_be.c
@@ -512,3 +512,29 @@ out:
return 0;
}
+
+int rtw89_efuse_read_ecv_be(struct rtw89_dev *rtwdev)
+{
+ u32 dump_addr;
+	u8 buff[4]; /* efuse access must be 4-byte aligned */
+ int ret;
+ u8 ecv;
+ u8 val;
+
+ dump_addr = ALIGN_DOWN(EF_FV_OFSET_BE_V1, 4);
+
+ ret = rtw89_dump_physical_efuse_map_be(rtwdev, buff, dump_addr, 4, false);
+ if (ret)
+ return ret;
+
+ val = buff[EF_FV_OFSET_BE_V1 & 0x3];
+
+ ecv = u8_get_bits(val, EF_CV_MASK);
+ if (ecv == EF_CV_INV)
+ return -ENOENT;
+
+ rtwdev->hal.cv = ecv;
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_efuse_read_ecv_be);
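+
+/*
+ * Worked example of the aligned access above: EF_FV_OFSET_BE_V1 is
+ * 0x17CA, so ALIGN_DOWN(0x17CA, 4) = 0x17C8 is dumped, and the byte of
+ * interest sits at index 0x17CA & 0x3 = 2 within buff[].
+ */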
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 7b9d9989e517..f84726f04669 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -812,6 +812,8 @@ struct __fw_feat_cfg {
enum rtw89_fw_feature feature;
u32 ver_code;
bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
+ bool disable;
+ int size;
};
#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
@@ -822,10 +824,36 @@ struct __fw_feat_cfg {
.cond = __fw_feat_cond_ ## _cond, \
}
+#define __S_DIS_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
+ { \
+ .chip_id = _chip, \
+ .feature = RTW89_FW_FEATURE_ ## _feat, \
+ .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
+ .cond = __fw_feat_cond_ ## _cond, \
+ .disable = true, \
+ .size = 1, \
+ }
+
+#define __G_DIS_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _grp) \
+ { \
+ .chip_id = _chip, \
+ .feature = RTW89_FW_FEATURE_ ## _grp ## _MIN, \
+ .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
+ .cond = __fw_feat_cond_ ## _cond, \
+ .disable = true, \
+ .size = RTW89_FW_FEATURE_ ## _grp ## _MAX - \
+ RTW89_FW_FEATURE_ ## _grp ## _MIN + 1, \
+ }
+
+#define __DIS_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat, _type) \
+ __##_type##_DIS_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat)
+
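+/*
+ * Sketch of how a disable entry expands: a group disable such as
+ *
+ *   __DIS_FW_FEAT(RTL8922A, ge, 0, 35, 84, 0, WITH_RFK_PRE_NOTIFY, G)
+ *
+ * yields an entry with .disable = true and .size spanning the group
+ * from _MIN to _MAX, which rtw89_fw_iterate_feature_cfg() then clears
+ * bit by bit for firmware at or above version 0.35.84.0.
+ */
+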
static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER_TYPE_0),
+ __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 127, 0, SER_L1_BY_EVENT),
+ __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 130, 0, SIM_SER_L0L1_BY_HALT_H2C),
__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
@@ -837,11 +865,14 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 15, BEACON_LOSS_COUNT_V1),
__CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 127, 0, SER_L1_BY_EVENT),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, CRASH_TRIGGER_TYPE_1),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, SCAN_OFFLOAD_EXTRA_OP),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, BEACON_TRACKING),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 130, 0, SIM_SER_L0L1_BY_HALT_H2C),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER_TYPE_0),
@@ -851,8 +882,10 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SCAN_OFFLOAD_EXTRA_OP),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, CRASH_TRIGGER_TYPE_1),
- __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SER_L1_BY_EVENT),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 130, 0, SIM_SER_L0L1_BY_HALT_H2C),
__CFG_FW_FEAT(RTL8852C, ge, 0, 0, 0, 0, RFK_NTFY_MCC_V0),
+ __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER_TYPE_0),
@@ -862,26 +895,34 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, CRASH_TRIGGER_TYPE_1),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 129, 1, BEACON_TRACKING),
- __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER_TYPE_0),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 29, 94, 0, SER_L1_BY_EVENT),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 29, 130, 0, SIM_SER_L0L1_BY_HALT_H2C),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 0, 0, 0, RFK_PRE_NOTIFY_V0),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
- __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD_EXTRA_OP),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 28, 0, RFK_IQK_V0),
- __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 31, 0, RFK_PRE_NOTIFY_V1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
- __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 49, 0, RFK_PRE_NOTIFY_V2),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 49, 0, RFK_PRE_NOTIFY_MCC_V0),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 76, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 79, 0, CRASH_TRIGGER_TYPE_1),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 80, 0, BEACON_TRACKING),
+ __DIS_FW_FEAT(RTL8922A, ge, 0, 35, 84, 0, WITH_RFK_PRE_NOTIFY, G),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 84, 0, RFK_PRE_NOTIFY_MCC_V1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 84, 0, ADDR_CAM_V0),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 97, 0, SIM_SER_L0L1_BY_HALT_H2C),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -896,8 +937,16 @@ static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
if (chip->chip_id != ent->chip_id)
continue;
- if (ent->cond(ver_code, ent->ver_code))
+ if (!ent->cond(ver_code, ent->ver_code))
+ continue;
+
+ if (!ent->disable) {
RTW89_SET_FW_FEATURE(ent->feature, fw);
+ continue;
+ }
+
+ for (int n = 0; n < ent->size; n++)
+ RTW89_CLR_FW_FEATURE(ent->feature + n, fw);
}
}
@@ -1013,42 +1062,47 @@ int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
const union rtw89_fw_element_arg arg)
{
struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
- struct rtw89_phy_table *tbl;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_phy_table *tbl, **pp;
struct rtw89_reg2_def *regs;
- enum rtw89_rf_path rf_path;
+ bool radio = false;
u32 n_regs, i;
+ u16 aid;
u8 idx;
- tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
- if (!tbl)
- return -ENOMEM;
-
switch (le32_to_cpu(elm->id)) {
case RTW89_FW_ELEMENT_ID_BB_REG:
- elm_info->bb_tbl = tbl;
+ pp = &elm_info->bb_tbl;
break;
case RTW89_FW_ELEMENT_ID_BB_GAIN:
- elm_info->bb_gain = tbl;
+ pp = &elm_info->bb_gain;
break;
case RTW89_FW_ELEMENT_ID_RADIO_A:
case RTW89_FW_ELEMENT_ID_RADIO_B:
case RTW89_FW_ELEMENT_ID_RADIO_C:
case RTW89_FW_ELEMENT_ID_RADIO_D:
- rf_path = arg.rf_path;
idx = elm->u.reg2.idx;
+ pp = &elm_info->rf_radio[idx];
- elm_info->rf_radio[idx] = tbl;
- tbl->rf_path = rf_path;
- tbl->config = rtw89_phy_config_rf_reg_v1;
+ radio = true;
break;
case RTW89_FW_ELEMENT_ID_RF_NCTL:
- elm_info->rf_nctl = tbl;
+ pp = &elm_info->rf_nctl;
break;
default:
- kfree(tbl);
return -ENOENT;
}
+ aid = le16_to_cpu(elm->aid);
+	if (aid && aid != hal->aid)
+		return 1; /* ignore if the AID does not match */
+	if (*pp)
+		return 1; /* ignore if an element already exists */
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return -ENOMEM;
+
n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
regs = kcalloc(n_regs, sizeof(*regs), GFP_KERNEL);
if (!regs)
@@ -1062,6 +1116,13 @@ int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
tbl->n_regs = n_regs;
tbl->regs = regs;
+ if (radio) {
+ tbl->rf_path = arg.rf_path;
+ tbl->config = rtw89_phy_config_rf_reg_v1;
+ }
+
+ *pp = tbl;
+
return 0;
out:
@@ -1322,6 +1383,26 @@ int rtw89_recognize_diag_mac_from_elm(struct rtw89_dev *rtwdev,
return 0;
}
+static
+int rtw89_build_tx_comp_from_elm(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_element_hdr *elm,
+ const union rtw89_fw_element_arg arg)
+{
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u16 aid;
+
+ aid = le16_to_cpu(elm->aid);
+	if (aid && aid != hal->aid)
+		return 1; /* ignore if the AID does not match */
+	if (elm_info->tx_comp)
+		return 1; /* ignore if an element already exists */
+
+ elm_info->tx_comp = elm;
+
+ return 0;
+}
+
static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
@@ -1413,6 +1494,9 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_DIAG_MAC] = {
rtw89_recognize_diag_mac_from_elm, {}, NULL,
},
+ [RTW89_FW_ELEMENT_ID_TX_COMP] = {
+ rtw89_build_tx_comp_from_elm, {}, NULL,
+ },
};
int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
@@ -1481,11 +1565,12 @@ void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u8 type, u8 cat, u8 class, u8 func,
bool rack, bool dack, u32 len)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct fwcmd_hdr *hdr;
hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
- if (!(rtwdev->fw.h2c_seq % 4))
+ if (chip->chip_gen == RTW89_CHIP_AX && !(rtwdev->fw.h2c_seq % 4))
rack = true;
hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
FIELD_PREP(H2C_HDR_CAT, cat) |
@@ -2267,6 +2352,45 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);
+int rtw89_fw_h2c_dctl_sec_cam_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link)
+{
+ struct rtw89_h2c_dctlinfo_ud_v3 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_dctlinfo_ud_v3 *)skb->data;
+
+ rtw89_cam_fill_dctl_sec_cam_info_v3(rtwdev, rtwvif_link, rtwsta_link, h2c);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_DCTLINFO_UD_V3, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v3);
+
int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link)
@@ -2322,6 +2446,62 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);
+int rtw89_fw_h2c_default_dmac_tbl_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link)
+{
+ u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ struct rtw89_h2c_dctlinfo_ud_v3 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+		rtw89_err(rtwdev, "failed to alloc skb for dctl v3\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_dctlinfo_ud_v3 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V3_C0_MACID) |
+ le32_encode_bits(1, DCTLINFO_V3_C0_OP);
+
+ h2c->m0 = cpu_to_le32(DCTLINFO_V3_W0_ALL);
+ h2c->m1 = cpu_to_le32(DCTLINFO_V3_W1_ALL);
+ h2c->m2 = cpu_to_le32(DCTLINFO_V3_W2_ALL);
+ h2c->m3 = cpu_to_le32(DCTLINFO_V3_W3_ALL);
+ h2c->m4 = cpu_to_le32(DCTLINFO_V3_W4_ALL);
+ h2c->m5 = cpu_to_le32(DCTLINFO_V3_W5_ALL);
+ h2c->m6 = cpu_to_le32(DCTLINFO_V3_W6_ALL);
+ h2c->m7 = cpu_to_le32(DCTLINFO_V3_W7_ALL);
+ h2c->m8 = cpu_to_le32(DCTLINFO_V3_W8_ALL);
+ h2c->m9 = cpu_to_le32(DCTLINFO_V3_W9_ALL);
+ h2c->m10 = cpu_to_le32(DCTLINFO_V3_W10_ALL);
+ h2c->m11 = cpu_to_le32(DCTLINFO_V3_W11_ALL);
+ h2c->m12 = cpu_to_le32(DCTLINFO_V3_W12_ALL);
+ h2c->m13 = cpu_to_le32(DCTLINFO_V3_W13_ALL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_DCTLINFO_UD_V3, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v3);
+
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
@@ -3108,6 +3288,161 @@ fail:
return ret;
}
+void rtw89_bb_lps_cmn_info_rx_gain_fill(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_link_info_rx_gain *h2c_gain,
+ const struct rtw89_chan *chan, u8 phy_idx)
+{
+ const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ enum rtw89_bb_link_rx_gain_table_type tab_idx;
+ struct rtw89_chan chan_bcn;
+ u8 bw = chan->band_width;
+ u8 gain_band;
+ u8 bw_idx;
+ u8 path;
+ int i;
+
+ rtw89_chan_create(&chan_bcn, chan->primary_channel, chan->primary_channel,
+ chan->band_type, RTW89_CHANNEL_WIDTH_20);
+
+ for (tab_idx = RTW89_BB_PS_LINK_RX_GAIN_TAB_BCN_PATH_A;
+ tab_idx < RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX; tab_idx++) {
+ struct rtw89_phy_calc_efuse_gain calc = {};
+
+ path = (tab_idx & BIT(0)) ? (RF_PATH_B) : (RF_PATH_A);
+ if (tab_idx & BIT(1)) {
+ rtw89_chip_calc_rx_gain_normal(rtwdev, chan, path, phy_idx,
+ &calc);
+ gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
+ if (bw > RTW89_CHANNEL_WIDTH_40)
+ bw_idx = RTW89_BB_BW_80_160_320;
+ else
+ bw_idx = RTW89_BB_BW_20_40;
+ } else {
+ rtw89_chip_calc_rx_gain_normal(rtwdev, &chan_bcn, path, phy_idx,
+ &calc);
+ gain_band = rtw89_subband_to_gain_band_be(chan_bcn.subband_type);
+ bw_idx = RTW89_BB_BW_20_40;
+ }
+
+ /* efuse ofst and comp */
+ h2c_gain->gain_ofst[tab_idx] = calc.rssi_ofst;
+ h2c_gain->cck_gain_ofst[tab_idx] = calc.cck_rpl_ofst;
+ h2c_gain->cck_rpl_bias_comp[tab_idx][0] = calc.cck_mean_gain_bias;
+ h2c_gain->cck_rpl_bias_comp[tab_idx][1] = calc.cck_mean_gain_bias;
+
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ h2c_gain->gain_err_tia[tab_idx][i] =
+ cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
+ }
+ memcpy(h2c_gain->gain_err_lna[tab_idx],
+ gain->lna_gain[gain_band][bw_idx][path],
+ LNA_GAIN_NUM);
+ memcpy(h2c_gain->op1db_lna[tab_idx],
+ gain->lna_op1db[gain_band][bw_idx][path],
+ LNA_GAIN_NUM);
+ memcpy(h2c_gain->op1db_tia[tab_idx],
+ gain->tia_lna_op1db[gain_band][bw_idx][path],
+ LNA_GAIN_NUM + 1);
+
+ memcpy(h2c_gain->rpl_bias_comp_bw[tab_idx]._20M,
+ gain->rpl_ofst_20[gain_band][path],
+ RTW89_BW20_SC_20M);
+ memcpy(h2c_gain->rpl_bias_comp_bw[tab_idx]._40M,
+ gain->rpl_ofst_40[gain_band][path],
+ RTW89_BW20_SC_40M);
+ memcpy(h2c_gain->rpl_bias_comp_bw[tab_idx]._80M,
+ gain->rpl_ofst_80[gain_band][path],
+ RTW89_BW20_SC_80M);
+ memcpy(h2c_gain->rpl_bias_comp_bw[tab_idx]._160M,
+ gain->rpl_ofst_160[gain_band][path],
+ RTW89_BW20_SC_160M);
+ }
+}
+
+int rtw89_fw_h2c_lps_ml_cmn_info_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12};
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ struct rtw89_h2c_lps_ml_cmn_info_v1 *h2c;
+ struct rtw89_vif_link *rtwvif_link;
+ const struct rtw89_chan *chan;
+ struct rtw89_bb_ctx *bb;
+ u32 len = sizeof(*h2c);
+ unsigned int link_id;
+ struct sk_buff *skb;
+ u8 beacon_bw_ofst;
+ u32 done;
+ int ret;
+
+ if (chip->chip_gen != RTW89_CHIP_BE)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info_v1\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_lps_ml_cmn_info_v1 *)skb->data;
+
+ h2c->fmt_id = 0x20;
+
+ h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+ h2c->rfe_type = efuse->rfe_type;
+ h2c->rssi_main = U8_MAX;
+
+ memset(h2c->link_id, 0xfe, RTW89_BB_PS_LINK_BUF_MAX);
+
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ u8 phy_idx = rtwvif_link->phy_idx;
+
+ bb = rtw89_get_bb_ctx(rtwdev, phy_idx);
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+
+ h2c->link_id[phy_idx] = phy_idx;
+ h2c->central_ch[phy_idx] = chan->channel;
+ h2c->pri_ch[phy_idx] = chan->primary_channel;
+ h2c->band[phy_idx] = chan->band_type;
+ h2c->bw[phy_idx] = chan->band_width;
+
+ if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) {
+ beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx];
+ h2c->dup_bcn_ofst[phy_idx] = beacon_bw_ofst;
+ }
+
+ if (h2c->rssi_main > bb->ch_info.rssi_min)
+ h2c->rssi_main = bb->ch_info.rssi_min;
+
+ rtw89_bb_lps_cmn_info_rx_gain_fill(rtwdev,
+ &h2c->rx_gain[phy_idx],
+ chan, phy_idx);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
+ H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);
+
+ rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT_BE4, B_CHK_LPS_STAT, 0);
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
+ true, rtwdev, R_CHK_LPS_STAT_BE4, B_CHK_LPS_STAT);
+ if (ret)
+ rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
@@ -3318,6 +3653,92 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7);
+int rtw89_fw_h2c_default_cmac_tbl_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link)
+{
+ u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ bool preld = rtw89_mac_chk_preload_allow(rtwdev);
+ struct rtw89_h2c_cctlinfo_ud_be *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for default cmac be\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, BE_CCTL_INFO_C0_V1_MACID) |
+ le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);
+
+ h2c->w0 = le32_encode_bits(4, BE_CCTL_INFO_W0_DATARATE);
+ h2c->m0 = cpu_to_le32(BE_CCTL_INFO_W0_ALL);
+
+ h2c->w1 = le32_encode_bits(4, BE_CCTL_INFO_W1_DATA_RTY_LOWEST_RATE) |
+ le32_encode_bits(0xa, BE_CCTL_INFO_W1_RTSRATE) |
+ le32_encode_bits(4, BE_CCTL_INFO_W1_RTS_RTY_LOWEST_RATE);
+ h2c->m1 = cpu_to_le32(BE_CCTL_INFO_W1_ALL);
+
+	h2c->w2 = le32_encode_bits(preld, BE_CCTL_INFO_W2_PRELOAD_ENABLE);
+ h2c->m2 = cpu_to_le32(BE_CCTL_INFO_W2_ALL);
+
+ h2c->m3 = cpu_to_le32(BE_CCTL_INFO_W3_ALL);
+
+ h2c->w4 = le32_encode_bits(0xFFFF, BE_CCTL_INFO_W4_ACT_SUBCH_CBW);
+ h2c->m4 = cpu_to_le32(BE_CCTL_INFO_W4_ALL);
+
+ h2c->w5 = le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING0_V1) |
+ le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING1_V1) |
+ le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING2_V1) |
+ le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING3_V1) |
+ le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING4_V1);
+ h2c->m5 = cpu_to_le32(BE_CCTL_INFO_W5_ALL);
+
+ h2c->w6 = le32_encode_bits(0xb, BE_CCTL_INFO_W6_RESP_REF_RATE);
+ h2c->m6 = cpu_to_le32(BE_CCTL_INFO_W6_ALL);
+
+ h2c->w7 = le32_encode_bits(1, BE_CCTL_INFO_W7_NC) |
+ le32_encode_bits(1, BE_CCTL_INFO_W7_NR) |
+ le32_encode_bits(1, BE_CCTL_INFO_W7_CB) |
+ le32_encode_bits(0x1, BE_CCTL_INFO_W7_CSI_PARA_EN) |
+ le32_encode_bits(0xb, BE_CCTL_INFO_W7_CSI_FIX_RATE);
+ h2c->m7 = cpu_to_le32(BE_CCTL_INFO_W7_ALL);
+
+ h2c->m8 = cpu_to_le32(BE_CCTL_INFO_W8_ALL);
+
+ h2c->w14 = le32_encode_bits(0, BE_CCTL_INFO_W14_VO_CURR_RATE) |
+ le32_encode_bits(0, BE_CCTL_INFO_W14_VI_CURR_RATE) |
+ le32_encode_bits(0, BE_CCTL_INFO_W14_BE_CURR_RATE_L);
+ h2c->m14 = cpu_to_le32(BE_CCTL_INFO_W14_ALL);
+
+ h2c->w15 = le32_encode_bits(0, BE_CCTL_INFO_W15_BE_CURR_RATE_H) |
+ le32_encode_bits(0, BE_CCTL_INFO_W15_BK_CURR_RATE) |
+ le32_encode_bits(0, BE_CCTL_INFO_W15_MGNT_CURR_RATE);
+ h2c->m15 = cpu_to_le32(BE_CCTL_INFO_W15_ALL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_be);
+
static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
struct ieee80211_link_sta *link_sta,
u8 *pads)
@@ -3648,6 +4069,134 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);
+int rtw89_fw_h2c_assoc_cmac_tbl_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link)
+{
+ struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ struct rtw89_h2c_cctlinfo_ud_be *h2c;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ieee80211_link_sta *link_sta;
+ u8 pads[RTW89_PPE_BW_NUM];
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u16 lowest_rate;
+ int ret;
+
+ memset(pads, 0, sizeof(pads));
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for assoc cmac be\n");
+ return -ENOMEM;
+ }
+
+ rcu_read_lock();
+
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+
+ if (rtwsta_link) {
+ link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
+
+ if (link_sta->eht_cap.has_eht)
+ __get_sta_eht_pkt_padding(rtwdev, link_sta, pads);
+ else if (link_sta->he_cap.has_he)
+ __get_sta_he_pkt_padding(rtwdev, link_sta, pads);
+ }
+
+ if (vif->p2p)
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ lowest_rate = RTW89_HW_RATE_CCK1;
+ else
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, BE_CCTL_INFO_C0_V1_MACID) |
+ le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);
+
+ h2c->w0 = le32_encode_bits(1, BE_CCTL_INFO_W0_DISRTSFB) |
+ le32_encode_bits(1, BE_CCTL_INFO_W0_DISDATAFB);
+ h2c->m0 = cpu_to_le32(BE_CCTL_INFO_W0_DISRTSFB |
+ BE_CCTL_INFO_W0_DISDATAFB);
+
+ h2c->w1 = le32_encode_bits(lowest_rate, BE_CCTL_INFO_W1_RTS_RTY_LOWEST_RATE);
+ h2c->m1 = cpu_to_le32(BE_CCTL_INFO_W1_RTS_RTY_LOWEST_RATE);
+
+ h2c->w2 = le32_encode_bits(0, BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL);
+ h2c->m2 = cpu_to_le32(BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL);
+
+ h2c->w3 = le32_encode_bits(0, BE_CCTL_INFO_W3_RTS_TXCNT_LMT_SEL);
+ h2c->m3 = cpu_to_le32(BE_CCTL_INFO_W3_RTS_TXCNT_LMT_SEL);
+
+ h2c->w4 = le32_encode_bits(rtwvif_link->port, BE_CCTL_INFO_W4_MULTI_PORT_ID);
+ h2c->m4 = cpu_to_le32(BE_CCTL_INFO_W4_MULTI_PORT_ID);
+
+ if (bss_conf->eht_support) {
+ u16 punct = bss_conf->chanreq.oper.punctured;
+
+ h2c->w4 |= le32_encode_bits(~punct,
+ BE_CCTL_INFO_W4_ACT_SUBCH_CBW);
+ h2c->m4 |= cpu_to_le32(BE_CCTL_INFO_W4_ACT_SUBCH_CBW);
+ }
+
+ h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
+ BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING0_V1) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
+ BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING1_V1) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
+ BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING2_V1) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
+ BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING3_V1) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
+ BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING4_V1);
+ h2c->m5 = cpu_to_le32(BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING0_V1 |
+ BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING1_V1 |
+ BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING2_V1 |
+ BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING3_V1 |
+ BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING4_V1);
+
+ if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
+ h2c->w5 |= le32_encode_bits(0, BE_CCTL_INFO_W5_DATA_DCM_V1);
+ h2c->m5 |= cpu_to_le32(BE_CCTL_INFO_W5_DATA_DCM_V1);
+ }
+
+ h2c->w6 = le32_encode_bits(vif->cfg.aid, BE_CCTL_INFO_W6_AID12_PAID) |
+ le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
+ BE_CCTL_INFO_W6_ULDL);
+ h2c->m6 = cpu_to_le32(BE_CCTL_INFO_W6_AID12_PAID | BE_CCTL_INFO_W6_ULDL);
+
+ if (rtwsta_link) {
+ h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
+ BE_CCTL_INFO_W8_BSR_QUEUE_SIZE_FORMAT_V1);
+ h2c->m8 = cpu_to_le32(BE_CCTL_INFO_W8_BSR_QUEUE_SIZE_FORMAT_V1);
+ }
+
+ rcu_read_unlock();
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_be);
+
int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link)
@@ -3714,6 +4263,72 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);
+int rtw89_fw_h2c_ampdu_cmac_tbl_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link)
+{
+ struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
+ struct rtw89_h2c_cctlinfo_ud_be *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u16 agg_num = 0;
+ u8 ba_bmap = 0;
+ int ret;
+ u8 tid;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac be\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;
+
+ for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
+ if (agg_num == 0)
+ agg_num = rtwsta->ampdu_params[tid].agg_num;
+ else
+ agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
+ }
+
+	if (agg_num <= 0x20)
+		ba_bmap = 3;
+	else if (agg_num <= 0x40)
+		ba_bmap = 0;
+	else if (agg_num <= 0x80)
+		ba_bmap = 1;
+	else if (agg_num <= 0x100)
+		ba_bmap = 2;
+	else if (agg_num <= 0x200)
+		ba_bmap = 4;
+	else if (agg_num <= 0x400)
+		ba_bmap = 5;
+
+ h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, BE_CCTL_INFO_C0_V1_MACID) |
+ le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);
+
+ h2c->w3 = le32_encode_bits(ba_bmap, BE_CCTL_INFO_W3_BA_BMAP);
+ h2c->m3 = cpu_to_le32(BE_CCTL_INFO_W3_BA_BMAP);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_be);
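+
+/*
+ * Reference for the BA bitmap selection in
+ * rtw89_fw_h2c_ampdu_cmac_tbl_be() (inclusive agg_num ranges):
+ *
+ *   agg_num <= 0x20  -> ba_bmap 3
+ *   0x21 - 0x40      -> ba_bmap 0
+ *   0x41 - 0x80      -> ba_bmap 1
+ *   0x81 - 0x100     -> ba_bmap 2
+ *   0x101 - 0x200    -> ba_bmap 4
+ *   0x201 - 0x400    -> ba_bmap 5
+ *
+ * agg_num above 0x400 keeps the zero-initialized default, ba_bmap 0.
+ */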
+
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link)
{
@@ -3811,6 +4426,60 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7);
+int rtw89_fw_h2c_txtime_cmac_tbl_be(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link)
+{
+ struct rtw89_h2c_cctlinfo_ud_be *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_be\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;
+
+ h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, BE_CCTL_INFO_C0_V1_MACID) |
+ le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);
+
+ if (rtwsta_link->cctl_tx_time) {
+ h2c->w3 |= le32_encode_bits(1, BE_CCTL_INFO_W3_AMPDU_TIME_SEL);
+ h2c->m3 |= cpu_to_le32(BE_CCTL_INFO_W3_AMPDU_TIME_SEL);
+
+ h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time,
+ BE_CCTL_INFO_W2_AMPDU_MAX_TIME);
+ h2c->m2 |= cpu_to_le32(BE_CCTL_INFO_W2_AMPDU_MAX_TIME);
+ }
+ if (rtwsta_link->cctl_tx_retry_limit) {
+ h2c->w2 |= le32_encode_bits(1, BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL) |
+ le32_encode_bits(rtwsta_link->data_tx_cnt_lmt,
+ BE_CCTL_INFO_W2_DATA_TX_CNT_LMT);
+ h2c->m2 |= cpu_to_le32(BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL |
+ BE_CCTL_INFO_W2_DATA_TX_CNT_LMT);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_be);
+
int rtw89_fw_h2c_punctured_cmac_tbl_g7(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
u16 punctured)
@@ -3854,6 +4523,48 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_punctured_cmac_tbl_g7);
+int rtw89_fw_h2c_punctured_cmac_tbl_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ u16 punctured)
+{
+ struct rtw89_h2c_cctlinfo_ud_be *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for punctured cmac be\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;
+
+ h2c->c0 = le32_encode_bits(rtwvif_link->mac_id, BE_CCTL_INFO_C0_V1_MACID) |
+ le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);
+
+ h2c->w4 = le32_encode_bits(~punctured, BE_CCTL_INFO_W4_ACT_SUBCH_CBW);
+ h2c->m4 = cpu_to_le32(BE_CCTL_INFO_W4_ACT_SUBCH_CBW);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_punctured_cmac_tbl_be);
+
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link)
{
@@ -5935,27 +6646,18 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
u8 scan_offload_ver = U8_MAX;
u8 cfg_len = sizeof(*h2c);
unsigned int cond;
- u8 ap_idx = U8_MAX;
u8 ver = U8_MAX;
u8 policy_val;
void *ptr;
+ u8 txnull;
u8 txbcn;
int ret;
u32 len;
u8 i;
- scan_op[0].macid = rtwvif_link->mac_id;
- scan_op[0].port = rtwvif_link->port;
- scan_op[0].chan = *op;
- vif = rtwvif_to_vif(rtwvif_link->rtwvif);
- if (vif->type == NL80211_IFTYPE_AP)
- ap_idx = 0;
-
- if (ext->set) {
- scan_op[1] = *ext;
- vif = rtwvif_to_vif(ext->rtwvif_link->rtwvif);
- if (vif->type == NL80211_IFTYPE_AP)
- ap_idx = 1;
+	if (option->num_opch > RTW89_MAX_OP_NUM_BE) {
+		rtw89_err(rtwdev, "number of scan op channels %d exceeds limit\n",
+			  option->num_opch);
+		return -ENOENT;
}
rtw89_scan_get_6g_disabled_chan(rtwdev, option);
@@ -6060,11 +6762,29 @@ flex_member:
}
for (i = 0; i < option->num_opch; i++) {
- bool is_ap_idx = i == ap_idx;
+ struct rtw89_vif_link *rtwvif_link_op;
+ bool is_ap;
+
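+		/* slot 0 is the caller's op channel; slot 1 is the optional extra op */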
+ switch (i) {
+ case 0:
+ scan_op[0].macid = rtwvif_link->mac_id;
+ scan_op[0].port = rtwvif_link->port;
+ scan_op[0].chan = *op;
+ rtwvif_link_op = rtwvif_link;
+ break;
+ case 1:
+ scan_op[1] = *ext;
+ rtwvif_link_op = ext->rtwvif_link;
+ break;
+ }
- opmode = is_ap_idx ? RTW89_SCAN_OPMODE_TBTT : RTW89_SCAN_OPMODE_INTV;
- policy_val = is_ap_idx ? 2 : RTW89_OFF_CHAN_TIME / 10;
- txbcn = is_ap_idx ? 1 : 0;
+ vif = rtwvif_to_vif(rtwvif_link_op->rtwvif);
+ is_ap = vif->type == NL80211_IFTYPE_AP;
+		txnull = !is_zero_ether_addr(rtwvif_link_op->bssid) && !is_ap;
+ opmode = is_ap ? RTW89_SCAN_OPMODE_TBTT : RTW89_SCAN_OPMODE_INTV;
+ policy_val = is_ap ? 2 : RTW89_OFF_CHAN_TIME / 10;
+ txbcn = is_ap ? 1 : 0;
opch = ptr;
opch->w0 = le32_encode_bits(scan_op[i].macid,
@@ -6075,7 +6795,7 @@ flex_member:
RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
le32_encode_bits(opmode,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
- le32_encode_bits(true,
+ le32_encode_bits(txnull,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) |
le32_encode_bits(policy_val,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
@@ -6349,6 +7069,7 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rfk_pre_info_common *common;
struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1;
+ struct rtw89_fw_h2c_rfk_pre_info_v2 *h2c_v2;
struct rtw89_fw_h2c_rfk_pre_info *h2c;
u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH];
u32 len = sizeof(*h2c);
@@ -6358,7 +7079,11 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
u32 val32;
int ret;
- if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
+ if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V3, &rtwdev->fw)) {
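+		/* latest format: keep default len and ver */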
+ } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V2, &rtwdev->fw)) {
+ len = sizeof(*h2c_v2);
+ ver = 2;
+ } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
len = sizeof(*h2c_v1);
ver = 1;
} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
@@ -6372,8 +7097,21 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
return -ENOMEM;
}
skb_put(skb, len);
+
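+	/* v0/v1/v2 share the legacy base_v1 layout; v3 uses the compact format */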
+ if (ver <= 2)
+ goto old_format;
+
h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
- common = &h2c->base_v1.common;
+
+ h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+ h2c->phy_idx = cpu_to_le32(phy_idx);
+ h2c->mlo_1_1 = cpu_to_le32(rtw89_is_mlo_1_1(rtwdev));
+
+ goto done;
+
+old_format:
+ h2c_v2 = (struct rtw89_fw_h2c_rfk_pre_info_v2 *)skb->data;
+ common = &h2c_v2->base_v1.common;
common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
@@ -6400,7 +7138,7 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
if (ver <= 1)
continue;
- h2c->cur_bandwidth[path] =
+ h2c_v2->cur_bandwidth[path] =
cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]);
}
@@ -6431,7 +7169,7 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
}
if (rtw89_is_mlo_1_1(rtwdev)) {
- h2c_v1 = &h2c->base_v1;
+ h2c_v1 = &h2c_v2->base_v1;
h2c_v1->mlo_1_1 = cpu_to_le32(1);
}
done:
@@ -6453,9 +7191,108 @@ fail:
return ret;
}
+int rtw89_fw_h2c_rf_pre_ntfy_mcc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
+ struct rtw89_rfk_mcc_info *rfk_mcc_v0 = &rtwdev->rfk_mcc;
+ struct rtw89_fw_h2c_rfk_pre_info_mcc_v0 *h2c_v0;
+ struct rtw89_fw_h2c_rfk_pre_info_mcc_v1 *h2c_v1;
+ struct rtw89_fw_h2c_rfk_pre_info_mcc *h2c;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 ver = U8_MAX;
+ u8 tbl, path;
+ u8 tbl_sel;
+ int ret;
+
+ if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_MCC_V2, &rtwdev->fw)) {
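+		/* latest format: keep default len and ver */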
+ } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_MCC_V1, &rtwdev->fw)) {
+ len = sizeof(*h2c_v1);
+ ver = 1;
+ } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_MCC_V0, &rtwdev->fw)) {
+ len = sizeof(*h2c_v0);
+ ver = 0;
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy_mcc\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+
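+	/* v0 carries per-path rf18 tables; v1/v2 share one table selected per path */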
+ if (ver != 0)
+ goto v1;
+
+ h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_mcc_v0 *)skb->data;
+ for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
+ for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
+ h2c_v0->tbl_18[tbl][path] =
+ cpu_to_le32(rfk_mcc_v0->data[path].rf18[tbl]);
+ tbl_sel = rfk_mcc_v0->data[path].table_idx;
+ h2c_v0->cur_18[path] =
+ cpu_to_le32(rfk_mcc_v0->data[path].rf18[tbl_sel]);
+ }
+ }
+
+ h2c_v0->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+ goto done;
+
+v1:
+ h2c_v1 = (struct rtw89_fw_h2c_rfk_pre_info_mcc_v1 *)skb->data;
+
+ BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
+
+ for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++)
+ h2c_v1->tbl_18[tbl] = cpu_to_le32(rfk_mcc->rf18[tbl]);
+
+ BUILD_BUG_ON(ARRAY_SIZE(rtwdev->rfk_mcc.data) < NUM_OF_RTW89_FW_RFK_PATH);
+
+	/* the rf18 table array is shared, but each path selects its own index */
+ for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
+ tbl = rfk_mcc[path].table_idx;
+ h2c_v1->cur_18[path] = cpu_to_le32(rfk_mcc->rf18[tbl]);
+
+ if (path == phy_idx)
+ h2c_v1->tbl_idx = tbl;
+ }
+
+ h2c_v1->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+ h2c_v1->phy_idx = phy_idx;
+
+ if (rtw89_is_mlo_1_1(rtwdev))
+ h2c_v1->mlo_1_1 = cpu_to_le32(1);
+
+ if (ver == 1)
+ goto done;
+
+ h2c = (struct rtw89_fw_h2c_rfk_pre_info_mcc *)skb->data;
+
+ h2c->aid = cpu_to_le32(hal->aid);
+
+done:
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
+ H2C_FUNC_OUTSRC_RF_MCC_INFO, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_h2c_rf_tssi *h2c;
@@ -6476,11 +7313,15 @@ int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
h2c->ch = chan->channel;
h2c->bw = chan->band_width;
h2c->band = chan->band_type;
- h2c->hwtx_en = true;
h2c->cv = hal->cv;
h2c->tssi_mode = tssi_mode;
h2c->rfe_type = efuse->rfe_type;
+	h2c->hwtx_en = chip->chip_id == RTL8922A;
+
rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c);
rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c);
@@ -6663,9 +7504,9 @@ int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
skb_put(skb, len);
h2c = (struct rtw89_h2c_rf_dack *)skb->data;
- h2c->len = cpu_to_le32(len);
- h2c->phy = cpu_to_le32(phy_idx);
- h2c->type = cpu_to_le32(0);
+ h2c->len = len;
+ h2c->phy = phy_idx;
+ h2c->type = 0;
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
@@ -6774,6 +7615,90 @@ fail:
return ret;
}
+int rtw89_fw_h2c_rf_txiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
+{
+ struct rtw89_h2c_rf_txiqk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXIQK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_txiqk *)skb->data;
+
+ h2c->len = len;
+ h2c->phy = phy_idx;
+ h2c->txiqk_enable = true;
+ h2c->is_wb_txiqk = true;
+ h2c->kpath = RF_AB;
+ h2c->cur_band = chan->band_type;
+ h2c->cur_bw = chan->band_width;
+ h2c->cur_ch = chan->channel;
+ h2c->txiqk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+			      H2C_FUNC_RFK_TXIQK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_cim3k(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
+{
+ struct rtw89_h2c_rf_cim3k *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF CIM3K\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_cim3k *)skb->data;
+
+ h2c->len = len;
+ h2c->phy = phy_idx;
+ h2c->kpath = RF_AB;
+ h2c->cur_band = chan->band_type;
+ h2c->cur_bw = chan->band_width;
+ h2c->cur_ch = chan->channel;
+ h2c->cim3k_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+			      H2C_FUNC_RFK_CIM3K_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack)
@@ -6859,6 +7784,17 @@ void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
__rtw89_fw_free_all_early_h2c(rtwdev);
}
+void rtw89_fw_c2h_dummy_handler(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
+ u8 category = attr->category;
+ u8 class = attr->class;
+ u8 func = attr->func;
+
+	rtw89_debug(rtwdev, RTW89_DBG_FW,
+		    "ignore C2H cate=%u cls=%u func=%u via dummy handler\n",
+		    category, class, func);
+}
+
static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
{
const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data;
@@ -7044,6 +7980,9 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
else
timeout = RTW89_C2H_TIMEOUT;
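+	/* a caller-provided timeout overrides the default chosen above */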
+ if (info->timeout)
+ timeout = info->timeout;
+
ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
timeout, false, rtwdev,
chip->c2h_ctrl_reg);
@@ -7384,6 +8323,7 @@ static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type,
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
struct rtw89_pktofld_info *info;
+ struct ieee80211_vif *vif;
u8 band, probe_count = 0;
int ret;
@@ -7436,7 +8376,9 @@ static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type,
ch_info->pri_ch = op->primary_channel;
ch_info->ch_band = op->band_type;
ch_info->bw = op->band_width;
- ch_info->tx_null = true;
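+		/* tx NULL on the op channel only when associated: non-AP with a known BSSID */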
+ vif = rtwvif_link_to_vif(rtwvif_link);
+ ch_info->tx_null = !is_zero_ether_addr(rtwvif_link->bssid) &&
+ vif->type != NL80211_IFTYPE_AP;
ch_info->num_pkt = 0;
break;
case RTW89_CHAN_DFS:
@@ -7454,7 +8396,9 @@ static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type,
ch_info->pri_ch = ext->chan.primary_channel;
ch_info->ch_band = ext->chan.band_type;
ch_info->bw = ext->chan.band_width;
- ch_info->tx_null = true;
+ vif = rtwvif_link_to_vif(ext->rtwvif_link);
+ ch_info->tx_null = !is_zero_ether_addr(ext->rtwvif_link->bssid) &&
+ vif->type != NL80211_IFTYPE_AP;
ch_info->num_pkt = 0;
ch_info->macid_tx = true;
break;
@@ -8083,12 +9027,9 @@ static void rtw89_hw_scan_set_extra_op_info(struct rtw89_dev *rtwdev,
if (tmp == scan_rtwvif)
continue;
- tmp_link = rtw89_vif_get_link_inst(tmp, 0);
- if (unlikely(!tmp_link)) {
- rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
- "hw scan: no HW-0 link for extra op\n");
+ tmp_link = rtw89_get_designated_link(tmp);
+ if (unlikely(!tmp_link))
continue;
- }
tmp_chan = rtw89_chan_get(rtwdev, tmp_link->chanctx_idx);
*ext = (struct rtw89_hw_scan_extra_op){
@@ -8114,6 +9055,7 @@ int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
struct cfg80211_scan_request *req = &scan_req->req;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif_link->chanctx_idx);
+ struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct rtw89_chanctx_pause_parm pause_parm = {
.rsn = RTW89_CHANCTX_PAUSE_REASON_HW_SCAN,
@@ -8142,6 +9084,8 @@ int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
get_random_mask_addr(mac_addr, req->mac_addr,
req->mac_addr_mask);
+ else if (ieee80211_vif_is_mld(vif))
+ ether_addr_copy(mac_addr, vif->addr);
else
ether_addr_copy(mac_addr, rtwvif_link->mac_addr);
@@ -8705,44 +9649,106 @@ fail:
return ret;
}
-#define H2C_WOW_CAM_UPD_LEN 24
-int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
- struct rtw89_wow_cam_info *cam_info)
+int rtw89_fw_h2c_wow_cam_update(struct rtw89_dev *rtwdev,
+ struct rtw89_wow_cam_info *cam_info)
{
+ struct rtw89_h2c_wow_cam_update *h2c;
+ u32 len = sizeof(*h2c);
struct sk_buff *skb;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
+ rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
return -ENOMEM;
}
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_wow_cam_update *)skb->data;
+
+ h2c->w0 = le32_encode_bits(cam_info->r_w, RTW89_H2C_WOW_CAM_UPD_W0_R_W) |
+ le32_encode_bits(cam_info->idx, RTW89_H2C_WOW_CAM_UPD_W0_IDX);
+
+ if (!cam_info->valid)
+ goto fill_valid;
+
+ h2c->wkfm0 = cam_info->mask[0];
+ h2c->wkfm1 = cam_info->mask[1];
+ h2c->wkfm2 = cam_info->mask[2];
+ h2c->wkfm3 = cam_info->mask[3];
+ h2c->w5 = le32_encode_bits(cam_info->crc, RTW89_H2C_WOW_CAM_UPD_W5_CRC) |
+ le32_encode_bits(cam_info->negative_pattern_match,
+ RTW89_H2C_WOW_CAM_UPD_W5_NEGATIVE_PATTERN_MATCH) |
+ le32_encode_bits(cam_info->skip_mac_hdr,
+ RTW89_H2C_WOW_CAM_UPD_W5_SKIP_MAC_HDR) |
+ le32_encode_bits(cam_info->uc, RTW89_H2C_WOW_CAM_UPD_W5_UC) |
+ le32_encode_bits(cam_info->mc, RTW89_H2C_WOW_CAM_UPD_W5_MC) |
+ le32_encode_bits(cam_info->bc, RTW89_H2C_WOW_CAM_UPD_W5_BC);
+fill_valid:
+ h2c->w5 |= le32_encode_bits(cam_info->valid, RTW89_H2C_WOW_CAM_UPD_W5_VALID);
- skb_put(skb, H2C_WOW_CAM_UPD_LEN);
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_WOW_CAM_UPD, 0, 1,
+ len);
- RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
- RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
- if (cam_info->valid) {
- RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
- RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
- RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
- RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
- RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
- RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
- cam_info->negative_pattern_match);
- RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
- cam_info->skip_mac_hdr);
- RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
- RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
- RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
}
- RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_wow_cam_update);
+
+int rtw89_fw_h2c_wow_cam_update_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_wow_cam_info *cam_info)
+{
+ struct rtw89_h2c_wow_payload_cam_update *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for wow payload cam update\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_wow_payload_cam_update *)skb->data;
+
+ h2c->w0 = le32_encode_bits(cam_info->r_w, RTW89_H2C_WOW_PLD_CAM_UPD_W0_R_W) |
+ le32_encode_bits(cam_info->idx, RTW89_H2C_WOW_PLD_CAM_UPD_W0_IDX);
+ h2c->w8 = le32_encode_bits(cam_info->valid, RTW89_H2C_WOW_PLD_CAM_UPD_W8_VALID) |
+ le32_encode_bits(1, RTW89_H2C_WOW_PLD_CAM_UPD_W8_WOW_PTR);
+
+ if (!cam_info->valid)
+ goto done;
+
+ h2c->wkfm0 = cam_info->mask[0];
+ h2c->wkfm1 = cam_info->mask[1];
+ h2c->wkfm2 = cam_info->mask[2];
+ h2c->wkfm3 = cam_info->mask[3];
+ h2c->w5 = le32_encode_bits(cam_info->uc, RTW89_H2C_WOW_PLD_CAM_UPD_W5_UC) |
+ le32_encode_bits(cam_info->mc, RTW89_H2C_WOW_PLD_CAM_UPD_W5_MC) |
+ le32_encode_bits(cam_info->bc, RTW89_H2C_WOW_PLD_CAM_UPD_W5_BC) |
+ le32_encode_bits(cam_info->skip_mac_hdr,
+ RTW89_H2C_WOW_PLD_CAM_UPD_W5_SKIP_MAC_HDR);
+ h2c->w6 = le32_encode_bits(cam_info->crc, RTW89_H2C_WOW_PLD_CAM_UPD_W6_CRC);
+ h2c->w7 = le32_encode_bits(cam_info->negative_pattern_match,
+ RTW89_H2C_WOW_PLD_CAM_UPD_W7_NEGATIVE_PATTERN_MATCH);
+
+done:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
H2C_CL_MAC_WOW,
- H2C_FUNC_WOW_CAM_UPD, 0, 1,
- H2C_WOW_CAM_UPD_LEN);
+ H2C_FUNC_WOW_PLD_CAM_UPD, 0, 1,
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -8756,6 +9762,7 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_wow_cam_update_v1);
int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index cedb4a47a769..d45b6ea6ea1b 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -42,6 +42,10 @@ struct rtw89_c2hreg_phycap {
#define RTW89_C2HREG_PHYCAP_W0_BW GENMASK(31, 24)
#define RTW89_C2HREG_PHYCAP_W1_TX_NSS GENMASK(7, 0)
#define RTW89_C2HREG_PHYCAP_W1_PROT GENMASK(15, 8)
+#define RTW89_C2HREG_PHYCAP_W1_PROT_11N 1
+#define RTW89_C2HREG_PHYCAP_W1_PROT_11AC 2
+#define RTW89_C2HREG_PHYCAP_W1_PROT_11AX 3
+#define RTW89_C2HREG_PHYCAP_W1_PROT_11BE 4
#define RTW89_C2HREG_PHYCAP_W1_NIC GENMASK(23, 16)
#define RTW89_C2HREG_PHYCAP_W1_WL_FUNC GENMASK(31, 24)
#define RTW89_C2HREG_PHYCAP_W2_HW_TYPE GENMASK(7, 0)
@@ -120,6 +124,7 @@ struct rtw89_h2creg_sch_tx_en {
struct rtw89_mac_c2h_info {
u8 id;
u8 content_len;
+	u32 timeout; /* in microseconds; 0 means use the default */
union {
u32 c2hreg[RTW89_C2HREG_MAX];
struct rtw89_c2hreg_hdr hdr;
@@ -1517,6 +1522,153 @@ struct rtw89_h2c_cctlinfo_ud_g7 {
#define CCTLINFO_G7_W15_MGNT_CURR_RATE GENMASK(27, 16)
#define CCTLINFO_G7_W15_ALL GENMASK(27, 0)
+struct rtw89_h2c_cctlinfo_ud_be {
+ __le32 c0;
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 m0;
+ __le32 m1;
+ __le32 m2;
+ __le32 m3;
+ __le32 m4;
+ __le32 m5;
+ __le32 m6;
+ __le32 m7;
+ __le32 m8;
+ __le32 m9;
+ __le32 m10;
+ __le32 m11;
+ __le32 m12;
+ __le32 m13;
+ __le32 m14;
+ __le32 m15;
+} __packed;
+
+#define BE_CCTL_INFO_C0_V1_MACID GENMASK(9, 0)
+#define BE_CCTL_INFO_C0_V1_OP BIT(10)
+
+#define BE_CCTL_INFO_W0_DATARATE GENMASK(11, 0)
+#define BE_CCTL_INFO_W0_DATA_GI_LTF GENMASK(14, 12)
+#define BE_CCTL_INFO_W0_TRYRATE BIT(15)
+#define BE_CCTL_INFO_W0_ARFR_CTRL GENMASK(17, 16)
+#define BE_CCTL_INFO_W0_DIS_HE1SS_STBC BIT(18)
+#define BE_CCTL_INFO_W0_ACQ_RPT_EN BIT(20)
+#define BE_CCTL_INFO_W0_MGQ_RPT_EN BIT(21)
+#define BE_CCTL_INFO_W0_ULQ_RPT_EN BIT(22)
+#define BE_CCTL_INFO_W0_TWTQ_RPT_EN BIT(23)
+#define BE_CCTL_INFO_W0_FORCE_TXOP BIT(24)
+#define BE_CCTL_INFO_W0_DISRTSFB BIT(25)
+#define BE_CCTL_INFO_W0_DISDATAFB BIT(26)
+#define BE_CCTL_INFO_W0_NSTR_EN BIT(27)
+#define BE_CCTL_INFO_W0_AMPDU_DENSITY GENMASK(31, 28)
+#define BE_CCTL_INFO_W0_ALL (GENMASK(31, 20) | GENMASK(18, 0))
+#define BE_CCTL_INFO_W1_DATA_RTY_LOWEST_RATE GENMASK(11, 0)
+#define BE_CCTL_INFO_W1_RTS_TXCNT_LMT GENMASK(15, 12)
+#define BE_CCTL_INFO_W1_RTSRATE GENMASK(27, 16)
+#define BE_CCTL_INFO_W1_RTS_RTY_LOWEST_RATE GENMASK(31, 28)
+#define BE_CCTL_INFO_W1_ALL GENMASK(31, 0)
+#define BE_CCTL_INFO_W2_DATA_TX_CNT_LMT GENMASK(5, 0)
+#define BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL BIT(6)
+#define BE_CCTL_INFO_W2_MAX_AGG_NUM_SEL BIT(7)
+#define BE_CCTL_INFO_W2_RTS_EN BIT(8)
+#define BE_CCTL_INFO_W2_CTS2SELF_EN BIT(9)
+#define BE_CCTL_INFO_W2_CCA_RTS GENMASK(11, 10)
+#define BE_CCTL_INFO_W2_HW_RTS_EN BIT(12)
+#define BE_CCTL_INFO_W2_RTS_DROP_DATA_MODE GENMASK(14, 13)
+#define BE_CCTL_INFO_W2_PRELOAD_ENABLE BIT(15)
+#define BE_CCTL_INFO_W2_AMPDU_MAX_LEN GENMASK(26, 16)
+#define BE_CCTL_INFO_W2_UL_MU_DIS BIT(27)
+#define BE_CCTL_INFO_W2_AMPDU_MAX_TIME GENMASK(31, 28)
+#define BE_CCTL_INFO_W2_ALL GENMASK(31, 0)
+#define BE_CCTL_INFO_W3_MAX_AGG_NUM GENMASK(7, 0)
+#define BE_CCTL_INFO_W3_DATA_BW GENMASK(10, 8)
+#define BE_CCTL_INFO_W3_DATA_BW_ER BIT(11)
+#define BE_CCTL_INFO_W3_BA_BMAP GENMASK(14, 12)
+#define BE_CCTL_INFO_W3_VCS_STBC BIT(15)
+#define BE_CCTL_INFO_W3_VO_LFTIME_SEL GENMASK(18, 16)
+#define BE_CCTL_INFO_W3_VI_LFTIME_SEL GENMASK(21, 19)
+#define BE_CCTL_INFO_W3_BE_LFTIME_SEL GENMASK(24, 22)
+#define BE_CCTL_INFO_W3_BK_LFTIME_SEL GENMASK(27, 25)
+#define BE_CCTL_INFO_W3_AMPDU_TIME_SEL BIT(28)
+#define BE_CCTL_INFO_W3_AMPDU_LEN_SEL BIT(29)
+#define BE_CCTL_INFO_W3_RTS_TXCNT_LMT_SEL BIT(30)
+#define BE_CCTL_INFO_W3_LSIG_TXOP_EN BIT(31)
+#define BE_CCTL_INFO_W3_ALL GENMASK(31, 0)
+#define BE_CCTL_INFO_W4_MULTI_PORT_ID GENMASK(2, 0)
+#define BE_CCTL_INFO_W4_BYPASS_PUNC BIT(3)
+#define BE_CCTL_INFO_W4_MBSSID GENMASK(7, 4)
+#define BE_CCTL_INFO_W4_TID_DISABLE_V1 GENMASK(15, 8)
+#define BE_CCTL_INFO_W4_ACT_SUBCH_CBW GENMASK(31, 16)
+#define BE_CCTL_INFO_W4_ALL GENMASK(31, 0)
+#define BE_CCTL_INFO_W5_ADDR_CAM_INDEX_V1 GENMASK(9, 0)
+#define BE_CCTL_INFO_W5_SR_MCS_SU GENMASK(14, 10)
+#define BE_CCTL_INFO_W5_A_CTRL_BQR_V1 BIT(15)
+#define BE_CCTL_INFO_W5_A_CTRL_BSR_V1 BIT(16)
+#define BE_CCTL_INFO_W5_A_CTRL_CAS_V1 BIT(17)
+#define BE_CCTL_INFO_W5_DATA_ER_V1 BIT(18)
+#define BE_CCTL_INFO_W5_DATA_DCM_V1 BIT(19)
+#define BE_CCTL_INFO_W5_DATA_LDPC_V1 BIT(20)
+#define BE_CCTL_INFO_W5_DATA_STBC_V1 BIT(21)
+#define BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING0_V1 GENMASK(23, 22)
+#define BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING1_V1 GENMASK(25, 24)
+#define BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING2_V1 GENMASK(27, 26)
+#define BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING3_V1 GENMASK(29, 28)
+#define BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING4_V1 GENMASK(31, 30)
+#define BE_CCTL_INFO_W5_ALL GENMASK(31, 0)
+#define BE_CCTL_INFO_W6_AID12_PAID GENMASK(11, 0)
+#define BE_CCTL_INFO_W6_RESP_REF_RATE GENMASK(23, 12)
+#define BE_CCTL_INFO_W6_ULDL BIT(31)
+#define BE_CCTL_INFO_W6_ALL (BIT(31) | GENMASK(23, 0))
+#define BE_CCTL_INFO_W7_NC GENMASK(2, 0)
+#define BE_CCTL_INFO_W7_NR GENMASK(5, 3)
+#define BE_CCTL_INFO_W7_NG GENMASK(7, 6)
+#define BE_CCTL_INFO_W7_CB GENMASK(9, 8)
+#define BE_CCTL_INFO_W7_CS GENMASK(11, 10)
+#define BE_CCTL_INFO_W7_CSI_STBC_EN BIT(13)
+#define BE_CCTL_INFO_W7_CSI_LDPC_EN BIT(14)
+#define BE_CCTL_INFO_W7_CSI_PARA_EN BIT(15)
+#define BE_CCTL_INFO_W7_CSI_FIX_RATE GENMASK(27, 16)
+#define BE_CCTL_INFO_W7_CSI_BW GENMASK(31, 29)
+#define BE_CCTL_INFO_W7_ALL GENMASK(31, 0)
+#define BE_CCTL_INFO_W8_ALL_ACK_SUPPORT_V1 BIT(0)
+#define BE_CCTL_INFO_W8_BSR_QUEUE_SIZE_FORMAT_V1 BIT(1)
+#define BE_CCTL_INFO_W8_BSR_OM_UPD_EN_V1 BIT(2)
+#define BE_CCTL_INFO_W8_MACID_FWD_IDC_V1 BIT(3)
+#define BE_CCTL_INFO_W8_AZ_SEC_EN BIT(4)
+#define BE_CCTL_INFO_W8_BF_SEC_EN BIT(5)
+#define BE_CCTL_INFO_W8_FIX_UL_ADDRCAM_IDX_V1 BIT(6)
+#define BE_CCTL_INFO_W8_CTRL_CNT_VLD_V1 BIT(7)
+#define BE_CCTL_INFO_W8_CTRL_CNT_V1 GENMASK(11, 8)
+#define BE_CCTL_INFO_W8_RESP_SEC_TYPE GENMASK(15, 12)
+#define BE_CCTL_INFO_W8_ALL GENMASK(15, 0)
+#define BE_CCTL_INFO_W9_EMLSR_TRANS_DLY GENMASK(2, 0)
+#define BE_CCTL_INFO_W9_ALL GENMASK(2, 0)
+#define BE_CCTL_INFO_W10_SW_EHT_NLTF GENMASK(1, 0)
+#define BE_CCTL_INFO_W10_TB_MLO_MODE BIT(2)
+#define BE_CCTL_INFO_W10_ALL GENMASK(2, 0)
+#define BE_CCTL_INFO_W14_VO_CURR_RATE GENMASK(11, 0)
+#define BE_CCTL_INFO_W14_VI_CURR_RATE GENMASK(23, 12)
+#define BE_CCTL_INFO_W14_BE_CURR_RATE_L GENMASK(31, 24)
+#define BE_CCTL_INFO_W14_ALL GENMASK(31, 0)
+#define BE_CCTL_INFO_W15_BE_CURR_RATE_H GENMASK(3, 0)
+#define BE_CCTL_INFO_W15_BK_CURR_RATE GENMASK(15, 4)
+#define BE_CCTL_INFO_W15_MGNT_CURR_RATE GENMASK(27, 16)
+#define BE_CCTL_INFO_W15_ALL GENMASK(27, 0)
+
struct rtw89_h2c_bcn_upd {
__le32 w0;
__le32 w1;
@@ -1865,6 +2017,66 @@ struct rtw89_h2c_lps_ml_cmn_info {
u8 dup_bcn_ofst[RTW89_PHY_NUM];
} __packed;
+#define BB_RX_GAIN_TB_RSSI_COMP_NUM 3
+#define BB_RX_GAIN_CCK_RPL_BIAS_COMP_NUM 2
+#define BB_GT2_GS_IDX_NUM 11
+#define BB_GT2_WB_GIDX_ELNA_NUM 16
+#define BB_GT2_G_ELNA_NUM 2
+
+enum rtw89_bb_link_rx_gain_table_type {
+ RTW89_BB_PS_LINK_RX_GAIN_TAB_BCN_PATH_A = 0x00,
+ RTW89_BB_PS_LINK_RX_GAIN_TAB_BCN_PATH_B = 0x01,
+ RTW89_BB_PS_LINK_RX_GAIN_TAB_NOR_PATH_A = 0x02,
+ RTW89_BB_PS_LINK_RX_GAIN_TAB_NOR_PATH_B = 0x03,
+ RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX,
+};
+
+enum rtw89_bb_ps_link_buf_id {
+ RTW89_BB_PS_LINK_BUF_0 = 0x00,
+ RTW89_BB_PS_LINK_BUF_1 = 0x01,
+ RTW89_BB_PS_LINK_BUF_2 = 0x02,
+ RTW89_BB_PS_LINK_BUF_MAX,
+};
+
+struct rtw89_bb_link_info_rx_gain {
+ u8 gain_ofst[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX];
+ __le16 rpl_bias_comp[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX];
+ u8 tb_rssi_m_bias_comp[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX]
+ [BB_RX_GAIN_TB_RSSI_COMP_NUM];
+ u8 cck_gain_ofst[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX];
+ u8 cck_rpl_bias_comp[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX]
+ [BB_RX_GAIN_CCK_RPL_BIAS_COMP_NUM];
+ u8 gain_err_lna[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX][LNA_GAIN_NUM];
+ __le16 gain_err_tia[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX][TIA_GAIN_NUM];
+ u8 op1db_lna[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX][LNA_GAIN_NUM];
+ u8 op1db_tia[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX][TIA_LNA_OP1DB_NUM];
+ struct {
+ u8 _20M[RTW89_BW20_SC_20M];
+ u8 _40M[RTW89_BW20_SC_40M];
+ u8 _80M[RTW89_BW20_SC_80M];
+ u8 _160M[RTW89_BW20_SC_160M];
+ } rpl_bias_comp_bw[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX];
+ u8 wb_gs[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX][BB_GT2_GS_IDX_NUM];
+ u8 bypass_lna[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX][LNA_GAIN_NUM];
+ u8 wb_lna_tia[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX][BB_GT2_WB_GIDX_ELNA_NUM];
+ u8 wb_g_elna[RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX][BB_GT2_G_ELNA_NUM];
+} __packed;
+
+struct rtw89_h2c_lps_ml_cmn_info_v1 {
+ u8 fmt_id;
+ u8 rfe_type;
+ u8 rssi_main;
+ u8 rsvd0;
+ __le32 mlo_dbcc_mode;
+ u8 link_id[RTW89_BB_PS_LINK_BUF_MAX];
+ u8 central_ch[RTW89_BB_PS_LINK_BUF_MAX];
+ u8 pri_ch[RTW89_BB_PS_LINK_BUF_MAX];
+ u8 bw[RTW89_BB_PS_LINK_BUF_MAX];
+ u8 band[RTW89_BB_PS_LINK_BUF_MAX];
+ u8 dup_bcn_ofst[RTW89_BB_PS_LINK_BUF_MAX];
+ struct rtw89_bb_link_info_rx_gain rx_gain[RTW89_BB_PS_LINK_BUF_MAX];
+} __packed;
+
struct rtw89_h2c_trig_cpu_except {
__le32 w0;
} __packed;
@@ -2052,70 +2264,55 @@ static inline void RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(void *h2c, u32 val)
le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24));
}
-static inline void RTW89_SET_WOW_CAM_UPD_R_W(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(0));
-}
-
-static inline void RTW89_SET_WOW_CAM_UPD_IDX(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 1));
-}
-
-static inline void RTW89_SET_WOW_CAM_UPD_WKFM1(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(31, 0));
-}
-
-static inline void RTW89_SET_WOW_CAM_UPD_WKFM2(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 2, val, GENMASK(31, 0));
-}
-
-static inline void RTW89_SET_WOW_CAM_UPD_WKFM3(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 3, val, GENMASK(31, 0));
-}
-
-static inline void RTW89_SET_WOW_CAM_UPD_WKFM4(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 4, val, GENMASK(31, 0));
-}
-
-static inline void RTW89_SET_WOW_CAM_UPD_CRC(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 5, val, GENMASK(15, 0));
-}
-
-static inline void RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 5, val, BIT(22));
-}
-
-static inline void RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 5, val, BIT(23));
-}
-
-static inline void RTW89_SET_WOW_CAM_UPD_UC(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 5, val, BIT(24));
-}
-
-static inline void RTW89_SET_WOW_CAM_UPD_MC(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 5, val, BIT(25));
-}
+struct rtw89_h2c_wow_cam_update {
+ __le32 w0;
+ __le32 wkfm0;
+ __le32 wkfm1;
+ __le32 wkfm2;
+ __le32 wkfm3;
+ __le32 w5;
+} __packed;
-static inline void RTW89_SET_WOW_CAM_UPD_BC(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 5, val, BIT(26));
-}
+#define RTW89_H2C_WOW_CAM_UPD_W0_R_W BIT(0)
+#define RTW89_H2C_WOW_CAM_UPD_W0_IDX GENMASK(7, 1)
+#define RTW89_H2C_WOW_CAM_UPD_WKFM0 GENMASK(31, 0)
+#define RTW89_H2C_WOW_CAM_UPD_WKFM1 GENMASK(31, 0)
+#define RTW89_H2C_WOW_CAM_UPD_WKFM2 GENMASK(31, 0)
+#define RTW89_H2C_WOW_CAM_UPD_WKFM3 GENMASK(31, 0)
+#define RTW89_H2C_WOW_CAM_UPD_W5_CRC GENMASK(15, 0)
+#define RTW89_H2C_WOW_CAM_UPD_W5_NEGATIVE_PATTERN_MATCH BIT(22)
+#define RTW89_H2C_WOW_CAM_UPD_W5_SKIP_MAC_HDR BIT(23)
+#define RTW89_H2C_WOW_CAM_UPD_W5_UC BIT(24)
+#define RTW89_H2C_WOW_CAM_UPD_W5_MC BIT(25)
+#define RTW89_H2C_WOW_CAM_UPD_W5_BC BIT(26)
+#define RTW89_H2C_WOW_CAM_UPD_W5_VALID BIT(31)
+
+struct rtw89_h2c_wow_payload_cam_update {
+ __le32 w0;
+ __le32 wkfm0;
+ __le32 wkfm1;
+ __le32 wkfm2;
+ __le32 wkfm3;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+} __packed;
-static inline void RTW89_SET_WOW_CAM_UPD_VALID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 5, val, BIT(31));
-}
+#define RTW89_H2C_WOW_PLD_CAM_UPD_W0_R_W BIT(0)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_W0_IDX GENMASK(7, 1)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_WKFM0 GENMASK(31, 0)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_WKFM1 GENMASK(31, 0)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_WKFM2 GENMASK(31, 0)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_WKFM3 GENMASK(31, 0)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_W5_UC BIT(0)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_W5_MC BIT(1)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_W5_BC BIT(2)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_W5_SKIP_MAC_HDR BIT(7)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_W6_CRC GENMASK(15, 0)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_W7_NEGATIVE_PATTERN_MATCH BIT(0)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_W8_VALID BIT(0)
+#define RTW89_H2C_WOW_PLD_CAM_UPD_W8_WOW_PTR BIT(1)
struct rtw89_h2c_wow_gtk_ofld {
__le32 w0;
@@ -2826,6 +3023,7 @@ struct rtw89_h2c_scanofld_be_macc_role {
__le32 w0;
} __packed;
+#define RTW89_MAX_OP_NUM_BE 2
#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND GENMASK(1, 0)
#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT GENMASK(4, 2)
#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID GENMASK(23, 8)
@@ -3691,6 +3889,26 @@ struct rtw89_c2h_ra_rpt {
#define RTW89_C2H_RA_RPT_W3_MD_SEL_B2 BIT(15)
#define RTW89_C2H_RA_RPT_W3_BW_B2 BIT(16)
+struct rtw89_c2h_lps_rpt {
+ struct rtw89_c2h_hdr hdr;
+ u8 type;
+ u8 cnt_bbcr;
+ u8 cnt_bbmcucr;
+ u8 cnt_rfcr;
+ u8 data[];
+ /*
+ * The layout of data:
+ * u8 info[][4], size = total_len - size of below fields
+ * __le16 bbcr_addr[], size = cnt_bbcr
+ * __le32 bbcr_data[], size = cnt_bbcr
+ * __le16 bbmcucr_addr[], size = cnt_bbmcucr
+ * __le32 bbmcucr_data[], size = cnt_bbmcucr
+ * __le16 rfcr_addr[], size = cnt_rfcr
+ * __le32 rfcr_data_a[], size = cnt_rfcr
+ * __le32 rfcr_data_b[], size = cnt_rfcr
+ */
+} __packed;
+
struct rtw89_c2h_fw_scan_rpt {
struct rtw89_c2h_hdr hdr;
u8 phy_idx;
@@ -4044,6 +4262,7 @@ enum rtw89_fw_element_id {
RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ = 26,
RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ = 27,
RTW89_FW_ELEMENT_ID_DIAG_MAC = 28,
+ RTW89_FW_ELEMENT_ID_TX_COMP = 29,
RTW89_FW_ELEMENT_ID_NUM,
};
@@ -4177,7 +4396,8 @@ struct rtw89_fw_element_hdr {
__le32 id; /* enum rtw89_fw_element_id */
__le32 size; /* exclude header size */
u8 ver[4];
- __le32 rsvd0;
+ __le16 aid; /* should match rtw89_hal::aid */
+ __le16 rsvd0;
__le32 rsvd1;
__le32 rsvd2;
union {
@@ -4307,6 +4527,7 @@ enum rtw89_wow_h2c_func {
H2C_FUNC_WAKEUP_CTRL = 0x8,
H2C_FUNC_WOW_CAM_UPD = 0xC,
H2C_FUNC_AOAC_REPORT_REQ = 0xD,
+ H2C_FUNC_WOW_PLD_CAM_UPD = 0x12,
NUM_OF_RTW89_WOW_H2C_FUNC,
};
@@ -4347,6 +4568,7 @@ enum rtw89_ps_h2c_func {
#define H2C_FUNC_MAC_CCTLINFO_UD_V1 0xa
#define H2C_FUNC_MAC_DCTLINFO_UD_V2 0xc
#define H2C_FUNC_MAC_BCN_UPD_BE 0xd
+#define H2C_FUNC_MAC_DCTLINFO_UD_V3 0x10
#define H2C_FUNC_MAC_CCTLINFO_UD_G7 0x11
/* CLASS 6 - Address CAM */
@@ -4483,6 +4705,7 @@ enum rtw89_mrc_h2c_func {
#define H2C_CL_OUTSRC_RF_REG_B 0x9
#define H2C_CL_OUTSRC_RF_FW_NOTIFY 0xa
#define H2C_FUNC_OUTSRC_RF_GET_MCCCH 0x2
+#define H2C_FUNC_OUTSRC_RF_MCC_INFO 0xf
#define H2C_FUNC_OUTSRC_RF_PS_INFO 0x10
#define H2C_CL_OUTSRC_RF_FW_RFK 0xb
@@ -4495,6 +4718,8 @@ enum rtw89_rfk_offload_h2c_func {
H2C_FUNC_RFK_RXDCK_OFFLOAD = 0x6,
H2C_FUNC_RFK_PRE_NOTIFY = 0x8,
H2C_FUNC_RFK_TAS_OFFLOAD = 0x9,
+	H2C_FUNC_RFK_TXIQK_OFFLOAD = 0xc,
+	H2C_FUNC_RFK_CIM3K_OFFLOAD = 0xe,
};
struct rtw89_fw_h2c_rf_get_mccch {
@@ -4586,11 +4811,38 @@ struct rtw89_fw_h2c_rfk_pre_info_v1 {
__le32 mlo_1_1;
} __packed;
-struct rtw89_fw_h2c_rfk_pre_info {
+struct rtw89_fw_h2c_rfk_pre_info_v2 {
struct rtw89_fw_h2c_rfk_pre_info_v1 base_v1;
__le32 cur_bandwidth[NUM_OF_RTW89_FW_RFK_PATH];
} __packed;
+struct rtw89_fw_h2c_rfk_pre_info {
+ __le32 mlo_mode;
+ __le32 phy_idx;
+ __le32 mlo_1_1;
+} __packed;
+
+struct rtw89_fw_h2c_rfk_pre_info_mcc_v0 {
+ __le32 tbl_18[NUM_OF_RTW89_FW_RFK_TBL][NUM_OF_RTW89_FW_RFK_PATH];
+ __le32 cur_18[NUM_OF_RTW89_FW_RFK_PATH];
+ __le32 mlo_mode;
+} __packed;
+
+struct rtw89_fw_h2c_rfk_pre_info_mcc_v1 {
+ __le32 tbl_18[NUM_OF_RTW89_FW_RFK_TBL];
+ __le32 cur_18[NUM_OF_RTW89_FW_RFK_PATH];
+ __le32 mlo_mode;
+ __le32 mlo_1_1;
+ u8 phy_idx;
+ u8 tbl_idx;
+} __packed;
+
+struct rtw89_fw_h2c_rfk_pre_info_mcc {
+ struct rtw89_fw_h2c_rfk_pre_info_mcc_v1 base;
+ u8 rsvd[2];
+ __le32 aid;
+} __packed;
+
struct rtw89_h2c_rf_tssi {
__le16 len;
u8 phy;
@@ -4660,9 +4912,9 @@ struct rtw89_h2c_rf_txgapk {
} __packed;
struct rtw89_h2c_rf_dack {
- __le32 len;
- __le32 phy;
- __le32 type;
+ u8 len;
+ u8 phy;
+ u8 type;
} __packed;
struct rtw89_h2c_rf_rxdck_v0 {
@@ -4685,6 +4937,30 @@ struct rtw89_h2c_rf_rxdck {
u8 is_chl_k;
} __packed;
+struct rtw89_h2c_rf_txiqk {
+ u8 len;
+ u8 phy;
+ u8 txiqk_enable;
+ u8 is_wb_txiqk;
+ u8 kpath;
+ u8 cur_band;
+ u8 cur_bw;
+ u8 cur_ch;
+ u8 txiqk_dbg_en;
+} __packed;
+
+struct rtw89_h2c_rf_cim3k {
+ u8 len;
+ u8 phy;
+ u8 su_cim3k_enable[2];
+ u8 ru_cim3k_enable[2];
+ u8 kpath;
+ u8 cur_band;
+ u8 cur_bw;
+ u8 cur_ch;
+ u8 cim3k_dbg_en;
+} __packed;
+
enum rtw89_rf_log_type {
RTW89_RF_RUN_LOG = 0,
RTW89_RF_RPT_LOG = 1,
@@ -4729,12 +5005,16 @@ struct rtw89_c2h_rf_iqk_rpt_log {
u8 rsvd;
__le32 reload_cnt;
__le32 iqk_fail_cnt;
+ __le32 rf_0x18[2];
__le32 lok_idac[2];
__le32 lok_vbuf[2];
- __le32 rftxgain[2][4];
- __le32 rfrxgain[2][4];
- __le32 tx_xym[2][4];
- __le32 rx_xym[2][4];
+ __le32 rftxgain[2][6];
+ __le32 rfrxgain[2][6];
+ __le32 tx_xym[2][6];
+ __le32 rx_xym[2][6];
+ __le32 rx_wb_xym[2][32];
+ bool is_radar;
+ u8 rsvd1[3];
} __packed;
struct rtw89_c2h_rf_dpk_rpt_log {
@@ -4777,6 +5057,7 @@ struct rtw89_c2h_rf_dack_rpt_log {
u8 dack_fail;
u8 wbdck_d[2];
u8 rck_d;
+ u8 adgaink_ex_d;
} __packed;
struct rtw89_c2h_rf_rxdck_rpt_log {
@@ -4803,7 +5084,57 @@ struct rtw89_c2h_rf_txgapk_rpt_log {
u8 is_txgapk_ok;
u8 chk_id;
u8 ver;
- u8 rsv1;
+ u8 d_bnd_ok;
+ __le32 stage[2];
+ __le16 failcode[2];
+ u8 rsvd[4];
+} __packed;
+
+struct rtw89_c2h_rf_txiqk_rpt_log {
+ u8 fw_txiqk_ver;
+ u8 iqk_band[2];
+ u8 iqk_ch[2];
+ u8 iqk_bw[2];
+ bool tx_iqk_fail[2];
+ bool is_iqk_init;
+ bool txiqk_en;
+ bool lok_en;
+ bool lok_fail[2];
+ u8 rsvd[2];
+ __le32 iqk_times;
+ bool txiqk_nctldone[2];
+ u8 rsvd2[2];
+ __le32 txgain[2][6];
+ __le32 tx_iqc[2][6];
+ __le32 tx_xym[2][6][14];
+ __le32 kidx[2];
+} __packed;
+
+struct rtw89_c2h_rf_cim3k_rpt_log {
+ u8 cim3k_band[2];
+ u8 cim3k_ch[2];
+ u8 cim3k_bw[2];
+ u8 su_path_ok[2];
+ u8 ru_path_ok[2];
+ u8 txagc_cim3k[2];
+ u8 ther_cim3k[2];
+ u8 cim3k_gs[2];
+ __le16 cim3k_pwsf[2];
+ bool cim3k_nctldone[2];
+ u8 rsvd[2];
+ __le32 cim3k_rxiqc[2];
+ __le32 cim3k_su_coef[2][3];
+ __le16 dc_i[2];
+ __le16 dc_q[2];
+ u8 corr_val[2];
+ u8 corr_idx[2];
+ u8 rxbb_ov[2];
+ u8 cim3k_txiqc[2];
+ u8 kidx[2];
+ u8 fw_cim3k_ver;
+ bool su_cim3k_en[2];
+ bool ru_cim3k_en[2];
+ u8 rsvd1;
} __packed;
struct rtw89_c2h_rfk_report {
@@ -4858,25 +5189,42 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
+int rtw89_fw_h2c_default_cmac_tbl_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
+int rtw89_fw_h2c_default_dmac_tbl_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
+int rtw89_fw_h2c_assoc_cmac_tbl_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
+int rtw89_fw_h2c_ampdu_cmac_tbl_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
+int rtw89_fw_h2c_txtime_cmac_tbl_be(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_punctured_cmac_tbl_g7(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
u16 punctured);
+int rtw89_fw_h2c_punctured_cmac_tbl_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ u16 punctured);
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
@@ -4895,9 +5243,13 @@ int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
+int rtw89_fw_h2c_dctl_sec_cam_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_fw_c2h_purge_obsoleted_scan_events(struct rtw89_dev *rtwdev);
+void rtw89_fw_c2h_dummy_handler(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len);
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
@@ -4948,6 +5300,7 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_rf_ps_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_pre_ntfy_mcc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
int rtw89_fw_h2c_mcc_dig(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx,
u8 mcc_role_idx, u8 pd_val, bool en);
@@ -4964,6 +5317,10 @@ int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan, bool is_chl_k);
int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable);
+int rtw89_fw_h2c_rf_txiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_cim3k(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
@@ -4994,6 +5351,11 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif);
+void rtw89_bb_lps_cmn_info_rx_gain_fill(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_link_info_rx_gain *h2c_gain,
+ const struct rtw89_chan *chan, u8 phy_idx);
+int rtw89_fw_h2c_lps_ml_cmn_info_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
bool enable);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
@@ -5054,8 +5416,10 @@ int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtw
bool enable);
int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, bool enable);
-int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
- struct rtw89_wow_cam_info *cam_info);
+int rtw89_fw_h2c_wow_cam_update(struct rtw89_dev *rtwdev,
+ struct rtw89_wow_cam_info *cam_info);
+int rtw89_fw_h2c_wow_cam_update_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_wow_cam_info *cam_info);
int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
bool enable);
@@ -5219,6 +5583,15 @@ int rtw89_chip_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
return 0;
}
+static inline
+int rtw89_chip_h2c_wow_cam_update(struct rtw89_dev *rtwdev,
+ struct rtw89_wow_cam_info *cam_info)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_wow_cam_update(rtwdev, cam_info);
+}
+
/* Must consider compatibility; don't insert new in the mid.
* Fill each field's default value in rtw89_regd_entcpy().
*/
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index d78fbe73e365..8472f1a63951 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -848,6 +848,7 @@ EXPORT_SYMBOL(rtw89_mac_get_err_status);
int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err)
{
struct rtw89_ser *ser = &rtwdev->ser;
+ bool ser_l1_hdl = false;
u32 halt;
int ret = 0;
@@ -856,6 +857,12 @@ int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err)
return -EINVAL;
}
+ if (err == MAC_AX_ERR_L1_DISABLE_EN || err == MAC_AX_ERR_L1_RCVY_EN)
+ ser_l1_hdl = true;
+
+ if (RTW89_CHK_FW_FEATURE(SER_L1_BY_EVENT, &rtwdev->fw) && ser_l1_hdl)
+ goto set;
+
ret = read_poll_timeout(rtw89_read32, halt, (halt == 0x0), 1000,
100000, false, rtwdev, R_AX_HALT_H2C_CTRL);
if (ret) {
@@ -863,10 +870,10 @@ int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err)
return -EFAULT;
}
+set:
rtw89_write32(rtwdev, R_AX_HALT_H2C, err);
- if (ser->prehandle_l1 &&
- (err == MAC_AX_ERR_L1_DISABLE_EN || err == MAC_AX_ERR_L1_RCVY_EN))
+ if (ser->prehandle_l1 && ser_l1_hdl)
return 0;
rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, B_AX_HALT_H2C_TRIGGER);
@@ -1476,15 +1483,37 @@ static void rtw89_mac_power_switch_boot_mode(struct rtw89_dev *rtwdev)
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
}
+static int rtw89_mac_pwr_off_func_for_unplugged(struct rtw89_dev *rtwdev)
+{
+	/*
+	 * Avoid register I/O when powering off an unplugged device, to
+	 * prevent access warnings, especially from XTAL SI.
+	 */
+ return 0;
+}
+
+static void rtw89_mac_update_scoreboard(struct rtw89_dev *rtwdev, u8 val)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 reg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(chip->btc_sb.n); i++) {
+ reg = chip->btc_sb.n[i].cfg;
+ if (!reg)
+ continue;
+
+ rtw89_write8(rtwdev, reg + 3, val);
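+		/* the notify value lives in the top byte of each scoreboard register */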
+ }
+}
+
static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
{
-#define PWR_ACT 1
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_pwr_cfg * const *cfg_seq;
int (*cfg_func)(struct rtw89_dev *rtwdev);
int ret;
- u8 val;
rtw89_mac_power_switch_boot_mode(rtwdev);
@@ -1492,17 +1521,22 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
cfg_seq = chip->pwr_on_seq;
cfg_func = chip->ops->pwr_on_func;
} else {
- cfg_seq = chip->pwr_off_seq;
- cfg_func = chip->ops->pwr_off_func;
+ if (test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags)) {
+ cfg_seq = NULL;
+ cfg_func = rtw89_mac_pwr_off_func_for_unplugged;
+ } else {
+ cfg_seq = chip->pwr_off_seq;
+ cfg_func = chip->ops->pwr_off_func;
+ }
}
if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
__rtw89_leave_ps_mode(rtwdev);
- val = rtw89_read32_mask(rtwdev, R_AX_IC_PWR_STATE, B_AX_WLMAC_PWR_STE_MASK);
- if (on && val == PWR_ACT) {
- rtw89_err(rtwdev, "MAC has already powered on\n");
- return -EBUSY;
+ if (on) {
+ ret = mac->reset_pwr_state(rtwdev);
+ if (ret)
+ return ret;
}
ret = cfg_func ? cfg_func(rtwdev) : rtw89_mac_pwr_seq(rtwdev, cfg_seq);
@@ -1510,26 +1544,32 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
return ret;
if (on) {
- if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags))
+ if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags)) {
+ rtw89_mac_efuse_read_ecv(rtwdev);
mac->efuse_read_fw_secure(rtwdev);
+ }
set_bit(RTW89_FLAG_POWERON, rtwdev->flags);
set_bit(RTW89_FLAG_DMAC_FUNC, rtwdev->flags);
set_bit(RTW89_FLAG_CMAC0_FUNC, rtwdev->flags);
- rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_TP_MAJOR);
+
+ rtw89_mac_update_scoreboard(rtwdev, MAC_AX_NOTIFY_TP_MAJOR);
+ rtw89_mac_clr_aon_intr(rtwdev);
} else {
clear_bit(RTW89_FLAG_POWERON, rtwdev->flags);
clear_bit(RTW89_FLAG_DMAC_FUNC, rtwdev->flags);
clear_bit(RTW89_FLAG_CMAC0_FUNC, rtwdev->flags);
clear_bit(RTW89_FLAG_CMAC1_FUNC, rtwdev->flags);
+ clear_bit(RTW89_FLAG_CMAC0_PWR, rtwdev->flags);
+ clear_bit(RTW89_FLAG_CMAC1_PWR, rtwdev->flags);
clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
- rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR);
+
+ rtw89_mac_update_scoreboard(rtwdev, MAC_AX_NOTIFY_PWR_MAJOR);
rtw89_set_entity_state(rtwdev, RTW89_PHY_0, false);
rtw89_set_entity_state(rtwdev, RTW89_PHY_1, false);
}
return 0;
-#undef PWR_ACT
}
int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev)
@@ -1664,8 +1704,8 @@ static int sys_init_ax(struct rtw89_dev *rtwdev)
const struct rtw89_mac_size_set rtw89_mac_size = {
.hfc_preccfg_pcie = {2, 40, 0, 0, 1, 0, 0, 0},
- .hfc_prec_cfg_c0 = {2, 32, 0, 0, 0, 0, 0, 0},
- .hfc_prec_cfg_c2 = {0, 256, 0, 0, 0, 0, 0, 0},
+ .hfc_prec_cfg_c0 = {2, 32, 0, 0, 0, 0, 0, 0, 2, 32, 0, 0},
+ .hfc_prec_cfg_c2 = {0, 256, 0, 0, 0, 0, 0, 0, 0, 256, 0, 0},
/* PCIE 64 */
.wde_size0 = {RTW89_WDE_PG_64, 4095, 1,},
.wde_size0_v1 = {RTW89_WDE_PG_64, 3328, 0, 0,},
@@ -1680,10 +1720,12 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_size7 = {RTW89_WDE_PG_64, 510, 2,},
/* DLFW */
.wde_size9 = {RTW89_WDE_PG_64, 0, 1024,},
+ .wde_size16_v1 = {RTW89_WDE_PG_64, 639, 1, 0,},
/* 8852C USB3.0 */
.wde_size17 = {RTW89_WDE_PG_64, 354, 30,},
/* 8852C DLFW */
.wde_size18 = {RTW89_WDE_PG_64, 0, 2048,},
+ .wde_size18_v1 = {RTW89_WDE_PG_64, 0, 640, 0,},
/* 8852C PCIE SCC */
.wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
.wde_size23 = {RTW89_WDE_PG_64, 1022, 2,},
@@ -1710,6 +1752,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_size18 = {RTW89_PLE_PG_128, 2544, 16,},
/* 8852C PCIE SCC */
.ple_size19 = {RTW89_PLE_PG_128, 1904, 16,},
+ .ple_size20_v1 = {RTW89_PLE_PG_128, 2554, 182, 40960,},
+ .ple_size22_v1 = {RTW89_PLE_PG_128, 2736, 0, 40960,},
/* 8852B USB2.0 SCC */
.ple_size32 = {RTW89_PLE_PG_128, 620, 20,},
/* 8852B USB3.0 SCC */
@@ -1721,6 +1765,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_qt0_v1 = {3302, 6, 0, 20,},
/* 8852A USB */
.wde_qt1 = {512, 196, 0, 60,},
+ .wde_qt3 = {0, 0, 0, 0,},
/* DLFW */
.wde_qt4 = {0, 0, 0, 0,},
/* PCIE 64 */
@@ -1733,6 +1778,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_qt17 = {0, 0, 0, 0,},
/* 8852C PCIE SCC */
.wde_qt18 = {3228, 60, 0, 40,},
+ .wde_qt19_v1 = {613, 6, 0, 20,},
.wde_qt23 = {958, 48, 0, 16,},
/* 8852B USB2.0/USB3.0 SCC */
.wde_qt25 = {152, 2, 0, 8,},
@@ -1744,6 +1790,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt4 = {264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,},
/* PCIE SCC */
.ple_qt5 = {264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,},
+ .ple_qt5_v2 = {0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,},
.ple_qt9 = {0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 1, 0, 0,},
/* DLFW */
.ple_qt13 = {0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0,},
@@ -1754,8 +1801,10 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt26 = {2654, 0, 1134, 48, 64, 13, 1478, 0, 64, 128, 120, 0,},
/* USB 52C USB3.0 */
.ple_qt42 = {1068, 0, 16, 48, 4, 13, 178, 0, 16, 1, 8, 16, 0,},
+ .ple_qt42_v2 = {91, 91, 32, 16, 19, 13, 91, 91, 44, 18, 1, 4, 0, 0,},
/* USB 52C USB3.0 */
.ple_qt43 = {3068, 0, 32, 48, 4, 13, 178, 0, 16, 1, 8, 16, 0,},
+ .ple_qt43_v2 = {645, 645, 32, 16, 2062, 2056, 2134, 2134, 2087, 2061, 1, 2047, 0, 0,},
/* DLFW 52C */
.ple_qt44 = {0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,},
/* DLFW 52C */
@@ -1789,8 +1838,13 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt_51b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
.ple_rsvd_qt0 = {2, 107, 107, 6, 6, 6, 6, 0, 0, 0,},
.ple_rsvd_qt1 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ .ple_rsvd_qt9 = {1, 44, 44, 6, 6, 6, 6, 69, 0, 0,},
.rsvd0_size0 = {212992, 0,},
+ .rsvd0_size6 = {40960, 0,},
.rsvd1_size0 = {587776, 2048,},
+ .rsvd1_size2 = {391168, 2048,},
+ .dle_input3 = {0, 0, 0, 16384, 0, 2048, 0, 0,},
+ .dle_input18 = {128, 128, 11454, 2048, 0, 2048, 24, 24,},
};
EXPORT_SYMBOL(rtw89_mac_size);
@@ -1811,6 +1865,7 @@ static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
}
mac->dle_info.rsvd_qt = cfg->rsvd_qt;
+ mac->dle_info.dle_input = cfg->dle_input;
mac->dle_info.ple_pg_size = cfg->ple_size->pge_size;
mac->dle_info.ple_free_pg = cfg->ple_size->lnk_pge_num;
mac->dle_info.qta_mode = mode;
@@ -2231,8 +2286,8 @@ error:
return ret;
}
-static int preload_init_set(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
- enum rtw89_qta_mode mode)
+static int preload_init_set_ax(struct rtw89_dev *rtwdev, u8 mac_idx,
+ enum rtw89_qta_mode mode)
{
u32 reg, max_preld_size, min_rsvd_size;
@@ -2260,13 +2315,14 @@ static bool is_qta_poh(struct rtw89_dev *rtwdev)
int rtw89_mac_preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
enum rtw89_qta_mode mode)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev) ||
!is_qta_poh(rtwdev))
return 0;
- return preload_init_set(rtwdev, mac_idx, mode);
+ return mac->preload_init(rtwdev, mac_idx, mode);
}
static bool dle_is_txq_empty(struct rtw89_dev *rtwdev)
@@ -3061,6 +3117,7 @@ static int rtw89_mac_setup_phycap_part0(struct rtw89_dev *rtwdev)
struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw89_mac_c2h_info c2h_info = {};
struct rtw89_hal *hal = &rtwdev->hal;
+ u8 protocol;
u8 tx_nss;
u8 rx_nss;
u8 tx_ant;
@@ -3108,6 +3165,10 @@ static int rtw89_mac_setup_phycap_part0(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_FW, "TX path diversity=%d\n", hal->tx_path_diversity);
rtw89_debug(rtwdev, RTW89_DBG_FW, "Antenna diversity=%d\n", hal->ant_diversity);
+ protocol = u32_get_bits(phycap->w1, RTW89_C2HREG_PHYCAP_W1_PROT);
+ if (protocol < RTW89_C2HREG_PHYCAP_W1_PROT_11BE)
+ hal->no_eht = true;
+
return 0;
}
@@ -3931,6 +3992,29 @@ static int rtw89_mac_feat_init(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_mac_reset_pwr_state_ax(struct rtw89_dev *rtwdev)
+{
+ u8 val;
+
+ val = rtw89_read32_mask(rtwdev, R_AX_IC_PWR_STATE, B_AX_WLMAC_PWR_STE_MASK);
+ if (val == MAC_AX_MAC_ON) {
+		/*
+		 * A USB adapter might first enumerate as a USB mass storage
+		 * device carrying its driver and then switch to a WiFi
+		 * adapter, so it can still be in the powered-on state when
+		 * the WiFi USB probe runs. Return -EAGAIN so the caller
+		 * powers it off and on again to reset the state.
+		 */
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_USB &&
+ !test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags))
+ return -EAGAIN;
+
+ rtw89_err(rtwdev, "MAC has already powered on\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
{
u32 val32;
@@ -4145,12 +4229,19 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb)
int rtw89_mac_preinit(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
int ret;
ret = rtw89_mac_pwr_on(rtwdev);
if (ret)
return ret;
+ if (mac->mac_func_en) {
+ ret = mac->mac_func_en(rtwdev);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -4283,6 +4374,12 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
R_AX_PORT_HGQ_WINDOW_CFG + 3},
};
+static const struct rtw89_mac_mu_gid_addr rtw89_mac_mu_gid_addr_ax = {
+ .position_en = {R_AX_GID_POSITION_EN0, R_AX_GID_POSITION_EN1},
+ .position = {R_AX_GID_POSITION0, R_AX_GID_POSITION1,
+ R_AX_GID_POSITION2, R_AX_GID_POSITION3},
+};
+
static void rtw89_mac_check_packet_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, u8 type)
{
@@ -4341,6 +4438,7 @@ static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev,
#define BCN_HOLD_DEF 200
#define BCN_MASK_DEF 0
#define TBTT_ERLY_DEF 5
+#define TBTT_AGG_DEF 1
#define BCN_SET_UNIT 32
#define BCN_ERLY_SET_DLY (10 * 2)
@@ -4644,6 +4742,16 @@ static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev,
B_AX_TBTTERLY_MASK, TBTT_ERLY_DEF);
}
+static void rtw89_mac_port_cfg_tbtt_agg(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ const struct rtw89_port_reg *p = mac->port_base;
+
+ rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_agg,
+ B_AX_TBTT_AGG_NUM_MASK, TBTT_AGG_DEF);
+}
+
static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
@@ -4904,6 +5012,7 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvi
rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif_link);
+ rtw89_mac_port_cfg_tbtt_agg(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_func_en(rtwdev, rtwvif_link, true);
@@ -5198,10 +5307,10 @@ rtw89_mac_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_l
if (start_detect)
return;
- ieee80211_connection_loss(vif);
- } else {
- rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
+ ieee80211_beacon_loss(vif);
}
+
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
return;
case RTW89_BCN_FLTR_NOTIFY:
nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
@@ -6358,9 +6467,11 @@ int rtw89_mac_cfg_plt_ax(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 reg = chip->btc_sb.n[0].cfg;
u32 fw_sb;
- fw_sb = rtw89_read32(rtwdev, R_AX_SCOREBOARD);
+ fw_sb = rtw89_read32(rtwdev, reg);
fw_sb = FIELD_GET(B_MAC_AX_SB_FW_MASK, fw_sb);
fw_sb = fw_sb & ~B_MAC_AX_BTGS1_NOTIFY;
if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags))
@@ -6371,13 +6482,16 @@ void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val)
val = B_AX_TOGGLE |
FIELD_PREP(B_MAC_AX_SB_DRV_MASK, val) |
FIELD_PREP(B_MAC_AX_SB_FW_MASK, fw_sb);
- rtw89_write32(rtwdev, R_AX_SCOREBOARD, val);
+ rtw89_write32(rtwdev, reg, val);
fsleep(1000); /* avoid BT FW losing information */
}
u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev)
{
- return rtw89_read32(rtwdev, R_AX_SCOREBOARD);
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 reg = chip->btc_sb.n[0].get;
+
+ return rtw89_read32(rtwdev, reg);
}
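Note: both scoreboard accessors now take the register from chip->btc_sb.n[0] instead of the hard-coded R_AX_SCOREBOARD. A minimal sketch of a per-chip initializer, with field names inferred from the accesses above (the actual layout may differ):

	/* illustrative fragment of a chip_info table entry */
	.btc_sb = {
		.n = {
			[0] = {
				.cfg = R_AX_SCOREBOARD,	/* written by rtw89_mac_cfg_sb() */
				.get = R_AX_SCOREBOARD,	/* read by rtw89_mac_get_sb() */
			},
		},
	},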
int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl)
@@ -6662,6 +6776,8 @@ void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev,
void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ const struct rtw89_mac_mu_gid_addr *addr = mac->mu_gid;
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
u8 mac_idx;
@@ -6681,20 +6797,20 @@ void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *
p = (__le32 *)conf->mu_group.membership;
rtw89_write32(rtwdev,
- rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION_EN0, mac_idx),
+ rtw89_mac_reg_by_idx(rtwdev, addr->position_en[0], mac_idx),
le32_to_cpu(p[0]));
rtw89_write32(rtwdev,
- rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION_EN1, mac_idx),
+ rtw89_mac_reg_by_idx(rtwdev, addr->position_en[1], mac_idx),
le32_to_cpu(p[1]));
p = (__le32 *)conf->mu_group.position;
- rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION0, mac_idx),
+ rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, addr->position[0], mac_idx),
le32_to_cpu(p[0]));
- rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION1, mac_idx),
+ rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, addr->position[1], mac_idx),
le32_to_cpu(p[1]));
- rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION2, mac_idx),
+ rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, addr->position[2], mac_idx),
le32_to_cpu(p[2]));
- rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION3, mac_idx),
+ rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, addr->position[3], mac_idx),
le32_to_cpu(p[3]));
}
@@ -6942,6 +7058,12 @@ int rtw89_mac_write_xtal_si_ax(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 m
return ret;
}
+ if (!test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags) &&
+ (u32_get_bits(val32, B_AX_WL_XTAL_SI_ADDR_MASK) != offset ||
+ u32_get_bits(val32, B_AX_WL_XTAL_SI_DATA_MASK) != val))
+ rtw89_warn(rtwdev, "xtal si write: offset=%x val=%x poll=%x\n",
+ offset, val, val32);
+
return 0;
}
@@ -6965,7 +7087,12 @@ int rtw89_mac_read_xtal_si_ax(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
return ret;
}
- *val = rtw89_read8(rtwdev, R_AX_WLAN_XTAL_SI_CTRL + 1);
+ if (!test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags) &&
+ u32_get_bits(val32, B_AX_WL_XTAL_SI_ADDR_MASK) != offset)
+ rtw89_warn(rtwdev, "xtal si read: offset=%x poll=%x\n",
+ offset, val32);
+
+ *val = u32_get_bits(val32, B_AX_WL_XTAL_SI_DATA_MASK);
return 0;
}
@@ -7163,6 +7290,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.port_base = &rtw89_port_base_ax,
.agg_len_ht = R_AX_AGG_LEN_HT_0,
.ps_status = R_AX_PPWRBIT_SETTING,
+ .mu_gid = &rtw89_mac_mu_gid_addr_ax,
.muedca_ctrl = {
.addr = R_AX_MUEDCA_EN,
@@ -7184,6 +7312,10 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.check_mac_en = rtw89_mac_check_mac_en_ax,
.sys_init = sys_init_ax,
.trx_init = trx_init_ax,
+ .preload_init = preload_init_set_ax,
+ .clr_aon_intr = NULL,
+ .err_imr_ctrl = err_imr_ctrl_ax,
+ .mac_func_en = NULL,
.hci_func_en = rtw89_mac_hci_func_en_ax,
.dmac_func_pre_en = rtw89_mac_dmac_func_pre_en_ax,
.dle_func_en = dle_func_en_ax,
@@ -7193,6 +7325,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_ax,
.cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_ax,
.cfg_phy_rpt = NULL,
+ .set_edcca_mode = NULL,
.dle_mix_cfg = dle_mix_cfg_ax,
.chk_dle_rdy = chk_dle_rdy_ax,
@@ -7206,6 +7339,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.set_cpuio = set_cpuio_ax,
.dle_quota_change = dle_quota_change_ax,
+ .reset_pwr_state = rtw89_mac_reset_pwr_state_ax,
.disable_cpu = rtw89_mac_disable_cpu_ax,
.fwdl_enable_wcpu = rtw89_mac_enable_cpu_ax,
.fwdl_get_status = rtw89_fw_get_rdy_ax,
@@ -7215,6 +7349,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.parse_phycap_map = rtw89_parse_phycap_map_ax,
.cnv_efuse_state = rtw89_cnv_efuse_state_ax,
.efuse_read_fw_secure = rtw89_efuse_read_fw_secure_ax,
+ .efuse_read_ecv = NULL,
.cfg_plt = rtw89_mac_cfg_plt_ax,
.get_plt_cnt = rtw89_mac_get_plt_cnt_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 0007229d6753..e71a71648ab8 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -914,6 +914,9 @@ enum mac_ax_err_info {
MAC_AX_ERR_L0_CFG_DIS_NOTIFY = 0x0011,
MAC_AX_ERR_L0_CFG_HANDSHAKE = 0x0012,
MAC_AX_ERR_L0_RCVY_EN = 0x0013,
+ MAC_AX_ERR_L0_RESET_FORCE = 0x0020,
+ MAC_AX_ERR_L0_RESET_FORCE_C1 = 0x0021,
+ MAC_AX_ERR_L1_RESET_FORCE = 0x0022,
MAC_AX_SET_ERR_MAX,
};
@@ -929,8 +932,10 @@ struct rtw89_mac_size_set {
const struct rtw89_dle_size wde_size6;
const struct rtw89_dle_size wde_size7;
const struct rtw89_dle_size wde_size9;
+ const struct rtw89_dle_size wde_size16_v1;
const struct rtw89_dle_size wde_size17;
const struct rtw89_dle_size wde_size18;
+ const struct rtw89_dle_size wde_size18_v1;
const struct rtw89_dle_size wde_size19;
const struct rtw89_dle_size wde_size23;
const struct rtw89_dle_size wde_size25;
@@ -946,18 +951,22 @@ struct rtw89_mac_size_set {
const struct rtw89_dle_size ple_size17;
const struct rtw89_dle_size ple_size18;
const struct rtw89_dle_size ple_size19;
+ const struct rtw89_dle_size ple_size20_v1;
+ const struct rtw89_dle_size ple_size22_v1;
const struct rtw89_dle_size ple_size32;
const struct rtw89_dle_size ple_size33;
const struct rtw89_dle_size ple_size34;
const struct rtw89_wde_quota wde_qt0;
const struct rtw89_wde_quota wde_qt1;
const struct rtw89_wde_quota wde_qt0_v1;
+ const struct rtw89_wde_quota wde_qt3;
const struct rtw89_wde_quota wde_qt4;
const struct rtw89_wde_quota wde_qt6;
const struct rtw89_wde_quota wde_qt7;
const struct rtw89_wde_quota wde_qt16;
const struct rtw89_wde_quota wde_qt17;
const struct rtw89_wde_quota wde_qt18;
+ const struct rtw89_wde_quota wde_qt19_v1;
const struct rtw89_wde_quota wde_qt23;
const struct rtw89_wde_quota wde_qt25;
const struct rtw89_wde_quota wde_qt31;
@@ -965,13 +974,16 @@ struct rtw89_mac_size_set {
const struct rtw89_ple_quota ple_qt1;
const struct rtw89_ple_quota ple_qt4;
const struct rtw89_ple_quota ple_qt5;
+ const struct rtw89_ple_quota ple_qt5_v2;
const struct rtw89_ple_quota ple_qt9;
const struct rtw89_ple_quota ple_qt13;
const struct rtw89_ple_quota ple_qt18;
const struct rtw89_ple_quota ple_qt25;
const struct rtw89_ple_quota ple_qt26;
const struct rtw89_ple_quota ple_qt42;
+ const struct rtw89_ple_quota ple_qt42_v2;
const struct rtw89_ple_quota ple_qt43;
+ const struct rtw89_ple_quota ple_qt43_v2;
const struct rtw89_ple_quota ple_qt44;
const struct rtw89_ple_quota ple_qt45;
const struct rtw89_ple_quota ple_qt46;
@@ -991,12 +1003,23 @@ struct rtw89_mac_size_set {
const struct rtw89_ple_quota ple_qt_51b_wow;
const struct rtw89_rsvd_quota ple_rsvd_qt0;
const struct rtw89_rsvd_quota ple_rsvd_qt1;
+ const struct rtw89_rsvd_quota ple_rsvd_qt1_v1;
+ const struct rtw89_rsvd_quota ple_rsvd_qt9;
const struct rtw89_dle_rsvd_size rsvd0_size0;
+ const struct rtw89_dle_rsvd_size rsvd0_size6;
const struct rtw89_dle_rsvd_size rsvd1_size0;
+ const struct rtw89_dle_rsvd_size rsvd1_size2;
+ const struct rtw89_dle_input dle_input3;
+ const struct rtw89_dle_input dle_input18;
};
extern const struct rtw89_mac_size_set rtw89_mac_size;
+struct rtw89_mac_mu_gid_addr {
+ u32 position_en[2];
+ u32 position[4];
+};
+
struct rtw89_mac_gen_def {
u32 band1_offset;
u32 filter_model_addr;
@@ -1007,6 +1030,7 @@ struct rtw89_mac_gen_def {
const struct rtw89_port_reg *port_base;
u32 agg_len_ht;
u32 ps_status;
+ const struct rtw89_mac_mu_gid_addr *mu_gid;
struct rtw89_reg_def muedca_ctrl;
struct rtw89_reg_def bfee_ctrl;
@@ -1019,6 +1043,11 @@ struct rtw89_mac_gen_def {
enum rtw89_mac_hwmod_sel sel);
int (*sys_init)(struct rtw89_dev *rtwdev);
int (*trx_init)(struct rtw89_dev *rtwdev);
+ int (*preload_init)(struct rtw89_dev *rtwdev, u8 mac_idx,
+ enum rtw89_qta_mode mode);
+ void (*clr_aon_intr)(struct rtw89_dev *rtwdev);
+ void (*err_imr_ctrl)(struct rtw89_dev *rtwdev, bool en);
+ int (*mac_func_en)(struct rtw89_dev *rtwdev);
void (*hci_func_en)(struct rtw89_dev *rtwdev);
void (*dmac_func_pre_en)(struct rtw89_dev *rtwdev);
void (*dle_func_en)(struct rtw89_dev *rtwdev, bool enable);
@@ -1033,6 +1062,7 @@ struct rtw89_mac_gen_def {
u8 mac_idx);
int (*cfg_ppdu_status)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
void (*cfg_phy_rpt)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
+ void (*set_edcca_mode)(struct rtw89_dev *rtwdev, u8 mac_idx, bool normal);
int (*dle_mix_cfg)(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg);
int (*chk_dle_rdy)(struct rtw89_dev *rtwdev, bool wde_or_ple);
@@ -1052,6 +1082,7 @@ struct rtw89_mac_gen_def {
struct rtw89_cpuio_ctrl *ctrl_para, bool wd);
int (*dle_quota_change)(struct rtw89_dev *rtwdev, bool band1_en);
+ int (*reset_pwr_state)(struct rtw89_dev *rtwdev);
void (*disable_cpu)(struct rtw89_dev *rtwdev);
int (*fwdl_enable_wcpu)(struct rtw89_dev *rtwdev, u8 boot_reason,
bool dlfw, bool include_bb);
@@ -1062,6 +1093,7 @@ struct rtw89_mac_gen_def {
int (*parse_phycap_map)(struct rtw89_dev *rtwdev);
int (*cnv_efuse_state)(struct rtw89_dev *rtwdev, bool idle);
int (*efuse_read_fw_secure)(struct rtw89_dev *rtwdev);
+ int (*efuse_read_ecv)(struct rtw89_dev *rtwdev);
int (*cfg_plt)(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
u16 (*get_plt_cnt)(struct rtw89_dev *rtwdev, u8 band);
@@ -1105,6 +1137,22 @@ u32 rtw89_mac_reg_by_idx(struct rtw89_dev *rtwdev, u32 reg_base, u8 band)
return band == 0 ? reg_base : (reg_base + mac->band1_offset);
}
+static inline void
+rtw89_write16_idx(struct rtw89_dev *rtwdev, u32 addr, u16 data, u8 band)
+{
+ addr = rtw89_mac_reg_by_idx(rtwdev, addr, band);
+
+ rtw89_write16(rtwdev, addr, data);
+}
+
+static inline void
+rtw89_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask, u32 data, u8 band)
+{
+ addr = rtw89_mac_reg_by_idx(rtwdev, addr, band);
+
+ rtw89_write32_mask(rtwdev, addr, mask, data);
+}
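Note: despite the parallel names, the two helpers differ: rtw89_write16_idx() stores the whole halfword, while rtw89_write32_idx() performs a masked read-modify-write. A usage sketch (the 32-bit register and mask names are placeholders):

	rtw89_write16_idx(rtwdev, R_BE_WMAC_ACK_BA_RESP_LEGACY, resp_ack, mac_idx);
	rtw89_write32_idx(rtwdev, R_BE_EXAMPLE_REG, B_BE_EXAMPLE_MASK, val, mac_idx);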
+
static inline
u32 rtw89_mac_reg_by_port(struct rtw89_dev *rtwdev, u32 base, u8 port, u8 mac_idx)
{
@@ -1218,6 +1266,14 @@ int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 band,
return mac->check_mac_en(rtwdev, band, sel);
}
+static inline void rtw89_mac_clr_aon_intr(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ if (mac->clr_aon_intr)
+ mac->clr_aon_intr(rtwdev);
+}
+
int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val);
int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val);
int rtw89_mac_dle_dfi_cfg(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl);
@@ -1340,6 +1396,24 @@ int rtw89_mac_cfg_ppdu_status_bands(struct rtw89_dev *rtwdev, bool enable)
return rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_1, enable);
}
+static inline
+void rtw89_mac_set_edcca_mode(struct rtw89_dev *rtwdev, u8 mac_idx, bool normal)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ if (!mac->set_edcca_mode)
+ return;
+
+ mac->set_edcca_mode(rtwdev, mac_idx, normal);
+}
+
+static inline
+void rtw89_mac_set_edcca_mode_bands(struct rtw89_dev *rtwdev, bool normal)
+{
+ rtw89_mac_set_edcca_mode(rtwdev, RTW89_MAC_0, normal);
+ rtw89_mac_set_edcca_mode(rtwdev, RTW89_MAC_1, normal);
+}
+
void rtw89_mac_set_rx_fltr(struct rtw89_dev *rtwdev, u8 mac_idx, u32 rx_fltr);
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev);
void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop);
@@ -1352,6 +1426,8 @@ int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
int rtw89_mac_cfg_gnt_v2(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+int rtw89_mac_cfg_gnt_v3(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
static inline
int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
@@ -1567,6 +1643,8 @@ enum rtw89_mac_xtal_si_offset {
XTAL_SI_APBT = 0xD1,
XTAL_SI_PLL = 0xE0,
XTAL_SI_PLL_1 = 0xE1,
+ XTAL_SI_CHIP_ID_L = 0xFD,
+ XTAL_SI_CHIP_ID_H = 0xFE,
};
static inline
@@ -1597,6 +1675,16 @@ int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev,
struct rtw89_mac_dle_rsvd_qt_cfg *cfg);
int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable);
+static inline int rtw89_mac_efuse_read_ecv(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ if (!mac->efuse_read_ecv)
+ return -ENOENT;
+
+ return mac->efuse_read_ecv(rtwdev);
+}
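Note: -ENOENT marks the op as absent rather than failed, so callers can distinguish "unsupported on this generation" from a real read error. A hypothetical caller sketch:

	ret = rtw89_mac_efuse_read_ecv(rtwdev);
	if (ret && ret != -ENOENT)
		return ret;		/* genuine efuse read failure */
	/* -ENOENT: no ECV support on this chip generation; carry on */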
+
static inline
void rtw89_fwdl_secure_idmem_share_mode(struct rtw89_dev *rtwdev, u8 mode)
{
@@ -1605,7 +1693,7 @@ void rtw89_fwdl_secure_idmem_share_mode(struct rtw89_dev *rtwdev, u8 mode)
if (!mac->fwdl_secure_idmem_share_mode)
return;
- return mac->fwdl_secure_idmem_share_mode(rtwdev, mode);
+ mac->fwdl_secure_idmem_share_mode(rtwdev, mode);
}
static inline
@@ -1719,4 +1807,16 @@ void rtw89_tx_rpt_skbs_purge(struct rtw89_dev *rtwdev)
rtw89_tx_rpt_tx_status(rtwdev, skbs[i],
RTW89_TX_MACID_DROP);
}
+
+static inline bool rtw89_mac_chk_preload_allow(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hci.type != RTW89_HCI_TYPE_PCIE)
+ return false;
+
+ if (rtwdev->chip->chip_id == RTL8922D && rtwdev->hal.cid == RTL8922D_CID7090)
+ return true;
+
+ return false;
+}
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index f39ca1c2ed10..315bb0d0759f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -127,6 +127,7 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev,
rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
rtwvif_link->rand_tsf_done = false;
rtwvif_link->detect_bcn_count = 0;
+ rtwvif_link->last_sync_bcn_tsf = 0;
rcu_read_lock();
@@ -719,7 +720,8 @@ static void rtw89_ops_vif_cfg_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_MLD_VALID_LINKS) {
struct rtw89_vif_link *cur = rtw89_get_designated_link(rtwvif);
- rtw89_chip_rfk_channel(rtwdev, cur);
+ if (RTW89_CHK_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY, &rtwdev->fw))
+ rtw89_chip_rfk_channel(rtwdev, cur);
if (hweight16(vif->active_links) == 1)
rtwvif->mlo_mode = RTW89_MLO_MODE_MLSR;
@@ -1436,9 +1438,9 @@ static void rtw89_ops_channel_switch_beacon(struct ieee80211_hw *hw,
BUILD_BUG_ON(RTW89_MLD_NON_STA_LINK_NUM != 1);
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "chsw bcn: find no link on HW-0\n");
+ rtw89_err(rtwdev, "chsw bcn: find no designated link\n");
return;
}
@@ -1612,12 +1614,23 @@ static int __rtw89_ops_set_vif_links(struct rtw89_dev *rtwdev,
return 0;
}
-static void rtw89_vif_cfg_fw_links(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif,
- unsigned long links, bool en)
+static void rtw89_vif_update_fw_links(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ u16 current_links, bool en)
{
+ struct rtw89_vif_ml_trans *trans = &rtwvif->ml_trans;
struct rtw89_vif_link *rtwvif_link;
unsigned int link_id;
+ unsigned long links;
+
+ /* Follow up only once all links being updated exist. */
+ if (current_links != trans->mediate_links)
+ return;
+
+ if (en)
+ links = trans->links_to_add;
+ else
+ links = trans->links_to_del;
for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
rtwvif_link = rtwvif->links[link_id];
@@ -1628,20 +1641,6 @@ static void rtw89_vif_cfg_fw_links(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_vif_update_fw_links(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif,
- u16 current_links)
-{
- struct rtw89_vif_ml_trans *trans = &rtwvif->ml_trans;
-
- /* Do follow-up when all updating links exist. */
- if (current_links != trans->mediate_links)
- return;
-
- rtw89_vif_cfg_fw_links(rtwdev, rtwvif, trans->links_to_del, false);
- rtw89_vif_cfg_fw_links(rtwdev, rtwvif, trans->links_to_add, true);
-}
-
static
int rtw89_ops_change_vif_links(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -1683,7 +1682,7 @@ int rtw89_ops_change_vif_links(struct ieee80211_hw *hw,
if (rtwdev->scanning)
rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
- rtw89_vif_update_fw_links(rtwdev, rtwvif, old_links);
+ rtw89_vif_update_fw_links(rtwdev, rtwvif, old_links, true);
if (!old_links)
__rtw89_ops_clr_vif_links(rtwdev, rtwvif,
@@ -1716,6 +1715,9 @@ int rtw89_ops_change_vif_links(struct ieee80211_hw *hw,
BIT(RTW89_VIF_IDLE_LINK_ID));
}
+ if (!ret)
+ rtw89_vif_update_fw_links(rtwdev, rtwvif, new_links, false);
+
rtw89_enter_ips_by_hwflags(rtwdev);
return ret;
}
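Note: folding the add/del decision into rtw89_vif_update_fw_links() makes the update two-phase: links_to_add is flushed while old_links still equals mediate_links, and links_to_del only after new_links is committed without error. A condensed sketch of the resulting order (the middle step stands in for the existing link-set commit, args elided):

	rtw89_vif_update_fw_links(rtwdev, rtwvif, old_links, true);	/* add phase */
	ret = __rtw89_ops_set_vif_links(rtwdev, rtwvif, ...);		/* commit */
	if (!ret)
		rtw89_vif_update_fw_links(rtwdev, rtwvif, new_links, false); /* del phase */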
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index 556e5f98e8d4..dc66b1ee851a 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -62,6 +62,12 @@ static const struct rtw89_port_reg rtw89_port_base_be = {
R_BE_PORT_HGQ_WINDOW_CFG + 3},
};
+static const struct rtw89_mac_mu_gid_addr rtw89_mac_mu_gid_addr_be = {
+ .position_en = {R_BE_GID_POSITION_EN0, R_BE_GID_POSITION_EN1},
+ .position = {R_BE_GID_POSITION0, R_BE_GID_POSITION1,
+ R_BE_GID_POSITION2, R_BE_GID_POSITION3},
+};
+
static int rtw89_mac_check_mac_en_be(struct rtw89_dev *rtwdev, u8 mac_idx,
enum rtw89_mac_hwmod_sel sel)
{
@@ -89,6 +95,7 @@ static void hfc_get_mix_info_be(struct rtw89_dev *rtwdev)
struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
struct rtw89_hfc_pub_info *info = &param->pub_info;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 val;
val = rtw89_read32(rtwdev, R_BE_PUB_PAGE_INFO1);
@@ -116,14 +123,23 @@ static void hfc_get_mix_info_be(struct rtw89_dev *rtwdev)
val = rtw89_read32(rtwdev, R_BE_CH_PAGE_CTRL);
prec_cfg->ch011_prec = u32_get_bits(val, B_BE_PREC_PAGE_CH011_V1_MASK);
+ if (chip->chip_id == RTL8922D)
+ prec_cfg->ch011_full_page = u32_get_bits(val, B_BE_FULL_WD_PG_MASK);
prec_cfg->h2c_prec = u32_get_bits(val, B_BE_PREC_PAGE_CH12_V1_MASK);
val = rtw89_read32(rtwdev, R_BE_PUB_PAGE_CTRL2);
pub_cfg->pub_max = u32_get_bits(val, B_BE_PUBPG_ALL_MASK);
val = rtw89_read32(rtwdev, R_BE_WP_PAGE_CTRL1);
- prec_cfg->wp_ch07_prec = u32_get_bits(val, B_BE_PREC_PAGE_WP_CH07_MASK);
- prec_cfg->wp_ch811_prec = u32_get_bits(val, B_BE_PREC_PAGE_WP_CH811_MASK);
+ if (chip->chip_id == RTL8922D) {
+ prec_cfg->wp_ch07_prec = u32_get_bits(val, B_BE_PREC_PAGE_WP_CH07_V1_MASK);
+ prec_cfg->wp_ch07_full_page = u32_get_bits(val, B_BE_FULL_PAGE_WP_CH07_MASK);
+ prec_cfg->wp_ch811_prec = u32_get_bits(val, B_BE_PREC_PAGE_WP_CH811_V1_MASK);
+ prec_cfg->wp_ch811_full_page = u32_get_bits(val, B_BE_FULL_PAGE_WP_CH811_MASK);
+ } else {
+ prec_cfg->wp_ch07_prec = u32_get_bits(val, B_BE_PREC_PAGE_WP_CH07_MASK);
+ prec_cfg->wp_ch811_prec = u32_get_bits(val, B_BE_PREC_PAGE_WP_CH811_MASK);
+ }
val = rtw89_read32(rtwdev, R_BE_WP_PAGE_CTRL2);
pub_cfg->wp_thrd = u32_get_bits(val, B_BE_WP_THRD_MASK);
@@ -148,17 +164,26 @@ static void hfc_mix_cfg_be(struct rtw89_dev *rtwdev)
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 val;
val = u32_encode_bits(prec_cfg->ch011_prec, B_BE_PREC_PAGE_CH011_V1_MASK) |
u32_encode_bits(prec_cfg->h2c_prec, B_BE_PREC_PAGE_CH12_V1_MASK);
+ if (chip->chip_id == RTL8922D)
+ val = u32_replace_bits(val, prec_cfg->ch011_full_page, B_BE_FULL_WD_PG_MASK);
rtw89_write32(rtwdev, R_BE_CH_PAGE_CTRL, val);
val = u32_encode_bits(pub_cfg->pub_max, B_BE_PUBPG_ALL_MASK);
rtw89_write32(rtwdev, R_BE_PUB_PAGE_CTRL2, val);
- val = u32_encode_bits(prec_cfg->wp_ch07_prec, B_BE_PREC_PAGE_WP_CH07_MASK) |
- u32_encode_bits(prec_cfg->wp_ch811_prec, B_BE_PREC_PAGE_WP_CH811_MASK);
+ if (chip->chip_id == RTL8922D)
+ val = u32_encode_bits(prec_cfg->wp_ch07_prec, B_BE_PREC_PAGE_WP_CH07_V1_MASK) |
+ u32_encode_bits(prec_cfg->wp_ch07_full_page, B_BE_FULL_PAGE_WP_CH07_MASK) |
+ u32_encode_bits(prec_cfg->wp_ch811_prec, B_BE_PREC_PAGE_WP_CH811_V1_MASK) |
+ u32_encode_bits(prec_cfg->wp_ch811_full_page, B_BE_FULL_PAGE_WP_CH811_MASK);
+ else
+ val = u32_encode_bits(prec_cfg->wp_ch07_prec, B_BE_PREC_PAGE_WP_CH07_MASK) |
+ u32_encode_bits(prec_cfg->wp_ch811_prec, B_BE_PREC_PAGE_WP_CH811_MASK);
rtw89_write32(rtwdev, R_BE_WP_PAGE_CTRL1, val);
val = u32_replace_bits(rtw89_read32(rtwdev, R_BE_HCI_FC_CTRL),
@@ -200,6 +225,9 @@ static void dle_func_en_be(struct rtw89_dev *rtwdev, bool enable)
static void dle_clk_en_be(struct rtw89_dev *rtwdev, bool enable)
{
+ if (rtwdev->chip->chip_id != RTL8922A)
+ return;
+
if (enable)
rtw89_write32_set(rtwdev, R_BE_DMAC_CLK_EN,
B_BE_DLE_WDE_CLK_EN | B_BE_DLE_PLE_CLK_EN);
@@ -331,6 +359,11 @@ static void ple_quota_cfg_be(struct rtw89_dev *rtwdev,
SET_QUOTA(cpu_io, PLE, 10);
SET_QUOTA(tx_rpt, PLE, 11);
SET_QUOTA(h2d, PLE, 12);
+
+ if (rtwdev->chip->chip_id == RTL8922A)
+ return;
+
+ SET_QUOTA(snrpt, PLE, 13);
}
static void rtw89_mac_hci_func_en_be(struct rtw89_dev *rtwdev)
@@ -341,6 +374,8 @@ static void rtw89_mac_hci_func_en_be(struct rtw89_dev *rtwdev)
static void rtw89_mac_dmac_func_pre_en_be(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 mask;
u32 val;
val = rtw89_read32(rtwdev, R_BE_HAXI_INIT_CFG1);
@@ -364,12 +399,12 @@ static void rtw89_mac_dmac_func_pre_en_be(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val);
- rtw89_write32_clr(rtwdev, R_BE_HAXI_DMA_STOP1,
- B_BE_STOP_CH0 | B_BE_STOP_CH1 | B_BE_STOP_CH2 |
- B_BE_STOP_CH3 | B_BE_STOP_CH4 | B_BE_STOP_CH5 |
- B_BE_STOP_CH6 | B_BE_STOP_CH7 | B_BE_STOP_CH8 |
- B_BE_STOP_CH9 | B_BE_STOP_CH10 | B_BE_STOP_CH11 |
- B_BE_STOP_CH12 | B_BE_STOP_CH13 | B_BE_STOP_CH14);
+ if (chip->chip_id == RTL8922A)
+ mask = B_BE_TX_STOP1_MASK;
+ else
+ mask = B_BE_TX_STOP1_MASK_V1;
+
+ rtw89_write32_clr(rtwdev, R_BE_HAXI_DMA_STOP1, mask);
rtw89_write32_set(rtwdev, R_BE_DMAC_TABLE_CTRL, B_BE_DMAC_ADDR_MODE);
}
@@ -396,6 +431,12 @@ int rtw89_mac_write_xtal_si_be(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 m
return ret;
}
+ if (!test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags) &&
+ (u32_get_bits(val32, B_BE_WL_XTAL_SI_ADDR_MASK) != offset ||
+ u32_get_bits(val32, B_BE_WL_XTAL_SI_DATA_MASK) != val))
+ rtw89_warn(rtwdev, "xtal si write: offset=%x val=%x poll=%x\n",
+ offset, val, val32);
+
return 0;
}
@@ -420,7 +461,141 @@ int rtw89_mac_read_xtal_si_be(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
return ret;
}
- *val = rtw89_read8(rtwdev, R_BE_WLAN_XTAL_SI_CTRL + 1);
+ if (!test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags) &&
+ u32_get_bits(val32, B_BE_WL_XTAL_SI_ADDR_MASK) != offset)
+ rtw89_warn(rtwdev, "xtal si read: offset=%x poll=%x\n",
+ offset, val32);
+
+ *val = u32_get_bits(val32, B_BE_WL_XTAL_SI_DATA_MASK);
+
+ return 0;
+}
+
+static int rtw89_mac_reset_pwr_state_be(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ int ret;
+
+ val32 = rtw89_read32(rtwdev, R_BE_SYSON_FSM_MON);
+ val32 &= WLAN_FSM_MASK;
+ val32 |= WLAN_FSM_SET;
+ rtw89_write32(rtwdev, R_BE_SYSON_FSM_MON, val32);
+
+ ret = read_poll_timeout(rtw89_read32_mask, val32, val32 == WLAN_FSM_IDLE,
+ 1000, 2000000, false,
+ rtwdev, R_BE_SYSON_FSM_MON, WLAN_FSM_STATE_MASK);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]Polling WLAN PMC timeout= %X\n", val32);
+ return ret;
+ }
+
+ val32 = rtw89_read32_mask(rtwdev, R_BE_IC_PWR_STATE, B_BE_WLMAC_PWR_STE_MASK);
+ if (val32 == MAC_AX_MAC_OFF) {
+ rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HAXIDMA_IO_EN);
+
+ ret = read_poll_timeout(rtw89_read32_mask, val32, !val32,
+ 1000, 2000000, false,
+ rtwdev, R_BE_HCI_OPT_CTRL,
+ B_BE_HAXIDMA_IO_ST | B_BE_HAXIDMA_BACKUP_RESTORE_ST);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]Polling HAXI IO timeout= %X\n", val32);
+ return ret;
+ }
+
+ rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HCI_WLAN_IO_EN);
+
+ ret = read_poll_timeout(rtw89_read32_mask, val32, !val32,
+ 1000, 2000000, false,
+ rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HCI_WLAN_IO_ST);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]Polling WLAN IO timeout= %X\n", val32);
+ return ret;
+ }
+
+ rtw89_write32_clr(rtwdev, R_BE_SYS_PW_CTRL, B_BE_EN_WLON);
+ rtw89_write32_clr(rtwdev, R_BE_SYS_PW_CTRL, B_BE_APFM_SWLPS);
+ } else if (val32 == MAC_AX_MAC_ON) {
+ rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HAXIDMA_IO_EN);
+
+ ret = read_poll_timeout(rtw89_read32_mask, val32, !val32,
+ 1000, 2000000, false,
+ rtwdev, R_BE_HCI_OPT_CTRL,
+ B_BE_HAXIDMA_IO_ST | B_BE_HAXIDMA_BACKUP_RESTORE_ST);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]Polling HAXI IO timeout= %X\n", val32);
+ return ret;
+ }
+
+ rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HCI_WLAN_IO_EN);
+
+ ret = read_poll_timeout(rtw89_read32_mask, val32, !val32,
+ 1000, 2000000, false,
+ rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HCI_WLAN_IO_ST);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]Polling WLAN IO timeout= %X\n", val32);
+ return ret;
+ }
+
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_EN_WLON);
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_APFM_OFFMAC);
+
+ ret = read_poll_timeout(rtw89_read32_mask, val32, val32 == MAC_AX_MAC_OFF,
+ 1000, 2000000, false,
+ rtwdev, R_BE_SYS_PW_CTRL, B_BE_APFM_OFFMAC);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]Polling MAC state timeout= %X\n", val32);
+ return ret;
+ }
+
+ rtw89_write32_clr(rtwdev, R_BE_SYS_PW_CTRL, B_BE_EN_WLON);
+ rtw89_write32_clr(rtwdev, R_BE_SYS_PW_CTRL, B_BE_APFM_SWLPS);
+ } else if (val32 == MAC_AX_MAC_LPS) {
+ rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HAXIDMA_IO_EN);
+
+ ret = read_poll_timeout(rtw89_read32_mask, val32, !val32,
+ 1000, 2000000, false,
+ rtwdev, R_BE_HCI_OPT_CTRL,
+ B_BE_HAXIDMA_IO_ST | B_BE_HAXIDMA_BACKUP_RESTORE_ST);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]Polling HAXI IO timeout= %X\n", val32);
+ return ret;
+ }
+
+ rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HCI_WLAN_IO_EN);
+
+ ret = read_poll_timeout(rtw89_read32_mask, val32, !val32,
+ 1000, 2000000, false,
+ rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HCI_WLAN_IO_ST);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]Polling WLAN IO timeout= %X\n", val32);
+ return ret;
+ }
+
+ rtw89_write32_set(rtwdev, R_BE_WLLPS_CTRL, B_BE_FORCE_LEAVE_LPS);
+
+ ret = read_poll_timeout(rtw89_read32_mask, val32, val32 == MAC_AX_MAC_ON,
+ 1000, 2000000, false,
+ rtwdev, R_BE_IC_PWR_STATE, B_BE_WLMAC_PWR_STE_MASK);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]Polling MAC STS timeout= %X\n", val32);
+ return ret;
+ }
+
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_EN_WLON);
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_APFM_OFFMAC);
+
+ ret = read_poll_timeout(rtw89_read32_mask, val32, val32 == MAC_AX_MAC_OFF,
+ 1000, 2000000, false,
+ rtwdev, R_BE_SYS_PW_CTRL, B_BE_APFM_OFFMAC);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]Polling MAC state timeout= %X\n", val32);
+ return ret;
+ }
+
+ rtw89_write32_clr(rtwdev, R_BE_WLLPS_CTRL, B_BE_FORCE_LEAVE_LPS);
+ rtw89_write32_clr(rtwdev, R_BE_SYS_PW_CTRL, B_BE_EN_WLON);
+ rtw89_write32_clr(rtwdev, R_BE_SYS_PW_CTRL, B_BE_APFM_SWLPS);
+ }
return 0;
}
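Note: all three power-state branches above open with the same HAXI/WLAN IO quiesce sequence. A possible factoring (sketch only, not part of this patch):

	static int rtw89_mac_quiesce_hci_io_be(struct rtw89_dev *rtwdev)
	{
		u32 val32;
		int ret;

		rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HAXIDMA_IO_EN);
		ret = read_poll_timeout(rtw89_read32_mask, val32, !val32,
					1000, 2000000, false,
					rtwdev, R_BE_HCI_OPT_CTRL,
					B_BE_HAXIDMA_IO_ST | B_BE_HAXIDMA_BACKUP_RESTORE_ST);
		if (ret)
			return ret;

		rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HCI_WLAN_IO_EN);
		return read_poll_timeout(rtw89_read32_mask, val32, !val32,
					 1000, 2000000, false,
					 rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HCI_WLAN_IO_ST);
	}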
@@ -439,7 +614,8 @@ static void rtw89_mac_disable_cpu_be(struct rtw89_dev *rtwdev)
val32 &= B_BE_RUN_ENV_MASK;
rtw89_write32(rtwdev, R_BE_WCPU_FW_CTRL, val32);
- rtw89_write32_set(rtwdev, R_BE_DCPU_PLATFORM_ENABLE, B_BE_DCPU_PLATFORM_EN);
+ if (rtwdev->chip->chip_id == RTL8922A)
+ rtw89_write32_set(rtwdev, R_BE_DCPU_PLATFORM_ENABLE, B_BE_DCPU_PLATFORM_EN);
rtw89_write32(rtwdev, R_BE_UDM0, 0);
rtw89_write32(rtwdev, R_BE_HALT_C2H, 0);
@@ -585,31 +761,125 @@ static int rtw89_fwdl_check_path_ready_be(struct rtw89_dev *rtwdev,
static int dmac_func_en_be(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->chip_id == RTL8922A)
+ return 0;
+
+ rtw89_write32_set(rtwdev, R_BE_DMAC_FUNC_EN,
+ B_BE_MAC_FUNC_EN | B_BE_DMAC_FUNC_EN |
+ B_BE_MPDU_PROC_EN | B_BE_WD_RLS_EN |
+ B_BE_DLE_WDE_EN | B_BE_TXPKT_CTRL_EN |
+ B_BE_STA_SCH_EN | B_BE_DLE_PLE_EN |
+ B_BE_PKT_BUF_EN | B_BE_DMAC_TBL_EN |
+ B_BE_PKT_IN_EN | B_BE_DLE_CPUIO_EN |
+ B_BE_DISPATCHER_EN | B_BE_BBRPT_EN |
+ B_BE_MAC_SEC_EN | B_BE_H_AXIDMA_EN |
+ B_BE_DMAC_MLO_EN | B_BE_PLRLS_EN |
+ B_BE_P_AXIDMA_EN | B_BE_DLE_DATACPUIO_EN);
+
+ return 0;
+}
+
+static int cmac_share_func_en_be(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->chip_id == RTL8922A)
+ return 0;
+
+ rtw89_write32_set(rtwdev, R_BE_CMAC_SHARE_FUNC_EN,
+ B_BE_CMAC_SHARE_EN | B_BE_RESPBA_EN |
+ B_BE_ADDRSRCH_EN | B_BE_BTCOEX_EN);
+
+ return 0;
+}
+
+static int cmac_pwr_en_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
+{
+ if (mac_idx > RTW89_MAC_1)
+ return -EINVAL;
+
+ if (mac_idx == RTW89_MAC_0) {
+ if (en == test_bit(RTW89_FLAG_CMAC0_PWR, rtwdev->flags))
+ return 0;
+
+ if (en) {
+ rtw89_write32_set(rtwdev, R_BE_AFE_CTRL1,
+ B_BE_R_SYM_WLCMAC0_ALL_EN);
+ rtw89_write32_clr(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_R_SYM_ISO_CMAC02PP);
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_CMAC0_FEN);
+
+ set_bit(RTW89_FLAG_CMAC0_PWR, rtwdev->flags);
+ } else {
+ rtw89_write32_clr(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_CMAC0_FEN);
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_R_SYM_ISO_CMAC02PP);
+ rtw89_write32_clr(rtwdev, R_BE_AFE_CTRL1,
+ B_BE_R_SYM_WLCMAC0_ALL_EN);
+
+ clear_bit(RTW89_FLAG_CMAC0_PWR, rtwdev->flags);
+ }
+ } else {
+ if (en == test_bit(RTW89_FLAG_CMAC1_PWR, rtwdev->flags))
+ return 0;
+
+ if (en) {
+ rtw89_write32_set(rtwdev, R_BE_AFE_CTRL1,
+ B_BE_R_SYM_WLCMAC1_ALL_EN);
+ rtw89_write32_clr(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_R_SYM_ISO_CMAC12PP);
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_CMAC1_FEN);
+
+ set_bit(RTW89_FLAG_CMAC1_PWR, rtwdev->flags);
+ } else {
+ rtw89_write32_clr(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_CMAC1_FEN);
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_R_SYM_ISO_CMAC12PP);
+ rtw89_write32_clr(rtwdev, R_BE_AFE_CTRL1,
+ B_BE_R_SYM_WLCMAC1_ALL_EN);
+
+ clear_bit(RTW89_FLAG_CMAC1_PWR, rtwdev->flags);
+ }
+ }
+
return 0;
}
static int cmac_func_en_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
{
+ enum rtw89_flags pwr_flag, func_flag;
u32 reg;
if (mac_idx > RTW89_MAC_1)
return -EINVAL;
- if (mac_idx == RTW89_MAC_0)
+ if (mac_idx == RTW89_MAC_0) {
+ pwr_flag = RTW89_FLAG_CMAC0_PWR;
+ func_flag = RTW89_FLAG_CMAC0_FUNC;
+ } else {
+ pwr_flag = RTW89_FLAG_CMAC1_PWR;
+ func_flag = RTW89_FLAG_CMAC1_FUNC;
+ }
+
+ if (!test_bit(pwr_flag, rtwdev->flags)) {
+ rtw89_warn(rtwdev, "CMAC %u power cut did not release\n", mac_idx);
return 0;
+ }
if (en) {
- rtw89_write32_set(rtwdev, R_BE_AFE_CTRL1, B_BE_AFE_CTRL1_SET);
- rtw89_write32_clr(rtwdev, R_BE_SYS_ISO_CTRL_EXTEND, B_BE_R_SYM_ISO_CMAC12PP);
- rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_CMAC1_FEN);
-
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CK_EN, mac_idx);
rtw89_write32_set(rtwdev, reg, B_BE_CK_EN_SET);
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CMAC_FUNC_EN, mac_idx);
rtw89_write32_set(rtwdev, reg, B_BE_CMAC_FUNC_EN_SET);
- set_bit(RTW89_FLAG_CMAC1_FUNC, rtwdev->flags);
+ set_bit(func_flag, rtwdev->flags);
} else {
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CMAC_FUNC_EN, mac_idx);
rtw89_write32_clr(rtwdev, reg, B_BE_CMAC_FUNC_EN_SET);
@@ -617,11 +887,7 @@ static int cmac_func_en_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CK_EN, mac_idx);
rtw89_write32_clr(rtwdev, reg, B_BE_CK_EN_SET);
- rtw89_write32_clr(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_CMAC1_FEN);
- rtw89_write32_set(rtwdev, R_BE_SYS_ISO_CTRL_EXTEND, B_BE_R_SYM_ISO_CMAC12PP);
- rtw89_write32_clr(rtwdev, R_BE_AFE_CTRL1, B_BE_AFE_CTRL1_SET);
-
- clear_bit(RTW89_FLAG_CMAC1_FUNC, rtwdev->flags);
+ clear_bit(func_flag, rtwdev->flags);
}
return 0;
@@ -640,6 +906,14 @@ static int sys_init_be(struct rtw89_dev *rtwdev)
if (ret)
return ret;
+ ret = cmac_share_func_en_be(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = cmac_pwr_en_be(rtwdev, RTW89_MAC_0, true);
+ if (ret)
+ return ret;
+
ret = cmac_func_en_be(rtwdev, RTW89_MAC_0, true);
if (ret)
return ret;
@@ -651,11 +925,53 @@ static int sys_init_be(struct rtw89_dev *rtwdev)
return ret;
}
+static int mac_func_en_be(struct rtw89_dev *rtwdev)
+{
+ u32 val;
+ int ret;
+
+ ret = dmac_func_en_be(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = cmac_share_func_en_be(rtwdev);
+ if (ret)
+ return ret;
+
+ val = rtw89_read32(rtwdev, R_BE_FEN_RST_ENABLE);
+ if (val & B_BE_CMAC0_FEN) {
+ ret = cmac_pwr_en_be(rtwdev, RTW89_MAC_0, true);
+ if (ret)
+ return ret;
+
+ ret = cmac_func_en_be(rtwdev, RTW89_MAC_0, true);
+ if (ret)
+ return ret;
+ }
+
+ if (val & B_BE_CMAC1_FEN) {
+ ret = cmac_pwr_en_be(rtwdev, RTW89_MAC_1, true);
+ if (ret)
+ return ret;
+
+ ret = cmac_func_en_be(rtwdev, RTW89_MAC_1, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int sta_sch_init_be(struct rtw89_dev *rtwdev)
{
u32 p_val;
int ret;
+ if (rtwdev->chip->chip_id == RTL8922D) {
+ rtw89_write32_set(rtwdev, R_BE_SS_LITE_TXL_MACID, B_BE_RPT_OTHER_BAND_EN);
+ return 0;
+ }
+
ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
if (ret)
return ret;
@@ -685,14 +1001,16 @@ static int mpdu_proc_init_be(struct rtw89_dev *rtwdev)
return ret;
rtw89_write32_set(rtwdev, R_BE_MPDU_PROC, B_BE_APPEND_FCS);
- rtw89_write32(rtwdev, R_BE_CUT_AMSDU_CTRL, TRXCFG_MPDU_PROC_CUT_CTRL);
+ rtw89_write32(rtwdev, R_BE_CUT_AMSDU_CTRL, TRXCFG_MPDU_PROC_CUT_CTRL |
+ B_BE_CA_CHK_ADDRCAM_EN);
val32 = rtw89_read32(rtwdev, R_BE_HDR_SHCUT_SETTING);
val32 |= (B_BE_TX_HW_SEQ_EN | B_BE_TX_HW_ACK_POLICY_EN | B_BE_TX_MAC_MPDU_PROC_EN);
val32 &= ~B_BE_TX_ADDR_MLD_TO_LIK;
rtw89_write32_set(rtwdev, R_BE_HDR_SHCUT_SETTING, val32);
- rtw89_write32(rtwdev, R_BE_RX_HDRTRNS, TRXCFG_MPDU_PROC_RX_HDR_CONV);
+ rtw89_write32(rtwdev, R_BE_RX_HDRTRNS, TRXCFG_MPDU_PROC_RX_HDR_CONV |
+ B_BE_HC_ADDR_HIT_EN);
val32 = rtw89_read32(rtwdev, R_BE_DISP_FWD_WLAN_0);
val32 = u32_replace_bits(val32, 1, B_BE_FWD_WLAN_CPU_TYPE_0_DATA_MASK);
@@ -728,7 +1046,10 @@ static int sec_eng_init_be(struct rtw89_dev *rtwdev)
static int txpktctrl_init_be(struct rtw89_dev *rtwdev)
{
+ struct rtw89_mac_info *mac = &rtwdev->mac;
struct rtw89_mac_dle_rsvd_qt_cfg qt_cfg;
+ const struct rtw89_dle_input *dle_input;
+ u32 mpdu_info_b1_ofst;
u32 val32;
int ret;
@@ -739,9 +1060,16 @@ static int txpktctrl_init_be(struct rtw89_dev *rtwdev)
return ret;
}
+ dle_input = mac->dle_info.dle_input;
+ if (dle_input)
+ mpdu_info_b1_ofst = DIV_ROUND_UP(dle_input->mpdu_info_tbl_b0,
+ BIT(MPDU_INFO_TBL_FACTOR));
+ else
+ mpdu_info_b1_ofst = MPDU_INFO_B1_OFST;
+
val32 = rtw89_read32(rtwdev, R_BE_TXPKTCTL_MPDUINFO_CFG);
val32 = u32_replace_bits(val32, qt_cfg.pktid, B_BE_MPDUINFO_PKTID_MASK);
- val32 = u32_replace_bits(val32, MPDU_INFO_B1_OFST, B_BE_MPDUINFO_B1_BADDR_MASK);
+ val32 = u32_replace_bits(val32, mpdu_info_b1_ofst, B_BE_MPDUINFO_B1_BADDR_MASK);
val32 |= B_BE_MPDUINFO_FEN;
rtw89_write32(rtwdev, R_BE_TXPKTCTL_MPDUINFO_CFG, val32);
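Note: the B1 table base is the band-0 table size expressed in BIT(MPDU_INFO_TBL_FACTOR)-sized units, rounded up. A worked example with assumed numbers (the real values come from the per-chip dle_input tables):

	/* if dle_input->mpdu_info_tbl_b0 == 0x180 and MPDU_INFO_TBL_FACTOR == 3,
	 * the unit is BIT(3) = 8, so:
	 *	mpdu_info_b1_ofst = DIV_ROUND_UP(0x180, 8) = 0x30
	 * i.e. band 1 starts right after band 0's table, rounded up to a unit.
	 */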
@@ -750,7 +1078,9 @@ static int txpktctrl_init_be(struct rtw89_dev *rtwdev)
static int mlo_init_be(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 val32;
+ u32 reg;
int ret;
val32 = rtw89_read32(rtwdev, R_BE_MLO_INIT_CTL);
@@ -766,7 +1096,12 @@ static int mlo_init_be(struct rtw89_dev *rtwdev)
if (ret)
rtw89_err(rtwdev, "[MLO]%s: MLO init polling timeout\n", __func__);
- rtw89_write32_set(rtwdev, R_BE_SS_CTRL, B_BE_MLO_HW_CHGLINK_EN);
+ if (chip->chip_id == RTL8922A)
+ reg = R_BE_SS_CTRL;
+ else
+ reg = R_BE_SS_CTRL_V1;
+
+ rtw89_write32_set(rtwdev, reg, B_BE_MLO_HW_CHGLINK_EN);
rtw89_write32_set(rtwdev, R_BE_CMAC_SHARE_ACQCHK_CFG_0, B_BE_R_MACID_ACQ_CHK_EN);
return ret;
@@ -829,6 +1164,7 @@ static int dmac_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
static int scheduler_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 val32;
u32 reg;
int ret;
@@ -837,6 +1173,11 @@ static int scheduler_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
if (ret)
return ret;
+ if (chip->chip_id == RTL8922D) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_SCH_EXT_CTRL, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_BE_CWCNT_PLUS_MODE);
+ }
+
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_HE_CTN_CHK_CCA_NAV, mac_idx);
val32 = B_BE_HE_CTN_CHK_CCA_P20 | B_BE_HE_CTN_CHK_EDCCA_P20 |
B_BE_HE_CTN_CHK_CCA_BITMAP | B_BE_HE_CTN_CHK_EDCCA_BITMAP |
@@ -870,6 +1211,11 @@ static int scheduler_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write32_mask(rtwdev, reg, B_BE_BCNQ_CW_MASK, 0x32);
rtw89_write32_mask(rtwdev, reg, B_BE_BCNQ_AIFS_MASK, BCN_IFS_25US);
+ if (chip->chip_id == RTL8922D) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_SCH_EDCA_RST_CFG, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_BE_TX_NAV_RST_EDCA_EN);
+ }
+
return 0;
}
@@ -985,6 +1331,12 @@ static int nav_ctrl_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write32(rtwdev, reg, val32);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_SPECIAL_TX_SETTING, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_BE_BMC_NAV_PROTECT);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_0, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_BE_WMAC_MBA_DUR_FORCE);
+
return 0;
}
@@ -1008,14 +1360,23 @@ static int spatial_reuse_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
static int tmac_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_hal *hal = &rtwdev->hal;
u32 reg;
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TB_PPDU_CTRL, mac_idx);
rtw89_write32_clr(rtwdev, reg, B_BE_QOSNULL_UPD_MUEDCA_EN);
- reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMTX_TCR_BE_4, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_BE_EHT_HE_PPDU_4XLTF_ZLD_USTIMER_MASK, 0x12);
- rtw89_write32_mask(rtwdev, reg, B_BE_EHT_HE_PPDU_2XLTF_ZLD_USTIMER_MASK, 0xe);
+ if (chip->chip_id == RTL8922A) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMTX_TCR_BE_4, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_EHT_HE_PPDU_4XLTF_ZLD_USTIMER_MASK, 0x12);
+ rtw89_write32_mask(rtwdev, reg, B_BE_EHT_HE_PPDU_2XLTF_ZLD_USTIMER_MASK, 0xe);
+ }
+
+ if (chip->chip_id == RTL8922D && hal->cid != RTL8922D_CID7090) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_COMMON_PHYINTF_CTRL_0, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, CLEAR_DTOP_DIS);
+ }
return 0;
}
@@ -1040,6 +1401,15 @@ static int trxptcl_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
val32 &= ~B_BE_MACLBK_EN;
rtw89_write32(rtwdev, reg, val32);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CMAC_FUNC_EN, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_BE_PHYINTF_EN);
+
+ if (chip->chip_id == RTL8922D) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RX_PLCP_EXT_OPTION_2, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_BE_PLCP_PHASE_B_CRC_CHK_EN |
+ B_BE_PLCP_PHASE_A_CRC_CHK_EN);
+ }
+
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_0, mac_idx);
val32 = rtw89_read32(rtwdev, reg);
val32 = u32_replace_bits(val32, WMAC_SPEC_SIFS_CCK,
@@ -1109,6 +1479,7 @@ static int rst_bacam_be(struct rtw89_dev *rtwdev)
static int rmac_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 rx_min_qta, rx_max_len, rx_max_pg;
u16 val16;
u32 reg;
@@ -1152,6 +1523,17 @@ static int rmac_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RX_PLCP_EXT_OPTION_1, mac_idx);
rtw89_write16_set(rtwdev, reg, B_BE_PLCP_SU_PSDU_LEN_SRC);
+ if (chip->chip_id == RTL8922D) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_BSR_UPD_CTRL, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_BE_QSIZE_RULE);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RXGCK_CTRL, mac_idx);
+ rtw89_write16_mask(rtwdev, reg, B_BE_RXGCK_GCK_RATE_LIMIT_MASK, RX_GCK_LEGACY);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PLCP_HDR_FLTR, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_BE_DIS_CHK_MIN_LEN);
+ }
+
return 0;
}
@@ -1175,7 +1557,7 @@ static int resp_pktctl_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RESP_CSI_RESERVED_PAGE, mac_idx);
rtw89_write32_mask(rtwdev, reg, B_BE_CSI_RESERVED_START_PAGE_MASK, qt_cfg.pktid);
- rtw89_write32_mask(rtwdev, reg, B_BE_CSI_RESERVED_PAGE_NUM_MASK, qt_cfg.pg_num);
+ rtw89_write32_mask(rtwdev, reg, B_BE_CSI_RESERVED_PAGE_NUM_MASK, qt_cfg.pg_num + 1);
return 0;
}
@@ -1210,6 +1592,7 @@ static int cmac_com_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
static int ptcl_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 val32;
u8 val8;
u32 reg;
@@ -1224,8 +1607,9 @@ static int ptcl_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
val32 = rtw89_read32(rtwdev, reg);
val32 = u32_replace_bits(val32, S_AX_CTS2S_TH_1K,
B_BE_HW_CTS2SELF_PKT_LEN_TH_MASK);
- val32 = u32_replace_bits(val32, S_AX_CTS2S_TH_SEC_256B,
- B_BE_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK);
+ if (chip->chip_id == RTL8922A)
+ val32 = u32_replace_bits(val32, S_AX_CTS2S_TH_SEC_256B,
+ B_BE_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK);
val32 |= B_BE_HW_CTS2SELF_EN;
rtw89_write32(rtwdev, reg, val32);
@@ -1246,7 +1630,46 @@ static int ptcl_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write8(rtwdev, reg, val8);
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_AMPDU_AGG_LIMIT, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_BE_AMPDU_MAX_TIME_MASK, AMPDU_MAX_TIME);
+ if (chip->chip_id == RTL8922A)
+ val32 = AMPDU_MAX_TIME;
+ else
+ val32 = AMPDU_MAX_TIME_V1;
+ rtw89_write32_mask(rtwdev, reg, B_BE_AMPDU_MAX_TIME_MASK, val32);
+
+ if (chip->chip_id == RTL8922D) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_AGG_BK_0, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_BE_WDBK_CFG | B_BE_EN_RTY_BK |
+ B_BE_EN_RTY_BK_COD);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_AMPDU_AGG_LIMIT, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_MAX_AGG_NUM_MASK,
+ MAX_TX_AMPDU_NUM_V1 - 1);
+ }
+
+ if (rtw89_mac_chk_preload_allow(rtwdev)) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_AGG_BK_0, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_BE_PRELD_MGQ0_EN |
+ B_BE_PRELD_HIQ_P4_EN |
+ B_BE_PRELD_HIQ_P3_EN |
+ B_BE_PRELD_HIQ_P2_EN |
+ B_BE_PRELD_HIQ_P1_EN |
+ B_BE_PRELD_HIQ_P0MB15_EN |
+ B_BE_PRELD_HIQ_P0MB14_EN |
+ B_BE_PRELD_HIQ_P0MB13_EN |
+ B_BE_PRELD_HIQ_P0MB12_EN |
+ B_BE_PRELD_HIQ_P0MB11_EN |
+ B_BE_PRELD_HIQ_P0MB10_EN |
+ B_BE_PRELD_HIQ_P0MB9_EN |
+ B_BE_PRELD_HIQ_P0MB8_EN |
+ B_BE_PRELD_HIQ_P0MB7_EN |
+ B_BE_PRELD_HIQ_P0MB6_EN |
+ B_BE_PRELD_HIQ_P0MB5_EN |
+ B_BE_PRELD_HIQ_P0MB4_EN |
+ B_BE_PRELD_HIQ_P0MB3_EN |
+ B_BE_PRELD_HIQ_P0MB2_EN |
+ B_BE_PRELD_HIQ_P0MB1_EN |
+ B_BE_PRELD_HIQ_P0_EN);
+ }
return 0;
}
@@ -1533,22 +1956,22 @@ static int dle_quota_change_be(struct rtw89_dev *rtwdev, bool band1_en)
static int preload_init_be(struct rtw89_dev *rtwdev, u8 mac_idx,
enum rtw89_qta_mode mode)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 max_preld_size, min_rsvd_size;
+ u8 preld_acq, preld_miscq;
u32 val32;
u32 reg;
+ if (!(chip->chip_id == RTL8922A || rtw89_mac_chk_preload_allow(rtwdev)))
+ return 0;
+
max_preld_size = mac_idx == RTW89_MAC_0 ?
PRELD_B0_ENT_NUM : PRELD_B1_ENT_NUM;
+ if (chip->chip_id == RTL8922D)
+ max_preld_size = PRELD_B01_ENT_NUM_8922D;
max_preld_size *= PRELD_AMSDU_SIZE;
+ min_rsvd_size = PRELD_NEXT_MIN_SIZE;
- reg = mac_idx == RTW89_MAC_0 ? R_BE_TXPKTCTL_B0_PRELD_CFG0 :
- R_BE_TXPKTCTL_B1_PRELD_CFG0;
- val32 = rtw89_read32(rtwdev, reg);
- val32 = u32_replace_bits(val32, max_preld_size, B_BE_B0_PRELD_USEMAXSZ_MASK);
- val32 |= B_BE_B0_PRELD_FEN;
- rtw89_write32(rtwdev, reg, val32);
-
- min_rsvd_size = PRELD_AMSDU_SIZE;
reg = mac_idx == RTW89_MAC_0 ? R_BE_TXPKTCTL_B0_PRELD_CFG1 :
R_BE_TXPKTCTL_B1_PRELD_CFG1;
val32 = rtw89_read32(rtwdev, reg);
@@ -1556,9 +1979,36 @@ static int preload_init_be(struct rtw89_dev *rtwdev, u8 mac_idx,
val32 = u32_replace_bits(val32, min_rsvd_size, B_BE_B0_PRELD_NXT_RSVMINSZ_MASK);
rtw89_write32(rtwdev, reg, val32);
+ reg = mac_idx == RTW89_MAC_0 ? R_BE_TXPKTCTL_B0_PRELD_CFG0 :
+ R_BE_TXPKTCTL_B1_PRELD_CFG0;
+ if (chip->chip_id == RTL8922D) {
+ preld_acq = PRELD_ACQ_ENT_NUM_8922D;
+ preld_miscq = PRELD_MISCQ_ENT_NUM_8922D;
+ } else {
+ preld_acq = mac_idx == RTW89_MAC_0 ? PRELD_B0_ACQ_ENT_NUM_8922A :
+ PRELD_B1_ACQ_ENT_NUM_8922A;
+ preld_miscq = PRELD_MISCQ_ENT_NUM_8922A;
+ }
+
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 = u32_replace_bits(val32, preld_acq, B_BE_B0_PRELD_CAM_G0ENTNUM_MASK);
+ val32 = u32_replace_bits(val32, preld_miscq, B_BE_B0_PRELD_CAM_G1ENTNUM_MASK);
+ val32 = u32_replace_bits(val32, max_preld_size, B_BE_B0_PRELD_USEMAXSZ_MASK);
+ val32 |= B_BE_B0_PRELD_FEN;
+ rtw89_write32(rtwdev, reg, val32);
+
return 0;
}
+static void clr_aon_intr_be(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hci.type != RTW89_HCI_TYPE_PCIE)
+ return;
+
+ rtw89_write32_clr(rtwdev, R_BE_FWS0IMR, B_BE_FS_GPIOA_INT_EN);
+ rtw89_write32_set(rtwdev, R_BE_FWS0ISR, B_BE_FS_GPIOA_INT);
+}
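Note: writing 1 into the ISR bit via rtw89_write32_set() suggests a write-1-to-clear status register (an inference from the IMR/ISR naming, not stated by this patch). Restated with explanatory comments:

	/* inferred write-1-to-clear pattern:
	 * 1) mask the source so it cannot re-assert
	 * 2) write 1 to ack any latched status
	 */
	rtw89_write32_clr(rtwdev, R_BE_FWS0IMR, B_BE_FS_GPIOA_INT_EN);	/* mask */
	rtw89_write32_set(rtwdev, R_BE_FWS0ISR, B_BE_FS_GPIOA_INT);	/* ack (W1C) */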
+
static int dbcc_bb_ctrl_be(struct rtw89_dev *rtwdev, bool bb1_en)
{
u32 set = B_BE_FEN_BB1PLAT_RSTB | B_BE_FEN_BB1_IP_RSTN;
@@ -1588,6 +2038,10 @@ static int enable_imr_be(struct rtw89_dev *rtwdev, u8 mac_idx,
else
return -EINVAL;
+ if (chip->chip_id == RTL8922D)
+ rtw89_write32_mask(rtwdev, R_BE_NO_RX_ERR_CFG,
+ B_BE_NO_RX_ERR_TO_MASK, 0);
+
for (i = 0; i < table->n_regs; i++) {
reg = &table->regs[i];
addr = rtw89_mac_reg_by_idx(rtwdev, reg->addr, mac_idx);
@@ -1638,6 +2092,12 @@ static int band1_enable_be(struct rtw89_dev *rtwdev)
return ret;
}
+ ret = cmac_pwr_en_be(rtwdev, RTW89_MAC_1, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d pwr en %d\n", RTW89_MAC_1, ret);
+ return ret;
+ }
+
ret = cmac_func_en_be(rtwdev, RTW89_MAC_1, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d func en %d\n", RTW89_MAC_1, ret);
@@ -1681,6 +2141,12 @@ static int band1_disable_be(struct rtw89_dev *rtwdev)
return ret;
}
+ ret = cmac_pwr_en_be(rtwdev, RTW89_MAC_1, false);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d pwr dis %d\n", RTW89_MAC_1, ret);
+ return ret;
+ }
+
ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, false);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
@@ -1731,26 +2197,40 @@ static int dbcc_enable_be(struct rtw89_dev *rtwdev, bool enable)
static int set_host_rpr_be(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val32;
u32 mode;
u32 fltr;
+ u32 qid;
bool poh;
poh = is_qta_poh(rtwdev);
if (poh) {
mode = RTW89_RPR_MODE_POH;
- fltr = S_BE_WDRLS_FLTR_TXOK | S_BE_WDRLS_FLTR_RTYLMT |
- S_BE_WDRLS_FLTR_LIFTIM | S_BE_WDRLS_FLTR_MACID;
+ qid = WDRLS_DEST_QID_POH;
} else {
mode = RTW89_RPR_MODE_STF;
fltr = 0;
+ qid = WDRLS_DEST_QID_STF;
+ }
+
+ if (chip_id == RTL8922A) {
+ fltr = S_BE_WDRLS_FLTR_TXOK | S_BE_WDRLS_FLTR_RTYLMT |
+ S_BE_WDRLS_FLTR_LIFTIM | S_BE_WDRLS_FLTR_MACID;
+ } else {
+ fltr = S_BE_WDRLS_FLTR_TXOK_V1 | S_BE_WDRLS_FLTR_RTYLMT_V1 |
+ S_BE_WDRLS_FLTR_LIFTIM_V1 | S_BE_WDRLS_FLTR_MACID_V1;
}
rtw89_write32_mask(rtwdev, R_BE_WDRLS_CFG, B_BE_WDRLS_MODE_MASK, mode);
+ rtw89_write32_mask(rtwdev, R_BE_RLSRPT0_CFG0, B_BE_RLSRPT0_QID_MASK, qid);
val32 = rtw89_read32(rtwdev, R_BE_RLSRPT0_CFG1);
- val32 = u32_replace_bits(val32, fltr, B_BE_RLSRPT0_FLTR_MAP_MASK);
+ if (chip_id == RTL8922A)
+ val32 = u32_replace_bits(val32, fltr, B_BE_RLSRPT0_FLTR_MAP_MASK);
+ else
+ val32 = u32_replace_bits(val32, fltr, B_BE_RLSRPT0_FLTR_MAP_V1_MASK);
val32 = u32_replace_bits(val32, 30, B_BE_RLSRPT0_AGGNUM_MASK);
val32 = u32_replace_bits(val32, 255, B_BE_RLSRPT0_TO_MASK);
rtw89_write32(rtwdev, R_BE_RLSRPT0_CFG1, val32);
@@ -1863,12 +2343,65 @@ int rtw89_mac_cfg_gnt_v2(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v2);
+int rtw89_mac_cfg_gnt_v3(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ u32 val = 0;
+
+ if (gnt_cfg->band[0].gnt_bt)
+ val |= B_BE_PTA_GNT_BT0_BB_VAL | B_BE_PTA_GNT_BT0_RX_BB0_VAL |
+ B_BE_PTA_GNT_BT0_TX_BB0_VAL;
+
+ if (gnt_cfg->band[0].gnt_bt_sw_en)
+ val |= B_BE_PTA_GNT_BT0_BB_SWCTRL | B_BE_PTA_GNT_BT0_RX_BB0_SWCTRL |
+ B_BE_PTA_GNT_BT0_TX_BB0_SWCTRL;
+
+ if (gnt_cfg->band[0].gnt_wl)
+ val |= B_BE_PTA_GNT_WL_BB0_VAL;
+
+ if (gnt_cfg->band[0].gnt_wl_sw_en)
+ val |= B_BE_PTA_GNT_WL_BB0_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_bt)
+ val |= B_BE_PTA_GNT_BT0_BB_VAL | B_BE_PTA_GNT_BT0_RX_BB1_VAL |
+ B_BE_PTA_GNT_BT0_TX_BB1_VAL;
+
+ if (gnt_cfg->band[1].gnt_bt_sw_en)
+ val |= B_BE_PTA_GNT_BT0_BB_SWCTRL | B_BE_PTA_GNT_BT0_RX_BB1_SWCTRL |
+ B_BE_PTA_GNT_BT0_TX_BB1_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_wl)
+ val |= B_BE_PTA_GNT_WL_BB1_VAL;
+
+ if (gnt_cfg->band[1].gnt_wl_sw_en)
+ val |= B_BE_PTA_GNT_WL_BB1_SWCTRL;
+
+ if (gnt_cfg->bt[0].wlan_act_en)
+ val |= B_BE_PTA_WL_ACT0_SWCTRL | B_BE_PTA_WL_ACT_RX_BT0_SWCTRL |
+ B_BE_PTA_WL_ACT_TX_BT0_SWCTRL;
+ if (gnt_cfg->bt[0].wlan_act)
+ val |= B_BE_PTA_WL_ACT0_VAL | B_BE_PTA_WL_ACT_RX_BT0_VAL |
+ B_BE_PTA_WL_ACT_TX_BT0_VAL;
+ if (gnt_cfg->bt[1].wlan_act_en)
+ val |= B_BE_PTA_WL_ACT1_SWCTRL | B_BE_PTA_WL_ACT_RX_BT1_SWCTRL |
+ B_BE_PTA_WL_ACT_TX_BT1_SWCTRL;
+ if (gnt_cfg->bt[1].wlan_act)
+ val |= B_BE_PTA_WL_ACT1_VAL | B_BE_PTA_WL_ACT_RX_BT1_VAL |
+ B_BE_PTA_WL_ACT_TX_BT1_VAL;
+
+ rtw89_write32(rtwdev, R_BE_PTA_GNT_SW_CTRL, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v3);
+
int rtw89_mac_cfg_ctrl_path_v2(struct rtw89_dev *rtwdev, bool wl)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_mac_ax_gnt *g = dm->gnt.band;
struct rtw89_mac_ax_wl_act *gbt = dm->gnt.bt;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
int i;
if (wl)
@@ -1883,7 +2416,11 @@ int rtw89_mac_cfg_ctrl_path_v2(struct rtw89_dev *rtwdev, bool wl)
gbt[i].wlan_act_en = 0;
}
- return rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
+ if (chip->chip_id == RTL8922A)
+ return rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
+ else
+ return rtw89_mac_cfg_gnt_v3(rtwdev, &dm->gnt);
}
EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v2);
@@ -2012,6 +2549,65 @@ void rtw89_mac_cfg_phy_rpt_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
EXPORT_SYMBOL(rtw89_mac_cfg_phy_rpt_be);
static
+void rtw89_mac_set_edcca_mode_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool normal)
+{
+ u16 resp_ack, resp_rts, resp_rts_punc, resp_normal, resp_normal_punc;
+
+ if (rtwdev->chip->chip_id == RTL8922A)
+ return;
+
+ resp_ack = RESP_ACK_CFG_BE;
+ resp_rts = RESP_RTS_CFG_BE;
+ resp_rts_punc = RESP_RTS_PUNC_CFG_BE;
+ resp_normal = RESP_NORMAL_CFG_BE;
+ resp_normal_punc = RESP_NORMAL_PUNC_CFG_BE;
+
+ if (normal) {
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_ACK_BA_RESP_LEGACY,
+ resp_ack, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_ACK_BA_RESP_HE,
+ resp_ack, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_ACK_BA_RESP_EHT_LEG_PUNC,
+ resp_ack, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_RX_RTS_RESP_LEGACY,
+ resp_rts, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_RX_RTS_RESP_LEGACY_PUNC,
+ resp_rts_punc, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_RX_MURTS_RESP_LEGACY,
+ resp_normal, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_RX_MURTS_RESP_LEGACY_PUNC,
+ resp_normal_punc, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_OTHERS_RESP_LEGACY,
+ resp_normal, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_OTHERS_RESP_HE,
+ resp_normal_punc, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_OTHERS_RESP_EHT_LEG_PUNC,
+ resp_normal_punc, mac_idx);
+ } else {
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_ACK_BA_RESP_LEGACY,
+ resp_normal, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_ACK_BA_RESP_HE,
+ resp_normal_punc, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_ACK_BA_RESP_EHT_LEG_PUNC,
+ resp_normal_punc, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_RX_RTS_RESP_LEGACY,
+ resp_rts, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_RX_RTS_RESP_LEGACY_PUNC,
+ resp_rts_punc, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_RX_MURTS_RESP_LEGACY,
+ resp_normal, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_RX_MURTS_RESP_LEGACY_PUNC,
+ resp_normal_punc, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_OTHERS_RESP_LEGACY,
+ resp_normal, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_OTHERS_RESP_HE,
+ resp_normal_punc, mac_idx);
+ rtw89_write16_idx(rtwdev, R_BE_WMAC_OTHERS_RESP_EHT_LEG_PUNC,
+ resp_normal_punc, mac_idx);
+ }
+}
+
+static
int rtw89_mac_cfg_ppdu_status_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PPDU_STAT, mac_idx);
@@ -2580,6 +3176,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.port_base = &rtw89_port_base_be,
.agg_len_ht = R_BE_AGG_LEN_HT_0,
.ps_status = R_BE_WMTX_POWER_BE_BIT_CTL,
+ .mu_gid = &rtw89_mac_mu_gid_addr_be,
.muedca_ctrl = {
.addr = R_BE_MUEDCA_EN,
@@ -2601,6 +3198,10 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.check_mac_en = rtw89_mac_check_mac_en_be,
.sys_init = sys_init_be,
.trx_init = trx_init_be,
+ .preload_init = preload_init_be,
+ .clr_aon_intr = clr_aon_intr_be,
+ .err_imr_ctrl = err_imr_ctrl_be,
+ .mac_func_en = mac_func_en_be,
.hci_func_en = rtw89_mac_hci_func_en_be,
.dmac_func_pre_en = rtw89_mac_dmac_func_pre_en_be,
.dle_func_en = dle_func_en_be,
@@ -2610,6 +3211,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_be,
.cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_be,
.cfg_phy_rpt = rtw89_mac_cfg_phy_rpt_be,
+ .set_edcca_mode = rtw89_mac_set_edcca_mode_be,
.dle_mix_cfg = dle_mix_cfg_be,
.chk_dle_rdy = chk_dle_rdy_be,
@@ -2623,6 +3225,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.set_cpuio = set_cpuio_be,
.dle_quota_change = dle_quota_change_be,
+ .reset_pwr_state = rtw89_mac_reset_pwr_state_be,
.disable_cpu = rtw89_mac_disable_cpu_be,
.fwdl_enable_wcpu = rtw89_mac_fwdl_enable_wcpu_be,
.fwdl_get_status = fwdl_get_status_be,
@@ -2632,6 +3235,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.parse_phycap_map = rtw89_parse_phycap_map_be,
.cnv_efuse_state = rtw89_cnv_efuse_state_be,
.efuse_read_fw_secure = rtw89_efuse_read_fw_secure_be,
+ .efuse_read_ecv = rtw89_efuse_read_ecv_be,
.cfg_plt = rtw89_mac_cfg_plt_be,
.get_plt_cnt = rtw89_mac_get_plt_cnt_be,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index a66fcdb0293b..43c61b3dc969 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -604,8 +604,15 @@ static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, void *rpp)
info->parse_rpp(rtwdev, rpp, &rpp_info);
- if (rpp_info.txch == RTW89_TXCH_CH12) {
- rtw89_warn(rtwdev, "should no fwcmd release report\n");
+ if (unlikely(rpp_info.txch >= RTW89_TXCH_NUM ||
+ info->tx_dma_ch_mask & BIT(rpp_info.txch))) {
+ rtw89_warn(rtwdev, "should no release report on txch %d\n",
+ rpp_info.txch);
+ return;
+ }
+
+ if (unlikely(rpp_info.seq >= RTW89_PCI_TXWD_NUM_MAX)) {
+ rtw89_warn(rtwdev, "invalid seq %d\n", rpp_info.seq);
return;
}
@@ -963,6 +970,9 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_wdt_timeout))
rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
+ if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_sps_ocp))
+ rtw89_warn(rtwdev, "SPS OCP alarm 0x%x\n", isrs.halt_c2h_isrs);
+
if (unlikely(rtwpci->under_recovery))
goto enable_intr;
@@ -3998,7 +4008,8 @@ static void rtw89_pci_recovery_intr_mask_v3(struct rtw89_dev *rtwdev)
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
- rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
+ rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN |
+ B_BE_SPSANA_OCP_INT_EN | B_BE_SPS_OCP_INT_EN;
rtwpci->intrs[0] = 0;
rtwpci->intrs[1] = 0;
}
@@ -4008,7 +4019,8 @@ static void rtw89_pci_default_intr_mask_v3(struct rtw89_dev *rtwdev)
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
- rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
+ rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN |
+ B_BE_SPSANA_OCP_INT_EN | B_BE_SPS_OCP_INT_EN;
rtwpci->intrs[0] = 0;
rtwpci->intrs[1] = B_BE_PCIE_RDU_CH1_IMR |
B_BE_PCIE_RDU_CH0_IMR |
@@ -4598,6 +4610,7 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev)
rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
B_AX_SEL_REQ_ENTR_L1);
}
+ rtw89_pci_hci_ldo(rtwdev);
rtw89_pci_l2_hci_ldo(rtwdev);
rtw89_pci_basic_cfg(rtwdev, true);
@@ -4649,6 +4662,7 @@ const struct rtw89_pci_isr_def rtw89_pci_isr_ax = {
.isr_rdu = B_AX_RDU_INT,
.isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
.isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
+ .isr_sps_ocp = 0,
.isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
.isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
B_AX_RDU_INT},
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 16dfb0e79d77..ccfa6d33623a 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -55,6 +55,8 @@
#define B_AX_CALIB_EN BIT(13)
#define B_AX_DIV GENMASK(15, 14)
#define RAC_SET_PPR_V1 0x31
+#define RAC_ANA41 0x41
+#define PHY_ERR_FLAG_EN BIT(6)
#define R_AX_DBI_FLAG 0x1090
#define B_AX_DBI_RFLAG BIT(17)
@@ -145,6 +147,11 @@
#define R_RAC_DIRECT_OFFSET_BE_LANE0_G2 0x3900
#define R_RAC_DIRECT_OFFSET_BE_LANE1_G2 0x3980
+#define RAC_DIRECT_OFFESET_L0_G1 0x3800
+#define RAC_DIRECT_OFFESET_L1_G1 0x3900
+#define RAC_DIRECT_OFFESET_L0_G2 0x3A00
+#define RAC_DIRECT_OFFESET_L1_G2 0x3B00
+
#define RTW89_PCI_WR_RETRY_CNT 20
/* Interrupts */
@@ -296,6 +303,10 @@
#define B_BE_PCIE_EN_AUX_CLK BIT(0)
#define R_BE_PCIE_PS_CTRL 0x3008
+#define B_BE_ASPM_L11_EN BIT(19)
+#define B_BE_ASPM_L12_EN BIT(18)
+#define B_BE_PCIPM_L11_EN BIT(17)
+#define B_BE_PCIPM_L12_EN BIT(16)
#define B_BE_RSM_L0S_EN BIT(8)
#define B_BE_CMAC_EXIT_L1_EN BIT(7)
#define B_BE_DMAC0_EXIT_L1_EN BIT(6)
@@ -767,31 +778,6 @@
#define R_AX_WP_ADDR_H_SEL8_11 0x133C
#define R_AX_WP_ADDR_H_SEL12_15 0x1340
-#define R_BE_HAXI_DMA_STOP1 0xB010
-#define B_BE_STOP_WPDMA BIT(31)
-#define B_BE_STOP_CH14 BIT(14)
-#define B_BE_STOP_CH13 BIT(13)
-#define B_BE_STOP_CH12 BIT(12)
-#define B_BE_STOP_CH11 BIT(11)
-#define B_BE_STOP_CH10 BIT(10)
-#define B_BE_STOP_CH9 BIT(9)
-#define B_BE_STOP_CH8 BIT(8)
-#define B_BE_STOP_CH7 BIT(7)
-#define B_BE_STOP_CH6 BIT(6)
-#define B_BE_STOP_CH5 BIT(5)
-#define B_BE_STOP_CH4 BIT(4)
-#define B_BE_STOP_CH3 BIT(3)
-#define B_BE_STOP_CH2 BIT(2)
-#define B_BE_STOP_CH1 BIT(1)
-#define B_BE_STOP_CH0 BIT(0)
-#define B_BE_TX_STOP1_MASK (B_BE_STOP_CH0 | B_BE_STOP_CH1 | \
- B_BE_STOP_CH2 | B_BE_STOP_CH3 | \
- B_BE_STOP_CH4 | B_BE_STOP_CH5 | \
- B_BE_STOP_CH6 | B_BE_STOP_CH7 | \
- B_BE_STOP_CH8 | B_BE_STOP_CH9 | \
- B_BE_STOP_CH10 | B_BE_STOP_CH11 | \
- B_BE_STOP_CH12)
-
#define R_BE_CH0_TXBD_NUM_V1 0xB030
#define R_BE_CH1_TXBD_NUM_V1 0xB032
#define R_BE_CH2_TXBD_NUM_V1 0xB034
@@ -974,6 +960,12 @@
#define R_BE_PCIE_CRPWM 0x30C4
#define R_BE_L1_2_CTRL_HCILDO 0x3110
+#define B_BE_PM_CLKREQ_EXT_RB BIT(11)
+#define B_BE_PCIE_DIS_RTK_PRST_N_L1_2 BIT(10)
+#define B_BE_PCIE_PRST_IN_L1_2_RB BIT(9)
+#define B_BE_PCIE_PRST_SEL_RB_V1 BIT(8)
+#define B_BE_PCIE_DIS_L2_CTRL_APHY_SUSB BIT(7)
+#define B_BE_PCIE_DIS_L1_2_CTRL_APHY_SUSB BIT(6)
#define B_BE_PCIE_DIS_L1_2_CTRL_HCILDO BIT(0)
#define R_BE_PL1_DBG_INFO 0x3120
@@ -1023,9 +1015,11 @@
#define B_BE_PL1_SER_PL1_EN BIT(31)
#define B_BE_PL1_IGNORE_HOT_RST BIT(30)
#define B_BE_PL1_TIMER_UNIT_MASK GENMASK(19, 17)
+#define PCIE_SER_TIMER_UNIT 0x2
#define B_BE_PL1_TIMER_CLEAR BIT(0)
#define R_BE_REG_PL1_MASK 0x34B0
+#define B_BE_SER_LTSSM_UNSTABLE_MASK BIT(6)
#define B_BE_SER_PCLKREQ_ACK_MASK BIT(5)
#define B_BE_SER_PM_CLK_MASK BIT(4)
#define B_BE_SER_LTSSM_IMR BIT(3)
@@ -1055,6 +1049,18 @@
#define B_BE_CLR_CH2_IDX BIT(2)
#define B_BE_CLR_CH1_IDX BIT(1)
#define B_BE_CLR_CH0_IDX BIT(0)
+#define B_BE_CLR_ALL_IDX_MASK (B_BE_CLR_CH0_IDX | B_BE_CLR_CH1_IDX | \
+ B_BE_CLR_CH2_IDX | B_BE_CLR_CH3_IDX | \
+ B_BE_CLR_CH4_IDX | B_BE_CLR_CH5_IDX | \
+ B_BE_CLR_CH6_IDX | B_BE_CLR_CH7_IDX | \
+ B_BE_CLR_CH8_IDX | B_BE_CLR_CH9_IDX | \
+ B_BE_CLR_CH10_IDX | B_BE_CLR_CH11_IDX | \
+ B_BE_CLR_CH12_IDX | B_BE_CLR_CH13_IDX | \
+ B_BE_CLR_CH14_IDX)
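+/* the _V1 mask covers only the even-numbered channels (non-8922A chips) */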
+#define B_BE_CLR_ALL_IDX_MASK_V1 (B_BE_CLR_CH0_IDX | B_BE_CLR_CH2_IDX | \
+ B_BE_CLR_CH4_IDX | B_BE_CLR_CH6_IDX | \
+ B_BE_CLR_CH8_IDX | B_BE_CLR_CH10_IDX | \
+ B_BE_CLR_CH12_IDX)
#define R_BE_RXBD_RWPTR_CLR1_V1 0xB018
#define B_BE_CLR_ROQ1_IDX_V1 BIT(5)
@@ -1325,6 +1331,7 @@ struct rtw89_pci_isr_def {
u32 isr_rdu;
u32 isr_halt_c2h;
u32 isr_wdt_timeout;
+ u32 isr_sps_ocp;
struct rtw89_reg2_def isr_clear_rpq;
struct rtw89_reg2_def isr_clear_rxq;
};
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
index e4590879b800..114f40c6c31b 100644
--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -46,6 +46,14 @@ static void rtw89_pci_aspm_set_be(struct rtw89_dev *rtwdev, bool enable)
static void rtw89_pci_l1ss_set_be(struct rtw89_dev *rtwdev, bool enable)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (enable && chip_id == RTL8922D && hal->cid == RTL8922D_CID7090)
+ rtw89_write32_set(rtwdev, R_BE_PCIE_PS_CTRL,
+ B_BE_ASPM_L11_EN | B_BE_ASPM_L12_EN |
+ B_BE_PCIPM_L11_EN | B_BE_PCIPM_L12_EN);
+
if (enable)
rtw89_write32_set(rtwdev, R_BE_PCIE_MIX_CFG,
B_BE_L1SUB_ENABLE);
@@ -154,7 +162,7 @@ static void rtw89_pci_ctrl_trxdma_pcie_be(struct rtw89_dev *rtwdev,
rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val);
- if (io_en == MAC_AX_PCIE_ENABLE)
+ if (io_en == MAC_AX_PCIE_ENABLE && rtwdev->chip->chip_id == RTL8922A)
rtw89_write32_mask(rtwdev, R_BE_HAXI_MST_WDT_TIMEOUT_SEL_V1,
B_BE_HAXI_MST_WDT_TIMEOUT_SEL_MASK, 4);
}
@@ -162,14 +170,15 @@ static void rtw89_pci_ctrl_trxdma_pcie_be(struct rtw89_dev *rtwdev,
static void rtw89_pci_clr_idx_all_be(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_pci_rx_ring *rx_ring;
u32 val;
- val = B_BE_CLR_CH0_IDX | B_BE_CLR_CH1_IDX | B_BE_CLR_CH2_IDX |
- B_BE_CLR_CH3_IDX | B_BE_CLR_CH4_IDX | B_BE_CLR_CH5_IDX |
- B_BE_CLR_CH6_IDX | B_BE_CLR_CH7_IDX | B_BE_CLR_CH8_IDX |
- B_BE_CLR_CH9_IDX | B_BE_CLR_CH10_IDX | B_BE_CLR_CH11_IDX |
- B_BE_CLR_CH12_IDX | B_BE_CLR_CH13_IDX | B_BE_CLR_CH14_IDX;
+ if (chip->chip_id == RTL8922A)
+ val = B_BE_CLR_ALL_IDX_MASK;
+ else
+ val = B_BE_CLR_ALL_IDX_MASK_V1;
+
rtw89_write32(rtwdev, R_BE_TXBD_RWPTR_CLR1, val);
rtw89_write32(rtwdev, R_BE_RXBD_RWPTR_CLR1_V1,
@@ -184,10 +193,13 @@ static void rtw89_pci_clr_idx_all_be(struct rtw89_dev *rtwdev)
static int rtw89_pci_poll_txdma_ch_idle_be(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 dma_busy1 = info->dma_busy1.addr;
+ u32 check = info->dma_busy1.mask;
u32 val;
- return read_poll_timeout(rtw89_read32, val, (val & DMA_BUSY1_CHECK_BE) == 0,
- 10, 1000, false, rtwdev, R_BE_HAXI_DMA_BUSY1);
+ return read_poll_timeout(rtw89_read32, val, (val & check) == 0,
+ 10, 1000, false, rtwdev, dma_busy1);
}
static int rtw89_pci_poll_rxdma_ch_idle_be(struct rtw89_dev *rtwdev)
@@ -223,20 +235,24 @@ static int rtw89_pci_poll_dma_all_idle_be(struct rtw89_dev *rtwdev)
static void rtw89_pci_mode_op_be(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 val32_init1, val32_rxapp, val32_exp;
val32_init1 = rtw89_read32(rtwdev, R_BE_HAXI_INIT_CFG1);
- val32_rxapp = rtw89_read32(rtwdev, R_BE_RX_APPEND_MODE);
+ if (chip->chip_id == RTL8922A)
+ val32_rxapp = rtw89_read32(rtwdev, R_BE_RX_APPEND_MODE);
val32_exp = rtw89_read32(rtwdev, R_BE_HAXI_EXP_CTRL_V1);
- if (info->rxbd_mode == MAC_AX_RXBD_PKT) {
- val32_init1 = u32_replace_bits(val32_init1, PCIE_RXBD_NORM,
- B_BE_RXQ_RXBD_MODE_MASK);
- } else if (info->rxbd_mode == MAC_AX_RXBD_SEP) {
- val32_init1 = u32_replace_bits(val32_init1, PCIE_RXBD_SEP,
- B_BE_RXQ_RXBD_MODE_MASK);
- val32_rxapp = u32_replace_bits(val32_rxapp, 0,
- B_BE_APPEND_LEN_MASK);
+ if (chip->chip_id == RTL8922A) {
+ if (info->rxbd_mode == MAC_AX_RXBD_PKT) {
+ val32_init1 = u32_replace_bits(val32_init1, PCIE_RXBD_NORM,
+ B_BE_RXQ_RXBD_MODE_MASK);
+ } else if (info->rxbd_mode == MAC_AX_RXBD_SEP) {
+ val32_init1 = u32_replace_bits(val32_init1, PCIE_RXBD_SEP,
+ B_BE_RXQ_RXBD_MODE_MASK);
+ val32_rxapp = u32_replace_bits(val32_rxapp, 0,
+ B_BE_APPEND_LEN_MASK);
+ }
}
val32_init1 = u32_replace_bits(val32_init1, info->tx_burst,
@@ -251,7 +267,8 @@ static void rtw89_pci_mode_op_be(struct rtw89_dev *rtwdev)
B_BE_CFG_WD_PERIOD_ACTIVE_MASK);
rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val32_init1);
- rtw89_write32(rtwdev, R_BE_RX_APPEND_MODE, val32_rxapp);
+ if (chip->chip_id == RTL8922A)
+ rtw89_write32(rtwdev, R_BE_RX_APPEND_MODE, val32_rxapp);
rtw89_write32(rtwdev, R_BE_HAXI_EXP_CTRL_V1, val32_exp);
}
@@ -277,6 +294,10 @@ static void rtw89_pci_debounce_be(struct rtw89_dev *rtwdev)
static void rtw89_pci_ldo_low_pwr_be(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 clr;
+
rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_PSUS_OFF_CAPC_EN);
rtw89_write32_set(rtwdev, R_BE_SYS_PAGE_CLK_GATED,
B_BE_SOP_OFFPOOBS_PC | B_BE_CPHY_AUXCLK_OP |
@@ -284,7 +305,16 @@ static void rtw89_pci_ldo_low_pwr_be(struct rtw89_dev *rtwdev)
rtw89_write32_clr(rtwdev, R_BE_SYS_SDIO_CTRL, B_BE_PCIE_FORCE_IBX_EN |
B_BE_PCIE_DIS_L2_RTK_PERST |
B_BE_PCIE_DIS_L2__CTRL_LDO_HCI);
- rtw89_write32_clr(rtwdev, R_BE_L1_2_CTRL_HCILDO, B_BE_PCIE_DIS_L1_2_CTRL_HCILDO);
+
+ if (chip_id == RTL8922D && hal->cid == RTL8922D_CID7090)
+ clr = B_BE_PCIE_DIS_L1_2_CTRL_HCILDO |
+ B_BE_PCIE_DIS_L1_2_CTRL_APHY_SUSB |
+ B_BE_PCIE_DIS_RTK_PRST_N_L1_2 |
+ B_BE_PCIE_DIS_L2_CTRL_APHY_SUSB;
+ else
+ clr = B_BE_PCIE_DIS_L1_2_CTRL_HCILDO;
+
+ rtw89_write32_clr(rtwdev, R_BE_L1_2_CTRL_HCILDO, clr);
}
static void rtw89_pci_pcie_setting_be(struct rtw89_dev *rtwdev)
@@ -300,11 +330,25 @@ static void rtw89_pci_pcie_setting_be(struct rtw89_dev *rtwdev)
rtw89_write32_set(rtwdev, R_BE_EFUSE_CTRL_2_V1, B_BE_R_SYM_AUTOLOAD_WITH_PMC_SEL);
rtw89_write32_set(rtwdev, R_BE_PCIE_LAT_CTRL, B_BE_SYM_AUX_CLK_SEL);
+
+ if (chip->chip_id != RTL8922D)
+ return;
+
+ rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_R_SYM_PRST_CPHY_RST);
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_USUS_OFFCAPC_EN);
}
static void rtw89_pci_ser_setting_be(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ struct rtw89_hal *hal = &rtwdev->hal;
u32 val32;
+ int ret;
+
+ if (chip_id == RTL8922D)
+ goto be2_chips;
+ else if (chip_id != RTL8922A)
+ return;
rtw89_write32(rtwdev, R_BE_PL1_DBG_INFO, 0x0);
rtw89_write32_set(rtwdev, R_BE_FWS1IMR, B_BE_PCIE_SER_TIMEOUT_INDIC_EN);
@@ -315,10 +359,48 @@ static void rtw89_pci_ser_setting_be(struct rtw89_dev *rtwdev)
val32 |= B_BE_SER_PMU_IMR | B_BE_SER_L1SUB_IMR | B_BE_SER_PM_MASTER_IMR |
B_BE_SER_LTSSM_IMR | B_BE_SER_PM_CLK_MASK | B_BE_SER_PCLKREQ_ACK_MASK;
rtw89_write32(rtwdev, R_BE_REG_PL1_MASK, val32);
+
+ return;
+
+be2_chips:
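+	/* BE2 SER init: pulse the flush reset, re-arm the PHY error flag
+	 * on both lane-0 register groups, then clear and re-enable PL1
+	 * SER with the BE2 interrupt mask
+	 */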
+ rtw89_write32_clr(rtwdev, R_BE_PCIE_SER_DBG, B_BE_PCIE_SER_FLUSH_RSTB);
+ rtw89_write32_set(rtwdev, R_BE_PCIE_SER_DBG, B_BE_PCIE_SER_FLUSH_RSTB);
+
+ rtw89_write16_clr(rtwdev, RAC_DIRECT_OFFESET_L0_G1 +
+ RAC_ANA41 * RAC_MULT, PHY_ERR_FLAG_EN);
+ rtw89_write16_clr(rtwdev, RAC_DIRECT_OFFESET_L0_G2 +
+ RAC_ANA41 * RAC_MULT, PHY_ERR_FLAG_EN);
+ rtw89_write16_set(rtwdev, RAC_DIRECT_OFFESET_L0_G1 +
+ RAC_ANA41 * RAC_MULT, PHY_ERR_FLAG_EN);
+ rtw89_write16_set(rtwdev, RAC_DIRECT_OFFESET_L0_G2 +
+ RAC_ANA41 * RAC_MULT, PHY_ERR_FLAG_EN);
+
+ val32 = rtw89_read32(rtwdev, R_BE_SER_PL1_CTRL);
+ val32 &= ~B_BE_PL1_SER_PL1_EN;
+ rtw89_write32(rtwdev, R_BE_SER_PL1_CTRL, val32);
+
+ ret = read_poll_timeout_atomic(rtw89_read32, val32, !val32,
+ 1, 1000, false, rtwdev, R_BE_REG_PL1_ISR);
+ if (ret)
+ rtw89_warn(rtwdev, "[ERR] PCIE SER clear poll fail\n");
+
+ val32 = rtw89_read32(rtwdev, R_BE_REG_PL1_MASK);
+ val32 |= B_BE_SER_PMU_IMR | B_BE_SER_L1SUB_IMR | B_BE_SER_PM_MASTER_IMR |
+ B_BE_SER_LTSSM_IMR | B_BE_SER_PM_CLK_MASK | B_BE_SER_PCLKREQ_ACK_MASK |
+ B_BE_SER_LTSSM_UNSTABLE_MASK;
+ rtw89_write32(rtwdev, R_BE_REG_PL1_MASK, val32);
+
+ rtw89_write32_mask(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_TIMER_UNIT_MASK,
+ PCIE_SER_TIMER_UNIT);
+ rtw89_write32_set(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
+
+ if (hal->cid == RTL8922D_CID7090)
+ rtw89_write32_set(rtwdev, R_BE_SYS_SDIO_CTRL, B_BE_SER_DETECT_EN);
}
static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool enable)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 mask_all;
u32 val;
@@ -327,6 +409,9 @@ static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool enable)
B_BE_STOP_CH6 | B_BE_STOP_CH7 | B_BE_STOP_CH8 |
B_BE_STOP_CH9 | B_BE_STOP_CH10 | B_BE_STOP_CH11;
+	/* mask out unsupported channels for certain chips */
+ mask_all &= info->dma_stop1.mask;
+
val = rtw89_read32(rtwdev, R_BE_HAXI_DMA_STOP1);
val |= B_BE_STOP_CH13 | B_BE_STOP_CH14;
@@ -409,6 +494,7 @@ static int rtw89_pci_ops_mac_pre_deinit_be(struct rtw89_dev *rtwdev)
int rtw89_pci_ltr_set_v2(struct rtw89_dev *rtwdev, bool en)
{
u32 ctrl0, cfg0, cfg1, dec_ctrl, idle_ltcy, act_ltcy, dis_ltcy;
+ u32 ltr_idle_lat_ctrl, ltr_act_lat_ctrl;
ctrl0 = rtw89_read32(rtwdev, R_BE_LTR_CTRL_0);
if (rtw89_pci_ltr_is_err_reg_val(ctrl0))
@@ -451,8 +537,16 @@ int rtw89_pci_ltr_set_v2(struct rtw89_dev *rtwdev, bool en)
cfg0 = u32_replace_bits(cfg0, 3, B_BE_LTR_IDX_IDLE_MASK);
dec_ctrl = u32_replace_bits(dec_ctrl, 0, B_BE_LTR_IDX_DISABLE_V1_MASK);
- rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX3_V1, 0x90039003);
- rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX1_V1, 0x880b880b);
+ if (rtwdev->chip->chip_id == RTL8922A) {
+ ltr_idle_lat_ctrl = 0x90039003;
+ ltr_act_lat_ctrl = 0x880b880b;
+ } else {
+ ltr_idle_lat_ctrl = 0x90019001;
+ ltr_act_lat_ctrl = 0x88018801;
+ }
+
+ rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX3_V1, ltr_idle_lat_ctrl);
+ rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX1_V1, ltr_act_lat_ctrl);
rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX0_V1, 0);
rtw89_write32(rtwdev, R_BE_LTR_DECISION_CTRL_V1, dec_ctrl);
rtw89_write32(rtwdev, R_BE_LTR_CFG_0, cfg0);
@@ -669,6 +763,7 @@ const struct rtw89_pci_isr_def rtw89_pci_isr_be = {
.isr_rdu = B_BE_RDU_CH1_INT_V1 | B_BE_RDU_CH0_INT_V1,
.isr_halt_c2h = B_BE_HALT_C2H_INT,
.isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
+ .isr_sps_ocp = 0,
.isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
.isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1},
};
@@ -678,6 +773,7 @@ const struct rtw89_pci_isr_def rtw89_pci_isr_be_v1 = {
.isr_rdu = B_BE_PCIE_RDU_CH1_INT | B_BE_PCIE_RDU_CH0_INT,
.isr_halt_c2h = B_BE_HALT_C2H_INT,
.isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
+ .isr_sps_ocp = B_BE_SPS_OCP_INT | B_BE_SPSANA_OCP_INT,
.isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
.isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1},
};
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 9f418b1fb7ed..6c6d5f1da867 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -281,8 +281,7 @@ static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
u8 band = chan->band_type;
enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
- u8 he_ltf = mask->control[nl_band].he_ltf;
- u8 he_gi = mask->control[nl_band].he_gi;
+ u8 ltf, gi;
*fix_giltf_en = true;
@@ -293,22 +292,31 @@ static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
else
*fix_giltf = RTW89_GILTF_2XHE08;
- if (!(rtwsta_link->use_cfg_mask && link_sta->he_cap.has_he))
+ if (!rtwsta_link->use_cfg_mask)
return;
- if (he_ltf == 2 && he_gi == 2) {
+ if (link_sta->eht_cap.has_eht) {
+ ltf = mask->control[nl_band].eht_ltf;
+ gi = mask->control[nl_band].eht_gi;
+ } else if (link_sta->he_cap.has_he) {
+ ltf = mask->control[nl_band].he_ltf;
+ gi = mask->control[nl_band].he_gi;
+ } else {
+ return;
+ }
+
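+	/* nl80211 ltf/gi index pairs map onto the fixed GILTF codes below */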
+ if (ltf == 2 && gi == 2)
*fix_giltf = RTW89_GILTF_LGI_4XHE32;
- } else if (he_ltf == 2 && he_gi == 0) {
+ else if (ltf == 2 && gi == 0)
*fix_giltf = RTW89_GILTF_SGI_4XHE08;
- } else if (he_ltf == 1 && he_gi == 1) {
+ else if (ltf == 1 && gi == 1)
*fix_giltf = RTW89_GILTF_2XHE16;
- } else if (he_ltf == 1 && he_gi == 0) {
+ else if (ltf == 1 && gi == 0)
*fix_giltf = RTW89_GILTF_2XHE08;
- } else if (he_ltf == 0 && he_gi == 1) {
+ else if (ltf == 0 && gi == 1)
*fix_giltf = RTW89_GILTF_1XHE16;
- } else if (he_ltf == 0 && he_gi == 0) {
+ else if (ltf == 0 && gi == 0)
*fix_giltf = RTW89_GILTF_1XHE08;
- }
}
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
@@ -1028,6 +1036,68 @@ u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v2);
+static u32 rtw89_phy_read_full_rf_v3_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr)
+{
+ bool done;
+ u32 busy;
+ int ret;
+ u32 val;
+
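+	/* HWSI read handshake: wait until neither write nor read is busy,
+	 * program path/address, then poll the done flag before fetching
+	 * the data word
+	 */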
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
+ 1, 30, false,
+ rtwdev, R_SW_SI_DATA_BE4,
+ B_SW_SI_W_BUSY_BE4 | B_SW_SI_R_BUSY_BE4);
+ if (ret) {
+ rtw89_warn(rtwdev, "poll HWSI is busy\n");
+ return INV_RF_DATA;
+ }
+
+ val = u32_encode_bits(rf_path, GENMASK(10, 8)) |
+ u32_encode_bits(addr, GENMASK(7, 0));
+
+ rtw89_phy_write32_mask(rtwdev, R_SW_SI_READ_ADDR_BE4, B_SW_SI_READ_ADDR_BE4, val);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
+ 1, 30, false,
+ rtwdev, R_SW_SI_DATA_BE4, B_SW_SI_READ_DATA_DONE_BE4);
+ if (ret) {
+ rtw89_warn(rtwdev, "read HWSI is busy\n");
+ return INV_RF_DATA;
+ }
+
+ val = rtw89_phy_read32_mask(rtwdev, R_SW_SI_DATA_BE4, B_SW_SI_READ_DATA_BE4);
+
+ return val;
+}
+
+static u32 rtw89_phy_read_rf_v3_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr, u32 mask)
+{
+ u32 val;
+
+ val = rtw89_phy_read_full_rf_v3_a(rtwdev, rf_path, addr);
+
+ return (val & mask) >> __ffs(mask);
+}
+
+u32 rtw89_phy_read_rf_v3(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
+ else
+ return rtw89_phy_read_rf_v3_a(rtwdev, rf_path, addr, mask);
+}
+EXPORT_SYMBOL(rtw89_phy_read_rf_v3);
+
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
@@ -1167,6 +1237,66 @@ bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v2);
+static
+bool rtw89_phy_write_full_rf_v3_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 data)
+{
+ u32 busy;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
+ 1, 30, false,
+ rtwdev, R_SW_SI_DATA_BE4,
+ B_SW_SI_W_BUSY_BE4 | B_SW_SI_R_BUSY_BE4);
+ if (ret) {
+ rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
+ return false;
+ }
+
+ val = u32_encode_bits(rf_path, B_SW_SI_DATA_PATH_BE4) |
+ u32_encode_bits(addr, B_SW_SI_DATA_ADR_BE4) |
+ u32_encode_bits(data, B_SW_SI_DATA_DAT_BE4);
+
+ rtw89_phy_write32(rtwdev, R_SW_SI_WDATA_BE4, val);
+
+ return true;
+}
+
+static
+bool rtw89_phy_write_rf_a_v3(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ u32 val;
+
+ if (mask == RFREG_MASK) {
+ val = data;
+ } else {
+ val = rtw89_phy_read_full_rf_v3_a(rtwdev, rf_path, addr);
+ val &= ~mask;
+ val |= (data << __ffs(mask)) & mask;
+ }
+
+ return rtw89_phy_write_full_rf_v3_a(rtwdev, rf_path, addr, val);
+}
+
+bool rtw89_phy_write_rf_v3(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+		return false;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
+ else
+ return rtw89_phy_write_rf_a_v3(rtwdev, rf_path, addr, mask, data);
+}
+EXPORT_SYMBOL(rtw89_phy_write_rf_v3);
+
static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
@@ -3202,6 +3332,7 @@ void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt,
[RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL,
[RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
+ [RTW89_PHY_C2H_FUNC_ACCELERATE_EN] = rtw89_fw_c2h_dummy_handler,
};
static void
@@ -3210,6 +3341,64 @@ rtw89_phy_c2h_lowrt_rty(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
}
static void
+rtw89_phy_c2h_lps_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ const struct rtw89_c2h_lps_rpt *c2h_rpt = (const void *)c2h->data;
+ const __le32 *data_a, *data_b;
+ u16 len_info, cr_len, idx;
+ const __le16 *addr;
+ const u8 *info;
+
+	/* element sizes of BBCR/BBMCUCR/RFCR are 6/6/10 bytes respectively */
+ cr_len = c2h_rpt->cnt_bbcr * 6 +
+ c2h_rpt->cnt_bbmcucr * 6 +
+ c2h_rpt->cnt_rfcr * 10;
+ len_info = len - (sizeof(*c2h_rpt) + cr_len);
+
+ if (len < sizeof(*c2h_rpt) + cr_len || len_info % 4 != 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_PS,
+ "Invalid LPS RPT len(%d) TYPE(%d) CRCNT: BB(%d) MCU(%d) RF(%d)\n",
+ len, c2h_rpt->type, c2h_rpt->cnt_bbcr,
+ c2h_rpt->cnt_bbmcucr, c2h_rpt->cnt_rfcr);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_PS,
+ "LPS RPT TYPE(%d), CRCNT: BB(%d) MCU(%d) RF(%d)\n",
+ c2h_rpt->type, c2h_rpt->cnt_bbcr,
+ c2h_rpt->cnt_bbmcucr, c2h_rpt->cnt_rfcr);
+
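+	/* payload layout: len_info bytes of raw BB info words, then the
+	 * BBCR and BBMCUCR address/data arrays, and finally RFCR entries
+	 * carrying one address plus two data words each
+	 */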
+ info = &c2h_rpt->data[0];
+ for (idx = 0; idx < len_info; idx += 4, info += 4)
+ rtw89_debug(rtwdev, RTW89_DBG_PS,
+ "BB LPS INFO (%02d) - 0x%02x,0x%02x,0x%02x,0x%02x\n",
+ idx, info[3], info[2], info[1], info[0]);
+
+ addr = (const void *)(info);
+ data_a = (const void *)(addr + c2h_rpt->cnt_bbcr);
+ for (idx = 0; idx < c2h_rpt->cnt_bbcr; idx++, addr++, data_a++)
+ rtw89_debug(rtwdev, RTW89_DBG_PS,
+ "LPS BB CR - 0x%04x=0x%08x\n",
+ le16_to_cpu(*addr), le32_to_cpu(*data_a));
+
+ addr = (const void *)data_a;
+ data_a = (const void *)(addr + c2h_rpt->cnt_bbmcucr);
+ for (idx = 0; idx < c2h_rpt->cnt_bbmcucr; idx++, addr++, data_a++)
+ rtw89_debug(rtwdev, RTW89_DBG_PS,
+ "LPS BBMCU - 0x%04x=0x%08x\n",
+ le16_to_cpu(*addr), le32_to_cpu(*data_a));
+
+ addr = (const void *)data_a;
+ data_a = (const void *)(addr + c2h_rpt->cnt_rfcr);
+ data_b = (const void *)(data_a + c2h_rpt->cnt_rfcr);
+ for (idx = 0; idx < c2h_rpt->cnt_rfcr; idx++, addr++, data_a++, data_b++)
+ rtw89_debug(rtwdev, RTW89_DBG_PS,
+ "LPS RFCR - 0x%04x=0x%05x,0x%05x\n",
+ le16_to_cpu(*addr), le32_to_cpu(*data_a),
+ le32_to_cpu(*data_b));
+}
+
+static void
rtw89_phy_c2h_fw_scan_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
const struct rtw89_c2h_fw_scan_rpt *c2h_rpt =
@@ -3230,6 +3419,8 @@ void (* const rtw89_phy_c2h_dm_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_DM_FUNC_SIGB] = NULL,
[RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY] = rtw89_phy_c2h_lowrt_rty,
[RTW89_PHY_C2H_DM_FUNC_MCC_DIG] = NULL,
+ [RTW89_PHY_C2H_DM_FUNC_LPS] = rtw89_phy_c2h_lps_rpt,
+ [RTW89_PHY_C2H_DM_FUNC_ENV_MNTR] = rtw89_fw_c2h_dummy_handler,
[RTW89_PHY_C2H_DM_FUNC_FW_SCAN] = rtw89_phy_c2h_fw_scan_rpt,
};
@@ -3267,6 +3458,8 @@ static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
{
struct rtw89_c2h_rf_txgapk_rpt_log *txgapk;
struct rtw89_c2h_rf_rxdck_rpt_log *rxdck;
+ struct rtw89_c2h_rf_txiqk_rpt_log *txiqk;
+ struct rtw89_c2h_rf_cim3k_rpt_log *cim3k;
struct rtw89_c2h_rf_dack_rpt_log *dack;
struct rtw89_c2h_rf_tssi_rpt_log *tssi;
struct rtw89_c2h_rf_dpk_rpt_log *dpk;
@@ -3321,6 +3514,8 @@ static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
i, iqk->iqk_ch[i]);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_bw[%d] = %x\n",
i, iqk->iqk_bw[i]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->rf_0x18[%d] = %x\n",
+ i, le32_to_cpu(iqk->rf_0x18[i]));
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_idac[%d] = %x\n",
i, le32_to_cpu(iqk->lok_idac[i]));
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_vbuf[%d] = %x\n",
@@ -3329,22 +3524,30 @@ static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
i, iqk->iqk_tx_fail[i]);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_rx_fail[%d] = %x\n",
i, iqk->iqk_rx_fail[i]);
- for (j = 0; j < 4; j++)
+ for (j = 0; j < 6; j++)
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK] iqk->rftxgain[%d][%d] = %x\n",
i, j, le32_to_cpu(iqk->rftxgain[i][j]));
- for (j = 0; j < 4; j++)
+ for (j = 0; j < 6; j++)
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK] iqk->tx_xym[%d][%d] = %x\n",
i, j, le32_to_cpu(iqk->tx_xym[i][j]));
- for (j = 0; j < 4; j++)
+ for (j = 0; j < 6; j++)
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK] iqk->rfrxgain[%d][%d] = %x\n",
i, j, le32_to_cpu(iqk->rfrxgain[i][j]));
- for (j = 0; j < 4; j++)
+ for (j = 0; j < 6; j++)
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK] iqk->rx_xym[%d][%d] = %x\n",
i, j, le32_to_cpu(iqk->rx_xym[i][j]));
+
+ if (!iqk->iqk_xym_en)
+ continue;
+
+ for (j = 0; j < 32; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK] iqk->rx_wb_xym[%d][%d] = %x\n",
+ i, j, iqk->rx_wb_xym[i][j]);
}
return;
case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
@@ -3500,8 +3703,16 @@ static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
le32_to_cpu(txgapk->chk_cnt));
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt ver = 0x%x\n",
txgapk->ver);
- rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt rsv1 = %d\n",
- txgapk->rsv1);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt d_bnd_ok = %d\n",
+ txgapk->d_bnd_ok);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt stage[0] = 0x%x\n",
+ le32_to_cpu(txgapk->stage[0]));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt stage[1] = 0x%x\n",
+ le32_to_cpu(txgapk->stage[1]));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]failcode[0] = 0x%x\n",
+ le16_to_cpu(txgapk->failcode[0]));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]failcode[1] = 0x%x\n",
+ le16_to_cpu(txgapk->failcode[1]));
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[0] = %*ph\n",
(int)sizeof(txgapk->track_d[0]), txgapk->track_d[0]);
@@ -3517,7 +3728,14 @@ static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
goto out;
rtw89_phy_c2h_rfk_tas_pwr(rtwdev, content);
-
+ return;
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK:
+ if (len != sizeof(*txiqk))
+ goto out;
+ return;
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_CIM3K:
+ if (len != sizeof(*cim3k))
+ goto out;
return;
default:
break;
@@ -3654,6 +3872,20 @@ rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR, "TAS");
}
+static void
+rtw89_phy_c2h_rfk_log_txiqk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK, "TXIQK");
+}
+
+static void
+rtw89_phy_c2h_rfk_log_cim3k(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_CIM3K, "CIM3K");
+}
+
static
void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -3664,6 +3896,8 @@ void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
+ [RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK] = rtw89_phy_c2h_rfk_log_txiqk,
+ [RTW89_PHY_C2H_RFK_LOG_FUNC_CIM3K] = rtw89_phy_c2h_rfk_log_cim3k,
};
static
@@ -3752,6 +3986,7 @@ bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK:
return true;
default:
return false;
@@ -3776,7 +4011,7 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
switch (class) {
case RTW89_PHY_C2H_CLASS_RA:
- if (func < RTW89_PHY_C2H_FUNC_RA_MAX)
+ if (func < ARRAY_SIZE(rtw89_phy_c2h_ra_handler))
handler = rtw89_phy_c2h_ra_handler[func];
break;
case RTW89_PHY_C2H_RFK_LOG:
@@ -3808,13 +4043,22 @@ int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
{
int ret;
- rtw89_phy_rfk_report_prep(rtwdev);
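+	/* each pre-notify H2C is gated on its own firmware feature bit */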
+ if (RTW89_CHK_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY, &rtwdev->fw)) {
+ rtw89_phy_rfk_report_prep(rtwdev);
+ rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
+ ret = rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms);
+ if (ret)
+ return ret;
+ }
- ret = rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
- if (ret)
- return ret;
+ if (RTW89_CHK_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY_MCC, &rtwdev->fw)) {
+ ret = rtw89_fw_h2c_rf_pre_ntfy_mcc(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
- return rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
@@ -3921,6 +4165,40 @@ int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait);
+int rtw89_phy_rfk_txiqk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_txiqk(rtwdev, phy_idx, chan);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "TX_IQK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_txiqk_and_wait);
+
+int rtw89_phy_rfk_cim3k_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_cim3k(rtwdev, phy_idx, chan);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "CIM3k", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_cim3k_and_wait);
+
static u32 phy_tssi_get_cck_group(u8 ch)
{
switch (ch) {
@@ -4368,6 +4646,7 @@ void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
struct rtw89_h2c_rf_tssi *h2c)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
u8 ch = chan->channel;
s8 trim_de;
@@ -4391,9 +4670,14 @@ void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
cck_de = tssi_info->tssi_cck[i][gidx];
val = u32_get_bits(cck_de + trim_de, 0xff);
- h2c->curr_tssi_cck_de[i] = 0x0;
- h2c->curr_tssi_cck_de_20m[i] = val;
- h2c->curr_tssi_cck_de_40m[i] = val;
+ if (chip->chip_id == RTL8922A) {
+ h2c->curr_tssi_cck_de[i] = 0x0;
+ h2c->curr_tssi_cck_de_20m[i] = val;
+ h2c->curr_tssi_cck_de_40m[i] = val;
+ } else {
+ h2c->curr_tssi_cck_de[i] = val;
+ }
+
h2c->curr_tssi_efuse_cck_de[i] = cck_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -4402,12 +4686,17 @@ void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i);
val = u32_get_bits(ofdm_de + trim_de, 0xff);
- h2c->curr_tssi_ofdm_de[i] = 0x0;
- h2c->curr_tssi_ofdm_de_20m[i] = val;
- h2c->curr_tssi_ofdm_de_40m[i] = val;
- h2c->curr_tssi_ofdm_de_80m[i] = val;
- h2c->curr_tssi_ofdm_de_160m[i] = val;
- h2c->curr_tssi_ofdm_de_320m[i] = val;
+ if (chip->chip_id == RTL8922A) {
+ h2c->curr_tssi_ofdm_de[i] = 0x0;
+ h2c->curr_tssi_ofdm_de_20m[i] = val;
+ h2c->curr_tssi_ofdm_de_40m[i] = val;
+ h2c->curr_tssi_ofdm_de_80m[i] = val;
+ h2c->curr_tssi_ofdm_de_160m[i] = val;
+ h2c->curr_tssi_ofdm_de_320m[i] = val;
+ } else {
+ h2c->curr_tssi_ofdm_de[i] = val;
+ }
+
h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -4422,10 +4711,12 @@ void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
{
struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
const s8 *thm_up[RF_PATH_B + 1] = {};
const s8 *thm_down[RF_PATH_B + 1] = {};
u8 subband = chan->subband_type;
- s8 thm_ofst[128] = {0};
+ s8 thm_ofst[128] = {};
+ int multiplier;
u8 thermal;
u8 path;
u8 i, j;
@@ -4489,6 +4780,11 @@ void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
"[TSSI] tmeter tbl on subband: %u\n", subband);
+ if (chip->chip_id == RTL8922A)
+ multiplier = 1;
+ else
+ multiplier = -1;
+
for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
thermal = tssi_info->thermal[path];
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -4503,16 +4799,20 @@ void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
h2c->pg_thermal[path] = thermal;
i = 0;
- for (j = 0; j < 64; j++)
+ for (j = 0; j < 64; j++) {
thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
thm_up[path][i++] :
thm_up[path][DELTA_SWINGIDX_SIZE - 1];
+ thm_ofst[j] *= multiplier;
+ }
i = 1;
- for (j = 127; j >= 64; j--)
+ for (j = 127; j >= 64; j--) {
thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
-thm_down[path][i++] :
-thm_down[path][DELTA_SWINGIDX_SIZE - 1];
+ thm_ofst[j] *= multiplier;
+ }
for (i = 0; i < 128; i += 4) {
h2c->ftable[path][i + 0] = thm_ofst[i + 3];
@@ -6001,11 +6301,12 @@ static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev,
env->ifs_clm_his[1] =
rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
ccx->ifs_t2_his_mask, bb->phy_idx);
+
env->ifs_clm_his[2] =
- rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr2,
ccx->ifs_t3_his_mask, bb->phy_idx);
env->ifs_clm_his[3] =
- rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr2,
ccx->ifs_t4_his_mask, bb->phy_idx);
env->ifs_clm_avg[0] =
@@ -6268,14 +6569,16 @@ static bool rtw89_physts_ie_page_valid(struct rtw89_dev *rtwdev,
return true;
}
-static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page)
+static u32 rtw89_phy_get_ie_bitmap_addr(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_status_bitmap ie_page)
{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
static const u8 ie_page_shift = 2;
if (ie_page == RTW89_EHT_PKT)
- return R_PHY_STS_BITMAP_EHT;
+ return phy->physt_bmp_eht;
- return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift);
+ return phy->physt_bmp_start + (ie_page << ie_page_shift);
}
static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
@@ -6287,7 +6590,7 @@ static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
if (!rtw89_physts_ie_page_valid(rtwdev, &ie_page))
return 0;
- addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
+ addr = rtw89_phy_get_ie_bitmap_addr(rtwdev, ie_page);
return rtw89_phy_read32_idx(rtwdev, addr, MASKDWORD, phy_idx);
}
@@ -6305,7 +6608,7 @@ static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
if (chip->chip_id == RTL8852A)
val &= B_PHY_STS_BITMAP_MSK_52A;
- addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
+ addr = rtw89_phy_get_ie_bitmap_addr(rtwdev, ie_page);
rtw89_phy_write32_idx(rtwdev, addr, MASKDWORD, val, phy_idx);
}
@@ -6329,6 +6632,17 @@ static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
}
}
+static void rtw89_physts_enable_hdr_2(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->chip_gen == RTW89_CHIP_AX || chip->chip_id == RTL8922A)
+ return;
+
+ rtw89_phy_write32_idx_set(rtwdev, R_STS_HDR2_PARSING_BE4,
+ B_STS_HDR2_PARSING_BE4, phy_idx);
+}
+
static void __rtw89_physts_parsing_init(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
@@ -6338,6 +6652,9 @@ static void __rtw89_physts_parsing_init(struct rtw89_dev *rtwdev,
rtw89_physts_enable_fail_report(rtwdev, false, phy_idx);
+	/* enable hdr_2 for 8922D (PHYSTS_BE_GEN2 and above) */
+ rtw89_physts_enable_hdr_2(rtwdev, phy_idx);
+
for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
if (i == RTW89_RSVD_9 ||
(i == RTW89_EHT_PKT && chip->chip_gen == RTW89_CHIP_AX))
@@ -6703,6 +7020,9 @@ static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
{
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+ if (rtwdev->chip->chip_gen != RTW89_CHIP_AX)
+ return;
+
rtw89_phy_write32_idx(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
dig_regs->p0_p20_pagcugc_en.mask, enable, bb->phy_idx);
rtw89_phy_write32_idx(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
@@ -7762,6 +8082,7 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *b
bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80;
s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80;
u8 path, per20_bitmap = 0;
+ u8 pwdb_sel = 5;
u8 pwdb[8];
u32 tmp;
@@ -7773,12 +8094,14 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *b
else
edcca_p_regs = &edcca_regs->p[RTW89_PHY_0];
- if (rtwdev->chip->chip_id == RTL8922A)
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
- edcca_regs->rpt_sel_be_mask, 0);
-
rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
edcca_p_regs->rpt_sel_mask, 0);
+ if (rtwdev->chip->chip_id == RTL8922A || rtwdev->chip->chip_id == RTL8922D) {
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
+ edcca_regs->rpt_sel_be_mask, 0);
+ per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_p_regs->rpt_a,
+ MASKBYTE0);
+ }
tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK);
flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80);
@@ -7790,13 +8113,16 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *b
pwdb_p20 = u32_get_bits(tmp, MASKBYTE2);
pwdb_fb = u32_get_bits(tmp, MASKBYTE3);
+ if (rtwdev->chip->chip_id == RTL8922D)
+ pwdb_sel = 2;
+
rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
- edcca_p_regs->rpt_sel_mask, 5);
+ edcca_p_regs->rpt_sel_mask, pwdb_sel);
tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
pwdb_s80 = u32_get_bits(tmp, MASKBYTE1);
pwdb_s40 = u32_get_bits(tmp, MASKBYTE2);
- if (rtwdev->chip->chip_id == RTL8922A) {
+ if (rtwdev->chip->chip_id == RTL8922A || rtwdev->chip->chip_id == RTL8922D) {
rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
edcca_regs->rpt_sel_be_mask, 4);
tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
@@ -7804,8 +8130,6 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *b
pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
pwdb[2] = u32_get_bits(tmp, MASKBYTE1);
pwdb[3] = u32_get_bits(tmp, MASKBYTE0);
- per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_p_regs->rpt_a,
- MASKBYTE0);
rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
edcca_regs->rpt_sel_be_mask, 5);
@@ -8027,6 +8351,7 @@ static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
.ifs_his_addr = R_IFS_HIS,
+ .ifs_his_addr2 = R_IFS_HIS,
.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
@@ -8072,9 +8397,12 @@ static const struct rtw89_cfo_regs rtw89_cfo_regs_ax = {
const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
.cr_base = 0x10000,
+ .physt_bmp_start = R_PHY_STS_BITMAP_ADDR_START,
+ .physt_bmp_eht = 0xfc,
.ccx = &rtw89_ccx_regs_ax,
.physts = &rtw89_physts_regs_ax,
.cfo = &rtw89_cfo_regs_ax,
+ .bb_wrap = NULL,
.phy0_phy1_offset = rtw89_phy0_phy1_offset_ax,
.config_bb_gain = rtw89_phy_config_bb_gain_ax,
.preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 9caacffd0af8..ab263738d212 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -139,7 +139,9 @@ enum rtw89_phy_c2h_ra_func {
RTW89_PHY_C2H_FUNC_STS_RPT,
RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT,
RTW89_PHY_C2H_FUNC_TXSTS,
- RTW89_PHY_C2H_FUNC_RA_MAX,
+ RTW89_PHY_C2H_FUNC_ACCELERATE_EN = 0x7,
+
+ RTW89_PHY_C2H_FUNC_RA_NUM,
};
enum rtw89_phy_c2h_rfk_log_func {
@@ -150,6 +152,8 @@ enum rtw89_phy_c2h_rfk_log_func {
RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI = 4,
RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK = 5,
RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR = 9,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK = 0xc,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_CIM3K = 0xe,
RTW89_PHY_C2H_RFK_LOG_FUNC_NUM,
};
@@ -165,6 +169,8 @@ enum rtw89_phy_c2h_dm_func {
RTW89_PHY_C2H_DM_FUNC_SIGB,
RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY,
RTW89_PHY_C2H_DM_FUNC_MCC_DIG,
+ RTW89_PHY_C2H_DM_FUNC_LPS = 0x9,
+ RTW89_PHY_C2H_DM_FUNC_ENV_MNTR = 0xa,
RTW89_PHY_C2H_DM_FUNC_FW_SCAN = 0xc,
RTW89_PHY_C2H_DM_FUNC_NUM,
};
@@ -416,6 +422,7 @@ struct rtw89_ccx_regs {
u32 ifs_clm_ofdm_fa_mask;
u32 ifs_clm_cck_fa_mask;
u32 ifs_his_addr;
+ u32 ifs_his_addr2;
u32 ifs_t4_his_mask;
u32 ifs_t3_his_mask;
u32 ifs_t2_his_mask;
@@ -459,6 +466,11 @@ struct rtw89_cfo_regs {
u32 valid_0_mask;
};
+struct rtw89_bb_wrap_regs {
+ u32 pwr_macid_lmt;
+ u32 pwr_macid_path;
+};
+
enum rtw89_bandwidth_section_num_ax {
RTW89_BW20_SEC_NUM_AX = 8,
RTW89_BW40_SEC_NUM_AX = 4,
@@ -531,9 +543,12 @@ struct rtw89_phy_rfk_log_fmt {
struct rtw89_phy_gen_def {
u32 cr_base;
+ u32 physt_bmp_start;
+ u32 physt_bmp_eht;
const struct rtw89_ccx_regs *ccx;
const struct rtw89_physts_regs *physts;
const struct rtw89_cfo_regs *cfo;
+ const struct rtw89_bb_wrap_regs *bb_wrap;
u32 (*phy0_phy1_offset)(struct rtw89_dev *rtwdev, u32 addr);
void (*config_bb_gain)(struct rtw89_dev *rtwdev,
const struct rtw89_reg2_def *reg,
@@ -559,6 +574,7 @@ struct rtw89_phy_gen_def {
extern const struct rtw89_phy_gen_def rtw89_phy_gen_ax;
extern const struct rtw89_phy_gen_def rtw89_phy_gen_be;
+extern const struct rtw89_phy_gen_def rtw89_phy_gen_be_v1;
static inline void rtw89_phy_write8(struct rtw89_dev *rtwdev,
u32 addr, u8 data)
@@ -823,12 +839,16 @@ u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
+u32 rtw89_phy_read_rf_v3(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask);
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
+bool rtw89_phy_write_rf_v3(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
void rtw89_phy_init_bb_afe(struct rtw89_dev *rtwdev);
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio);
@@ -879,6 +899,12 @@ static inline void rtw89_phy_bb_wrap_init(struct rtw89_dev *rtwdev)
phy->bb_wrap_init(rtwdev);
}
+void rtw89_phy_bb_wrap_set_rfsi_ct_opt(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
+void rtw89_phy_bb_wrap_set_rfsi_bandedge_ch(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+
static inline void rtw89_phy_ch_info_init(struct rtw89_dev *rtwdev)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
@@ -1010,6 +1036,14 @@ int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan,
bool is_chl_k, unsigned int ms);
+int rtw89_phy_rfk_txiqk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
+ unsigned int ms);
+int rtw89_phy_rfk_cim3k_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
+ unsigned int ms);
void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
const struct rtw89_chan *chan,
diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c
index bd17714f13d1..08fd24a55d85 100644
--- a/drivers/net/wireless/realtek/rtw89/phy_be.c
+++ b/drivers/net/wireless/realtek/rtw89/phy_be.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2023 Realtek Corporation
*/
+#include "chan.h"
#include "debug.h"
#include "mac.h"
#include "phy.h"
@@ -44,6 +45,7 @@ static const struct rtw89_ccx_regs rtw89_ccx_regs_be = {
.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
.ifs_his_addr = R_IFS_HIS_V1,
+ .ifs_his_addr2 = R_IFS_HIS_V1,
.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
@@ -74,17 +76,99 @@ static const struct rtw89_ccx_regs rtw89_ccx_regs_be = {
.nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK,
};
+static const struct rtw89_ccx_regs rtw89_ccx_regs_be_v1 = {
+ .setting_addr = R_CCX_BE4,
+ .edcca_opt_mask = B_CCX_EDCCA_OPT_MSK_V1,
+ .measurement_trig_mask = B_MEASUREMENT_TRIG_MSK,
+ .trig_opt_mask = B_CCX_TRIG_OPT_MSK,
+ .en_mask = B_CCX_EN_MSK,
+ .ifs_cnt_addr = R_IFS_COUNTER_BE4,
+ .ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK,
+ .ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK,
+ .ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK,
+ .ifs_collect_en_mask = B_IFS_COLLECT_EN,
+ .ifs_t1_addr = R_IFS_T1_BE4,
+ .ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK,
+ .ifs_t1_en_mask = B_IFS_T1_EN_MSK,
+ .ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK,
+ .ifs_t2_addr = R_IFS_T2_BE4,
+ .ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK,
+ .ifs_t2_en_mask = B_IFS_T2_EN_MSK,
+ .ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK,
+ .ifs_t3_addr = R_IFS_T3_BE4,
+ .ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK,
+ .ifs_t3_en_mask = B_IFS_T3_EN_MSK,
+ .ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK,
+ .ifs_t4_addr = R_IFS_T4_BE4,
+ .ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK,
+ .ifs_t4_en_mask = B_IFS_T4_EN_MSK,
+ .ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK,
+ .ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT_BE4,
+ .ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK,
+ .ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK,
+ .ifs_clm_cca_addr = R_IFS_CLM_CCA_BE4,
+ .ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK,
+ .ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK,
+ .ifs_clm_fa_addr = R_IFS_CLM_FA_BE4,
+ .ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
+ .ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
+ .ifs_his_addr = R_IFS_T1_HIS_BE4,
+	.ifs_his_addr2 = R_IFS_T3_HIS_BE4, /* for T3/T4 */
+ .ifs_t4_his_mask = B_IFS_T4_HIS_BE4,
+ .ifs_t3_his_mask = B_IFS_T3_HIS_BE4,
+ .ifs_t2_his_mask = B_IFS_T2_HIS_BE4,
+ .ifs_t1_his_mask = B_IFS_T1_HIS_BE4,
+ .ifs_avg_l_addr = R_IFS_T1_AVG_BE4,
+ .ifs_t2_avg_mask = B_IFS_T2_AVG_BE4,
+ .ifs_t1_avg_mask = B_IFS_T1_AVG_BE4,
+ .ifs_avg_h_addr = R_IFS_T3_AVG_BE4,
+ .ifs_t4_avg_mask = B_IFS_T4_AVG_BE4,
+ .ifs_t3_avg_mask = B_IFS_T3_AVG_BE4,
+ .ifs_cca_l_addr = R_IFS_T1_CLM_BE4,
+ .ifs_t2_cca_mask = B_IFS_T2_CLM_BE4,
+ .ifs_t1_cca_mask = B_IFS_T1_CLM_BE4,
+ .ifs_cca_h_addr = R_IFS_T3_CLM_BE4,
+ .ifs_t4_cca_mask = B_IFS_T4_CLM_BE4,
+ .ifs_t3_cca_mask = B_IFS_T3_CLM_BE4,
+ .ifs_total_addr = R_IFS_TOTAL_BE4,
+ .ifs_cnt_done_mask = B_IFS_CNT_DONE_BE4,
+ .ifs_total_mask = B_IFS_TOTAL_BE4,
+};
+
static const struct rtw89_physts_regs rtw89_physts_regs_be = {
.setting_addr = R_PLCP_HISTOGRAM,
.dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
.dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
};
+static const struct rtw89_physts_regs rtw89_physts_regs_be_v1 = {
+ .setting_addr = R_PLCP_HISTOGRAM_BE_V1,
+ .dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
+ .dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
+};
+
static const struct rtw89_cfo_regs rtw89_cfo_regs_be = {
- .comp = R_DCFO_WEIGHT_V1,
- .weighting_mask = B_DCFO_WEIGHT_MSK_V1,
- .comp_seg0 = R_DCFO_OPT_V1,
- .valid_0_mask = B_DCFO_OPT_EN_V1,
+ .comp = R_DCFO_WEIGHT_BE,
+ .weighting_mask = B_DCFO_WEIGHT_MSK_BE,
+ .comp_seg0 = R_DCFO_OPT_BE,
+ .valid_0_mask = B_DCFO_OPT_EN_BE,
+};
+
+static const struct rtw89_cfo_regs rtw89_cfo_regs_be_v1 = {
+ .comp = R_DCFO_WEIGHT_BE_V1,
+ .weighting_mask = B_DCFO_WEIGHT_MSK_BE,
+ .comp_seg0 = R_DCFO_OPT_BE_V1,
+ .valid_0_mask = B_DCFO_OPT_EN_BE,
+};
+
+static const struct rtw89_bb_wrap_regs rtw89_bb_wrap_regs_be = {
+ .pwr_macid_lmt = R_BE_PWR_MACID_LMT_BASE,
+ .pwr_macid_path = R_BE_PWR_MACID_PATH_BASE,
+};
+
+static const struct rtw89_bb_wrap_regs rtw89_bb_wrap_regs_be_v1 = {
+ .pwr_macid_lmt = R_BE_PWR_MACID_LMT_BASE_V1,
+ .pwr_macid_path = R_BE_PWR_MACID_PATH_BASE_V1,
};
static u32 rtw89_phy0_phy1_offset_be(struct rtw89_dev *rtwdev, u32 addr)
@@ -105,6 +189,25 @@ static u32 rtw89_phy0_phy1_offset_be(struct rtw89_dev *rtwdev, u32 addr)
return ofst;
}
+static u32 rtw89_phy0_phy1_offset_be_v1(struct rtw89_dev *rtwdev, u32 addr)
+{
+ u32 phy_page = addr >> 8;
+ u32 ofst = 0;
+
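+	/* these PHY register pages are duplicated for PHY1 at +0x1000 */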
+ if ((phy_page >= 0x204 && phy_page <= 0x20F) ||
+ (phy_page >= 0x220 && phy_page <= 0x22F) ||
+ (phy_page >= 0x240 && phy_page <= 0x24f) ||
+ (phy_page >= 0x260 && phy_page <= 0x26f) ||
+ (phy_page >= 0x2C0 && phy_page <= 0x2C9) ||
+ (phy_page >= 0x2E4 && phy_page <= 0x2E8) ||
+ phy_page == 0x2EE)
+ ofst = 0x1000;
+ else
+ ofst = 0x0;
+
+ return ofst;
+}
+
union rtw89_phy_bb_gain_arg_be {
u32 addr;
struct {
@@ -301,40 +404,101 @@ static void rtw89_phy_preinit_rf_nctl_be(struct rtw89_dev *rtwdev)
}
}
+static void rtw89_phy_preinit_rf_nctl_be_v1(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C0_BE4, B_GOTX_IQKDPK, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C1_BE4, B_GOTX_IQKDPK, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK_BE4, B_IOQ_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST_BE4, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST_BE4, B_IQK_DPK_PRST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST_C1_BE4, B_IQK_DPK_PRST, 0x1);
+}
+
+static u32 rtw89_phy_bb_wrap_flush_addr(struct rtw89_dev *rtwdev, u32 addr)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
+ return 0;
+
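+	/* on 8922D CID7025 the MACID path and limit tables sit 0x800
+	 * apart; return the paired address so the caller can flush it
+	 * before writing
+	 */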
+ if (rtwdev->chip->chip_id == RTL8922D && hal->cid == RTL8922D_CID7025) {
+ if (addr >= R_BE_PWR_MACID_PATH_BASE_V1 &&
+ addr <= R_BE_PWR_MACID_PATH_BASE_V1 + 0xFF)
+ return addr + 0x800;
+
+ if (addr >= R_BE_PWR_MACID_LMT_BASE_V1 &&
+ addr <= R_BE_PWR_MACID_LMT_BASE_V1 + 0xFF)
+ return addr - 0x800;
+ }
+
+ return 0;
+}
+
+static
+void rtw89_write_bb_wrap_flush(struct rtw89_dev *rtwdev, u32 addr, u32 data)
+{
+	/* write pwr_macid_lmt/pwr_macid_path registers, flushing first if needed */
+ u32 flush_addr;
+ u32 val32;
+
+ flush_addr = rtw89_phy_bb_wrap_flush_addr(rtwdev, addr);
+ if (flush_addr) {
+ val32 = rtw89_read32(rtwdev, flush_addr);
+ rtw89_write32(rtwdev, flush_addr, val32);
+ }
+
+ rtw89_write32(rtwdev, addr, data);
+}
+
static
void rtw89_phy_bb_wrap_pwr_by_macid_init(struct rtw89_dev *rtwdev)
{
- u32 macid_idx, cr, base_macid_lmt, max_macid = 32;
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+ const struct rtw89_bb_wrap_regs *bb_wrap = phy->bb_wrap;
+ u32 max_macid = rtwdev->chip->support_macid_num;
+ u32 macid_idx, cr, base_macid_lmt;
- base_macid_lmt = R_BE_PWR_MACID_LMT_BASE;
+ base_macid_lmt = bb_wrap->pwr_macid_lmt;
for (macid_idx = 0; macid_idx < 4 * max_macid; macid_idx += 4) {
cr = base_macid_lmt + macid_idx;
- rtw89_write32(rtwdev, cr, 0x03007F7F);
+ rtw89_write_bb_wrap_flush(rtwdev, cr, 0);
}
}
static
void rtw89_phy_bb_wrap_tx_path_by_macid_init(struct rtw89_dev *rtwdev)
{
- int i, max_macid = 32;
- u32 cr = R_BE_PWR_MACID_PATH_BASE;
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+ const struct rtw89_bb_wrap_regs *bb_wrap = phy->bb_wrap;
+ u32 max_macid = rtwdev->chip->support_macid_num;
+ u32 cr = bb_wrap->pwr_macid_path;
+ int i;
for (i = 0; i < max_macid; i++, cr += 4)
- rtw89_write32(rtwdev, cr, 0x03C86000);
+ rtw89_write_bb_wrap_flush(rtwdev, cr, 0);
}
static void rtw89_phy_bb_wrap_tpu_set_all(struct rtw89_dev *rtwdev,
enum rtw89_mac_idx mac_idx)
{
- u32 addr;
+ u32 addr, t;
- for (addr = R_BE_PWR_BY_RATE; addr <= R_BE_PWR_BY_RATE_END; addr += 4)
- rtw89_write32(rtwdev, addr, 0);
- for (addr = R_BE_PWR_RULMT_START; addr <= R_BE_PWR_RULMT_END; addr += 4)
- rtw89_write32(rtwdev, addr, 0);
- for (addr = R_BE_PWR_RATE_OFST_CTRL; addr <= R_BE_PWR_RATE_OFST_END; addr += 4)
- rtw89_write32(rtwdev, addr, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM_SS, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_BY_RATE_DBW_ON, 0x3);
+
+ for (addr = R_BE_PWR_BY_RATE; addr <= R_BE_PWR_BY_RATE_END; addr += 4) {
+ t = rtw89_mac_reg_by_idx(rtwdev, addr, mac_idx);
+ rtw89_write32(rtwdev, t, 0);
+ }
+ for (addr = R_BE_PWR_RULMT_START; addr <= R_BE_PWR_RULMT_END; addr += 4) {
+ t = rtw89_mac_reg_by_idx(rtwdev, addr, mac_idx);
+ rtw89_write32(rtwdev, t, 0);
+ }
+ for (addr = R_BE_PWR_RATE_OFST_CTRL; addr <= R_BE_PWR_RATE_OFST_END; addr += 4) {
+ t = rtw89_mac_reg_by_idx(rtwdev, addr, mac_idx);
+ rtw89_write32(rtwdev, t, 0);
+ }
addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_REF_CTRL, mac_idx);
rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMT_DB, 0);
@@ -394,6 +558,332 @@ static void rtw89_phy_bb_wrap_ftm_init(struct rtw89_dev *rtwdev,
rtw89_write32_mask(rtwdev, addr, 0x7, 0);
}
+static u32 rtw89_phy_bb_wrap_be_bandedge_decision(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ u8 pri_ch = chan->primary_channel;
+ u32 val = 0;
+
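+	/* bits 0..3 of the returned value enable band-edge handling for
+	 * DBW20/40/80/160 respectively, consumed by
+	 * rtw89_phy_bb_wrap_set_rfsi_bandedge_ch()
+	 */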
+ switch (chan->band_type) {
+ default:
+ case RTW89_BAND_2G:
+ if (pri_ch == 1 || pri_ch == 13)
+ val = BIT(1) | BIT(0);
+ else if (pri_ch == 3 || pri_ch == 11)
+ val = BIT(1);
+ break;
+ case RTW89_BAND_5G:
+ if (pri_ch == 36 || pri_ch == 64 || pri_ch == 100)
+ val = BIT(3) | BIT(2) | BIT(1) | BIT(0);
+ else if (pri_ch == 40 || pri_ch == 60 || pri_ch == 104)
+ val = BIT(3) | BIT(2) | BIT(1);
+ else if ((pri_ch > 40 && pri_ch < 60) || pri_ch == 108 || pri_ch == 112)
+ val = BIT(3) | BIT(2);
+ else if (pri_ch > 112 && pri_ch < 132)
+ val = BIT(3);
+ break;
+ case RTW89_BAND_6G:
+ if (pri_ch == 233)
+ val = BIT(0);
+ break;
+ }
+
+ return val;
+}
+
+void rtw89_phy_bb_wrap_set_rfsi_ct_opt(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_RFSI_CT_OPT_0_BE4, phy_idx);
+ rtw89_write32(rtwdev, reg, 0x00010001);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_RFSI_CT_OPT_8_BE4, phy_idx);
+ rtw89_write32(rtwdev, reg, 0x00010001);
+}
+EXPORT_SYMBOL(rtw89_phy_bb_wrap_set_rfsi_ct_opt);
+
+void rtw89_phy_bb_wrap_set_rfsi_bandedge_ch(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 reg;
+ u32 val;
+
+ val = rtw89_phy_bb_wrap_be_bandedge_decision(rtwdev, chan);
+
+ rtw89_phy_write32_idx(rtwdev, R_TX_CFR_MANUAL_EN_BE4, B_TX_CFR_MANUAL_EN_BE4_M,
+ chan->primary_channel == 13, phy_idx);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BANDEDGE_DBWX_BE4, phy_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BANDEDGE_DBW20_BE4, val & BIT(0));
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BANDEDGE_DBWX_BE4, phy_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BANDEDGE_DBW40_BE4, (val & BIT(1)) >> 1);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BANDEDGE_DBWX_BE4, phy_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BANDEDGE_DBW80_BE4, (val & BIT(2)) >> 2);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BANDEDGE_DBWY_BE4, phy_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BANDEDGE_DBW160_BE4, (val & BIT(3)) >> 3);
+}
+EXPORT_SYMBOL(rtw89_phy_bb_wrap_set_rfsi_bandedge_ch);
+
+static void rtw89_phy_bb_wrap_tx_rfsi_qam_comp_th_init(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ /* TH0 */
+ rtw89_write32_idx(rtwdev, R_QAM_TH0_BE4, B_QAM_TH0_0_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH0_BE4, B_QAM_TH0_3_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH1_BE4, B_QAM_TH1_1_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH1_BE4, B_QAM_TH1_4_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH1_BE4, B_QAM_TH1_7_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH2_BE4, B_QAM_TH2_0_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH2_BE4, B_QAM_TH2_3_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH2_BE4, B_QAM_TH2_6_BE4, 0x1, mac_idx);
+ /* TH1 */
+ rtw89_write32_idx(rtwdev, R_QAM_TH0_BE4, B_QAM_TH0_1_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH0_BE4, B_QAM_TH0_4_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH1_BE4, B_QAM_TH1_2_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH1_BE4, B_QAM_TH1_5_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH1_BE4, B_QAM_TH1_8_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH2_BE4, B_QAM_TH2_1_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH2_BE4, B_QAM_TH2_4_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH2_BE4, B_QAM_TH2_7_BE4, 0x2, mac_idx);
+ /* TH2 */
+ rtw89_write32_idx(rtwdev, R_QAM_TH0_BE4, B_QAM_TH0_2_BE4, 0x4, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH1_BE4, B_QAM_TH1_0_BE4, 0x4, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH1_BE4, B_QAM_TH1_3_BE4, 0x4, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH1_BE4, B_QAM_TH1_6_BE4, 0x4, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH1_BE4, B_QAM_TH1_9_BE4, 0x4, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH2_BE4, B_QAM_TH2_2_BE4, 0x4, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH2_BE4, B_QAM_TH2_5_BE4, 0x4, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_TH2_BE4, B_QAM_TH2_8_BE4, 0x4, mac_idx);
+ /* DPD 160M */
+ rtw89_write32_idx(rtwdev, R_DPD_DBW160_TH0_BE4, B_DPD_DBW160_TH0_0_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_DBW160_TH0_BE4, B_DPD_DBW160_TH0_1_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_DBW160_TH0_BE4, B_DPD_DBW160_TH0_2_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_DBW160_TH0_BE4, B_DPD_DBW160_TH0_3_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_DBW160_TH0_BE4, B_DPD_DBW160_TH0_4_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_DBW160_TH1_BE4, B_DPD_DBW160_TH1_5_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_DBW160_TH1_BE4, B_DPD_DBW160_TH1_6_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_DBW160_TH1_BE4, B_DPD_DBW160_TH1_7_BE4, 0x1, mac_idx);
+ /* DPD 20M */
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH0_BE4, B_DPD_CBW20_TH0_0_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH0_BE4, B_DPD_CBW20_TH0_1_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH0_BE4, B_DPD_CBW20_TH0_2_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH0_BE4, B_DPD_CBW20_TH0_3_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH0_BE4, B_DPD_CBW20_TH0_4_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH0_BE4, B_DPD_CBW20_TH0_5_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH0_BE4, B_DPD_CBW20_TH0_6_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH1_BE4, B_DPD_CBW20_TH1_7_BE4, 0x2, mac_idx);
+ /* DPD 40M */
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH1_BE4, B_DPD_CBW40_TH1_0_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH1_BE4, B_DPD_CBW40_TH1_1_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH1_BE4, B_DPD_CBW40_TH1_2_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH1_BE4, B_DPD_CBW40_TH1_3_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH1_BE4, B_DPD_CBW40_TH1_4_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH1_BE4, B_DPD_CBW40_TH1_5_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH1_BE4, B_DPD_CBW40_TH1_6_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH1_BE4, B_DPD_CBW40_TH1_7_BE4, 0x2, mac_idx);
+ /* DPD 80M */
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH1_BE4, B_DPD_CBW80_TH1_0_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH2_BE4, B_DPD_CBW80_TH2_1_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH2_BE4, B_DPD_CBW80_TH2_2_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH2_BE4, B_DPD_CBW80_TH2_3_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH2_BE4, B_DPD_CBW80_TH2_4_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH2_BE4, B_DPD_CBW80_TH2_5_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH2_BE4, B_DPD_CBW80_TH2_6_BE4, 0x2, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW_TH2_BE4, B_DPD_CBW80_TH2_7_BE4, 0x2, mac_idx);
+ /* CIM3K */
+ rtw89_write32_idx(rtwdev, R_COMP_CIM3K_BE4, B_COMP_CIM3K_TH2_BE4, 0x2, mac_idx);
+}
+
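+/*
+ * The 0x1/0x2/0x4 values above look like one-hot selectors for the
+ * TH0/TH1/TH2 compensation levels. A minimal sketch of the same write
+ * pattern driven by an {addr, mask} table (struct rtw89_reg_def is
+ * assumed here purely for illustration):
+ *
+ *	static void qam_comp_th_write(struct rtw89_dev *rtwdev,
+ *				      const struct rtw89_reg_def *tbl, u32 n,
+ *				      u32 sel, enum rtw89_mac_idx mac_idx)
+ *	{
+ *		u32 i;
+ *
+ *		for (i = 0; i < n; i++)
+ *			rtw89_write32_idx(rtwdev, tbl[i].addr, tbl[i].mask,
+ *					  sel, mac_idx);
+ *	}
+ */
+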
+static void rtw89_phy_bb_wrap_tx_rfsi_scenario_def(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ rtw89_write32_idx(rtwdev, R_RFSI_CT_DEF_BE4, B_RFSI_CT_ER_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_RFSI_CT_DEF_BE4, B_RFSI_CT_SUBF_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_RFSI_CT_DEF_BE4, B_RFSI_CT_FTM_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_RFSI_CT_DEF_BE4, B_RFSI_CT_SENS_BE4, 0x0, mac_idx);
+
+ rtw89_write32_idx(rtwdev, R_FBTB_CT_DEF_BE4, B_FBTB_CT_DEF_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_FBTB_CT_DEF_BE4, B_FBTB_CT_PB_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_FBTB_CT_DEF_BE4, B_FBTB_CT_DL_WO_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_FBTB_CT_DEF_BE4, B_FBTB_CT_DL_BF_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_FBTB_CT_DEF_BE4, B_FBTB_CT_MUMIMO_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_FBTB_CT_DEF_BE4, B_FBTB_CT_FTM_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_FBTB_CT_DEF_BE4, B_FBTB_CT_SENS_BE4, 0x0, mac_idx);
+}
+
+static void rtw89_phy_bb_wrap_tx_rfsi_qam_comp_val(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH0_BE4, MASKLWORD, 0x4010, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH0_BE4, MASKHWORD, 0x4410, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH1_BE4, MASKLWORD, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH1_BE4, MASKHWORD, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH2_BE4, MASKLWORD, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH2_BE4, MASKHWORD, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH3_BE4, MASKLWORD, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH3_BE4, MASKHWORD, 0x0, mac_idx);
+
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH4_BE4, B_QAM_COMP_TH4_L, 0x8, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH4_BE4, B_QAM_COMP_TH4_M, 0x8, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH4_BE4, B_QAM_COMP_TH4_H, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH5_BE4, B_QAM_COMP_TH5_L, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH5_BE4, B_QAM_COMP_TH5_M, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH5_BE4, B_QAM_COMP_TH5_H, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH6_BE4, B_QAM_COMP_TH6_L, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH6_BE4, B_QAM_COMP_TH6_M, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH4_BE4, B_QAM_COMP_TH4_2L, 0x8, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH4_BE4, B_QAM_COMP_TH4_2M, 0x8, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH4_BE4, B_QAM_COMP_TH4_2H, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH5_BE4, B_QAM_COMP_TH5_2L, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH5_BE4, B_QAM_COMP_TH5_2M, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH5_BE4, B_QAM_COMP_TH5_2H, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH6_BE4, B_QAM_COMP_TH6_2L, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_QAM_COMP_TH6_BE4, B_QAM_COMP_TH6_2M, 0x0, mac_idx);
+
+ rtw89_write32_idx(rtwdev, R_OW_VAL_0_BE4, MASKLWORD, 0x4010, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OW_VAL_0_BE4, MASKHWORD, 0x4010, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OW_VAL_1_BE4, MASKLWORD, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OW_VAL_1_BE4, MASKHWORD, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OW_VAL_2_BE4, MASKLWORD, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OW_VAL_2_BE4, MASKHWORD, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OW_VAL_3_BE4, MASKLWORD, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OW_VAL_3_BE4, MASKHWORD, 0x0, mac_idx);
+}
+
+static void rtw89_phy_bb_set_oob_dpd_qam_comp_val(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_CCK0_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_CCK1_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_CCK2_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_CCK3_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_CCK4_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_CCK5_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_CCK6_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_CCK7_BE4, 0x0, mac_idx);
+
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_CCK0_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_CCK1_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_CCK2_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_CCK3_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_CCK4_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_CCK5_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_CCK6_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_CCK7_BE4, 0x0, mac_idx);
+
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_TH0_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_TH1_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_TH2_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_TH3_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_TH4_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_TH5_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_TH6_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW20_BE4, B_OOB_CBW20_TH7_BE4, 0x0, mac_idx);
+
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_TH0_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_TH1_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_TH2_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_TH3_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_TH4_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_TH5_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_TH6_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_TH7_BE4, 0x0, mac_idx);
+
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_TH0_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_TH1_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_TH2_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_TH3_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_TH4_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_TH5_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_TH6_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_TH7_BE4, 0x0, mac_idx);
+
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW20_OW0_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW20_OW1_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW20_OW2_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW20_OW3_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW20_OW4_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW20_OW5_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW20_OW6_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW20_OW7_BE4, 0x0, mac_idx);
+
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_OW0_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_OW1_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_OW2_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_OW3_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_OW4_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_OW5_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_OW6_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW40_BE4, B_OOB_CBW40_OW7_BE4, 0x0, mac_idx);
+
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_OW0_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_OW1_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_OW2_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_OW3_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_OW4_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_OW5_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_OW6_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_OOB_CBW80_BE4, B_OOB_CBW80_OW7_BE4, 0x0, mac_idx);
+}
+
+static void rtw89_phy_bb_set_mdpd_qam_comp_val(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_TH0_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_TH1_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_TH2_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_TH3_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_TH4_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_TH5_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_TH6_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_TH7_BE4, 0x0, mac_idx);
+
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_OW0_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_OW1_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_OW2_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_OW3_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_OW4_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_OW5_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_OW6_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_DPD_CBW160_BE4, B_DPD_CBW160_OW7_BE4, 0x0, mac_idx);
+}
+
+static void rtw89_phy_bb_set_cim3k_val(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ rtw89_write32_idx(rtwdev, R_COMP_CIM3K_BE4, B_COMP_CIM3K_TH_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_COMP_CIM3K_BE4, B_COMP_CIM3K_OW_BE4, 0x0, mac_idx);
+ rtw89_write32_idx(rtwdev, R_COMP_CIM3K_BE4, B_COMP_CIM3K_NONBE_BE4, 0x1, mac_idx);
+ rtw89_write32_idx(rtwdev, R_COMP_CIM3K_BE4, B_COMP_CIM3K_BANDEDGE_BE4, 0x1, mac_idx);
+}
+
+static void rtw89_phy_bb_wrap_tx_rfsi_ctrl_init(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ enum rtw89_phy_idx phy_idx = mac_idx != RTW89_MAC_0 ? RTW89_PHY_1 : RTW89_PHY_0;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ const struct rtw89_chan *chan;
+
+ if (chip_id != RTL8922D)
+ return;
+
+ rtw89_phy_bb_wrap_tx_rfsi_qam_comp_th_init(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_tx_rfsi_scenario_def(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_tx_rfsi_qam_comp_val(rtwdev, mac_idx);
+ rtw89_phy_bb_set_oob_dpd_qam_comp_val(rtwdev, mac_idx);
+ rtw89_phy_bb_set_mdpd_qam_comp_val(rtwdev, mac_idx);
+ rtw89_phy_bb_set_cim3k_val(rtwdev, mac_idx);
+
+ rtw89_phy_bb_wrap_set_rfsi_ct_opt(rtwdev, phy_idx);
+
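+ /* chan can be NULL when no channel has been set yet; the exported
+  * helper above can redo the bandedge setup once one exists
+  */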
+ chan = rtw89_mgnt_chan_get(rtwdev, phy_idx);
+ if (chan)
+ rtw89_phy_bb_wrap_set_rfsi_bandedge_ch(rtwdev, chan, phy_idx);
+}
+
static void rtw89_phy_bb_wrap_ul_pwr(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -414,12 +904,13 @@ static void rtw89_phy_bb_wrap_ul_pwr(struct rtw89_dev *rtwdev)
static void __rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev,
enum rtw89_mac_idx mac_idx)
{
- rtw89_phy_bb_wrap_pwr_by_macid_init(rtwdev);
rtw89_phy_bb_wrap_tx_path_by_macid_init(rtwdev);
- rtw89_phy_bb_wrap_listen_path_en_init(rtwdev);
+ rtw89_phy_bb_wrap_pwr_by_macid_init(rtwdev);
+ rtw89_phy_bb_wrap_tpu_set_all(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_tx_rfsi_ctrl_init(rtwdev, mac_idx);
rtw89_phy_bb_wrap_force_cr_init(rtwdev, mac_idx);
rtw89_phy_bb_wrap_ftm_init(rtwdev, mac_idx);
- rtw89_phy_bb_wrap_tpu_set_all(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_listen_path_en_init(rtwdev);
rtw89_phy_bb_wrap_ul_pwr(rtwdev);
}
@@ -441,6 +932,14 @@ static void rtw89_phy_ch_info_init_be(struct rtw89_dev *rtwdev)
rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_SCAL, 0x0);
}
+static void rtw89_phy_ch_info_init_be_v1(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG_BE4, B_CHINFO_SEG_LEN_BE4, 0);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_OPT_BE4, B_CHINFO_OPT_BE4, 0x3);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_NX_BE4, B_CHINFO_NX_BE4, 0x669);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ALG_BE4, B_CHINFO_ALG_BE4, 0);
+}
+
struct rtw89_byr_spec_ent_be {
struct rtw89_rate_desc init;
u8 num_of_idx;
@@ -1004,9 +1503,12 @@ static void rtw89_phy_set_txpwr_limit_ru_be(struct rtw89_dev *rtwdev,
const struct rtw89_phy_gen_def rtw89_phy_gen_be = {
.cr_base = 0x20000,
+ .physt_bmp_start = R_PHY_STS_BITMAP_ADDR_START,
+ .physt_bmp_eht = R_PHY_STS_BITMAP_EHT,
.ccx = &rtw89_ccx_regs_be,
.physts = &rtw89_physts_regs_be,
.cfo = &rtw89_cfo_regs_be,
+ .bb_wrap = &rtw89_bb_wrap_regs_be,
.phy0_phy1_offset = rtw89_phy0_phy1_offset_be,
.config_bb_gain = rtw89_phy_config_bb_gain_be,
.preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_be,
@@ -1019,3 +1521,24 @@ const struct rtw89_phy_gen_def rtw89_phy_gen_be = {
.set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_be,
};
EXPORT_SYMBOL(rtw89_phy_gen_be);
+
+const struct rtw89_phy_gen_def rtw89_phy_gen_be_v1 = {
+ .cr_base = 0x0,
+ .physt_bmp_start = R_PHY_STS_BITMAP_ADDR_START_BE4,
+ .physt_bmp_eht = R_PHY_STS_BITMAP_EHT_BE4,
+ .ccx = &rtw89_ccx_regs_be_v1,
+ .physts = &rtw89_physts_regs_be_v1,
+ .cfo = &rtw89_cfo_regs_be_v1,
+ .bb_wrap = &rtw89_bb_wrap_regs_be_v1,
+ .phy0_phy1_offset = rtw89_phy0_phy1_offset_be_v1,
+ .config_bb_gain = rtw89_phy_config_bb_gain_be,
+ .preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_be_v1,
+ .bb_wrap_init = rtw89_phy_bb_wrap_init_be,
+ .ch_info_init = rtw89_phy_ch_info_init_be_v1,
+
+ .set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_be,
+ .set_txpwr_offset = rtw89_phy_set_txpwr_offset_be,
+ .set_txpwr_limit = rtw89_phy_set_txpwr_limit_be,
+ .set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_be,
+};
+EXPORT_SYMBOL(rtw89_phy_gen_be_v1);
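+
+/*
+ * Sketch of how a chip generation picks this up (the chip file itself
+ * is outside this hunk; the struct name below is illustrative):
+ *
+ *	const struct rtw89_chip_info rtw8922d_chip_info = {
+ *		...
+ *		.phy_def = &rtw89_phy_gen_be_v1,
+ *		...
+ *	};
+ */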
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 3f69dd4361c3..aad2ee7926d6 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -16,7 +16,7 @@
static int rtw89_fw_receive_lps_h2c_check(struct rtw89_dev *rtwdev, u8 macid)
{
- struct rtw89_mac_c2h_info c2h_info = {};
+ struct rtw89_mac_c2h_info c2h_info = { .timeout = 5000 };
u16 c2hreg_macid;
u32 c2hreg_ret;
int ret;
@@ -189,6 +189,8 @@ void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
if (RTW89_CHK_FW_FEATURE(LPS_CH_INFO, &rtwdev->fw))
rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
+ else if (RTW89_CHK_FW_FEATURE(LPS_ML_INFO_V1, &rtwdev->fw))
+ rtw89_fw_h2c_lps_ml_cmn_info_v1(rtwdev, rtwvif);
else
rtw89_fw_h2c_lps_ml_cmn_info(rtwdev, rtwvif);
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 5b4a459cf29c..9b605617c3f0 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -149,6 +149,7 @@
#define R_AX_WLLPS_CTRL 0x0090
#define B_AX_LPSOP_ASWRM BIT(17)
#define B_AX_LPSOP_DSWRM BIT(9)
+#define B_AX_FORCE_LEAVE_LPS BIT(3)
#define B_AX_DIS_WLBT_LPSEN_LOPC BIT(1)
#define SW_LPS_OPTION 0x0001A0B2
@@ -313,13 +314,18 @@
#define R_AX_IC_PWR_STATE 0x03F0
#define B_AX_WHOLE_SYS_PWR_STE_MASK GENMASK(25, 16)
#define B_AX_WLMAC_PWR_STE_MASK GENMASK(9, 8)
+#define MAC_AX_MAC_OFF 0
+#define MAC_AX_MAC_ON 1
+#define MAC_AX_MAC_LPS 2
#define B_AX_UART_HCISYS_PWR_STE_MASK GENMASK(7, 6)
#define B_AX_SDIO_HCISYS_PWR_STE_MASK GENMASK(5, 4)
#define B_AX_USB_HCISYS_PWR_STE_MASK GENMASK(3, 2)
#define B_AX_PCIE_HCISYS_PWR_STE_MASK GENMASK(1, 0)
#define R_AX_SPS_DIG_OFF_CTRL0 0x0400
+#define B_AX_R1_L1_MASK GENMASK(7, 6)
#define B_AX_C3_L1_MASK GENMASK(5, 4)
+#define B_AX_C2_L1_MASK GENMASK(3, 2)
#define B_AX_C1_L1_MASK GENMASK(1, 0)
#define R_AX_AFE_OFF_CTRL1 0x0444
@@ -603,6 +609,9 @@
#define R_AX_SER_DBG_INFO 0x8424
#define B_AX_L0_TO_L1_EVENT_MASK GENMASK(31, 28)
+#define B_AX_SER_L1_COUNTER_MASK GENMASK(27, 24)
+#define B_AX_RMAC_PPDU_HANG_CNT_MASK GENMASK(23, 16)
+#define B_AX_SER_L0_COUNTER_MASK GENMASK(7, 0)
#define R_AX_DLE_EMPTY0 0x8430
#define B_AX_PLE_EMPTY_QTA_DMAC_CPUIO BIT(26)
@@ -1958,7 +1967,9 @@
#define B_AX_B0_PRELD_FEN BIT(31)
#define B_AX_B0_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
#define PRELD_B0_ENT_NUM 10
+#define PRELD_B01_ENT_NUM_8922D 2
#define PRELD_AMSDU_SIZE 52
+#define PRELD_NEXT_MIN_SIZE 255
#define B_AX_B0_PRELD_CAM_G1ENTNUM_MASK GENMASK(12, 8)
#define B_AX_B0_PRELD_CAM_G0ENTNUM_MASK GENMASK(4, 0)
@@ -2094,6 +2105,7 @@
#define B_AX_B1_ISR_ERR_USRCTL_REINIT BIT(0)
#define R_AX_AFE_CTRL1 0x0024
+#define B_AX_CMAC_CLK_SEL BIT(21)
#define B_AX_R_SYM_WLCMAC1_P4_PC_EN BIT(4)
#define B_AX_R_SYM_WLCMAC1_P3_PC_EN BIT(3)
@@ -2107,6 +2119,12 @@
#define B_AX_R_SYM_FEN_WLBBFUN_1 BIT(16)
#define B_AX_R_SYM_ISO_CMAC12PP BIT(5)
+#define R_AX_SYSON_FSM_MON 0x00A0
+#define B_AX_FSM_MON_SEL_MASK GENMASK(26, 24)
+#define B_AX_DOP_ELDO BIT(23)
+#define B_AX_FSM_MON_UPD BIT(15)
+#define B_AX_FSM_PAR_MASK GENMASK(14, 0)
+
#define R_AX_CMAC_REG_START 0xC000
#define R_AX_CMAC_FUNC_EN 0xC000
@@ -3813,6 +3831,7 @@
#define B_BE_EN_WLON BIT(16)
#define B_BE_APDM_HPDN BIT(15)
#define B_BE_PSUS_OFF_CAPC_EN BIT(14)
+#define B_BE_USUS_OFFCAPC_EN BIT(13)
#define B_BE_AFSM_PCIE_SUS_EN BIT(12)
#define B_BE_AFSM_WLSUS_EN BIT(11)
#define B_BE_APFM_SWLPS BIT(10)
@@ -3881,6 +3900,8 @@
#define B_BE_SYM_PADPDN_WL_RFC0_1P3 BIT(5)
#define R_BE_RSV_CTRL 0x001C
+#define B_BE_R_SYM_PRST_CPHY_RST BIT(25)
+#define B_BE_R_SYM_PRST_PDN_EN BIT(24)
#define B_BE_HR_BE_DBG GENMASK(23, 12)
#define B_BE_R_SYM_DIS_PCIE_FLR BIT(9)
#define B_BE_R_EN_HRST_PWRON BIT(8)
@@ -3927,6 +3948,11 @@
#define B_BE_R_SYM_WLCMAC0_P2_PC_EN BIT(26)
#define B_BE_R_SYM_WLCMAC0_P1_PC_EN BIT(25)
#define B_BE_R_SYM_WLCMAC0_PC_EN BIT(24)
+#define B_BE_R_SYM_WLCMAC0_ALL_EN (B_BE_R_SYM_WLCMAC0_PC_EN | \
+ B_BE_R_SYM_WLCMAC0_P1_PC_EN | \
+ B_BE_R_SYM_WLCMAC0_P2_PC_EN | \
+ B_BE_R_SYM_WLCMAC0_P3_PC_EN | \
+ B_BE_R_SYM_WLCMAC0_P4_PC_EN)
#define B_BE_DATAMEM_PC3_EN BIT(23)
#define B_BE_DATAMEM_PC2_EN BIT(22)
#define B_BE_DATAMEM_PC1_EN BIT(21)
@@ -3948,11 +3974,11 @@
#define B_BE_R_SYM_WLCMAC1_P2_PC_EN BIT(2)
#define B_BE_R_SYM_WLCMAC1_P1_PC_EN BIT(1)
#define B_BE_R_SYM_WLCMAC1_PC_EN BIT(0)
-#define B_BE_AFE_CTRL1_SET (B_BE_R_SYM_WLCMAC1_PC_EN | \
- B_BE_R_SYM_WLCMAC1_P1_PC_EN | \
- B_BE_R_SYM_WLCMAC1_P2_PC_EN | \
- B_BE_R_SYM_WLCMAC1_P3_PC_EN | \
- B_BE_R_SYM_WLCMAC1_P4_PC_EN)
+#define B_BE_R_SYM_WLCMAC1_ALL_EN (B_BE_R_SYM_WLCMAC1_PC_EN | \
+ B_BE_R_SYM_WLCMAC1_P1_PC_EN | \
+ B_BE_R_SYM_WLCMAC1_P2_PC_EN | \
+ B_BE_R_SYM_WLCMAC1_P3_PC_EN | \
+ B_BE_R_SYM_WLCMAC1_P4_PC_EN)
#define R_BE_EFUSE_CTRL 0x0030
#define B_BE_EF_MODE_SEL_MASK GENMASK(31, 30)
@@ -4015,6 +4041,7 @@
#define R_BE_SYS_SDIO_CTRL 0x0070
#define B_BE_MCM_FLASH_EN BIT(28)
+#define B_BE_SER_DETECT_EN BIT(26)
#define B_BE_PCIE_SEC_LOAD BIT(26)
#define B_BE_PCIE_SER_RSTB BIT(25)
#define B_BE_PCIE_SEC_LOAD_CLR BIT(24)
@@ -4172,6 +4199,22 @@
#define B_BE_LPSROP_LOWPWRPLL BIT(7)
#define B_BE_LPSROP_DSWRSD_SEL_MASK GENMASK(5, 4)
+#define R_BE_SYSON_FSM_MON 0x00A0
+#define B_BE_FSM_MON_SEL_MASK GENMASK(26, 24)
+#define B_BE_DOP_ELDO BIT(23)
+#define B_BE_AFE_PLL_BYPASS BIT(22)
+#define B_BE_PON_SWR_BYPASS BIT(21)
+#define B_BE_PON_ADIE_BYPASS BIT(20)
+#define B_BE_AFE_LS_BYPASS BIT(19)
+#define B_BE_BTPMC_XTAL_SI_BYPASS BIT(17)
+#define B_BE_WLPMC_XTAL_SI_BYPASS BIT(16)
+#define B_BE_FSM_MON_UPD BIT(15)
+#define B_BE_FSM_PAR_MASK GENMASK(14, 0)
+#define WLAN_FSM_MASK 0xFFFFFF
+#define WLAN_FSM_SET 0x4000000
+#define WLAN_FSM_STATE_MASK 0x1FF
+#define WLAN_FSM_IDLE 0
+
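+/*
+ * Assumed usage sketch for the FSM monitor: select a source, latch it
+ * with the UPD bit, then read the state back through the PAR field:
+ *
+ *	rtw89_write32_mask(rtwdev, R_BE_SYSON_FSM_MON,
+ *			   B_BE_FSM_MON_SEL_MASK, sel);
+ *	rtw89_write32_set(rtwdev, R_BE_SYSON_FSM_MON, B_BE_FSM_MON_UPD);
+ *	state = rtw89_read32_mask(rtwdev, R_BE_SYSON_FSM_MON,
+ *				  B_BE_FSM_PAR_MASK) & WLAN_FSM_STATE_MASK;
+ */
+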
#define R_BE_EFUSE_CTRL_2_V1 0x00A4
#define B_BE_EF_ENT BIT(31)
#define B_BE_EF_TCOLUMN_EN BIT(29)
@@ -4188,6 +4231,10 @@
#define B_BE_EF_DSB_EN BIT(11)
#define B_BE_EF_DLY_SEL_MASK GENMASK(3, 0)
+#define R_BE_SCOREBOARD 0x00AC
+#define B_BE_TOGGLE BIT(31)
+#define B_BE_DATA_LINE_MASK GENMASK(30, 0)
+
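+/*
+ * Sketch of a BT scoreboard update, assumed to mirror the AX-generation
+ * coex flow: rewrite the data lines and flip the toggle bit so the BT
+ * side sees a fresh value.
+ *
+ *	val = rtw89_read32(rtwdev, R_BE_SCOREBOARD);
+ *	val = u32_replace_bits(val, data, B_BE_DATA_LINE_MASK);
+ *	val ^= B_BE_TOGGLE;
+ *	rtw89_write32(rtwdev, R_BE_SCOREBOARD, val);
+ */
+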
#define R_BE_PMC_DBG_CTRL2 0x00CC
#define B_BE_EFUSE_BURN_GNT_MASK GENMASK(31, 24)
#define B_BE_DIS_IOWRAP_TIMEOUT BIT(16)
@@ -4237,6 +4284,13 @@
#define R_BE_PCIE_MIO_INTD 0x00E8
#define B_BE_PCIE_MIO_DATA_MASK GENMASK(31, 0)
+#define R_BE_SYS_CHIPINFO 0x00FC
+#define B_BE_USB2_SEL BIT(31)
+#define B_BE_U3PHY_RST_V1 BIT(30)
+#define B_BE_U3_TERM_DETECT BIT(29)
+#define B_BE_VERIFY_ENV_MASK GENMASK(9, 8)
+#define B_BE_HW_ID_MASK GENMASK(7, 0)
+
#define R_BE_HALT_H2C_CTRL 0x0160
#define B_BE_HALT_H2C_TRIGGER BIT(0)
@@ -4261,6 +4315,72 @@
#define R_BE_SECURE_BOOT_MALLOC_INFO 0x0184
+#define R_BE_FWS0IMR 0x0190
+#define B_BE_FS_HALT_H2C_INT_EN BIT(31)
+#define B_BE_FS_FSM_HIOE_TO_EVENT_INT_EN BIT(30)
+#define B_BE_FS_HCI_SUS_INT_EN BIT(29)
+#define B_BE_FS_HCI_RES_INT_EN BIT(28)
+#define B_BE_FS_HCI_RESET_INT_EN BIT(27)
+#define B_BE_FS_BT_SB1_INT_EN BIT(26)
+#define B_BE_FS_ACT2RECOVERY_INT_EN BIT(25)
+#define B_BE_FS_GEN1GEN2_SWITCH_INT_EN BIT(24)
+#define B_BE_FS_USB_LPMRSM_INT_EN BIT(22)
+#define B_BE_FS_USB_LPMINT_INT_EN BIT(21)
+#define B_BE_FS_PWMERR_INT_EN BIT(20)
+#define B_BE_FS_PDNINT_EN BIT(19)
+#define B_BE_FS_SPSA_OCP_INT_EN BIT(18)
+#define B_BE_FS_SPSD_OCP_INT_EN BIT(17)
+#define B_BE_FS_BT_SB0_INT_EN BIT(16)
+#define B_BE_FS_GPIOF_INT_EN BIT(15)
+#define B_BE_FS_GPIOE_INT_EN BIT(14)
+#define B_BE_FS_GPIOD_INT_EN BIT(13)
+#define B_BE_FS_GPIOC_INT_EN BIT(12)
+#define B_BE_FS_GPIOB_INT_EN BIT(11)
+#define B_BE_FS_GPIOA_INT_EN BIT(10)
+#define B_BE_FS_GPIO9_INT_EN BIT(9)
+#define B_BE_FS_GPIO8_INT_EN BIT(8)
+#define B_BE_FS_GPIO7_INT_EN BIT(7)
+#define B_BE_FS_GPIO6_INT_EN BIT(6)
+#define B_BE_FS_GPIO5_INT_EN BIT(5)
+#define B_BE_FS_GPIO4_INT_EN BIT(4)
+#define B_BE_FS_GPIO3_INT_EN BIT(3)
+#define B_BE_FS_GPIO2_INT_EN BIT(2)
+#define B_BE_FS_GPIO1_INT_EN BIT(1)
+#define B_BE_FS_GPIO0_INT_EN BIT(0)
+
+#define R_BE_FWS0ISR 0x0194
+#define B_BE_FS_HALT_H2C_INT BIT(31)
+#define B_BE_FS_FSM_HIOE_TO_EVENT_INT BIT(30)
+#define B_BE_FS_HCI_SUS_INT BIT(29)
+#define B_BE_FS_HCI_RES_INT BIT(28)
+#define B_BE_FS_HCI_RESET_INT BIT(27)
+#define B_BE_FS_BT_SB1_INT BIT(26)
+#define B_BE_FS_ACT2RECOVERY_INT BIT(25)
+#define B_BE_FS_GEN1GEN2_SWITCH_INT BIT(24)
+#define B_BE_FS_USB_LPMRSM_INT BIT(22)
+#define B_BE_FS_USB_LPMINT_INT BIT(21)
+#define B_BE_FS_PWMERR_INT BIT(20)
+#define B_BE_FS_PDNINT BIT(19)
+#define B_BE_FS_SPSA_OCP_INT BIT(18)
+#define B_BE_FS_SPSD_OCP_INT BIT(17)
+#define B_BE_FS_BT_SB0_INT BIT(16)
+#define B_BE_FS_GPIOF_INT BIT(15)
+#define B_BE_FS_GPIOE_INT BIT(14)
+#define B_BE_FS_GPIOD_INT BIT(13)
+#define B_BE_FS_GPIOC_INT BIT(12)
+#define B_BE_FS_GPIOB_INT BIT(11)
+#define B_BE_FS_GPIOA_INT BIT(10)
+#define B_BE_FS_GPIO9_INT BIT(9)
+#define B_BE_FS_GPIO8_INT BIT(8)
+#define B_BE_FS_GPIO7_INT BIT(7)
+#define B_BE_FS_GPIO6_INT BIT(6)
+#define B_BE_FS_GPIO5_INT BIT(5)
+#define B_BE_FS_GPIO4_INT BIT(4)
+#define B_BE_FS_GPIO3_INT BIT(3)
+#define B_BE_FS_GPIO2_INT BIT(2)
+#define B_BE_FS_GPIO1_INT BIT(1)
+#define B_BE_FS_GPIO0_INT BIT(0)
+
#define R_BE_FWS1IMR 0x0198
#define B_BE_FS_RPWM_INT_EN_V1 BIT(24)
#define B_BE_PCIE_HOTRST_EN BIT(22)
@@ -4447,6 +4567,16 @@
#define B_BE_WL_XTAL_SI_DATA_MASK GENMASK(15, 8)
#define B_BE_WL_XTAL_SI_ADDR_MASK GENMASK(7, 0)
+#define R_BE_PCIE_SER_DBG 0x02FC
+#define B_BE_PCIE_SER_DBG_MASK GENMASK(31, 10)
+#define B_BE_PCIE_SER_PHY_PROTECT BIT(9)
+#define B_BE_PCIE_SER_MAC_PROTECT BIT(8)
+#define B_BE_PCIE_SER_FLUSH_RSTB BIT(4)
+#define B_BE_PCIE_AXI_BRG_FLUSH_EN BIT(3)
+#define B_BE_PCIE_SER_AUXCLK_RDY BIT(2)
+#define B_BE_PCIE_SER_FRZ_REG_RST BIT(1)
+#define B_BE_PCIE_SER_FRZ_CFG_SPC_RST BIT(0)
+
#define R_BE_IC_PWR_STATE 0x03F0
#define B_BE_WHOLE_SYS_PWR_STE_MASK GENMASK(25, 16)
#define MAC_AX_SYS_ACT 0x220
@@ -4599,6 +4729,10 @@
#define R_BE_LTR_LATENCY_IDX2_V1 0x361C
#define R_BE_LTR_LATENCY_IDX3_V1 0x3620
+#define R_BE_HCI_BUF_IMR 0x6018
+#define B_BE_HCI_BUF_IMR_CLR 0xC0000303
+#define B_BE_HCI_BUF_IMR_SET 0xC0000301
+
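+/*
+ * The *_CLR/*_SET word pairs in this header follow the driver's usual
+ * IMR programming idiom: clear the CLR bits first, then enable the SET
+ * bits (sketch, assuming this pair is consumed the same way):
+ *
+ *	rtw89_write32_clr(rtwdev, R_BE_HCI_BUF_IMR, B_BE_HCI_BUF_IMR_CLR);
+ *	rtw89_write32_set(rtwdev, R_BE_HCI_BUF_IMR, B_BE_HCI_BUF_IMR_SET);
+ */
+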
#define R_BE_H2CREG_DATA0 0x7140
#define R_BE_H2CREG_DATA1 0x7144
#define R_BE_H2CREG_DATA2 0x7148
@@ -4693,6 +4827,9 @@
#define B_BE_LTR_CMAC1_RX_USE_PG_TH_MASK GENMASK(27, 16)
#define B_BE_LTR_CMAC0_RX_USE_PG_TH_MASK GENMASK(11, 0)
+#define R_BE_NO_RX_ERR_CFG 0x841C
+#define B_BE_NO_RX_ERR_TO_MASK GENMASK(31, 29)
+
#define R_BE_DMAC_TABLE_CTRL 0x8420
#define B_BE_HWAMSDU_PADDING_MODE BIT(31)
#define B_BE_MACID_MPDU_PROCESSOR_OFFSET_MASK GENMASK(26, 16)
@@ -4704,7 +4841,7 @@
#define B_BE_SER_L0_PROMOTE_L1_EVENT_MASK GENMASK(31, 28)
#define B_BE_SER_L1_COUNTER_MASK GENMASK(27, 24)
#define B_BE_RMAC_PPDU_HANG_CNT_MASK GENMASK(23, 16)
-#define B_BE_SER_L0_COUNTER_MASK GENMASK(8, 0)
+#define B_BE_SER_L0_COUNTER_MASK GENMASK(7, 0)
#define R_BE_DMAC_SYS_CR32B 0x842C
#define B_BE_DMAC_BB_PHY1_MASK GENMASK(31, 16)
@@ -4823,6 +4960,10 @@
#define R_BE_SER_L1_DBG_CNT_7 0x845C
#define B_BE_SER_L1_DBG_2_MASK GENMASK(31, 0)
+#define R_BE_FW_TRIGGER_IDCT_ISR 0x8508
+#define B_BE_DMAC_FW_ERR_IDCT_IMR BIT(31)
+#define B_BE_DMAC_FW_TRIG_IDCT BIT(0)
+
#define R_BE_DMAC_ERR_IMR 0x8520
#define B_BE_DMAC_NOTX_ERR_INT_EN BIT(21)
#define B_BE_DMAC_NORX_ERR_INT_EN BIT(20)
@@ -5026,6 +5167,8 @@
B_BE_STF_WRFF_UNDERFLOW_ERR_INT_EN | \
B_BE_STF_OQT_OVERFLOW_ERR_INT_EN | \
B_BE_STF_OQT_UNDERFLOW_ERR_INT_EN)
+#define B_BE_DISP_OTHER_IMR_CLR_V1 0xFFFFFFFF
+#define B_BE_DISP_OTHER_IMR_SET_V1 0x3F002000
#define R_BE_DISP_HOST_IMR 0x8874
#define B_BE_HR_WRFF_UNDERFLOW_ERR_INT_EN BIT(31)
@@ -5103,6 +5246,8 @@
B_BE_HR_DMA_PROCESS_ERR_INT_EN | \
B_BE_HR_WRFF_OVERFLOW_ERR_INT_EN | \
B_BE_HR_WRFF_UNDERFLOW_ERR_INT_EN)
+#define B_BE_DISP_HOST_IMR_CLR_V1 0xFBFFFFFF
+#define B_BE_DISP_HOST_IMR_SET_V1 0xC8B3E579
#define R_BE_DISP_CPU_IMR 0x8878
#define B_BE_CR_PLD_LEN_ERR_INT_EN BIT(30)
@@ -5177,6 +5322,8 @@
B_BE_CR_DMA_PROCESS_ERR_INT_EN | \
B_BE_CR_WRFF_OVERFLOW_ERR_INT_EN | \
B_BE_CR_WRFF_UNDERFLOW_ERR_INT_EN)
+#define B_BE_DISP_CPU_IMR_CLR_V1 0x7DFFFFFD
+#define B_BE_DISP_CPU_IMR_SET_V1 0x34F938FD
#define R_BE_RX_STOP 0x8914
#define B_BE_CPU_RX_STOP BIT(17)
@@ -5471,6 +5618,10 @@
#define B_BE_PLE_Q12_MAX_SIZE_MASK GENMASK(27, 16)
#define B_BE_PLE_Q12_MIN_SIZE_MASK GENMASK(11, 0)
+#define R_BE_PLE_QTA13_CFG 0x9074
+#define B_BE_PLE_Q13_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q13_MIN_SIZE_MASK GENMASK(11, 0)
+
#define R_BE_PLE_ERRFLAG1_IMR 0x90C0
#define B_BE_PLE_SRCHPG_PGOFST_IMR BIT(26)
#define B_BE_PLE_SRCHPG_STRPG_IMR BIT(25)
@@ -5528,7 +5679,21 @@
B_BE_WDRLS_RPT1_AGGNUM0_ERR_INT_EN | \
B_BE_WDRLS_RPT1_FRZTO_ERR_INT_EN)
+#define R_BE_RLSRPT0_CFG0 0x9440
+#define B_BE_RLSRPT0_FWRLS BIT(31)
+#define B_BE_RLSRPT0_FWD_TRGT_MASK GENMASK(23, 16)
+#define B_BE_RLSRPT0_PID_MASK GENMASK(10, 8)
+#define B_BE_RLSRPT0_QID_MASK GENMASK(5, 0)
+#define WDRLS_DEST_QID_POH 1
+#define WDRLS_DEST_QID_STF 0
+
#define R_BE_RLSRPT0_CFG1 0x9444
+#define B_BE_RLSRPT0_FLTR_MAP_V1_MASK GENMASK(28, 24)
+#define S_BE_WDRLS_FLTR_TXOK_V1 BIT(0)
+#define S_BE_WDRLS_FLTR_RTYLMT_V1 BIT(1)
+#define S_BE_WDRLS_FLTR_LIFTIM_V1 BIT(2)
+#define S_BE_WDRLS_FLTR_MACID_V1 BIT(3)
+#define S_BE_WDRLS_FLTR_RELINK_V1 BIT(4)
#define B_BE_RLSRPT0_FLTR_MAP_MASK GENMASK(27, 24)
#define S_BE_WDRLS_FLTR_TXOK 1
#define S_BE_WDRLS_FLTR_RTYLMT 2
@@ -5822,12 +5987,18 @@
#define B_BE_MPDUINFO_PKTID_MASK GENMASK(27, 16)
#define B_BE_MPDUINFO_B1_BADDR_MASK GENMASK(5, 0)
#define MPDU_INFO_B1_OFST 18
+#define MPDU_INFO_TBL_FACTOR 3
#define R_BE_TXPKTCTL_B0_PRELD_CFG0 0x9F48
#define B_BE_B0_PRELD_FEN BIT(31)
#define B_BE_B0_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
#define B_BE_B0_PRELD_CAM_G1ENTNUM_MASK GENMASK(12, 8)
+#define PRELD_MISCQ_ENT_NUM_8922A 2
+#define PRELD_MISCQ_ENT_NUM_8922D 1
#define B_BE_B0_PRELD_CAM_G0ENTNUM_MASK GENMASK(4, 0)
+#define PRELD_B0_ACQ_ENT_NUM_8922A 8
+#define PRELD_B1_ACQ_ENT_NUM_8922A 2
+#define PRELD_ACQ_ENT_NUM_8922D 1
#define R_BE_TXPKTCTL_B0_PRELD_CFG1 0x9F4C
#define B_BE_B0_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8)
@@ -5939,6 +6110,7 @@
#define B_BE_PLRLS_CTL_FRZTO_ISR BIT(0)
#define R_BE_SS_CTRL 0xA310
+#define R_BE_SS_CTRL_V1 0xA610
#define B_BE_SS_INIT_DONE BIT(31)
#define B_BE_WDE_STA_DIS BIT(30)
#define B_BE_WARM_INIT BIT(29)
@@ -5978,6 +6150,24 @@
#define B_BE_RPT_TIMEOUT_ISR BIT(1)
#define B_BE_SEARCH_TIMEOUT_ISR BIT(0)
+#define R_BE_PLRLS_ERR_IMR_V1 0xA518
+#define B_BE_PLRLS_DUMMY_ISR6 BIT(7)
+#define B_BE_PLRLS_DUMMY_ISR5 BIT(6)
+#define B_BE_PLRLS_DUMMY_ISR4 BIT(5)
+#define B_BE_PLRLS_DUMMY_ISR3 BIT(4)
+#define B_BE_PLRLS_DUMMY_ISR2 BIT(3)
+#define B_BE_PLRLS_DUMMY_ISR1 BIT(2)
+#define B_BE_PLRLS_DUMMY_ISR0 BIT(1)
+#define B_BE_PLRLS_ERR_IMR_V1_CLR 0x1
+#define B_BE_PLRLS_ERR_IMR_V1_SET 0x1
+
+#define R_BE_SS_LITE_TXL_MACID 0xA790
+#define B_BE_RPT_OTHER_BAND_EN BIT(31)
+#define B_BE_TXL_CMD_EN BIT(30)
+#define B_BE_TXL_READ_MACID_MASK GENMASK(29, 20)
+#define B_BE_TXL_MACID_1_MASK GENMASK(19, 10)
+#define B_BE_TXL_MACID_0_MASK GENMASK(9, 0)
+
#define R_BE_HAXI_INIT_CFG1 0xB000
#define B_BE_CFG_WD_PERIOD_IDLE_MASK GENMASK(31, 28)
#define B_BE_CFG_WD_PERIOD_ACTIVE_MASK GENMASK(27, 24)
@@ -6017,6 +6207,18 @@
#define B_BE_STOP_CH2 BIT(2)
#define B_BE_STOP_CH1 BIT(1)
#define B_BE_STOP_CH0 BIT(0)
+#define B_BE_TX_STOP1_MASK (B_BE_STOP_CH0 | B_BE_STOP_CH1 | \
+ B_BE_STOP_CH2 | B_BE_STOP_CH3 | \
+ B_BE_STOP_CH4 | B_BE_STOP_CH5 | \
+ B_BE_STOP_CH6 | B_BE_STOP_CH7 | \
+ B_BE_STOP_CH8 | B_BE_STOP_CH9 | \
+ B_BE_STOP_CH10 | B_BE_STOP_CH11 | \
+ B_BE_STOP_CH12 | B_BE_STOP_CH13 | \
+ B_BE_STOP_CH14)
+#define B_BE_TX_STOP1_MASK_V1 (B_BE_STOP_CH0 | B_BE_STOP_CH2 | \
+ B_BE_STOP_CH4 | B_BE_STOP_CH6 | \
+ B_BE_STOP_CH8 | B_BE_STOP_CH10 | \
+ B_BE_STOP_CH12)
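+
+/*
+ * The _V1 variant keeps only the even-numbered channels. Minimal usage
+ * sketch, assuming these bits sit in the same DMA-stop register as the
+ * existing B_BE_STOP_CHn defines (R_BE_HAXI_DMA_STOP1 in the driver):
+ *
+ *	rtw89_write32_set(rtwdev, R_BE_HAXI_DMA_STOP1, B_BE_TX_STOP1_MASK_V1);
+ */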
#define R_BE_HAXI_MST_WDT_TIMEOUT_SEL_V1 0xB02C
#define B_BE_HAXI_MST_WDT_TIMEOUT_SEL_MASK GENMASK(4, 0)
@@ -6069,6 +6271,7 @@
#define R_BE_CH_PAGE_CTRL 0xB704
#define B_BE_PREC_PAGE_CH12_V1_MASK GENMASK(21, 16)
+#define B_BE_FULL_WD_PG_MASK GENMASK(15, 8)
#define B_BE_PREC_PAGE_CH011_V1_MASK GENMASK(5, 0)
#define R_BE_CH0_PAGE_CTRL 0xB718
@@ -6101,6 +6304,10 @@
#define R_BE_WP_PAGE_CTRL1 0xB7A4
#define B_BE_PREC_PAGE_WP_CH811_MASK GENMASK(24, 16)
#define B_BE_PREC_PAGE_WP_CH07_MASK GENMASK(8, 0)
+#define B_BE_FULL_PAGE_WP_CH811_MASK GENMASK(31, 24)
+#define B_BE_PREC_PAGE_WP_CH811_V1_MASK GENMASK(23, 16)
+#define B_BE_FULL_PAGE_WP_CH07_MASK GENMASK(15, 8)
+#define B_BE_PREC_PAGE_WP_CH07_V1_MASK GENMASK(7, 0)
#define R_BE_WP_PAGE_CTRL2 0xB7A8
#define B_BE_WP_THRD_MASK GENMASK(12, 0)
@@ -6159,8 +6366,79 @@
#define B_BE_GNT_WL_BB_PWR_VAL BIT(1)
#define B_BE_GNT_WL_BB_PWR_SWCTRL BIT(0)
+#define R_BE_PTA_GNT_SW_CTRL 0x0E348
+#define B_BE_PTA_WL_ACT0_VAL BIT(19)
+#define B_BE_PTA_WL_ACT0_SWCTRL BIT(18)
+#define B_BE_PTA_GNT_BT0_RX_BB1_VAL BIT(17)
+#define B_BE_PTA_GNT_BT0_RX_BB1_SWCTRL BIT(16)
+#define B_BE_PTA_GNT_BT0_TX_BB1_VAL BIT(15)
+#define B_BE_PTA_GNT_BT0_TX_BB1_SWCTRL BIT(14)
+#define B_BE_PTA_GNT_BT0_RX_BB0_VAL BIT(13)
+#define B_BE_PTA_GNT_BT0_RX_BB0_SWCTRL BIT(12)
+#define B_BE_PTA_GNT_BT0_TX_BB0_VAL BIT(11)
+#define B_BE_PTA_GNT_BT0_TX_BB0_SWCTRL BIT(10)
+#define B_BE_PTA_GNT_BT0_BB_VAL BIT(9)
+#define B_BE_PTA_GNT_BT0_BB_SWCTRL BIT(8)
+#define B_BE_PTA_WL_ACT_RX_BT0_VAL BIT(7)
+#define B_BE_PTA_WL_ACT_RX_BT0_SWCTRL BIT(6)
+#define B_BE_PTA_WL_ACT_TX_BT0_VAL BIT(5)
+#define B_BE_PTA_WL_ACT_TX_BT0_SWCTRL BIT(4)
+#define B_BE_PTA_GNT_WL_BB1_VAL BIT(3)
+#define B_BE_PTA_GNT_WL_BB1_SWCTRL BIT(2)
+#define B_BE_PTA_GNT_WL_BB0_VAL BIT(1)
+#define B_BE_PTA_GNT_WL_BB0_SWCTRL BIT(0)
+
+#define R_BE_PTA_GNT_VAL 0x0E34C
+#define B_BE_PTA_WL_ACT2 BIT(20)
+#define B_BE_PTA_GNT_ZB_TX_BB1 BIT(19)
+#define B_BE_PTA_GNT_ZB_TX_BB0 BIT(18)
+#define B_BE_PTA_WL_ACT1 BIT(17)
+#define B_BE_PTA_GNT_BT1_RX_BB1 BIT(16)
+#define B_BE_PTA_GNT_BT1_RX_BB0 BIT(15)
+#define B_BE_PTA_GNT_BT1_TX_BB1 BIT(14)
+#define B_BE_PTA_GNT_BT1_TX_BB0 BIT(13)
+#define B_BE_PTA_WL_ACT_RX_BT1 BIT(12)
+#define B_BE_PTA_WL_ACT_TX_BT1 BIT(11)
+#define B_BE_PTA_GNT_BT1_BB BIT(10)
+#define B_BE_PTA_WL_ACT0 BIT(9)
+#define B_BE_PTA_GNT_BT0_RX_BB1 BIT(8)
+#define B_BE_PTA_GNT_BT0_TX_BB1 BIT(7)
+#define B_BE_PTA_GNT_BT0_RX_BB0 BIT(6)
+#define B_BE_PTA_GNT_BT0_TX_BB0 BIT(5)
+#define B_BE_PTA_GNT_BT0_BB BIT(4)
+#define B_BE_PTA_WL_ACT_RX_BT0 BIT(3)
+#define B_BE_PTA_WL_ACT_TX_BT0 BIT(2)
+#define B_BE_PTA_GNT_WL_BB1 BIT(1)
+#define B_BE_PTA_GNT_WL_BB0 BIT(0)
+
+#define R_BE_PTA_GNT_ZL_SW_CTRL 0x0E350
+#define B_BE_PTA_WL_ACT2_VAL BIT(21)
+#define B_BE_PTA_WL_ACT2_SWCTRL BIT(20)
+#define B_BE_PTA_GNT_ZB_TX_BB1_VAL BIT(19)
+#define B_BE_PTA_GNT_ZB_TX_BB1_SWCTRL BIT(18)
+#define B_BE_PTA_GNT_ZB_TX_BB0_VAL BIT(17)
+#define B_BE_PTA_GNT_ZB_TX_BB0_SWCTRL BIT(16)
+#define B_BE_PTA_WL_ACT1_VAL BIT(15)
+#define B_BE_PTA_WL_ACT1_SWCTRL BIT(14)
+#define B_BE_PTA_GNT_BT1_RX_BB1_VAL BIT(13)
+#define B_BE_PTA_GNT_BT1_RX_BB1_SWCTRL BIT(12)
+#define B_BE_PTA_GNT_BT1_RX_BB0_VAL BIT(11)
+#define B_BE_PTA_GNT_BT1_RX_BB0_SWCTRL BIT(10)
+#define B_BE_PTA_GNT_BT1_TX_BB1_VAL BIT(9)
+#define B_BE_PTA_GNT_BT1_TX_BB1_SWCTRL BIT(8)
+#define B_BE_PTA_GNT_BT1_TX_BB0_VAL BIT(7)
+#define B_BE_PTA_GNT_BT1_TX_BB0_SWCTRL BIT(6)
+#define B_BE_PTA_WL_ACT_RX_BT1_VAL BIT(5)
+#define B_BE_PTA_WL_ACT_RX_BT1_SWCTRL BIT(4)
+#define B_BE_PTA_WL_ACT_TX_BT1_VAL BIT(3)
+#define B_BE_PTA_WL_ACT_TX_BT1_SWCTRL BIT(2)
+#define B_BE_PTA_GNT_BT1_BB_VAL BIT(1)
+#define B_BE_PTA_GNT_BT1_BB_SWCTRL BIT(0)
+
#define R_BE_PWR_MACID_PATH_BASE 0x0E500
+#define R_BE_PWR_MACID_PATH_BASE_V1 0x1C000
#define R_BE_PWR_MACID_LMT_BASE 0x0ED00
+#define R_BE_PWR_MACID_LMT_BASE_V1 0x1C800
#define R_BE_CMAC_FUNC_EN 0x10000
#define R_BE_CMAC_FUNC_EN_C1 0x14000
@@ -6223,6 +6501,19 @@
#define BE_WMAC_RFMOD_160M 3
#define BE_WMAC_RFMOD_320M 4
+#define R_BE_GID_POSITION0 0x10070
+#define R_BE_GID_POSITION0_C1 0x14070
+#define R_BE_GID_POSITION1 0x10074
+#define R_BE_GID_POSITION1_C1 0x14074
+#define R_BE_GID_POSITION2 0x10078
+#define R_BE_GID_POSITION2_C1 0x14078
+#define R_BE_GID_POSITION3 0x1007C
+#define R_BE_GID_POSITION3_C1 0x1407C
+#define R_BE_GID_POSITION_EN0 0x10080
+#define R_BE_GID_POSITION_EN0_C1 0x14080
+#define R_BE_GID_POSITION_EN1 0x10084
+#define R_BE_GID_POSITION_EN1_C1 0x14084
+
#define R_BE_TX_SUB_BAND_VALUE 0x10088
#define R_BE_TX_SUB_BAND_VALUE_C1 0x14088
#define B_BE_PRI20_BITMAP_MASK GENMASK(31, 16)
@@ -6257,6 +6548,21 @@
#define B_BE_RSC_MASK GENMASK(7, 6)
#define B_BE_RRSR_CCK_MASK GENMASK(3, 0)
+#define R_BE_COMMON_PHYINTF_CTRL_0 0x100B8
+#define R_BE_COMMON_PHYINTF_CTRL_0_C1 0x140B8
+#define B_BE_SEQ_EN_GUARD_CYE_MASK GENMASK(23, 20)
+#define B_BE_PARA_FIFO_CRC_EN BIT(18)
+#define B_BE_SEQ_FIFO_TO_EN BIT(17)
+#define B_BE_PARA_FIFO_TO_EN BIT(16)
+#define B_BE_SEQ_FIFO_CLR_EN BIT(6)
+#define B_BE_PARA_FIFO_CLR_EN_V1 BIT(5)
+#define B_BE_CSI_FIFO_CLR_EN_V1 BIT(4)
+#define B_BE_FTM_FIFO_CLR_EN_V1 BIT(3)
+#define B_BE_RXD_FIFO_CLR_EN_V1 BIT(2)
+#define B_BE_TXD_FIFO_CLR_EN_V1 BIT(1)
+#define B_BE_TXUID_FIFO_CLR_EN_V1 BIT(0)
+#define CLEAR_DTOP_DIS (B_BE_TXD_FIFO_CLR_EN_V1 | B_BE_PARA_FIFO_CLR_EN_V1 | \
+			B_BE_SEQ_FIFO_CLR_EN)
+
#define R_BE_CMAC_ERR_IMR 0x10160
#define R_BE_CMAC_ERR_IMR_C1 0x14160
#define B_BE_CMAC_FW_ERR_IDCT_EN BIT(16)
@@ -6349,6 +6655,25 @@
#define B_BE_P0_SYNC_PORT_SRC_SEL_MASK GENMASK(26, 24)
#define B_BE_P0_TSFTR_SYNC_OFFSET_MASK GENMASK(18, 0)
+#define R_BE_SCH_EDCA_RST_CFG 0x102E4
+#define R_BE_SCH_EDCA_RST_CFG_C1 0x142E4
+#define B_BE_EDCCA_S160_RST_EDCA_EN BIT(23)
+#define B_BE_EDCCA_S80_RST_EDCA_EN BIT(22)
+#define B_BE_EDCCA_S40_RST_EDCA_EN BIT(21)
+#define B_BE_EDCCA_S20_RST_EDCA_EN BIT(20)
+#define B_BE_OFDM_CCA_S160_RST_EDCA_EN BIT(19)
+#define B_BE_CCA_PEB_BE_BITMAP_RST_EDCA_EN BIT(18)
+#define B_BE_RX_INTRA_NAV_RST_EDCA_EN BIT(15)
+#define B_BE_RX_BASIC_NAV_RST_EDCA_EN BIT(14)
+#define B_BE_EDCCA_PER20_BITMAP_SIFS_RST_EDCA_EN BIT(10)
+#define B_BE_TX_NAV_RST_EDCA_EN BIT(7)
+#define B_BE_NO_GNT_WL_RST_EDCA_EN BIT(5)
+#define B_BE_EDCCA_P20_RST_EDCA_EN BIT(4)
+#define B_BE_OFDM_CCA_S80_RST_EDCA_EN BIT(3)
+#define B_BE_OFDM_CCA_S40_RST_EDCA_EN BIT(2)
+#define B_BE_OFDM_CCA_S20_RST_EDCA_EN BIT(1)
+#define B_BE_CCA_P20_RST_EDCA_EN BIT(0)
+
#define R_BE_EDCA_BCNQ_PARAM 0x10324
#define R_BE_EDCA_BCNQ_PARAM_C1 0x14324
#define B_BE_BCNQ_CW_MASK GENMASK(31, 24)
@@ -6639,6 +6964,34 @@
#define B_BE_CMAC_TX_MODE_1 BIT(1)
#define B_BE_CMAC_TX_MODE_0 BIT(0)
+#define R_BE_AGG_BK_0 0x10804
+#define R_BE_AGG_BK_0_C1 0x14804
+#define B_BE_DIS_SAMPDU_TXIME_SR_CHECK BIT(24)
+#define B_BE_TX_PAIR_MACID_LEN_EN BIT(23)
+#define B_BE_DIS_SND_STS_CHECK_SU BIT(22)
+#define B_BE_MAX_AGG_NUM_FIX_MODE_EN_V1 BIT(21)
+#define B_BE_DIS_SIFS_BK_AGG_AMPDU BIT(20)
+#define B_BE_EN_MU2SU_CHK_PROTECT_PPDU BIT(19)
+#define B_BE_RPT_TXOP_START_PROTECT BIT(18)
+#define B_BE_RANDOM_GEN_CMD_ABORT_EN BIT(17)
+#define B_BE_PHYTXON_ENDPS_RESP_CHK BIT(16)
+#define B_BE_CTN_CHK_SEQ_REQ_EN BIT(15)
+#define B_BE_PTCL_RLS_ALLFAIL_EN BIT(14)
+#define B_BE_DIS_MURU_PRI_Q_EMPTY_CHK BIT(13)
+#define B_BE_DIS_MURU_SEC_Q_EMPTY_CHK BIT(12)
+#define B_BE_EN_SAMPDU_TXIME_TWT_CHECK BIT(11)
+#define B_BE_DIS_SAMPDU_TXIME_P2P_CHECK BIT(10)
+#define B_BE_DIS_SAMPDU_TXIME_BCN_CHECK BIT(9)
+#define B_BE_DIS_UL_SEQ_ABORT_CHECK BIT(8)
+#define B_BE_DIS_SND_STS_CHECK BIT(7)
+#define B_BE_NAV_PAUS_PHB_EN BIT(6)
+#define B_BE_TXOP_SHT_PHB_EN BIT(5)
+#define B_BE_AGG_BRK_PHB_EN BIT(4)
+#define B_BE_DIS_SSN_CHK BIT(3)
+#define B_BE_WDBK_CFG BIT(2)
+#define B_BE_EN_RTY_BK BIT(1)
+#define B_BE_EN_RTY_BK_COD BIT(0)
+
#define R_BE_TB_PPDU_CTRL 0x1080C
#define R_BE_TB_PPDU_CTRL_C1 0x1480C
#define B_BE_TB_PPDU_BK_DIS BIT(15)
@@ -6653,9 +7006,11 @@
#define R_BE_AMPDU_AGG_LIMIT_C1 0x14810
#define B_BE_AMPDU_MAX_TIME_MASK GENMASK(31, 24)
#define AMPDU_MAX_TIME 0x9E
+#define AMPDU_MAX_TIME_V1 0xA4
#define B_BE_RA_TRY_RATE_AGG_LMT_MASK GENMASK(23, 16)
#define B_BE_RTS_MAX_AGG_NUM_MASK GENMASK(15, 8)
#define B_BE_MAX_AGG_NUM_MASK GENMASK(7, 0)
+#define MAX_TX_AMPDU_NUM_V1 128
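+
+/*
+ * Sketch (assumed consumer) of applying the V1 aggregation caps:
+ *
+ *	u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_AMPDU_AGG_LIMIT, mac_idx);
+ *
+ *	rtw89_write32_mask(rtwdev, reg, B_BE_AMPDU_MAX_TIME_MASK,
+ *			   AMPDU_MAX_TIME_V1);
+ *	rtw89_write32_mask(rtwdev, reg, B_BE_MAX_AGG_NUM_MASK,
+ *			   MAX_TX_AMPDU_NUM_V1);
+ */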
#define R_BE_AGG_LEN_HT_0 0x10814
#define R_BE_AGG_LEN_HT_0_C1 0x14814
@@ -6663,6 +7018,20 @@
#define B_BE_RTS_TXTIME_TH_MASK GENMASK(15, 8)
#define B_BE_RTS_LEN_TH_MASK GENMASK(7, 0)
+#define R_BE_SPECIAL_TX_SETTING 0x10820
+#define R_BE_SPECIAL_TX_SETTING_C1 0x14820
+#define B_BE_TRI_PADDING_EXTEND BIT(31)
+#define B_BE_TX_SN_BYPASS_EN BIT(30)
+#define B_BE_USE_DATA_BW BIT(29)
+#define B_BE_BW_SIGTA_MASK GENMASK(28, 27)
+#define B_BE_BMC_NAV_PROTECT BIT(26)
+#define B_BE_F2P_KEEP_NON_SR_CMD BIT(25)
+#define B_BE_F2P_SU_FIXRATE_OVER_WD BIT(24)
+#define B_BE_BAR_TXRATE_FOR_NULL_WD_MASK GENMASK(23, 20)
+#define B_BE_STBC_CFEND_MASK GENMASK(19, 18)
+#define B_BE_STBC_CFEND_RATE_MASK GENMASK(17, 9)
+#define B_BE_BASIC_CFEND_RATE_MASK GENMASK(8, 0)
+
#define R_BE_SIFS_SETTING 0x10824
#define R_BE_SIFS_SETTING_C1 0x14824
#define B_BE_HW_CTS2SELF_PKT_LEN_TH_MASK GENMASK(31, 24)
@@ -6696,6 +7065,44 @@
#define B_BE_PORT_DROP_4_0_MASK GENMASK(20, 16)
#define B_BE_MBSSID_DROP_15_0_MASK GENMASK(15, 0)
+#define R_BE_PTCL_PRELD_CTRL 0x10868
+#define R_BE_PTCL_PRELD_CTRL_C1 0x14868
+#define B_BE_PRELD_MGQ2_EN BIT(22)
+#define B_BE_PRELD_MGQ1_EN BIT(21)
+#define B_BE_PRELD_MGQ0_EN BIT(20)
+#define B_BE_PRELD_HIQ_P4_EN BIT(19)
+#define B_BE_PRELD_HIQ_P3_EN BIT(18)
+#define B_BE_PRELD_HIQ_P2_EN BIT(17)
+#define B_BE_PRELD_HIQ_P1_EN BIT(16)
+#define B_BE_PRELD_HIQ_P0MB15_EN BIT(15)
+#define B_BE_PRELD_HIQ_P0MB14_EN BIT(14)
+#define B_BE_PRELD_HIQ_P0MB13_EN BIT(13)
+#define B_BE_PRELD_HIQ_P0MB12_EN BIT(12)
+#define B_BE_PRELD_HIQ_P0MB11_EN BIT(11)
+#define B_BE_PRELD_HIQ_P0MB10_EN BIT(10)
+#define B_BE_PRELD_HIQ_P0MB9_EN BIT(9)
+#define B_BE_PRELD_HIQ_P0MB8_EN BIT(8)
+#define B_BE_PRELD_HIQ_P0MB7_EN BIT(7)
+#define B_BE_PRELD_HIQ_P0MB6_EN BIT(6)
+#define B_BE_PRELD_HIQ_P0MB5_EN BIT(5)
+#define B_BE_PRELD_HIQ_P0MB4_EN BIT(4)
+#define B_BE_PRELD_HIQ_P0MB3_EN BIT(3)
+#define B_BE_PRELD_HIQ_P0MB2_EN BIT(2)
+#define B_BE_PRELD_HIQ_P0MB1_EN BIT(1)
+#define B_BE_PRELD_HIQ_P0_EN BIT(0)
+#define B_BE_PRELD_HIQ_ALL_EN (B_BE_PRELD_HIQ_P0_EN | B_BE_PRELD_HIQ_P1_EN | \
+ B_BE_PRELD_HIQ_P2_EN | B_BE_PRELD_HIQ_P3_EN | \
+ B_BE_PRELD_HIQ_P4_EN)
+#define B_BE_PRELD_HIQ_P0MB_ALL_EN \
+ (B_BE_PRELD_HIQ_P0_EN | B_BE_PRELD_HIQ_P0MB1_EN | \
+ B_BE_PRELD_HIQ_P0MB2_EN | B_BE_PRELD_HIQ_P0MB3_EN | \
+ B_BE_PRELD_HIQ_P0MB4_EN | B_BE_PRELD_HIQ_P0MB5_EN | \
+ B_BE_PRELD_HIQ_P0MB6_EN | B_BE_PRELD_HIQ_P0MB7_EN | \
+ B_BE_PRELD_HIQ_P0MB8_EN | B_BE_PRELD_HIQ_P0MB9_EN | \
+ B_BE_PRELD_HIQ_P0MB10_EN | B_BE_PRELD_HIQ_P0MB11_EN | \
+ B_BE_PRELD_HIQ_P0MB12_EN | B_BE_PRELD_HIQ_P0MB13_EN | \
+ B_BE_PRELD_HIQ_P0MB14_EN | B_BE_PRELD_HIQ_P0MB15_EN)
+
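+/*
+ * e.g. (sketch) enabling preload on every high-queue port at once:
+ *
+ *	rtw89_write32_set(rtwdev,
+ *			  rtw89_mac_reg_by_idx(rtwdev, R_BE_PTCL_PRELD_CTRL,
+ *					       mac_idx),
+ *			  B_BE_PRELD_HIQ_ALL_EN);
+ */
+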
#define R_BE_BT_PLT 0x1087C
#define R_BE_BT_PLT_C1 0x1487C
#define B_BE_BT_PLT_PKT_CNT_MASK GENMASK(31, 16)
@@ -6936,6 +7343,8 @@
B_BE_RX_RU1_FSM_HANG_ERROR_IMR | \
B_BE_RX_RU0_FSM_HANG_ERROR_IMR | \
B_BE_RX_GET_NULL_PKT_ERROR_IMR)
+#define B_BE_RX_ERROR_FLAG_IMR_CLR_V1 0x7FFFFFF8
+#define B_BE_RX_ERROR_FLAG_IMR_SET_V1 0x7FFFFF38
#define R_BE_RX_CTRL_1 0x10C0C
#define R_BE_RX_CTRL_1_C1 0x14C0C
@@ -7349,6 +7758,8 @@
#define B_BE_ACK_BA_RESP_LEGACY_CHK_SEC_CCA20 BIT(2)
#define B_BE_ACK_BA_RESP_LEGACY_CHK_EDCCA BIT(1)
#define B_BE_ACK_BA_RESP_LEGACY_CHK_CCA BIT(0)
+#define RESP_ACK_CFG_BE (B_BE_ACK_BA_RESP_LEGACY_CHK_BTCCA | \
+ B_BE_ACK_BA_RESP_LEGACY_CHK_TX_NAV)
#define R_BE_WMAC_ACK_BA_RESP_HE 0x11204
#define R_BE_WMAC_ACK_BA_RESP_HE_C1 0x15204
@@ -7390,6 +7801,188 @@
#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_EDCCA BIT(1)
#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_CCA BIT(0)
+#define R_BE_WMAC_RX_RTS_RESP_LEGACY 0x1120C
+#define R_BE_WMAC_RX_RTS_RESP_LEGACY_C1 0x1520C
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_NSTR BIT(16)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_TX_NAV BIT(15)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_INTRA_NAV BIT(14)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_BASIC_NAV BIT(13)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_BTCCA BIT(12)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_EDCCA160 BIT(11)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_EDCCA80 BIT(10)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_EDCCA40 BIT(9)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_EDCCA20 BIT(8)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_EDCCA_PER20_BMP BIT(7)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_CCA_PER20_BMP BIT(6)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_CCA160 BIT(5)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_CCA80 BIT(4)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_CCA40 BIT(3)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_CCA20 BIT(2)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_EDCCA BIT(1)
+#define B_BE_RX_RTS_RESP_LEGACY_CHK_CCA BIT(0)
+#define RESP_RTS_CFG_BE (B_BE_RX_RTS_RESP_LEGACY_CHK_CCA | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_EDCCA | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_CCA20 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_CCA40 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_CCA80 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_CCA160 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_EDCCA20 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_EDCCA40 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_EDCCA80 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_EDCCA160 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_BTCCA | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_BASIC_NAV | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_INTRA_NAV | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_TX_NAV)
+#define RESP_RTS_PUNC_CFG_BE (B_BE_RX_RTS_RESP_LEGACY_CHK_CCA | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_EDCCA | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_CCA_PER20_BMP | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_EDCCA_PER20_BMP | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_BTCCA | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_BASIC_NAV | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_INTRA_NAV | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_TX_NAV)
+#define RESP_NORMAL_CFG_BE (B_BE_RX_RTS_RESP_LEGACY_CHK_CCA | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_EDCCA | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_CCA20 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_CCA40 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_CCA80 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_CCA160 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_EDCCA20 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_EDCCA40 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_EDCCA80 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_SEC_EDCCA160 | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_BTCCA | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_BASIC_NAV | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_TX_NAV)
+#define RESP_NORMAL_PUNC_CFG_BE (B_BE_RX_RTS_RESP_LEGACY_CHK_CCA | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_EDCCA | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_CCA_PER20_BMP | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_EDCCA_PER20_BMP | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_BTCCA | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_BASIC_NAV | \
+ B_BE_RX_RTS_RESP_LEGACY_CHK_TX_NAV)
+
+#define R_BE_WMAC_RX_RTS_RESP_LEGACY_PUNC 0x11210
+#define R_BE_WMAC_RX_RTS_RESP_LEGACY_PUNC_C1 0x15210
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_NSTR BIT(16)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_TX_NAV BIT(15)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_INTRA_NAV BIT(14)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_BASIC_NAV BIT(13)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_BTCCA BIT(12)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_SEC_EDCCA160 BIT(11)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_SEC_EDCCA80 BIT(10)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_SEC_EDCCA40 BIT(9)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_SEC_EDCCA20 BIT(8)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_EDCCA_PER20_BMP BIT(7)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_CCA_PER20_BMP BIT(6)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_SEC_CCA160 BIT(5)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_SEC_CCA80 BIT(4)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_SEC_CCA40 BIT(3)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_SEC_CCA20 BIT(2)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_EDCCA BIT(1)
+#define B_BE_RX_RTS_RESP_LEGACY_PUNC_CHK_CCA BIT(0)
+
+#define R_BE_WMAC_RX_MURTS_RESP_LEGACY 0x11214
+#define R_BE_WMAC_RX_MURTS_RESP_LEGACY_C1 0x15214
+#define B_BE_MURTS_RESP_LEGACY_CHK_NSTR BIT(16)
+#define B_BE_MURTS_RESP_LEGACY_CHK_TX_NAV BIT(15)
+#define B_BE_MURTS_RESP_LEGACY_CHK_INTRA_NAV BIT(14)
+#define B_BE_MURTS_RESP_LEGACY_CHK_BASIC_NAV BIT(13)
+#define B_BE_MURTS_RESP_LEGACY_CHK_BTCCA BIT(12)
+#define B_BE_MURTS_RESP_LEGACY_CHK_SEC_EDCCA160 BIT(11)
+#define B_BE_MURTS_RESP_LEGACY_CHK_SEC_EDCCA80 BIT(10)
+#define B_BE_MURTS_RESP_LEGACY_CHK_SEC_EDCCA40 BIT(9)
+#define B_BE_MURTS_RESP_LEGACY_CHK_SEC_EDCCA20 BIT(8)
+#define B_BE_MURTS_RESP_LEGACY_CHK_EDCCA_PER20_BMP BIT(7)
+#define B_BE_MURTS_RESP_LEGACY_CHK_CCA_PER20_BMP BIT(6)
+#define B_BE_MURTS_RESP_LEGACY_CHK_SEC_CCA160 BIT(5)
+#define B_BE_MURTS_RESP_LEGACY_CHK_SEC_CCA80 BIT(4)
+#define B_BE_MURTS_RESP_LEGACY_CHK_SEC_CCA40 BIT(3)
+#define B_BE_MURTS_RESP_LEGACY_CHK_SEC_CCA20 BIT(2)
+#define B_BE_MURTS_RESP_LEGACY_CHK_EDCCA BIT(1)
+#define B_BE_MURTS_RESP_LEGACY_CHK_CCA BIT(0)
+
+#define R_BE_WMAC_RX_MURTS_RESP_LEGACY_PUNC 0x11218
+#define R_BE_WMAC_RX_MURTS_RESP_LEGACY_PUNC_C1 0x15218
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_NSTR BIT(16)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_TX_NAV BIT(15)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_INTRA_NAV BIT(14)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_BASIC_NAV BIT(13)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_BTCCA BIT(12)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_SEC_EDCCA160 BIT(11)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_SEC_EDCCA80 BIT(10)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_SEC_EDCCA40 BIT(9)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_SEC_EDCCA20 BIT(8)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_EDCCA_PER20_BMP BIT(7)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_CCA_PER20_BMP BIT(6)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_SEC_CCA160 BIT(5)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_SEC_CCA80 BIT(4)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_SEC_CCA40 BIT(3)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_SEC_CCA20 BIT(2)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_EDCCA BIT(1)
+#define B_BE_MURTS_RESP_LEGACY_PUNC_CHK_CCA BIT(0)
+
+#define R_BE_WMAC_OTHERS_RESP_LEGACY 0x1121C
+#define R_BE_WMAC_OTHERS_RESP_LEGACY_C1 0x1521C
+#define B_BE_OTHERS_RESP_LEGACY_CHK_NSTR BIT(16)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_TX_NAV BIT(15)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_INTRA_NAV BIT(14)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_BASIC_NAV BIT(13)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_BTCCA BIT(12)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_SEC_EDCCA160 BIT(11)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_SEC_EDCCA80 BIT(10)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_SEC_EDCCA40 BIT(9)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_SEC_EDCCA20 BIT(8)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_EDCCA_PER20_BMP BIT(7)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_CCA_PER20_BMP BIT(6)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_SEC_CCA160 BIT(5)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_SEC_CCA80 BIT(4)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_SEC_CCA40 BIT(3)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_SEC_CCA20 BIT(2)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_EDCCA BIT(1)
+#define B_BE_OTHERS_RESP_LEGACY_CHK_CCA BIT(0)
+
+#define R_BE_WMAC_OTHERS_RESP_HE 0x11220
+#define R_BE_WMAC_OTHERS_RESP_HE_C1 0x15220
+#define B_BE_OTHERS_RESP_HE_CHK_NSTR BIT(16)
+#define B_BE_OTHERS_RESP_HE_CHK_TX_NAV BIT(15)
+#define B_BE_OTHERS_RESP_HE_CHK_INTRA_NAV BIT(14)
+#define B_BE_OTHERS_RESP_HE_CHK_BASIC_NAV BIT(13)
+#define B_BE_OTHERS_RESP_HE_CHK_BTCCA BIT(12)
+#define B_BE_OTHERS_RESP_HE_CHK_SEC_EDCCA160 BIT(11)
+#define B_BE_OTHERS_RESP_HE_CHK_SEC_EDCCA80 BIT(10)
+#define B_BE_OTHERS_RESP_HE_CHK_SEC_EDCCA40 BIT(9)
+#define B_BE_OTHERS_RESP_HE_CHK_SEC_EDCCA20 BIT(8)
+#define B_BE_OTHERS_RESP_HE_CHK_EDCCA_PER20_BMP BIT(7)
+#define B_BE_OTHERS_RESP_HE_CHK_CCA_PER20_BMP BIT(6)
+#define B_BE_OTHERS_RESP_HE_CHK_SEC_CCA160 BIT(5)
+#define B_BE_OTHERS_RESP_HE_CHK_SEC_CCA80 BIT(4)
+#define B_BE_OTHERS_RESP_HE_CHK_SEC_CCA40 BIT(3)
+#define B_BE_OTHERS_RESP_HE_CHK_SEC_CCA20 BIT(2)
+#define B_BE_OTHERS_RESP_HE_CHK_EDCCA BIT(1)
+#define B_BE_OTHERS_RESP_HE_CHK_CCA BIT(0)
+
+#define R_BE_WMAC_OTHERS_RESP_EHT_LEG_PUNC 0x11224
+#define R_BE_WMAC_OTHERS_RESP_EHT_LEG_PUNC_C1 0x15224
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_NSTR BIT(16)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_TX_NAV BIT(15)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_INTRA_NAV BIT(14)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_BASIC_NAV BIT(13)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_BTCCA BIT(12)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_SEC_EDCCA160 BIT(11)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_SEC_EDCCA80 BIT(10)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_SEC_EDCCA40 BIT(9)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_SEC_EDCCA20 BIT(8)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_EDCCA_PER20_BMP BIT(7)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_CCA_PER20_BMP BIT(6)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_SEC_CCA160 BIT(5)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_SEC_CCA80 BIT(4)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_SEC_CCA40 BIT(3)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_SEC_CCA20 BIT(2)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_EDCCA BIT(1)
+#define B_BE_OTHERS_RESP_EHT_LEG_PUNC_CHK_CCA BIT(0)
+
#define R_BE_RCR 0x11400
#define R_BE_RCR_C1 0x15400
#define B_BE_BUSY_CHKSN BIT(15)
@@ -7427,6 +8020,17 @@
#define B_BE_CCK_SIG_CHK BIT(1)
#define B_BE_CCK_CRC_CHK BIT(0)
+#define R_BE_RXGCK_CTRL 0x11406
+#define R_BE_RXGCK_CTRL_C1 0x15406
+#define B_BE_RXGCK_BCNPRS_DISGCLK BIT(12)
+#define B_BE_RXGCK_GCK_RATE_LIMIT_MASK GENMASK(9, 8)
+#define RX_GCK_LEGACY 2
+#define B_BE_RXGCK_DISREG_GCLK BIT(7)
+#define B_BE_RXGCK_ENTRY_DELAY_MASK GENMASK(6, 4)
+#define B_BE_RXGCK_GCK_CYCLE_MASK GENMASK(3, 2)
+#define B_BE_RXGCK_CCA_EN BIT(1)
+#define B_BE_DISGCLK BIT(0)
+
#define R_BE_RX_FLTR_OPT 0x11420
#define R_BE_RX_FLTR_OPT_C1 0x15420
#define B_BE_UID_FILTER_MASK GENMASK(31, 24)
@@ -7521,6 +8125,11 @@
#define B_BE_CSIPRT_HESU_AID_EN BIT(25)
#define B_BE_CSIPRT_VHTSU_AID_EN BIT(24)
+#define R_BE_BSR_UPD_CTRL 0x11468
+#define R_BE_BSR_UPD_CTRL_C1 0x15468
+#define B_BE_QSIZE_RULE BIT(1)
+#define B_BE_QSIZE_UPD BIT(0)
+
#define R_BE_DRV_INFO_OPTION 0x11470
#define R_BE_DRV_INFO_OPTION_C1 0x15470
#define B_BE_DRV_INFO_PHYRPT_EN BIT(0)
@@ -7586,11 +8195,35 @@
#define B_BE_PLCP_CH20_WIDATA_SRC BIT(1)
#define B_BE_PLCP_PPDU_TYPE_SRC BIT(0)
+#define R_BE_RX_PLCP_EXT_OPTION_2 0x11518
+#define R_BE_RX_PLCP_EXT_OPTION_2_C1 0x15518
+#define B_BE_PLCP_PHASE_B_CRC_CHK_EN BIT(17)
+#define B_BE_PLCP_PHASE_A_CRC_CHK_EN BIT(16)
+#define B_BE_EHTTB_EHTSIG_CRC_CHK_EN BIT(3)
+#define B_BE_EHTTB_USIG_CRC_CHK_EN BIT(2)
+#define B_BE_EHTMU_EHTSIG_CRC_CHK_EN BIT(1)
+#define B_BE_EHTMU_USIG_CRC_CHK_EN BIT(0)
+
#define R_BE_RESP_CSI_RESERVED_PAGE 0x11810
#define R_BE_RESP_CSI_RESERVED_PAGE_C1 0x15810
#define B_BE_CSI_RESERVED_PAGE_NUM_MASK GENMASK(27, 16)
#define B_BE_CSI_RESERVED_START_PAGE_MASK GENMASK(11, 0)
+#define R_BE_RESP_IMR1 0x11878
+#define R_BE_RESP_IMR1_C1 0x15878
+#define B_BE_RESP_IMR_1_MASK GENMASK(31, 9)
+#define B_BE_FSM_TIMEOUT_ERR_IMR BIT(8)
+#define B_BE_SEC_DOUBLE_HIT_ERR_IMR BIT(7)
+#define B_BE_WRPTR_ERR_IMR BIT(6)
+#define B_BE_SMR_TOO_MANY_PLD_ERR_IMR BIT(5)
+#define B_BE_LMR_TOO_MANY_PLD_ERR_IMR BIT(4)
+#define B_BE_CSI_TOO_MANY_PLD_ERR_IMR BIT(3)
+#define B_BE_FTM_LMR_PLDID_READY_ERR_IMR BIT(2)
+#define B_BE_SMR_PLDID_READY_ERR_IMR BIT(1)
+#define B_BE_CSI_PLDID_READY_ERR_IMR BIT(0)
+#define B_BE_RESP_IMR1_CLR 0x1FF
+#define B_BE_RESP_IMR1_SET 0xFF
+
#define R_BE_RESP_IMR 0x11884
#define R_BE_RESP_IMR_C1 0x15884
#define B_BE_RESP_TBL_FLAG_ERR_ISR_EN BIT(17)
@@ -7635,6 +8268,8 @@
B_BE_RESP_PLDID_RDY_ERR_ISR_EN | \
B_BE_RESP_WRPTR_CROSS_ERR_ISR_EN | \
B_BE_RESP_SEC_DOUBLE_HIT_ERR_ISR_EN)
+#define B_BE_RESP_IMR_CLR_V1 0xFFFFFFFF
+#define B_BE_RESP_IMR_SET_V1 0xFFFFFFFF
#define R_BE_PWR_MODULE 0x11900
#define R_BE_PWR_MODULE_C1 0x15900
@@ -7693,6 +8328,7 @@
#define R_BE_PWR_FTM 0x11B00
#define R_BE_PWR_FTM_SS 0x11B04
+#define B_BE_PWR_BY_RATE_DBW_ON GENMASK(27, 26)
#define R_BE_PWR_BY_RATE 0x11E00
#define R_BE_PWR_BY_RATE_MAX 0x11FA8
@@ -7713,6 +8349,10 @@
#define R_BE_TXPWR_ERR_FLAG_C1 0x158E4
#define R_BE_TXPWR_ERR_IMR_C1 0x158E0
+#define R_BE_SCH_EXT_CTRL 0x103FC
+#define R_BE_SCH_EXT_CTRL_C1 0x143FC
+#define B_BE_CWCNT_PLUS_MODE BIT(31)
+
#define CMAC1_START_ADDR_BE 0x14000
#define CMAC1_END_ADDR_BE 0x17FFF
@@ -7997,6 +8637,7 @@
#define B_UPD_P0_EN BIT(31)
#define R_EMLSR 0x0044
#define B_EMLSR_PARM GENMASK(27, 12)
+#define R_CHK_LPS_STAT_BE4 0x3007C
#define R_CHK_LPS_STAT 0x0058
#define B_CHK_LPS_STAT BIT(0)
#define R_SPOOF_CG 0x00B4
@@ -8070,10 +8711,12 @@
#define R_MAC_PIN_SEL 0x0734
#define B_CH_IDX_SEG0 GENMASK(23, 16)
#define R_PLCP_HISTOGRAM 0x0738
+#define R_PLCP_HISTOGRAM_BE_V1 0x20738
#define B_STS_PARSING_TIME GENMASK(19, 16)
#define B_STS_DIS_TRIG_BY_FAIL BIT(3)
#define B_STS_DIS_TRIG_BY_BRK BIT(2)
#define R_PHY_STS_BITMAP_ADDR_START R_PHY_STS_BITMAP_SEARCH_FAIL
+#define R_PHY_STS_BITMAP_ADDR_START_BE4 0x2073C
#define B_PHY_STS_BITMAP_ADDR_MASK GENMASK(6, 2)
#define R_PHY_STS_BITMAP_SEARCH_FAIL 0x073C
#define B_PHY_STS_BITMAP_MSK_52A 0x337cff3f
@@ -8092,6 +8735,7 @@
#define R_PHY_STS_BITMAP_VHT 0x0770
#define R_PHY_STS_BITMAP_HE 0x0774
#define R_PHY_STS_BITMAP_EHT 0x0788
+#define R_PHY_STS_BITMAP_EHT_BE4 0x20788
#define R_EDCCA_RPTREG_SEL_BE 0x078C
#define B_EDCCA_RPTREG_SEL_BE_MSK GENMASK(22, 20)
#define R_PMAC_GNT 0x0980
@@ -8122,6 +8766,7 @@
#define R_DBCC_80P80_SEL_EVM_RPT 0x0A10
#define B_DBCC_80P80_SEL_EVM_RPT_EN BIT(0)
#define R_CCX 0x0C00
+#define R_CCX_BE4 0x20C00
#define B_CCX_EDCCA_OPT_MSK GENMASK(6, 4)
#define B_CCX_EDCCA_OPT_MSK_V1 GENMASK(7, 4)
#define B_MEASUREMENT_TRIG_MSK BIT(2)
@@ -8150,34 +8795,42 @@
#define R_FAHM 0x0C1C
#define B_RXTD_CKEN BIT(2)
#define R_IFS_COUNTER 0x0C28
+#define R_IFS_COUNTER_BE4 0x20C28
#define B_IFS_CLM_PERIOD_MSK GENMASK(31, 16)
#define B_IFS_CLM_COUNTER_UNIT_MSK GENMASK(15, 14)
#define B_IFS_COUNTER_CLR_MSK BIT(13)
#define B_IFS_COLLECT_EN BIT(12)
#define R_IFS_T1 0x0C2C
+#define R_IFS_T1_BE4 0x20C2C
#define B_IFS_T1_TH_HIGH_MSK GENMASK(31, 16)
#define B_IFS_T1_EN_MSK BIT(15)
#define B_IFS_T1_TH_LOW_MSK GENMASK(14, 0)
#define R_IFS_T2 0x0C30
+#define R_IFS_T2_BE4 0x20C30
#define B_IFS_T2_TH_HIGH_MSK GENMASK(31, 16)
#define B_IFS_T2_EN_MSK BIT(15)
#define B_IFS_T2_TH_LOW_MSK GENMASK(14, 0)
#define R_IFS_T3 0x0C34
+#define R_IFS_T3_BE4 0x20C34
#define B_IFS_T3_TH_HIGH_MSK GENMASK(31, 16)
#define B_IFS_T3_EN_MSK BIT(15)
#define B_IFS_T3_TH_LOW_MSK GENMASK(14, 0)
#define R_IFS_T4 0x0C38
+#define R_IFS_T4_BE4 0x20C38
#define B_IFS_T4_TH_HIGH_MSK GENMASK(31, 16)
#define B_IFS_T4_EN_MSK BIT(15)
#define B_IFS_T4_TH_LOW_MSK GENMASK(14, 0)
#define R_PD_CTRL 0x0C3C
#define B_PD_HIT_DIS BIT(9)
#define R_IOQ_IQK_DPK 0x0C60
+#define R_IOQ_IQK_DPK_BE4 0x20C60
#define B_IOQ_IQK_DPK_CLKEN GENMASK(1, 0)
#define B_IOQ_IQK_DPK_EN BIT(1)
+#define B_IOQ_IQK_DPK_RST BIT(0)
#define R_GNT_BT_WGT_EN 0x0C6C
#define B_GNT_BT_WGT_EN BIT(21)
#define R_IQK_DPK_RST 0x0C6C
+#define R_IQK_DPK_RST_BE4 0x20C6C
#define R_IQK_DPK_RST_C1 0x1C6C
#define B_IQK_DPK_RST BIT(0)
#define R_TX_COLLISION_T2R_ST 0x0C70
@@ -8295,14 +8948,17 @@
#define B_NHM_READY_MSK BIT(16)
#define R_IFS_CLM_TX_CNT 0x1ACC
#define R_IFS_CLM_TX_CNT_V1 0x0ECC
+#define R_IFS_CLM_TX_CNT_BE4 0x20ECC
#define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16)
#define B_IFS_CLM_TX_CNT_MSK GENMASK(15, 0)
#define R_IFS_CLM_CCA 0x1AD0
#define R_IFS_CLM_CCA_V1 0x0ED0
+#define R_IFS_CLM_CCA_BE4 0x20ED0
#define B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK GENMASK(31, 16)
#define B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK GENMASK(15, 0)
#define R_IFS_CLM_FA 0x1AD4
#define R_IFS_CLM_FA_V1 0x0ED4
+#define R_IFS_CLM_FA_BE4 0x20ED4
#define B_IFS_CLM_OFDM_FA_MSK GENMASK(31, 16)
#define B_IFS_CLM_CCK_FA_MSK GENMASK(15, 0)
#define R_IFS_HIS 0x1AD8
@@ -8959,12 +9615,14 @@
#define B_S0_DACKQ7_K GENMASK(15, 8)
#define R_S0_DACKQ8 0x5E98
#define B_S0_DACKQ8_K GENMASK(15, 8)
-#define R_DCFO_WEIGHT_V1 0x6244
-#define B_DCFO_WEIGHT_MSK_V1 GENMASK(31, 28)
+#define R_DCFO_WEIGHT_BE 0x6244
+#define R_DCFO_WEIGHT_BE_V1 0x24808
+#define B_DCFO_WEIGHT_MSK_BE GENMASK(31, 28)
#define R_DAC_CLK 0x625C
#define B_DAC_CLK GENMASK(31, 30)
-#define R_DCFO_OPT_V1 0x6260
-#define B_DCFO_OPT_EN_V1 BIT(17)
+#define R_DCFO_OPT_BE 0x6260
+#define R_DCFO_OPT_BE_V1 0x24824
+#define B_DCFO_OPT_EN_BE BIT(17)
#define R_TXFCTR 0x627C
#define B_TXFCTR_THD GENMASK(19, 10)
#define R_TXSCALE 0x6284
@@ -9474,10 +10132,14 @@
#define R_GAIN_MAP1 0xE54C
#define B_GAIN_MAP1_EN BIT(0)
#define R_GOTX_IQKDPK_C0 0xE464
+#define R_GOTX_IQKDPK_C0_BE4 0x2E464
#define R_GOTX_IQKDPK_C1 0xE564
+#define R_GOTX_IQKDPK_C1_BE4 0x2E564
#define B_GOTX_IQKDPK GENMASK(28, 27)
#define R_IQK_DPK_PRST 0xE4AC
+#define R_IQK_DPK_PRST_BE4 0x2E4AC
#define R_IQK_DPK_PRST_C1 0xE5AC
+#define R_IQK_DPK_PRST_C1_BE4 0x2E5AC
#define B_IQK_DPK_PRST BIT(27)
#define R_TXPWR_RSTA 0xE60C
#define B_TXPWR_RSTA BIT(16)
@@ -9504,6 +10166,258 @@
#define R_TSSI_K_P1 0xE7A0
#define B_TSSI_K_OFDM_P1 GENMASK(29, 20)
+#define R_COMP_CIM3K_BE4 0x11998
+#define B_COMP_CIM3K_OW_BE4 BIT(1)
+#define B_COMP_CIM3K_TH_BE4 BIT(2)
+#define B_COMP_CIM3K_TH2_BE4 GENMASK(5, 3)
+#define B_COMP_CIM3K_TXPWR_EN_BE4 BIT(6)
+#define B_COMP_CIM3K_NONBE_BE4 BIT(7)
+#define B_COMP_CIM3K_BANDEDGE_BE4 BIT(8)
+#define R_DPD_CBW160_BE4 0x119B4
+#define B_DPD_CBW160_TH0_BE4 BIT(0)
+#define B_DPD_CBW160_TH1_BE4 BIT(1)
+#define B_DPD_CBW160_TH2_BE4 BIT(2)
+#define B_DPD_CBW160_TH3_BE4 BIT(3)
+#define B_DPD_CBW160_TH4_BE4 BIT(4)
+#define B_DPD_CBW160_TH5_BE4 BIT(5)
+#define B_DPD_CBW160_TH6_BE4 BIT(6)
+#define B_DPD_CBW160_TH7_BE4 BIT(7)
+#define B_DPD_CBW160_OW0_BE4 BIT(8)
+#define B_DPD_CBW160_OW1_BE4 BIT(9)
+#define B_DPD_CBW160_OW2_BE4 BIT(10)
+#define B_DPD_CBW160_OW3_BE4 BIT(11)
+#define B_DPD_CBW160_OW4_BE4 BIT(12)
+#define B_DPD_CBW160_OW5_BE4 BIT(13)
+#define B_DPD_CBW160_OW6_BE4 BIT(14)
+#define B_DPD_CBW160_OW7_BE4 BIT(15)
+#define R_OOB_CBW20_BE4 0x119B4
+#define B_OOB_CBW20_CCK0_BE4 BIT(16)
+#define B_OOB_CBW20_CCK1_BE4 BIT(17)
+#define B_OOB_CBW20_CCK2_BE4 BIT(18)
+#define B_OOB_CBW20_CCK3_BE4 BIT(19)
+#define B_OOB_CBW20_CCK4_BE4 BIT(20)
+#define B_OOB_CBW20_CCK5_BE4 BIT(21)
+#define B_OOB_CBW20_CCK6_BE4 BIT(22)
+#define B_OOB_CBW20_CCK7_BE4 BIT(23)
+#define B_OOB_CBW20_TH0_BE4 BIT(24)
+#define B_OOB_CBW20_TH1_BE4 BIT(25)
+#define B_OOB_CBW20_TH2_BE4 BIT(26)
+#define B_OOB_CBW20_TH3_BE4 BIT(27)
+#define B_OOB_CBW20_TH4_BE4 BIT(28)
+#define B_OOB_CBW20_TH5_BE4 BIT(29)
+#define B_OOB_CBW20_TH6_BE4 BIT(30)
+#define B_OOB_CBW20_TH7_BE4 BIT(31)
+#define R_OOB_CBW40_BE4 0x119B8
+#define B_OOB_CBW20_OW0_BE4 BIT(0)
+#define B_OOB_CBW20_OW1_BE4 BIT(1)
+#define B_OOB_CBW20_OW2_BE4 BIT(2)
+#define B_OOB_CBW20_OW3_BE4 BIT(3)
+#define B_OOB_CBW20_OW4_BE4 BIT(4)
+#define B_OOB_CBW20_OW5_BE4 BIT(5)
+#define B_OOB_CBW20_OW6_BE4 BIT(6)
+#define B_OOB_CBW20_OW7_BE4 BIT(7)
+#define B_OOB_CBW40_CCK0_BE4 BIT(8)
+#define B_OOB_CBW40_CCK1_BE4 BIT(9)
+#define B_OOB_CBW40_CCK2_BE4 BIT(10)
+#define B_OOB_CBW40_CCK3_BE4 BIT(11)
+#define B_OOB_CBW40_CCK4_BE4 BIT(12)
+#define B_OOB_CBW40_CCK5_BE4 BIT(13)
+#define B_OOB_CBW40_CCK6_BE4 BIT(14)
+#define B_OOB_CBW40_CCK7_BE4 BIT(15)
+#define B_OOB_CBW40_TH0_BE4 BIT(16)
+#define B_OOB_CBW40_TH1_BE4 BIT(17)
+#define B_OOB_CBW40_TH2_BE4 BIT(18)
+#define B_OOB_CBW40_TH3_BE4 BIT(19)
+#define B_OOB_CBW40_TH4_BE4 BIT(20)
+#define B_OOB_CBW40_TH5_BE4 BIT(21)
+#define B_OOB_CBW40_TH6_BE4 BIT(22)
+#define B_OOB_CBW40_TH7_BE4 BIT(23)
+#define B_OOB_CBW40_OW0_BE4 BIT(24)
+#define B_OOB_CBW40_OW1_BE4 BIT(25)
+#define B_OOB_CBW40_OW2_BE4 BIT(26)
+#define B_OOB_CBW40_OW3_BE4 BIT(27)
+#define B_OOB_CBW40_OW4_BE4 BIT(28)
+#define B_OOB_CBW40_OW5_BE4 BIT(29)
+#define B_OOB_CBW40_OW6_BE4 BIT(30)
+#define B_OOB_CBW40_OW7_BE4 BIT(31)
+#define R_OOB_CBW80_BE4 0x119BC
+#define B_OOB_CBW80_TH0_BE4 BIT(0)
+#define B_OOB_CBW80_TH1_BE4 BIT(1)
+#define B_OOB_CBW80_TH2_BE4 BIT(2)
+#define B_OOB_CBW80_TH3_BE4 BIT(3)
+#define B_OOB_CBW80_TH4_BE4 BIT(4)
+#define B_OOB_CBW80_TH5_BE4 BIT(5)
+#define B_OOB_CBW80_TH6_BE4 BIT(6)
+#define B_OOB_CBW80_TH7_BE4 BIT(7)
+#define B_OOB_CBW80_OW0_BE4 BIT(8)
+#define B_OOB_CBW80_OW1_BE4 BIT(9)
+#define B_OOB_CBW80_OW2_BE4 BIT(10)
+#define B_OOB_CBW80_OW3_BE4 BIT(11)
+#define B_OOB_CBW80_OW4_BE4 BIT(12)
+#define B_OOB_CBW80_OW5_BE4 BIT(13)
+#define B_OOB_CBW80_OW6_BE4 BIT(14)
+#define B_OOB_CBW80_OW7_BE4 BIT(15)
+#define R_DPD_DBW160_TH0_BE4 0x119BC
+#define B_DPD_DBW160_TH0_0_BE4 GENMASK(18, 16)
+#define B_DPD_DBW160_TH0_1_BE4 GENMASK(21, 19)
+#define B_DPD_DBW160_TH0_2_BE4 GENMASK(24, 22)
+#define B_DPD_DBW160_TH0_3_BE4 GENMASK(27, 25)
+#define B_DPD_DBW160_TH0_4_BE4 GENMASK(30, 28)
+#define R_DPD_DBW160_TH1_BE4 0x119C0
+#define B_DPD_DBW160_TH1_5_BE4 GENMASK(2, 0)
+#define B_DPD_DBW160_TH1_6_BE4 GENMASK(5, 3)
+#define B_DPD_DBW160_TH1_7_BE4 GENMASK(8, 6)
+#define R_DPD_CBW_TH0_BE4 0x119C0
+#define B_DPD_CBW20_TH0_0_BE4 GENMASK(11, 9)
+#define B_DPD_CBW20_TH0_1_BE4 GENMASK(14, 12)
+#define B_DPD_CBW20_TH0_2_BE4 GENMASK(17, 15)
+#define B_DPD_CBW20_TH0_3_BE4 GENMASK(20, 18)
+#define B_DPD_CBW20_TH0_4_BE4 GENMASK(23, 21)
+#define B_DPD_CBW20_TH0_5_BE4 GENMASK(26, 24)
+#define B_DPD_CBW20_TH0_6_BE4 GENMASK(29, 27)
+#define R_DPD_CBW_TH1_BE4 0x119C4
+#define B_DPD_CBW20_TH1_7_BE4 GENMASK(2, 0)
+#define B_DPD_CBW40_TH1_0_BE4 GENMASK(5, 3)
+#define B_DPD_CBW40_TH1_1_BE4 GENMASK(8, 6)
+#define B_DPD_CBW40_TH1_2_BE4 GENMASK(11, 9)
+#define B_DPD_CBW40_TH1_3_BE4 GENMASK(14, 12)
+#define B_DPD_CBW40_TH1_4_BE4 GENMASK(17, 15)
+#define B_DPD_CBW40_TH1_5_BE4 GENMASK(20, 18)
+#define B_DPD_CBW40_TH1_6_BE4 GENMASK(23, 21)
+#define B_DPD_CBW40_TH1_7_BE4 GENMASK(26, 24)
+#define B_DPD_CBW80_TH1_0_BE4 GENMASK(29, 27)
+#define R_DPD_CBW_TH2_BE4 0x119C8
+#define B_DPD_CBW80_TH2_1_BE4 GENMASK(2, 0)
+#define B_DPD_CBW80_TH2_2_BE4 GENMASK(5, 3)
+#define B_DPD_CBW80_TH2_3_BE4 GENMASK(8, 6)
+#define B_DPD_CBW80_TH2_4_BE4 GENMASK(11, 9)
+#define B_DPD_CBW80_TH2_5_BE4 GENMASK(14, 12)
+#define B_DPD_CBW80_TH2_6_BE4 GENMASK(17, 15)
+#define B_DPD_CBW80_TH2_7_BE4 GENMASK(20, 18)
+#define R_QAM_TH0_BE4 0x119E4
+#define B_QAM_TH0_0_BE4 GENMASK(18, 16)
+#define B_QAM_TH0_1_BE4 GENMASK(21, 19)
+#define B_QAM_TH0_2_BE4 GENMASK(24, 22)
+#define B_QAM_TH0_3_BE4 GENMASK(27, 25)
+#define B_QAM_TH0_4_BE4 GENMASK(30, 28)
+#define R_QAM_TH1_BE4 0x119E8
+#define B_QAM_TH1_0_BE4 GENMASK(2, 0)
+#define B_QAM_TH1_1_BE4 GENMASK(5, 3)
+#define B_QAM_TH1_2_BE4 GENMASK(8, 6)
+#define B_QAM_TH1_3_BE4 GENMASK(11, 9)
+#define B_QAM_TH1_4_BE4 GENMASK(14, 12)
+#define B_QAM_TH1_5_BE4 GENMASK(17, 15)
+#define B_QAM_TH1_6_BE4 GENMASK(20, 18)
+#define B_QAM_TH1_7_BE4 GENMASK(23, 21)
+#define B_QAM_TH1_8_BE4 GENMASK(26, 24)
+#define B_QAM_TH1_9_BE4 GENMASK(29, 27)
+#define R_QAM_TH2_BE4 0x119EC
+#define B_QAM_TH2_0_BE4 GENMASK(2, 0)
+#define B_QAM_TH2_1_BE4 GENMASK(5, 3)
+#define B_QAM_TH2_2_BE4 GENMASK(8, 6)
+#define B_QAM_TH2_3_BE4 GENMASK(11, 9)
+#define B_QAM_TH2_4_BE4 GENMASK(14, 12)
+#define B_QAM_TH2_5_BE4 GENMASK(17, 15)
+#define B_QAM_TH2_6_BE4 GENMASK(20, 18)
+#define B_QAM_TH2_7_BE4 GENMASK(23, 21)
+#define B_QAM_TH2_8_BE4 GENMASK(26, 24)
+#define R_RFSI_CT_DEF_BE4 0x119F0
+#define B_RFSI_CT_ER_BE4 GENMASK(18, 15)
+#define B_RFSI_CT_SUBF_BE4 GENMASK(22, 19)
+#define B_RFSI_CT_FTM_BE4 GENMASK(26, 23)
+#define B_RFSI_CT_SENS_BE4 GENMASK(30, 27)
+#define R_FBTB_CT_DEF_BE4 0x119F4
+#define B_FBTB_CT_DEF_BE GENMASK(3, 0)
+#define B_FBTB_CT_PB_BE4 GENMASK(7, 4)
+#define B_FBTB_CT_DL_WO_BE4 GENMASK(11, 8)
+#define B_FBTB_CT_DL_BF_BE4 GENMASK(15, 12)
+#define B_FBTB_CT_MUMIMO_BE4 GENMASK(19, 16)
+#define B_FBTB_CT_FTM_BE4 GENMASK(23, 20)
+#define B_FBTB_CT_SENS_BE4 GENMASK(27, 24)
+#define R_RFSI_CT_OPT_0_BE4 0x11A94
+#define R_RFSI_CT_OPT_8_BE4 0x11A98
+#define R_QAM_COMP_TH0_BE4 0x11A9C
+#define R_QAM_COMP_TH1_BE4 0x11AA0
+#define R_QAM_COMP_TH2_BE4 0x11AA4
+#define R_QAM_COMP_TH3_BE4 0x11AA8
+#define R_QAM_COMP_TH4_BE4 0x11ABC
+#define B_QAM_COMP_TH4_L GENMASK(4, 0)
+#define B_QAM_COMP_TH4_M GENMASK(14, 10)
+#define B_QAM_COMP_TH4_H GENMASK(24, 20)
+#define B_QAM_COMP_TH4_2L GENMASK(9, 5)
+#define B_QAM_COMP_TH4_2M GENMASK(19, 15)
+#define B_QAM_COMP_TH4_2H GENMASK(29, 25)
+#define R_QAM_COMP_TH5_BE4 0x11AC0
+#define B_QAM_COMP_TH5_L GENMASK(4, 0)
+#define B_QAM_COMP_TH5_M GENMASK(14, 10)
+#define B_QAM_COMP_TH5_H GENMASK(24, 20)
+#define B_QAM_COMP_TH5_2L GENMASK(9, 5)
+#define B_QAM_COMP_TH5_2M GENMASK(19, 15)
+#define B_QAM_COMP_TH5_2H GENMASK(29, 25)
+#define R_QAM_COMP_TH6_BE4 0x11AC4
+#define B_QAM_COMP_TH6_L GENMASK(4, 0)
+#define B_QAM_COMP_TH6_M GENMASK(14, 10)
+#define B_QAM_COMP_TH6_2L GENMASK(9, 5)
+#define B_QAM_COMP_TH6_2M GENMASK(19, 15)
+#define R_OW_VAL_0_BE4 0x11AAC
+#define R_OW_VAL_1_BE4 0x11AB0
+#define R_OW_VAL_2_BE4 0x11AB4
+#define R_OW_VAL_3_BE4 0x11AB8
+#define R_BANDEDGE_DBWX_BE4 0x11ACC
+#define B_BANDEDGE_DBW20_BE4 BIT(29)
+#define B_BANDEDGE_DBW40_BE4 BIT(30)
+#define B_BANDEDGE_DBW80_BE4 BIT(31)
+#define R_BANDEDGE_DBWY_BE4 0x11AD0
+#define B_BANDEDGE_DBW160_BE4 BIT(0)
+
+#define R_CHINFO_SEG_BE4 0x200B4
+#define B_CHINFO_SEG_LEN_BE4 GENMASK(12, 10)
+#define R_STS_HDR2_PARSING_BE4 0x2070C
+#define B_STS_HDR2_PARSING_BE4 BIT(10)
+#define R_SW_SI_WDATA_BE4 0x20370
+#define B_SW_SI_DATA_PATH_BE4 GENMASK(31, 28)
+#define B_SW_SI_DATA_ADR_BE4 GENMASK(27, 20)
+#define B_SW_SI_DATA_DAT_BE4 GENMASK(19, 0)
+#define R_SW_SI_READ_ADDR_BE4 0x20378
+#define B_SW_SI_READ_ADDR_BE4 GENMASK(10, 0)
+#define R_IFS_T1_AVG_BE4 0x20EDC
+#define B_IFS_T1_AVG_BE4 GENMASK(15, 0)
+#define B_IFS_T2_AVG_BE4 GENMASK(31, 16)
+#define R_IFS_T3_AVG_BE4 0x20EE0
+#define B_IFS_T3_AVG_BE4 GENMASK(15, 0)
+#define B_IFS_T4_AVG_BE4 GENMASK(31, 16)
+#define R_IFS_T1_CLM_BE4 0x20EE4
+#define B_IFS_T1_CLM_BE4 GENMASK(15, 0)
+#define B_IFS_T2_CLM_BE4 GENMASK(31, 16)
+#define R_IFS_T3_CLM_BE4 0x20EE8
+#define B_IFS_T3_CLM_BE4 GENMASK(15, 0)
+#define B_IFS_T4_CLM_BE4 GENMASK(31, 16)
+#define R_IFS_TOTAL_BE4 0x20EEC
+#define B_IFS_TOTAL_BE4 GENMASK(15, 0)
+#define B_IFS_CNT_DONE_BE4 BIT(16)
+#define R_IFS_T1_HIS_BE4 0x20F50
+#define B_IFS_T1_HIS_BE4 GENMASK(15, 0)
+#define B_IFS_T2_HIS_BE4 GENMASK(31, 16)
+#define R_IFS_T3_HIS_BE4 0x20F54
+#define B_IFS_T3_HIS_BE4 GENMASK(15, 0)
+#define B_IFS_T4_HIS_BE4 GENMASK(31, 16)
+
+#define R_TX_CFR_MANUAL_EN_BE4 0x2483C
+#define B_TX_CFR_MANUAL_EN_BE4_M BIT(30)
+
+#define R_CHINFO_OPT_BE4 0x267C8
+#define B_CHINFO_OPT_BE4 GENMASK(14, 13)
+#define R_CHINFO_NX_BE4 0x267D0
+#define B_CHINFO_NX_BE4 GENMASK(16, 6)
+#define R_CHINFO_ALG_BE4 0x267C8
+#define B_CHINFO_ALG_BE4 GENMASK(31, 30)
+
+#define R_SW_SI_DATA_BE4 0x2CF4C
+#define B_SW_SI_READ_DATA_BE4 GENMASK(19, 0)
+#define B_SW_SI_W_BUSY_BE4 BIT(24)
+#define B_SW_SI_R_BUSY_BE4 BIT(25)
+#define B_SW_SI_READ_DATA_DONE_BE4 BIT(26)
+
/* WiFi CPU local domain */
#define R_AX_WDT_CTRL 0x0040
#define B_AX_WDT_EN BIT(31)
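
Every R_BE_* / R_BE_*_C1 pair introduced above follows one rule: the _C1 variant is the CMAC0 register relocated into the CMAC1 window (CMAC1_START_ADDR_BE 0x14000 to CMAC1_END_ADDR_BE 0x17FFF), i.e. the base address plus 0x4000. A minimal sketch of how such a pair could be derived at run time instead of being spelled out twice — the helper name and offset constant are illustrative, not part of the driver:

    /* Assumption: the +0x4000 delta holds for every pair above
     * (0x11406 -> 0x15406, 0x11468 -> 0x15468, 0x11884 -> 0x15884, ...).
     */
    #define BE_CMAC1_ADDR_OFS 0x4000

    static inline u32 be_reg_for_mac(u32 cmac0_addr, u8 mac_idx)
    {
    	return mac_idx ? cmac0_addr + BE_CMAC1_ADDR_OFS : cmac0_addr;
    }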
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index 209d84909f88..28466cb35ea2 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -38,7 +38,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC, 0x0),
COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
- COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x1),
COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
@@ -95,7 +95,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
- COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR, 0x0),
@@ -111,12 +111,12 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
- COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN, 0x0),
COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
- COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC, 0x1),
COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
@@ -125,7 +125,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_ETSI, 0x0),
COUNTRY_REGD("TH", RTW89_THAILAND, RTW89_THAILAND, RTW89_THAILAND, 0x0),
- COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA, 0x0),
COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA, 0x0),
COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
@@ -134,7 +134,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
- COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
@@ -187,9 +187,9 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
- COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
- COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
@@ -214,7 +214,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
- COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
@@ -260,7 +260,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
- COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
COUNTRY_REGD("CU", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
@@ -1142,6 +1142,7 @@ static int rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
}
} else {
rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
+ dflt = true;
}
rcu_read_unlock();
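
For context on the rtw89_regd_map hunks: each COUNTRY_REGD() row binds an ISO 3166 alpha-2 code to one regulatory body per band, and the third column appears to be the 6 GHz domain — so replacing RTW89_NA with RTW89_FCC or RTW89_ETSI, as done above for PY, NA, BD, ID, VN, TN, GL, GP, YT and EH, enables 6 GHz rules for those countries. A minimal lookup sketch, assuming the map entries carry an alpha2[2] member as in the upstream struct:

    static const struct rtw89_regd *regd_find_example(const char alpha2[2])
    {
    	unsigned int i;

    	for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) {
    		if (!memcmp(rtw89_regd_map[i].alpha2, alpha2, 2))
    			return &rtw89_regd_map[i];
    	}

    	return NULL;	/* caller falls back to a worldwide default */
    }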
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 84b628d23882..d6deb44a685b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -633,8 +633,6 @@ static int rtw8851b_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
efuse->rfe_type = map->rfe_type;
efuse->xtal_cap = map->xtal_k;
- rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
-
return 0;
}
@@ -2532,6 +2530,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.cfg_txrx_path = rtw8851b_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8851b_set_txpwr_ul_tb_offset,
.digital_pwr_comp = NULL,
+ .calc_rx_gain_normal = NULL,
.pwr_on_func = rtw8851b_pwr_on_func,
.pwr_off_func = rtw8851b_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2553,6 +2552,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
+ .h2c_wow_cam_update = rtw89_fw_h2c_wow_cam_update,
.btc_set_rfe = rtw8851b_btc_set_rfe,
.btc_init_cfg = rtw8851b_btc_init_cfg,
@@ -2590,6 +2590,10 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
.max_amsdu_limit = 3500,
+ .max_vht_mpdu_cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ .max_eht_mpdu_cap = 0,
+ .max_tx_agg_num = 128,
+ .max_rx_agg_num = 64,
.dis_2g_40m_ul_ofdma = true,
.rsvd_ple_ofst = 0x2f800,
.hfc_param_ini = {rtw8851b_hfc_param_ini_pcie,
@@ -2704,6 +2708,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.bss_clr_map_reg = R_BSS_CLR_MAP_V1,
.rfkill_init = &rtw8851b_rfkill_regs,
.rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
+ .btc_sb = {{{R_AX_SCOREBOARD, R_AX_SCOREBOARD},}},
.dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
@@ -2712,6 +2717,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.wowlan_stub = &rtw_wowlan_stub_8851b,
#endif
.xtal_info = &rtw8851b_xtal_info,
+ .default_quirks = 0,
};
EXPORT_SYMBOL(rtw8851b_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 8677723e3561..5ea7a36ab5ab 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -678,8 +678,6 @@ static int rtw8852a_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
efuse->rfe_type = map->rfe_type;
efuse->xtal_cap = map->xtal_k;
- rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
-
return 0;
}
@@ -2226,6 +2224,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.cfg_txrx_path = NULL,
.set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
.digital_pwr_comp = NULL,
+ .calc_rx_gain_normal = NULL,
.pwr_on_func = NULL,
.pwr_off_func = NULL,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2247,6 +2246,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
+ .h2c_wow_cam_update = rtw89_fw_h2c_wow_cam_update,
.btc_set_rfe = rtw8852a_btc_set_rfe,
.btc_init_cfg = rtw8852a_btc_init_cfg,
@@ -2275,6 +2275,10 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.small_fifo_size = false,
.dle_scc_rsvd_size = 0,
.max_amsdu_limit = 3500,
+ .max_vht_mpdu_cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ .max_eht_mpdu_cap = 0,
+ .max_tx_agg_num = 128,
+ .max_rx_agg_num = 64,
.dis_2g_40m_ul_ofdma = true,
.rsvd_ple_ofst = 0x6f800,
.hfc_param_ini = {rtw8852a_hfc_param_ini_pcie,
@@ -2391,12 +2395,14 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.bss_clr_map_reg = R_BSS_CLR_MAP,
.rfkill_init = &rtw8852a_rfkill_regs,
.rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
+ .btc_sb = {{{R_AX_SCOREBOARD, R_AX_SCOREBOARD},}},
.dma_ch_mask = 0,
.edcca_regs = &rtw8852a_edcca_regs,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8852a,
#endif
.xtal_info = &rtw8852a_xtal_info,
+ .default_quirks = 0,
};
EXPORT_SYMBOL(rtw8852a_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852au.c b/drivers/net/wireless/realtek/rtw89/rtw8852au.c
index ca782469c455..ccdbcc178c2a 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852au.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852au.c
@@ -52,6 +52,8 @@ static const struct usb_device_id rtw_8852au_id_table[] = {
.driver_info = (kernel_ulong_t)&rtw89_8852au_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3321, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852au_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3323, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852au_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x332c, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852au_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x013f, 0xff, 0xff, 0xff),
@@ -60,6 +62,8 @@ static const struct usb_device_id rtw_8852au_id_table[] = {
.driver_info = (kernel_ulong_t)&rtw89_8852au_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0141, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852au_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3625, 0x010d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852au_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x3625, 0x010f, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852au_info },
{},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index 70fb05bc5e98..197e3f5fb21b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -313,6 +313,27 @@ static void rtw8852b_pwr_sps_ana(struct rtw89_dev *rtwdev)
rtw89_write16(rtwdev, R_AX_SPS_ANA_ON_CTRL2, RTL8852B_RFE_05_SPS_ANA);
}
+static void rtw8852b_pwr_sps_dig_off(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+
+ if (efuse->rfe_type == 0x5) {
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_OFF_CTRL0,
+ B_AX_C1_L1_MASK, 0x1);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_OFF_CTRL0,
+ B_AX_C2_L1_MASK, 0x1);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_OFF_CTRL0,
+ B_AX_C3_L1_MASK, 0x2);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_OFF_CTRL0,
+ B_AX_R1_L1_MASK, 0x1);
+ } else {
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_OFF_CTRL0,
+ B_AX_C1_L1_MASK, 0x1);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_OFF_CTRL0,
+ B_AX_C3_L1_MASK, 0x3);
+ }
+}
+
static int rtw8852b_pwr_on_func(struct rtw89_dev *rtwdev)
{
u32 val32;
@@ -338,8 +359,7 @@ static int rtw8852b_pwr_on_func(struct rtw89_dev *rtwdev)
if (ret)
return ret;
- rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_OFF_CTRL0, B_AX_C1_L1_MASK, 0x1);
- rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_OFF_CTRL0, B_AX_C3_L1_MASK, 0x3);
+ rtw8852b_pwr_sps_dig_off(rtwdev);
rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC);
@@ -837,6 +857,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
.digital_pwr_comp = NULL,
+ .calc_rx_gain_normal = NULL,
.pwr_on_func = rtw8852b_pwr_on_func,
.pwr_off_func = rtw8852b_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -858,6 +879,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
+ .h2c_wow_cam_update = rtw89_fw_h2c_wow_cam_update,
.btc_set_rfe = rtw8852b_btc_set_rfe,
.btc_init_cfg = rtw8852bx_btc_init_cfg,
@@ -899,6 +921,10 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
.max_amsdu_limit = 5000,
+ .max_vht_mpdu_cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ .max_eht_mpdu_cap = 0,
+ .max_tx_agg_num = 128,
+ .max_rx_agg_num = 64,
.dis_2g_40m_ul_ofdma = true,
.rsvd_ple_ofst = 0x2f800,
.hfc_param_ini = {rtw8852b_hfc_param_ini_pcie,
@@ -1016,6 +1042,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.bss_clr_map_reg = R_BSS_CLR_MAP_V1,
.rfkill_init = &rtw8852b_rfkill_regs,
.rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
+ .btc_sb = {{{R_AX_SCOREBOARD, R_AX_SCOREBOARD},}},
.dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
@@ -1024,6 +1051,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.wowlan_stub = &rtw_wowlan_stub_8852b,
#endif
.xtal_info = NULL,
+ .default_quirks = 0,
};
EXPORT_SYMBOL(rtw8852b_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index 4e72f4961837..65b839323e3e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -265,8 +265,6 @@ static int __rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
efuse->rfe_type = map->rfe_type;
efuse->xtal_cap = map->xtal_k;
- rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
-
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index f956474c3b72..92bbd6e5d699 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -703,6 +703,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
.digital_pwr_comp = NULL,
+ .calc_rx_gain_normal = NULL,
.pwr_on_func = rtw8852bt_pwr_on_func,
.pwr_off_func = rtw8852bt_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -724,6 +725,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
+ .h2c_wow_cam_update = rtw89_fw_h2c_wow_cam_update,
.btc_set_rfe = rtw8852bt_btc_set_rfe,
.btc_init_cfg = rtw8852bx_btc_init_cfg,
@@ -765,6 +767,10 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
.max_amsdu_limit = 5000,
+ .max_vht_mpdu_cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ .max_eht_mpdu_cap = 0,
+ .max_tx_agg_num = 128,
+ .max_rx_agg_num = 64,
.dis_2g_40m_ul_ofdma = true,
.rsvd_ple_ofst = 0x6f800,
.hfc_param_ini = {rtw8852bt_hfc_param_ini_pcie, NULL, NULL},
@@ -873,6 +879,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.bss_clr_map_reg = R_BSS_CLR_MAP_V1,
.rfkill_init = &rtw8852bt_rfkill_regs,
.rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
+ .btc_sb = {{{R_AX_SCOREBOARD, R_AX_SCOREBOARD},}},
.dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
@@ -881,6 +888,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.wowlan_stub = &rtw_wowlan_stub_8852bt,
#endif
.xtal_info = NULL,
+ .default_quirks = 0,
};
EXPORT_SYMBOL(rtw8852bt_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bu.c b/drivers/net/wireless/realtek/rtw89/rtw8852bu.c
index 980d17ef68d0..84cd3ec971f9 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bu.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bu.c
@@ -54,6 +54,8 @@ static const struct usb_device_id rtw_8852bu_id_table[] = {
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x0db0, 0x6931, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0db0, 0xf0c8, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3327, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x3574, 0x6121, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index db99450e9158..de5d343f80a5 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -600,8 +600,6 @@ static int rtw8852c_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
efuse->rfe_type = map->rfe_type;
efuse->xtal_cap = map->xtal_k;
- rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
-
return 0;
}
@@ -3067,6 +3065,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.cfg_txrx_path = rtw8852c_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
.digital_pwr_comp = NULL,
+ .calc_rx_gain_normal = NULL,
.pwr_on_func = rtw8852c_pwr_on_func,
.pwr_off_func = rtw8852c_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -3088,6 +3087,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
+ .h2c_wow_cam_update = rtw89_fw_h2c_wow_cam_update,
.btc_set_rfe = rtw8852c_btc_set_rfe,
.btc_init_cfg = rtw8852c_btc_init_cfg,
@@ -3116,6 +3116,10 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.small_fifo_size = false,
.dle_scc_rsvd_size = 0,
.max_amsdu_limit = 8000,
+ .max_vht_mpdu_cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ .max_eht_mpdu_cap = 0,
+ .max_tx_agg_num = 128,
+ .max_rx_agg_num = 64,
.dis_2g_40m_ul_ofdma = false,
.rsvd_ple_ofst = 0x6f800,
.hfc_param_ini = {rtw8852c_hfc_param_ini_pcie,
@@ -3236,12 +3240,14 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.bss_clr_map_reg = R_BSS_CLR_MAP,
.rfkill_init = &rtw8852c_rfkill_regs,
.rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
+ .btc_sb = {{{R_AX_SCOREBOARD, R_AX_SCOREBOARD},}},
.dma_ch_mask = 0,
.edcca_regs = &rtw8852c_edcca_regs,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8852c,
#endif
.xtal_info = NULL,
+ .default_quirks = 0,
};
EXPORT_SYMBOL(rtw8852c_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852cu.c b/drivers/net/wireless/realtek/rtw89/rtw8852cu.c
index 2708b523ca14..3b9825c92a0d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852cu.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852cu.c
@@ -46,6 +46,8 @@ static const struct usb_device_id rtw_8852cu_id_table[] = {
.driver_info = (kernel_ulong_t)&rtw89_8852cu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x0db0, 0x991d, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852cu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x28de, 0x2432, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852cu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x35b2, 0x0502, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852cu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0101, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 4437279c554b..f41b66b362c4 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -628,24 +628,36 @@ static int rtw8922a_read_efuse_rf(struct rtw89_dev *rtwdev, u8 *log_map)
rtw8922a_efuse_parsing_tssi(rtwdev, map);
rtw8922a_efuse_parsing_gain_offset(rtwdev, map);
- rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
-
return 0;
}
static int rtw8922a_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
enum rtw89_efuse_block block)
{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ int ret;
+
switch (block) {
case RTW89_EFUSE_BLOCK_HCI_DIG_PCIE_SDIO:
- return rtw8922a_read_efuse_pci_sdio(rtwdev, log_map);
+ ret = rtw8922a_read_efuse_pci_sdio(rtwdev, log_map);
+ break;
case RTW89_EFUSE_BLOCK_HCI_DIG_USB:
- return rtw8922a_read_efuse_usb(rtwdev, log_map);
+ ret = rtw8922a_read_efuse_usb(rtwdev, log_map);
+ break;
case RTW89_EFUSE_BLOCK_RF:
- return rtw8922a_read_efuse_rf(rtwdev, log_map);
+ ret = rtw8922a_read_efuse_rf(rtwdev, log_map);
+ break;
default:
- return 0;
+ ret = 0;
+ break;
+ }
+
+ if (!ret && is_zero_ether_addr(efuse->addr)) {
+ rtw89_info(rtwdev, "efuse mac address is zero, using random mac\n");
+ eth_random_addr(efuse->addr);
}
+
+ return ret;
}
#define THM_TRIM_POSITIVE_MASK BIT(6)
@@ -1756,6 +1768,32 @@ static int rtw8922a_ctrl_rx_path_tmac(struct rtw89_dev *rtwdev,
}
#define DIGITAL_PWR_COMP_REG_NUM 22
+static const u32 rtw8922a_digital_pwr_comp_2g_s0_val[][DIGITAL_PWR_COMP_REG_NUM] = {
+ {0x012C0064, 0x04B00258, 0x00432710, 0x019000A7, 0x06400320,
+ 0x0D05091D, 0x14D50FA0, 0x00000000, 0x01010000, 0x00000101,
+ 0x01010101, 0x02020201, 0x02010000, 0x03030202, 0x00000303,
+ 0x03020101, 0x06060504, 0x01010000, 0x06050403, 0x01000606,
+ 0x05040202, 0x07070706},
+ {0x012C0064, 0x04B00258, 0x00432710, 0x019000A7, 0x06400320,
+ 0x0D05091D, 0x14D50FA0, 0x00000000, 0x01010100, 0x00000101,
+ 0x01000000, 0x01010101, 0x01010000, 0x02020202, 0x00000404,
+ 0x03020101, 0x04040303, 0x02010000, 0x03030303, 0x00000505,
+ 0x03030201, 0x05050303},
+};
+
+static const u32 rtw8922a_digital_pwr_comp_2g_s1_val[][DIGITAL_PWR_COMP_REG_NUM] = {
+ {0x012C0064, 0x04B00258, 0x00432710, 0x019000A7, 0x06400320,
+ 0x0D05091D, 0x14D50FA0, 0x01010000, 0x01010101, 0x00000101,
+ 0x01010100, 0x01010101, 0x01010000, 0x02020202, 0x01000202,
+ 0x02020101, 0x03030202, 0x02010000, 0x05040403, 0x01000606,
+ 0x05040302, 0x07070605},
+ {0x012C0064, 0x04B00258, 0x00432710, 0x019000A7, 0x06400320,
+ 0x0D05091D, 0x14D50FA0, 0x00000000, 0x01010100, 0x00000101,
+ 0x01010000, 0x02020201, 0x02010100, 0x03030202, 0x01000404,
+ 0x04030201, 0x05050404, 0x01010100, 0x04030303, 0x01000505,
+ 0x03030101, 0x05050404},
+};
+
static const u32 rtw8922a_digital_pwr_comp_val[][DIGITAL_PWR_COMP_REG_NUM] = {
{0x012C0096, 0x044C02BC, 0x00322710, 0x015E0096, 0x03C8028A,
0x0BB80708, 0x17701194, 0x02020100, 0x03030303, 0x01000303,
@@ -1770,7 +1808,7 @@ static const u32 rtw8922a_digital_pwr_comp_val[][DIGITAL_PWR_COMP_REG_NUM] = {
};
static void rtw8922a_set_digital_pwr_comp(struct rtw89_dev *rtwdev,
- bool enable, u8 nss,
+ u8 band, u8 nss,
enum rtw89_rf_path path)
{
static const u32 ltpc_t0[2] = {R_BE_LTPC_T0_PATH0, R_BE_LTPC_T0_PATH1};
@@ -1778,14 +1816,25 @@ static void rtw8922a_set_digital_pwr_comp(struct rtw89_dev *rtwdev,
u32 addr, val;
u32 i;
- if (nss == 1)
- digital_pwr_comp = rtw8922a_digital_pwr_comp_val[0];
- else
- digital_pwr_comp = rtw8922a_digital_pwr_comp_val[1];
+ if (nss == 1) {
+ if (band == RTW89_BAND_2G)
+ digital_pwr_comp = path == RF_PATH_A ?
+ rtw8922a_digital_pwr_comp_2g_s0_val[0] :
+ rtw8922a_digital_pwr_comp_2g_s1_val[0];
+ else
+ digital_pwr_comp = rtw8922a_digital_pwr_comp_val[0];
+ } else {
+ if (band == RTW89_BAND_2G)
+ digital_pwr_comp = path == RF_PATH_A ?
+ rtw8922a_digital_pwr_comp_2g_s0_val[1] :
+ rtw8922a_digital_pwr_comp_2g_s1_val[1];
+ else
+ digital_pwr_comp = rtw8922a_digital_pwr_comp_val[1];
+ }
addr = ltpc_t0[path];
for (i = 0; i < DIGITAL_PWR_COMP_REG_NUM; i++, addr += 4) {
- val = enable ? digital_pwr_comp[i] : 0;
+ val = digital_pwr_comp[i];
rtw89_phy_write32(rtwdev, addr, val);
}
}
@@ -1794,7 +1843,7 @@ static void rtw8922a_digital_pwr_comp(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
- bool enable = chan->band_type != RTW89_BAND_2G;
+ u8 band = chan->band_type;
u8 path;
if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
@@ -1802,10 +1851,10 @@ static void rtw8922a_digital_pwr_comp(struct rtw89_dev *rtwdev,
path = RF_PATH_A;
else
path = RF_PATH_B;
- rtw8922a_set_digital_pwr_comp(rtwdev, enable, 1, path);
+ rtw8922a_set_digital_pwr_comp(rtwdev, band, 1, path);
} else {
- rtw8922a_set_digital_pwr_comp(rtwdev, enable, 2, RF_PATH_A);
- rtw8922a_set_digital_pwr_comp(rtwdev, enable, 2, RF_PATH_B);
+ rtw8922a_set_digital_pwr_comp(rtwdev, band, 2, RF_PATH_A);
+ rtw8922a_set_digital_pwr_comp(rtwdev, band, 2, RF_PATH_B);
}
}
@@ -2826,6 +2875,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.cfg_txrx_path = rtw8922a_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = NULL,
.digital_pwr_comp = rtw8922a_digital_pwr_comp,
+ .calc_rx_gain_normal = NULL,
.pwr_on_func = rtw8922a_pwr_on_func,
.pwr_off_func = rtw8922a_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc_v2,
@@ -2847,6 +2897,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.h2c_default_dmac_tbl = rtw89_fw_h2c_default_dmac_tbl_v2,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon_be,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam_v1,
+ .h2c_wow_cam_update = rtw89_fw_h2c_wow_cam_update,
.btc_set_rfe = rtw8922a_btc_set_rfe,
.btc_init_cfg = rtw8922a_btc_init_cfg,
@@ -2875,6 +2926,10 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.small_fifo_size = false,
.dle_scc_rsvd_size = 0,
.max_amsdu_limit = 8000,
+ .max_vht_mpdu_cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ .max_eht_mpdu_cap = IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991,
+ .max_tx_agg_num = 128,
+ .max_rx_agg_num = 64,
.dis_2g_40m_ul_ofdma = false,
.rsvd_ple_ofst = 0x8f800,
.hfc_param_ini = {rtw8922a_hfc_param_ini_pcie, NULL, NULL},
@@ -2988,12 +3043,14 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.bss_clr_map_reg = R_BSS_CLR_MAP_V2,
.rfkill_init = &rtw8922a_rfkill_regs,
.rfkill_get = {R_BE_GPIO_EXT_CTRL, B_BE_GPIO_IN_9},
+ .btc_sb = {{{R_BE_SCOREBOARD, R_BE_SCOREBOARD},}},
.dma_ch_mask = 0,
.edcca_regs = &rtw8922a_edcca_regs,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8922a,
#endif
.xtal_info = NULL,
+ .default_quirks = 0,
};
EXPORT_SYMBOL(rtw8922a_chip_info);
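
The MAC-address fallback added to rtw8922a_read_efuse() uses two stock helpers from <linux/etherdevice.h>. The same pattern in isolation, purely as a sketch:

    #include <linux/etherdevice.h>

    static void mac_fixup_example(u8 addr[ETH_ALEN])
    {
    	/* An all-zero efuse MAC is replaced with a random
    	 * locally-administered unicast address.
    	 */
    	if (is_zero_ether_addr(addr))
    		eth_random_addr(addr);
    }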
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
index fce094c7ce93..98f14b31cf52 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
@@ -205,11 +205,11 @@ static void rtw8922a_chlk_ktbl_sel(struct rtw89_dev *rtwdev, u8 kpath, u8 idx)
}
}
-static u8 rtw8922a_chlk_reload_sel_tbl(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan, u8 path)
+static u8 rtw8922a_chlk_reload_sel_tbl_v0(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan, u8 path)
{
- struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V1] = {};
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
u8 tbl_sel;
for (tbl_sel = 0; tbl_sel < ARRAY_SIZE(desc); tbl_sel++) {
@@ -229,11 +229,53 @@ static u8 rtw8922a_chlk_reload_sel_tbl(struct rtw89_dev *rtwdev,
rfk_mcc->data[path].ch[tbl_sel] = chan->channel;
rfk_mcc->data[path].band[tbl_sel] = chan->band_type;
rfk_mcc->data[path].bw[tbl_sel] = chan->band_width;
+ rfk_mcc->data[path].rf18[tbl_sel] = rtw89_chip_chan_to_rf18_val(rtwdev, chan);
rfk_mcc->data[path].table_idx = tbl_sel;
return tbl_sel;
}
+static u8 rtw8922a_chlk_reload_sel_tbl_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan, u8 path)
+{
+ struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
+ struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V1] = {};
+ u8 tbl_sel;
+
+ for (tbl_sel = 0; tbl_sel < ARRAY_SIZE(desc); tbl_sel++) {
+ struct rtw89_rfk_chan_desc *p = &desc[tbl_sel];
+
+ p->ch = rfk_mcc->ch[tbl_sel];
+
+ p->has_band = true;
+ p->band = rfk_mcc->band[tbl_sel];
+
+ p->has_bw = true;
+ p->bw = rfk_mcc->bw[tbl_sel];
+ }
+
+ tbl_sel = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan);
+
+ rfk_mcc->ch[tbl_sel] = chan->channel;
+ rfk_mcc->band[tbl_sel] = chan->band_type;
+ rfk_mcc->bw[tbl_sel] = chan->band_width;
+ rfk_mcc->rf18[tbl_sel] = rtw89_chip_chan_to_rf18_val(rtwdev, chan);
+
+ /* the table array is shared, but tbl_sel is tracked independently per path */
+ rfk_mcc[path].table_idx = tbl_sel;
+
+ return tbl_sel;
+}
+
+static u8 rtw8922a_chlk_reload_sel_tbl(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan, u8 path)
+{
+ if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_MCC_V1, &rtwdev->fw))
+ return rtw8922a_chlk_reload_sel_tbl_v1(rtwdev, chan, path);
+ else
+ return rtw8922a_chlk_reload_sel_tbl_v0(rtwdev, chan, path);
+}
+
static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
{
const struct rtw89_chan *chan0, *chan1;
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index f99e179f7ff9..7fdc69578da3 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -431,6 +431,14 @@ static void hal_send_m4_event(struct rtw89_ser *ser)
rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}
+static void hal_enable_err_imr(struct rtw89_ser *ser)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ mac->err_imr_ctrl(rtwdev, true);
+}
+
/* state handler */
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
@@ -552,6 +560,8 @@ static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
break;
case SER_EV_MAC_RESET_DONE:
+ hal_enable_err_imr(ser);
+
ser_state_goto(ser, SER_IDLE_ST);
break;
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index fa324b4a1dde..b69a2529aefc 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -188,12 +188,16 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
#define BE_TXD_BODY2_QSEL GENMASK(22, 17)
#define BE_TXD_BODY2_TID_IND BIT(23)
#define BE_TXD_BODY2_MACID GENMASK(31, 24)
+#define BE_TXD_BODY2_QSEL_V1 GENMASK(20, 15)
+#define BE_TXD_BODY2_TID_IND_V1 BIT(21)
+#define BE_TXD_BODY2_MACID_V1 GENMASK(31, 22)
/* TX WD BODY DWORD 3 */
#define BE_TXD_BODY3_WIFI_SEQ GENMASK(11, 0)
#define BE_TXD_BODY3_MLO_FLAG BIT(12)
#define BE_TXD_BODY3_IS_MLD_SW_EN BIT(13)
#define BE_TXD_BODY3_TRY_RATE BIT(14)
+#define BE_TXD_BODY3_BK_V1 BIT(14)
#define BE_TXD_BODY3_RELINK_FLAG_V1 BIT(15)
#define BE_TXD_BODY3_BAND0_SU_TC_V1 GENMASK(21, 16)
#define BE_TXD_BODY3_TOTAL_TC GENMASK(27, 22)
@@ -201,6 +205,7 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
#define BE_TXD_BODY3_MU_PRI_RTY BIT(29)
#define BE_TXD_BODY3_MU_2ND_RTY BIT(30)
#define BE_TXD_BODY3_BAND1_SU_RTY_V1 BIT(31)
+#define BE_TXD_BODY3_DRIVER_QUEUE_TIME GENMASK(31, 16)
/* TX WD BODY DWORD 4 */
#define BE_TXD_BODY4_TXDESC_CHECKSUM GENMASK(15, 0)
@@ -224,6 +229,10 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
#define BE_TXD_BODY6_EOSP_BIT BIT(15)
#define BE_TXD_BODY6_S_IDX GENMASK(23, 16)
#define BE_TXD_BODY6_RU_POS GENMASK(31, 24)
+#define BE_TXD_BODY6_MU_TC_V1 GENMASK(3, 0)
+#define BE_TXD_BODY6_RU_TC_V1 GENMASK(8, 5)
+#define BE_TXD_BODY6_RELINK_EN BIT(9)
+#define BE_TXD_BODY6_RELINK_LAST BIT(10)
/* TX WD BODY DWORD 7 */
#define BE_TXD_BODY7_RTS_TC GENMASK(5, 0)
@@ -262,6 +271,8 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
/* TX WD INFO DWORD 2 */
#define BE_TXD_INFO2_SEC_CAM_IDX GENMASK(7, 0)
#define BE_TXD_INFO2_FORCE_KEY_EN BIT(8)
+#define BE_TXD_INFO2_SEC_CAM_IDX_V1 GENMASK(9, 0)
+#define BE_TXD_INFO2_FORCE_KEY_EN_V1 BIT(10)
#define BE_TXD_INFO2_LIFETIME_SEL GENMASK(15, 13)
#define BE_TXD_INFO2_FORCE_TXOP BIT(17)
#define BE_TXD_INFO2_AMPDU_DENSITY GENMASK(20, 18)
@@ -277,6 +288,7 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
#define BE_TXD_INFO3_RTT_EN BIT(9)
#define BE_TXD_INFO3_HT_DATA_SND_V1 BIT(10)
#define BE_TXD_INFO3_BT_NULL BIT(11)
+#define BE_TXD_INFO3_DISABLE_TXBF BIT(11)
#define BE_TXD_INFO3_TRI_FRAME BIT(12)
#define BE_TXD_INFO3_NULL_0 BIT(13)
#define BE_TXD_INFO3_NULL_1 BIT(14)
@@ -292,6 +304,8 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
#define BE_TXD_INFO4_PUNC_MODE GENMASK(17, 16)
#define BE_TXD_INFO4_SW_TX_OK_0 BIT(18)
#define BE_TXD_INFO4_SW_TX_OK_1 BIT(19)
+#define BE_TXD_INFO4_SW_EHT_NLTF_SWITCH BIT(20)
+#define BE_TXD_INFO4_SW_EHT_NLTF GENMASK(22, 21)
#define BE_TXD_INFO4_SW_TX_PWR_DBM GENMASK(26, 23)
#define BE_TXD_INFO4_RTS_EN BIT(27)
#define BE_TXD_INFO4_CTS2SELF BIT(28)
@@ -308,6 +322,7 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
#define BE_TXD_INFO6_UL_GI_LTF GENMASK(14, 12)
#define BE_TXD_INFO6_UL_DOPPLER BIT(15)
#define BE_TXD_INFO6_UL_STBC BIT(16)
+#define BE_TXD_INFO6_UL_MU_MIMO_EN BIT(17)
#define BE_TXD_INFO6_UL_LENGTH_REF GENMASK(21, 18)
#define BE_TXD_INFO6_UL_RF_GAIN_IDX GENMASK(31, 22)
@@ -322,6 +337,7 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
#define BE_TXD_INFO7_UL_HELTF_SYMBOL_NUM GENMASK(19, 17)
#define BE_TXD_INFO7_ULBW GENMASK(21, 20)
#define BE_TXD_INFO7_ULBW_EXT GENMASK(23, 22)
+#define BE_TXD_INFO7_UL_TRI_PAD_TSF BIT(24)
#define BE_TXD_INFO7_USE_WD_UL GENMASK(25, 24)
#define BE_TXD_INFO7_EXTEND_MODE_SEL GENMASK(31, 28)
@@ -488,6 +504,7 @@ struct rtw89_phy_sts_iehdr {
/* BE RXD dword2 */
#define BE_RXD_MAC_ID_MASK GENMASK(7, 0)
+#define BE_RXD_MAC_ID_V1 GENMASK(9, 0)
#define BE_RXD_TYPE_MASK GENMASK(11, 10)
#define BE_RXD_LAST_MSDU BIT(12)
#define BE_RXD_AMSDU_CUT BIT(13)
@@ -519,6 +536,7 @@ struct rtw89_phy_sts_iehdr {
#define BE_RXD_QNULL BIT(22)
#define BE_RXD_A4_FRAME BIT(23)
#define BE_RXD_FRAG_MASK GENMASK(27, 24)
+#define BE_RXD_GET_CH_INFO_V2 GENMASK(31, 29)
#define BE_RXD_GET_CH_INFO_V1_MASK GENMASK(31, 30)
/* BE RXD dword4 */
@@ -534,10 +552,14 @@ struct rtw89_phy_sts_iehdr {
/* BE RXD dword6 */
#define BE_RXD_ADDR_CAM_MASK GENMASK(7, 0)
+#define BE_RXD_ADDR_CAM_V1 GENMASK(9, 0)
+#define BE_RXD_RX_STATISTICS_V1 BIT(11)
+#define BE_RXD_SMART_ANT_V1 BIT(12)
#define BE_RXD_SR_EN BIT(13)
#define BE_RXD_NON_SRG_PPDU BIT(14)
#define BE_RXD_INTER_PPDU BIT(15)
#define BE_RXD_USER_ID_MASK GENMASK(21, 16)
+#define BE_RXD_SEC_CAM_IDX_V1 GENMASK(31, 22)
#define BE_RXD_RX_STATISTICS BIT(22)
#define BE_RXD_SMART_ANT BIT(23)
#define BE_RXD_SEC_CAM_IDX_MASK GENMASK(31, 24)
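
The BE_TXD_*_V1 fields added above are ordinary GENMASK()/BIT() definitions, so they compose with the <linux/bitfield.h> encoders just like the existing layout. A hedged sketch — the function is illustrative, not driver code — packing TX WD body dword 2 in the _V1 layout:

    #include <linux/bitfield.h>

    static __le32 be_txd_body2_v1_example(u8 qsel, bool tid_ind, u16 macid)
    {
    	return le32_encode_bits(qsel, BE_TXD_BODY2_QSEL_V1) |
    	       le32_encode_bits(tid_ind, BE_TXD_BODY2_TID_IND_V1) |
    	       le32_encode_bits(macid, BE_TXD_BODY2_MACID_V1);
    }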
diff --git a/drivers/net/wireless/realtek/rtw89/usb.c b/drivers/net/wireless/realtek/rtw89/usb.c
index d7d968207a39..e77561a4d971 100644
--- a/drivers/net/wireless/realtek/rtw89/usb.c
+++ b/drivers/net/wireless/realtek/rtw89/usb.c
@@ -620,7 +620,7 @@ static int rtw89_usb_init_rx(struct rtw89_dev *rtwdev)
struct sk_buff *rx_skb;
int i;
- rtwusb->rxwq = alloc_workqueue("rtw89_usb: rx wq", WQ_BH, 0);
+ rtwusb->rxwq = alloc_workqueue("rtw89_usb: rx wq", WQ_BH | WQ_PERCPU, 0);
if (!rtwusb->rxwq) {
rtw89_err(rtwdev, "failed to create RX work queue\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 46aba4cb2ee9..b67ceda59e92 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -809,6 +809,10 @@ static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
reason = rtw89_read8(rtwdev, wow_reason_reg);
switch (reason) {
+ case RTW89_WOW_RSN_RX_DISASSOC:
+ wakeup.disconnect = true;
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: Rx disassoc\n");
+ break;
case RTW89_WOW_RSN_RX_DEAUTH:
wakeup.disconnect = true;
rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: Rx deauth\n");
@@ -1070,7 +1074,7 @@ static void rtw89_wow_pattern_clear_cam(struct rtw89_dev *rtwdev)
for (i = 0; i < rtw_wow->pattern_cnt; i++) {
rtw_pattern = &rtw_wow->patterns[i];
rtw_pattern->valid = false;
- rtw89_fw_wow_cam_update(rtwdev, rtw_pattern);
+ rtw89_chip_h2c_wow_cam_update(rtwdev, rtw_pattern);
}
}
@@ -1081,7 +1085,7 @@ static void rtw89_wow_pattern_write(struct rtw89_dev *rtwdev)
int i;
for (i = 0; i < rtw_wow->pattern_cnt; i++)
- rtw89_fw_wow_cam_update(rtwdev, rtw_pattern + i);
+ rtw89_chip_h2c_wow_cam_update(rtwdev, rtw_pattern + i);
}
static void rtw89_wow_pattern_clear(struct rtw89_dev *rtwdev)
@@ -1263,15 +1267,15 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
const struct rtw89_chip_info *chip = rtwdev->chip;
bool include_bb = !!chip->bbmcu_nr;
- bool disable_intr_for_dlfw = false;
+ bool disable_intr_for_dlfw = true;
struct ieee80211_sta *wow_sta;
struct rtw89_sta_link *rtwsta_link = NULL;
struct rtw89_sta *rtwsta;
bool is_conn = true;
int ret;
- if (chip_id == RTL8852C || chip_id == RTL8922A)
- disable_intr_for_dlfw = true;
+ if (chip->chip_gen == RTW89_CHIP_AX && chip_id != RTL8852C)
+ disable_intr_for_dlfw = false;
wow_sta = ieee80211_find_sta(wow_vif, wow_vif->cfg.ap_addr);
if (wow_sta) {
diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h
index d2ba6cebc2a6..71e07f482174 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.h
+++ b/drivers/net/wireless/realtek/rtw89/wow.h
@@ -33,6 +33,7 @@
enum rtw89_wake_reason {
RTW89_WOW_RSN_RX_PTK_REKEY = 0x1,
RTW89_WOW_RSN_RX_GTK_REKEY = 0x2,
+ RTW89_WOW_RSN_RX_DISASSOC = 0x4,
RTW89_WOW_RSN_RX_DEAUTH = 0x8,
RTW89_WOW_RSN_DISCONNECT = 0x10,
RTW89_WOW_RSN_RX_MAGIC_PKT = 0x21,
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 2112d8d277a9..a9bb37d5d581 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -425,35 +425,6 @@ void rsi_91x_deinit(struct rsi_hw *adapter)
}
EXPORT_SYMBOL_GPL(rsi_91x_deinit);
-/**
- * rsi_91x_hal_module_init() - This function is invoked when the module is
- * loaded into the kernel.
- * It registers the client driver.
- * @void: Void.
- *
- * Return: 0 on success, -1 on failure.
- */
-static int rsi_91x_hal_module_init(void)
-{
- rsi_dbg(INIT_ZONE, "%s: Module init called\n", __func__);
- return 0;
-}
-
-/**
- * rsi_91x_hal_module_exit() - This function is called at the time of
- * removing/unloading the module.
- * It unregisters the client driver.
- * @void: Void.
- *
- * Return: None.
- */
-static void rsi_91x_hal_module_exit(void)
-{
- rsi_dbg(INIT_ZONE, "%s: Module exit called\n", __func__);
-}
-
-module_init(rsi_91x_hal_module_init);
-module_exit(rsi_91x_hal_module_exit);
MODULE_AUTHOR("Redpine Signals Inc");
MODULE_DESCRIPTION("Station driver for RSI 91x devices");
MODULE_VERSION("0.1");
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 1e578533e473..ee7ad81c858d 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1443,9 +1443,8 @@ static int rsi_thaw(struct device *dev)
return 0;
}
-static void rsi_shutdown(struct device *dev)
+static void rsi_shutdown(struct sdio_func *pfunction)
{
- struct sdio_func *pfunction = dev_to_sdio_func(dev);
struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
struct rsi_91x_sdiodev *sdev = adapter->rsi_dev;
struct ieee80211_hw *hw = adapter->hw;
@@ -1513,9 +1512,9 @@ static struct sdio_driver rsi_driver = {
.remove = rsi_disconnect,
.id_table = rsi_dev_table,
#ifdef CONFIG_PM
+ .shutdown = rsi_shutdown,
.drv = {
.pm = &rsi_pm_ops,
- .shutdown = rsi_shutdown,
}
#endif
};
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index cb8b3102fa6c..166efac812fe 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -3,6 +3,7 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include "wl1251.h"
#include "reg.h"
@@ -149,15 +150,7 @@ int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len)
goto out;
}
- /* be careful with the buffer sizes */
- strncpy(buf, rev->fw_version, min(len, sizeof(rev->fw_version)));
-
- /*
- * if the firmware version string is exactly
- * sizeof(rev->fw_version) long or fw_len is less than
- * sizeof(rev->fw_version) it won't be null terminated
- */
- buf[min(len, sizeof(rev->fw_version)) - 1] = '\0';
+ strscpy(buf, rev->fw_version, len);
out:
kfree(rev);
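
The wl1251 conversion above is safe because strscpy(), unlike strncpy(), always NUL-terminates the destination and reports truncation through its return value — exactly what the removed manual termination emulated by hand. A small illustration of that contract:

    #include <linux/errno.h>
    #include <linux/string.h>

    static ssize_t fw_version_copy_example(char *buf, size_t len, const char *src)
    {
    	ssize_t n = strscpy(buf, src, len);

    	/* n == -E2BIG means src was truncated, but buf still holds
    	 * a valid NUL-terminated string of len - 1 characters.
    	 */
    	return n == -E2BIG ? (ssize_t)(len - 1) : n;
    }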
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index d16afb35f9ee..f8160f372bc7 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -455,6 +455,7 @@ enum wl1271_cmd_key_type {
KEY_TKIP = 2,
KEY_AES = 3,
KEY_GEM = 4,
+ KEY_IGTK = 5,
};
struct wl1271_cmd_set_keys {
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 12f0167d7380..dce79bce2e3f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -2419,6 +2419,11 @@ power_off:
strscpy(wiphy->fw_version, wl->chip.fw_ver_str,
sizeof(wiphy->fw_version));
+ /* WLAN_CIPHER_SUITE_AES_CMAC must be last in cipher_suites;
+  * support only with firmware 8.9.1 and newer
+  */
+ if (wl->chip.fw_ver[FW_VER_MAJOR] < 1)
+ wl->hw->wiphy->n_cipher_suites--;
+
/*
* Now we know if 11a is supported (info from the NVS), so disable
* 11a channels if not supported
@@ -3585,6 +3590,9 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
case WL1271_CIPHER_SUITE_GEM:
key_type = KEY_GEM;
break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key_type = KEY_IGTK;
+ break;
default:
wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -6196,6 +6204,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
WL1271_CIPHER_SUITE_GEM,
+ WLAN_CIPHER_SUITE_AES_CMAC,
};
/* The tx descriptor buffer */
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index a73207bbe5d7..421d688ae58b 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -370,7 +370,6 @@ static void wl1271_remove(struct sdio_func *func)
platform_device_unregister(glue->core);
}
-#ifdef CONFIG_PM
static int wl1271_suspend(struct device *dev)
{
/* Tell MMC/SDIO core it's OK to power down the card
@@ -422,18 +421,15 @@ static const struct dev_pm_ops wl1271_sdio_pm_ops = {
.suspend = wl1271_suspend,
.resume = wl1271_resume,
};
-#endif
static struct sdio_driver wl1271_sdio_driver = {
.name = "wl1271_sdio",
.id_table = wl1271_devices,
.probe = wl1271_probe,
.remove = wl1271_remove,
-#ifdef CONFIG_PM
.drv = {
- .pm = &wl1271_sdio_pm_ops,
+ .pm = pm_ptr(&wl1271_sdio_pm_ops),
},
-#endif
};
module_sdio_driver(wl1271_sdio_driver);
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 79cc63272134..4d9f5f87e814 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -7163,14 +7163,12 @@ static int __init init_mac80211_hwsim(void)
}
param.p2p_device = support_p2p_device;
- param.nan_device = true;
param.mlo = mlo;
param.multi_radio = multi_radio;
param.use_chanctx = channels > 1 || mlo || multi_radio;
param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK;
if (param.p2p_device)
param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
- param.iftypes |= BIT(NL80211_IFTYPE_NAN);
err = mac80211_hwsim_new_radio(NULL, &param);
if (err < 0)
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
index 410b0245114e..88df55d78d90 100644
--- a/drivers/net/wwan/Kconfig
+++ b/drivers/net/wwan/Kconfig
@@ -7,6 +7,7 @@ menu "Wireless WAN"
config WWAN
tristate "WWAN Driver Core"
+ depends on GNSS || GNSS=n
help
Say Y here if you want to use the WWAN driver core. This driver
provides a common framework for WWAN drivers.
diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c
index e9f979d2d851..e13c0b078175 100644
--- a/drivers/net/wwan/mhi_wwan_ctrl.c
+++ b/drivers/net/wwan/mhi_wwan_ctrl.c
@@ -263,6 +263,7 @@ static const struct mhi_device_id mhi_wwan_ctrl_match_table[] = {
{ .chan = "QMI", .driver_data = WWAN_PORT_QMI },
{ .chan = "DIAG", .driver_data = WWAN_PORT_QCDM },
{ .chan = "FIREHOSE", .driver_data = WWAN_PORT_FIREHOSE },
+ { .chan = "NMEA", .driver_data = WWAN_PORT_NMEA },
{},
};
MODULE_DEVICE_TABLE(mhi, mhi_wwan_ctrl_match_table);
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index f8bc9a39bfa3..1d7e3ad900c1 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -98,7 +98,8 @@ static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim
static int mhi_mbim_get_link_mux_id(struct mhi_controller *cntrl)
{
if (strcmp(cntrl->name, "foxconn-dw5934e") == 0 ||
- strcmp(cntrl->name, "foxconn-t99w640") == 0)
+ strcmp(cntrl->name, "foxconn-t99w640") == 0 ||
+ strcmp(cntrl->name, "foxconn-t99w760") == 0)
return WDS_BIND_MUX_DATA_PORT_MUX_ID;
return 0;
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 63a47d420bc5..015213b3d687 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -1,5 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
+/* WWAN Driver Core
+ *
+ * Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org>
+ * Copyright (c) 2025, Sergey Ryazanov <ryazanov.s.a@gmail.com>
+ */
#include <linux/bitmap.h>
#include <linux/err.h>
@@ -16,6 +20,7 @@
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/termios.h>
+#include <linux/gnss.h>
#include <linux/wwan.h>
#include <net/rtnetlink.h>
#include <uapi/linux/wwan.h>
@@ -42,16 +47,18 @@ static struct dentry *wwan_debugfs_dir;
* struct wwan_device - The structure that defines a WWAN device
*
* @id: WWAN device unique ID.
+ * @refcount: Reference count of this WWAN device. When this refcount reaches
+ * zero, the device is deleted. NB: access is protected by the global
+ * wwan_register_lock mutex.
* @dev: Underlying device.
- * @port_id: Current available port ID to pick.
* @ops: wwan device ops
* @ops_ctxt: context to pass to ops
* @debugfs_dir: WWAN device debugfs dir
*/
struct wwan_device {
unsigned int id;
+ int refcount;
struct device dev;
- atomic_t port_id;
const struct wwan_ops *ops;
void *ops_ctxt;
#ifdef CONFIG_WWAN_DEBUGFS
@@ -73,6 +80,7 @@ struct wwan_device {
* @headroom_len: SKB reserved headroom size
* @frag_len: Length to fragment packet
* @at_data: AT port specific data
+ * @gnss: Pointer to GNSS device associated with this port
*/
struct wwan_port {
enum wwan_port_type type;
@@ -91,9 +99,16 @@ struct wwan_port {
struct ktermios termios;
int mdmbits;
} at_data;
+ struct gnss_device *gnss;
};
};
+static int wwan_port_op_start(struct wwan_port *port);
+static void wwan_port_op_stop(struct wwan_port *port);
+static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb,
+ bool nonblock);
+static int wwan_wait_tx(struct wwan_port *port, bool nonblock);
+
static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct wwan_device *wwan = to_wwan_dev(dev);
@@ -224,8 +239,10 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
/* If wwandev already exists, return it */
wwandev = wwan_dev_get_by_parent(parent);
- if (!IS_ERR(wwandev))
+ if (!IS_ERR(wwandev)) {
+ wwandev->refcount++;
goto done_unlock;
+ }
id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
if (id < 0) {
@@ -244,6 +261,7 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
wwandev->dev.class = &wwan_class;
wwandev->dev.type = &wwan_dev_type;
wwandev->id = id;
+ wwandev->refcount = 1;
dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);
err = device_register(&wwandev->dev);
@@ -265,30 +283,18 @@ done_unlock:
return wwandev;
}
-static int is_wwan_child(struct device *dev, void *data)
-{
- return dev->class == &wwan_class;
-}
-
static void wwan_remove_dev(struct wwan_device *wwandev)
{
- int ret;
-
/* Prevent concurrent picking from wwan_create_dev */
mutex_lock(&wwan_register_lock);
- /* WWAN device is created and registered (get+add) along with its first
- * child port, and subsequent port registrations only grab a reference
- * (get). The WWAN device must then be unregistered (del+put) along with
- * its last port, and reference simply dropped (put) otherwise. In the
- * same fashion, we must not unregister it when the ops are still there.
- */
- if (wwandev->ops)
- ret = 1;
- else
- ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
+ if (--wwandev->refcount <= 0) {
+ struct device *child = device_find_any_child(&wwandev->dev);
+
+ put_device(child);
+ if (WARN_ON(wwandev->ops || child)) /* Paranoid */
+ goto out_unlock;
- if (!ret) {
#ifdef CONFIG_WWAN_DEBUGFS
debugfs_remove_recursive(wwandev->debugfs_dir);
#endif
@@ -297,6 +303,7 @@ static void wwan_remove_dev(struct wwan_device *wwandev)
put_device(&wwandev->dev);
}
+out_unlock:
mutex_unlock(&wwan_register_lock);
}
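
The effect is plain reference pairing: every successful wwan_create_dev() either creates the device (count 1) or bumps the count, and must be balanced by exactly one wwan_remove_dev(); both adjust the count under wwan_register_lock. In sketch form, using the functions from this patch:

    /* Sketch of the pairing implemented above; not new API. */
    wwandev = wwan_create_dev(parent);  /* refcount 0 -> 1, or N -> N + 1 */
    if (IS_ERR(wwandev))
            return PTR_ERR(wwandev);
    /* ... create a port or register ops ... */
    wwan_remove_dev(wwandev);           /* N -> N - 1; device_del() at zero */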
@@ -342,6 +349,7 @@ static const struct {
.name = "MIPC",
.devsuf = "mipc",
},
+ /* WWAN_PORT_NMEA is exported via the GNSS subsystem */
};
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -363,7 +371,8 @@ static void wwan_port_destroy(struct device *dev)
{
struct wwan_port *port = to_wwan_port(dev);
- ida_free(&minors, MINOR(port->dev.devt));
+ if (dev->class == &wwan_class)
+ ida_free(&minors, MINOR(dev->devt));
mutex_destroy(&port->data_lock);
mutex_destroy(&port->ops_lock);
kfree(port);
@@ -442,6 +451,174 @@ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
return dev_set_name(&port->dev, "%s", buf);
}
+/* Register a regular WWAN port device (e.g. AT, MBIM, etc.) */
+static int wwan_port_register_wwan(struct wwan_port *port)
+{
+ struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
+ char namefmt[0x20];
+ int minor, err;
+
+ /* A port is exposed as character device, get a minor */
+ minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
+ if (minor < 0)
+ return minor;
+
+ port->dev.class = &wwan_class;
+ port->dev.devt = MKDEV(wwan_major, minor);
+
+ /* allocate unique name based on wwan device id, port type and number */
+ snprintf(namefmt, sizeof(namefmt), "wwan%u%s%%d", wwandev->id,
+ wwan_port_types[port->type].devsuf);
+
+ /* Serialize ports registration */
+ mutex_lock(&wwan_register_lock);
+
+ __wwan_port_dev_assign_name(port, namefmt);
+ err = device_add(&port->dev);
+
+ mutex_unlock(&wwan_register_lock);
+
+ if (err) {
+ ida_free(&minors, minor);
+ port->dev.class = NULL;
+ return err;
+ }
+
+ dev_info(&wwandev->dev, "port %s attached\n", dev_name(&port->dev));
+
+ return 0;
+}
+
+/* Unregister a regular WWAN port (e.g. AT, MBIM, etc) */
+static void wwan_port_unregister_wwan(struct wwan_port *port)
+{
+ struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
+
+ dev_set_drvdata(&port->dev, NULL);
+
+ dev_info(&wwandev->dev, "port %s disconnected\n", dev_name(&port->dev));
+
+ device_del(&port->dev);
+}
+
+#if IS_ENABLED(CONFIG_GNSS)
+static int wwan_gnss_open(struct gnss_device *gdev)
+{
+ return wwan_port_op_start(gnss_get_drvdata(gdev));
+}
+
+static void wwan_gnss_close(struct gnss_device *gdev)
+{
+ wwan_port_op_stop(gnss_get_drvdata(gdev));
+}
+
+static int wwan_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count)
+{
+ struct wwan_port *port = gnss_get_drvdata(gdev);
+ struct sk_buff *skb, *head = NULL, *tail = NULL;
+ size_t frag_len, remain = count;
+ int ret;
+
+ ret = wwan_wait_tx(port, false);
+ if (ret)
+ return ret;
+
+ do {
+ frag_len = min(remain, port->frag_len);
+ skb = alloc_skb(frag_len + port->headroom_len, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto freeskb;
+ }
+ skb_reserve(skb, port->headroom_len);
+ memcpy(skb_put(skb, frag_len), buf + count - remain, frag_len);
+
+ if (!head) {
+ head = skb;
+ } else {
+ if (!tail)
+ skb_shinfo(head)->frag_list = skb;
+ else
+ tail->next = skb;
+
+ tail = skb;
+ head->data_len += skb->len;
+ head->len += skb->len;
+ head->truesize += skb->truesize;
+ }
+ } while (remain -= frag_len);
+
+ ret = wwan_port_op_tx(port, head, false);
+ if (!ret)
+ return count;
+
+freeskb:
+ kfree_skb(head);
+ return ret;
+}
+
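
The loop above keeps the head skb's accounting consistent while chaining fragments on its frag_list; stated as invariants (standard sk_buff bookkeeping, not behaviour new to this patch):

    /* After the loop, with fragments f1..fn chained on head's frag_list:
     *
     *   head->data_len == f1->len + ... + fn->len
     *   head->len      == skb_headlen(head) + head->data_len
     *   head->truesize == sum of the truesize of all allocations
     *
     * so consumers see one logical buffer of the full written length.
     */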
+static const struct gnss_operations wwan_gnss_ops = {
+ .open = wwan_gnss_open,
+ .close = wwan_gnss_close,
+ .write_raw = wwan_gnss_write,
+};
+
+/* GNSS port specific device registration */
+static int wwan_port_register_gnss(struct wwan_port *port)
+{
+ struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
+ struct gnss_device *gdev;
+ int err;
+
+ gdev = gnss_allocate_device(&wwandev->dev);
+ if (!gdev)
+ return -ENOMEM;
+
+ /* NB: for now we support only the NMEA WWAN port type, so hardcode
+ * the GNSS port type. If more GNSS WWAN port types are added, the
+ * WWAN port type should be mapped to the GNSS type dynamically.
+ */
+ gdev->type = GNSS_TYPE_NMEA;
+ gdev->ops = &wwan_gnss_ops;
+ gnss_set_drvdata(gdev, port);
+
+ port->gnss = gdev;
+
+ err = gnss_register_device(gdev);
+ if (err) {
+ gnss_put_device(gdev);
+ return err;
+ }
+
+ dev_info(&wwandev->dev, "port %s attached\n", dev_name(&gdev->dev));
+
+ return 0;
+}
+
+/* GNSS port specific device unregistration */
+static void wwan_port_unregister_gnss(struct wwan_port *port)
+{
+ struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
+ struct gnss_device *gdev = port->gnss;
+
+ dev_info(&wwandev->dev, "port %s disconnected\n", dev_name(&gdev->dev));
+
+ gnss_deregister_device(gdev);
+ gnss_put_device(gdev);
+}
+#else
+static int wwan_port_register_gnss(struct wwan_port *port)
+{
+ return -EOPNOTSUPP;
+}
+
+static void wwan_port_unregister_gnss(struct wwan_port *port)
+{
+ WARN_ON(1); /* This handler cannot be called */
+}
+#endif
+
struct wwan_port *wwan_create_port(struct device *parent,
enum wwan_port_type type,
const struct wwan_port_ops *ops,
@@ -450,8 +627,7 @@ struct wwan_port *wwan_create_port(struct device *parent,
{
struct wwan_device *wwandev;
struct wwan_port *port;
- char namefmt[0x20];
- int minor, err;
+ int err;
if (type > WWAN_PORT_MAX || !ops)
return ERR_PTR(-EINVAL);
@@ -463,17 +639,9 @@ struct wwan_port *wwan_create_port(struct device *parent,
if (IS_ERR(wwandev))
return ERR_CAST(wwandev);
- /* A port is exposed as character device, get a minor */
- minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
- if (minor < 0) {
- err = minor;
- goto error_wwandev_remove;
- }
-
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port) {
err = -ENOMEM;
- ida_free(&minors, minor);
goto error_wwandev_remove;
}
@@ -487,27 +655,18 @@ struct wwan_port *wwan_create_port(struct device *parent,
mutex_init(&port->data_lock);
port->dev.parent = &wwandev->dev;
- port->dev.class = &wwan_class;
port->dev.type = &wwan_port_dev_type;
- port->dev.devt = MKDEV(wwan_major, minor);
dev_set_drvdata(&port->dev, drvdata);
+ device_initialize(&port->dev);
- /* allocate unique name based on wwan device id, port type and number */
- snprintf(namefmt, sizeof(namefmt), "wwan%u%s%%d", wwandev->id,
- wwan_port_types[port->type].devsuf);
-
- /* Serialize ports registration */
- mutex_lock(&wwan_register_lock);
-
- __wwan_port_dev_assign_name(port, namefmt);
- err = device_register(&port->dev);
-
- mutex_unlock(&wwan_register_lock);
+ if (port->type == WWAN_PORT_NMEA)
+ err = wwan_port_register_gnss(port);
+ else
+ err = wwan_port_register_wwan(port);
if (err)
goto error_put_device;
- dev_info(&wwandev->dev, "port %s attached\n", dev_name(&port->dev));
return port;
error_put_device:
@@ -524,18 +683,22 @@ void wwan_remove_port(struct wwan_port *port)
struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
mutex_lock(&port->ops_lock);
- if (port->start_count)
+ if (port->start_count) {
port->ops->stop(port);
+ port->start_count = 0;
+ }
port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */
mutex_unlock(&port->ops_lock);
wake_up_interruptible(&port->waitqueue);
-
skb_queue_purge(&port->rxq);
- dev_set_drvdata(&port->dev, NULL);
- dev_info(&wwandev->dev, "port %s disconnected\n", dev_name(&port->dev));
- device_unregister(&port->dev);
+ if (port->type == WWAN_PORT_NMEA)
+ wwan_port_unregister_gnss(port);
+ else
+ wwan_port_unregister_wwan(port);
+
+ put_device(&port->dev);
/* Release related wwan device */
wwan_remove_dev(wwandev);
@@ -544,8 +707,15 @@ EXPORT_SYMBOL_GPL(wwan_remove_port);
void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
{
- skb_queue_tail(&port->rxq, skb);
- wake_up_interruptible(&port->waitqueue);
+ if (port->type == WWAN_PORT_NMEA) {
+#if IS_ENABLED(CONFIG_GNSS)
+ gnss_insert_raw(port->gnss, skb->data, skb->len);
+#endif
+ consume_skb(skb);
+ } else {
+ skb_queue_tail(&port->rxq, skb);
+ wake_up_interruptible(&port->waitqueue);
+ }
}
EXPORT_SYMBOL_GPL(wwan_port_rx);
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index 733688cd4607..8541bd58e831 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -2,7 +2,7 @@
/*
* WWAN device simulator for WWAN framework testing.
*
- * Copyright (c) 2021, Sergey Ryazanov <ryazanov.s.a@gmail.com>
+ * Copyright (c) 2021, 2025, Sergey Ryazanov <ryazanov.s.a@gmail.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -12,8 +12,10 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/spinlock.h>
+#include <linux/time.h>
#include <linux/list.h>
#include <linux/skbuff.h>
+#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/wwan.h>
#include <linux/debugfs.h>
@@ -56,12 +58,19 @@ struct wwan_hwsim_port {
struct wwan_port *wwan;
struct work_struct del_work;
struct dentry *debugfs_topdir;
- enum { /* AT command parser state */
- AT_PARSER_WAIT_A,
- AT_PARSER_WAIT_T,
- AT_PARSER_WAIT_TERM,
- AT_PARSER_SKIP_LINE,
- } pstate;
+ union {
+ struct {
+ enum { /* AT command parser state */
+ AT_PARSER_WAIT_A,
+ AT_PARSER_WAIT_T,
+ AT_PARSER_WAIT_TERM,
+ AT_PARSER_SKIP_LINE,
+ } pstate;
+ } at_emul;
+ struct {
+ struct timer_list timer;
+ } nmea_emul;
+ };
};
static const struct file_operations wwan_hwsim_debugfs_portdestroy_fops;
@@ -101,16 +110,16 @@ static const struct wwan_ops wwan_hwsim_wwan_rtnl_ops = {
.setup = wwan_hwsim_netdev_setup,
};
-static int wwan_hwsim_port_start(struct wwan_port *wport)
+static int wwan_hwsim_at_emul_start(struct wwan_port *wport)
{
struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
- port->pstate = AT_PARSER_WAIT_A;
+ port->at_emul.pstate = AT_PARSER_WAIT_A;
return 0;
}
-static void wwan_hwsim_port_stop(struct wwan_port *wport)
+static void wwan_hwsim_at_emul_stop(struct wwan_port *wport)
{
}
@@ -120,7 +129,7 @@ static void wwan_hwsim_port_stop(struct wwan_port *wport)
*
* Be aware that this processor is not fully V.250 compliant.
*/
-static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
+static int wwan_hwsim_at_emul_tx(struct wwan_port *wport, struct sk_buff *in)
{
struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
struct sk_buff *out;
@@ -142,17 +151,17 @@ static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
for (i = 0, s = 0; i < in->len; ++i) {
char c = in->data[i];
- if (port->pstate == AT_PARSER_WAIT_A) {
+ if (port->at_emul.pstate == AT_PARSER_WAIT_A) {
if (c == 'A' || c == 'a')
- port->pstate = AT_PARSER_WAIT_T;
+ port->at_emul.pstate = AT_PARSER_WAIT_T;
else if (c != '\n') /* Ignore formating char */
- port->pstate = AT_PARSER_SKIP_LINE;
- } else if (port->pstate == AT_PARSER_WAIT_T) {
+ port->at_emul.pstate = AT_PARSER_SKIP_LINE;
+ } else if (port->at_emul.pstate == AT_PARSER_WAIT_T) {
if (c == 'T' || c == 't')
- port->pstate = AT_PARSER_WAIT_TERM;
+ port->at_emul.pstate = AT_PARSER_WAIT_TERM;
else
- port->pstate = AT_PARSER_SKIP_LINE;
- } else if (port->pstate == AT_PARSER_WAIT_TERM) {
+ port->at_emul.pstate = AT_PARSER_SKIP_LINE;
+ } else if (port->at_emul.pstate == AT_PARSER_WAIT_TERM) {
if (c != '\r')
continue;
/* Consume the trailing formatting char as well */
@@ -162,11 +171,11 @@ static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
skb_put_data(out, &in->data[s], n);/* Echo */
skb_put_data(out, "\r\nOK\r\n", 6);
s = i + 1;
- port->pstate = AT_PARSER_WAIT_A;
- } else if (port->pstate == AT_PARSER_SKIP_LINE) {
+ port->at_emul.pstate = AT_PARSER_WAIT_A;
+ } else if (port->at_emul.pstate == AT_PARSER_SKIP_LINE) {
if (c != '\r')
continue;
- port->pstate = AT_PARSER_WAIT_A;
+ port->at_emul.pstate = AT_PARSER_WAIT_A;
}
}
@@ -183,18 +192,131 @@ static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
return 0;
}
-static const struct wwan_port_ops wwan_hwsim_port_ops = {
- .start = wwan_hwsim_port_start,
- .stop = wwan_hwsim_port_stop,
- .tx = wwan_hwsim_port_tx,
+static const struct wwan_port_ops wwan_hwsim_at_emul_port_ops = {
+ .start = wwan_hwsim_at_emul_start,
+ .stop = wwan_hwsim_at_emul_stop,
+ .tx = wwan_hwsim_at_emul_tx,
};
-static struct wwan_hwsim_port *wwan_hwsim_port_new(struct wwan_hwsim_dev *dev)
+#if IS_ENABLED(CONFIG_GNSS)
+#define NMEA_MAX_LEN 82 /* Max sentence length */
+#define NMEA_TRAIL_LEN 5 /* '*' + Checksum + <CR><LF> */
+#define NMEA_MAX_DATA_LEN (NMEA_MAX_LEN - NMEA_TRAIL_LEN)
+
+static __printf(2, 3)
+void wwan_hwsim_nmea_skb_push_sentence(struct sk_buff *skb,
+ const char *fmt, ...)
+{
+ unsigned char *s, *p;
+ va_list ap;
+ u8 cs = 0;
+ int len;
+
+ s = skb_put(skb, NMEA_MAX_LEN + 1); /* +'\0' */
+ if (!s)
+ return;
+
+ va_start(ap, fmt);
+ len = vsnprintf(s, NMEA_MAX_DATA_LEN + 1, fmt, ap);
+ va_end(ap);
+ if (WARN_ON_ONCE(len > NMEA_MAX_DATA_LEN)) /* No space for trailer */
+ return;
+
+ for (p = s + 1; *p != '\0'; ++p) /* Skip leading '$' or '!' */
+ cs ^= *p;
+ p += snprintf(p, 5 + 1, "*%02X\r\n", cs);
+
+ len = (p - s) - (NMEA_MAX_LEN + 1); /* real vs. allocated length diff */
+ skb->tail += len; /* Adjust tail to real length */
+ skb->len += len;
+}
+
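
The trailer computed above is the standard NMEA 0183 checksum: the XOR of every byte between the leading '$' or '!' (exclusive) and the '*' (exclusive), printed as two uppercase hex digits. A standalone worked sketch, for illustration only:

    #include <stdio.h>

    /* XOR checksum over an NMEA sentence, excluding the leading '$'/'!'. */
    static unsigned char nmea_csum(const char *s)
    {
            unsigned char cs = 0;

            for (s++; *s && *s != '*'; s++)
                    cs ^= (unsigned char)*s;
            return cs;
    }

    int main(void)
    {
            /* Prints the "*XX" trailer for an example (truncated) sentence */
            printf("*%02X\r\n", nmea_csum("$GPGGA,123519,4807.038,N"));
            return 0;
    }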
+static void wwan_hwsim_nmea_emul_timer(struct timer_list *t)
+{
+ /* 43.74754722298909 N 11.25759835922875 E in DMM format */
+ static const unsigned int coord[4 * 2] = { 43, 44, 8528, 0,
+ 11, 15, 4559, 0 };
+ struct wwan_hwsim_port *port = timer_container_of(port, t, nmea_emul.timer);
+ struct sk_buff *skb;
+ struct tm tm;
+
+ time64_to_tm(ktime_get_real_seconds(), 0, &tm);
+
+ mod_timer(&port->nmea_emul.timer, jiffies + HZ); /* 1 second */
+
+ skb = alloc_skb(NMEA_MAX_LEN * 2 + 2, GFP_ATOMIC); /* GGA + RMC */
+ if (!skb)
+ return;
+
+ wwan_hwsim_nmea_skb_push_sentence(skb,
+ "$GPGGA,%02u%02u%02u.000,%02u%02u.%04u,%c,%03u%02u.%04u,%c,1,7,1.03,176.2,M,55.2,M,,",
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ coord[0], coord[1], coord[2],
+ coord[3] ? 'S' : 'N',
+ coord[4], coord[5], coord[6],
+ coord[7] ? 'W' : 'E');
+
+ wwan_hwsim_nmea_skb_push_sentence(skb,
+ "$GPRMC,%02u%02u%02u.000,A,%02u%02u.%04u,%c,%03u%02u.%04u,%c,0.02,31.66,%02u%02u%02u,,,A",
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ coord[0], coord[1], coord[2],
+ coord[3] ? 'S' : 'N',
+ coord[4], coord[5], coord[6],
+ coord[7] ? 'W' : 'E',
+ tm.tm_mday, tm.tm_mon + 1,
+ (unsigned int)tm.tm_year - 100);
+
+ wwan_port_rx(port->wwan, skb);
+}
+
+static int wwan_hwsim_nmea_emul_start(struct wwan_port *wport)
+{
+ struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
+
+ timer_setup(&port->nmea_emul.timer, wwan_hwsim_nmea_emul_timer, 0);
+ wwan_hwsim_nmea_emul_timer(&port->nmea_emul.timer);
+
+ return 0;
+}
+
+static void wwan_hwsim_nmea_emul_stop(struct wwan_port *wport)
+{
+ struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
+
+ timer_delete_sync(&port->nmea_emul.timer);
+}
+
+static int wwan_hwsim_nmea_emul_tx(struct wwan_port *wport, struct sk_buff *in)
+{
+ consume_skb(in);
+
+ return 0;
+}
+
+static const struct wwan_port_ops wwan_hwsim_nmea_emul_port_ops = {
+ .start = wwan_hwsim_nmea_emul_start,
+ .stop = wwan_hwsim_nmea_emul_stop,
+ .tx = wwan_hwsim_nmea_emul_tx,
+};
+#endif
+
+static struct wwan_hwsim_port *wwan_hwsim_port_new(struct wwan_hwsim_dev *dev,
+ enum wwan_port_type type)
{
+ const struct wwan_port_ops *ops;
struct wwan_hwsim_port *port;
char name[0x10];
int err;
+ if (type == WWAN_PORT_AT)
+ ops = &wwan_hwsim_at_emul_port_ops;
+#if IS_ENABLED(CONFIG_GNSS)
+ else if (type == WWAN_PORT_NMEA)
+ ops = &wwan_hwsim_nmea_emul_port_ops;
+#endif
+ else
+ return ERR_PTR(-EINVAL);
+
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return ERR_PTR(-ENOMEM);
@@ -205,9 +327,7 @@ static struct wwan_hwsim_port *wwan_hwsim_port_new(struct wwan_hwsim_dev *dev)
port->id = dev->port_idx++;
spin_unlock(&dev->ports_lock);
- port->wwan = wwan_create_port(&dev->dev, WWAN_PORT_AT,
- &wwan_hwsim_port_ops,
- NULL, port);
+ port->wwan = wwan_create_port(&dev->dev, type, ops, NULL, port);
if (IS_ERR(port->wwan)) {
err = PTR_ERR(port->wwan);
goto err_free_port;
@@ -392,7 +512,7 @@ static ssize_t wwan_hwsim_debugfs_portcreate_write(struct file *file,
struct wwan_hwsim_dev *dev = file->private_data;
struct wwan_hwsim_port *port;
- port = wwan_hwsim_port_new(dev);
+ port = wwan_hwsim_port_new(dev, WWAN_PORT_AT);
if (IS_ERR(port))
return PTR_ERR(port);
@@ -459,6 +579,8 @@ static int __init wwan_hwsim_init_devs(void)
int i, j;
for (i = 0; i < wwan_hwsim_devsnum; ++i) {
+ struct wwan_hwsim_port *port;
+
dev = wwan_hwsim_dev_new();
if (IS_ERR(dev))
return PTR_ERR(dev);
@@ -467,13 +589,12 @@ static int __init wwan_hwsim_init_devs(void)
list_add_tail(&dev->list, &wwan_hwsim_devs);
spin_unlock(&wwan_hwsim_devs_lock);
- /* Create a couple of ports per each device to accelerate
+ /* Create a few ports of various types for each device to accelerate
* the simulator readiness time.
*/
- for (j = 0; j < 2; ++j) {
- struct wwan_hwsim_port *port;
- port = wwan_hwsim_port_new(dev);
+ for (j = 0; j < 2; ++j) {
+ port = wwan_hwsim_port_new(dev, WWAN_PORT_AT);
if (IS_ERR(port))
return PTR_ERR(port);
@@ -481,6 +602,18 @@ static int __init wwan_hwsim_init_devs(void)
list_add_tail(&port->list, &dev->ports);
spin_unlock(&dev->ports_lock);
}
+
+#if IS_ENABLED(CONFIG_GNSS)
+ port = wwan_hwsim_port_new(dev, WWAN_PORT_NMEA);
+ if (IS_ERR(port)) {
+ dev_warn(&dev->dev, "failed to create initial NMEA port: %d\n",
+ (int)PTR_ERR(port));
+ } else {
+ spin_lock(&dev->ports_lock);
+ list_add_tail(&port->list, &dev->ports);
+ spin_unlock(&dev->ports_lock);
+ }
+#endif
}
return 0;
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 45ddce35f6d2..c6b2eba3511b 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -3,7 +3,7 @@
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
- * as published by the Free Softare Foundation; or, when distributed
+ * as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 7c2220366623..0969d5c9f6b7 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -97,8 +97,8 @@ struct netfront_cb {
static DECLARE_WAIT_QUEUE_HEAD(module_wq);
struct netfront_stats {
- u64 packets;
- u64 bytes;
+ u64_stats_t packets;
+ u64_stats_t bytes;
struct u64_stats_sync syncp;
};
@@ -634,8 +634,8 @@ static int xennet_xdp_xmit_one(struct net_device *dev,
notify_remote_via_irq(queue->tx_irq);
u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->bytes += xdpf->len;
- tx_stats->packets++;
+ u64_stats_add(&tx_stats->bytes, xdpf->len);
+ u64_stats_inc(&tx_stats->packets);
u64_stats_update_end(&tx_stats->syncp);
return 0;
@@ -843,8 +843,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
notify_remote_via_irq(queue->tx_irq);
u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->bytes += skb->len;
- tx_stats->packets++;
+ u64_stats_add(&tx_stats->bytes, skb->len);
+ u64_stats_inc(&tx_stats->packets);
u64_stats_update_end(&tx_stats->syncp);
if (!netfront_tx_slot_available(queue))
@@ -1249,8 +1249,8 @@ static int handle_incoming_queue(struct netfront_queue *queue,
}
u64_stats_update_begin(&rx_stats->syncp);
- rx_stats->packets++;
- rx_stats->bytes += skb->len;
+ u64_stats_inc(&rx_stats->packets);
+ u64_stats_add(&rx_stats->bytes, skb->len);
u64_stats_update_end(&rx_stats->syncp);
/* Pass it up. */
@@ -1400,14 +1400,14 @@ static void xennet_get_stats64(struct net_device *dev,
do {
start = u64_stats_fetch_begin(&tx_stats->syncp);
- tx_packets = tx_stats->packets;
- tx_bytes = tx_stats->bytes;
+ tx_packets = u64_stats_read(&tx_stats->packets);
+ tx_bytes = u64_stats_read(&tx_stats->bytes);
} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
do {
start = u64_stats_fetch_begin(&rx_stats->syncp);
- rx_packets = rx_stats->packets;
- rx_bytes = rx_stats->bytes;
+ rx_packets = u64_stats_read(&rx_stats->packets);
+ rx_bytes = u64_stats_read(&rx_stats->bytes);
} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
tot->rx_packets += rx_packets;
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 049662ffdf97..6a5ce8ff91f0 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -305,7 +305,7 @@ static int nxp_nci_i2c_probe(struct i2c_client *client)
r = request_threaded_irq(client->irq, NULL,
nxp_nci_i2c_irq_thread_fn,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ IRQF_ONESHOT,
NXP_NCI_I2C_DRIVER_NAME, phy);
if (r < 0)
nfc_err(&client->dev, "Unable to register IRQ handler\n");
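
For context: with no IRQF_TRIGGER_* bit in the flags, the IRQ core keeps whatever trigger type the firmware description specified, rather than forcing rising-edge; in comment form:

    /* With no IRQF_TRIGGER_* flag set, request_threaded_irq() inherits the
     * trigger from the DT "interrupts" property or the ACPI GpioInt/
     * Interrupt resource, so boards wired for falling-edge or level
     * interrupts work without a driver change.
     */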
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 678dd0452f0a..62153a3924b9 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -5,6 +5,28 @@
menu "PHY Subsystem"
+config PHY_COMMON_PROPS
+ bool
+ help
+ This builds helpers for parsing properties shared between generic
+ PHYs and Ethernet PHYs.
+
+ Consumer drivers select this option to gain access to helpers for
+ parsing properties from the
+ Documentation/devicetree/bindings/phy/phy-common-props.yaml schema.
+
+config PHY_COMMON_PROPS_TEST
+ tristate "KUnit tests for PHY common props" if !KUNIT_ALL_TESTS
+ select PHY_COMMON_PROPS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds KUnit tests for the PHY common property API.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ When in doubt, say N.
+
config GENERIC_PHY
bool "PHY Core"
help
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index bfb27fb5a494..30b150d68de7 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -3,6 +3,8 @@
# Makefile for the phy drivers.
#
+obj-$(CONFIG_PHY_COMMON_PROPS) += phy-common-props.o
+obj-$(CONFIG_PHY_COMMON_PROPS_TEST) += phy-common-props-test.o
obj-$(CONFIG_GENERIC_PHY) += phy-core.o
obj-$(CONFIG_GENERIC_PHY_MIPI_DPHY) += phy-core-mipi-dphy.o
obj-$(CONFIG_PHY_CAN_TRANSCEIVER) += phy-can-transceiver.o
diff --git a/drivers/phy/phy-common-props-test.c b/drivers/phy/phy-common-props-test.c
new file mode 100644
index 000000000000..e937ec8a4126
--- /dev/null
+++ b/drivers/phy/phy-common-props-test.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-common-props-test.c -- Unit tests for PHY common properties API
+ *
+ * Copyright 2025-2026 NXP
+ */
+#include <kunit/test.h>
+#include <linux/property.h>
+#include <linux/phy/phy-common-props.h>
+#include <dt-bindings/phy/phy.h>
+
+/* Test: rx-polarity property is missing */
+static void phy_test_rx_polarity_is_missing(struct kunit *test)
+{
+ static const struct property_entry entries[] = {
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, PHY_POL_NORMAL);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: rx-polarity has more values than rx-polarity-names */
+static void phy_test_rx_polarity_more_values_than_names(struct kunit *test)
+{
+ static const u32 rx_pol[] = { PHY_POL_NORMAL, PHY_POL_INVERT, PHY_POL_NORMAL };
+ static const char * const rx_pol_names[] = { "sgmii", "2500base-x" };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("rx-polarity", rx_pol),
+ PROPERTY_ENTRY_STRING_ARRAY("rx-polarity-names", rx_pol_names),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: rx-polarity has 1 value and rx-polarity-names does not exist */
+static void phy_test_rx_polarity_single_value_no_names(struct kunit *test)
+{
+ static const u32 rx_pol[] = { PHY_POL_INVERT };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("rx-polarity", rx_pol),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, PHY_POL_INVERT);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: rx-polarity-names has more values than rx-polarity */
+static void phy_test_rx_polarity_more_names_than_values(struct kunit *test)
+{
+ static const u32 rx_pol[] = { PHY_POL_NORMAL, PHY_POL_INVERT };
+ static const char * const rx_pol_names[] = { "sgmii", "2500base-x", "1000base-x" };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("rx-polarity", rx_pol),
+ PROPERTY_ENTRY_STRING_ARRAY("rx-polarity-names", rx_pol_names),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: rx-polarity and rx-polarity-names have same length, find the name */
+static void phy_test_rx_polarity_find_by_name(struct kunit *test)
+{
+ static const u32 rx_pol[] = { PHY_POL_NORMAL, PHY_POL_INVERT, PHY_POL_AUTO };
+ static const char * const rx_pol_names[] = { "sgmii", "2500base-x", "usb-ss" };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("rx-polarity", rx_pol),
+ PROPERTY_ENTRY_STRING_ARRAY("rx-polarity-names", rx_pol_names),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, PHY_POL_NORMAL);
+
+ ret = phy_get_manual_rx_polarity(node, "2500base-x", &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, PHY_POL_INVERT);
+
+ ret = phy_get_rx_polarity(node, "usb-ss", BIT(PHY_POL_AUTO),
+ PHY_POL_AUTO, &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, PHY_POL_AUTO);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: same length, name not found, no "default" - error */
+static void phy_test_rx_polarity_name_not_found_no_default(struct kunit *test)
+{
+ static const u32 rx_pol[] = { PHY_POL_NORMAL, PHY_POL_INVERT };
+ static const char * const rx_pol_names[] = { "2500base-x", "1000base-x" };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("rx-polarity", rx_pol),
+ PROPERTY_ENTRY_STRING_ARRAY("rx-polarity-names", rx_pol_names),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: same length, name not found, but "default" exists */
+static void phy_test_rx_polarity_name_not_found_with_default(struct kunit *test)
+{
+ static const u32 rx_pol[] = { PHY_POL_NORMAL, PHY_POL_INVERT };
+ static const char * const rx_pol_names[] = { "2500base-x", "default" };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("rx-polarity", rx_pol),
+ PROPERTY_ENTRY_STRING_ARRAY("rx-polarity-names", rx_pol_names),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, PHY_POL_INVERT);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: polarity found but value is unsupported */
+static void phy_test_rx_polarity_unsupported_value(struct kunit *test)
+{
+ static const u32 rx_pol[] = { PHY_POL_AUTO };
+ static const char * const rx_pol_names[] = { "sgmii" };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("rx-polarity", rx_pol),
+ PROPERTY_ENTRY_STRING_ARRAY("rx-polarity-names", rx_pol_names),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_rx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, -EOPNOTSUPP);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: tx-polarity property is missing */
+static void phy_test_tx_polarity_is_missing(struct kunit *test)
+{
+ static const struct property_entry entries[] = {
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, PHY_POL_NORMAL);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: tx-polarity has more values than tx-polarity-names */
+static void phy_test_tx_polarity_more_values_than_names(struct kunit *test)
+{
+ static const u32 tx_pol[] = { PHY_POL_NORMAL, PHY_POL_INVERT, PHY_POL_NORMAL };
+ static const char * const tx_pol_names[] = { "sgmii", "2500base-x" };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("tx-polarity", tx_pol),
+ PROPERTY_ENTRY_STRING_ARRAY("tx-polarity-names", tx_pol_names),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: tx-polarity has 1 value and tx-polarity-names does not exist */
+static void phy_test_tx_polarity_single_value_no_names(struct kunit *test)
+{
+ static const u32 tx_pol[] = { PHY_POL_INVERT };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("tx-polarity", tx_pol),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, PHY_POL_INVERT);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: tx-polarity-names has more values than tx-polarity */
+static void phy_test_tx_polarity_more_names_than_values(struct kunit *test)
+{
+ static const u32 tx_pol[] = { PHY_POL_NORMAL, PHY_POL_INVERT };
+ static const char * const tx_pol_names[] = { "sgmii", "2500base-x", "1000base-x" };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("tx-polarity", tx_pol),
+ PROPERTY_ENTRY_STRING_ARRAY("tx-polarity-names", tx_pol_names),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: tx-polarity and tx-polarity-names have same length, find the name */
+static void phy_test_tx_polarity_find_by_name(struct kunit *test)
+{
+ static const u32 tx_pol[] = { PHY_POL_NORMAL, PHY_POL_INVERT, PHY_POL_NORMAL };
+ static const char * const tx_pol_names[] = { "sgmii", "2500base-x", "1000base-x" };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("tx-polarity", tx_pol),
+ PROPERTY_ENTRY_STRING_ARRAY("tx-polarity-names", tx_pol_names),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, PHY_POL_NORMAL);
+
+ ret = phy_get_manual_tx_polarity(node, "2500base-x", &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, PHY_POL_INVERT);
+
+ ret = phy_get_manual_tx_polarity(node, "1000base-x", &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, PHY_POL_NORMAL);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: same length, name not found, no "default" - error */
+static void phy_test_tx_polarity_name_not_found_no_default(struct kunit *test)
+{
+ static const u32 tx_pol[] = { PHY_POL_NORMAL, PHY_POL_INVERT };
+ static const char * const tx_pol_names[] = { "2500base-x", "1000base-x" };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("tx-polarity", tx_pol),
+ PROPERTY_ENTRY_STRING_ARRAY("tx-polarity-names", tx_pol_names),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: same length, name not found, but "default" exists */
+static void phy_test_tx_polarity_name_not_found_with_default(struct kunit *test)
+{
+ static const u32 tx_pol[] = { PHY_POL_NORMAL, PHY_POL_INVERT };
+ static const char * const tx_pol_names[] = { "2500base-x", "default" };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("tx-polarity", tx_pol),
+ PROPERTY_ENTRY_STRING_ARRAY("tx-polarity-names", tx_pol_names),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, PHY_POL_INVERT);
+
+ fwnode_remove_software_node(node);
+}
+
+/* Test: polarity found but value is unsupported (AUTO for TX) */
+static void phy_test_tx_polarity_unsupported_value(struct kunit *test)
+{
+ static const u32 tx_pol[] = { PHY_POL_AUTO };
+ static const char * const tx_pol_names[] = { "sgmii" };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U32_ARRAY("tx-polarity", tx_pol),
+ PROPERTY_ENTRY_STRING_ARRAY("tx-polarity-names", tx_pol_names),
+ {}
+ };
+ struct fwnode_handle *node;
+ unsigned int val;
+ int ret;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ ret = phy_get_manual_tx_polarity(node, "sgmii", &val);
+ KUNIT_EXPECT_EQ(test, ret, -EOPNOTSUPP);
+
+ fwnode_remove_software_node(node);
+}
+
+static struct kunit_case phy_common_props_test_cases[] = {
+ KUNIT_CASE(phy_test_rx_polarity_is_missing),
+ KUNIT_CASE(phy_test_rx_polarity_more_values_than_names),
+ KUNIT_CASE(phy_test_rx_polarity_single_value_no_names),
+ KUNIT_CASE(phy_test_rx_polarity_more_names_than_values),
+ KUNIT_CASE(phy_test_rx_polarity_find_by_name),
+ KUNIT_CASE(phy_test_rx_polarity_name_not_found_no_default),
+ KUNIT_CASE(phy_test_rx_polarity_name_not_found_with_default),
+ KUNIT_CASE(phy_test_rx_polarity_unsupported_value),
+ KUNIT_CASE(phy_test_tx_polarity_is_missing),
+ KUNIT_CASE(phy_test_tx_polarity_more_values_than_names),
+ KUNIT_CASE(phy_test_tx_polarity_single_value_no_names),
+ KUNIT_CASE(phy_test_tx_polarity_more_names_than_values),
+ KUNIT_CASE(phy_test_tx_polarity_find_by_name),
+ KUNIT_CASE(phy_test_tx_polarity_name_not_found_no_default),
+ KUNIT_CASE(phy_test_tx_polarity_name_not_found_with_default),
+ KUNIT_CASE(phy_test_tx_polarity_unsupported_value),
+ {}
+};
+
+static struct kunit_suite phy_common_props_test_suite = {
+ .name = "phy-common-props",
+ .test_cases = phy_common_props_test_cases,
+};
+
+kunit_test_suite(phy_common_props_test_suite);
+
+MODULE_DESCRIPTION("Test module for PHY common properties API");
+MODULE_AUTHOR("Vladimir Oltean <vladimir.oltean@nxp.com>");
+MODULE_LICENSE("GPL");
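
As with other KUnit suites, this one can be run through the KUnit wrapper; an invocation sketch (the chosen .kunitconfig must enable CONFIG_PHY_COMMON_PROPS_TEST):

    ./tools/testing/kunit/kunit.py run 'phy-common-props.*'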
diff --git a/drivers/phy/phy-common-props.c b/drivers/phy/phy-common-props.c
new file mode 100644
index 000000000000..3e814bcbea86
--- /dev/null
+++ b/drivers/phy/phy-common-props.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * phy-common-props.c -- Common PHY properties
+ *
+ * Copyright 2025-2026 NXP
+ */
+#include <linux/export.h>
+#include <linux/fwnode.h>
+#include <linux/phy/phy-common-props.h>
+#include <linux/printk.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+/**
+ * fwnode_get_u32_prop_for_name - Find u32 property by name, or default value
+ * @fwnode: Pointer to firmware node, or NULL to use @default_val
+ * @name: Property name used as lookup key in @names_title (must not be NULL)
+ * @props_title: Name of u32 array property holding values
+ * @names_title: Name of string array property holding lookup keys
+ * @default_val: Default value if @fwnode is NULL or @props_title is empty
+ * @val: Pointer to store the returned value
+ *
+ * This function retrieves a u32 value from @props_title based on a name lookup
+ * in @names_title. The value stored in @val is determined as follows:
+ *
+ * - If @fwnode is NULL or @props_title is empty: @default_val is used
+ * - If @props_title has exactly one element and @names_title is empty:
+ * that element is used
+ * - Otherwise: @val is set to the element at the same index where @name is
+ * found in @names_title.
+ * - If @name is not found, the function looks for a "default" entry in
+ * @names_title and uses the corresponding value from @props_title
+ *
+ * When both @props_title and @names_title are present, they must have the
+ * same number of elements (except when @props_title has exactly one element).
+ *
+ * Return: zero on success, negative error on failure.
+ */
+static int fwnode_get_u32_prop_for_name(struct fwnode_handle *fwnode,
+ const char *name,
+ const char *props_title,
+ const char *names_title,
+ unsigned int default_val,
+ unsigned int *val)
+{
+ int err, n_props, n_names, idx;
+ u32 *props;
+
+ if (!name) {
+ pr_err("Lookup key inside \"%s\" is mandatory\n", names_title);
+ return -EINVAL;
+ }
+
+ n_props = fwnode_property_count_u32(fwnode, props_title);
+ if (n_props <= 0) {
+ /* fwnode is NULL, or is missing requested property */
+ *val = default_val;
+ return 0;
+ }
+
+ n_names = fwnode_property_string_array_count(fwnode, names_title);
+ if (n_names >= 0 && n_props != n_names) {
+ pr_err("%pfw mismatch between \"%s\" and \"%s\" property count (%d vs %d)\n",
+ fwnode, props_title, names_title, n_props, n_names);
+ return -EINVAL;
+ }
+
+ idx = fwnode_property_match_string(fwnode, names_title, name);
+ if (idx < 0)
+ idx = fwnode_property_match_string(fwnode, names_title, "default");
+ /*
+ * If the mode name is missing, it can only mean the specified property
+ * is the default one for all modes, so reject any property count
+ * other than 1.
+ */
+ if (idx < 0 && n_props != 1) {
+ pr_err("%pfw \"%s \" property has %d elements, but cannot find \"%s\" in \"%s\" and there is no default value\n",
+ fwnode, props_title, n_props, name, names_title);
+ return -EINVAL;
+ }
+
+ if (n_props == 1) {
+ err = fwnode_property_read_u32(fwnode, props_title, val);
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ /* We implicitly know idx >= 0 here */
+ props = kcalloc(n_props, sizeof(*props), GFP_KERNEL);
+ if (!props)
+ return -ENOMEM;
+
+ err = fwnode_property_read_u32_array(fwnode, props_title, props, n_props);
+ if (err >= 0)
+ *val = props[idx];
+
+ kfree(props);
+
+ return err;
+}
+
+static int phy_get_polarity_for_mode(struct fwnode_handle *fwnode,
+ const char *mode_name,
+ unsigned int supported,
+ unsigned int default_val,
+ const char *polarity_prop,
+ const char *names_prop,
+ unsigned int *val)
+{
+ int err;
+
+ err = fwnode_get_u32_prop_for_name(fwnode, mode_name, polarity_prop,
+ names_prop, default_val, val);
+ if (err)
+ return err;
+
+ if (!(supported & BIT(*val))) {
+ pr_err("%d is not a supported value for %pfw '%s' element '%s'\n",
+ *val, fwnode, polarity_prop, mode_name);
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+/**
+ * phy_get_rx_polarity - Get RX polarity for PHY differential lane
+ * @fwnode: Pointer to the PHY's firmware node.
+ * @mode_name: The name of the PHY mode to look up.
+ * @supported: Bit mask of PHY_POL_NORMAL, PHY_POL_INVERT and PHY_POL_AUTO
+ * @default_val: Default polarity value if property is missing
+ * @val: Pointer to returned polarity.
+ *
+ * Return: zero on success, negative error on failure.
+ */
+int __must_check phy_get_rx_polarity(struct fwnode_handle *fwnode,
+ const char *mode_name,
+ unsigned int supported,
+ unsigned int default_val,
+ unsigned int *val)
+{
+ return phy_get_polarity_for_mode(fwnode, mode_name, supported,
+ default_val, "rx-polarity",
+ "rx-polarity-names", val);
+}
+EXPORT_SYMBOL_GPL(phy_get_rx_polarity);
+
+/**
+ * phy_get_tx_polarity - Get TX polarity for PHY differential lane
+ * @fwnode: Pointer to the PHY's firmware node.
+ * @mode_name: The name of the PHY mode to look up.
+ * @supported: Bit mask of PHY_POL_NORMAL and PHY_POL_INVERT
+ * @default_val: Default polarity value if property is missing
+ * @val: Pointer to returned polarity.
+ *
+ * Return: zero on success, negative error on failure.
+ */
+int __must_check phy_get_tx_polarity(struct fwnode_handle *fwnode,
+ const char *mode_name, unsigned int supported,
+ unsigned int default_val, unsigned int *val)
+{
+ return phy_get_polarity_for_mode(fwnode, mode_name, supported,
+ default_val, "tx-polarity",
+ "tx-polarity-names", val);
+}
+EXPORT_SYMBOL_GPL(phy_get_tx_polarity);
+
+/**
+ * phy_get_manual_rx_polarity - Get manual RX polarity for PHY differential lane
+ * @fwnode: Pointer to the PHY's firmware node.
+ * @mode_name: The name of the PHY mode to look up.
+ * @val: Pointer to returned polarity.
+ *
+ * Helper for PHYs which do not support protocols with automatic RX polarity
+ * detection and correction.
+ *
+ * Return: zero on success, negative error on failure.
+ */
+int __must_check phy_get_manual_rx_polarity(struct fwnode_handle *fwnode,
+ const char *mode_name,
+ unsigned int *val)
+{
+ return phy_get_rx_polarity(fwnode, mode_name,
+ BIT(PHY_POL_NORMAL) | BIT(PHY_POL_INVERT),
+ PHY_POL_NORMAL, val);
+}
+EXPORT_SYMBOL_GPL(phy_get_manual_rx_polarity);
+
+/**
+ * phy_get_manual_tx_polarity - Get manual TX polarity for PHY differential lane
+ * @fwnode: Pointer to the PHY's firmware node.
+ * @mode_name: The name of the PHY mode to look up.
+ * @val: Pointer to returned polarity.
+ *
+ * Helper for PHYs without any custom default value for the TX polarity.
+ *
+ * Return: zero on success, negative error on failure.
+ */
+int __must_check phy_get_manual_tx_polarity(struct fwnode_handle *fwnode,
+ const char *mode_name,
+ unsigned int *val)
+{
+ return phy_get_tx_polarity(fwnode, mode_name,
+ BIT(PHY_POL_NORMAL) | BIT(PHY_POL_INVERT),
+ PHY_POL_NORMAL, val);
+}
+EXPORT_SYMBOL_GPL(phy_get_manual_tx_polarity);
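
A minimal consumer sketch, assuming a hypothetical SerDes driver holding an fwnode for its lane (the mode name and the programming step are illustrative):

    #include <linux/fwnode.h>
    #include <linux/phy/phy-common-props.h>

    static int example_serdes_apply_polarity(struct fwnode_handle *fwnode)
    {
            unsigned int rx_pol, tx_pol;
            int err;

            err = phy_get_manual_rx_polarity(fwnode, "sgmii", &rx_pol);
            if (err)
                    return err;

            err = phy_get_manual_tx_polarity(fwnode, "sgmii", &tx_pol);
            if (err)
                    return err;

            /* Both values are PHY_POL_NORMAL or PHY_POL_INVERT here;
             * programming the lane itself is hardware-specific.
             */
            return 0;
    }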
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 5f8ea34d11d6..b93640ca08b7 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -134,7 +134,7 @@ config PTP_1588_CLOCK_KVM
config PTP_1588_CLOCK_VMCLOCK
tristate "Virtual machine PTP clock"
depends on X86_TSC || ARM_ARCH_TIMER
- depends on PTP_1588_CLOCK && ACPI && ARCH_SUPPORTS_INT128
+ depends on PTP_1588_CLOCK && ARCH_SUPPORTS_INT128
default PTP_1588_CLOCK_KVM
help
This driver adds support for using a virtual precision clock
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 65fe05cac8c4..1b16a9c3d7fd 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -285,6 +285,7 @@ struct ptp_ocp_sma_connector {
u8 default_fcn;
struct dpll_pin *dpll_pin;
struct dpll_pin_properties dpll_prop;
+ dpll_tracker tracker;
};
struct ocp_attr_group {
@@ -383,6 +384,7 @@ struct ptp_ocp {
struct ptp_ocp_sma_connector sma[OCP_SMA_NUM];
const struct ocp_sma_op *sma_op;
struct dpll_device *dpll;
+ dpll_tracker tracker;
int signals_nr;
int freq_in_nr;
};
@@ -4788,7 +4790,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
devlink_register(devlink);
clkid = pci_get_dsn(pdev);
- bp->dpll = dpll_device_get(clkid, 0, THIS_MODULE);
+ bp->dpll = dpll_device_get(clkid, 0, THIS_MODULE, &bp->tracker);
if (IS_ERR(bp->dpll)) {
err = PTR_ERR(bp->dpll);
dev_err(&pdev->dev, "dpll_device_alloc failed\n");
@@ -4800,7 +4802,9 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out;
for (i = 0; i < OCP_SMA_NUM; i++) {
- bp->sma[i].dpll_pin = dpll_pin_get(clkid, i, THIS_MODULE, &bp->sma[i].dpll_prop);
+ bp->sma[i].dpll_pin = dpll_pin_get(clkid, i, THIS_MODULE,
+ &bp->sma[i].dpll_prop,
+ &bp->sma[i].tracker);
if (IS_ERR(bp->sma[i].dpll_pin)) {
err = PTR_ERR(bp->sma[i].dpll_pin);
goto out_dpll;
@@ -4809,7 +4813,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dpll_pin_register(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops,
&bp->sma[i]);
if (err) {
- dpll_pin_put(bp->sma[i].dpll_pin);
+ dpll_pin_put(bp->sma[i].dpll_pin, &bp->sma[i].tracker);
goto out_dpll;
}
}
@@ -4819,9 +4823,9 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_dpll:
while (i--) {
dpll_pin_unregister(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops, &bp->sma[i]);
- dpll_pin_put(bp->sma[i].dpll_pin);
+ dpll_pin_put(bp->sma[i].dpll_pin, &bp->sma[i].tracker);
}
- dpll_device_put(bp->dpll);
+ dpll_device_put(bp->dpll, &bp->tracker);
out:
ptp_ocp_detach(bp);
out_disable:
@@ -4842,11 +4846,11 @@ ptp_ocp_remove(struct pci_dev *pdev)
for (i = 0; i < OCP_SMA_NUM; i++) {
if (bp->sma[i].dpll_pin) {
dpll_pin_unregister(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops, &bp->sma[i]);
- dpll_pin_put(bp->sma[i].dpll_pin);
+ dpll_pin_put(bp->sma[i].dpll_pin, &bp->sma[i].tracker);
}
}
dpll_device_unregister(bp->dpll, &dpll_ops, bp);
- dpll_device_put(bp->dpll);
+ dpll_device_put(bp->dpll, &bp->tracker);
devlink_unregister(devlink);
ptp_ocp_detach(bp);
pci_disable_device(pdev);
diff --git a/drivers/ptp/ptp_vmclock.c b/drivers/ptp/ptp_vmclock.c
index b3a83b03d9c1..c7c75e19f4dd 100644
--- a/drivers/ptp/ptp_vmclock.c
+++ b/drivers/ptp/ptp_vmclock.c
@@ -5,16 +5,22 @@
* Copyright © 2024 Amazon.com, Inc. or its affiliates.
*/
+#include "linux/poll.h"
+#include "linux/types.h"
+#include "linux/wait.h"
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -39,6 +45,7 @@ struct vmclock_state {
struct resource res;
struct vmclock_abi *clk;
struct miscdevice miscdev;
+ wait_queue_head_t disrupt_wait;
struct ptp_clock_info ptp_clock_info;
struct ptp_clock *ptp_clock;
enum clocksource_ids cs_id, sys_cs_id;
@@ -76,13 +83,13 @@ static uint64_t mul_u64_u64_shr_add_u64(uint64_t *res_hi, uint64_t delta,
static bool tai_adjust(struct vmclock_abi *clk, uint64_t *sec)
{
- if (likely(clk->time_type == VMCLOCK_TIME_UTC))
+ if (clk->time_type == VMCLOCK_TIME_TAI)
return true;
- if (clk->time_type == VMCLOCK_TIME_TAI &&
+ if (clk->time_type == VMCLOCK_TIME_UTC &&
(le64_to_cpu(clk->flags) & VMCLOCK_FLAG_TAI_OFFSET_VALID)) {
if (sec)
- *sec += (int16_t)le16_to_cpu(clk->tai_offset_sec);
+ *sec -= (int16_t)le16_to_cpu(clk->tai_offset_sec);
return true;
}
return false;
@@ -343,9 +350,9 @@ static struct ptp_clock *vmclock_ptp_register(struct device *dev,
return NULL;
}
- /* Only UTC, or TAI with offset */
+ /* Accept TAI directly, or UTC with valid offset for conversion to TAI */
if (!tai_adjust(st->clk, NULL)) {
- dev_info(dev, "vmclock does not provide unambiguous UTC\n");
+ dev_info(dev, "vmclock does not provide unambiguous time\n");
return NULL;
}
@@ -357,10 +364,15 @@ static struct ptp_clock *vmclock_ptp_register(struct device *dev,
return ptp_clock_register(&st->ptp_clock_info, dev);
}
+struct vmclock_file_state {
+ struct vmclock_state *st;
+ atomic_t seq;
+};
+
static int vmclock_miscdev_mmap(struct file *fp, struct vm_area_struct *vma)
{
- struct vmclock_state *st = container_of(fp->private_data,
- struct vmclock_state, miscdev);
+ struct vmclock_file_state *fst = fp->private_data;
+ struct vmclock_state *st = fst->st;
if ((vma->vm_flags & (VM_READ|VM_WRITE)) != VM_READ)
return -EROFS;
@@ -379,11 +391,11 @@ static int vmclock_miscdev_mmap(struct file *fp, struct vm_area_struct *vma)
static ssize_t vmclock_miscdev_read(struct file *fp, char __user *buf,
size_t count, loff_t *ppos)
{
- struct vmclock_state *st = container_of(fp->private_data,
- struct vmclock_state, miscdev);
ktime_t deadline = ktime_add(ktime_get(), VMCLOCK_MAX_WAIT);
+ struct vmclock_file_state *fst = fp->private_data;
+ struct vmclock_state *st = fst->st;
+ uint32_t seq, old_seq;
size_t max_count;
- uint32_t seq;
if (*ppos >= PAGE_SIZE)
return 0;
@@ -392,6 +404,7 @@ static ssize_t vmclock_miscdev_read(struct file *fp, char __user *buf,
if (count > max_count)
count = max_count;
+ old_seq = atomic_read(&fst->seq);
while (1) {
seq = le32_to_cpu(st->clk->seq_count) & ~1U;
/* Pairs with hypervisor wmb */
@@ -402,8 +415,16 @@ static ssize_t vmclock_miscdev_read(struct file *fp, char __user *buf,
/* Pairs with hypervisor wmb */
virt_rmb();
- if (seq == le32_to_cpu(st->clk->seq_count))
- break;
+ if (seq == le32_to_cpu(st->clk->seq_count)) {
+ /*
+ * Either we updated fst->seq to seq (the latest version we observed)
+ * or someone else did (old_seq == seq), so we can break.
+ */
+ if (atomic_try_cmpxchg(&fst->seq, &old_seq, seq) ||
+ old_seq == seq) {
+ break;
+ }
+ }
if (ktime_after(ktime_get(), deadline))
return -ETIMEDOUT;
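
The retry loop follows the usual seqcount convention for the shared vmclock page; restated compactly (the fst->seq cmpxchg is the only part new to this patch):

    /* Reader protocol against the hypervisor-updated page:
     *
     *   1. seq = seq_count & ~1      (odd: an update is in flight)
     *   2. virt_rmb()                (pairs with hypervisor wmb)
     *   3. copy the data out
     *   4. virt_rmb(); retry unless seq_count still equals seq
     *
     * The atomic_try_cmpxchg() on fst->seq merely records which stable
     * snapshot this file handle has observed, so poll() can later report
     * whether a newer one exists.
     */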
@@ -413,25 +434,63 @@ static ssize_t vmclock_miscdev_read(struct file *fp, char __user *buf,
return count;
}
+static __poll_t vmclock_miscdev_poll(struct file *fp, poll_table *wait)
+{
+ struct vmclock_file_state *fst = fp->private_data;
+ struct vmclock_state *st = fst->st;
+ uint32_t seq;
+
+ /*
+ * Hypervisor will not send us any notifications, so fail immediately
+ * to avoid having caller sleeping for ever.
+ */
+ if (!(le64_to_cpu(st->clk->flags) & VMCLOCK_FLAG_NOTIFICATION_PRESENT))
+ return EPOLLHUP;
+
+ poll_wait(fp, &st->disrupt_wait, wait);
+
+ seq = le32_to_cpu(st->clk->seq_count);
+ if (atomic_read(&fst->seq) != seq)
+ return EPOLLIN | EPOLLRDNORM;
+
+ return 0;
+}
+
+static int vmclock_miscdev_open(struct inode *inode, struct file *fp)
+{
+ struct vmclock_state *st = container_of(fp->private_data,
+ struct vmclock_state, miscdev);
+ struct vmclock_file_state *fst = kzalloc(sizeof(*fst), GFP_KERNEL);
+
+ if (!fst)
+ return -ENOMEM;
+
+ fst->st = st;
+ atomic_set(&fst->seq, 0);
+
+ fp->private_data = fst;
+
+ return 0;
+}
+
+static int vmclock_miscdev_release(struct inode *inode, struct file *fp)
+{
+ kfree(fp->private_data);
+ return 0;
+}
+
static const struct file_operations vmclock_miscdev_fops = {
.owner = THIS_MODULE,
+ .open = vmclock_miscdev_open,
+ .release = vmclock_miscdev_release,
.mmap = vmclock_miscdev_mmap,
.read = vmclock_miscdev_read,
+ .poll = vmclock_miscdev_poll,
};
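With per-open state in place, each file description tracks the last sequence number it has consumed: read() records it, and poll() reports readability only while the shared page has moved past it. A sketch of the intended userspace flow follows; the /dev node name is an assumption, since the driver only guarantees a dynamic misc device:

/* Hypothetical consumer: wait for a clock disruption, then re-read. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	struct pollfd pfd;

	pfd.fd = open("/dev/vmclock0", O_RDONLY);	/* node name assumed */
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	/* POLLHUP here means the device has no notification support. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(pfd.fd, buf, sizeof(buf));
		if (n > 0)
			printf("vmclock updated, %zd bytes read\n", n);
	}
	close(pfd.fd);
	return 0;
}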
/* module operations */
-static void vmclock_remove(void *data)
-{
- struct vmclock_state *st = data;
-
- if (st->ptp_clock)
- ptp_clock_unregister(st->ptp_clock);
-
- if (st->miscdev.minor != MISC_DYNAMIC_MINOR)
- misc_deregister(&st->miscdev);
-}
-
+#if IS_ENABLED(CONFIG_ACPI)
static acpi_status vmclock_acpi_resources(struct acpi_resource *ares, void *data)
{
struct vmclock_state *st = data;
@@ -459,6 +518,40 @@ static acpi_status vmclock_acpi_resources(struct acpi_resource *ares, void *data
return AE_ERROR;
}
+static void
+vmclock_acpi_notification_handler(acpi_handle __always_unused handle,
+ u32 __always_unused event, void *dev)
+{
+ struct device *device = dev;
+ struct vmclock_state *st = device->driver_data;
+
+ wake_up_interruptible(&st->disrupt_wait);
+}
+
+static int vmclock_setup_acpi_notification(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ acpi_status status;
+
+ /*
+ * This should never happen as this function is only called when
+ * has_acpi_companion(dev) is true, but the logic is sufficiently
+ * complex that Coverity can't see the tautology.
+ */
+ if (!adev)
+ return -ENODEV;
+
+ status = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+ vmclock_acpi_notification_handler,
+ dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "failed to install notification handler");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int vmclock_probe_acpi(struct device *dev, struct vmclock_state *st)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
@@ -481,6 +574,82 @@ static int vmclock_probe_acpi(struct device *dev, struct vmclock_state *st)
return 0;
}
+#endif /* CONFIG_ACPI */
+
+static irqreturn_t vmclock_of_irq_handler(int __always_unused irq, void *_st)
+{
+ struct vmclock_state *st = _st;
+
+ wake_up_interruptible(&st->disrupt_wait);
+ return IRQ_HANDLED;
+}
+
+static int vmclock_probe_dt(struct device *dev, struct vmclock_state *st)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ st->res = *res;
+
+ return 0;
+}
+
+static int vmclock_setup_of_notification(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ return devm_request_irq(dev, irq, vmclock_of_irq_handler, IRQF_SHARED,
+ "vmclock", dev->driver_data);
+}
+
+static int vmclock_setup_notification(struct device *dev,
+ struct vmclock_state *st)
+{
+ /* The device does not support notifications. Nothing else to do */
+ if (!(le64_to_cpu(st->clk->flags) & VMCLOCK_FLAG_NOTIFICATION_PRESENT))
+ return 0;
+
+#if IS_ENABLED(CONFIG_ACPI)
+ if (has_acpi_companion(dev))
+ return vmclock_setup_acpi_notification(dev);
+#endif
+ return vmclock_setup_of_notification(dev);
+}
+
+static void vmclock_remove(void *data)
+{
+ struct device *dev = data;
+ struct vmclock_state *st = dev->driver_data;
+
+ if (!st) {
+ dev_err(dev, "%s called with NULL driver_data", __func__);
+ return;
+ }
+
+#if IS_ENABLED(CONFIG_ACPI)
+ if (has_acpi_companion(dev))
+ acpi_remove_notify_handler(ACPI_COMPANION(dev)->handle,
+ ACPI_DEVICE_NOTIFY,
+ vmclock_acpi_notification_handler);
+#endif
+
+ if (st->ptp_clock)
+ ptp_clock_unregister(st->ptp_clock);
+
+ if (st->miscdev.minor != MISC_DYNAMIC_MINOR)
+ misc_deregister(&st->miscdev);
+
+ dev->driver_data = NULL;
+}
static void vmclock_put_idx(void *data)
{
@@ -499,10 +668,12 @@ static int vmclock_probe(struct platform_device *pdev)
if (!st)
return -ENOMEM;
+#if IS_ENABLED(CONFIG_ACPI)
if (has_acpi_companion(dev))
ret = vmclock_probe_acpi(dev, st);
else
- ret = -EINVAL; /* Only ACPI for now */
+#endif
+ ret = vmclock_probe_dt(dev, st);
if (ret) {
dev_info(dev, "Failed to obtain physical address: %d\n", ret);
@@ -545,7 +716,14 @@ static int vmclock_probe(struct platform_device *pdev)
st->miscdev.minor = MISC_DYNAMIC_MINOR;
- ret = devm_add_action_or_reset(&pdev->dev, vmclock_remove, st);
+ init_waitqueue_head(&st->disrupt_wait);
+ dev->driver_data = st;
+
+ ret = devm_add_action_or_reset(&pdev->dev, vmclock_remove, dev);
+ if (ret)
+ return ret;
+
+ ret = vmclock_setup_notification(dev, st);
if (ret)
return ret;
@@ -591,15 +769,23 @@ static int vmclock_probe(struct platform_device *pdev)
static const struct acpi_device_id vmclock_acpi_ids[] = {
{ "AMZNC10C", 0 },
+ { "VMCLOCK", 0 },
{}
};
MODULE_DEVICE_TABLE(acpi, vmclock_acpi_ids);
+static const struct of_device_id vmclock_of_ids[] = {
+ { .compatible = "amazon,vmclock", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, vmclock_of_ids);
+
static struct platform_driver vmclock_platform_driver = {
.probe = vmclock_probe,
.driver = {
.name = "vmclock",
.acpi_match_table = vmclock_acpi_ids,
+ .of_match_table = vmclock_of_ids,
},
};
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 552cfb53498a..488d7fa6e4ec 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -48,6 +48,8 @@ static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
struct vhost_vsock {
struct vhost_dev dev;
struct vhost_virtqueue vqs[2];
+ struct net *net;
+ netns_tracker ns_tracker;
/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
struct hlist_node hash;
@@ -69,7 +71,7 @@ static u32 vhost_transport_get_local_cid(void)
/* Callers must be in an RCU read section or hold the vhost_vsock_mutex.
* The return value can only be dereferenced while within the section.
*/
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid, struct net *net)
{
struct vhost_vsock *vsock;
@@ -81,9 +83,9 @@ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
if (other_cid == 0)
continue;
- if (other_cid == guest_cid)
+ if (other_cid == guest_cid &&
+ vsock_net_check_mode(net, vsock->net))
return vsock;
-
}
return NULL;
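The added vsock_net_check_mode() test is what scopes guest CIDs to network namespaces: the lookup now matches only a vhost_vsock whose owning netns is compatible with the caller's. The helper is defined in the accompanying af_vsock core changes; a plausible sketch of its shape, where vsock_net_mode() and the VSOCK_NET_MODE_* values are assumed names, not confirmed API:

/*
 * Sketch only; the real vsock_net_check_mode() lives in the af_vsock
 * core. vsock_net_mode() and VSOCK_NET_MODE_* are assumed names here.
 */
static inline bool vsock_net_check_mode_sketch(struct net *a, struct net *b)
{
	/* "global" keeps today's behaviour: CIDs visible everywhere. */
	if (vsock_net_mode(a) == VSOCK_NET_MODE_GLOBAL &&
	    vsock_net_mode(b) == VSOCK_NET_MODE_GLOBAL)
		return true;
	/* "local" confines a CID to its own namespace. */
	return net_eq(a, b);
}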
@@ -272,7 +274,7 @@ static void vhost_transport_send_pkt_work(struct vhost_work *work)
}
static int
-vhost_transport_send_pkt(struct sk_buff *skb)
+vhost_transport_send_pkt(struct sk_buff *skb, struct net *net)
{
struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vhost_vsock *vsock;
@@ -281,7 +283,7 @@ vhost_transport_send_pkt(struct sk_buff *skb)
rcu_read_lock();
/* Find the vhost_vsock according to guest context id */
- vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
+ vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid), net);
if (!vsock) {
rcu_read_unlock();
kfree_skb(skb);
@@ -308,7 +310,8 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
rcu_read_lock();
/* Find the vhost_vsock according to guest context id */
- vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+ vsock = vhost_vsock_get(vsk->remote_addr.svm_cid,
+ sock_net(sk_vsock(vsk)));
if (!vsock)
goto out;
@@ -407,7 +410,14 @@ static bool vhost_transport_msgzerocopy_allow(void)
return true;
}
-static bool vhost_transport_seqpacket_allow(u32 remote_cid);
+static bool vhost_transport_seqpacket_allow(struct vsock_sock *vsk,
+ u32 remote_cid);
+
+static bool
+vhost_transport_stream_allow(struct vsock_sock *vsk, u32 cid, u32 port)
+{
+ return true;
+}
static struct virtio_transport vhost_transport = {
.transport = {
@@ -433,7 +443,7 @@ static struct virtio_transport vhost_transport = {
.stream_has_space = virtio_transport_stream_has_space,
.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
.stream_is_active = virtio_transport_stream_is_active,
- .stream_allow = virtio_transport_stream_allow,
+ .stream_allow = vhost_transport_stream_allow,
.seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
.seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
@@ -463,13 +473,15 @@ static struct virtio_transport vhost_transport = {
.send_pkt = vhost_transport_send_pkt,
};
-static bool vhost_transport_seqpacket_allow(u32 remote_cid)
+static bool vhost_transport_seqpacket_allow(struct vsock_sock *vsk,
+ u32 remote_cid)
{
+ struct net *net = sock_net(sk_vsock(vsk));
struct vhost_vsock *vsock;
bool seqpacket_allow = false;
rcu_read_lock();
- vsock = vhost_vsock_get(remote_cid);
+ vsock = vhost_vsock_get(remote_cid, net);
if (vsock)
seqpacket_allow = vsock->seqpacket_allow;
@@ -540,7 +552,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
le64_to_cpu(hdr->dst_cid) ==
vhost_transport_get_local_cid())
- virtio_transport_recv_pkt(&vhost_transport, skb);
+ virtio_transport_recv_pkt(&vhost_transport, skb,
+ vsock->net);
else
kfree_skb(skb);
@@ -657,6 +670,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
struct vhost_virtqueue **vqs;
struct vhost_vsock *vsock;
+ struct net *net;
int ret;
/* This struct is large and allocation could fail, fall back to vmalloc
@@ -672,6 +686,9 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
goto out;
}
+ net = current->nsproxy->net_ns;
+ vsock->net = get_net_track(net, &vsock->ns_tracker, GFP_KERNEL);
+
vsock->guest_cid = 0; /* no CID assigned yet */
vsock->seqpacket_allow = false;
@@ -713,7 +730,7 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
rcu_read_lock();
/* If the peer is still valid, no need to reset connection */
- if (vhost_vsock_get(vsk->remote_addr.svm_cid)) {
+ if (vhost_vsock_get(vsk->remote_addr.svm_cid, sock_net(sk))) {
rcu_read_unlock();
return;
}
@@ -762,6 +779,7 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
vhost_dev_cleanup(&vsock->dev);
+ put_net_track(vsock->net, &vsock->ns_tracker);
kfree(vsock->dev.vqs);
vhost_vsock_free(vsock);
return 0;
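The put here pairs with the get_net_track() in vhost_vsock_dev_open(), so the namespace stays alive for exactly the lifetime of the vhost device, and the tracker lets the kernel's netns refcount tracking attribute any leaked reference to this holder. The pairing, reduced to its essentials as an illustrative sketch:

/* Illustrative pairing of tracked netns references. */
struct holder {
	struct net *net;
	netns_tracker ns_tracker;
};

static void holder_open(struct holder *h, struct net *net)
{
	/* Take a tracked reference, recorded per-holder when the
	 * netns refcount tracker is enabled. */
	h->net = get_net_track(net, &h->ns_tracker, GFP_KERNEL);
}

static void holder_release(struct holder *h)
{
	put_net_track(h->net, &h->ns_tracker);	/* drop the same reference */
}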
@@ -788,7 +806,7 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
/* Refuse if CID is already in use */
mutex_lock(&vhost_vsock_mutex);
- other = vhost_vsock_get(guest_cid);
+ other = vhost_vsock_get(guest_cid, vsock->net);
if (other && other != vsock) {
mutex_unlock(&vhost_vsock_mutex);
return -EADDRINUSE;