Diffstat (limited to 'drivers/net/ethernet/broadcom')
26 files changed, 3438 insertions, 158 deletions
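A note on the doorbell encoding this series introduces (a worked example, not part of the patch): for a hypothetical TX ring with 2048 descriptors, bnge_set_db_mask() in bnge_netdev.c below derives

	db_ring_mask   = 2048 - 1 = 0x7ff;
	db_epoch_mask  = db_ring_mask + 1 = 0x800;	/* bit 11 of the producer index */
	db_epoch_shift = DBR_EPOCH_SFT - ilog2(0x800) = 24 - 11 = 13;

so the DB_RING_IDX() macro from the new bnge_db.h keeps the low 11 bits of the producer index and relocates bit 11, which flips on every ring wrap, into the doorbell's epoch bit:

	/* sketch: the 64-bit value bnge_db_write() sends for producer index idx */
	u32 epoch  = (idx & 0x800) << 13;	/* index bit 11 -> DBR bit 24 */
	u64 db_val = db->db_key64 | (idx & 0x7ff) | epoch;

db_key64 already carries DBR_PATH_L2 | DBR_TYPE_SQ, the ring's XID at DBR_XID_SFT and DBR_VALID, per bnge_set_db() further down.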
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 0fc10e6c6902..9fdef874f5ca 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -257,6 +257,7 @@ config BNGE
 	tristate "Broadcom Ethernet device support"
 	depends on PCI
 	select NET_DEVLINK
+	select PAGE_POOL
 	help
 	  This driver supports Broadcom 50/100/200/400/800 gigabit Ethernet cards.
 	  The module will be called bng_en. To compile this driver as a module,
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
index 6fb3683b6b04..7aed5f81cd51 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -102,6 +102,10 @@ struct bnge_dev {
 	u16			chip_num;
 	u8			chip_rev;
 
+#if BITS_PER_LONG == 32
+	/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
+	spinlock_t		db_lock;
+#endif
 	int			db_offset; /* db_offset within db_size */
 	int			db_size;
 
@@ -129,6 +133,7 @@ struct bnge_dev {
 	unsigned long		state;
 #define BNGE_STATE_DRV_REGISTERED	0
+#define BNGE_STATE_OPEN			1
 
 	u64			fw_cap;
 
@@ -155,6 +160,7 @@ struct bnge_dev {
 	u16			rss_indir_tbl_entries;
 	u32			rss_cap;
+	u32			rss_hash_cfg;
 
 	u16			rx_nr_rings;
 	u16			tx_nr_rings;
@@ -213,6 +219,27 @@ static inline bool bnge_is_agg_reqd(struct bnge_dev *bd)
 	return true;
 }
 
+static inline void bnge_writeq(struct bnge_dev *bd, u64 val,
+			       void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+	spin_lock(&bd->db_lock);
+	lo_hi_writeq(val, addr);
+	spin_unlock(&bd->db_lock);
+#else
+	writeq(val, addr);
+#endif
+}
+
+/* For TX and RX ring doorbells */
+static inline void bnge_db_write(struct bnge_dev *bd, struct bnge_db_info *db,
+				 u32 idx)
+{
+	bnge_writeq(bd, db->db_key64 | DB_RING_IDX(db, idx),
+		    db->doorbell);
+}
+
 bool bnge_aux_registered(struct bnge_dev *bd);
+u16 bnge_aux_get_msix(struct bnge_dev *bd);
 
 #endif /* _BNGE_H_ */
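Why bnge_writeq() above takes bd->db_lock on 32-bit kernels: a 64-bit MMIO store is not single-copy atomic there and degenerates to two 32-bit stores. A minimal sketch of the hazard (illustrative only, not driver code):

	/* what an unlocked 64-bit doorbell write becomes when
	 * BITS_PER_LONG == 32: two independent 32-bit stores
	 */
	static void racy_db_write(void __iomem *addr, u64 val)
	{
		writel(lower_32_bits(val), addr);	/* low half */
		writel(upper_32_bits(val), addr + 4);	/* high half */
	}

Two CPUs ringing the same doorbell can interleave the halves, so the NIC may latch the low word of one write with the high word of another. Serializing lo_hi_writeq() under the spinlock makes the pair appear atomic to the device; 64-bit kernels get a plain writeq().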
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_core.c b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
index 68da656f2894..2c72dd34d50d 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_core.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
@@ -96,6 +96,16 @@ static void bnge_fw_unregister_dev(struct bnge_dev *bd)
 	bnge_free_ctx_mem(bd);
 }
 
+static void bnge_set_dflt_rss_hash_type(struct bnge_dev *bd)
+{
+	bd->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 |
+			   VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+			   VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+}
+
 static int bnge_fw_register_dev(struct bnge_dev *bd)
 {
 	int rc;
@@ -137,6 +147,8 @@ static int bnge_fw_register_dev(struct bnge_dev *bd)
 		goto err_func_unrgtr;
 	}
 
+	bnge_set_dflt_rss_hash_type(bd);
+
 	return 0;
 
 err_func_unrgtr:
@@ -296,6 +308,10 @@ static int bnge_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_config_uninit;
 	}
 
+#if BITS_PER_LONG == 32
+	spin_lock_init(&bd->db_lock);
+#endif
+
 	rc = bnge_alloc_irqs(bd);
 	if (rc) {
 		dev_err(&pdev->dev, "Error IRQ allocation rc = %d\n", rc);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_db.h b/drivers/net/ethernet/broadcom/bnge/bnge_db.h
new file mode 100644
index 000000000000..950ed582f1d8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_db.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_DB_H_
+#define _BNGE_DB_H_
+
+/* 64-bit doorbell */
+#define DBR_EPOCH_SFT		24
+#define DBR_TOGGLE_SFT		25
+#define DBR_XID_SFT		32
+#define DBR_PATH_L2		(0x1ULL << 56)
+#define DBR_VALID		(0x1ULL << 58)
+#define DBR_TYPE_SQ		(0x0ULL << 60)
+#define DBR_TYPE_SRQ		(0x2ULL << 60)
+#define DBR_TYPE_CQ		(0x4ULL << 60)
+#define DBR_TYPE_CQ_ARMALL	(0x6ULL << 60)
+#define DBR_TYPE_NQ		(0xaULL << 60)
+#define DBR_TYPE_NQ_ARM		(0xbULL << 60)
+#define DBR_TYPE_NQ_MASK	(0xeULL << 60)
+
+struct bnge_db_info {
+	void __iomem		*doorbell;
+	u64			db_key64;
+	u32			db_ring_mask;
+	u32			db_epoch_mask;
+	u8			db_epoch_shift;
+};
+
+#define DB_EPOCH(db, idx)	(((idx) & (db)->db_epoch_mask) <<	\
+				 ((db)->db_epoch_shift))
+#define DB_RING_IDX(db, idx)	(((idx) & (db)->db_ring_mask) |		\
+				 DB_EPOCH(db, idx))
+
+#endif /* _BNGE_DB_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
index 5c178fade065..198f49b40dbf 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
@@ -6,6 +6,8 @@
 #include <linux/mm.h>
 #include <linux/pci.h>
 #include <linux/bnxt/hsi.h>
+#include <linux/if_vlan.h>
+#include <net/netdev_queues.h>
 
 #include "bnge.h"
 #include "bnge_hwrm.h"
@@ -701,3 +703,483 @@ qportcfg_exit:
 	bnge_hwrm_req_drop(bd, req);
 	return rc;
 }
+
+int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+	u16 hds_thresh = (u16)bn->netdev->cfg_pending->hds_thresh;
+	struct hwrm_vnic_plcmodes_cfg_input *req;
+	struct bnge_dev *bd = bn->bd;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_PLCMODES_CFG);
+	if (rc)
+		return rc;
+
+	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+	req->enables = cpu_to_le32(BNGE_PLC_EN_JUMBO_THRES_VALID);
+	req->jumbo_thresh = cpu_to_le16(bn->rx_buf_use_size);
+
+	if (bnge_is_agg_reqd(bd)) {
+		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+		req->enables |=
+			cpu_to_le32(BNGE_PLC_EN_HDS_THRES_VALID);
+		req->hds_threshold = cpu_to_le16(hds_thresh);
+	}
+	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+	return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
+			     struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
+	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
+	if (rc)
+		return rc;
+
+	resp = bnge_hwrm_req_hold(bd, req);
+	rc = bnge_hwrm_req_send(bd, req);
+	if (!rc)
+		vnic->fw_rss_cos_lb_ctx[ctx_idx] =
+			le16_to_cpu(resp->rss_cos_lb_ctx_id);
+	bnge_hwrm_req_drop(bd, req);
+
+	return rc;
+}
+
+static void
+__bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+			 struct hwrm_vnic_rss_cfg_input *req,
+			 struct bnge_vnic_info *vnic)
+{
+	struct bnge_dev *bd = bn->bd;
+
+	bnge_fill_hw_rss_tbl(bn, vnic);
+	req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+
+	req->hash_type = cpu_to_le32(bd->rss_hash_cfg);
+	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+	req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+}
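For context (a sketch; bnge_fill_hw_rss_tbl() is added further down, in bnge_netdev.c): the table handed to the firmware above is laid out as pairs of little-endian ring IDs, one pair per indirection-table slot:

	/* slot i steers to the RX ring chosen by bd->rss_indir_tbl[i] */
	rxr = &bn->rx_ring[bd->rss_indir_tbl[i]];
	*ring_tbl++ = cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
	*ring_tbl++ = cpu_to_le16(bnge_cp_ring_for_rx(rxr));	/* paired cmpl ring */

bnge_hwrm_vnic_set_rss() below then programs one RSS context per chunk of that table, stepping ring_grp_tbl_addr by BNGE_RSS_TABLE_SIZE for each context.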
+
+int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+			   struct bnge_vnic_info *vnic, bool set_rss)
+{
+	struct hwrm_vnic_rss_cfg_input *req;
+	struct bnge_dev *bd = bn->bd;
+	dma_addr_t ring_tbl_map;
+	u32 i, nr_ctxs;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_CFG);
+	if (rc)
+		return rc;
+
+	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	if (!set_rss)
+		return bnge_hwrm_req_send(bd, req);
+
+	__bnge_hwrm_vnic_set_rss(bn, req, vnic);
+	ring_tbl_map = vnic->rss_table_dma_addr;
+	nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
+
+	bnge_hwrm_req_hold(bd, req);
+	for (i = 0; i < nr_ctxs; ring_tbl_map += BNGE_RSS_TABLE_SIZE, i++) {
+		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
+		req->ring_table_pair_index = i;
+		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
+		rc = bnge_hwrm_req_send(bd, req);
+		if (rc)
+			goto exit;
+	}
+
+exit:
+	bnge_hwrm_req_drop(bd, req);
+	return rc;
+}
+
+int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+	struct bnge_rx_ring_info *rxr = &bn->rx_ring[0];
+	struct hwrm_vnic_cfg_input *req;
+	struct bnge_dev *bd = bn->bd;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_CFG);
+	if (rc)
+		return rc;
+
+	req->default_rx_ring_id =
+		cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
+	req->default_cmpl_ring_id =
+		cpu_to_le16(bnge_cp_ring_for_rx(rxr));
+	req->enables =
+		cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
+			    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
+	vnic->mru = bd->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+	req->mru = cpu_to_le16(vnic->mru);
+
+	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+	if (bd->flags & BNGE_EN_STRIP_VLAN)
+		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+	if (vnic->vnic_id == BNGE_VNIC_DEFAULT && bnge_aux_registered(bd))
+		req->flags |= cpu_to_le32(BNGE_VNIC_CFG_ROCE_DUAL_MODE);
+
+	return bnge_hwrm_req_send(bd, req);
+}
+
+void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn)
+{
+	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+	struct hwrm_vnic_rss_qcfg_output *resp;
+	struct hwrm_vnic_rss_qcfg_input *req;
+	struct bnge_dev *bd = bn->bd;
+
+	if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_QCFG))
+		return;
+
+	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	/* all contexts configured to same hash_type, zero always exists */
+	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+	resp = bnge_hwrm_req_hold(bd, req);
+	if (!bnge_hwrm_req_send(bd, req))
+		bd->rss_hash_cfg =
+			le32_to_cpu(resp->hash_type) ?: bd->rss_hash_cfg;
+	bnge_hwrm_req_drop(bd, req);
+}
+
+int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+	struct hwrm_cfa_l2_filter_free_input *req;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_FREE);
+	if (rc)
+		return rc;
+
+	req->l2_filter_id = fltr->base.filter_id;
+	return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+	struct hwrm_cfa_l2_filter_alloc_output *resp;
+	struct hwrm_cfa_l2_filter_alloc_input *req;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_ALLOC);
+	if (rc)
+		return rc;
+
+	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+	req->flags |= cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+	req->enables =
+		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+	eth_broadcast_addr(req->l2_addr_mask);
+
+	if (fltr->l2_key.vlan) {
+		req->enables |=
+			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+				    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+				    CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+		req->num_vlans = 1;
+		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+		req->l2_ivlan_mask = cpu_to_le16(0xfff);
+	}
+
+	resp = bnge_hwrm_req_hold(bd, req);
+	rc = bnge_hwrm_req_send(bd,
req); + if (!rc) + fltr->base.filter_id = resp->l2_filter_id; + + bnge_hwrm_req_drop(bd, req); + return rc; +} + +int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd, + struct bnge_vnic_info *vnic) +{ + struct hwrm_cfa_l2_set_rx_mask_input *req; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_SET_RX_MASK); + if (rc) + return rc; + + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { + req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); + req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + } + req->mask = cpu_to_le32(vnic->rx_mask); + return bnge_hwrm_req_send_silent(bd, req); +} + +int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic, + unsigned int nr_rings) +{ + struct hwrm_vnic_alloc_output *resp; + struct hwrm_vnic_alloc_input *req; + unsigned int i; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_ALLOC); + if (rc) + return rc; + + for (i = 0; i < BNGE_MAX_CTX_PER_VNIC; i++) + vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; + if (vnic->vnic_id == BNGE_VNIC_DEFAULT) + req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); + + resp = bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send(bd, req); + if (!rc) + vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); + bnge_hwrm_req_drop(bd, req); + return rc; +} + +void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic) +{ + if (vnic->fw_vnic_id != INVALID_HW_RING_ID) { + struct hwrm_vnic_free_input *req; + + if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_FREE)) + return; + + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + + bnge_hwrm_req_send(bd, req); + vnic->fw_vnic_id = INVALID_HW_RING_ID; + } +} + +void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd, + struct bnge_vnic_info *vnic, u16 ctx_idx) +{ + struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; + + if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) + return; + + req->rss_cos_lb_ctx_id = + cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]); + + bnge_hwrm_req_send(bd, req); + vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; +} + +void bnge_hwrm_stat_ctx_free(struct bnge_net *bn) +{ + struct hwrm_stat_ctx_free_input *req; + struct bnge_dev *bd = bn->bd; + int i; + + if (bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_FREE)) + return; + + bnge_hwrm_req_hold(bd, req); + for (i = 0; i < bd->nq_nr_rings; i++) { + struct bnge_napi *bnapi = bn->bnapi[i]; + struct bnge_nq_ring_info *nqr = &bnapi->nq_ring; + + if (nqr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { + req->stat_ctx_id = cpu_to_le32(nqr->hw_stats_ctx_id); + bnge_hwrm_req_send(bd, req); + + nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; + } + } + bnge_hwrm_req_drop(bd, req); +} + +int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn) +{ + struct hwrm_stat_ctx_alloc_output *resp; + struct hwrm_stat_ctx_alloc_input *req; + struct bnge_dev *bd = bn->bd; + int rc, i; + + rc = bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_ALLOC); + if (rc) + return rc; + + req->stats_dma_length = cpu_to_le16(bd->hw_ring_stats_size); + req->update_period_ms = cpu_to_le32(bn->stats_coal_ticks / 1000); + + resp = bnge_hwrm_req_hold(bd, req); + for (i = 0; i < bd->nq_nr_rings; i++) { + struct bnge_napi *bnapi = bn->bnapi[i]; + struct bnge_nq_ring_info *nqr = &bnapi->nq_ring; + + req->stats_dma_addr = cpu_to_le64(nqr->stats.hw_stats_map); + + rc = bnge_hwrm_req_send(bd, req); + if (rc) + break; + + nqr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); + bn->grp_info[i].fw_stats_ctx = nqr->hw_stats_ctx_id; + } + bnge_hwrm_req_drop(bd, req); + 
return rc; +} + +int hwrm_ring_free_send_msg(struct bnge_net *bn, + struct bnge_ring_struct *ring, + u32 ring_type, int cmpl_ring_id) +{ + struct hwrm_ring_free_input *req; + struct bnge_dev *bd = bn->bd; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_RING_FREE); + if (rc) + goto exit; + + req->cmpl_ring = cpu_to_le16(cmpl_ring_id); + req->ring_type = ring_type; + req->ring_id = cpu_to_le16(ring->fw_ring_id); + + bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send(bd, req); + bnge_hwrm_req_drop(bd, req); +exit: + if (rc) { + netdev_err(bd->netdev, "hwrm_ring_free type %d failed. rc:%d\n", ring_type, rc); + return -EIO; + } + return 0; +} + +int hwrm_ring_alloc_send_msg(struct bnge_net *bn, + struct bnge_ring_struct *ring, + u32 ring_type, u32 map_index) +{ + struct bnge_ring_mem_info *rmem = &ring->ring_mem; + struct bnge_ring_grp_info *grp_info; + struct hwrm_ring_alloc_output *resp; + struct hwrm_ring_alloc_input *req; + struct bnge_dev *bd = bn->bd; + u16 ring_id, flags = 0; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_RING_ALLOC); + if (rc) + goto exit; + + req->enables = 0; + if (rmem->nr_pages > 1) { + req->page_tbl_addr = cpu_to_le64(rmem->dma_pg_tbl); + /* Page size is in log2 units */ + req->page_size = BNGE_PAGE_SHIFT; + req->page_tbl_depth = 1; + } else { + req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); + } + req->fbo = 0; + /* Association of ring index with doorbell index and MSIX number */ + req->logical_id = cpu_to_le16(map_index); + + switch (ring_type) { + case HWRM_RING_ALLOC_TX: { + struct bnge_tx_ring_info *txr; + + txr = container_of(ring, struct bnge_tx_ring_info, + tx_ring_struct); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; + /* Association of transmit ring with completion ring */ + grp_info = &bn->grp_info[ring->grp_idx]; + req->cmpl_ring_id = cpu_to_le16(bnge_cp_ring_for_tx(txr)); + req->length = cpu_to_le32(bn->tx_ring_mask + 1); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->queue_id = cpu_to_le16(ring->queue_id); + req->flags = cpu_to_le16(flags); + break; + } + case HWRM_RING_ALLOC_RX: + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->length = cpu_to_le32(bn->rx_ring_mask + 1); + + /* Association of rx ring with stats context */ + grp_info = &bn->grp_info[ring->grp_idx]; + req->rx_buf_size = cpu_to_le16(bn->rx_buf_use_size); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->enables |= + cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); + if (NET_IP_ALIGN == 2) + flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; + req->flags = cpu_to_le16(flags); + break; + case HWRM_RING_ALLOC_AGG: + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; + /* Association of agg ring with rx ring */ + grp_info = &bn->grp_info[ring->grp_idx]; + req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); + req->rx_buf_size = cpu_to_le16(BNGE_RX_PAGE_SIZE); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->enables |= + cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | + RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); + req->length = cpu_to_le32(bn->rx_agg_ring_mask + 1); + break; + case HWRM_RING_ALLOC_CMPL: + req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; + req->length = cpu_to_le32(bn->cp_ring_mask + 1); + /* Association of cp ring with nq */ + grp_info = &bn->grp_info[map_index]; + req->nq_ring_id = cpu_to_le16(grp_info->nq_fw_ring_id); + req->cq_handle = cpu_to_le64(ring->handle); + req->enables |= + cpu_to_le32(RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); + break; + case HWRM_RING_ALLOC_NQ: + req->ring_type = 
RING_ALLOC_REQ_RING_TYPE_NQ; + req->length = cpu_to_le32(bn->cp_ring_mask + 1); + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + break; + default: + netdev_err(bn->netdev, "hwrm alloc invalid ring type %d\n", ring_type); + return -EINVAL; + } + + resp = bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send(bd, req); + ring_id = le16_to_cpu(resp->ring_id); + bnge_hwrm_req_drop(bd, req); + +exit: + if (rc) { + netdev_err(bd->netdev, "hwrm_ring_alloc type %d failed. rc:%d\n", ring_type, rc); + return -EIO; + } + ring->fw_ring_id = ring_id; + return rc; +} + +int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx) +{ + struct hwrm_func_cfg_input *req; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req->async_event_cr = cpu_to_le16(idx); + return bnge_hwrm_req_send(bd, req); +} diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h index 6c03923eb559..042f28e84a05 100644 --- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h +++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h @@ -4,6 +4,13 @@ #ifndef _BNGE_HWRM_LIB_H_ #define _BNGE_HWRM_LIB_H_ +#define BNGE_PLC_EN_JUMBO_THRES_VALID \ + VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID +#define BNGE_PLC_EN_HDS_THRES_VALID \ + VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID +#define BNGE_VNIC_CFG_ROCE_DUAL_MODE \ + VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE + int bnge_hwrm_ver_get(struct bnge_dev *bd); int bnge_hwrm_func_reset(struct bnge_dev *bd); int bnge_hwrm_fw_set_time(struct bnge_dev *bd); @@ -24,4 +31,28 @@ int bnge_hwrm_func_qcfg(struct bnge_dev *bd); int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd); int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd); +int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic); +int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd, + struct bnge_vnic_info *vnic, u16 ctx_idx); +int bnge_hwrm_vnic_set_rss(struct bnge_net *bn, + struct bnge_vnic_info *vnic, bool set_rss); +int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic); +void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn); +int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic, + unsigned int nr_rings); +void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic); +void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd, + struct bnge_vnic_info *vnic, u16 ctx_idx); +int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr); +int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr); +int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd, + struct bnge_vnic_info *vnic); +void bnge_hwrm_stat_ctx_free(struct bnge_net *bn); +int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn); +int hwrm_ring_free_send_msg(struct bnge_net *bn, struct bnge_ring_struct *ring, + u32 ring_type, int cmpl_ring_id); +int hwrm_ring_alloc_send_msg(struct bnge_net *bn, + struct bnge_ring_struct *ring, + u32 ring_type, u32 map_index); +int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx); #endif /* _BNGE_HWRM_LIB_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c index 02254934f3d0..832eeb960bd2 100644 --- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c +++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c @@ -14,10 +14,2194 @@ #include <linux/if.h> #include <net/ip.h> 
#include <linux/skbuff.h> +#include <net/page_pool/helpers.h> #include "bnge.h" #include "bnge_hwrm_lib.h" #include "bnge_ethtool.h" +#include "bnge_rmem.h" + +#define BNGE_RING_TO_TC_OFF(bd, tx) \ + ((tx) % (bd)->tx_nr_rings_per_tc) + +#define BNGE_RING_TO_TC(bd, tx) \ + ((tx) / (bd)->tx_nr_rings_per_tc) + +#define BNGE_TC_TO_RING_BASE(bd, tc) \ + ((tc) * (bd)->tx_nr_rings_per_tc) + +static void bnge_free_stats_mem(struct bnge_net *bn, + struct bnge_stats_mem *stats) +{ + struct bnge_dev *bd = bn->bd; + + if (stats->hw_stats) { + dma_free_coherent(bd->dev, stats->len, stats->hw_stats, + stats->hw_stats_map); + stats->hw_stats = NULL; + } +} + +static int bnge_alloc_stats_mem(struct bnge_net *bn, + struct bnge_stats_mem *stats) +{ + struct bnge_dev *bd = bn->bd; + + stats->hw_stats = dma_alloc_coherent(bd->dev, stats->len, + &stats->hw_stats_map, GFP_KERNEL); + if (!stats->hw_stats) + return -ENOMEM; + + return 0; +} + +static void bnge_free_ring_stats(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i; + + if (!bn->bnapi) + return; + + for (i = 0; i < bd->nq_nr_rings; i++) { + struct bnge_napi *bnapi = bn->bnapi[i]; + struct bnge_nq_ring_info *nqr = &bnapi->nq_ring; + + bnge_free_stats_mem(bn, &nqr->stats); + } +} + +static int bnge_alloc_ring_stats(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + u32 size, i; + int rc; + + size = bd->hw_ring_stats_size; + + for (i = 0; i < bd->nq_nr_rings; i++) { + struct bnge_napi *bnapi = bn->bnapi[i]; + struct bnge_nq_ring_info *nqr = &bnapi->nq_ring; + + nqr->stats.len = size; + rc = bnge_alloc_stats_mem(bn, &nqr->stats); + if (rc) + goto err_free_ring_stats; + + nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; + } + return 0; + +err_free_ring_stats: + bnge_free_ring_stats(bn); + return rc; +} + +static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr) +{ + struct bnge_ring_struct *ring = &nqr->ring_struct; + + kfree(nqr->desc_ring); + nqr->desc_ring = NULL; + ring->ring_mem.pg_arr = NULL; + kfree(nqr->desc_mapping); + nqr->desc_mapping = NULL; + ring->ring_mem.dma_arr = NULL; +} + +static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr) +{ + struct bnge_ring_struct *ring = &cpr->ring_struct; + + kfree(cpr->desc_ring); + cpr->desc_ring = NULL; + ring->ring_mem.pg_arr = NULL; + kfree(cpr->desc_mapping); + cpr->desc_mapping = NULL; + ring->ring_mem.dma_arr = NULL; +} + +static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n) +{ + nqr->desc_ring = kcalloc(n, sizeof(*nqr->desc_ring), GFP_KERNEL); + if (!nqr->desc_ring) + return -ENOMEM; + + nqr->desc_mapping = kcalloc(n, sizeof(*nqr->desc_mapping), GFP_KERNEL); + if (!nqr->desc_mapping) + goto err_free_desc_ring; + return 0; + +err_free_desc_ring: + kfree(nqr->desc_ring); + nqr->desc_ring = NULL; + return -ENOMEM; +} + +static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n) +{ + cpr->desc_ring = kcalloc(n, sizeof(*cpr->desc_ring), GFP_KERNEL); + if (!cpr->desc_ring) + return -ENOMEM; + + cpr->desc_mapping = kcalloc(n, sizeof(*cpr->desc_mapping), GFP_KERNEL); + if (!cpr->desc_mapping) + goto err_free_desc_ring; + return 0; + +err_free_desc_ring: + kfree(cpr->desc_ring); + cpr->desc_ring = NULL; + return -ENOMEM; +} + +static void bnge_free_nq_arrays(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i; + + for (i = 0; i < bd->nq_nr_rings; i++) { + struct bnge_napi *bnapi = bn->bnapi[i]; + + bnge_free_nq_desc_arr(&bnapi->nq_ring); + } +} + +static int bnge_alloc_nq_arrays(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + 
int i, rc; + + for (i = 0; i < bd->nq_nr_rings; i++) { + struct bnge_napi *bnapi = bn->bnapi[i]; + + rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages); + if (rc) + goto err_free_nq_arrays; + } + return 0; + +err_free_nq_arrays: + bnge_free_nq_arrays(bn); + return rc; +} + +static void bnge_free_nq_tree(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i; + + for (i = 0; i < bd->nq_nr_rings; i++) { + struct bnge_napi *bnapi = bn->bnapi[i]; + struct bnge_nq_ring_info *nqr; + struct bnge_ring_struct *ring; + int j; + + nqr = &bnapi->nq_ring; + ring = &nqr->ring_struct; + + bnge_free_ring(bd, &ring->ring_mem); + + if (!nqr->cp_ring_arr) + continue; + + for (j = 0; j < nqr->cp_ring_count; j++) { + struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j]; + + ring = &cpr->ring_struct; + bnge_free_ring(bd, &ring->ring_mem); + bnge_free_cp_desc_arr(cpr); + } + kfree(nqr->cp_ring_arr); + nqr->cp_ring_arr = NULL; + nqr->cp_ring_count = 0; + } +} + +static int alloc_one_cp_ring(struct bnge_net *bn, + struct bnge_cp_ring_info *cpr) +{ + struct bnge_ring_mem_info *rmem; + struct bnge_ring_struct *ring; + struct bnge_dev *bd = bn->bd; + int rc; + + rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages); + if (rc) + return -ENOMEM; + ring = &cpr->ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bn->cp_nr_pages; + rmem->page_size = HW_CMPD_RING_SIZE; + rmem->pg_arr = (void **)cpr->desc_ring; + rmem->dma_arr = cpr->desc_mapping; + rmem->flags = BNGE_RMEM_RING_PTE_FLAG; + rc = bnge_alloc_ring(bd, rmem); + if (rc) + goto err_free_cp_desc_arr; + return rc; + +err_free_cp_desc_arr: + bnge_free_cp_desc_arr(cpr); + return rc; +} + +static int bnge_alloc_nq_tree(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i, j, ulp_msix, rc; + int tcs = 1; + + ulp_msix = bnge_aux_get_msix(bd); + for (i = 0, j = 0; i < bd->nq_nr_rings; i++) { + bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL); + struct bnge_napi *bnapi = bn->bnapi[i]; + struct bnge_nq_ring_info *nqr; + struct bnge_cp_ring_info *cpr; + struct bnge_ring_struct *ring; + int cp_count = 0, k; + int rx = 0, tx = 0; + + nqr = &bnapi->nq_ring; + nqr->bnapi = bnapi; + ring = &nqr->ring_struct; + + rc = bnge_alloc_ring(bd, &ring->ring_mem); + if (rc) + goto err_free_nq_tree; + + ring->map_idx = ulp_msix + i; + + if (i < bd->rx_nr_rings) { + cp_count++; + rx = 1; + } + + if ((sh && i < bd->tx_nr_rings) || + (!sh && i >= bd->rx_nr_rings)) { + cp_count += tcs; + tx = 1; + } + + nqr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr), + GFP_KERNEL); + if (!nqr->cp_ring_arr) { + rc = -ENOMEM; + goto err_free_nq_tree; + } + + nqr->cp_ring_count = cp_count; + + for (k = 0; k < cp_count; k++) { + cpr = &nqr->cp_ring_arr[k]; + rc = alloc_one_cp_ring(bn, cpr); + if (rc) + goto err_free_nq_tree; + + cpr->bnapi = bnapi; + cpr->cp_idx = k; + if (!k && rx) { + bn->rx_ring[i].rx_cpr = cpr; + cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX; + } else { + int n, tc = k - rx; + + n = BNGE_TC_TO_RING_BASE(bd, tc) + j; + bn->tx_ring[n].tx_cpr = cpr; + cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX; + } + } + if (tx) + j++; + } + return 0; + +err_free_nq_tree: + bnge_free_nq_tree(bn); + return rc; +} + +static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr) +{ + return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE; +} + +static void bnge_free_one_rx_ring_bufs(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr) +{ + int i, max_idx; + + if (!rxr->rx_buf_ring) + return; + + max_idx = bn->rx_nr_pages * RX_DESC_CNT; + + for (i = 0; i < max_idx; i++) { + struct 
bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; + void *data = rx_buf->data; + + if (!data) + continue; + + rx_buf->data = NULL; + page_pool_free_va(rxr->head_pool, data, true); + } +} + +static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr) +{ + int i, max_idx; + + if (!rxr->rx_agg_buf_ring) + return; + + max_idx = bn->rx_agg_nr_pages * RX_DESC_CNT; + + for (i = 0; i < max_idx; i++) { + struct bnge_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_buf_ring[i]; + netmem_ref netmem = rx_agg_buf->netmem; + + if (!netmem) + continue; + + rx_agg_buf->netmem = 0; + __clear_bit(i, rxr->rx_agg_bmap); + + page_pool_recycle_direct_netmem(rxr->page_pool, netmem); + } +} + +static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr) +{ + bnge_free_one_rx_ring_bufs(bn, rxr); + bnge_free_one_agg_ring_bufs(bn, rxr); +} + +static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i; + + if (!bn->rx_ring) + return; + + for (i = 0; i < bd->rx_nr_rings; i++) + bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]); +} + +static void bnge_free_all_rings_bufs(struct bnge_net *bn) +{ + bnge_free_rx_ring_pair_bufs(bn); +} + +static void bnge_free_rx_rings(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i; + + for (i = 0; i < bd->rx_nr_rings; i++) { + struct bnge_rx_ring_info *rxr = &bn->rx_ring[i]; + struct bnge_ring_struct *ring; + + page_pool_destroy(rxr->page_pool); + page_pool_destroy(rxr->head_pool); + rxr->page_pool = rxr->head_pool = NULL; + + kfree(rxr->rx_agg_bmap); + rxr->rx_agg_bmap = NULL; + + ring = &rxr->rx_ring_struct; + bnge_free_ring(bd, &ring->ring_mem); + + ring = &rxr->rx_agg_ring_struct; + bnge_free_ring(bd, &ring->ring_mem); + } +} + +static int bnge_alloc_rx_page_pool(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr, + int numa_node) +{ + const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE; + const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K; + struct page_pool_params pp = { 0 }; + struct bnge_dev *bd = bn->bd; + struct page_pool *pool; + + pp.pool_size = bn->rx_agg_ring_size / agg_size_fac; + pp.nid = numa_node; + pp.netdev = bn->netdev; + pp.dev = bd->dev; + pp.dma_dir = bn->rx_dir; + pp.max_len = PAGE_SIZE; + pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | + PP_FLAG_ALLOW_UNREADABLE_NETMEM; + pp.queue_idx = rxr->bnapi->index; + + pool = page_pool_create(&pp); + if (IS_ERR(pool)) + return PTR_ERR(pool); + rxr->page_pool = pool; + + rxr->need_head_pool = page_pool_is_unreadable(pool); + if (bnge_separate_head_pool(rxr)) { + pp.pool_size = min(bn->rx_ring_size / rx_size_fac, 1024); + pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pool = page_pool_create(&pp); + if (IS_ERR(pool)) + goto err_destroy_pp; + } else { + page_pool_get(pool); + } + rxr->head_pool = pool; + return 0; + +err_destroy_pp: + page_pool_destroy(rxr->page_pool); + rxr->page_pool = NULL; + return PTR_ERR(pool); +} + +static void bnge_enable_rx_page_pool(struct bnge_rx_ring_info *rxr) +{ + page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi); + page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi); +} + +static int bnge_alloc_rx_agg_bmap(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr) +{ + u16 mem_size; + + rxr->rx_agg_bmap_size = bn->rx_agg_ring_mask + 1; + mem_size = rxr->rx_agg_bmap_size / 8; + rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); + if (!rxr->rx_agg_bmap) + return -ENOMEM; + + return 0; +} + +static int 
bnge_alloc_rx_rings(struct bnge_net *bn) +{ + int i, rc = 0, agg_rings = 0, cpu; + struct bnge_dev *bd = bn->bd; + + if (bnge_is_agg_reqd(bd)) + agg_rings = 1; + + for (i = 0; i < bd->rx_nr_rings; i++) { + struct bnge_rx_ring_info *rxr = &bn->rx_ring[i]; + struct bnge_ring_struct *ring; + int cpu_node; + + ring = &rxr->rx_ring_struct; + + cpu = cpumask_local_spread(i, dev_to_node(bd->dev)); + cpu_node = cpu_to_node(cpu); + netdev_dbg(bn->netdev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n", + i, cpu_node); + rc = bnge_alloc_rx_page_pool(bn, rxr, cpu_node); + if (rc) + goto err_free_rx_rings; + bnge_enable_rx_page_pool(rxr); + + rc = bnge_alloc_ring(bd, &ring->ring_mem); + if (rc) + goto err_free_rx_rings; + + ring->grp_idx = i; + if (agg_rings) { + ring = &rxr->rx_agg_ring_struct; + rc = bnge_alloc_ring(bd, &ring->ring_mem); + if (rc) + goto err_free_rx_rings; + + ring->grp_idx = i; + rc = bnge_alloc_rx_agg_bmap(bn, rxr); + if (rc) + goto err_free_rx_rings; + } + } + return rc; + +err_free_rx_rings: + bnge_free_rx_rings(bn); + return rc; +} + +static void bnge_free_tx_rings(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i; + + for (i = 0; i < bd->tx_nr_rings; i++) { + struct bnge_tx_ring_info *txr = &bn->tx_ring[i]; + struct bnge_ring_struct *ring; + + ring = &txr->tx_ring_struct; + + bnge_free_ring(bd, &ring->ring_mem); + } +} + +static int bnge_alloc_tx_rings(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i, j, rc; + + for (i = 0, j = 0; i < bd->tx_nr_rings; i++) { + struct bnge_tx_ring_info *txr = &bn->tx_ring[i]; + struct bnge_ring_struct *ring; + u8 qidx; + + ring = &txr->tx_ring_struct; + + rc = bnge_alloc_ring(bd, &ring->ring_mem); + if (rc) + goto err_free_tx_rings; + + ring->grp_idx = txr->bnapi->index; + qidx = bd->tc_to_qidx[j]; + ring->queue_id = bd->q_info[qidx].queue_id; + if (BNGE_RING_TO_TC_OFF(bd, i) == (bd->tx_nr_rings_per_tc - 1)) + j++; + } + return 0; + +err_free_tx_rings: + bnge_free_tx_rings(bn); + return rc; +} + +static void bnge_free_vnic_attributes(struct bnge_net *bn) +{ + struct pci_dev *pdev = bn->bd->pdev; + struct bnge_vnic_info *vnic; + int i; + + if (!bn->vnic_info) + return; + + for (i = 0; i < bn->nr_vnics; i++) { + vnic = &bn->vnic_info[i]; + + kfree(vnic->uc_list); + vnic->uc_list = NULL; + + if (vnic->mc_list) { + dma_free_coherent(&pdev->dev, vnic->mc_list_size, + vnic->mc_list, vnic->mc_list_mapping); + vnic->mc_list = NULL; + } + + if (vnic->rss_table) { + dma_free_coherent(&pdev->dev, vnic->rss_table_size, + vnic->rss_table, + vnic->rss_table_dma_addr); + vnic->rss_table = NULL; + } + + vnic->rss_hash_key = NULL; + vnic->flags = 0; + } +} + +static int bnge_alloc_vnic_attributes(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + struct bnge_vnic_info *vnic; + int i, size; + + for (i = 0; i < bn->nr_vnics; i++) { + vnic = &bn->vnic_info[i]; + + if (vnic->flags & BNGE_VNIC_UCAST_FLAG) { + int mem_size = (BNGE_MAX_UC_ADDRS - 1) * ETH_ALEN; + + vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); + if (!vnic->uc_list) + goto err_free_vnic_attributes; + } + + if (vnic->flags & BNGE_VNIC_MCAST_FLAG) { + vnic->mc_list_size = BNGE_MAX_MC_ADDRS * ETH_ALEN; + vnic->mc_list = + dma_alloc_coherent(bd->dev, + vnic->mc_list_size, + &vnic->mc_list_mapping, + GFP_KERNEL); + if (!vnic->mc_list) + goto err_free_vnic_attributes; + } + + /* Allocate rss table and hash key */ + size = L1_CACHE_ALIGN(BNGE_MAX_RSS_TABLE_SIZE); + + vnic->rss_table_size = size + HW_HASH_KEY_SIZE; + vnic->rss_table = dma_alloc_coherent(bd->dev, + 
vnic->rss_table_size, + &vnic->rss_table_dma_addr, + GFP_KERNEL); + if (!vnic->rss_table) + goto err_free_vnic_attributes; + + vnic->rss_hash_key = ((void *)vnic->rss_table) + size; + vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; + } + return 0; + +err_free_vnic_attributes: + bnge_free_vnic_attributes(bn); + return -ENOMEM; +} + +static int bnge_alloc_vnics(struct bnge_net *bn) +{ + int num_vnics; + + /* Allocate only 1 VNIC for now + * Additional VNICs will be added based on RFS/NTUPLE in future patches + */ + num_vnics = 1; + + bn->vnic_info = kcalloc(num_vnics, sizeof(struct bnge_vnic_info), + GFP_KERNEL); + if (!bn->vnic_info) + return -ENOMEM; + + bn->nr_vnics = num_vnics; + + return 0; +} + +static void bnge_free_vnics(struct bnge_net *bn) +{ + kfree(bn->vnic_info); + bn->vnic_info = NULL; + bn->nr_vnics = 0; +} + +static void bnge_free_ring_grps(struct bnge_net *bn) +{ + kfree(bn->grp_info); + bn->grp_info = NULL; +} + +static int bnge_init_ring_grps(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i; + + bn->grp_info = kcalloc(bd->nq_nr_rings, + sizeof(struct bnge_ring_grp_info), + GFP_KERNEL); + if (!bn->grp_info) + return -ENOMEM; + for (i = 0; i < bd->nq_nr_rings; i++) { + bn->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; + bn->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; + bn->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; + bn->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; + bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID; + } + + return 0; +} + +static void bnge_free_core(struct bnge_net *bn) +{ + bnge_free_vnic_attributes(bn); + bnge_free_tx_rings(bn); + bnge_free_rx_rings(bn); + bnge_free_nq_tree(bn); + bnge_free_nq_arrays(bn); + bnge_free_ring_stats(bn); + bnge_free_ring_grps(bn); + bnge_free_vnics(bn); + kfree(bn->tx_ring_map); + bn->tx_ring_map = NULL; + kfree(bn->tx_ring); + bn->tx_ring = NULL; + kfree(bn->rx_ring); + bn->rx_ring = NULL; + kfree(bn->bnapi); + bn->bnapi = NULL; +} + +static int bnge_alloc_core(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i, j, size, arr_size; + int rc = -ENOMEM; + void *bnapi; + + arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) * + bd->nq_nr_rings); + size = L1_CACHE_ALIGN(sizeof(struct bnge_napi)); + bnapi = kzalloc(arr_size + size * bd->nq_nr_rings, GFP_KERNEL); + if (!bnapi) + return rc; + + bn->bnapi = bnapi; + bnapi += arr_size; + for (i = 0; i < bd->nq_nr_rings; i++, bnapi += size) { + struct bnge_nq_ring_info *nqr; + + bn->bnapi[i] = bnapi; + bn->bnapi[i]->index = i; + bn->bnapi[i]->bn = bn; + nqr = &bn->bnapi[i]->nq_ring; + nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG; + } + + bn->rx_ring = kcalloc(bd->rx_nr_rings, + sizeof(struct bnge_rx_ring_info), + GFP_KERNEL); + if (!bn->rx_ring) + goto err_free_core; + + for (i = 0; i < bd->rx_nr_rings; i++) { + struct bnge_rx_ring_info *rxr = &bn->rx_ring[i]; + + rxr->rx_ring_struct.ring_mem.flags = + BNGE_RMEM_RING_PTE_FLAG; + rxr->rx_agg_ring_struct.ring_mem.flags = + BNGE_RMEM_RING_PTE_FLAG; + rxr->bnapi = bn->bnapi[i]; + bn->bnapi[i]->rx_ring = &bn->rx_ring[i]; + } + + bn->tx_ring = kcalloc(bd->tx_nr_rings, + sizeof(struct bnge_tx_ring_info), + GFP_KERNEL); + if (!bn->tx_ring) + goto err_free_core; + + bn->tx_ring_map = kcalloc(bd->tx_nr_rings, sizeof(u16), + GFP_KERNEL); + if (!bn->tx_ring_map) + goto err_free_core; + + if (bd->flags & BNGE_EN_SHARED_CHNL) + j = 0; + else + j = bd->rx_nr_rings; + + for (i = 0; i < bd->tx_nr_rings; i++) { + struct bnge_tx_ring_info *txr = &bn->tx_ring[i]; + struct bnge_napi 
*bnapi2; + int k; + + txr->tx_ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG; + bn->tx_ring_map[i] = i; + k = j + BNGE_RING_TO_TC_OFF(bd, i); + + bnapi2 = bn->bnapi[k]; + txr->txq_index = i; + txr->tx_napi_idx = + BNGE_RING_TO_TC(bd, txr->txq_index); + bnapi2->tx_ring[txr->tx_napi_idx] = txr; + txr->bnapi = bnapi2; + } + + rc = bnge_alloc_ring_stats(bn); + if (rc) + goto err_free_core; + + rc = bnge_alloc_vnics(bn); + if (rc) + goto err_free_core; + + rc = bnge_alloc_nq_arrays(bn); + if (rc) + goto err_free_core; + + bnge_init_ring_struct(bn); + + rc = bnge_alloc_rx_rings(bn); + if (rc) + goto err_free_core; + + rc = bnge_alloc_tx_rings(bn); + if (rc) + goto err_free_core; + + rc = bnge_alloc_nq_tree(bn); + if (rc) + goto err_free_core; + + bn->vnic_info[BNGE_VNIC_DEFAULT].flags |= BNGE_VNIC_RSS_FLAG | + BNGE_VNIC_MCAST_FLAG | + BNGE_VNIC_UCAST_FLAG; + rc = bnge_alloc_vnic_attributes(bn); + if (rc) + goto err_free_core; + return 0; + +err_free_core: + bnge_free_core(bn); + return rc; +} + +u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr) +{ + return rxr->rx_cpr->ring_struct.fw_ring_id; +} + +u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr) +{ + return txr->tx_cpr->ring_struct.fw_ring_id; +} + +static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx) +{ + bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK | + DB_RING_IDX(db, idx), db->doorbell); +} + +static void bnge_db_cq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx) +{ + bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_CQ_ARMALL | + DB_RING_IDX(db, idx), db->doorbell); +} + +static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n) +{ + struct bnge_napi *bnapi = bn->bnapi[n]; + struct bnge_nq_ring_info *nqr; + + nqr = &bnapi->nq_ring; + + return nqr->ring_struct.map_idx; +} + +static irqreturn_t bnge_msix(int irq, void *dev_instance) +{ + /* NAPI scheduling to be added in a future patch */ + return IRQ_HANDLED; +} + +static void bnge_init_nq_tree(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i, j; + + for (i = 0; i < bd->nq_nr_rings; i++) { + struct bnge_nq_ring_info *nqr = &bn->bnapi[i]->nq_ring; + struct bnge_ring_struct *ring = &nqr->ring_struct; + + ring->fw_ring_id = INVALID_HW_RING_ID; + for (j = 0; j < nqr->cp_ring_count; j++) { + struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j]; + + ring = &cpr->ring_struct; + ring->fw_ring_id = INVALID_HW_RING_ID; + } + } +} + +static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn, + dma_addr_t *mapping, + struct bnge_rx_ring_info *rxr, + unsigned int *offset, + gfp_t gfp) +{ + netmem_ref netmem; + + if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) { + netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, + BNGE_RX_PAGE_SIZE, gfp); + } else { + netmem = page_pool_alloc_netmems(rxr->page_pool, gfp); + *offset = 0; + } + if (!netmem) + return 0; + + *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset; + return netmem; +} + +static u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping, + struct bnge_rx_ring_info *rxr, + gfp_t gfp) +{ + unsigned int offset; + struct page *page; + + page = page_pool_alloc_frag(rxr->head_pool, &offset, + bn->rx_buf_size, gfp); + if (!page) + return NULL; + + *mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset; + return page_address(page) + offset; +} + +static int bnge_alloc_rx_data(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr, + u16 prod, gfp_t gfp) +{ + struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)]; + struct rx_bd *rxbd; + dma_addr_t 
mapping; + u8 *data; + + rxbd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)]; + data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp); + if (!data) + return -ENOMEM; + + rx_buf->data = data; + rx_buf->data_ptr = data + bn->rx_offset; + rx_buf->mapping = mapping; + + rxbd->rx_bd_haddr = cpu_to_le64(mapping); + + return 0; +} + +static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr, + int ring_nr) +{ + u32 prod = rxr->rx_prod; + int i, rc = 0; + + for (i = 0; i < bn->rx_ring_size; i++) { + rc = bnge_alloc_rx_data(bn, rxr, prod, GFP_KERNEL); + if (rc) + break; + prod = NEXT_RX(prod); + } + + /* Abort if not a single buffer can be allocated */ + if (rc && !i) { + netdev_err(bn->netdev, + "RX ring %d: allocated %d/%d buffers, abort\n", + ring_nr, i, bn->rx_ring_size); + return rc; + } + + rxr->rx_prod = prod; + + if (i < bn->rx_ring_size) + netdev_warn(bn->netdev, + "RX ring %d: allocated %d/%d buffers, continuing\n", + ring_nr, i, bn->rx_ring_size); + return 0; +} + +static u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx) +{ + u16 next, max = rxr->rx_agg_bmap_size; + + next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); + if (next >= max) + next = find_first_zero_bit(rxr->rx_agg_bmap, max); + return next; +} + +static int bnge_alloc_rx_netmem(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr, + u16 prod, gfp_t gfp) +{ + struct bnge_sw_rx_agg_bd *rx_agg_buf; + u16 sw_prod = rxr->rx_sw_agg_prod; + unsigned int offset = 0; + struct rx_bd *rxbd; + dma_addr_t mapping; + netmem_ref netmem; + + rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)][RX_IDX(prod)]; + netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp); + if (!netmem) + return -ENOMEM; + + if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) + sw_prod = bnge_find_next_agg_idx(rxr, sw_prod); + + __set_bit(sw_prod, rxr->rx_agg_bmap); + rx_agg_buf = &rxr->rx_agg_buf_ring[sw_prod]; + rxr->rx_sw_agg_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod)); + + rx_agg_buf->netmem = netmem; + rx_agg_buf->offset = offset; + rx_agg_buf->mapping = mapping; + rxbd->rx_bd_haddr = cpu_to_le64(mapping); + rxbd->rx_bd_opaque = sw_prod; + return 0; +} + +static int bnge_alloc_one_agg_ring_bufs(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr, + int ring_nr) +{ + u32 prod = rxr->rx_agg_prod; + int i, rc = 0; + + for (i = 0; i < bn->rx_agg_ring_size; i++) { + rc = bnge_alloc_rx_netmem(bn, rxr, prod, GFP_KERNEL); + if (rc) + break; + prod = NEXT_RX_AGG(prod); + } + + if (rc && i < MAX_SKB_FRAGS) { + netdev_err(bn->netdev, + "Agg ring %d: allocated %d/%d buffers (min %d), abort\n", + ring_nr, i, bn->rx_agg_ring_size, MAX_SKB_FRAGS); + goto err_free_one_agg_ring_bufs; + } + + rxr->rx_agg_prod = prod; + + if (i < bn->rx_agg_ring_size) + netdev_warn(bn->netdev, + "Agg ring %d: allocated %d/%d buffers, continuing\n", + ring_nr, i, bn->rx_agg_ring_size); + return 0; + +err_free_one_agg_ring_bufs: + bnge_free_one_agg_ring_bufs(bn, rxr); + return -ENOMEM; +} + +static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr) +{ + struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr]; + int rc; + + rc = bnge_alloc_one_rx_ring_bufs(bn, rxr, ring_nr); + if (rc) + return rc; + + if (bnge_is_agg_reqd(bn->bd)) { + rc = bnge_alloc_one_agg_ring_bufs(bn, rxr, ring_nr); + if (rc) + goto err_free_one_rx_ring_bufs; + } + return 0; + +err_free_one_rx_ring_bufs: + bnge_free_one_rx_ring_bufs(bn, rxr); + return rc; +} + +static void bnge_init_rxbd_pages(struct bnge_ring_struct *ring, u32 type) +{ + 
struct rx_bd **rx_desc_ring; + u32 prod; + int i; + + rx_desc_ring = (struct rx_bd **)ring->ring_mem.pg_arr; + for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { + struct rx_bd *rxbd = rx_desc_ring[i]; + int j; + + for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { + rxbd->rx_bd_len_flags_type = cpu_to_le32(type); + rxbd->rx_bd_opaque = prod; + } + } +} + +static void bnge_init_one_rx_ring_rxbd(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr) +{ + struct bnge_ring_struct *ring; + u32 type; + + type = (bn->rx_buf_use_size << RX_BD_LEN_SHIFT) | + RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; + + if (NET_IP_ALIGN == 2) + type |= RX_BD_FLAGS_SOP; + + ring = &rxr->rx_ring_struct; + bnge_init_rxbd_pages(ring, type); + ring->fw_ring_id = INVALID_HW_RING_ID; +} + +static void bnge_init_one_agg_ring_rxbd(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr) +{ + struct bnge_ring_struct *ring; + u32 type; + + ring = &rxr->rx_agg_ring_struct; + ring->fw_ring_id = INVALID_HW_RING_ID; + if (bnge_is_agg_reqd(bn->bd)) { + type = ((u32)BNGE_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | + RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; + + bnge_init_rxbd_pages(ring, type); + } +} + +static void bnge_init_one_rx_ring_pair(struct bnge_net *bn, int ring_nr) +{ + struct bnge_rx_ring_info *rxr; + + rxr = &bn->rx_ring[ring_nr]; + bnge_init_one_rx_ring_rxbd(bn, rxr); + + netif_queue_set_napi(bn->netdev, ring_nr, NETDEV_QUEUE_TYPE_RX, + &rxr->bnapi->napi); + + bnge_init_one_agg_ring_rxbd(bn, rxr); +} + +static int bnge_alloc_rx_ring_pair_bufs(struct bnge_net *bn) +{ + int i, rc; + + for (i = 0; i < bn->bd->rx_nr_rings; i++) { + rc = bnge_alloc_one_rx_ring_pair_bufs(bn, i); + if (rc) + goto err_free_rx_ring_pair_bufs; + } + return 0; + +err_free_rx_ring_pair_bufs: + bnge_free_rx_ring_pair_bufs(bn); + return rc; +} + +static void bnge_init_rx_rings(struct bnge_net *bn) +{ + int i; + +#define BNGE_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) +#define BNGE_RX_DMA_OFFSET NET_SKB_PAD + bn->rx_offset = BNGE_RX_OFFSET; + bn->rx_dma_offset = BNGE_RX_DMA_OFFSET; + + for (i = 0; i < bn->bd->rx_nr_rings; i++) + bnge_init_one_rx_ring_pair(bn, i); +} + +static void bnge_init_tx_rings(struct bnge_net *bn) +{ + int i; + + bn->tx_wake_thresh = max(bn->tx_ring_size / 2, BNGE_MIN_TX_DESC_CNT); + + for (i = 0; i < bn->bd->tx_nr_rings; i++) { + struct bnge_tx_ring_info *txr = &bn->tx_ring[i]; + struct bnge_ring_struct *ring = &txr->tx_ring_struct; + + ring->fw_ring_id = INVALID_HW_RING_ID; + + netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, + &txr->bnapi->napi); + } +} + +static void bnge_init_vnics(struct bnge_net *bn) +{ + struct bnge_vnic_info *vnic0 = &bn->vnic_info[BNGE_VNIC_DEFAULT]; + int i; + + for (i = 0; i < bn->nr_vnics; i++) { + struct bnge_vnic_info *vnic = &bn->vnic_info[i]; + int j; + + vnic->fw_vnic_id = INVALID_HW_RING_ID; + vnic->vnic_id = i; + for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++) + vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; + + if (bn->vnic_info[i].rss_hash_key) { + if (i == BNGE_VNIC_DEFAULT) { + u8 *key = (void *)vnic->rss_hash_key; + int k; + + if (!bn->rss_hash_key_valid && + !bn->rss_hash_key_updated) { + get_random_bytes(bn->rss_hash_key, + HW_HASH_KEY_SIZE); + bn->rss_hash_key_updated = true; + } + + memcpy(vnic->rss_hash_key, bn->rss_hash_key, + HW_HASH_KEY_SIZE); + + if (!bn->rss_hash_key_updated) + continue; + + bn->rss_hash_key_updated = false; + bn->rss_hash_key_valid = true; + + bn->toeplitz_prefix = 0; + for (k = 0; k < 8; k++) { + bn->toeplitz_prefix <<= 8; + bn->toeplitz_prefix |= key[k]; + } 
+ } else { + memcpy(vnic->rss_hash_key, vnic0->rss_hash_key, + HW_HASH_KEY_SIZE); + } + } + } +} + +static void bnge_set_db_mask(struct bnge_net *bn, struct bnge_db_info *db, + u32 ring_type) +{ + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + db->db_ring_mask = bn->tx_ring_mask; + break; + case HWRM_RING_ALLOC_RX: + db->db_ring_mask = bn->rx_ring_mask; + break; + case HWRM_RING_ALLOC_AGG: + db->db_ring_mask = bn->rx_agg_ring_mask; + break; + case HWRM_RING_ALLOC_CMPL: + case HWRM_RING_ALLOC_NQ: + db->db_ring_mask = bn->cp_ring_mask; + break; + } + db->db_epoch_mask = db->db_ring_mask + 1; + db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask); +} + +static void bnge_set_db(struct bnge_net *bn, struct bnge_db_info *db, + u32 ring_type, u32 map_idx, u32 xid) +{ + struct bnge_dev *bd = bn->bd; + + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; + break; + case HWRM_RING_ALLOC_RX: + case HWRM_RING_ALLOC_AGG: + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; + break; + case HWRM_RING_ALLOC_CMPL: + db->db_key64 = DBR_PATH_L2; + break; + case HWRM_RING_ALLOC_NQ: + db->db_key64 = DBR_PATH_L2; + break; + } + db->db_key64 |= ((u64)xid << DBR_XID_SFT) | DBR_VALID; + + db->doorbell = bd->bar1 + bd->db_offset; + bnge_set_db_mask(bn, db, ring_type); +} + +static int bnge_hwrm_cp_ring_alloc(struct bnge_net *bn, + struct bnge_cp_ring_info *cpr) +{ + const u32 type = HWRM_RING_ALLOC_CMPL; + struct bnge_napi *bnapi = cpr->bnapi; + struct bnge_ring_struct *ring; + u32 map_idx = bnapi->index; + int rc; + + ring = &cpr->ring_struct; + ring->handle = BNGE_SET_NQ_HDL(cpr); + rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx); + if (rc) + return rc; + + bnge_set_db(bn, &cpr->cp_db, type, map_idx, ring->fw_ring_id); + bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons); + + return 0; +} + +static int bnge_hwrm_tx_ring_alloc(struct bnge_net *bn, + struct bnge_tx_ring_info *txr, u32 tx_idx) +{ + struct bnge_ring_struct *ring = &txr->tx_ring_struct; + const u32 type = HWRM_RING_ALLOC_TX; + int rc; + + rc = hwrm_ring_alloc_send_msg(bn, ring, type, tx_idx); + if (rc) + return rc; + + bnge_set_db(bn, &txr->tx_db, type, tx_idx, ring->fw_ring_id); + + return 0; +} + +static int bnge_hwrm_rx_agg_ring_alloc(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr) +{ + struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct; + u32 type = HWRM_RING_ALLOC_AGG; + struct bnge_dev *bd = bn->bd; + u32 grp_idx = ring->grp_idx; + u32 map_idx; + int rc; + + map_idx = grp_idx + bd->rx_nr_rings; + rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx); + if (rc) + return rc; + + bnge_set_db(bn, &rxr->rx_agg_db, type, map_idx, + ring->fw_ring_id); + bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod); + bn->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; + + return 0; +} + +static int bnge_hwrm_rx_ring_alloc(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr) +{ + struct bnge_ring_struct *ring = &rxr->rx_ring_struct; + struct bnge_napi *bnapi = rxr->bnapi; + u32 type = HWRM_RING_ALLOC_RX; + u32 map_idx = bnapi->index; + int rc; + + rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx); + if (rc) + return rc; + + bnge_set_db(bn, &rxr->rx_db, type, map_idx, ring->fw_ring_id); + bn->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; + + return 0; +} + +static int bnge_hwrm_ring_alloc(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + bool agg_rings; + int i, rc = 0; + + agg_rings = !!(bnge_is_agg_reqd(bd)); + for (i = 0; 
i < bd->nq_nr_rings; i++) { + struct bnge_napi *bnapi = bn->bnapi[i]; + struct bnge_nq_ring_info *nqr = &bnapi->nq_ring; + struct bnge_ring_struct *ring = &nqr->ring_struct; + u32 type = HWRM_RING_ALLOC_NQ; + u32 map_idx = ring->map_idx; + unsigned int vector; + + vector = bd->irq_tbl[map_idx].vector; + disable_irq_nosync(vector); + rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx); + if (rc) { + enable_irq(vector); + goto err_out; + } + bnge_set_db(bn, &nqr->nq_db, type, map_idx, ring->fw_ring_id); + bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons); + enable_irq(vector); + bn->grp_info[i].nq_fw_ring_id = ring->fw_ring_id; + + if (!i) { + rc = bnge_hwrm_set_async_event_cr(bd, ring->fw_ring_id); + if (rc) + netdev_warn(bn->netdev, "Failed to set async event completion ring.\n"); + } + } + + for (i = 0; i < bd->tx_nr_rings; i++) { + struct bnge_tx_ring_info *txr = &bn->tx_ring[i]; + + rc = bnge_hwrm_cp_ring_alloc(bn, txr->tx_cpr); + if (rc) + goto err_out; + rc = bnge_hwrm_tx_ring_alloc(bn, txr, i); + if (rc) + goto err_out; + } + + for (i = 0; i < bd->rx_nr_rings; i++) { + struct bnge_rx_ring_info *rxr = &bn->rx_ring[i]; + struct bnge_cp_ring_info *cpr; + struct bnge_ring_struct *ring; + struct bnge_napi *bnapi; + u32 map_idx, type; + + rc = bnge_hwrm_rx_ring_alloc(bn, rxr); + if (rc) + goto err_out; + /* If we have agg rings, post agg buffers first. */ + if (!agg_rings) + bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod); + + cpr = rxr->rx_cpr; + bnapi = rxr->bnapi; + type = HWRM_RING_ALLOC_CMPL; + map_idx = bnapi->index; + + ring = &cpr->ring_struct; + ring->handle = BNGE_SET_NQ_HDL(cpr); + rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx); + if (rc) + goto err_out; + bnge_set_db(bn, &cpr->cp_db, type, map_idx, + ring->fw_ring_id); + bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons); + } + + if (agg_rings) { + for (i = 0; i < bd->rx_nr_rings; i++) { + rc = bnge_hwrm_rx_agg_ring_alloc(bn, &bn->rx_ring[i]); + if (rc) + goto err_out; + } + } +err_out: + return rc; +} + +void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic) +{ + __le16 *ring_tbl = vnic->rss_table; + struct bnge_rx_ring_info *rxr; + struct bnge_dev *bd = bn->bd; + u16 tbl_size, i; + + tbl_size = bnge_get_rxfh_indir_size(bd); + + for (i = 0; i < tbl_size; i++) { + u16 ring_id, j; + + j = bd->rss_indir_tbl[i]; + rxr = &bn->rx_ring[j]; + + ring_id = rxr->rx_ring_struct.fw_ring_id; + *ring_tbl++ = cpu_to_le16(ring_id); + ring_id = bnge_cp_ring_for_rx(rxr); + *ring_tbl++ = cpu_to_le16(ring_id); + } +} + +static int bnge_hwrm_vnic_rss_cfg(struct bnge_net *bn, + struct bnge_vnic_info *vnic) +{ + int rc; + + rc = bnge_hwrm_vnic_set_rss(bn, vnic, true); + if (rc) { + netdev_err(bn->netdev, "hwrm vnic %d set rss failure rc: %d\n", + vnic->vnic_id, rc); + return rc; + } + rc = bnge_hwrm_vnic_cfg(bn, vnic); + if (rc) + netdev_err(bn->netdev, "hwrm vnic %d cfg failure rc: %d\n", + vnic->vnic_id, rc); + return rc; +} + +static int bnge_setup_vnic(struct bnge_net *bn, struct bnge_vnic_info *vnic) +{ + struct bnge_dev *bd = bn->bd; + int rc, i, nr_ctxs; + + nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings); + for (i = 0; i < nr_ctxs; i++) { + rc = bnge_hwrm_vnic_ctx_alloc(bd, vnic, i); + if (rc) { + netdev_err(bn->netdev, "hwrm vnic %d ctx %d alloc failure rc: %d\n", + vnic->vnic_id, i, rc); + return -ENOMEM; + } + bn->rsscos_nr_ctxs++; + } + + rc = bnge_hwrm_vnic_rss_cfg(bn, vnic); + if (rc) + return rc; + + if (bnge_is_agg_reqd(bd)) { + rc = bnge_hwrm_vnic_set_hds(bn, vnic); + if (rc) + netdev_err(bn->netdev, "hwrm 
vnic %d set hds failure rc: %d\n", + vnic->vnic_id, rc); + } + return rc; +} + +static void bnge_del_l2_filter(struct bnge_net *bn, struct bnge_l2_filter *fltr) +{ + if (!refcount_dec_and_test(&fltr->refcnt)) + return; + hlist_del_rcu(&fltr->base.hash); + kfree_rcu(fltr, base.rcu); +} + +static void bnge_init_l2_filter(struct bnge_net *bn, + struct bnge_l2_filter *fltr, + struct bnge_l2_key *key, u32 idx) +{ + struct hlist_head *head; + + ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr); + fltr->l2_key.vlan = key->vlan; + fltr->base.type = BNGE_FLTR_TYPE_L2; + + head = &bn->l2_fltr_hash_tbl[idx]; + hlist_add_head_rcu(&fltr->base.hash, head); + refcount_set(&fltr->refcnt, 1); +} + +static struct bnge_l2_filter *__bnge_lookup_l2_filter(struct bnge_net *bn, + struct bnge_l2_key *key, + u32 idx) +{ + struct bnge_l2_filter *fltr; + struct hlist_head *head; + + head = &bn->l2_fltr_hash_tbl[idx]; + hlist_for_each_entry_rcu(fltr, head, base.hash) { + struct bnge_l2_key *l2_key = &fltr->l2_key; + + if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) && + l2_key->vlan == key->vlan) + return fltr; + } + return NULL; +} + +static struct bnge_l2_filter *bnge_lookup_l2_filter(struct bnge_net *bn, + struct bnge_l2_key *key, + u32 idx) +{ + struct bnge_l2_filter *fltr; + + rcu_read_lock(); + fltr = __bnge_lookup_l2_filter(bn, key, idx); + if (fltr) + refcount_inc(&fltr->refcnt); + rcu_read_unlock(); + return fltr; +} + +static struct bnge_l2_filter *bnge_alloc_l2_filter(struct bnge_net *bn, + struct bnge_l2_key *key, + gfp_t gfp) +{ + struct bnge_l2_filter *fltr; + u32 idx; + + idx = jhash2(&key->filter_key, BNGE_L2_KEY_SIZE, bn->hash_seed) & + BNGE_L2_FLTR_HASH_MASK; + fltr = bnge_lookup_l2_filter(bn, key, idx); + if (fltr) + return fltr; + + fltr = kzalloc(sizeof(*fltr), gfp); + if (!fltr) + return ERR_PTR(-ENOMEM); + + bnge_init_l2_filter(bn, fltr, key, idx); + return fltr; +} + +static int bnge_hwrm_set_vnic_filter(struct bnge_net *bn, u16 vnic_id, u16 idx, + const u8 *mac_addr) +{ + struct bnge_l2_filter *fltr; + struct bnge_l2_key key; + int rc; + + ether_addr_copy(key.dst_mac_addr, mac_addr); + key.vlan = 0; + fltr = bnge_alloc_l2_filter(bn, &key, GFP_KERNEL); + if (IS_ERR(fltr)) + return PTR_ERR(fltr); + + fltr->base.fw_vnic_id = bn->vnic_info[vnic_id].fw_vnic_id; + rc = bnge_hwrm_l2_filter_alloc(bn->bd, fltr); + if (rc) + goto err_del_l2_filter; + bn->vnic_info[vnic_id].l2_filters[idx] = fltr; + return rc; + +err_del_l2_filter: + bnge_del_l2_filter(bn, fltr); + return rc; +} + +static bool bnge_mc_list_updated(struct bnge_net *bn, u32 *rx_mask) +{ + struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT]; + struct net_device *dev = bn->netdev; + struct netdev_hw_addr *ha; + int mc_count = 0, off = 0; + bool update = false; + u8 *haddr; + + netdev_for_each_mc_addr(ha, dev) { + if (mc_count >= BNGE_MAX_MC_ADDRS) { + *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + return false; + } + haddr = ha->addr; + if (!ether_addr_equal(haddr, vnic->mc_list + off)) { + memcpy(vnic->mc_list + off, haddr, ETH_ALEN); + update = true; + } + off += ETH_ALEN; + mc_count++; + } + if (mc_count) + *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; + + if (mc_count != vnic->mc_list_count) { + vnic->mc_list_count = mc_count; + update = true; + } + return update; +} + +static bool bnge_uc_list_updated(struct bnge_net *bn) +{ + struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT]; + struct net_device *dev = bn->netdev; + struct netdev_hw_addr *ha; + int 
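The L2 filter table above follows the usual RCU hash pattern: jhash2() over the packed 8-byte key (MAC plus VLAN, i.e. BNGE_L2_KEY_SIZE == 2 words), a lockless lookup that takes a reference on hit, and kfree_rcu() once the last reference drops. A sketch of the caller-side lifetime contract, assuming the static helpers above were reachable (the cycle function is hypothetical):

static int bnge_example_filter_cycle(struct bnge_net *bn, const u8 *mac)
{
	struct bnge_l2_filter *fltr;
	struct bnge_l2_key key;

	ether_addr_copy(key.dst_mac_addr, mac);
	key.vlan = 0;

	/* returns with a reference held, whether found or newly added */
	fltr = bnge_alloc_l2_filter(bn, &key, GFP_KERNEL);
	if (IS_ERR(fltr))
		return PTR_ERR(fltr);

	/* ... program it into the VNIC via HWRM ... */

	bnge_del_l2_filter(bn, fltr);	/* put; freed on last reference */
	return 0;
}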
off = 0; + + if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) + return true; + + netdev_for_each_uc_addr(ha, dev) { + if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) + return true; + + off += ETH_ALEN; + } + return false; +} + +static bool bnge_promisc_ok(struct bnge_net *bn) +{ + return true; +} + +static int bnge_cfg_def_vnic(struct bnge_net *bn) +{ + struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT]; + struct net_device *dev = bn->netdev; + struct bnge_dev *bd = bn->bd; + struct netdev_hw_addr *ha; + int i, off = 0, rc; + bool uc_update; + + netif_addr_lock_bh(dev); + uc_update = bnge_uc_list_updated(bn); + netif_addr_unlock_bh(dev); + + if (!uc_update) + goto skip_uc; + + for (i = 1; i < vnic->uc_filter_count; i++) { + struct bnge_l2_filter *fltr = vnic->l2_filters[i]; + + bnge_hwrm_l2_filter_free(bd, fltr); + bnge_del_l2_filter(bn, fltr); + } + + vnic->uc_filter_count = 1; + + netif_addr_lock_bh(dev); + if (netdev_uc_count(dev) > (BNGE_MAX_UC_ADDRS - 1)) { + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + } else { + netdev_for_each_uc_addr(ha, dev) { + memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); + off += ETH_ALEN; + vnic->uc_filter_count++; + } + } + netif_addr_unlock_bh(dev); + + for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { + rc = bnge_hwrm_set_vnic_filter(bn, 0, i, vnic->uc_list + off); + if (rc) { + netdev_err(dev, "HWRM vnic filter failure rc: %d\n", rc); + vnic->uc_filter_count = i; + return rc; + } + } + +skip_uc: + if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && + !bnge_promisc_ok(bn)) + vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic); + if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { + netdev_info(dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", + rc); + vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic); + } + if (rc) + netdev_err(dev, "HWRM cfa l2 rx mask failure rc: %d\n", + rc); + + return rc; +} + +static void bnge_hwrm_vnic_free(struct bnge_net *bn) +{ + int i; + + for (i = 0; i < bn->nr_vnics; i++) + bnge_hwrm_vnic_free_one(bn->bd, &bn->vnic_info[i]); +} + +static void bnge_hwrm_vnic_ctx_free(struct bnge_net *bn) +{ + int i, j; + + for (i = 0; i < bn->nr_vnics; i++) { + struct bnge_vnic_info *vnic = &bn->vnic_info[i]; + + for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++) { + if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) + bnge_hwrm_vnic_ctx_free_one(bn->bd, vnic, j); + } + } + bn->rsscos_nr_ctxs = 0; +} + +static void bnge_hwrm_clear_vnic_filter(struct bnge_net *bn) +{ + struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT]; + int i; + + for (i = 0; i < vnic->uc_filter_count; i++) { + struct bnge_l2_filter *fltr = vnic->l2_filters[i]; + + bnge_hwrm_l2_filter_free(bn->bd, fltr); + bnge_del_l2_filter(bn, fltr); + } + + vnic->uc_filter_count = 0; +} + +static void bnge_clear_vnic(struct bnge_net *bn) +{ + bnge_hwrm_clear_vnic_filter(bn); + bnge_hwrm_vnic_free(bn); + bnge_hwrm_vnic_ctx_free(bn); +} + +static void bnge_hwrm_rx_ring_free(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr, + bool close_path) +{ + struct bnge_ring_struct *ring = &rxr->rx_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 cmpl_ring_id; + + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return; + + cmpl_ring_id = bnge_cp_ring_for_rx(rxr); + hwrm_ring_free_send_msg(bn, 
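bnge_cfg_def_vnic() resolves address-list overflow with a fixed fallback ladder. A condensed restatement (bnge_example_rx_mask() is illustrative, not driver code), matching the BNGE_MAX_UC_ADDRS and BNGE_MAX_MC_ADDRS limits used above and in bnge_mc_list_updated():

static u32 bnge_example_rx_mask(unsigned int uc_cnt, unsigned int mc_cnt)
{
	u32 mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	/* slot 0 is reserved for dev_addr, hence the "- 1" */
	if (uc_cnt > BNGE_MAX_UC_ADDRS - 1)
		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (mc_cnt >= BNGE_MAX_MC_ADDRS)
		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
	else if (mc_cnt)
		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;

	return mask;
}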
ring, + RING_FREE_REQ_RING_TYPE_RX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bn->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID; +} + +static void bnge_hwrm_rx_agg_ring_free(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr, + bool close_path) +{ + struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 cmpl_ring_id; + + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return; + + cmpl_ring_id = bnge_cp_ring_for_rx(rxr); + hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_RX_AGG, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bn->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID; +} + +static void bnge_hwrm_tx_ring_free(struct bnge_net *bn, + struct bnge_tx_ring_info *txr, + bool close_path) +{ + struct bnge_ring_struct *ring = &txr->tx_ring_struct; + u32 cmpl_ring_id; + + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return; + + cmpl_ring_id = close_path ? bnge_cp_ring_for_tx(txr) : + INVALID_HW_RING_ID; + hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_TX, + cmpl_ring_id); + ring->fw_ring_id = INVALID_HW_RING_ID; +} + +static void bnge_hwrm_cp_ring_free(struct bnge_net *bn, + struct bnge_cp_ring_info *cpr) +{ + struct bnge_ring_struct *ring; + + ring = &cpr->ring_struct; + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return; + + hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL, + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; +} + +static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path) +{ + struct bnge_dev *bd = bn->bd; + int i; + + if (!bn->bnapi) + return; + + for (i = 0; i < bd->tx_nr_rings; i++) + bnge_hwrm_tx_ring_free(bn, &bn->tx_ring[i], close_path); + + for (i = 0; i < bd->rx_nr_rings; i++) { + bnge_hwrm_rx_ring_free(bn, &bn->rx_ring[i], close_path); + bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path); + } + + for (i = 0; i < bd->nq_nr_rings; i++) { + struct bnge_napi *bnapi = bn->bnapi[i]; + struct bnge_nq_ring_info *nqr; + struct bnge_ring_struct *ring; + int j; + + nqr = &bnapi->nq_ring; + for (j = 0; j < nqr->cp_ring_count && nqr->cp_ring_arr; j++) + bnge_hwrm_cp_ring_free(bn, &nqr->cp_ring_arr[j]); + + ring = &nqr->ring_struct; + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg(bn, ring, + RING_FREE_REQ_RING_TYPE_NQ, + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID; + } + } +} + +static void bnge_setup_msix(struct bnge_net *bn) +{ + struct net_device *dev = bn->netdev; + struct bnge_dev *bd = bn->bd; + int len, i; + + len = sizeof(bd->irq_tbl[0].name); + for (i = 0; i < bd->nq_nr_rings; i++) { + int map_idx = bnge_cp_num_to_irq_num(bn, i); + char *attr; + + if (bd->flags & BNGE_EN_SHARED_CHNL) + attr = "TxRx"; + else if (i < bd->rx_nr_rings) + attr = "rx"; + else + attr = "tx"; + + snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, + attr, i); + bd->irq_tbl[map_idx].handler = bnge_msix; + } +} + +static int bnge_setup_interrupts(struct bnge_net *bn) +{ + struct net_device *dev = bn->netdev; + struct bnge_dev *bd = bn->bd; + + bnge_setup_msix(bn); + + return netif_set_real_num_queues(dev, bd->tx_nr_rings, bd->rx_nr_rings); +} + +static void bnge_hwrm_resource_free(struct bnge_net *bn, bool close_path) +{ + bnge_clear_vnic(bn); + bnge_hwrm_ring_free(bn, close_path); + bnge_hwrm_stat_ctx_free(bn); +} + +static void bnge_free_irq(struct bnge_net 
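bnge_setup_msix() derives the per-vector names that later show up in /proc/interrupts. A small sketch of the resulting strings (the helper and the "eth0" device name are placeholders; the format string is the one used above):

static void bnge_example_irq_name(char *buf, size_t len, int i, bool shared)
{
	/* shared completion rings: "eth0-TxRx-0", "eth0-TxRx-1", ...;
	 * separate rings: "eth0-rx-N" for the first rx_nr_rings
	 * vectors, then "eth0-tx-N" for the rest
	 */
	snprintf(buf, len, "%s-%s-%d", "eth0", shared ? "TxRx" : "rx", i);
}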
*bn) +{ + struct bnge_dev *bd = bn->bd; + struct bnge_irq *irq; + int i; + + for (i = 0; i < bd->nq_nr_rings; i++) { + int map_idx = bnge_cp_num_to_irq_num(bn, i); + + irq = &bd->irq_tbl[map_idx]; + if (irq->requested) { + if (irq->have_cpumask) { + irq_set_affinity_hint(irq->vector, NULL); + free_cpumask_var(irq->cpu_mask); + irq->have_cpumask = 0; + } + free_irq(irq->vector, bn->bnapi[i]); + } + + irq->requested = 0; + } +} + +static int bnge_request_irq(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i, rc; + + rc = bnge_setup_interrupts(bn); + if (rc) { + netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc); + return rc; + } + for (i = 0; i < bd->nq_nr_rings; i++) { + int map_idx = bnge_cp_num_to_irq_num(bn, i); + struct bnge_irq *irq = &bd->irq_tbl[map_idx]; + + rc = request_irq(irq->vector, irq->handler, 0, irq->name, + bn->bnapi[i]); + if (rc) + goto err_free_irq; + + netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector); + irq->requested = 1; + + if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { + int numa_node = dev_to_node(&bd->pdev->dev); + + irq->have_cpumask = 1; + cpumask_set_cpu(cpumask_local_spread(i, numa_node), + irq->cpu_mask); + rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); + if (rc) { + netdev_warn(bn->netdev, + "Set affinity failed, IRQ = %d\n", + irq->vector); + goto err_free_irq; + } + } + } + return 0; + +err_free_irq: + bnge_free_irq(bn); + return rc; +} + +static int bnge_init_chip(struct bnge_net *bn) +{ + struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT]; + struct bnge_dev *bd = bn->bd; + int rc; + +#define BNGE_DEF_STATS_COAL_TICKS 1000000 + bn->stats_coal_ticks = BNGE_DEF_STATS_COAL_TICKS; + + rc = bnge_hwrm_stat_ctx_alloc(bn); + if (rc) { + netdev_err(bn->netdev, "hwrm stat ctx alloc failure rc: %d\n", rc); + goto err_out; + } + + rc = bnge_hwrm_ring_alloc(bn); + if (rc) { + netdev_err(bn->netdev, "hwrm ring alloc failure rc: %d\n", rc); + goto err_out; + } + + rc = bnge_hwrm_vnic_alloc(bd, vnic, bd->rx_nr_rings); + if (rc) { + netdev_err(bn->netdev, "hwrm vnic alloc failure rc: %d\n", rc); + goto err_out; + } + + rc = bnge_setup_vnic(bn, vnic); + if (rc) + goto err_out; + + if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA) + bnge_hwrm_update_rss_hash_cfg(bn); + + /* Filter for default vnic 0 */ + rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr); + if (rc) { + netdev_err(bn->netdev, "HWRM vnic filter failure rc: %d\n", rc); + goto err_out; + } + vnic->uc_filter_count = 1; + + vnic->rx_mask = 0; + + if (bn->netdev->flags & IFF_BROADCAST) + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + + if (bn->netdev->flags & IFF_PROMISC) + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + + if (bn->netdev->flags & IFF_ALLMULTI) { + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + } else if (bn->netdev->flags & IFF_MULTICAST) { + u32 mask = 0; + + bnge_mc_list_updated(bn, &mask); + vnic->rx_mask |= mask; + } + + rc = bnge_cfg_def_vnic(bn); + if (rc) + goto err_out; + return 0; + +err_out: + bnge_hwrm_resource_free(bn, 0); + return rc; +} + +static int bnge_napi_poll(struct napi_struct *napi, int budget) +{ + int work_done = 0; + + /* defer NAPI implementation to next patch series */ + napi_complete_done(napi, work_done); + + return work_done; +} + +static void bnge_init_napi(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + struct bnge_napi *bnapi; + int i; + + for (i = 0; i < bd->nq_nr_rings; i++) { + bnapi = bn->bnapi[i]; + 
netif_napi_add_config_locked(bn->netdev, &bnapi->napi, + bnge_napi_poll, bnapi->index); + } +} + +static void bnge_del_napi(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i; + + for (i = 0; i < bd->rx_nr_rings; i++) + netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL); + for (i = 0; i < bd->tx_nr_rings; i++) + netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL); + + for (i = 0; i < bd->nq_nr_rings; i++) { + struct bnge_napi *bnapi = bn->bnapi[i]; + + __netif_napi_del_locked(&bnapi->napi); + } + + /* Wait for RCU grace period after removing NAPI instances */ + synchronize_net(); +} + +static int bnge_init_nic(struct bnge_net *bn) +{ + int rc; + + bnge_init_nq_tree(bn); + + bnge_init_rx_rings(bn); + rc = bnge_alloc_rx_ring_pair_bufs(bn); + if (rc) + return rc; + + bnge_init_tx_rings(bn); + + rc = bnge_init_ring_grps(bn); + if (rc) + goto err_free_rx_ring_pair_bufs; + + bnge_init_vnics(bn); + + rc = bnge_init_chip(bn); + if (rc) + goto err_free_ring_grps; + return rc; + +err_free_ring_grps: + bnge_free_ring_grps(bn); + return rc; + +err_free_rx_ring_pair_bufs: + bnge_free_rx_ring_pair_bufs(bn); + return rc; +} + +static int bnge_open_core(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int rc; + + netif_carrier_off(bn->netdev); + + rc = bnge_reserve_rings(bd); + if (rc) { + netdev_err(bn->netdev, "bnge_reserve_rings err: %d\n", rc); + return rc; + } + + rc = bnge_alloc_core(bn); + if (rc) { + netdev_err(bn->netdev, "bnge_alloc_core err: %d\n", rc); + return rc; + } + + bnge_init_napi(bn); + rc = bnge_request_irq(bn); + if (rc) { + netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc); + goto err_del_napi; + } + + rc = bnge_init_nic(bn); + if (rc) { + netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc); + goto err_free_irq; + } + set_bit(BNGE_STATE_OPEN, &bd->state); + return 0; + +err_free_irq: + bnge_free_irq(bn); +err_del_napi: + bnge_del_napi(bn); + bnge_free_core(bn); + return rc; +} static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -28,11 +2212,42 @@ static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev) static int bnge_open(struct net_device *dev) { + struct bnge_net *bn = netdev_priv(dev); + int rc; + + rc = bnge_open_core(bn); + if (rc) + netdev_err(dev, "bnge_open_core err: %d\n", rc); + + return rc; +} + +static int bnge_shutdown_nic(struct bnge_net *bn) +{ + /* TODO: close_path = 0 until we make NAPI functional */ + bnge_hwrm_resource_free(bn, 0); return 0; } +static void bnge_close_core(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + + clear_bit(BNGE_STATE_OPEN, &bd->state); + bnge_shutdown_nic(bn); + bnge_free_all_rings_bufs(bn); + bnge_free_irq(bn); + bnge_del_napi(bn); + + bnge_free_core(bn); +} + static int bnge_close(struct net_device *dev) { + struct bnge_net *bn = netdev_priv(dev); + + bnge_close_core(bn); + return 0; } @@ -238,6 +2453,7 @@ int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs) bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE; bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE; + bn->rx_dir = DMA_FROM_DEVICE; bnge_set_tpa_flags(bd); bnge_set_ring_params(bd); @@ -245,6 +2461,7 @@ int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs) bnge_init_l2_fltr_tbl(bn); bnge_init_mac_addr(bd); + netdev->request_ops_lock = true; rc = register_netdev(netdev); if (rc) { dev_err(bd->dev, "Register netdev failed rc: %d\n", rc); diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h index 
a650d71a58db..fb3b961536ba 100644 --- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h +++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h @@ -5,6 +5,9 @@ #define _BNGE_NETDEV_H_ #include <linux/bnxt/hsi.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/refcount.h> +#include "bnge_db.h" struct tx_bd { __le32 tx_bd_len_flags_type; @@ -113,11 +116,25 @@ struct bnge_sw_rx_bd { }; struct bnge_sw_rx_agg_bd { - struct page *page; + netmem_ref netmem; unsigned int offset; dma_addr_t mapping; }; +#define HWRM_RING_ALLOC_TX 0x1 +#define HWRM_RING_ALLOC_RX 0x2 +#define HWRM_RING_ALLOC_AGG 0x4 +#define HWRM_RING_ALLOC_CMPL 0x8 +#define HWRM_RING_ALLOC_NQ 0x10 + +struct bnge_ring_grp_info { + u16 fw_stats_ctx; + u16 fw_grp_id; + u16 rx_fw_ring_id; + u16 agg_fw_ring_id; + u16 nq_fw_ring_id; +}; + #define BNGE_RX_COPY_THRESH 256 #define BNGE_HW_FEATURE_VLAN_ALL_RX \ @@ -133,6 +150,32 @@ enum { #define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO) +/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra + * BD because the first TX BD is always a long BD. + */ +#define BNGE_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2) + +#define RX_RING(bn, x) (((x) & (bn)->rx_ring_mask) >> (BNGE_PAGE_SHIFT - 4)) +#define RX_AGG_RING(bn, x) (((x) & (bn)->rx_agg_ring_mask) >> \ + (BNGE_PAGE_SHIFT - 4)) +#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1)) + +#define TX_RING(bn, x) (((x) & (bn)->tx_ring_mask) >> (BNGE_PAGE_SHIFT - 4)) +#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1)) + +#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNGE_PAGE_SHIFT - 4)) +#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1)) + +#define RING_RX(bn, idx) ((idx) & (bn)->rx_ring_mask) +#define NEXT_RX(idx) ((idx) + 1) + +#define RING_RX_AGG(bn, idx) ((idx) & (bn)->rx_agg_ring_mask) +#define NEXT_RX_AGG(idx) ((idx) + 1) + +#define BNGE_NQ_HDL_TYPE_SHIFT 24 +#define BNGE_NQ_HDL_TYPE_RX 0x00 +#define BNGE_NQ_HDL_TYPE_TX 0x01 + struct bnge_net { struct bnge_dev *bd; struct net_device *netdev; @@ -164,6 +207,30 @@ struct bnge_net { struct hlist_head l2_fltr_hash_tbl[BNGE_L2_FLTR_HASH_SIZE]; u32 hash_seed; u64 toeplitz_prefix; + + struct bnge_napi **bnapi; + + struct bnge_rx_ring_info *rx_ring; + struct bnge_tx_ring_info *tx_ring; + + u16 *tx_ring_map; + enum dma_data_direction rx_dir; + + /* grp_info indexed by napi/nq index */ + struct bnge_ring_grp_info *grp_info; + struct bnge_vnic_info *vnic_info; + int nr_vnics; + int total_irqs; + + u32 tx_wake_thresh; + u16 rx_offset; + u16 rx_dma_offset; + + u8 rss_hash_key[HW_HASH_KEY_SIZE]; + u8 rss_hash_key_valid:1; + u8 rss_hash_key_updated:1; + int rsscos_nr_ctxs; + u32 stats_coal_ticks; }; #define BNGE_DEFAULT_RX_RING_SIZE 511 @@ -203,4 +270,185 @@ void bnge_set_ring_params(struct bnge_dev *bd); #define BNGE_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1) #define BNGE_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1) +#define BNGE_MAX_TXR_PER_NAPI 8 + +#define bnge_for_each_napi_tx(iter, bnapi, txr) \ + for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \ + txr = (iter < BNGE_MAX_TXR_PER_NAPI - 1) ? 
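The ring indexing macros split a monotonically increasing software index into a descriptor-page number and a slot within that page. Assuming 4 KiB pages (BNGE_PAGE_SHIFT = 12) and 16-byte BDs, each page holds 4096 / 16 = 256 descriptors; a worked example in a hypothetical helper:

static void bnge_example_ring_index(void)
{
	u32 sw_idx = 600;		/* already masked to ring size  */
	u32 page = sw_idx >> (12 - 4);	/* RX_RING(): 600 / 256 = 2     */
	u32 slot = sw_idx & (256 - 1);	/* RX_IDX():  600 % 256 = 88    */

	/* the descriptor lives at rx_desc_ring[page][slot] */
	(void)page;
	(void)slot;
}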
\ + (bnapi)->tx_ring[++iter] : NULL) + +#define BNGE_SET_NQ_HDL(cpr) \ + (((cpr)->cp_ring_type << BNGE_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx) + +struct bnge_stats_mem { + u64 *sw_stats; + u64 *hw_masks; + void *hw_stats; + dma_addr_t hw_stats_map; + int len; +}; + +struct bnge_cp_ring_info { + struct bnge_napi *bnapi; + dma_addr_t *desc_mapping; + struct tx_cmp **desc_ring; + struct bnge_ring_struct ring_struct; + u8 cp_ring_type; + u8 cp_idx; + u32 cp_raw_cons; + struct bnge_db_info cp_db; +}; + +struct bnge_nq_ring_info { + struct bnge_napi *bnapi; + dma_addr_t *desc_mapping; + struct nqe_cn **desc_ring; + struct bnge_ring_struct ring_struct; + u32 nq_raw_cons; + struct bnge_db_info nq_db; + + struct bnge_stats_mem stats; + u32 hw_stats_ctx_id; + + int cp_ring_count; + struct bnge_cp_ring_info *cp_ring_arr; +}; + +struct bnge_rx_ring_info { + struct bnge_napi *bnapi; + struct bnge_cp_ring_info *rx_cpr; + u16 rx_prod; + u16 rx_agg_prod; + u16 rx_sw_agg_prod; + u16 rx_next_cons; + struct bnge_db_info rx_db; + struct bnge_db_info rx_agg_db; + + struct rx_bd *rx_desc_ring[MAX_RX_PAGES]; + struct bnge_sw_rx_bd *rx_buf_ring; + + struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES]; + struct bnge_sw_rx_agg_bd *rx_agg_buf_ring; + + unsigned long *rx_agg_bmap; + u16 rx_agg_bmap_size; + + dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; + dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; + + struct bnge_ring_struct rx_ring_struct; + struct bnge_ring_struct rx_agg_ring_struct; + struct page_pool *page_pool; + struct page_pool *head_pool; + bool need_head_pool; +}; + +struct bnge_tx_ring_info { + struct bnge_napi *bnapi; + struct bnge_cp_ring_info *tx_cpr; + u16 tx_prod; + u16 tx_cons; + u16 tx_hw_cons; + u16 txq_index; + u8 tx_napi_idx; + u8 kick_pending; + struct bnge_db_info tx_db; + + struct tx_bd *tx_desc_ring[MAX_TX_PAGES]; + struct bnge_sw_tx_bd *tx_buf_ring; + + dma_addr_t tx_desc_mapping[MAX_TX_PAGES]; + + u32 dev_state; +#define BNGE_DEV_STATE_CLOSING 0x1 + + struct bnge_ring_struct tx_ring_struct; +}; + +struct bnge_napi { + struct napi_struct napi; + struct bnge_net *bn; + int index; + + struct bnge_nq_ring_info nq_ring; + struct bnge_rx_ring_info *rx_ring; + struct bnge_tx_ring_info *tx_ring[BNGE_MAX_TXR_PER_NAPI]; +}; + +#define INVALID_STATS_CTX_ID -1 +#define BNGE_VNIC_DEFAULT 0 +#define BNGE_MAX_UC_ADDRS 4 + +struct bnge_vnic_info { + u16 fw_vnic_id; +#define BNGE_MAX_CTX_PER_VNIC 8 + u16 fw_rss_cos_lb_ctx[BNGE_MAX_CTX_PER_VNIC]; + u16 mru; + /* index 0 always dev_addr */ + struct bnge_l2_filter *l2_filters[BNGE_MAX_UC_ADDRS]; + u16 uc_filter_count; + u8 *uc_list; + dma_addr_t rss_table_dma_addr; + __le16 *rss_table; + dma_addr_t rss_hash_key_dma_addr; + u64 *rss_hash_key; + int rss_table_size; +#define BNGE_RSS_TABLE_ENTRIES 64 +#define BNGE_RSS_TABLE_SIZE (BNGE_RSS_TABLE_ENTRIES * 4) +#define BNGE_RSS_TABLE_MAX_TBL 8 +#define BNGE_MAX_RSS_TABLE_SIZE \ + (BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL) + u32 rx_mask; + + u8 *mc_list; + int mc_list_size; + int mc_list_count; + dma_addr_t mc_list_mapping; +#define BNGE_MAX_MC_ADDRS 16 + + u32 flags; +#define BNGE_VNIC_RSS_FLAG 1 +#define BNGE_VNIC_MCAST_FLAG 4 +#define BNGE_VNIC_UCAST_FLAG 8 + u32 vnic_id; +}; + +struct bnge_filter_base { + struct hlist_node hash; + struct list_head list; + __le64 filter_id; + u8 type; +#define BNGE_FLTR_TYPE_L2 2 + u8 flags; + u16 rxq; + u16 fw_vnic_id; + u16 vf_idx; + unsigned long state; +#define BNGE_FLTR_VALID 0 +#define BNGE_FLTR_FW_DELETED 2 + + struct rcu_head rcu; +}; + +struct bnge_l2_key { + union { + 
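bnge_for_each_napi_tx() walks the up-to-BNGE_MAX_TXR_PER_NAPI TX rings attached to one NAPI context, stopping at the first NULL slot, so callers never carry an explicit count. A usage sketch (the backlog helper is hypothetical):

static u32 bnge_example_tx_backlog(struct bnge_napi *bnapi)
{
	struct bnge_tx_ring_info *txr;
	u32 pending = 0;
	int iter;

	bnge_for_each_napi_tx(iter, bnapi, txr)
		pending += (u16)(txr->tx_prod - txr->tx_cons);

	return pending;
}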
struct { + u8 dst_mac_addr[ETH_ALEN]; + u16 vlan; + }; + u32 filter_key; + }; +}; + +#define BNGE_L2_KEY_SIZE (sizeof(struct bnge_l2_key) / 4) +struct bnge_l2_filter { + /* base filter must be the first member */ + struct bnge_filter_base base; + struct bnge_l2_key l2_key; + refcount_t refcnt; +}; + +u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr); +u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr); +void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic); #endif /* _BNGE_NETDEV_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c index c79a3607a1b7..62ebe03a0dcf 100644 --- a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c +++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c @@ -46,7 +46,7 @@ static int bnge_aux_get_dflt_msix(struct bnge_dev *bd) return min_t(int, roce_msix, num_online_cpus() + 1); } -static u16 bnge_aux_get_msix(struct bnge_dev *bd) +u16 bnge_aux_get_msix(struct bnge_dev *bd) { if (bnge_is_roce_en(bd)) return bd->aux_num_msix; @@ -164,7 +164,7 @@ static int bnge_adjust_rings(struct bnge_dev *bd, u16 *rx, return bnge_fix_rings_count(rx, tx, max_nq, sh); } -static int bnge_cal_nr_rss_ctxs(u16 rx_rings) +int bnge_cal_nr_rss_ctxs(u16 rx_rings) { if (!rx_rings) return 0; @@ -184,7 +184,7 @@ static u16 bnge_get_total_vnics(struct bnge_dev *bd, u16 rx_rings) return 1; } -static u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd) +u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd) { return bnge_cal_nr_rss_ctxs(bd->rx_nr_rings) * BNGE_RSS_TABLE_ENTRIES; diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.h b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h index 54ef1c7d8822..0d6213b27580 100644 --- a/drivers/net/ethernet/broadcom/bnge/bnge_resc.h +++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h @@ -72,6 +72,8 @@ void bnge_free_irqs(struct bnge_dev *bd); int bnge_net_init_dflt_config(struct bnge_dev *bd); void bnge_net_uninit_dflt_config(struct bnge_dev *bd); void bnge_aux_init_dflt_config(struct bnge_dev *bd); +u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd); +int bnge_cal_nr_rss_ctxs(u16 rx_rings); static inline u32 bnge_adjust_pow_two(u32 total_ent, u16 ent_per_blk) diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c index 52ada65943a0..79f5ce2e5d08 100644 --- a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c +++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c @@ -95,7 +95,7 @@ int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem) &rmem->dma_arr[i], GFP_KERNEL); if (!rmem->pg_arr[i]) - return -ENOMEM; + goto err_free_ring; if (rmem->ctx_mem) bnge_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i], @@ -116,10 +116,13 @@ int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem) if (rmem->vmem_size) { *rmem->vmem = vzalloc(rmem->vmem_size); if (!(*rmem->vmem)) - return -ENOMEM; + goto err_free_ring; } - return 0; + +err_free_ring: + bnge_free_ring(bd, rmem); + return -ENOMEM; } static int bnge_alloc_ctx_one_lvl(struct bnge_dev *bd, @@ -436,3 +439,61 @@ skip_rdma: return 0; } + +void bnge_init_ring_struct(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i, j; + + for (i = 0; i < bd->nq_nr_rings; i++) { + struct bnge_napi *bnapi = bn->bnapi[i]; + struct bnge_ring_mem_info *rmem; + struct bnge_nq_ring_info *nqr; + struct bnge_rx_ring_info *rxr; + struct bnge_tx_ring_info *txr; + struct bnge_ring_struct *ring; + + nqr = &bnapi->nq_ring; + ring = &nqr->ring_struct; + rmem = &ring->ring_mem; 
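bnge_get_rxfh_indir_size() scales the RSS indirection table by the number of contexts, and bnge_fill_hw_rss_tbl() writes two ring IDs per entry. The body of bnge_cal_nr_rss_ctxs() is not fully visible in this hunk; the sketch below assumes the one-context-per-BNGE_RSS_TABLE_ENTRIES rule its callers imply:

static u32 bnge_example_rss_tbl_words(u16 rx_rings)
{
	u32 nr_ctxs = DIV_ROUND_UP(rx_rings, BNGE_RSS_TABLE_ENTRIES);
	u32 entries = nr_ctxs * BNGE_RSS_TABLE_ENTRIES;

	/* e.g. 12 RX rings -> 1 ctx, 64 entries, 128 __le16 words */
	return entries * 2;	/* rx ring id + cp ring id per entry */
}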
+ rmem->nr_pages = bn->cp_nr_pages; + rmem->page_size = HW_CMPD_RING_SIZE; + rmem->pg_arr = (void **)nqr->desc_ring; + rmem->dma_arr = nqr->desc_mapping; + rmem->vmem_size = 0; + + rxr = bnapi->rx_ring; + if (!rxr) + goto skip_rx; + + ring = &rxr->rx_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bn->rx_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)rxr->rx_desc_ring; + rmem->dma_arr = rxr->rx_desc_mapping; + rmem->vmem_size = SW_RXBD_RING_SIZE * bn->rx_nr_pages; + rmem->vmem = (void **)&rxr->rx_buf_ring; + + ring = &rxr->rx_agg_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bn->rx_agg_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; + rmem->dma_arr = rxr->rx_agg_desc_mapping; + rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bn->rx_agg_nr_pages; + rmem->vmem = (void **)&rxr->rx_agg_buf_ring; + +skip_rx: + bnge_for_each_napi_tx(j, bnapi, txr) { + ring = &txr->tx_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bn->tx_nr_pages; + rmem->page_size = HW_TXBD_RING_SIZE; + rmem->pg_arr = (void **)txr->tx_desc_ring; + rmem->dma_arr = txr->tx_desc_mapping; + rmem->vmem_size = SW_TXBD_RING_SIZE * bn->tx_nr_pages; + rmem->vmem = (void **)&txr->tx_buf_ring; + } + } +} diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h index 300f1d8268ef..341c7f81ed09 100644 --- a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h +++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h @@ -6,6 +6,7 @@ struct bnge_ctx_mem_type; struct bnge_dev; +struct bnge_net; #define PTU_PTE_VALID 0x1UL #define PTU_PTE_LAST 0x2UL @@ -180,9 +181,22 @@ struct bnge_ctx_mem_info { struct bnge_ctx_mem_type ctx_arr[BNGE_CTX_V2_MAX]; }; +struct bnge_ring_struct { + struct bnge_ring_mem_info ring_mem; + + u16 fw_ring_id; + union { + u16 grp_idx; + u16 map_idx; /* Used by NQs */ + }; + u32 handle; + u8 queue_id; +}; + int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem); void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem); int bnge_alloc_ctx_mem(struct bnge_dev *bd); void bnge_free_ctx_mem(struct bnge_dev *bd); +void bnge_init_ring_struct(struct bnge_net *bn); #endif /* _BNGE_RMEM_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 207a8bb36ae5..1d0e0e7362bd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -142,6 +142,7 @@ static const struct { [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" }, [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" }, + [NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" }, }; static const struct pci_device_id bnxt_pci_tbl[] = { @@ -217,6 +218,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF }, + { PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV }, { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, #endif { 0 } @@ -263,6 +265,8 @@ const u16 bnxt_bstore_to_trace[] = { [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE, [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE, [BNXT_CTX_RIGP1] = 
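The head_pool changes rebalance page_pool ownership: when the header pool aliases the payload pool, bnxt_alloc_rx_page_pool() now takes an extra reference with page_pool_get(), so the free paths can call page_pool_destroy() on both pointers unconditionally instead of guarding with bnxt_separate_head_pool(). A sketch of the resulting pairing (the helper name is illustrative):

static void bnxt_example_pool_teardown(struct page_pool *pp,
				       struct page_pool *head)
{
	/* balanced whether or not head == pp: the shared case holds
	 * one reference per pointer thanks to page_pool_get() above
	 */
	page_pool_destroy(pp);
	page_pool_destroy(head);
}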
DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE, + [BNXT_CTX_KONG] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE, + [BNXT_CTX_QPC] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE, }; static struct workqueue_struct *bnxt_pf_wq; @@ -315,7 +319,8 @@ static bool bnxt_vf_pciid(enum board_idx idx) return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV || idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF || - idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF); + idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF || + idx == NETXTREME_E_P7_VF_HV); } #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) @@ -3797,8 +3802,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp) xdp_rxq_info_unreg(&rxr->xdp_rxq); page_pool_destroy(rxr->page_pool); - if (bnxt_separate_head_pool(rxr)) - page_pool_destroy(rxr->head_pool); + page_pool_destroy(rxr->head_pool); rxr->page_pool = rxr->head_pool = NULL; kfree(rxr->rx_agg_bmap); @@ -3845,6 +3849,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, pool = page_pool_create(&pp); if (IS_ERR(pool)) goto err_destroy_pp; + } else { + page_pool_get(pool); } rxr->head_pool = pool; @@ -4397,7 +4403,7 @@ static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp, for (i = 0; i < bp->rx_agg_ring_size; i++) { if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) { netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n", - ring_nr, i, bp->rx_ring_size); + ring_nr, i, bp->rx_agg_ring_size); break; } prod = NEXT_RX_AGG(prod); @@ -6832,7 +6838,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); req->lb_rule = cpu_to_le16(0xffff); vnic_mru: - vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; + vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN; req->mru = cpu_to_le16(vnic->mru); req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); @@ -6969,6 +6975,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP; if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP) bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP) + bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP; if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP) bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH; } @@ -8016,7 +8024,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp) } rx_rings = min_t(int, rx_rings, hwr.grp); hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings); - if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) + if (bnxt_ulp_registered(bp->edev) && + hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) hwr.stat -= bnxt_get_ulp_stat_ctxs(bp); hwr.cp = min_t(int, hwr.cp, hwr.stat); rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh); @@ -8024,6 +8033,11 @@ static int __bnxt_reserve_rings(struct bnxt *bp) hwr.rx = rx_rings << 1; tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx); hwr.cp = sh ? 
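The mru assignments swap an open-coded sum for VLAN_ETH_HLEN; the value is unchanged, since <linux/if_vlan.h> defines it as ETH_HLEN + VLAN_HLEN (14 + 4 = 18, so a 1500-byte MTU still yields an MRU of 1518). A compile-time restatement in a hypothetical helper:

static void bnxt_example_mru_check(void)
{
	BUILD_BUG_ON(VLAN_ETH_HLEN != ETH_HLEN + VLAN_HLEN);
}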
max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; + if (hwr.tx != bp->tx_nr_rings) { + netdev_warn(bp->dev, + "Able to reserve only %d out of %d requested TX rings\n", + hwr.tx, bp->tx_nr_rings); + } bp->tx_nr_rings = hwr.tx; /* If we cannot reserve all the RX rings, reset the RSS map only @@ -9138,7 +9152,7 @@ static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp, return rc; } -static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) +static int bnxt_backing_store_cfg_v2(struct bnxt *bp) { struct bnxt_ctx_mem_info *ctx = bp->ctx; struct bnxt_ctx_mem_type *ctxm; @@ -9146,7 +9160,7 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) int rc = 0; u16 type; - for (type = BNXT_CTX_SRT; type <= BNXT_CTX_RIGP1; type++) { + for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) { ctxm = &ctx->ctx_arr[type]; if (!bnxt_bs_trace_avail(bp, type)) continue; @@ -9164,12 +9178,13 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) } if (last_type == BNXT_CTX_INV) { - if (!ena) + for (type = 0; type < BNXT_CTX_MAX; type++) { + ctxm = &ctx->ctx_arr[type]; + if (ctxm->mem_valid) + last_type = type; + } + if (last_type == BNXT_CTX_INV) return 0; - else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) - last_type = BNXT_CTX_MAX - 1; - else - last_type = BNXT_CTX_L2_MAX - 1; } ctx->ctx_arr[last_type].last = 1; @@ -9296,6 +9311,10 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) return 0; + ena = 0; + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + goto skip_legacy; + ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; l2_qps = ctxm->qp_l2_entries; qp1_qps = ctxm->qp_qp1_entries; @@ -9304,7 +9323,6 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; srqs = ctxm->srq_l2_entries; max_srqs = ctxm->max_entries; - ena = 0; if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { pg_lvl = 2; if (BNXT_SW_RES_LMT(bp)) { @@ -9398,8 +9416,9 @@ skip_rdma: ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; +skip_legacy: if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) - rc = bnxt_backing_store_cfg_v2(bp, ena); + rc = bnxt_backing_store_cfg_v2(bp); else rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); if (rc) { @@ -9613,10 +9632,10 @@ no_ptp: static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) { + u32 flags, flags_ext, flags_ext2, flags_ext3; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; struct hwrm_func_qcaps_output *resp; struct hwrm_func_qcaps_input *req; - struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - u32 flags, flags_ext, flags_ext2; int rc; rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); @@ -9683,6 +9702,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED)) bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED; + flags_ext3 = le32_to_cpu(resp->flags_ext3); + if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT) + bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT; + bp->tx_push_thresh = 0; if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && BNXT_FW_MAJ(bp) > 217) @@ -12851,6 +12874,17 @@ static int bnxt_set_xps_mapping(struct bnxt *bp) return rc; } +static int bnxt_tx_nr_rings(struct bnxt *bp) +{ + return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc : + bp->tx_nr_rings_per_tc; +} + +static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp) +{ + return bp->num_tc ? 
bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings; +} + static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -12868,6 +12902,13 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (rc) return rc; + /* Make adjustments if reserved TX rings are less than requested */ + bp->tx_nr_rings -= bp->tx_nr_rings_xdp; + bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp); + if (bp->tx_nr_rings_xdp) { + bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc; + bp->tx_nr_rings += bp->tx_nr_rings_xdp; + } rc = bnxt_alloc_mem(bp, irq_re_init); if (rc) { netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); @@ -13237,12 +13278,6 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, mdio->val_in); - case SIOCSHWTSTAMP: - return bnxt_hwtstamp_set(dev, ifr); - - case SIOCGHWTSTAMP: - return bnxt_hwtstamp_get(dev, ifr); - default: /* do nothing */ break; @@ -14707,6 +14742,23 @@ static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) return false; } +static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp) +{ + struct hwrm_queue_pfcwd_timeout_qcaps_output *resp; + struct hwrm_queue_pfcwd_timeout_qcaps_input *req; + int rc; + + bp->max_pfcwd_tmo_ms = 0; + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS); + if (rc) + return; + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) + bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout); + hwrm_req_drop(bp, req); +} + static int bnxt_fw_init_one_p1(struct bnxt *bp) { int rc; @@ -14784,6 +14836,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp) if (bnxt_fw_pre_resv_vnics(bp)) bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; + bnxt_hwrm_pfcwd_qcaps(bp); bnxt_hwrm_func_qcfg(bp); bnxt_hwrm_vnic_qcaps(bp); bnxt_hwrm_port_led_qcaps(bp); @@ -15747,6 +15800,8 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_xdp_xmit = bnxt_xdp_xmit, .ndo_bridge_getlink = bnxt_bridge_getlink, .ndo_bridge_setlink = bnxt_bridge_setlink, + .ndo_hwtstamp_get = bnxt_hwtstamp_get, + .ndo_hwtstamp_set = bnxt_hwtstamp_set, }; static void bnxt_get_queue_stats_rx(struct net_device *dev, int i, @@ -15898,8 +15953,7 @@ err_rxq_info_unreg: xdp_rxq_info_unreg(&clone->xdp_rxq); err_page_pool_destroy: page_pool_destroy(clone->page_pool); - if (bnxt_separate_head_pool(clone)) - page_pool_destroy(clone->head_pool); + page_pool_destroy(clone->head_pool); clone->page_pool = NULL; clone->head_pool = NULL; return rc; @@ -15917,8 +15971,7 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem) xdp_rxq_info_unreg(&rxr->xdp_rxq); page_pool_destroy(rxr->page_pool); - if (bnxt_separate_head_pool(rxr)) - page_pool_destroy(rxr->head_pool); + page_pool_destroy(rxr->head_pool); rxr->page_pool = NULL; rxr->head_pool = NULL; @@ -16047,7 +16100,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) napi_enable_locked(&bnapi->napi); bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); - mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; + mru = bp->dev->mtu + VLAN_ETH_HLEN; for (i = 0; i < bp->nr_vnics; i++) { vnic = &bp->vnic_info[i]; @@ -16128,7 +16181,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) struct bnxt *bp = netdev_priv(dev); if (BNXT_PF(bp)) - bnxt_sriov_disable(bp); + __bnxt_sriov_disable(bp); bnxt_rdma_aux_device_del(bp); @@ -16325,7 +16378,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); bp->rx_nr_rings 
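bnxt_tx_nr_rings() and bnxt_tx_nr_rings_per_tc() centralize the TC scaling that several reservation paths previously open-coded, so a trimmed reservation propagates cleanly back to the per-TC count. Worked numbers, in a hypothetical helper:

static int bnxt_example_rings_per_tc(int reserved_tx, int num_tc)
{
	/* num_tc = 4, requested 8 per TC -> 32 rings total; if only
	 * 28 are reserved, the per-TC count becomes 28 / 4 = 7
	 */
	return num_tc ? reserved_tx / num_tc : reserved_tx;
}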
= bp->cp_nr_rings; bp->tx_nr_rings_per_tc = bp->cp_nr_rings; - bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + bp->tx_nr_rings = bnxt_tx_nr_rings(bp); } static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) @@ -16357,7 +16410,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) bnxt_trim_dflt_sh_rings(bp); else bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; - bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + bp->tx_nr_rings = bnxt_tx_nr_rings(bp); avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings; if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) { @@ -16370,7 +16423,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) rc = __bnxt_reserve_rings(bp); if (rc && rc != -ENODEV) netdev_warn(bp->dev, "Unable to reserve tx rings\n"); - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp); if (sh) bnxt_trim_dflt_sh_rings(bp); @@ -16379,7 +16432,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) rc = __bnxt_reserve_rings(bp); if (rc && rc != -ENODEV) netdev_warn(bp->dev, "2nd rings reservation failed.\n"); - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp); } if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { bp->rx_nr_rings++; @@ -16413,7 +16466,7 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp) if (rc) goto init_dflt_ring_err; - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp); bnxt_set_dflt_rfs(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index fda0d3cc6227..06a4c2afdf8a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1968,10 +1968,12 @@ struct bnxt_ctx_mem_type { #define BNXT_CTX_CA1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE #define BNXT_CTX_CA2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE #define BNXT_CTX_RIGP1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE +#define BNXT_CTX_KONG FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE +#define BNXT_CTX_QPC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ERR_QPC_TRACE #define BNXT_CTX_MAX (BNXT_CTX_TIM + 1) #define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1) -#define BNXT_CTX_V2_MAX (BNXT_CTX_RIGP1 + 1) +#define BNXT_CTX_V2_MAX (BNXT_CTX_QPC + 1) #define BNXT_CTX_INV ((u16)-1) struct bnxt_ctx_mem_info { @@ -2130,6 +2132,7 @@ enum board_idx { NETXTREME_E_P5_VF, NETXTREME_E_P5_VF_HV, NETXTREME_E_P7_VF, + NETXTREME_E_P7_VF_HV, }; #define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xbc) @@ -2407,6 +2410,7 @@ struct bnxt { #define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6) #define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7) #define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8) +#define BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP BIT(9) u8 rss_hash_key[HW_HASH_KEY_SIZE]; u8 rss_hash_key_valid:1; @@ -2422,6 +2426,8 @@ struct bnxt { u8 max_q; u8 num_tc; + u16 max_pfcwd_tmo_ms; + u8 tph_mode; unsigned int current_interval; @@ -2475,6 +2481,7 @@ struct bnxt { #define BNXT_FW_CAP_ENABLE_RDMA_SRIOV BIT_ULL(5) #define BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED BIT_ULL(6) #define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7) + #define BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT BIT_ULL(8) #define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10) #define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11) #define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13) @@ -2519,6 +2526,8 @@ struct bnxt { #define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \ ((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX)) +#define BNXT_ROCE_VF_DYN_ALLOC_CAP(bp) \ + ((bp)->fw_cap 
& BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT) #define BNXT_SUPPORTS_QUEUE_API(bp) \ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \ ((bp)->fw_cap & BNXT_FW_CAP_VNIC_RE_FLUSH)) @@ -2542,6 +2551,7 @@ struct bnxt { u16 fw_rx_stats_ext_size; u16 fw_tx_stats_ext_size; u16 hw_ring_stats_size; + u16 pcie_stat_len; u8 pri2cos_idx[8]; u8 pri2cos_valid; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c index 18d6c94d5cb8..0181ab1f2dfd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c @@ -36,6 +36,8 @@ static const u16 bnxt_bstore_to_seg_id[] = { [BNXT_CTX_CA1] = BNXT_CTX_MEM_SEG_CA1, [BNXT_CTX_CA2] = BNXT_CTX_MEM_SEG_CA2, [BNXT_CTX_RIGP1] = BNXT_CTX_MEM_SEG_RIGP1, + [BNXT_CTX_KONG] = BNXT_CTX_MEM_SEG_KONG, + [BNXT_CTX_QPC] = BNXT_CTX_MEM_SEG_QPC, }; static int bnxt_dbg_hwrm_log_buffer_flush(struct bnxt *bp, u16 type, u32 flags, @@ -359,7 +361,7 @@ static u32 bnxt_get_ctx_coredump(struct bnxt *bp, void *buf, u32 offset, if (buf) buf += offset; - for (type = 0 ; type <= BNXT_CTX_RIGP1; type++) { + for (type = 0; type < BNXT_CTX_V2_MAX; type++) { struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; bool trace = bnxt_bs_trace_avail(bp, type); u32 seg_id = bnxt_bstore_to_seg_id[type]; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h index d1cd6387f3ab..c087df88154a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h @@ -102,6 +102,8 @@ struct bnxt_driver_segment_record { #define BNXT_CTX_MEM_SEG_CA1 0x9 #define BNXT_CTX_MEM_SEG_CA2 0xa #define BNXT_CTX_MEM_SEG_RIGP1 0xb +#define BNXT_CTX_MEM_SEG_QPC 0xc +#define BNXT_CTX_MEM_SEG_KONG 0xd #define BNXT_CRASH_DUMP_LEN (8 << 20) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 4c4581b0342e..02961d93ed35 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -40,12 +40,6 @@ bnxt_dl_flash_update(struct devlink *dl, struct bnxt *bp = bnxt_get_bp_from_dl(dl); int rc; - if (!BNXT_PF(bp)) { - NL_SET_ERR_MSG_MOD(extack, - "flash update not supported from a VF"); - return -EPERM; - } - devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0); rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack); if (!rc) @@ -220,7 +214,7 @@ __bnxt_dl_reporter_create(struct bnxt *bp, { struct devlink_health_reporter *reporter; - reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp); + reporter = devlink_health_reporter_create(bp->dl, ops, bp); if (IS_ERR(reporter)) { netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n", ops->name, PTR_ERR(reporter)); @@ -1080,16 +1074,9 @@ static int __bnxt_hwrm_nvm_req(struct bnxt *bp, static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, union devlink_param_value *val) { - struct hwrm_nvm_get_variable_input *req = msg; const struct bnxt_dl_nvm_param *nvm_param; int i; - /* Get/Set NVM CFG parameter is supported only on PFs */ - if (BNXT_VF(bp)) { - hwrm_req_drop(bp, req); - return -EPERM; - } - for (i = 0; i < ARRAY_SIZE(nvm_params); i++) { nvm_param = &nvm_params[i]; if (nvm_param->id == param_id) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1b37612b1c01..41686a6f84b5 100644 --- 
a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1584,6 +1584,8 @@ static u64 get_ethtool_ipv6_rss(struct bnxt *bp) { if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) return RXH_IP_SRC | RXH_IP_DST; + if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL) + return RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL; return 0; } @@ -1662,13 +1664,18 @@ static int bnxt_set_rxfh_fields(struct net_device *dev, if (cmd->data == RXH_4TUPLE) tuple = 4; - else if (cmd->data == RXH_2TUPLE) + else if (cmd->data == RXH_2TUPLE || + cmd->data == (RXH_2TUPLE | RXH_IP6_FL)) tuple = 2; else if (!cmd->data) tuple = 0; else return -EINVAL; + if (cmd->data & RXH_IP6_FL && + !(bp->rss_cap & BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP)) + return -EINVAL; + if (cmd->flow_type == TCP_V4_FLOW) { rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4; if (tuple == 4) @@ -1732,10 +1739,15 @@ static int bnxt_set_rxfh_fields(struct net_device *dev, case AH_V6_FLOW: case ESP_V6_FLOW: case IPV6_FLOW: - if (tuple == 2) + rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL); + if (!tuple) + break; + if (cmd->data & RXH_IP6_FL) + rss_hash_cfg |= + VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL; + else if (tuple == 2) rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6; - else if (!tuple) - rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6; break; } @@ -2049,38 +2061,52 @@ static void bnxt_get_drvinfo(struct net_device *dev, static int bnxt_get_regs_len(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); - int reg_len; if (!BNXT_PF(bp)) return -EOPNOTSUPP; - reg_len = BNXT_PXP_REG_LEN; + return BNXT_PXP_REG_LEN + bp->pcie_stat_len; +} + +static void * +__bnxt_hwrm_pcie_qstats(struct bnxt *bp, struct hwrm_pcie_qstats_input *req) +{ + struct pcie_ctx_hw_stats_v2 *hw_pcie_stats; + dma_addr_t hw_pcie_stats_addr; + int rc; + + hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats), + &hw_pcie_stats_addr); + if (!hw_pcie_stats) + return NULL; - if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED) - reg_len += sizeof(struct pcie_ctx_hw_stats); + req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); + req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); + rc = hwrm_req_send(bp, req); - return reg_len; + return rc ? 
NULL : hw_pcie_stats; } #define BNXT_PCIE_32B_ENTRY(start, end) \ - { offsetof(struct pcie_ctx_hw_stats, start), \ - offsetof(struct pcie_ctx_hw_stats, end) } + { offsetof(struct pcie_ctx_hw_stats_v2, start),\ + offsetof(struct pcie_ctx_hw_stats_v2, end) } static const struct { u16 start; u16 end; } bnxt_pcie_32b_entries[] = { BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]), + BNXT_PCIE_32B_ENTRY(pcie_tl_credit_nph_histogram[0], unused_1), + BNXT_PCIE_32B_ENTRY(pcie_rd_latency_histogram[0], unused_2), }; static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) { - struct pcie_ctx_hw_stats *hw_pcie_stats; + struct hwrm_pcie_qstats_output *resp; struct hwrm_pcie_qstats_input *req; struct bnxt *bp = netdev_priv(dev); - dma_addr_t hw_pcie_stats_addr; - int rc; + u8 *src; regs->version = 0; if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED)) @@ -2092,24 +2118,21 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) return; - hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats), - &hw_pcie_stats_addr); - if (!hw_pcie_stats) { - hwrm_req_drop(bp, req); - return; - } - - regs->version = 1; - hwrm_req_hold(bp, req); /* hold on to slice */ - req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); - req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); - rc = hwrm_req_send(bp, req); - if (!rc) { + resp = hwrm_req_hold(bp, req); + src = __bnxt_hwrm_pcie_qstats(bp, req); + if (src) { u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN); - u8 *src = (u8 *)hw_pcie_stats; - int i, j; + int i, j, len; + + len = min(bp->pcie_stat_len, le16_to_cpu(resp->pcie_stat_size)); + if (len <= sizeof(struct pcie_ctx_hw_stats)) + regs->version = 1; + else if (len < sizeof(struct pcie_ctx_hw_stats_v2)) + regs->version = 2; + else + regs->version = 3; - for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) { + for (i = 0, j = 0; i < len; ) { if (i >= bnxt_pcie_32b_entries[j].start && i <= bnxt_pcie_32b_entries[j].end) { u32 *dst32 = (u32 *)(dst + i); @@ -3185,7 +3208,8 @@ static int bnxt_get_fecparam(struct net_device *dev, } static void bnxt_get_fec_stats(struct net_device *dev, - struct ethtool_fec_stats *fec_stats) + struct ethtool_fec_stats *fec_stats, + struct ethtool_fec_hist *hist) { struct bnxt *bp = netdev_priv(dev); u64 *rx; @@ -4376,12 +4400,42 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata) return 0; } +static int bnxt_hwrm_pfcwd_qcfg(struct bnxt *bp, u16 *val) +{ + struct hwrm_queue_pfcwd_timeout_qcfg_output *resp; + struct hwrm_queue_pfcwd_timeout_qcfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCFG); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + *val = le16_to_cpu(resp->pfcwd_timeout_value); + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_pfcwd_cfg(struct bnxt *bp, u16 val) +{ + struct hwrm_queue_pfcwd_timeout_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_CFG); + if (rc) + return rc; + req->pfcwd_timeout_value = cpu_to_le16(val); + rc = hwrm_req_send(bp, req); + return rc; +} + static int bnxt_set_tunable(struct net_device *dev, const struct ethtool_tunable *tuna, const void *data) { struct bnxt *bp = netdev_priv(dev); - u32 rx_copybreak; + u32 rx_copybreak, val; switch (tuna->id) { case ETHTOOL_RX_COPYBREAK: @@ -4394,6 +4448,15 @@ static int bnxt_set_tunable(struct net_device *dev, 
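bnxt_get_regs() now grades regs->version by how much of the PCIe stats structure the firmware actually returned, with the length capped to the size cached in bp->pcie_stat_len at init time. The decision table, restated in a hypothetical helper (assuming sizeof(struct pcie_ctx_hw_stats) < sizeof(struct pcie_ctx_hw_stats_v2)):

static int bnxt_example_regs_version(u16 len, u16 v1_size, u16 v2_size)
{
	if (len <= v1_size)
		return 1;	/* legacy payload only	*/
	if (len < v2_size)
		return 2;	/* partial v2 payload	*/
	return 3;		/* complete v2 payload	*/
}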
bp->rx_copybreak = rx_copybreak; } return 0; + case ETHTOOL_PFC_PREVENTION_TOUT: + if (BNXT_VF(bp) || !bp->max_pfcwd_tmo_ms) + return -EOPNOTSUPP; + + val = *(u16 *)data; + if (val > bp->max_pfcwd_tmo_ms && + val != PFC_STORM_PREVENTION_AUTO) + return -EINVAL; + return bnxt_hwrm_pfcwd_cfg(bp, val); default: return -EOPNOTSUPP; } @@ -4408,6 +4471,10 @@ static int bnxt_get_tunable(struct net_device *dev, case ETHTOOL_RX_COPYBREAK: *(u32 *)data = bp->rx_copybreak; break; + case ETHTOOL_PFC_PREVENTION_TOUT: + if (!bp->max_pfcwd_tmo_ms) + return -EOPNOTSUPP; + return bnxt_hwrm_pfcwd_qcfg(bp, data); default: return -EOPNOTSUPP; } @@ -5254,6 +5321,26 @@ static int bnxt_get_ts_info(struct net_device *dev, return 0; } +static void bnxt_hwrm_pcie_qstats(struct bnxt *bp) +{ + struct hwrm_pcie_qstats_output *resp; + struct hwrm_pcie_qstats_input *req; + + bp->pcie_stat_len = 0; + if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) + return; + + if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) + return; + + resp = hwrm_req_hold(bp, req); + if (__bnxt_hwrm_pcie_qstats(bp, req)) + bp->pcie_stat_len = min_t(u16, + le16_to_cpu(resp->pcie_stat_size), + sizeof(struct pcie_ctx_hw_stats_v2)); + hwrm_req_drop(bp, req); +} + void bnxt_ethtool_init(struct bnxt *bp) { struct hwrm_selftest_qlist_output *resp; @@ -5262,6 +5349,7 @@ void bnxt_ethtool_init(struct bnxt *bp) struct net_device *dev = bp->dev; int i, rc; + bnxt_hwrm_pcie_qstats(bp); if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER)) bnxt_get_pkgver(dev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index ca660e6d28a4..db81cf6d5289 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -560,10 +560,11 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) return bnxt_ptp_cfg_tstamp_filters(bp); } -int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +int bnxt_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *stmpconf, + struct netlink_ext_ack *extack) { struct bnxt *bp = netdev_priv(dev); - struct hwtstamp_config stmpconf; struct bnxt_ptp_cfg *ptp; u16 old_rxctl; int old_rx_filter, rc; @@ -573,17 +574,14 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) if (!ptp) return -EOPNOTSUPP; - if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) - return -EFAULT; - - if (stmpconf.tx_type != HWTSTAMP_TX_ON && - stmpconf.tx_type != HWTSTAMP_TX_OFF) + if (stmpconf->tx_type != HWTSTAMP_TX_ON && + stmpconf->tx_type != HWTSTAMP_TX_OFF) return -ERANGE; old_rx_filter = ptp->rx_filter; old_rxctl = ptp->rxctl; old_tx_tstamp_en = ptp->tx_tstamp_en; - switch (stmpconf.rx_filter) { + switch (stmpconf->rx_filter) { case HWTSTAMP_FILTER_NONE: ptp->rxctl = 0; ptp->rx_filter = HWTSTAMP_FILTER_NONE; @@ -616,7 +614,7 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } - if (stmpconf.tx_type == HWTSTAMP_TX_ON) + if (stmpconf->tx_type == HWTSTAMP_TX_ON) ptp->tx_tstamp_en = 1; else ptp->tx_tstamp_en = 0; @@ -625,9 +623,8 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) if (rc) goto ts_set_err; - stmpconf.rx_filter = ptp->rx_filter; - return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
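The new ETHTOOL_PFC_PREVENTION_TOUT tunable exposes the PFC watchdog timeout, bounded by the max_pfcwd_tmo_ms value queried from firmware at probe time; PFC_STORM_PREVENTION_AUTO asks firmware to pick the timeout itself. The validation mirrored as a standalone sketch (hypothetical helper; userspace would reach this through ethtool's set-tunable interface, assuming the installed ethtool binary exposes the tunable):

static int bnxt_example_pfcwd_valid(u16 val, u16 max_tmo_ms)
{
	if (!max_tmo_ms)
		return -EOPNOTSUPP;	/* firmware never reported it */
	if (val > max_tmo_ms && val != PFC_STORM_PREVENTION_AUTO)
		return -EINVAL;
	return 0;
}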
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index ca660e6d28a4..db81cf6d5289 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -560,10 +560,11 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
 	return bnxt_ptp_cfg_tstamp_filters(bp);
 }
 
-int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int bnxt_hwtstamp_set(struct net_device *dev,
+		      struct kernel_hwtstamp_config *stmpconf,
+		      struct netlink_ext_ack *extack)
 {
 	struct bnxt *bp = netdev_priv(dev);
-	struct hwtstamp_config stmpconf;
 	struct bnxt_ptp_cfg *ptp;
 	u16 old_rxctl;
 	int old_rx_filter, rc;
@@ -573,17 +574,14 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 	if (!ptp)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
-		return -EFAULT;
-
-	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
-	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
+	if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
+	    stmpconf->tx_type != HWTSTAMP_TX_OFF)
 		return -ERANGE;
 
 	old_rx_filter = ptp->rx_filter;
 	old_rxctl = ptp->rxctl;
 	old_tx_tstamp_en = ptp->tx_tstamp_en;
-	switch (stmpconf.rx_filter) {
+	switch (stmpconf->rx_filter) {
 	case HWTSTAMP_FILTER_NONE:
 		ptp->rxctl = 0;
 		ptp->rx_filter = HWTSTAMP_FILTER_NONE;
@@ -616,7 +614,7 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 		return -ERANGE;
 	}
 
-	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+	if (stmpconf->tx_type == HWTSTAMP_TX_ON)
 		ptp->tx_tstamp_en = 1;
 	else
 		ptp->tx_tstamp_en = 0;
@@ -625,9 +623,8 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 	if (rc)
 		goto ts_set_err;
 
-	stmpconf.rx_filter = ptp->rx_filter;
-	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
-		-EFAULT : 0;
+	stmpconf->rx_filter = ptp->rx_filter;
+	return 0;
 
 ts_set_err:
 	ptp->rx_filter = old_rx_filter;
@@ -636,22 +633,22 @@ ts_set_err:
 	return rc;
 }
 
-int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int bnxt_hwtstamp_get(struct net_device *dev,
+		      struct kernel_hwtstamp_config *stmpconf)
 {
 	struct bnxt *bp = netdev_priv(dev);
-	struct hwtstamp_config stmpconf;
 	struct bnxt_ptp_cfg *ptp;
 
 	ptp = bp->ptp_cfg;
 	if (!ptp)
 		return -EOPNOTSUPP;
 
-	stmpconf.flags = 0;
-	stmpconf.tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+	stmpconf->flags = 0;
+	stmpconf->tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON
+					      : HWTSTAMP_TX_OFF;
 
-	stmpconf.rx_filter = ptp->rx_filter;
-	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
-		-EFAULT : 0;
+	stmpconf->rx_filter = ptp->rx_filter;
+	return 0;
 }
 
 static int bnxt_map_regs(struct bnxt *bp, u32 *reg_arr, int count, int reg_win)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 0481161d26ef..8cc2fae47644 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -160,8 +160,11 @@ void bnxt_ptp_update_current_time(struct bnxt *bp);
 void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
 int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
 void bnxt_ptp_reapply_pps(struct bnxt *bp);
-int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
-int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+int bnxt_hwtstamp_set(struct net_device *dev,
+		      struct kernel_hwtstamp_config *stmpconf,
+		      struct netlink_ext_ack *extack);
+int bnxt_hwtstamp_get(struct net_device *dev,
+		      struct kernel_hwtstamp_config *stmpconf);
 void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp);
 int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
 void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
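
Both hwtstamp conversions in this series (bnxt above, tg3 below) follow the same pattern: the ifreq-based ioctl handlers become ndo_hwtstamp_get()/ndo_hwtstamp_set() callbacks operating on struct kernel_hwtstamp_config, and the core performs the user copies for SIOCSHWTSTAMP/SIOCGHWTSTAMP (and serves the netlink path) before and after calling the driver. A minimal sketch of such a pair for a hypothetical driver "foo" (foo_priv and the ops wiring are illustrative, not from this patch):

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

struct foo_priv {
	bool tx_tstamp_en;
	int  rx_filter;
};

static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (cfg->tx_type != HWTSTAMP_TX_ON && cfg->tx_type != HWTSTAMP_TX_OFF)
		return -ERANGE;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		priv->rx_filter = cfg->rx_filter;
		break;
	default:
		/* extack lets the netlink path carry a readable error */
		NL_SET_ERR_MSG(extack, "unsupported rx filter");
		return -ERANGE;
	}

	priv->tx_tstamp_en = cfg->tx_type == HWTSTAMP_TX_ON;
	/* report back what was actually programmed */
	cfg->rx_filter = priv->rx_filter;
	return 0;
}

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg)
{
	struct foo_priv *priv = netdev_priv(dev);

	cfg->flags = 0;
	cfg->tx_type = priv->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg->rx_filter = priv->rx_filter;
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_hwtstamp_get	= foo_hwtstamp_get,
	.ndo_hwtstamp_set	= foo_hwtstamp_set,
};
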
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 480e18a32caa..80fed2c07b9e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -541,6 +541,13 @@ static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs)
 	if (rc)
 		goto err;
 
+	/* In case of VF Dynamic resource allocation, driver will provision
+	 * maximum resources to all the VFs. FW will dynamically allocate
+	 * resources to VFs on the fly, so always divide the resources by 1.
+	 */
+	if (BNXT_ROCE_VF_DYN_ALLOC_CAP(bp))
+		num_vfs = 1;
+
 	cfg_req->fid = cpu_to_le16(0xffff);
 	cfg_req->enables2 =
 		cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF |
@@ -734,7 +741,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 		 FUNC_CFG_REQ_ENABLES_NUM_VNICS |
 		 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
 
-	mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+	mtu = bp->dev->mtu + VLAN_ETH_HLEN;
 	req->mru = cpu_to_le16(mtu);
 	req->admin_mtu = cpu_to_le16(mtu);
 
@@ -919,7 +926,7 @@ err_out1:
 	return rc;
 }
 
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
 {
 	u16 num_vfs = pci_num_vf(bp->pdev);
 
@@ -943,6 +950,14 @@ void bnxt_sriov_disable(struct bnxt *bp)
 	devl_unlock(bp->dl);
 
 	bnxt_free_vf_resources(bp);
+}
+
+static void bnxt_sriov_disable(struct bnxt *bp)
+{
+	if (!pci_num_vf(bp->pdev))
+		return;
+
+	__bnxt_sriov_disable(bp);
 
 	/* Reclaim all resources for the PF. */
 	rtnl_lock();
@@ -1321,7 +1336,7 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
 	return 0;
 }
 
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
 {
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 9a4bacba477b..e4979d729312 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -38,7 +38,7 @@ bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf);
 int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
 int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
 int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
-void bnxt_sriov_disable(struct bnxt *);
+void __bnxt_sriov_disable(struct bnxt *bp);
 void bnxt_hwrm_exec_fwd_req(struct bnxt *);
 void bnxt_update_vf_mac(struct bnxt *);
 int bnxt_approve_mac(struct bnxt *, const u8 *, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d72fd248f3aa..2d66bf59cd64 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -244,7 +244,7 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
 			   offset < offset_of_ip6_daddr + 16) {
 			actions->nat.src_xlate = false;
 			idx = (offset - offset_of_ip6_daddr) / 4;
-			actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
+			actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
 		} else {
 			netdev_err(bp->dev,
 				   "%s: IPv6_hdr: Invalid pedit field\n",
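
The bnxt_tc.c hunk fixes a copy-and-paste bug: a pedit write targeting the IPv6 destination address correctly computed the word index from the daddr offset but then stored the value into saddr. The mapping itself, shown stand-alone (pedit_set_ip6_daddr() is a hypothetical illustration, not the driver's function):

#include <linux/in6.h>
#include <linux/kernel.h>

/* A pedit key carries a 4-byte value at a byte offset into the IPv6
 * header; offsets falling inside the 16-byte destination address pick
 * one of its four 32-bit words.
 */
static void pedit_set_ip6_daddr(struct in6_addr *daddr,
				u32 offset_in_daddr, u32 val)
{
	int idx = offset_in_daddr / 4;	/* 0..3: which word of daddr */

	daddr->s6_addr32[idx] = htonl(val);	/* was saddr before the fix */
}
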
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 58d579dca3f1..3e77a96e5a3e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -468,9 +468,8 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
 	if (!skb)
 		return NULL;
 
-	xdp_update_skb_shared_info(skb, num_frags,
-				   sinfo->xdp_frags_size,
-				   BNXT_RX_PAGE_SIZE * num_frags,
-				   xdp_buff_is_frag_pfmemalloc(xdp));
+	xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
+				  BNXT_RX_PAGE_SIZE * num_frags,
+				  xdp_buff_get_skb_flags(xdp));
 	return skb;
 }
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index a9040c42d2ff..6e97a5a7daaf 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4230,8 +4230,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
 
 	cnic_bnx2x_delete_wait(dev, 0);
 
-	cancel_delayed_work(&cp->delete_task);
-	flush_workqueue(cnic_wq);
+	cancel_delayed_work_sync(&cp->delete_task);
 
 	if (atomic_read(&cp->iscsi_conn) != 0)
 		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
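
The cnic.c change replaces cancel_delayed_work() plus flush_workqueue() with a single cancel_delayed_work_sync(), which also waits for a handler that is already running and stops it from re-arming itself. A self-contained sketch of why that matters, assuming a self-rearming work item (poll_fn/poll_work/teardown are hypothetical names, not cnic's):

#include <linux/workqueue.h>

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* ... poll hardware ... */
	schedule_delayed_work(&poll_work, HZ);	/* re-arms itself */
}

static void teardown(void)
{
	/*
	 * cancel_delayed_work() + flush_workqueue() can still lose the
	 * race: an instance of poll_fn() running during the cancel may
	 * re-queue itself afterwards.  cancel_delayed_work_sync()
	 * cancels the pending work, waits for a running instance to
	 * finish, and guarantees it cannot re-arm.
	 */
	cancel_delayed_work_sync(&poll_work);
}
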
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b4dc93a48718..7f00ec7fd7b9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -13929,22 +13929,20 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 
 }
 
-static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int tg3_hwtstamp_set(struct net_device *dev,
+			    struct kernel_hwtstamp_config *stmpconf,
+			    struct netlink_ext_ack *extack)
 {
 	struct tg3 *tp = netdev_priv(dev);
-	struct hwtstamp_config stmpconf;
 
 	if (!tg3_flag(tp, PTP_CAPABLE))
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
-		return -EFAULT;
-
-	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
-	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
+	if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
+	    stmpconf->tx_type != HWTSTAMP_TX_OFF)
 		return -ERANGE;
 
-	switch (stmpconf.rx_filter) {
+	switch (stmpconf->rx_filter) {
 	case HWTSTAMP_FILTER_NONE:
 		tp->rxptpctl = 0;
 		break;
@@ -14004,74 +14002,72 @@ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 		tw32(TG3_RX_PTP_CTL,
 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
 
-	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+	if (stmpconf->tx_type == HWTSTAMP_TX_ON)
 		tg3_flag_set(tp, TX_TSTAMP_EN);
 	else
 		tg3_flag_clear(tp, TX_TSTAMP_EN);
 
-	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
-		-EFAULT : 0;
+	return 0;
 }
 
-static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int tg3_hwtstamp_get(struct net_device *dev,
+			    struct kernel_hwtstamp_config *stmpconf)
 {
 	struct tg3 *tp = netdev_priv(dev);
-	struct hwtstamp_config stmpconf;
 
 	if (!tg3_flag(tp, PTP_CAPABLE))
 		return -EOPNOTSUPP;
 
-	stmpconf.flags = 0;
-	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
-			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+	stmpconf->flags = 0;
+	stmpconf->tx_type = tg3_flag(tp, TX_TSTAMP_EN) ?
+			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
 
 	switch (tp->rxptpctl) {
 	case 0:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_NONE;
 		break;
 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 		break;
 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
 		break;
 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
 		break;
 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 		break;
 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
 		break;
 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
 		break;
 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
 		break;
 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
 		break;
 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
 		break;
 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
 		break;
 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
 		break;
 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
-		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+		stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
 		break;
 	default:
 		WARN_ON_ONCE(1);
 		return -ERANGE;
 	}
 
-	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
-		-EFAULT : 0;
+	return 0;
 }
 
 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -14126,12 +14122,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 		return err;
 
-	case SIOCSHWTSTAMP:
-		return tg3_hwtstamp_set(dev, ifr);
-
-	case SIOCGHWTSTAMP:
-		return tg3_hwtstamp_get(dev, ifr);
-
 	default:
 		/* do nothing */
 		break;
@@ -14407,6 +14397,8 @@ static const struct net_device_ops tg3_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tg3_poll_controller,
 #endif
+	.ndo_hwtstamp_get	= tg3_hwtstamp_get,
+	.ndo_hwtstamp_set	= tg3_hwtstamp_set,
 };
 
 static void tg3_get_eeprom_size(struct tg3 *tp)
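
Dropping the SIOCSHWTSTAMP/SIOCGHWTSTAMP cases from tg3_ioctl() does not change the user-visible ABI: the core converts those ioctls into the new ndo calls, and userspace still passes struct hwtstamp_config through ifreq exactly as before. A userspace sketch of the unchanged query path (get_hwtstamp() is a hypothetical helper; the structures and ioctl number are the standard uapi ones):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Read the current hardware timestamping config of an interface. */
int get_hwtstamp(const char *ifname, struct hwtstamp_config *cfg)
{
	struct ifreq ifr = {};
	int fd, rc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	memset(cfg, 0, sizeof(*cfg));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)cfg;	/* kernel fills in tx_type/rx_filter */
	rc = ioctl(fd, SIOCGHWTSTAMP, &ifr);
	close(fd);
	return rc;
}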