Diffstat (limited to 'drivers/net/ethernet')
82 files changed, 1448 insertions, 314 deletions
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 81ea01a652b9..433a646e9831 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -1710,7 +1710,9 @@ static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
 	airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
 	airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
 		      LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
-		      FIELD_PREP(LPBK_CHAN_MASK, chan) | LPBK_EN_MASK);
+		      FIELD_PREP(LPBK_CHAN_MASK, chan) |
+		      LBK_GAP_MODE_MASK | LBK_LEN_MODE_MASK |
+		      LBK_CHAN_MODE_MASK | LPBK_EN_MASK);
 	airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
 		      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
 		      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
@@ -1871,6 +1873,20 @@ static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
 #endif
 }
 
+static bool airoha_dev_tx_queue_busy(struct airoha_queue *q, u32 nr_frags)
+{
+	u32 tail = q->tail <= q->head ? q->tail + q->ndesc : q->tail;
+	u32 index = q->head + nr_frags;
+
+	/* completion napi can free out-of-order tx descriptors if hw QoS is
+	 * enabled and packets with different priorities are queued to the same
+	 * DMA ring. Take into account possible out-of-order reports checking
+	 * if the tx queue is full using circular buffer head/tail pointers
+	 * instead of the number of queued packets.
+	 */
+	return index >= tail;
+}
+
 static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
 				   struct net_device *dev)
 {
@@ -1924,7 +1940,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
 	txq = netdev_get_tx_queue(dev, qid);
 	nr_frags = 1 + skb_shinfo(skb)->nr_frags;
 
-	if (q->queued + nr_frags > q->ndesc) {
+	if (airoha_dev_tx_queue_busy(q, nr_frags)) {
 		/* not enough space in the queue */
 		netif_tx_stop_queue(txq);
 		spin_unlock_bh(&q->lock);
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
index e1c15c20be8e..69c5a143db8c 100644
--- a/drivers/net/ethernet/airoha/airoha_regs.h
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -151,6 +151,9 @@
 #define LPBK_LEN_MASK			GENMASK(23, 10)
 #define LPBK_CHAN_MASK			GENMASK(8, 4)
 #define LPBK_MODE_MASK			GENMASK(3, 1)
+#define LBK_GAP_MODE_MASK		BIT(3)
+#define LBK_LEN_MODE_MASK		BIT(2)
+#define LBK_CHAN_MODE_MASK		BIT(1)
 #define LPBK_EN_MASK			BIT(0)
 
 #define REG_GDM_TXCHN_EN(_n)		(GDM_BASE(_n) + 0x24)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index f0989aa01855..4dc631af7933 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1080,7 +1080,6 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
 
 static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
 {
-	pdata->phy_link = -1;
 	pdata->phy_speed = SPEED_UNKNOWN;
 
 	return pdata->phy_if.phy_reset(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 1a37ec45e650..7675bb98f029 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1555,6 +1555,7 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
 		pdata->phy.duplex = DUPLEX_FULL;
 	}
 
+	pdata->phy_link = 0;
 	pdata->phy.link = 0;
 
 	pdata->phy.pause_autoneg = pdata->pause_autoneg;
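[The airoha hunks above replace a naive "queued + nr_frags > ndesc" fullness test with a head/tail comparison that stays correct when completions arrive out of order. A standalone userspace sketch of the same arithmetic, on an invented 8-entry ring (struct and values are made up for illustration):]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t head;	/* next slot the producer would fill */
	uint32_t tail;	/* oldest slot not yet reclaimed */
	uint32_t ndesc;	/* ring size */
};

static bool ring_busy(const struct ring *q, uint32_t nr_frags)
{
	/* Unwrap the tail so it is always "ahead" of head, then check
	 * whether head + nr_frags would catch up with it.
	 */
	uint32_t tail = q->tail <= q->head ? q->tail + q->ndesc : q->tail;

	return q->head + nr_frags >= tail;
}

int main(void)
{
	struct ring q = { .head = 6, .tail = 2, .ndesc = 8 };

	/* slots 6..7 plus wrapped 0..1 are free: 4 slots, one kept as guard */
	printf("3 frags busy? %d\n", ring_busy(&q, 3));	/* 0: fits */
	printf("4 frags busy? %d\n", ring_busy(&q, 4));	/* 1: would be full */
	return 0;
}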
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1d0e0e7362bd..3fc33b1b4dfb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -9705,6 +9705,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	flags_ext3 = le32_to_cpu(resp->flags_ext3);
 	if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
 		bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
+	if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
+		bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
 
 	bp->tx_push_thresh = 0;
 	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 06a4c2afdf8a..741b2d854789 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2514,6 +2514,7 @@ struct bnxt {
 	#define BNXT_FW_CAP_VNIC_RE_FLUSH		BIT_ULL(40)
 	#define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS	BIT_ULL(41)
 	#define BNXT_FW_CAP_NPAR_1_2			BIT_ULL(42)
+	#define BNXT_FW_CAP_MIRROR_ON_ROCE		BIT_ULL(43)
 
 	u32			fw_dbg_cap;
 
@@ -2537,6 +2538,8 @@ struct bnxt {
 	((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED)
 #define BNXT_SW_RES_LMT(bp)	\
 	((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS)
+#define BNXT_MIRROR_ON_ROCE_CAP(bp)	\
+	((bp)->fw_cap & BNXT_FW_CAP_MIRROR_ON_ROCE)
 
 	u32			hwrm_spec_code;
 	u16			hwrm_cmd_seq;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 61cf201bb0dc..f8c2c72b382d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -100,6 +100,12 @@ void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
 
 		if (BNXT_PF(bp) && !bp->pf.port_id && bp->port_count > 1)
 			bp->edev->ulp_num_ctxs++;
+
+		/* Reserve one additional stat_ctx when the device is capable
+		 * of supporting port mirroring on RDMA device.
+		 */
+		if (BNXT_MIRROR_ON_ROCE_CAP(bp))
+			bp->edev->ulp_num_ctxs++;
 	}
 }
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 7f00ec7fd7b9..d78cafdb2094 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -5803,7 +5803,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
 	u32 current_speed = SPEED_UNKNOWN;
 	u8 current_duplex = DUPLEX_UNKNOWN;
 	bool current_link_up = false;
-	u32 local_adv, remote_adv, sgsr;
+	u32 local_adv = 0, remote_adv = 0, sgsr;
 
 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
@@ -5944,9 +5944,6 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
 		else
 			current_duplex = DUPLEX_HALF;
 
-		local_adv = 0;
-		remote_adv = 0;
-
 		if (bmcr & BMCR_ANENABLE) {
 			u32 common;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1996d2e4e3e2..6e4f17142519 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -508,25 +508,34 @@ static int alloc_list(struct net_device *dev)
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		/* Allocated fixed size of skbuff */
 		struct sk_buff *skb;
+		dma_addr_t addr;
 
 		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
 		np->rx_skbuff[i] = skb;
-		if (!skb) {
-			free_list(dev);
-			return -ENOMEM;
-		}
+		if (!skb)
+			goto err_free_list;
+
+		addr = dma_map_single(&np->pdev->dev, skb->data,
+				      np->rx_buf_sz, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&np->pdev->dev, addr))
+			goto err_kfree_skb;
 
 		np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
 				((i + 1) % RX_RING_SIZE) *
 				sizeof(struct netdev_desc));
 		/* Rubicon now supports 40 bits of addressing space. */
-		np->rx_ring[i].fraginfo =
-			cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
-						   np->rx_buf_sz, DMA_FROM_DEVICE));
+		np->rx_ring[i].fraginfo = cpu_to_le64(addr);
 		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
 	}
 
 	return 0;
+
+err_kfree_skb:
+	dev_kfree_skb(np->rx_skbuff[i]);
+	np->rx_skbuff[i] = NULL;
+err_free_list:
+	free_list(dev);
+	return -ENOMEM;
 }
 
 static void rio_hw_init(struct net_device *dev)
@@ -724,7 +733,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
 	u64 tfc_vlan_tag = 0;
 
 	if (np->link_status == 0) {	/* Link Down */
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 	entry = np->cur_tx % TX_RING_SIZE;
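[The dl2k hunk above adds the dma_mapping_error() check that every dma_map_single() call requires before the address is handed to hardware. A minimal kernel-style sketch of the pattern, assuming a hypothetical my_ring_fill() helper; the DMA calls themselves are the real kernel API:]

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical helper showing map-then-check before publishing to hw */
static int my_ring_fill(struct device *dev, struct sk_buff *skb, u32 len,
			dma_addr_t *out)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		/* Mapping failed: the skb was never exposed to the device,
		 * so a plain dev_kfree_skb() is the right unwind.
		 */
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	*out = addr;
	return 0;
}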
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index c96d1d6ba8fe..18d86badd6ea 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1077,8 +1077,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
 	dma_addr_t addr;
 
 	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
-	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
-				  DPAA2_ETH_TX_BUF_ALIGN);
+	aligned_start = PTR_ALIGN(buffer_start, DPAA2_ETH_TX_BUF_ALIGN);
 	if (aligned_start >= skb->head)
 		buffer_start = aligned_start;
 	else
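[A userspace illustration of why the dpaa2 change above is safe: PTR_ALIGN(p, a) already rounds p up to the next a-byte boundary (keeping it unchanged when aligned), so pre-subtracting one alignment step only pushed the result a full step lower than necessary, making the subsequent ">= skb->head" test fail more often. ALIGN_UP below mimics the kernel's PTR_ALIGN:]

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t buffer_start = 0x1010;	/* already 16-byte aligned */
	uintptr_t a = 16;

	printf("ALIGN_UP(p, 16)      = %#lx\n",
	       (unsigned long)ALIGN_UP(buffer_start, a));	/* 0x1010 */
	printf("ALIGN_UP(p - 16, 16) = %#lx\n",
	       (unsigned long)ALIGN_UP(buffer_start - a, a));	/* 0x1000 */
	return 0;
}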
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index aae462a0cf5a..0535e92404e3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1595,6 +1595,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
 	/* next descriptor to process */
 	i = rx_ring->next_to_clean;
 
+	enetc_lock_mdio();
+
 	while (likely(rx_frm_cnt < work_limit)) {
 		union enetc_rx_bd *rxbd;
 		struct sk_buff *skb;
@@ -1630,7 +1632,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
 		rx_byte_cnt += skb->len + ETH_HLEN;
 		rx_frm_cnt++;
 
+		enetc_unlock_mdio();
 		napi_gro_receive(napi, skb);
+		enetc_lock_mdio();
 	}
 
 	rx_ring->next_to_clean = i;
@@ -1638,6 +1642,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
 	rx_ring->stats.packets += rx_frm_cnt;
 	rx_ring->stats.bytes += rx_byte_cnt;
 
+	enetc_unlock_mdio();
+
 	return rx_frm_cnt;
 }
 
@@ -1947,6 +1953,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 	/* next descriptor to process */
 	i = rx_ring->next_to_clean;
 
+	enetc_lock_mdio();
+
 	while (likely(rx_frm_cnt < work_limit)) {
 		union enetc_rx_bd *rxbd, *orig_rxbd;
 		struct xdp_buff xdp_buff;
@@ -2010,7 +2018,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 			 */
 			enetc_bulk_flip_buff(rx_ring, orig_i, i);
 
+			enetc_unlock_mdio();
 			napi_gro_receive(napi, skb);
+			enetc_lock_mdio();
 			break;
 		case XDP_TX:
 			tx_ring = priv->xdp_tx_ring[rx_ring->index];
@@ -2045,7 +2055,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 			}
 			break;
 		case XDP_REDIRECT:
+			enetc_unlock_mdio();
 			err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+			enetc_lock_mdio();
 			if (unlikely(err)) {
 				enetc_xdp_drop(rx_ring, orig_i, i);
 				rx_ring->stats.xdp_redirect_failures++;
@@ -2065,8 +2077,11 @@ out:
 	rx_ring->stats.packets += rx_frm_cnt;
 	rx_ring->stats.bytes += rx_byte_cnt;
 
-	if (xdp_redirect_frm_cnt)
+	if (xdp_redirect_frm_cnt) {
+		enetc_unlock_mdio();
 		xdp_do_flush();
+		enetc_lock_mdio();
+	}
 
 	if (xdp_tx_frm_cnt)
 		enetc_update_tx_ring_tail(tx_ring);
@@ -2075,6 +2090,8 @@ out:
 		enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
 				     rx_ring->xdp.xdp_tx_in_flight);
 
+	enetc_unlock_mdio();
+
 	return rx_frm_cnt;
 }
 
@@ -2093,6 +2110,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
 	for (i = 0; i < v->count_tx_rings; i++)
 		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
 			complete = false;
+	enetc_unlock_mdio();
 
 	prog = rx_ring->xdp.prog;
 	if (prog)
@@ -2104,10 +2122,8 @@
 	if (work_done)
 		v->rx_napi_work = true;
 
-	if (!complete) {
-		enetc_unlock_mdio();
+	if (!complete)
 		return budget;
-	}
 
 	napi_complete_done(napi, work_done);
 
@@ -2116,6 +2132,7 @@
 
 	v->rx_napi_work = false;
 
+	enetc_lock_mdio();
 	/* enable interrupts */
 	enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 0ec010a7d640..f279fa597991 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -76,7 +76,7 @@ struct enetc_lso_t {
 #define ENETC_LSO_MAX_DATA_LEN		SZ_256K
 
 #define ENETC_RX_MAXFRM_SIZE	ENETC_MAC_MAXFRM_SIZE
-#define ENETC_RXB_TRUESIZE	2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_TRUESIZE	(PAGE_SIZE >> 1)
 #define ENETC_RXB_PAD		NET_SKB_PAD /* add extra space if needed */
 #define ENETC_RXB_DMA_SIZE	\
 	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 577f9b1780ad..de88776dd2a2 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -479,10 +479,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 					"missing 'reg' property in node %pOF\n",
 					tbi);
 				err = -EBUSY;
+				of_node_put(tbi);
 				goto error;
 			}
 			set_tbipa(*prop, pdev,
 				  data->get_tbipa, priv->map, &res);
+			of_node_put(tbi);
 		}
 	}
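[The enetc hunks above converge on one locking pattern: take the register-access lock once per NAPI poll, but drop it around any call that re-enters the network stack (napi_gro_receive(), xdp_do_redirect(), xdp_do_flush()), since those can run long. A sketch under that assumption; my_poll_rx(), my_fetch_skb() and struct my_ring are hypothetical stand-ins, while the lock helpers are the driver's own:]

#include <linux/netdevice.h>

struct my_ring;	/* hypothetical rx ring */
static struct sk_buff *my_fetch_skb(struct my_ring *ring); /* needs the lock */

static int my_poll_rx(struct my_ring *ring, struct napi_struct *napi,
		      int budget)
{
	int done = 0;

	enetc_lock_mdio();
	while (done < budget) {
		struct sk_buff *skb = my_fetch_skb(ring);

		if (!skb)
			break;

		enetc_unlock_mdio();	/* never hold it across the stack */
		napi_gro_receive(napi, skb);
		enetc_lock_mdio();
		done++;
	}
	enetc_unlock_mdio();

	return done;
}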
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index bceaf9b05cb4..4cc6dcbfd367 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -100,6 +100,8 @@
  */
 #define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD	96
 
+#define GVE_DQO_RX_HWTSTAMP_VALID	0x1
+
 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
 struct gve_rx_desc_queue {
 	struct gve_rx_desc *desc_ring; /* the descriptor ring */
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
index d17da841b5a0..f7786b03c744 100644
--- a/drivers/net/ethernet/google/gve/gve_desc_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
@@ -236,7 +236,8 @@ struct gve_rx_compl_desc_dqo {
 
 	u8 status_error1;
 
-	__le16 reserved5;
+	u8 reserved5;
+	u8 ts_sub_nsecs_low;
 	__le16 buf_id; /* Buffer ID which was sent on the buffer queue. */
 
 	union {
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 55393b784317..1aff3bbb8cfc 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -456,14 +456,20 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
  * Note that this means if the time delta between packet reception and the last
  * clock read is greater than ~2 seconds, this will provide invalid results.
  */
-static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx, u32 hwts)
+static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx,
+				const struct gve_rx_compl_desc_dqo *desc)
 {
 	u64 last_read = READ_ONCE(rx->gve->last_sync_nic_counter);
 	struct sk_buff *skb = rx->ctx.skb_head;
-	u32 low = (u32)last_read;
-	s32 diff = hwts - low;
-
-	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff);
+	u32 ts, low;
+	s32 diff;
+
+	if (desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID) {
+		ts = le32_to_cpu(desc->ts);
+		low = (u32)last_read;
+		diff = ts - low;
+		skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff);
+	}
 }
 
 static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
@@ -944,7 +950,7 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
 	gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);
 
 	if (rx->gve->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)
-		gve_rx_skb_hwtstamp(rx, le32_to_cpu(desc->ts));
+		gve_rx_skb_hwtstamp(rx, desc);
 
 	/* RSC packets must set gso_size otherwise the TCP stack will complain
 	 * that packets are larger than MTU.
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 65302c41bfb1..38875c196cb6 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -148,6 +148,7 @@ config HIBMCGE
 	tristate "Hisilicon BMC Gigabit Ethernet Device Support"
 	depends on PCI && PCI_MSI
 	select PHYLIB
+	select FIXED_PHY
 	select MOTORCOMM_PHY
 	select REALTEK_PHY
 	help
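[A userspace demo of the timestamp-widening trick gve_rx_skb_hwtstamp() uses above: the NIC reports only the low 32 bits of a nanosecond counter, and the driver rebuilds the full 64-bit value from the last synced counter read. The signed 32-bit subtraction handles wraparound in either direction, as long as the two values are within ~2.1 s (2^31 ns) of each other; the values below are invented:]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t widen(uint64_t last_read, uint32_t hwts)
{
	/* wraps correctly in two's complement */
	int32_t diff = (int32_t)(hwts - (uint32_t)last_read);

	return last_read + diff;
}

int main(void)
{
	uint64_t last = 0x500000000ull;	/* last 64-bit counter read */

	/* packet stamped slightly after the read, low 32 bits wrapped */
	printf("%#" PRIx64 "\n", widen(last, 0x00000010));	/* 0x500000010 */
	/* packet stamped slightly before the read */
	printf("%#" PRIx64 "\n", widen(last, 0xfffffff0));	/* 0x4fffffff0 */
	return 0;
}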
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index b53561c34708..0a8a48cd4bce 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -99,19 +99,21 @@ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
 
 	index = ice_adapter_xa_index(pdev);
 	scoped_guard(mutex, &ice_adapters_mutex) {
-		err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
-		if (err == -EBUSY) {
-			adapter = xa_load(&ice_adapters, index);
+		adapter = xa_load(&ice_adapters, index);
+		if (adapter) {
 			refcount_inc(&adapter->refcount);
 			WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
 			return adapter;
 		}
+		err = xa_reserve(&ice_adapters, index, GFP_KERNEL);
 		if (err)
 			return ERR_PTR(err);
 
 		adapter = ice_adapter_new(pdev);
-		if (!adapter)
+		if (!adapter) {
+			xa_release(&ice_adapters, index);
 			return ERR_PTR(-ENOMEM);
+		}
 		xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
 	}
 	return adapter;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index 142823af1f9e..3e1052d070cf 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -863,6 +863,9 @@ static void idpf_ptp_release_vport_tstamp(struct idpf_vport *vport)
 		u64_stats_inc(&vport->tstamp_stats.flushed);
 
 		list_del(&ptp_tx_tstamp->list_member);
+		if (ptp_tx_tstamp->skb)
+			consume_skb(ptp_tx_tstamp->skb);
+
 		kfree(ptp_tx_tstamp);
 	}
 	u64_stats_update_end(&vport->tstamp_stats.stats_sync);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
index 8a2e0f8c5e36..61cedb6f2854 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
@@ -517,6 +517,7 @@ idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
 	shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
 	skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps);
 	consume_skb(ptp_tx_tstamp->skb);
+	ptp_tx_tstamp->skb = NULL;
 
 	list_add(&ptp_tx_tstamp->list_member,
 		 &tx_tstamp_caps->latches_free);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 90d4e57b1c93..ca1ccc630001 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -12101,7 +12101,6 @@ static void ixgbe_remove(struct pci_dev *pdev)
 
 	devl_port_unregister(&adapter->devlink_port);
 	devl_unlock(adapter->devlink);
-	devlink_free(adapter->devlink);
 
 	ixgbe_stop_ipsec_offload(adapter);
 	ixgbe_clear_interrupt_scheme(adapter);
@@ -12137,6 +12136,8 @@ static void ixgbe_remove(struct pci_dev *pdev)
 
 	if (disable_dev)
 		pci_disable_device(pdev);
+
+	devlink_free(adapter->devlink);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 4af149b63a39..0334ed4b8fa3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -50,6 +50,9 @@ enum ixgbe_pfvf_api_rev {
 	ixgbe_mbox_api_12,	/* API version 1.2, linux/freebsd VF driver */
 	ixgbe_mbox_api_13,	/* API version 1.3, linux/freebsd VF driver */
 	ixgbe_mbox_api_14,	/* API version 1.4, linux/freebsd VF driver */
+	ixgbe_mbox_api_15,	/* API version 1.5, linux/freebsd VF driver */
+	ixgbe_mbox_api_16,	/* API version 1.6, linux/freebsd VF driver */
+	ixgbe_mbox_api_17,	/* API version 1.7, linux/freebsd VF driver */
 	/* This value should always be last */
 	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
 };
@@ -86,6 +89,12 @@ enum ixgbe_pfvf_api_rev {
 
 #define IXGBE_VF_GET_LINK_STATE	0x10 /* get vf link state */
 
+/* mailbox API, version 1.6 VF requests */
+#define IXGBE_VF_GET_PF_LINK_STATE	0x11 /* request PF to send link info */
+
+/* mailbox API, version 1.7 VF requests */
+#define IXGBE_VF_FEATURES_NEGOTIATE	0x12 /* get features supported by PF */
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN	4
 /* word in permanent address message with the current multicast type */
@@ -96,6 +105,12 @@ enum ixgbe_pfvf_api_rev {
 #define IXGBE_VF_MBX_INIT_TIMEOUT	2000 /* number of retries on mailbox */
 #define IXGBE_VF_MBX_INIT_DELAY		500  /* microseconds between retries */
 
+/* features negotiated between PF/VF */
+#define IXGBEVF_PF_SUP_IPSEC		BIT(0)
+#define IXGBEVF_PF_SUP_ESX_MBX		BIT(1)
+
+#define IXGBE_SUPPORTED_FEATURES	IXGBEVF_PF_SUP_IPSEC
+
 struct ixgbe_hw;
 
 int ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
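[A sketch of the lookup-or-create flow the ice_adapter hunk above moves to. xa_load()/xa_reserve()/xa_release()/xa_store() are the real XArray API; my_obj, my_obj_new() and the serializing mutex are hypothetical. Reserving the slot first means the final store cannot fail with -ENOMEM after the object exists:]

#include <linux/refcount.h>
#include <linux/xarray.h>

struct my_obj {
	refcount_t refcount;
};

static struct my_obj *my_obj_new(void);	/* assumed allocator */

/* caller holds the mutex serializing get/put */
static struct my_obj *my_obj_get(struct xarray *xa, unsigned long index)
{
	struct my_obj *obj;
	int err;

	obj = xa_load(xa, index);
	if (obj) {
		refcount_inc(&obj->refcount);
		return obj;
	}

	err = xa_reserve(xa, index, GFP_KERNEL);
	if (err)
		return ERR_PTR(err);

	obj = my_obj_new();
	if (!obj) {
		xa_release(xa, index);	/* drop the reserved slot */
		return ERR_PTR(-ENOMEM);
	}

	xa_store(xa, index, obj, GFP_KERNEL);
	return obj;
}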
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 32ac1e020d91..ee133d6749b3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -510,6 +510,8 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
 	case ixgbe_mbox_api_12:
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_14:
+	case ixgbe_mbox_api_16:
+	case ixgbe_mbox_api_17:
 		/* Version 1.1 supports jumbo frames on VFs if PF has
 		 * jumbo frames enabled which means legacy VFs are
 		 * disabled
@@ -1046,6 +1048,8 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
 	case ixgbe_mbox_api_12:
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_14:
+	case ixgbe_mbox_api_16:
+	case ixgbe_mbox_api_17:
 		adapter->vfinfo[vf].vf_api = api;
 		return 0;
 	default:
@@ -1072,6 +1076,8 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
 	case ixgbe_mbox_api_12:
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_14:
+	case ixgbe_mbox_api_16:
+	case ixgbe_mbox_api_17:
 		break;
 	default:
 		return -1;
@@ -1112,6 +1118,8 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
 
 	/* verify the PF is supporting the correct API */
 	switch (adapter->vfinfo[vf].vf_api) {
+	case ixgbe_mbox_api_17:
+	case ixgbe_mbox_api_16:
 	case ixgbe_mbox_api_14:
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_12:
@@ -1145,6 +1153,8 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
 
 	/* verify the PF is supporting the correct API */
 	switch (adapter->vfinfo[vf].vf_api) {
+	case ixgbe_mbox_api_17:
+	case ixgbe_mbox_api_16:
 	case ixgbe_mbox_api_14:
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_12:
@@ -1174,6 +1184,8 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
 		fallthrough;
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_14:
+	case ixgbe_mbox_api_16:
+	case ixgbe_mbox_api_17:
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -1244,6 +1256,8 @@ static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
 	case ixgbe_mbox_api_12:
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_14:
+	case ixgbe_mbox_api_16:
+	case ixgbe_mbox_api_17:
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -1254,6 +1268,65 @@ static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
 	return 0;
 }
 
+/**
+ * ixgbe_send_vf_link_status - send link status data to VF
+ * @adapter: pointer to adapter struct
+ * @msgbuf: pointer to message buffers
+ * @vf: VF identifier
+ *
+ * Reply for IXGBE_VF_GET_PF_LINK_STATE mbox command sending link status data.
+ *
+ * Return: 0 on success or -EOPNOTSUPP when operation is not supported.
+ */
+static int ixgbe_send_vf_link_status(struct ixgbe_adapter *adapter,
+				     u32 *msgbuf, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	switch (adapter->vfinfo[vf].vf_api) {
+	case ixgbe_mbox_api_16:
+	case ixgbe_mbox_api_17:
+		if (hw->mac.type != ixgbe_mac_e610)
+			return -EOPNOTSUPP;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	/* Simply provide stored values as watchdog & link status events take
+	 * care of its freshness.
+	 */
+	msgbuf[1] = adapter->link_speed;
+	msgbuf[2] = adapter->link_up;
+
+	return 0;
+}
+
+/**
+ * ixgbe_negotiate_vf_features - negotiate supported features with VF driver
+ * @adapter: pointer to adapter struct
+ * @msgbuf: pointer to message buffers
+ * @vf: VF identifier
+ *
+ * Return: 0 on success or -EOPNOTSUPP when operation is not supported.
+ */
+static int ixgbe_negotiate_vf_features(struct ixgbe_adapter *adapter,
+				       u32 *msgbuf, u32 vf)
+{
+	u32 features = msgbuf[1];
+
+	switch (adapter->vfinfo[vf].vf_api) {
+	case ixgbe_mbox_api_17:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	features &= IXGBE_SUPPORTED_FEATURES;
+	msgbuf[1] = features;
+
+	return 0;
+}
+
 static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 {
 	u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1328,6 +1401,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 	case IXGBE_VF_IPSEC_DEL:
 		retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf);
 		break;
+	case IXGBE_VF_GET_PF_LINK_STATE:
+		retval = ixgbe_send_vf_link_status(adapter, msgbuf, vf);
+		break;
+	case IXGBE_VF_FEATURES_NEGOTIATE:
+		retval = ixgbe_negotiate_vf_features(adapter, msgbuf, vf);
+		break;
 	default:
 		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
 		retval = -EIO;
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index a9bc96f6399d..e177d1d58696 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -28,6 +28,7 @@
 
 /* Link speed */
 typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_UNKNOWN	0
 #define IXGBE_LINK_SPEED_1GB_FULL	0x0020
 #define IXGBE_LINK_SPEED_10GB_FULL	0x0080
 #define IXGBE_LINK_SPEED_100_FULL	0x0008
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index 65580b9cb06f..fce35924ff8b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -273,6 +273,9 @@ static int ixgbevf_ipsec_add_sa(struct net_device *dev,
 	adapter = netdev_priv(dev);
 	ipsec = adapter->ipsec;
 
+	if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+		return -EOPNOTSUPP;
+
 	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
 		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");
 		return -EINVAL;
@@ -405,6 +408,9 @@ static void ixgbevf_ipsec_del_sa(struct net_device *dev,
 	adapter = netdev_priv(dev);
 	ipsec = adapter->ipsec;
 
+	if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+		return;
+
 	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
 		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
 
@@ -612,6 +618,10 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
 	size_t size;
 
 	switch (adapter->hw.api_version) {
+	case ixgbe_mbox_api_17:
+		if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+			return;
+		break;
 	case ixgbe_mbox_api_14:
 		break;
 	default:
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 3a379e6a3a2a..039187607e98 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -363,6 +363,13 @@ struct ixgbevf_adapter {
 	struct ixgbe_hw hw;
 	u16 msg_enable;
 
+	u32 pf_features;
+#define IXGBEVF_PF_SUP_IPSEC		BIT(0)
+#define IXGBEVF_PF_SUP_ESX_MBX		BIT(1)
+
+#define IXGBEVF_SUPPORTED_FEATURES	(IXGBEVF_PF_SUP_IPSEC | \
+					 IXGBEVF_PF_SUP_ESX_MBX)
+
 	struct ixgbevf_hw_stats stats;
 
 	unsigned long state;
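[A toy illustration of the 1.7 feature handshake added above: the VF offers its supported-feature bitmask over the mailbox, the PF ANDs it with its own, and both sides act only on the intersection. The bit values are the ones defined in ixgbe_mbx.h; pf_negotiate() is a stand-in for the mailbox round trip:]

#include <stdint.h>
#include <stdio.h>

#define IXGBEVF_PF_SUP_IPSEC	(1u << 0)
#define IXGBEVF_PF_SUP_ESX_MBX	(1u << 1)

/* PF side: mask the VF's offer down to what the PF supports */
static uint32_t pf_negotiate(uint32_t vf_offer, uint32_t pf_supported)
{
	return vf_offer & pf_supported;
}

int main(void)
{
	uint32_t vf_offer = IXGBEVF_PF_SUP_IPSEC | IXGBEVF_PF_SUP_ESX_MBX;
	uint32_t pf_supported = IXGBEVF_PF_SUP_IPSEC;	/* IXGBE_SUPPORTED_FEATURES */

	printf("negotiated: %#x\n",
	       (unsigned)pf_negotiate(vf_offer, pf_supported));
	/* -> 0x1: only IPsec offload is enabled on both ends */
	return 0;
}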
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 28e25641b167..d5ce20f47def 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2271,10 +2271,36 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
 	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
 }
 
+/**
+ * ixgbevf_set_features - Set features supported by PF
+ * @adapter: pointer to the adapter struct
+ *
+ * Negotiate with PF supported features and then set pf_features accordingly.
+ */
+static void ixgbevf_set_features(struct ixgbevf_adapter *adapter)
+{
+	u32 *pf_features = &adapter->pf_features;
+	struct ixgbe_hw *hw = &adapter->hw;
+	int err;
+
+	err = hw->mac.ops.negotiate_features(hw, pf_features);
+	if (err && err != -EOPNOTSUPP)
+		netdev_dbg(adapter->netdev,
+			   "PF feature negotiation failed.\n");
+
+	/* Address also pre API 1.7 cases */
+	if (hw->api_version == ixgbe_mbox_api_14)
+		*pf_features |= IXGBEVF_PF_SUP_IPSEC;
+	else if (hw->api_version == ixgbe_mbox_api_15)
+		*pf_features |= IXGBEVF_PF_SUP_ESX_MBX;
+}
+
 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	static const int api[] = {
+		ixgbe_mbox_api_17,
+		ixgbe_mbox_api_16,
 		ixgbe_mbox_api_15,
 		ixgbe_mbox_api_14,
 		ixgbe_mbox_api_13,
@@ -2294,7 +2320,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 		idx++;
 	}
 
-	if (hw->api_version >= ixgbe_mbox_api_15) {
+	ixgbevf_set_features(adapter);
+
+	if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) {
 		hw->mbx.ops.init_params(hw);
 		memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
 		       sizeof(struct ixgbe_mbx_operations));
@@ -2651,6 +2679,8 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_14:
 	case ixgbe_mbox_api_15:
+	case ixgbe_mbox_api_16:
+	case ixgbe_mbox_api_17:
 		if (adapter->xdp_prog &&
 		    hw->mac.max_tx_queues == rss)
 			rss = rss > 3 ? 2 : 1;
@@ -4645,6 +4675,8 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_14:
 	case ixgbe_mbox_api_15:
+	case ixgbe_mbox_api_16:
+	case ixgbe_mbox_api_17:
 		netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
 				  (ETH_HLEN + ETH_FCS_LEN);
 		break;
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 835bbcc5cc8e..a8ed23ee66aa 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -66,6 +66,8 @@ enum ixgbe_pfvf_api_rev {
 	ixgbe_mbox_api_13,	/* API version 1.3, linux/freebsd VF driver */
 	ixgbe_mbox_api_14,	/* API version 1.4, linux/freebsd VF driver */
 	ixgbe_mbox_api_15,	/* API version 1.5, linux/freebsd VF driver */
+	ixgbe_mbox_api_16,	/* API version 1.6, linux/freebsd VF driver */
+	ixgbe_mbox_api_17,	/* API version 1.7, linux/freebsd VF driver */
 	/* This value should always be last */
 	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
 };
@@ -102,6 +104,12 @@ enum ixgbe_pfvf_api_rev {
 
 #define IXGBE_VF_GET_LINK_STATE	0x10 /* get vf link state */
 
+/* mailbox API, version 1.6 VF requests */
+#define IXGBE_VF_GET_PF_LINK_STATE	0x11 /* request PF to send link info */
+
+/* mailbox API, version 1.7 VF requests */
+#define IXGBE_VF_FEATURES_NEGOTIATE	0x12 /* get features supported by PF */
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN	4
 /* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index dcaef34b88b6..74d320879513 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -313,6 +313,8 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
 	 * is not supported for this device type.
 	 */
 	switch (hw->api_version) {
+	case ixgbe_mbox_api_17:
+	case ixgbe_mbox_api_16:
 	case ixgbe_mbox_api_15:
 	case ixgbe_mbox_api_14:
 	case ixgbe_mbox_api_13:
@@ -382,6 +384,8 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
 	 * or if the operation is not supported for this device type.
 	 */
 	switch (hw->api_version) {
+	case ixgbe_mbox_api_17:
+	case ixgbe_mbox_api_16:
 	case ixgbe_mbox_api_15:
 	case ixgbe_mbox_api_14:
 	case ixgbe_mbox_api_13:
@@ -552,6 +556,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_14:
 	case ixgbe_mbox_api_15:
+	case ixgbe_mbox_api_16:
+	case ixgbe_mbox_api_17:
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -625,6 +631,85 @@ static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
 }
 
 /**
+ * ixgbevf_get_pf_link_state - Get PF's link status
+ * @hw: pointer to the HW structure
+ * @speed: link speed
+ * @link_up: indicate if link is up/down
+ *
+ * Ask PF to provide link_up state and speed of the link.
+ *
+ * Return: IXGBE_ERR_MBX in the case of mailbox error,
+ * -EOPNOTSUPP if the op is not supported or 0 on success.
+ */
+static int ixgbevf_get_pf_link_state(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+				     bool *link_up)
+{
+	u32 msgbuf[3] = {};
+	int err;
+
+	switch (hw->api_version) {
+	case ixgbe_mbox_api_16:
+	case ixgbe_mbox_api_17:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	msgbuf[0] = IXGBE_VF_GET_PF_LINK_STATE;
+
+	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+					 ARRAY_SIZE(msgbuf));
+	if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+		err = IXGBE_ERR_MBX;
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+		/* No need to set @link_up to false as it will be done by
+		 * ixgbe_check_mac_link_vf().
+		 */
+	} else {
+		*speed = msgbuf[1];
+		*link_up = msgbuf[2];
+	}
+
+	return err;
+}
+
+/**
+ * ixgbevf_negotiate_features_vf - negotiate supported features with PF driver
+ * @hw: pointer to the HW structure
+ * @pf_features: bitmask of features supported by PF
+ *
+ * Return: IXGBE_ERR_MBX in the case of mailbox error,
+ * -EOPNOTSUPP if the op is not supported or 0 on success.
+ */
+static int ixgbevf_negotiate_features_vf(struct ixgbe_hw *hw, u32 *pf_features)
+{
+	u32 msgbuf[2] = {};
+	int err;
+
+	switch (hw->api_version) {
+	case ixgbe_mbox_api_17:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE;
+	msgbuf[1] = IXGBEVF_SUPPORTED_FEATURES;
+
+	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+					 ARRAY_SIZE(msgbuf));
+	if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+		err = IXGBE_ERR_MBX;
+		*pf_features = 0x0;
+	} else {
+		*pf_features = msgbuf[1];
+	}
+
+	return err;
+}
+
+/**
  * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
  * @hw: pointer to the HW structure
  * @vlan: 12 bit VLAN ID
@@ -659,6 +744,58 @@ mbx_err:
 }
 
 /**
+ * ixgbe_read_vflinks - Read VFLINKS register
+ * @hw: pointer to the HW structure
+ * @speed: link speed
+ * @link_up: indicate if link is up/down
+ *
+ * Get linkup status and link speed from the VFLINKS register.
+ */
+static void ixgbe_read_vflinks(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+			       bool *link_up)
+{
+	u32 vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+	/* if link status is down no point in checking to see if PF is up */
+	if (!(vflinks & IXGBE_LINKS_UP)) {
+		*link_up = false;
+		return;
+	}
+
+	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+	 * before the link status is correct
+	 */
+	if (hw->mac.type == ixgbe_mac_82599_vf) {
+		for (int i = 0; i < 5; i++) {
+			udelay(100);
+			vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+			if (!(vflinks & IXGBE_LINKS_UP)) {
+				*link_up = false;
+				return;
+			}
+		}
+	}
+
+	/* We reached this point so there's link */
+	*link_up = true;
+
+	switch (vflinks & IXGBE_LINKS_SPEED_82599) {
+	case IXGBE_LINKS_SPEED_10G_82599:
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+		break;
+	case IXGBE_LINKS_SPEED_1G_82599:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		break;
+	case IXGBE_LINKS_SPEED_100_82599:
+		*speed = IXGBE_LINK_SPEED_100_FULL;
+		break;
+	default:
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/**
  * ixgbevf_hv_set_vfta_vf - * Hyper-V variant - just a stub.
  * @hw: unused
  * @vlan: unused
@@ -702,10 +839,10 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
 				     bool *link_up,
 				     bool autoneg_wait_to_complete)
 {
+	struct ixgbevf_adapter *adapter = hw->back;
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
 	struct ixgbe_mac_info *mac = &hw->mac;
 	s32 ret_val = 0;
-	u32 links_reg;
 	u32 in_msg = 0;
 
 	/* If we were hit with a reset drop the link */
@@ -715,43 +852,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
 	if (!mac->get_link_status)
 		goto out;
 
-	/* if link status is down no point in checking to see if pf is up */
-	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
-	if (!(links_reg & IXGBE_LINKS_UP))
-		goto out;
-
-	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
-	 * before the link status is correct
-	 */
-	if (mac->type == ixgbe_mac_82599_vf) {
-		int i;
-
-		for (i = 0; i < 5; i++) {
-			udelay(100);
-			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
-
-			if (!(links_reg & IXGBE_LINKS_UP))
-				goto out;
-		}
-	}
-
-	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
-	case IXGBE_LINKS_SPEED_10G_82599:
-		*speed = IXGBE_LINK_SPEED_10GB_FULL;
-		break;
-	case IXGBE_LINKS_SPEED_1G_82599:
-		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-		break;
-	case IXGBE_LINKS_SPEED_100_82599:
-		*speed = IXGBE_LINK_SPEED_100_FULL;
-		break;
+	if (hw->mac.type == ixgbe_mac_e610_vf) {
+		ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up);
+		if (ret_val)
+			goto out;
+	} else {
+		ixgbe_read_vflinks(hw, speed, link_up);
+		if (*link_up == false)
+			goto out;
 	}
 
 	/* if the read failed it could just be a mailbox collision, best wait
 	 * until we are called again and don't report an error
 	 */
 	if (mbx->ops.read(hw, &in_msg, 1)) {
-		if (hw->api_version >= ixgbe_mbox_api_15)
+		if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX)
 			mac->get_link_status = false;
 		goto out;
 	}
@@ -951,6 +1066,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_14:
 	case ixgbe_mbox_api_15:
+	case ixgbe_mbox_api_16:
+	case ixgbe_mbox_api_17:
 		break;
 	default:
 		return 0;
@@ -1005,6 +1122,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
 	.setup_link		= ixgbevf_setup_mac_link_vf,
 	.check_link		= ixgbevf_check_mac_link_vf,
 	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
+	.negotiate_features	= ixgbevf_negotiate_features_vf,
 	.set_rar		= ixgbevf_set_rar_vf,
 	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
 	.update_xcast_mode	= ixgbevf_update_xcast_mode,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 2d791bc26ae4..4f19b8900c29 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -26,6 +26,7 @@ struct ixgbe_mac_operations {
 	s32 (*stop_adapter)(struct ixgbe_hw *);
 	s32 (*get_bus_info)(struct ixgbe_hw *);
 	s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
+	int (*negotiate_features)(struct ixgbe_hw *hw, u32 *pf_features);
 
 	/* Link */
 	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index d374a4454836..ec0e11c77cbf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1981,6 +1981,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	    !is_cgx_mapped_to_nix(pdev->subsystem_device, cgx->cgx_id)) {
 		dev_notice(dev, "CGX %d not mapped to NIX, skipping probe\n",
 			   cgx->cgx_id);
+		err = -ENODEV;
 		goto err_release_regions;
 	}
 
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 3dbb113b792c..1ed1f88dd7f8 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -677,7 +677,7 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
 		void *buf;
 		int s;
 
-		page = __dev_alloc_page(GFP_KERNEL);
+		page = __dev_alloc_page(GFP_KERNEL | GFP_DMA32);
 		if (!page)
 			return -ENOMEM;
 
@@ -800,7 +800,7 @@ mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
 		struct page *page;
 		int s;
 
-		page = __dev_alloc_page(GFP_KERNEL);
+		page = __dev_alloc_page(GFP_KERNEL | GFP_DMA32);
 		if (!page)
 			return -ENOMEM;
 
@@ -2426,6 +2426,10 @@ mtk_wed_attach(struct mtk_wed_device *dev)
 	dev->version = hw->version;
 	dev->hw->pcie_base = mtk_wed_get_pcie_base(dev);
 
+	ret = dma_set_mask_and_coherent(hw->dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto out;
+
 	if (hw->eth->dma_dev == hw->eth->dev &&
 	    of_dma_is_coherent(hw->eth->dev->of_node))
 		mtk_eth_set_dma_device(hw->eth, hw->dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index d2071aff7b8f..308b4458e0d4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1180,9 +1180,9 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
 				mlx4_unregister_mac(mdev->dev, priv->port, mac);
 
 				hlist_del_rcu(&entry->hlist);
-				kfree_rcu(entry, rcu);
 				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
 				       entry->mac, priv->port);
+				kfree_rcu(entry, rcu);
 				++removed;
 			}
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3692298e10f2..c9bdee9a8b30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -100,7 +100,7 @@ u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
 		return sizeof(struct mlx5_ksm) * 4;
 	}
 	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
-	return 0;
+	return 1;
 }
 
 u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
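[A sketch of the 32-bit DMA constraint the mtk_wed hunks above introduce: tell the DMA layer the device can only address 32 bits, and allocate the buffer pages with GFP_DMA32 so their physical addresses actually fit. Both calls are the real kernel API; "dev" is whatever device performs the DMA and the helper name is invented:]

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static struct page *my_alloc_dma32_page(struct device *dev)
{
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return NULL;	/* platform cannot honor a 32-bit mask */

	/* GFP_DMA32 keeps the page below 4 GiB even on 64-bit systems */
	return __dev_alloc_page(GFP_KERNEL | GFP_DMA32);
}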
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 00e77c71e201..0a4fb8c92268 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -772,6 +772,7 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
 				struct netlink_ext_ack *extack)
 {
 	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
+	bool allow_tunnel_mode = false;
 	struct mlx5e_ipsec *ipsec;
 	struct mlx5e_priv *priv;
 	gfp_t gfp;
@@ -803,6 +804,20 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
 		goto err_xfrm;
 	}
 
+	if (mlx5_eswitch_block_mode(priv->mdev))
+		goto unblock_ipsec;
+
+	if (x->props.mode == XFRM_MODE_TUNNEL &&
+	    x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+		allow_tunnel_mode = mlx5e_ipsec_fs_tunnel_allowed(sa_entry);
+		if (!allow_tunnel_mode) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Packet offload tunnel mode is disabled due to encap settings");
+			err = -EINVAL;
+			goto unblock_mode;
+		}
+	}
+
 	/* check esn */
 	if (x->props.flags & XFRM_STATE_ESN)
 		mlx5e_ipsec_update_esn_state(sa_entry);
@@ -817,7 +832,7 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
 
 	err = mlx5_ipsec_create_work(sa_entry);
 	if (err)
-		goto unblock_ipsec;
+		goto unblock_encap;
 
 	err = mlx5e_ipsec_create_dwork(sa_entry);
 	if (err)
@@ -832,14 +847,6 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
 	if (err)
 		goto err_hw_ctx;
 
-	if (x->props.mode == XFRM_MODE_TUNNEL &&
-	    x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
-	    !mlx5e_ipsec_fs_tunnel_enabled(sa_entry)) {
-		NL_SET_ERR_MSG_MOD(extack, "Packet offload tunnel mode is disabled due to encap settings");
-		err = -EINVAL;
-		goto err_add_rule;
-	}
-
 	/* We use *_bh() variant because xfrm_timer_handler(), which runs
 	 * in softirq context, can reach our state delete logic and we need
 	 * xa_erase_bh() there.
@@ -855,8 +862,7 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
 		queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
 				   MLX5_IPSEC_RESCHED);
 
-	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
-	    x->props.mode == XFRM_MODE_TUNNEL) {
+	if (allow_tunnel_mode) {
 		xa_lock_bh(&ipsec->sadb);
 		__xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
 			      MLX5E_IPSEC_TUNNEL_SA);
@@ -865,6 +871,11 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
 
 out:
 	x->xso.offload_handle = (unsigned long)sa_entry;
+	if (allow_tunnel_mode)
+		mlx5_eswitch_unblock_encap(priv->mdev);
+
+	mlx5_eswitch_unblock_mode(priv->mdev);
+
 	return 0;
 
 err_add_rule:
@@ -877,6 +888,11 @@ release_work:
 	if (sa_entry->work)
 		kfree(sa_entry->work->data);
 	kfree(sa_entry->work);
+unblock_encap:
+	if (allow_tunnel_mode)
+		mlx5_eswitch_unblock_encap(priv->mdev);
+unblock_mode:
+	mlx5_eswitch_unblock_mode(priv->mdev);
 unblock_ipsec:
 	mlx5_eswitch_unblock_ipsec(priv->mdev);
 err_xfrm:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 23703f28386a..f8eaaf37963b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -319,7 +319,7 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
 void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
 void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
-bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry);
+bool mlx5e_ipsec_fs_tunnel_allowed(struct mlx5e_ipsec_sa_entry *sa_entry);
 
 int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
 void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
@@ -342,6 +342,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
 void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
 				  struct mlx5e_priv *master_priv);
 void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event);
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv);
 
 static inline struct mlx5_core_dev *
 mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -387,6 +388,10 @@ static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
 static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
 {
 }
+
+static inline void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+}
 #endif
 
 #endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 6ccfc2af07b7..feef86fff4bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -1069,7 +1069,9 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 
 	/* Create FT */
 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
-		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+		rx->allow_tunnel_mode =
+			mlx5_eswitch_block_encap(mdev, rx == ipsec->rx_esw);
+
 	if (rx->allow_tunnel_mode)
 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
 	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
@@ -1310,7 +1312,9 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
 		goto err_status_rule;
 
 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
-		tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+		tx->allow_tunnel_mode =
+			mlx5_eswitch_block_encap(mdev, tx == ipsec->tx_esw);
+
 	if (tx->allow_tunnel_mode)
 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
 	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
@@ -2846,18 +2850,24 @@ void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
 	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
 }
 
-bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
+bool mlx5e_ipsec_fs_tunnel_allowed(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
-	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
-	struct mlx5e_ipsec_rx *rx;
-	struct mlx5e_ipsec_tx *tx;
+	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+	struct xfrm_state *x = sa_entry->x;
+	bool from_fdb;
 
-	rx = ipsec_rx(sa_entry->ipsec, attrs->addrs.family, attrs->type);
-	tx = ipsec_tx(sa_entry->ipsec, attrs->type);
-	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
-		return tx->allow_tunnel_mode;
+	if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) {
+		struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, x->xso.type);
+
+		from_fdb = (tx == ipsec->tx_esw);
+	} else {
+		struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, x->props.family,
+						     x->xso.type);
+
+		from_fdb = (rx == ipsec->rx_esw);
+	}
 
-	return rx->allow_tunnel_mode;
+	return mlx5_eswitch_block_encap(ipsec->mdev, from_fdb);
 }
 
 void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
@@ -2883,9 +2893,30 @@ void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
 
 void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
 {
-	if (!priv->ipsec)
-		return; /* IPsec not supported */
+	if (!priv->ipsec || mlx5_devcom_comp_get_size(priv->devcom) < 2)
+		return; /* IPsec not supported or no peers */
 
 	mlx5_devcom_send_event(priv->devcom, event, event, priv);
 	wait_for_completion(&priv->ipsec->comp);
 }
+
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+	struct mlx5_devcom_comp_dev *tmp = NULL;
+	struct mlx5e_priv *peer_priv;
+
+	if (!priv->devcom)
+		return;
+
+	if (!mlx5_devcom_for_each_peer_begin(priv->devcom))
+		goto out;
+
+	peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp);
+	if (peer_priv)
+		complete_all(&peer_priv->ipsec->comp);
+
+	mlx5_devcom_for_each_peer_end(priv->devcom);
+out:
+	mlx5_devcom_unregister_component(priv->devcom);
+	priv->devcom = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
index b4cb131c5f81..8565cfe8d7dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
@@ -893,27 +893,27 @@ int mlx5e_psp_init(struct mlx5e_priv *priv)
 
 	if (!mlx5_is_psp_device(mdev)) {
 		mlx5_core_dbg(mdev, "PSP offload not supported\n");
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	if (!MLX5_CAP_ETH(mdev, swp)) {
 		mlx5_core_dbg(mdev, "SWP not supported\n");
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
 		mlx5_core_dbg(mdev, "SWP checksum not supported\n");
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial)) {
 		mlx5_core_dbg(mdev, "SWP L4 partial checksum not supported\n");
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
 		mlx5_core_dbg(mdev, "PSP LSO not supported\n");
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	psp = kzalloc(sizeof(*psp), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a56825921c23..9c46511e7b43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -242,8 +242,8 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
 						      &attr,
 						      mlx5e_devcom_event_mpv,
 						      priv);
-	if (IS_ERR(priv->devcom))
-		return PTR_ERR(priv->devcom);
+	if (!priv->devcom)
+		return -EINVAL;
 
 	if (mlx5_core_is_mp_master(priv->mdev)) {
 		mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -256,7 +256,7 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
 
 static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
 {
-	if (IS_ERR_OR_NULL(priv->devcom))
+	if (!priv->devcom)
 		return;
 
 	if (mlx5_core_is_mp_master(priv->mdev)) {
@@ -266,6 +266,7 @@ static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
 	}
 
 	mlx5_devcom_unregister_component(priv->devcom);
+	priv->devcom = NULL;
 }
 
 static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -6120,6 +6121,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 	if (mlx5e_monitor_counter_supported(priv))
 		mlx5e_monitor_counter_cleanup(priv);
 
+	mlx5e_ipsec_disable_events(priv);
 	mlx5e_disable_blocking_events(priv);
 	mlx5e_disable_async_events(priv);
 	mlx5_lag_remove_netdev(mdev, priv->netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 263d5628ee44..1c79adc51a04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1794,14 +1794,27 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	}
 
 	prog = rcu_dereference(rq->xdp_prog);
-	if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
-		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-			struct mlx5e_wqe_frag_info *pwi;
+	if (prog) {
+		u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+
+		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
+			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
+						 rq->flags)) {
+				struct mlx5e_wqe_frag_info *pwi;
+
+				wi -= old_nr_frags - sinfo->nr_frags;
+
+				for (pwi = head_wi; pwi < wi; pwi++)
+					pwi->frag_page->frags++;
+			}
+			return NULL; /* page/packet was consumed by XDP */
+		}
 
-			for (pwi = head_wi; pwi < wi; pwi++)
-				pwi->frag_page->frags++;
+		nr_frags_free = old_nr_frags - sinfo->nr_frags;
+		if (unlikely(nr_frags_free)) {
+			wi -= nr_frags_free;
+			truesize -= nr_frags_free * frag_info->frag_stride;
 		}
-		return NULL; /* page/packet was consumed by XDP */
 	}
 
 	skb = mlx5e_build_linear_skb(
@@ -2027,6 +2040,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	u32 byte_cnt = cqe_bcnt;
 	struct skb_shared_info *sinfo;
 	unsigned int truesize = 0;
+	u32 pg_consumed_bytes;
 	struct bpf_prog *prog;
 	struct sk_buff *skb;
 	u32 linear_frame_sz;
@@ -2080,7 +2094,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 
 	while (byte_cnt) {
 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
-		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+		pg_consumed_bytes =
+			min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
 
 		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
 			truesize += pg_consumed_bytes;
@@ -2096,10 +2111,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	}
 
 	if (prog) {
+		u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+		u32 len;
+
 		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
 						 rq->flags)) {
 				struct mlx5e_frag_page *pfp;
 
+				frag_page -= old_nr_frags - sinfo->nr_frags;
+
 				for (pfp = head_page; pfp < frag_page; pfp++)
 					pfp->frags++;
 
@@ -2110,9 +2130,19 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 			return NULL; /* page/packet was consumed by XDP */
 		}
 
+		nr_frags_free = old_nr_frags - sinfo->nr_frags;
+		if (unlikely(nr_frags_free)) {
+			frag_page -= nr_frags_free;
+			truesize -= (nr_frags_free - 1) * PAGE_SIZE +
+				    ALIGN(pg_consumed_bytes,
+					  BIT(rq->mpwqe.log_stride_sz));
+		}
+
+		len = mxbuf->xdp.data_end - mxbuf->xdp.data;
+
 		skb = mlx5e_build_linear_skb(
 			rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
-			mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
+			mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
 			mxbuf->xdp.data - mxbuf->xdp.data_meta);
 		if (unlikely(!skb)) {
 			mlx5e_page_release_fragmented(rq->page_pool,
@@ -2137,8 +2167,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 			do
 				pagep->frags++;
 			while (++pagep < frag_page);
+
+			headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len,
+					skb->data_len);
+			__pskb_pull_tail(skb, headlen);
 		}
-		__pskb_pull_tail(skb, headlen);
 	} else {
 		dma_addr_t addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 7c029a7d0fd7..a2802cfc9b98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1614,7 +1614,9 @@ void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
 
 	fec_set_corrected_bits_total(priv, fec_stats);
 	fec_set_block_stats(priv, mode, fec_stats);
-	fec_set_histograms_stats(priv, mode, hist);
+
+	if (MLX5_CAP_PCAM_REG(priv->mdev, pphcr))
+		fec_set_histograms_stats(priv, mode, hist);
 }
 
 #define PPORT_ETH_EXT_OFF(c)	\
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index b7227afcb51d..2702b3885f06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -256,7 +256,7 @@ mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	u8 mode;
 
 #ifdef CONFIG_MLX5_EN_TLS
-	if (accel && accel->tls.tls_tisn)
+	if (accel->tls.tls_tisn)
 		return MLX5_INLINE_MODE_TCP_UDP;
 #endif
 
@@ -982,6 +982,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	struct mlx5e_tx_attr attr;
 	struct mlx5i_tx_wqe *wqe;
 
+	struct mlx5e_accel_tx_state accel = {};
 	struct mlx5_wqe_datagram_seg *datagram;
 	struct mlx5_wqe_ctrl_seg *cseg;
 	struct mlx5_wqe_eth_seg *eseg;
@@ -992,7 +993,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	int num_dma;
 	u16 pi;
 
-	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+	mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);
 	mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
 
 	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
@@ -1009,7 +1010,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 
 	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
 
-	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
+	mlx5e_txwqe_build_eseg_csum(sq, skb, &accel, eseg);
 
 	eseg->mss = attr.mss;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index df3756d7e52e..16eb99aba2a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -879,7 +879,7 @@ void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
 					      struct mlx5_eswitch *slave_esw);
 int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
 
-bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb);
 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
 
 int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
@@ -974,7 +974,8 @@ mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
 	return 0;
 }
 
-static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+static inline bool
+mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
 {
 	return true;
 }
= true; + esw->offloads.num_block_encap++; } - - esw->offloads.num_block_encap++; up_write(&esw->mode_lock); - return true; + + return allow_tunnel; } void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 59c00c911275..3db0387bf6dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -1430,11 +1430,10 @@ static int mlx5_lag_register_hca_devcom_comp(struct mlx5_core_dev *dev) mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS, &attr, NULL, dev); - if (IS_ERR(dev->priv.hca_devcom_comp)) { + if (!dev->priv.hca_devcom_comp) { mlx5_core_err(dev, - "Failed to register devcom HCA component, err: %ld\n", - PTR_ERR(dev->priv.hca_devcom_comp)); - return PTR_ERR(dev->priv.hca_devcom_comp); + "Failed to register devcom HCA component."); + return -EINVAL; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index d0ba83d77cd1..29e7fa09c32c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -1444,7 +1444,7 @@ static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key) compd = mlx5_devcom_register_component(mdev->priv.devc, MLX5_DEVCOM_SHARED_CLOCK, &attr, NULL, mdev); - if (IS_ERR(compd)) + if (!compd) return; mdev->clock_state->compdev = compd; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c index faa2833602c8..e749618229bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c @@ -76,20 +76,18 @@ mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev) struct mlx5_devcom_dev * mlx5_devcom_register_device(struct mlx5_core_dev *dev) { - struct mlx5_devcom_dev *devc; + struct mlx5_devcom_dev *devc = NULL; mutex_lock(&dev_list_lock); if (devcom_dev_exists(dev)) { - devc = ERR_PTR(-EEXIST); + mlx5_core_err(dev, "devcom device already exists"); goto out; } devc = mlx5_devcom_dev_alloc(dev); - if (!devc) { - devc = ERR_PTR(-ENOMEM); + if (!devc) goto out; - } list_add_tail(&devc->list, &devcom_dev_list); out: @@ -110,8 +108,10 @@ mlx5_devcom_dev_release(struct kref *ref) void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc) { - if (!IS_ERR_OR_NULL(devc)) - kref_put(&devc->ref, mlx5_devcom_dev_release); + if (!devc) + return; + + kref_put(&devc->ref, mlx5_devcom_dev_release); } static struct mlx5_devcom_comp * @@ -122,7 +122,7 @@ mlx5_devcom_comp_alloc(u64 id, const struct mlx5_devcom_match_attr *attr, comp = kzalloc(sizeof(*comp), GFP_KERNEL); if (!comp) - return ERR_PTR(-ENOMEM); + return NULL; comp->id = id; comp->key.key = attr->key; @@ -160,7 +160,7 @@ devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc, devcom = kzalloc(sizeof(*devcom), GFP_KERNEL); if (!devcom) - return ERR_PTR(-ENOMEM); + return NULL; kref_get(&devc->ref); devcom->devc = devc; @@ -240,31 +240,28 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc, mlx5_devcom_event_handler_t handler, void *data) { - struct mlx5_devcom_comp_dev *devcom; + struct mlx5_devcom_comp_dev *devcom = NULL; struct mlx5_devcom_comp *comp; - if (IS_ERR_OR_NULL(devc)) - return ERR_PTR(-EINVAL); + if (!devc) + return NULL; mutex_lock(&comp_list_lock); comp = devcom_component_get(devc, id, attr, handler); - if (IS_ERR(comp)) { - devcom = 
ERR_PTR(-EINVAL); + if (IS_ERR(comp)) goto out_unlock; - } if (!comp) { comp = mlx5_devcom_comp_alloc(id, attr, handler); - if (IS_ERR(comp)) { - devcom = ERR_CAST(comp); + if (!comp) goto out_unlock; - } + list_add_tail(&comp->comp_list, &devcom_comp_list); } mutex_unlock(&comp_list_lock); devcom = devcom_alloc_comp_dev(devc, comp, data); - if (IS_ERR(devcom)) + if (!devcom) kref_put(&comp->ref, mlx5_devcom_comp_release); return devcom; @@ -276,8 +273,10 @@ out_unlock: void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom) { - if (!IS_ERR_OR_NULL(devcom)) - devcom_free_comp_dev(devcom); + if (!devcom) + return; + + devcom_free_comp_dev(devcom); } int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom) @@ -296,7 +295,7 @@ int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom, int err = 0; void *data; - if (IS_ERR_OR_NULL(devcom)) + if (!devcom) return -ENODEV; comp = devcom->comp; @@ -338,7 +337,7 @@ void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready) bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom) { - if (IS_ERR_OR_NULL(devcom)) + if (!devcom) return false; return READ_ONCE(devcom->comp->ready); @@ -348,7 +347,7 @@ bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom) { struct mlx5_devcom_comp *comp; - if (IS_ERR_OR_NULL(devcom)) + if (!devcom) return false; comp = devcom->comp; @@ -421,21 +420,21 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom, void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom) { - if (IS_ERR_OR_NULL(devcom)) + if (!devcom) return; down_write(&devcom->comp->sem); } void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom) { - if (IS_ERR_OR_NULL(devcom)) + if (!devcom) return; up_write(&devcom->comp->sem); } int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom) { - if (IS_ERR_OR_NULL(devcom)) + if (!devcom) return 0; return down_write_trylock(&devcom->comp->sem); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c index f5c2701f6e87..8e17daae48af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c @@ -221,8 +221,8 @@ static int sd_register(struct mlx5_core_dev *dev) attr.net = mlx5_core_net(dev); devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP, &attr, NULL, dev); - if (IS_ERR(devcom)) - return PTR_ERR(devcom); + if (!devcom) + return -EINVAL; sd->devcom = devcom; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index df93625c9dfa..70c156591b0b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -978,9 +978,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) int err; dev->priv.devc = mlx5_devcom_register_device(dev); - if (IS_ERR(dev->priv.devc)) - mlx5_core_warn(dev, "failed to register devcom device %pe\n", - dev->priv.devc); + if (!dev->priv.devc) + mlx5_core_warn(dev, "failed to register devcom device\n"); err = mlx5_query_board_id(dev); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c index c281153bd411..05e5fd777d4f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c @@ -266,7 +266,7 @@ static void mlx5_iowrite64_copy(struct mlx5_wc_sq *sq, __be32 mmio_wqe[16], if (cpu_has_neon()) { 
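/* Editor's note on the hunk below: the asm copies one 64-byte WQE
 * (16 __be32 words) into write-combining MMIO using a single
 * four-register NEON load/store pair, so the device sees the doorbell
 * record as one burst. The change itself only drops the stray ';'
 * after the .arch_extension directive -- presumably because some
 * assemblers treat ';' as a statement separator, making the trailing
 * semicolon non-portable across toolchains.
 */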
kernel_neon_begin(); asm volatile - (".arch_extension simd;\n\t" + (".arch_extension simd\n\t" "ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%0]\n\t" "st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%1]" : diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c index a1c2db69b198..95fac020eb93 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c @@ -185,13 +185,13 @@ static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn, for (i = 0; i < nv->txt_count; i++) { fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0); - fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub1); + fbnic_aggregate_ring_xdp_counters(fbn, &nv->qt[i].sub1); fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl); } for (j = 0; j < nv->rxt_count; j++, i++) { - fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0); - fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1); + fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub0); + fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub1); fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl); } } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c index 8f998d26b9a3..2a84bd1d7e26 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -83,8 +83,16 @@ static void fbnic_mac_init_axi(struct fbnic_dev *fbd) static void fbnic_mac_init_qm(struct fbnic_dev *fbd) { + u64 default_meta = FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, ETH_HLEN) | + FBNIC_TWD_FLAG_REQ_COMPLETION; u32 clock_freq; + /* Configure default TWQ Metadata descriptor */ + wr32(fbd, FBNIC_QM_TWQ_DEFAULT_META_L, + lower_32_bits(default_meta)); + wr32(fbd, FBNIC_QM_TWQ_DEFAULT_META_H, + upper_32_bits(default_meta)); + /* Configure TSO behavior */ wr32(fbd, FBNIC_QM_TQS_CTL0, FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index d12b4cad84a5..e95be0e7bd9e 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -543,17 +543,21 @@ static const struct net_device_ops fbnic_netdev_ops = { static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx, struct netdev_queue_stats_rx *rx) { + u64 bytes, packets, alloc_fail, alloc_fail_bdq; struct fbnic_net *fbn = netdev_priv(dev); struct fbnic_ring *rxr = fbn->rx[idx]; struct fbnic_dev *fbd = fbn->fbd; struct fbnic_queue_stats *stats; - u64 bytes, packets, alloc_fail; u64 csum_complete, csum_none; + struct fbnic_q_triad *qt; unsigned int start; if (!rxr) return; + /* fbn->rx points to completion queues */ + qt = container_of(rxr, struct fbnic_q_triad, cmpl); + stats = &rxr->stats; do { start = u64_stats_fetch_begin(&stats->syncp); @@ -564,6 +568,20 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx, csum_none = stats->rx.csum_none; } while (u64_stats_fetch_retry(&stats->syncp, start)); + stats = &qt->sub0.stats; + do { + start = u64_stats_fetch_begin(&stats->syncp); + alloc_fail_bdq = stats->bdq.alloc_failed; + } while (u64_stats_fetch_retry(&stats->syncp, start)); + alloc_fail += alloc_fail_bdq; + + stats = &qt->sub1.stats; + do { + start = u64_stats_fetch_begin(&stats->syncp); + alloc_fail_bdq = stats->bdq.alloc_failed; + } while (u64_stats_fetch_retry(&stats->syncp, start)); + alloc_fail += alloc_fail_bdq; + rx->bytes = bytes; rx->packets = packets; rx->alloc_fail = alloc_fail; 
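For reference, the snapshot loops added above follow the kernel's standard u64_stats reader pattern. A minimal sketch of that pattern, using a hypothetical demo_stats structure rather than fbnic's types:

#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 alloc_failed;
	struct u64_stats_sync syncp;
};

/* Read a consistent snapshot of a counter updated by another context */
static u64 demo_read_alloc_failed(const struct demo_stats *stats)
{
	unsigned int start;
	u64 val;

	do {
		/* pairs with u64_stats_update_begin()/_end() in the writer */
		start = u64_stats_fetch_begin(&stats->syncp);
		val = stats->alloc_failed;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	return val;
}

On 64-bit kernels the sync object is empty and this compiles down to a plain load; on 32-bit SMP it degrades to a seqcount retry loop.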
@@ -641,7 +659,8 @@ static void fbnic_get_base_stats(struct net_device *dev, rx->bytes = fbn->rx_stats.bytes; rx->packets = fbn->rx_stats.packets; - rx->alloc_fail = fbn->rx_stats.rx.alloc_failed; + rx->alloc_fail = fbn->rx_stats.rx.alloc_failed + + fbn->bdq_stats.bdq.alloc_failed; rx->csum_complete = fbn->rx_stats.rx.csum_complete; rx->csum_none = fbn->rx_stats.rx.csum_none; } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h index e84e0527c3a9..b0a87c57910f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h @@ -68,6 +68,7 @@ struct fbnic_net { /* Storage for stats after ring destruction */ struct fbnic_queue_stats tx_stats; struct fbnic_queue_stats rx_stats; + struct fbnic_queue_stats bdq_stats; u64 link_down_events; /* Time stamping filter config */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c index cf773cc78e40..b1e8ce89870f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c @@ -904,7 +904,7 @@ static void fbnic_fill_bdq(struct fbnic_ring *bdq) netmem = page_pool_dev_alloc_netmems(bdq->page_pool); if (!netmem) { u64_stats_update_begin(&bdq->stats.syncp); - bdq->stats.rx.alloc_failed++; + bdq->stats.bdq.alloc_failed++; u64_stats_update_end(&bdq->stats.syncp); break; @@ -1242,6 +1242,7 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv, /* Walk the completion queue collecting the heads reported by NIC */ while (likely(packets < budget)) { struct sk_buff *skb = ERR_PTR(-EINVAL); + u32 pkt_bytes; u64 rcd; if ((*raw_rcd & cpu_to_le64(FBNIC_RCD_DONE)) == done) @@ -1272,37 +1273,38 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv, /* We currently ignore the action table index */ break; case FBNIC_RCD_TYPE_META: - if (unlikely(pkt->add_frag_failed)) - skb = NULL; - else if (likely(!fbnic_rcd_metadata_err(rcd))) + if (likely(!fbnic_rcd_metadata_err(rcd) && + !pkt->add_frag_failed)) { + pkt_bytes = xdp_get_buff_len(&pkt->buff); skb = fbnic_run_xdp(nv, pkt); + } /* Populate skb and invalidate XDP */ if (!IS_ERR_OR_NULL(skb)) { fbnic_populate_skb_fields(nv, rcd, skb, qt, &csum_complete, &csum_none); - - packets++; - bytes += skb->len; - napi_gro_receive(&nv->napi, skb); } else if (skb == ERR_PTR(-FBNIC_XDP_TX)) { pkt_tail = nv->qt[0].sub1.tail; - bytes += xdp_get_buff_len(&pkt->buff); + } else if (PTR_ERR(skb) == -FBNIC_XDP_CONSUME) { + fbnic_put_pkt_buff(qt, pkt, 1); } else { - if (!skb) { + if (!skb) alloc_failed++; - dropped++; - } else if (skb == ERR_PTR(-FBNIC_XDP_LEN_ERR)) { + + if (skb == ERR_PTR(-FBNIC_XDP_LEN_ERR)) length_errors++; - } else { + else dropped++; - } fbnic_put_pkt_buff(qt, pkt, 1); + goto next_dont_count; } + packets++; + bytes += pkt_bytes; +next_dont_count: pkt->buff.data_hard_start = NULL; break; @@ -1319,8 +1321,6 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv, u64_stats_update_begin(&rcq->stats.syncp); rcq->stats.packets += packets; rcq->stats.bytes += bytes; - /* Re-add ethernet header length (removed in fbnic_build_skb) */ - rcq->stats.bytes += ETH_HLEN * packets; rcq->stats.dropped += dropped; rcq->stats.rx.alloc_failed += alloc_failed; rcq->stats.rx.csum_complete += csum_complete; @@ -1414,6 +1414,17 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn, BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 4); } +void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn, + struct fbnic_ring *bdq) 
+{ + struct fbnic_queue_stats *stats = &bdq->stats; + + /* Capture stats from queues before disassociating them */ + fbn->bdq_stats.bdq.alloc_failed += stats->bdq.alloc_failed; + /* Remember to add new stats here */ + BUILD_BUG_ON(sizeof(fbn->rx_stats.bdq) / 8 != 1); +} + void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn, struct fbnic_ring *txr) { @@ -1433,8 +1444,8 @@ void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn, BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6); } -static void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn, - struct fbnic_ring *xdpr) +void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn, + struct fbnic_ring *xdpr) { struct fbnic_queue_stats *stats = &xdpr->stats; @@ -1442,9 +1453,7 @@ static void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn, return; /* Capture stats from queues before disassociating them */ - fbn->rx_stats.bytes += stats->bytes; - fbn->rx_stats.packets += stats->packets; - fbn->rx_stats.dropped += stats->dropped; + fbn->tx_stats.dropped += stats->dropped; fbn->tx_stats.bytes += stats->bytes; fbn->tx_stats.packets += stats->packets; } @@ -1488,6 +1497,15 @@ static void fbnic_remove_rx_ring(struct fbnic_net *fbn, fbn->rx[rxr->q_idx] = NULL; } +static void fbnic_remove_bdq_ring(struct fbnic_net *fbn, + struct fbnic_ring *bdq) +{ + if (!(bdq->flags & FBNIC_RING_F_STATS)) + return; + + fbnic_aggregate_ring_bdq_counters(fbn, bdq); +} + static void fbnic_free_qt_page_pools(struct fbnic_q_triad *qt) { page_pool_destroy(qt->sub0.page_pool); @@ -1507,8 +1525,8 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn, } for (j = 0; j < nv->rxt_count; j++, i++) { - fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0); - fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1); + fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub0); + fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub1); fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl); } @@ -1707,11 +1725,13 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn, while (rxt_count) { /* Configure header queue */ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL]; - fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX); + fbnic_ring_init(&qt->sub0, db, 0, + FBNIC_RING_F_CTX | FBNIC_RING_F_STATS); /* Configure payload queue */ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL]; - fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX); + fbnic_ring_init(&qt->sub1, db, 0, + FBNIC_RING_F_CTX | FBNIC_RING_F_STATS); /* Configure Rx completion queue */ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD]; @@ -2830,8 +2850,8 @@ static int fbnic_queue_start(struct net_device *dev, void *qmem, int idx) real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl); nv = fbn->napi[idx % fbn->num_napi]; - fbnic_aggregate_ring_rx_counters(fbn, &real->sub0); - fbnic_aggregate_ring_rx_counters(fbn, &real->sub1); + fbnic_aggregate_ring_bdq_counters(fbn, &real->sub0); + fbnic_aggregate_ring_bdq_counters(fbn, &real->sub1); fbnic_aggregate_ring_rx_counters(fbn, &real->cmpl); memcpy(real, qmem, sizeof(*real)); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h index 31fac0ba0902..ca37da5a0b17 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h @@ -92,6 +92,9 @@ struct fbnic_queue_stats { u64 csum_none; u64 length_errors; } rx; + struct { + u64 alloc_failed; + } bdq; }; u64 dropped; struct u64_stats_sync syncp; @@ -165,8 +168,12 @@ fbnic_features_check(struct sk_buff 
*skb, struct net_device *dev, void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn, struct fbnic_ring *rxr); +void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn, + struct fbnic_ring *rxr); void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn, struct fbnic_ring *txr); +void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn, + struct fbnic_ring *xdpr); int fbnic_alloc_napi_vectors(struct fbnic_net *fbn); void fbnic_free_napi_vectors(struct fbnic_net *fbn); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 74ad1d73b465..40b1bfc600a7 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -708,6 +708,11 @@ static int sparx5_start(struct sparx5 *sparx5) /* Init masks */ sparx5_update_fwd(sparx5); + /* Init flood masks */ + for (int pgid = sparx5_get_pgid(sparx5, PGID_UC_FLOOD); + pgid <= sparx5_get_pgid(sparx5, PGID_BCAST); pgid++) + sparx5_pgid_clear(sparx5, pgid); + /* CPU copy CPU pgids */ spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), sparx5, ANA_AC_PGID_MISC_CFG(sparx5_get_pgid(sparx5, PGID_CPU))); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c index bc9ecb9392cd..0a71abbd3da5 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c @@ -176,6 +176,7 @@ static int sparx5_port_bridge_join(struct sparx5_port *port, struct net_device *bridge, struct netlink_ext_ack *extack) { + struct switchdev_brport_flags flags = {0}; struct sparx5 *sparx5 = port->sparx5; struct net_device *ndev = port->ndev; int err; @@ -205,6 +206,11 @@ static int sparx5_port_bridge_join(struct sparx5_port *port, */ __dev_mc_unsync(ndev, sparx5_mc_unsync); + /* Enable uc/mc/bc flooding */ + flags.mask = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + flags.val = flags.mask; + sparx5_port_attr_bridge_flags(port, flags); + return 0; err_switchdev_offload: @@ -215,6 +221,7 @@ err_switchdev_offload: static void sparx5_port_bridge_leave(struct sparx5_port *port, struct net_device *bridge) { + struct switchdev_brport_flags flags = {0}; struct sparx5 *sparx5 = port->sparx5; switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL); @@ -234,6 +241,11 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port, /* Port enters host mode, therefore restore the mc list */ __dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync); + + /* Disable uc/mc/bc flooding */ + flags.mask = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + flags.val = 0; + sparx5_port_attr_bridge_flags(port, flags); } static int sparx5_port_changeupper(struct net_device *dev, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c index d42097aa60a0..494782871903 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c @@ -167,16 +167,6 @@ void sparx5_update_fwd(struct sparx5 *sparx5) /* Divide up fwd mask in 32 bit words */ bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS); - /* Update flood masks */ - for (port = sparx5_get_pgid(sparx5, PGID_UC_FLOOD); - port <= sparx5_get_pgid(sparx5, PGID_BCAST); port++) { - spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port)); - if (is_sparx5(sparx5)) { - spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port)); - spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port)); 
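The join/leave hooks above now drive flooding through bridge port flags instead of rewriting the PGID masks on every forwarding update (the removed flood-mask loop in sparx5_update_fwd continues just below). A rough sketch of how such a handler can map flags onto flood PGIDs -- illustrative only, with a hypothetical demo_pgid_set_port() helper, not the sparx5 implementation:

static void demo_apply_bridge_flags(struct sparx5_port *port,
				    struct switchdev_brport_flags flags)
{
	/* Only touch PGIDs whose corresponding flag is set in the mask */
	if (flags.mask & BR_FLOOD)
		demo_pgid_set_port(port, PGID_UC_FLOOD,
				   !!(flags.val & BR_FLOOD));
	if (flags.mask & BR_MCAST_FLOOD)
		demo_pgid_set_port(port, PGID_MC_FLOOD,
				   !!(flags.val & BR_MCAST_FLOOD));
	if (flags.mask & BR_BCAST_FLOOD)
		demo_pgid_set_port(port, PGID_BCAST,
				   !!(flags.val & BR_BCAST_FLOOD));
}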
- } - } - /* Update SRC masks */ for (port = 0; port < sparx5->data->consts->n_ports; port++) { if (test_bit(port, sparx5->bridge_fwd_mask)) { diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c index 545710dadcf5..d2be1be37716 100644 --- a/drivers/net/ethernet/mscc/ocelot_stats.c +++ b/drivers/net/ethernet/mscc/ocelot_stats.c @@ -1021,6 +1021,6 @@ int ocelot_stats_init(struct ocelot *ocelot) void ocelot_stats_deinit(struct ocelot *ocelot) { - cancel_delayed_work(&ocelot->stats_work); + disable_delayed_work_sync(&ocelot->stats_work); destroy_workqueue(ocelot->stats_queue); } diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig index 01fe76786f77..c99758adf3ad 100644 --- a/drivers/net/ethernet/pensando/Kconfig +++ b/drivers/net/ethernet/pensando/Kconfig @@ -24,6 +24,7 @@ config IONIC select NET_DEVLINK select DIMLIB select PAGE_POOL + select AUXILIARY_BUS help This enables the support for the Pensando family of Ethernet adapters. More specific information on this driver can be diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile index 4e7642a2d25f..a598972fef41 100644 --- a/drivers/net/ethernet/pensando/ionic/Makefile +++ b/drivers/net/ethernet/pensando/ionic/Makefile @@ -5,5 +5,5 @@ obj-$(CONFIG_IONIC) := ionic.o ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \ ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \ - ionic_txrx.o ionic_stats.o ionic_fw.o + ionic_txrx.o ionic_stats.o ionic_fw.o ionic_aux.o ionic-$(CONFIG_PTP_1588_CLOCK) += ionic_phc.o diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h index 04f00ea94230..85198e6a806e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic.h +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -65,16 +65,9 @@ struct ionic { int watchdog_period; }; -struct ionic_admin_ctx { - struct completion work; - union ionic_adminq_cmd cmd; - union ionic_adminq_comp comp; -}; - int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, const int err, const bool do_msg); -int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode, u8 status, int err); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_api.h b/drivers/net/ethernet/pensando/ionic/ionic_api.h new file mode 100644 index 000000000000..bd88666836b8 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_api.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. 
*/ + +#ifndef _IONIC_API_H_ +#define _IONIC_API_H_ + +#include <linux/auxiliary_bus.h> +#include "ionic_if.h" +#include "ionic_regs.h" + +/** + * struct ionic_aux_dev - Auxiliary device information + * @lif: Logical interface + * @idx: Index identifier + * @adev: Auxiliary device + */ +struct ionic_aux_dev { + struct ionic_lif *lif; + int idx; + struct auxiliary_device adev; +}; + +/** + * struct ionic_admin_ctx - Admin command context + * @work: Work completion wait queue element + * @cmd: Admin command (64B) to be copied to the queue + * @comp: Admin completion (16B) copied from the queue + */ +struct ionic_admin_ctx { + struct completion work; + union ionic_adminq_cmd cmd; + union ionic_adminq_comp comp; +}; + +#define IONIC_INTR_INDEX_NOT_ASSIGNED -1 +#define IONIC_INTR_NAME_MAX_SZ 32 + +/** + * struct ionic_intr_info - Interrupt information + * @name: Name identifier + * @rearm_count: Interrupt rearm count + * @index: Interrupt index position + * @vector: Interrupt number + * @dim_coal_hw: Interrupt coalesce value in hardware units + * @affinity_mask: CPU affinity mask + * @aff_notify: context for notification of IRQ affinity changes + */ +struct ionic_intr_info { + char name[IONIC_INTR_NAME_MAX_SZ]; + u64 rearm_count; + unsigned int index; + unsigned int vector; + u32 dim_coal_hw; + cpumask_var_t *affinity_mask; + struct irq_affinity_notify aff_notify; +}; + +/** + * ionic_adminq_post_wait - Post an admin command and wait for response + * @lif: Logical interface + * @ctx: API admin command context + * + * Post the command to an admin queue in the ethernet driver. If this command + * succeeds, then the command has been posted, but that does not indicate a + * completion. If this command returns success, then the completion callback + * will eventually be called. + * + * Return: zero or negative error status + */ +int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); + +/** + * ionic_error_to_errno - Transform ionic_if errors to os errno + * @code: Ionic error number + * + * Return: Negative OS error number or zero + */ +int ionic_error_to_errno(enum ionic_status_code code); + +/** + * ionic_request_rdma_reset - request reset or disable the device or lif + * @lif: Logical interface + * + * The reset is triggered asynchronously. It will wait until reset request + * completes or times out. + */ +void ionic_request_rdma_reset(struct ionic_lif *lif); + +/** + * ionic_intr_alloc - Reserve a device interrupt + * @lif: Logical interface + * @intr: Reserved ionic interrupt structure + * + * Reserve an interrupt index and get irq number for that index. + * + * Return: zero or negative error status + */ +int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr); + +/** + * ionic_intr_free - Release a device interrupt index + * @lif: Logical interface + * @intr: Interrupt index + * + * Mark the interrupt index unused so that it can be reserved again. 
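+ * Note that this only releases the index: callers such as
+ * ionic_qcq_intr_free() free the Linux irq itself before
+ * returning the index to the pool.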
+ */ +void ionic_intr_free(struct ionic_lif *lif, int intr); + +/** + * ionic_get_cmb - Reserve cmb pages + * @lif: Logical interface + * @pgid: First page index + * @pgaddr: First page bus addr (contiguous) + * @order: Log base two number of pages (PAGE_SIZE) + * @stride_log2: Size of stride to determine CMB pool + * @expdb: Will be set to true if this CMB region has expdb enabled + * + * Return: zero or negative error status + */ +int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, + int order, u8 stride_log2, bool *expdb); + +/** + * ionic_put_cmb - Release cmb pages + * @lif: Logical interface + * @pgid: First page index + * @order: Log base two number of pages (PAGE_SIZE) + */ +void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order); + +#endif /* _IONIC_API_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_aux.c b/drivers/net/ethernet/pensando/ionic/ionic_aux.c new file mode 100644 index 000000000000..a2be338eb3e5 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_aux.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */ + +#include <linux/kernel.h> +#include "ionic.h" +#include "ionic_lif.h" +#include "ionic_aux.h" + +static DEFINE_IDA(aux_ida); + +static void ionic_auxbus_release(struct device *dev) +{ + struct ionic_aux_dev *ionic_adev; + + ionic_adev = container_of(dev, struct ionic_aux_dev, adev.dev); + ida_free(&aux_ida, ionic_adev->adev.id); + kfree(ionic_adev); +} + +int ionic_auxbus_register(struct ionic_lif *lif) +{ + struct ionic_aux_dev *ionic_adev; + struct auxiliary_device *aux_dev; + int err, id; + + if (!(le64_to_cpu(lif->ionic->ident.lif.capabilities) & IONIC_LIF_CAP_RDMA)) + return 0; + + ionic_adev = kzalloc(sizeof(*ionic_adev), GFP_KERNEL); + if (!ionic_adev) + return -ENOMEM; + + aux_dev = &ionic_adev->adev; + + id = ida_alloc(&aux_ida, GFP_KERNEL); + if (id < 0) { + dev_err(lif->ionic->dev, "Failed to allocate aux id: %d\n", id); + kfree(ionic_adev); + return id; + } + + aux_dev->id = id; + aux_dev->name = "rdma"; + aux_dev->dev.parent = &lif->ionic->pdev->dev; + aux_dev->dev.release = ionic_auxbus_release; + ionic_adev->lif = lif; + err = auxiliary_device_init(aux_dev); + if (err) { + dev_err(lif->ionic->dev, "Failed to initialize %s aux device: %d\n", + aux_dev->name, err); + ida_free(&aux_ida, id); + kfree(ionic_adev); + return err; + } + + err = auxiliary_device_add(aux_dev); + if (err) { + dev_err(lif->ionic->dev, "Failed to add %s aux device: %d\n", + aux_dev->name, err); + auxiliary_device_uninit(aux_dev); + return err; + } + + lif->ionic_adev = ionic_adev; + return 0; +} + +void ionic_auxbus_unregister(struct ionic_lif *lif) +{ + mutex_lock(&lif->adev_lock); + if (!lif->ionic_adev) + goto out; + + auxiliary_device_delete(&lif->ionic_adev->adev); + auxiliary_device_uninit(&lif->ionic_adev->adev); + + lif->ionic_adev = NULL; +out: + mutex_unlock(&lif->adev_lock); +} + +void ionic_request_rdma_reset(struct ionic_lif *lif) +{ + struct ionic *ionic = lif->ionic; + int err; + + union ionic_dev_cmd cmd = { + .cmd.opcode = IONIC_CMD_RDMA_RESET_LIF, + .cmd.lif_index = cpu_to_le16(lif->index), + }; + + mutex_lock(&ionic->dev_cmd_lock); + + ionic_dev_cmd_go(&ionic->idev, &cmd); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + + mutex_unlock(&ionic->dev_cmd_lock); + + if (err) + pr_warn("%s request_reset: error %d\n", __func__, err); +} +EXPORT_SYMBOL_NS(ionic_request_rdma_reset, "NET_IONIC"); diff --git 
a/drivers/net/ethernet/pensando/ionic/ionic_aux.h b/drivers/net/ethernet/pensando/ionic/ionic_aux.h new file mode 100644 index 000000000000..f5528a9f187d --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_aux.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */ + +#ifndef _IONIC_AUX_H_ +#define _IONIC_AUX_H_ + +int ionic_auxbus_register(struct ionic_lif *lif); +void ionic_auxbus_unregister(struct ionic_lif *lif); + +#endif /* _IONIC_AUX_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index 136bfa3516d0..70d86c5f52fb 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -9,6 +9,7 @@ #include "ionic.h" #include "ionic_bus.h" #include "ionic_lif.h" +#include "ionic_aux.h" #include "ionic_debugfs.h" /* Supported devices */ @@ -271,6 +272,8 @@ static int ionic_setup_one(struct ionic *ionic) } ionic_debugfs_add_ident(ionic); + ionic_map_cmb(ionic); + err = ionic_init(ionic); if (err) { dev_err(dev, "Cannot init device: %d, aborting\n", err); @@ -375,6 +378,8 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_deregister_devlink; } + ionic_auxbus_register(ionic->lif); + mod_timer(&ionic->watchdog_timer, round_jiffies(jiffies + ionic->watchdog_period)); ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE); @@ -416,6 +421,7 @@ static void ionic_remove(struct pci_dev *pdev) if (ionic->lif->doorbell_wa) cancel_delayed_work_sync(&ionic->doorbell_check_dwork); + ionic_auxbus_unregister(ionic->lif); ionic_lif_unregister(ionic->lif); ionic_devlink_unregister(ionic); ionic_lif_deinit(ionic->lif); @@ -445,6 +451,7 @@ static void ionic_reset_prepare(struct pci_dev *pdev) timer_delete_sync(&ionic->watchdog_timer); cancel_work_sync(&lif->deferred.work); + ionic_auxbus_unregister(ionic->lif); mutex_lock(&lif->queue_lock); ionic_stop_queues_reconfig(lif); ionic_txrx_free(lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 093c5358b6e8..ab27e9225c1e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -199,13 +199,201 @@ void ionic_init_devinfo(struct ionic *ionic) dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version); } +static void ionic_map_disc_cmb(struct ionic *ionic) +{ + struct ionic_identity *ident = &ionic->ident; + u32 length_reg0, length, offset, num_regions; + struct ionic_dev_bar *bar = ionic->bars; + struct ionic_dev *idev = &ionic->idev; + struct device *dev = ionic->dev; + int err, sz, i; + u64 end; + + mutex_lock(&ionic->dev_cmd_lock); + + ionic_dev_cmd_discover_cmb(idev); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + if (!err) { + sz = min(sizeof(ident->cmb_layout), + sizeof(idev->dev_cmd_regs->data)); + memcpy_fromio(&ident->cmb_layout, + &idev->dev_cmd_regs->data, sz); + } + mutex_unlock(&ionic->dev_cmd_lock); + + if (err) { + dev_warn(dev, "Cannot discover CMB layout, disabling CMB\n"); + return; + } + + bar += 2; + + num_regions = le32_to_cpu(ident->cmb_layout.num_regions); + if (!num_regions || num_regions > IONIC_MAX_CMB_REGIONS) { + dev_warn(dev, "Invalid number of CMB entries (%d)\n", + num_regions); + return; + } + + dev_dbg(dev, "ionic_cmb_layout_identity num_regions %d flags %x:\n", + num_regions, ident->cmb_layout.flags); + + for (i = 0; i < num_regions; i++) { + 
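+		/* Validate every advertised region before mapping anything:
+		 * offset and length are expressed in 64KB pages, so the
+		 * region end is compared against the BAR length scaled
+		 * down by IONIC_CMB_SHIFT_64K.
+		 */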
offset = le32_to_cpu(ident->cmb_layout.region[i].offset); + length = le32_to_cpu(ident->cmb_layout.region[i].length); + end = offset + length; + + dev_dbg(dev, "CMB entry %d: bar_num %u cmb_type %u offset %x length %u\n", + i, ident->cmb_layout.region[i].bar_num, + ident->cmb_layout.region[i].cmb_type, + offset, length); + + if (end > (bar->len >> IONIC_CMB_SHIFT_64K)) { + dev_warn(dev, "Out of bounds CMB region %d offset %x length %u\n", + i, offset, length); + return; + } + } + + /* if first entry matches PCI config, expdb is not supported */ + if (ident->cmb_layout.region[0].bar_num == bar->res_index && + le32_to_cpu(ident->cmb_layout.region[0].length) == bar->len && + !ident->cmb_layout.region[0].offset) { + dev_warn(dev, "No CMB mapping discovered\n"); + return; + } + + /* process first entry for regular mapping */ + length_reg0 = le32_to_cpu(ident->cmb_layout.region[0].length); + if (!length_reg0) { + dev_warn(dev, "region len = 0. No CMB mapping discovered\n"); + return; + } + + /* Verify first entry size matches expected 8MB size (in 64KB pages) */ + if (length_reg0 != IONIC_BAR2_CMB_ENTRY_SIZE >> IONIC_CMB_SHIFT_64K) { + dev_warn(dev, "Unexpected CMB size in entry 0: %u pages\n", + length_reg0); + return; + } + + sz = BITS_TO_LONGS((length_reg0 << IONIC_CMB_SHIFT_64K) / + PAGE_SIZE) * sizeof(long); + idev->cmb_inuse = kzalloc(sz, GFP_KERNEL); + if (!idev->cmb_inuse) { + dev_warn(dev, "No memory for CMB, disabling\n"); + idev->phy_cmb_pages = 0; + idev->phy_cmb_expdb64_pages = 0; + idev->phy_cmb_expdb128_pages = 0; + idev->phy_cmb_expdb256_pages = 0; + idev->phy_cmb_expdb512_pages = 0; + idev->cmb_npages = 0; + return; + } + + for (i = 0; i < num_regions; i++) { + /* check that this region's length matches the first region's, + * to keep the implementation simple + */ + if (le32_to_cpu(ident->cmb_layout.region[i].length) != + length_reg0) + continue; + + offset = le32_to_cpu(ident->cmb_layout.region[i].offset); + + switch (ident->cmb_layout.region[i].cmb_type) { + case IONIC_CMB_TYPE_DEVMEM: + idev->phy_cmb_pages = bar->bus_addr + offset; + idev->cmb_npages = + (length_reg0 << IONIC_CMB_SHIFT_64K) / PAGE_SIZE; + dev_dbg(dev, "regular cmb mapping: bar->bus_addr %pa region[%d].length %u\n", + &bar->bus_addr, i, length); + dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n", + &idev->phy_cmb_pages, idev->cmb_npages); + break; + + case IONIC_CMB_TYPE_EXPDB64: + idev->phy_cmb_expdb64_pages = + bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K); + dev_dbg(dev, "idev->phy_cmb_expdb64_pages %pad\n", + &idev->phy_cmb_expdb64_pages); + break; + + case IONIC_CMB_TYPE_EXPDB128: + idev->phy_cmb_expdb128_pages = + bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K); + dev_dbg(dev, "idev->phy_cmb_expdb128_pages %pad\n", + &idev->phy_cmb_expdb128_pages); + break; + + case IONIC_CMB_TYPE_EXPDB256: + idev->phy_cmb_expdb256_pages = + bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K); + dev_dbg(dev, "idev->phy_cmb_expdb256_pages %pad\n", + &idev->phy_cmb_expdb256_pages); + break; + + case IONIC_CMB_TYPE_EXPDB512: + idev->phy_cmb_expdb512_pages = + bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K); + dev_dbg(dev, "idev->phy_cmb_expdb512_pages %pad\n", + &idev->phy_cmb_expdb512_pages); + break; + + default: + dev_warn(dev, "[%d] Invalid cmb_type (%d)\n", + i, ident->cmb_layout.region[i].cmb_type); + break; + } + } +} + +static void ionic_map_classic_cmb(struct ionic *ionic) +{ + struct ionic_dev_bar *bar = ionic->bars; + struct ionic_dev *idev = &ionic->idev; + struct device *dev = ionic->dev; + int sz; + + bar += 2; + /* 
classic CMB mapping */ + idev->phy_cmb_pages = bar->bus_addr; + idev->cmb_npages = bar->len / PAGE_SIZE; + dev_dbg(dev, "classic cmb mapping: bar->bus_addr %pa bar->len %lu\n", + &bar->bus_addr, bar->len); + dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n", + &idev->phy_cmb_pages, idev->cmb_npages); + + sz = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long); + idev->cmb_inuse = kzalloc(sz, GFP_KERNEL); + if (!idev->cmb_inuse) { + idev->phy_cmb_pages = 0; + idev->cmb_npages = 0; + } +} + +void ionic_map_cmb(struct ionic *ionic) +{ + struct pci_dev *pdev = ionic->pdev; + struct device *dev = ionic->dev; + + if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM)) { + dev_dbg(dev, "No CMB, disabling\n"); + return; + } + + if (ionic->ident.dev.capabilities & cpu_to_le64(IONIC_DEV_CAP_DISC_CMB)) + ionic_map_disc_cmb(ionic); + else + ionic_map_classic_cmb(ionic); +} + int ionic_dev_setup(struct ionic *ionic) { struct ionic_dev_bar *bar = ionic->bars; unsigned int num_bars = ionic->num_bars; struct ionic_dev *idev = &ionic->idev; struct device *dev = ionic->dev; - int size; u32 sig; int err; @@ -255,16 +443,11 @@ int ionic_dev_setup(struct ionic *ionic) mutex_init(&idev->cmb_inuse_lock); if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) { idev->cmb_inuse = NULL; + idev->phy_cmb_pages = 0; + idev->cmb_npages = 0; return 0; } - idev->phy_cmb_pages = bar->bus_addr; - idev->cmb_npages = bar->len / PAGE_SIZE; - size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long); - idev->cmb_inuse = kzalloc(size, GFP_KERNEL); - if (!idev->cmb_inuse) - dev_warn(dev, "No memory for CMB, disabling\n"); - return 0; } @@ -277,6 +460,11 @@ void ionic_dev_teardown(struct ionic *ionic) idev->phy_cmb_pages = 0; idev->cmb_npages = 0; + idev->phy_cmb_expdb64_pages = 0; + idev->phy_cmb_expdb128_pages = 0; + idev->phy_cmb_expdb256_pages = 0; + idev->phy_cmb_expdb512_pages = 0; + if (ionic->wq) { destroy_workqueue(ionic->wq); ionic->wq = NULL; @@ -698,28 +886,79 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq, ionic_dev_cmd_go(idev, &cmd); } +void ionic_dev_cmd_discover_cmb(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .discover_cmb.opcode = IONIC_CMD_DISCOVER_CMB, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + int ionic_db_page_num(struct ionic_lif *lif, int pid) { return (lif->hw_index * lif->dbid_count) + pid; } -int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order) +int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, + int order, u8 stride_log2, bool *expdb) { struct ionic_dev *idev = &lif->ionic->idev; - int ret; + void __iomem *nonexpdb_pgptr; + phys_addr_t nonexpdb_pgaddr; + int i, idx; mutex_lock(&idev->cmb_inuse_lock); - ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order); + idx = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order); mutex_unlock(&idev->cmb_inuse_lock); - if (ret < 0) - return ret; + if (idx < 0) + return idx; + + *pgid = (u32)idx; + + if (idev->phy_cmb_expdb64_pages && + stride_log2 == IONIC_EXPDB_64B_WQE_LG2) { + *pgaddr = idev->phy_cmb_expdb64_pages + idx * PAGE_SIZE; + if (expdb) + *expdb = true; + } else if (idev->phy_cmb_expdb128_pages && + stride_log2 == IONIC_EXPDB_128B_WQE_LG2) { + *pgaddr = idev->phy_cmb_expdb128_pages + idx * PAGE_SIZE; + if (expdb) + *expdb = true; + } else if (idev->phy_cmb_expdb256_pages && + stride_log2 == IONIC_EXPDB_256B_WQE_LG2) { + *pgaddr = idev->phy_cmb_expdb256_pages + idx * PAGE_SIZE; + if (expdb) + *expdb = true; + } else if 
(idev->phy_cmb_expdb512_pages && + stride_log2 == IONIC_EXPDB_512B_WQE_LG2) { + *pgaddr = idev->phy_cmb_expdb512_pages + idx * PAGE_SIZE; + if (expdb) + *expdb = true; + } else { + *pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE; + if (expdb) + *expdb = false; + } - *pgid = ret; - *pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE; + /* clear the requested CMB region, 1 PAGE_SIZE ioremap at a time */ + nonexpdb_pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE; + for (i = 0; i < (1 << order); i++) { + nonexpdb_pgptr = + ioremap_wc(nonexpdb_pgaddr + i * PAGE_SIZE, PAGE_SIZE); + if (!nonexpdb_pgptr) { + ionic_put_cmb(lif, *pgid, order); + return -ENOMEM; + } + memset_io(nonexpdb_pgptr, 0, PAGE_SIZE); + iounmap(nonexpdb_pgptr); + } return 0; } +EXPORT_SYMBOL_NS(ionic_get_cmb, "NET_IONIC"); void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order) { @@ -729,6 +968,7 @@ void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order) bitmap_release_region(idev->cmb_inuse, pgid, order); mutex_unlock(&idev->cmb_inuse_lock); } +EXPORT_SYMBOL_NS(ionic_put_cmb, "NET_IONIC"); int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, struct ionic_intr_info *intr, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index c8c710cfe70c..35566f97eaea 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -12,6 +12,7 @@ #include "ionic_if.h" #include "ionic_regs.h" +#include "ionic_api.h" #define IONIC_MAX_TX_DESC 8192 #define IONIC_MAX_RX_DESC 16384 @@ -34,6 +35,11 @@ #define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */ #define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 4) /* 4s */ +#define IONIC_EXPDB_64B_WQE_LG2 6 +#define IONIC_EXPDB_128B_WQE_LG2 7 +#define IONIC_EXPDB_256B_WQE_LG2 8 +#define IONIC_EXPDB_512B_WQE_LG2 9 + struct ionic_dev_bar { void __iomem *vaddr; phys_addr_t bus_addr; @@ -170,6 +176,11 @@ struct ionic_dev { dma_addr_t phy_cmb_pages; u32 cmb_npages; + dma_addr_t phy_cmb_expdb64_pages; + dma_addr_t phy_cmb_expdb128_pages; + dma_addr_t phy_cmb_expdb256_pages; + dma_addr_t phy_cmb_expdb512_pages; + u32 port_info_sz; struct ionic_port_info *port_info; dma_addr_t port_info_pa; @@ -273,19 +284,6 @@ struct ionic_queue { char name[IONIC_QUEUE_NAME_MAX_SZ]; } ____cacheline_aligned_in_smp; -#define IONIC_INTR_INDEX_NOT_ASSIGNED -1 -#define IONIC_INTR_NAME_MAX_SZ 32 - -struct ionic_intr_info { - char name[IONIC_INTR_NAME_MAX_SZ]; - u64 rearm_count; - unsigned int index; - unsigned int vector; - u32 dim_coal_hw; - cpumask_var_t *affinity_mask; - struct irq_affinity_notify aff_notify; -}; - struct ionic_cq { struct ionic_lif *lif; struct ionic_queue *bound_q; @@ -363,8 +361,8 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq, int ionic_db_page_num(struct ionic_lif *lif, int pid); -int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order); -void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order); +void ionic_dev_cmd_discover_cmb(struct ionic_dev *idev); +void ionic_map_cmb(struct ionic *ionic); int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, struct ionic_intr_info *intr, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 9886cd66ce68..47559c909c8b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -56,6 +56,9 @@ enum ionic_cmd_opcode { IONIC_CMD_VF_SETATTR = 61, 
IONIC_CMD_VF_CTRL = 62, + /* CMB command */ + IONIC_CMD_DISCOVER_CMB = 80, + /* QoS commands */ IONIC_CMD_QOS_CLASS_IDENTIFY = 240, IONIC_CMD_QOS_CLASS_INIT = 241, @@ -269,9 +272,11 @@ union ionic_drv_identity { /** * enum ionic_dev_capability - Device capabilities * @IONIC_DEV_CAP_VF_CTRL: Device supports VF ctrl operations + * @IONIC_DEV_CAP_DISC_CMB: Device supports CMB discovery operations */ enum ionic_dev_capability { IONIC_DEV_CAP_VF_CTRL = BIT(0), + IONIC_DEV_CAP_DISC_CMB = BIT(1), }; /** @@ -395,6 +400,7 @@ enum ionic_logical_qtype { * @IONIC_Q_F_4X_DESC: Quadruple main descriptor size * @IONIC_Q_F_4X_CQ_DESC: Quadruple cq descriptor size * @IONIC_Q_F_4X_SG_DESC: Quadruple sg descriptor size + * @IONIC_QIDENT_F_EXPDB: Queue supports express doorbell */ enum ionic_q_feature { IONIC_QIDENT_F_CQ = BIT_ULL(0), @@ -407,6 +413,7 @@ enum ionic_q_feature { IONIC_Q_F_4X_DESC = BIT_ULL(7), IONIC_Q_F_4X_CQ_DESC = BIT_ULL(8), IONIC_Q_F_4X_SG_DESC = BIT_ULL(9), + IONIC_QIDENT_F_EXPDB = BIT_ULL(10), }; /** @@ -495,6 +502,16 @@ union ionic_lif_config { }; /** + * enum ionic_lif_rdma_cap_stats - LIF stat type + * @IONIC_LIF_RDMA_STAT_GLOBAL: Global stats + * @IONIC_LIF_RDMA_STAT_QP: Queue pair stats + */ +enum ionic_lif_rdma_cap_stats { + IONIC_LIF_RDMA_STAT_GLOBAL = BIT(0), + IONIC_LIF_RDMA_STAT_QP = BIT(1), +}; + +/** * struct ionic_lif_identity - LIF identity information (type-specific) * * @capabilities: LIF capabilities @@ -513,10 +530,10 @@ union ionic_lif_config { * @eth.config: LIF config struct with features, mtu, mac, q counts * * @rdma: RDMA identify structure - * @rdma.version: RDMA version of opcodes and queue descriptors + * @rdma.version: RDMA capability version * @rdma.qp_opcodes: Number of RDMA queue pair opcodes supported * @rdma.admin_opcodes: Number of RDMA admin opcodes supported - * @rdma.rsvd: reserved byte(s) + * @rdma.minor_version: RDMA capability minor version * @rdma.npts_per_lif: Page table size per LIF * @rdma.nmrs_per_lif: Number of memory regions per LIF * @rdma.nahs_per_lif: Number of address handles per LIF @@ -526,12 +543,17 @@ union ionic_lif_config { * @rdma.rrq_stride: Remote RQ work request stride * @rdma.rsq_stride: Remote SQ work request stride * @rdma.dcqcn_profiles: Number of DCQCN profiles - * @rdma.rsvd_dimensions: reserved byte(s) + * @rdma.udma_shift: Log2 number of queues per queue group + * @rdma.rsvd_dimensions: Reserved byte + * @rdma.page_size_cap: Supported page sizes * @rdma.aq_qtype: RDMA Admin Qtype * @rdma.sq_qtype: RDMA Send Qtype * @rdma.rq_qtype: RDMA Receive Qtype * @rdma.cq_qtype: RDMA Completion Qtype * @rdma.eq_qtype: RDMA Event Qtype + * @rdma.stats_type: Supported statistics type + * (enum ionic_lif_rdma_cap_stats) + * @rdma.rsvd1: Reserved byte(s) * @words: word access to struct contents */ union ionic_lif_identity { @@ -557,7 +579,7 @@ union ionic_lif_identity { u8 version; u8 qp_opcodes; u8 admin_opcodes; - u8 rsvd; + u8 minor_version; __le32 npts_per_lif; __le32 nmrs_per_lif; __le32 nahs_per_lif; @@ -567,12 +589,16 @@ union ionic_lif_identity { u8 rrq_stride; u8 rsq_stride; u8 dcqcn_profiles; - u8 rsvd_dimensions[10]; + u8 udma_shift; + u8 rsvd_dimensions; + __le64 page_size_cap; struct ionic_lif_logical_qtype aq_qtype; struct ionic_lif_logical_qtype sq_qtype; struct ionic_lif_logical_qtype rq_qtype; struct ionic_lif_logical_qtype cq_qtype; struct ionic_lif_logical_qtype eq_qtype; + __le16 stats_type; + u8 rsvd1[162]; } __packed rdma; } __packed; __le32 words[478]; @@ -2195,6 +2221,80 @@ struct ionic_vf_ctrl_comp { }; /** + * 
struct ionic_discover_cmb_cmd - CMB discovery command + * @opcode: Opcode for the command + * @rsvd: Reserved bytes + */ +struct ionic_discover_cmb_cmd { + u8 opcode; + u8 rsvd[63]; +}; + +/** + * struct ionic_discover_cmb_comp - CMB discover command completion. + * @status: Status of the command (enum ionic_status_code) + * @rsvd: Reserved bytes + */ +struct ionic_discover_cmb_comp { + u8 status; + u8 rsvd[15]; +}; + +#define IONIC_MAX_CMB_REGIONS 16 +#define IONIC_CMB_SHIFT_64K 16 + +enum ionic_cmb_type { + IONIC_CMB_TYPE_DEVMEM = 0, + IONIC_CMB_TYPE_EXPDB64 = 1, + IONIC_CMB_TYPE_EXPDB128 = 2, + IONIC_CMB_TYPE_EXPDB256 = 3, + IONIC_CMB_TYPE_EXPDB512 = 4, +}; + +/** + * union ionic_cmb_region - Configuration for CMB region + * @bar_num: CMB mapping number from FW + * @cmb_type: Type of CMB this region describes (enum ionic_cmb_type) + * @rsvd: Reserved + * @offset: Offset within BAR in 64KB pages + * @length: Length of the CMB region + * @words: 32-bit words for direct access to the entire region + */ +union ionic_cmb_region { + struct { + u8 bar_num; + u8 cmb_type; + u8 rsvd[6]; + __le32 offset; + __le32 length; + } __packed; + __le32 words[4]; +}; + +/** + * union ionic_discover_cmb_identity - CMB layout identity structure + * @num_regions: Number of CMB regions, up to 16 + * @flags: Feature and capability bits (0 for express + * doorbell, 1 for 4K alignment indicator, + * 31-24 for version information) + * @region: CMB mappings region, entry 0 for regular + * mapping, entries 1-7 for WQE sizes 64, + * 128, 256, 512, 1024, 2048 and 4096 bytes + * @words: Full union buffer size + */ +union ionic_discover_cmb_identity { + struct { + __le32 num_regions; +#define IONIC_CMB_FLAG_EXPDB BIT(0) +#define IONIC_CMB_FLAG_4KALIGN BIT(1) +#define IONIC_CMB_FLAG_VERSION 0xff000000 + __le32 flags; + union ionic_cmb_region region[IONIC_MAX_CMB_REGIONS]; + }; + __le32 words[478]; +}; + +/** * struct ionic_qos_identify_cmd - QoS identify command * @opcode: opcode * @ver: Highest version of identify supported by driver @@ -3054,6 +3154,8 @@ union ionic_dev_cmd { struct ionic_vf_getattr_cmd vf_getattr; struct ionic_vf_ctrl_cmd vf_ctrl; + struct ionic_discover_cmb_cmd discover_cmb; + struct ionic_lif_identify_cmd lif_identify; struct ionic_lif_init_cmd lif_init; struct ionic_lif_reset_cmd lif_reset; @@ -3093,6 +3195,8 @@ union ionic_dev_cmd_comp { struct ionic_vf_getattr_comp vf_getattr; struct ionic_vf_ctrl_comp vf_ctrl; + struct ionic_discover_cmb_comp discover_cmb; + struct ionic_lif_identify_comp lif_identify; struct ionic_lif_init_comp lif_init; ionic_lif_reset_comp lif_reset; @@ -3234,6 +3338,9 @@ union ionic_adminq_comp { #define IONIC_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00 #define IONIC_BAR0_INTR_STATUS_OFFSET 0x1000 #define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000 + +/* BAR2 */ +#define IONIC_BAR2_CMB_ENTRY_SIZE 0x800000 #define IONIC_DEV_CMD_DONE 0x00000001 #define IONIC_ASIC_TYPE_NONE 0 @@ -3287,6 +3394,7 @@ struct ionic_identity { union ionic_port_identity port; union ionic_qos_identity qos; union ionic_q_identity txq; + union ionic_discover_cmb_identity cmb_layout; }; #endif /* _IONIC_IF_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 48cb5d30b5f6..b28966ae50c2 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -19,6 +19,7 @@ #include "ionic_bus.h" #include "ionic_dev.h" #include "ionic_lif.h" +#include "ionic_aux.h" #include "ionic_txrx.h" #include 
"ionic_ethtool.h" #include "ionic_debugfs.h" @@ -243,29 +244,36 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) 0, intr->name, &qcq->napi); } -static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) +int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) { struct ionic *ionic = lif->ionic; - int index; + int index, err; index = find_first_zero_bit(ionic->intrs, ionic->nintrs); - if (index == ionic->nintrs) { - netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n", - __func__, index, ionic->nintrs); + if (index == ionic->nintrs) return -ENOSPC; - } set_bit(index, ionic->intrs); ionic_intr_init(&ionic->idev, intr, index); + err = ionic_bus_get_irq(ionic, intr->index); + if (err < 0) { + clear_bit(index, ionic->intrs); + return err; + } + + intr->vector = err; + return 0; } +EXPORT_SYMBOL_NS(ionic_intr_alloc, "NET_IONIC"); -static void ionic_intr_free(struct ionic *ionic, int index) +void ionic_intr_free(struct ionic_lif *lif, int index) { - if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs) - clear_bit(index, ionic->intrs); + if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs) + clear_bit(index, lif->ionic->intrs); } +EXPORT_SYMBOL_NS(ionic_intr_free, "NET_IONIC"); static void ionic_irq_aff_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) @@ -400,7 +408,7 @@ static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq) irq_set_affinity_hint(qcq->intr.vector, NULL); devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi); qcq->intr.vector = 0; - ionic_intr_free(lif->ionic, qcq->intr.index); + ionic_intr_free(lif, qcq->intr.index); qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; } @@ -510,13 +518,6 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc goto err_out; } - err = ionic_bus_get_irq(lif->ionic, qcq->intr.index); - if (err < 0) { - netdev_warn(lif->netdev, "no vector for %s: %d\n", - qcq->q.name, err); - goto err_out_free_intr; - } - qcq->intr.vector = err; ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index, IONIC_INTR_MASK_SET); @@ -545,7 +546,7 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc return 0; err_out_free_intr: - ionic_intr_free(lif->ionic, qcq->intr.index); + ionic_intr_free(lif, qcq->intr.index); err_out: return err; } @@ -672,7 +673,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE); err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa, - new->cmb_order); + new->cmb_order, 0, NULL); if (err) { netdev_err(lif->netdev, "Cannot allocate queue order %d from cmb: err %d\n", @@ -740,7 +741,7 @@ err_out_free_q: err_out_free_irq: if (flags & IONIC_QCQ_F_INTR) { devm_free_irq(dev, new->intr.vector, &new->napi); - ionic_intr_free(lif->ionic, new->intr.index); + ionic_intr_free(lif, new->intr.index); } err_out_free_page_pool: page_pool_destroy(new->q.page_pool); @@ -3293,6 +3294,7 @@ int ionic_lif_alloc(struct ionic *ionic) mutex_init(&lif->queue_lock); mutex_init(&lif->config_lock); + mutex_init(&lif->adev_lock); spin_lock_init(&lif->adminq_lock); @@ -3349,6 +3351,7 @@ err_out_free_lif_info: lif->info = NULL; lif->info_pa = 0; err_out_free_mutex: + mutex_destroy(&lif->adev_lock); mutex_destroy(&lif->config_lock); mutex_destroy(&lif->queue_lock); err_out_free_netdev: @@ -3384,6 +3387,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif) 
netif_device_detach(lif->netdev); + ionic_auxbus_unregister(ionic->lif); mutex_lock(&lif->queue_lock); if (test_bit(IONIC_LIF_F_UP, lif->state)) { dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); @@ -3446,6 +3450,8 @@ int ionic_restart_lif(struct ionic_lif *lif) netif_device_attach(lif->netdev); ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE); + ionic_auxbus_register(ionic->lif); + return 0; err_txrx_free: @@ -3528,6 +3534,7 @@ void ionic_lif_free(struct ionic_lif *lif) mutex_destroy(&lif->config_lock); mutex_destroy(&lif->queue_lock); + mutex_destroy(&lif->adev_lock); /* free netdev & lif */ ionic_debugfs_del_lif(lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index e01756fb7fdd..43bdd0fb8733 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -10,6 +10,7 @@ #include <linux/dim.h> #include <linux/pci.h> #include "ionic_rx_filter.h" +#include "ionic_api.h" #define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */ #define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */ @@ -225,6 +226,8 @@ struct ionic_lif { dma_addr_t info_pa; u32 info_sz; struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX]; + struct ionic_aux_dev *ionic_adev; + struct mutex adev_lock; /* lock for aux_dev actions */ u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE]; u8 *rss_ind_tbl; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 0e60a6bef99a..14dc055be3e9 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -72,7 +72,7 @@ static const char *ionic_error_to_str(enum ionic_status_code code) } } -static int ionic_error_to_errno(enum ionic_status_code code) +int ionic_error_to_errno(enum ionic_status_code code) { switch (code) { case IONIC_RC_SUCCESS: @@ -114,6 +114,7 @@ static int ionic_error_to_errno(enum ionic_status_code code) return -EIO; } } +EXPORT_SYMBOL_NS(ionic_error_to_errno, "NET_IONIC"); static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode) { @@ -480,6 +481,7 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) { return __ionic_adminq_post_wait(lif, ctx, true); } +EXPORT_SYMBOL_NS(ionic_adminq_post_wait, "NET_IONIC"); int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index d7cdea8f604d..91e7b38143ea 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -4215,7 +4215,6 @@ static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev) struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); int err = 0; - pdev->error_state = pci_channel_io_normal; err = pci_enable_device(pdev); if (err) goto disconnect; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 53cdd36c4123..e051d8c7a28d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -3766,8 +3766,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev) struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; - pdev->error_state = pci_channel_io_normal; - err = pci_enable_device(pdev); if (err) return err; diff --git 
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 8903ae90afcb..d18734fe12e4 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4994,8 +4994,9 @@ static int rtl8169_resume(struct device *device)
        if (!device_may_wakeup(tp_to_dev(tp)))
                clk_prepare_enable(tp->clk);

-       /* Reportedly at least Asus X453MA truncates packets otherwise */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_37)
+       /* Some chip versions may truncate packets without this initialization */
+       if (tp->mac_version == RTL_GIGA_MAC_VER_37 ||
+           tp->mac_version == RTL_GIGA_MAC_VER_46)
                rtl_init_rxcfg(tp);

        return rtl8169_runtime_resume(device);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 9d3bd65b85ff..e2d7ce1a85e8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2211,15 +2211,35 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                skb_tx_timestamp(skb);
        }

-       /* Descriptor type must be set after all the above writes */
-       dma_wmb();
+
        if (num_tx_desc > 1) {
                desc->die_dt = DT_FEND;
                desc--;
+               /* When using multi-descriptors, DT_FEND needs to get written
+                * before DT_FSTART, but the compiler may reorder the memory
+                * writes in an attempt to optimize the code.
+                * Use a dma_wmb() barrier to make sure DT_FEND and DT_FSTART
+                * are written exactly in the order shown in the code.
+                * This is particularly important for cases where the DMA engine
+                * is already running when we are running this code. If the DMA
+                * sees DT_FSTART without the corresponding DT_FEND it will enter
+                * an error condition.
+                */
+               dma_wmb();
                desc->die_dt = DT_FSTART;
        } else {
+               /* Descriptor type must be set after all the above writes */
+               dma_wmb();
                desc->die_dt = DT_FSINGLE;
        }
+
+       /* Before ringing the doorbell we need to make sure that the latest
+        * writes have been committed to memory, otherwise it could delay
+        * things until the doorbell is rung again.
+        * This replaces the read operation mentioned in the HW manuals.
+        */
+       dma_wmb();
        ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);

        priv->cur_tx[q] += num_tx_desc;
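
The ravb hunk above relies on two distinct orderings: the end-of-frame marker must reach memory before the start marker, and all descriptor writes must reach memory before the doorbell register write. A minimal sketch of that double-barrier shape, assuming stand-in types and values (ex_tx_desc, EX_DT_*, and ring_doorbell() are illustrative, not ravb's definitions):

/* A minimal sketch, not the driver's code: publish a two-descriptor frame
 * so the DMA engine never observes a start marker without its end marker.
 */
#include <asm/barrier.h>

enum { EX_DT_FEND = 1, EX_DT_FSTART = 2 };      /* illustrative type codes */

struct ex_tx_desc {
        unsigned char die_dt;                   /* descriptor type field */
};

static void example_publish_multi_desc(struct ex_tx_desc *first,
                                       struct ex_tx_desc *last,
                                       void (*ring_doorbell)(void))
{
        /* Mark the end of the frame first... */
        last->die_dt = EX_DT_FEND;

        /* ...and order it before the start marker, so a running DMA engine
         * can never see EX_DT_FSTART without its matching EX_DT_FEND.
         */
        dma_wmb();
        first->die_dt = EX_DT_FSTART;

        /* Commit all descriptor writes before the doorbell register write. */
        dma_wmb();
        ring_doorbell();
}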
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 5a14d94163b1..e8fdbb62d872 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -1258,9 +1258,6 @@ out:

 /* For simplicity and reliability, we always require a slot reset and try to
  * reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
  */
 const struct pci_error_handlers efx_err_handlers = {
        .error_detected = efx_io_error_detected,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index d19fbf8732ff..6ea41f6c9ef5 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -3127,9 +3127,6 @@ out:

 /* For simplicity and reliability, we always require a slot reset and try to
  * reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
 */
 static const struct pci_error_handlers ef4_err_handlers = {
        .error_detected = ef4_io_error_detected,
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index a0966f879664..35036cc902fe 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -1285,9 +1285,6 @@ out:

 /* For simplicity and reliability, we always require a slot reset and try to
  * reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
 */
 const struct pci_error_handlers efx_siena_err_handlers = {
        .error_detected = efx_io_error_detected,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 51ea0caf16c1..0786816e05f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1446,14 +1446,15 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
                }
        } else {
                if (bsp_priv->clk_enabled) {
+                       if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) {
+                               bsp_priv->ops->set_clock_selection(bsp_priv,
+                                       bsp_priv->clock_input, false);
+                       }
+
                        clk_bulk_disable_unprepare(bsp_priv->num_clks,
                                                   bsp_priv->clks);
                        clk_disable_unprepare(bsp_priv->clk_phy);

-                       if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
-                               bsp_priv->ops->set_clock_selection(bsp_priv,
-                                             bsp_priv->clock_input, false);
-
                        bsp_priv->clk_enabled = false;
                }
        }
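
The dwmac-rk hunk above moves the set_clock_selection() callback ahead of the clock-gating calls; the callback performs register writes that presumably need those clocks still running. A minimal sketch of the resulting disable-path ordering, with hypothetical types standing in for the driver's (ex_gmac and its members are illustrative):

/* A minimal sketch, not the driver's code: re-route clock selection while
 * the register block is still clocked, and only then gate the clocks.
 */
#include <linux/clk.h>

struct ex_gmac {
        struct clk_bulk_data *clks;
        int num_clks;
        struct clk *clk_phy;
        void (*set_clock_selection)(struct ex_gmac *gmac, bool input, bool on);
        bool clock_input;
};

static void example_clk_disable_path(struct ex_gmac *gmac)
{
        /* Register writes first: after clk_bulk_disable_unprepare() the
         * clocks feeding this register block would already be gated.
         */
        if (gmac->set_clock_selection)
                gmac->set_clock_selection(gmac, gmac->clock_input, false);

        /* Only then gate the clocks. */
        clk_bulk_disable_unprepare(gmac->num_clks, gmac->clks);
        clk_disable_unprepare(gmac->clk_phy);
}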
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 59d6ab989c55..8ffbfaa3ab18 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -163,7 +163,9 @@ struct am65_cpts {
        struct device_node *clk_mux_np;
        struct clk *refclk;
        u32 refclk_freq;
-       struct list_head events;
+       /* separate lists to handle TX and RX timestamp independently */
+       struct list_head events_tx;
+       struct list_head events_rx;
        struct list_head pool;
        struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
        spinlock_t lock; /* protects events lists */
@@ -227,6 +229,24 @@ static void am65_cpts_disable(struct am65_cpts *cpts)
        am65_cpts_write32(cpts, 0, int_enable);
 }

+static int am65_cpts_purge_event_list(struct am65_cpts *cpts,
+                                     struct list_head *events)
+{
+       struct list_head *this, *next;
+       struct am65_cpts_event *event;
+       int removed = 0;
+
+       list_for_each_safe(this, next, events) {
+               event = list_entry(this, struct am65_cpts_event, list);
+               if (time_after(jiffies, event->tmo)) {
+                       list_del_init(&event->list);
+                       list_add(&event->list, &cpts->pool);
+                       ++removed;
+               }
+       }
+       return removed;
+}
+
 static int am65_cpts_event_get_port(struct am65_cpts_event *event)
 {
        return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
@@ -239,20 +259,12 @@ static int am65_cpts_event_get_type(struct am65_cpts_event *event)
                AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
 }

-static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
+static int am65_cpts_purge_events(struct am65_cpts *cpts)
 {
-       struct list_head *this, *next;
-       struct am65_cpts_event *event;
        int removed = 0;

-       list_for_each_safe(this, next, &cpts->events) {
-               event = list_entry(this, struct am65_cpts_event, list);
-               if (time_after(jiffies, event->tmo)) {
-                       list_del_init(&event->list);
-                       list_add(&event->list, &cpts->pool);
-                       ++removed;
-               }
-       }
+       removed += am65_cpts_purge_event_list(cpts, &cpts->events_tx);
+       removed += am65_cpts_purge_event_list(cpts, &cpts->events_rx);

        if (removed)
                dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
@@ -287,7 +299,7 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
                                         struct am65_cpts_event, list);

                if (!event) {
-                       if (am65_cpts_cpts_purge_events(cpts)) {
+                       if (am65_cpts_purge_events(cpts)) {
                                dev_err(cpts->dev, "cpts: event pool empty\n");
                                ret = -1;
                                goto out;
@@ -306,11 +318,21 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
                                cpts->timestamp);
                        break;
                case AM65_CPTS_EV_RX:
+                       event->tmo = jiffies +
+                               msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
+
+                       list_move_tail(&event->list, &cpts->events_rx);
+
+                       dev_dbg(cpts->dev,
+                               "AM65_CPTS_EV_RX e1:%08x e2:%08x t:%lld\n",
+                               event->event1, event->event2,
+                               event->timestamp);
+                       break;
                case AM65_CPTS_EV_TX:
                        event->tmo = jiffies +
                                msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);

-                       list_move_tail(&event->list, &cpts->events);
+                       list_move_tail(&event->list, &cpts->events_tx);

                        dev_dbg(cpts->dev,
                                "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
@@ -828,7 +850,7 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
        return found;
 }

-static void am65_cpts_find_ts(struct am65_cpts *cpts)
+static void am65_cpts_find_tx_ts(struct am65_cpts *cpts)
 {
        struct am65_cpts_event *event;
        struct list_head *this, *next;
@@ -837,7 +859,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
        LIST_HEAD(events);

        spin_lock_irqsave(&cpts->lock, flags);
-       list_splice_init(&cpts->events, &events);
+       list_splice_init(&cpts->events_tx, &events);
        spin_unlock_irqrestore(&cpts->lock, flags);

        list_for_each_safe(this, next, &events) {
@@ -850,7 +872,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
        }

        spin_lock_irqsave(&cpts->lock, flags);
-       list_splice_tail(&events, &cpts->events);
+       list_splice_tail(&events, &cpts->events_tx);
        list_splice_tail(&events_free, &cpts->pool);
        spin_unlock_irqrestore(&cpts->lock, flags);
 }
@@ -861,7 +883,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
        unsigned long flags;
        long delay = -1;

-       am65_cpts_find_ts(cpts);
+       am65_cpts_find_tx_ts(cpts);

        spin_lock_irqsave(&cpts->txq.lock, flags);
        if (!skb_queue_empty(&cpts->txq))
@@ -905,7 +927,7 @@ static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid)
        spin_lock_irqsave(&cpts->lock, flags);
        __am65_cpts_fifo_read(cpts);

-       list_for_each_safe(this, next, &cpts->events) {
+       list_for_each_safe(this, next, &cpts->events_rx) {
                event = list_entry(this, struct am65_cpts_event, list);
                if (time_after(jiffies, event->tmo)) {
                        list_move(&event->list, &cpts->pool);
@@ -1155,7 +1177,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
                return ERR_PTR(ret);

        mutex_init(&cpts->ptp_clk_lock);
-       INIT_LIST_HEAD(&cpts->events);
+       INIT_LIST_HEAD(&cpts->events_tx);
+       INIT_LIST_HEAD(&cpts->events_rx);
        INIT_LIST_HEAD(&cpts->pool);
        spin_lock_init(&cpts->lock);
        skb_queue_head_init(&cpts->txq);
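
The new am65_cpts_purge_event_list() above factors out a common reclamation idiom: walk a list with a deletion-safe iterator and recycle any entry whose jiffies deadline has expired back into a free pool. A minimal self-contained sketch of the idiom (ex_event and example_purge are illustrative, not the driver's symbols):

/* A minimal sketch, not the driver's code: recycle expired entries from an
 * active list into a free pool, using the deletion-safe list iterator.
 */
#include <linux/jiffies.h>
#include <linux/list.h>

struct ex_event {
        struct list_head list;
        unsigned long tmo;              /* expiry deadline, in jiffies */
};

static int example_purge(struct list_head *events, struct list_head *pool)
{
        struct ex_event *event, *tmp;
        int removed = 0;

        /* The _safe iterator allows moving entries mid-walk. */
        list_for_each_entry_safe(event, tmp, events, list) {
                if (time_after(jiffies, event->tmo)) {
                        /* Expired: move the entry onto the free pool. */
                        list_move(&event->list, pool);
                        removed++;
                }
        }

        return removed;
}

list_move() is the del-plus-add pair the driver spells out with list_del_init()/list_add(); either way the entry ends up on the pool list, ready for reuse.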
